Importing rustc-1.49.0

Bug: 176888219
Change-Id: Ib0805d37e7b485cd420bbff8a8b000cf87e7ede0
diff --git a/compiler/rustc_apfloat/src/ieee.rs b/compiler/rustc_apfloat/src/ieee.rs
index e3d941c..71bcb8f 100644
--- a/compiler/rustc_apfloat/src/ieee.rs
+++ b/compiler/rustc_apfloat/src/ieee.rs
@@ -1511,11 +1511,16 @@
                 sig::set_bit(&mut r.sig, T::PRECISION - 1);
             }
 
-            // gcc forces the Quiet bit on, which means (float)(double)(float_sNan)
-            // does not give you back the same bits. This is dubious, and we
-            // don't currently do it. You're really supposed to get
-            // an invalid operation signal at runtime, but nobody does that.
-            status = Status::OK;
+            // Converting an sNaN creates a qNaN and raises an exception (invalid op).
+            // This also guarantees that a sNaN does not become Inf on a truncation
+            // that loses all payload bits.
+            if self.is_signaling() {
+                // Quiet signaling NaN.
+                sig::set_bit(&mut r.sig, T::QNAN_BIT);
+                status = Status::INVALID_OP;
+            } else {
+                status = Status::OK;
+            }
         } else {
             *loses_info = false;
             status = Status::OK;
diff --git a/compiler/rustc_apfloat/tests/ieee.rs b/compiler/rustc_apfloat/tests/ieee.rs
index 2d8bb7d..63d925c 100644
--- a/compiler/rustc_apfloat/tests/ieee.rs
+++ b/compiler/rustc_apfloat/tests/ieee.rs
@@ -567,6 +567,17 @@
 }
 
 #[test]
+fn issue_69532() {
+    let f = Double::from_bits(0x7FF0_0000_0000_0001u64 as u128);
+    let mut loses_info = false;
+    let sta = f.convert(&mut loses_info);
+    let r: Single = sta.value;
+    assert!(loses_info);
+    assert!(r.is_nan());
+    assert_eq!(sta.status, Status::INVALID_OP);
+}
+
+#[test]
 fn min_num() {
     let f1 = Double::from_f64(1.0);
     let f2 = Double::from_f64(2.0);
@@ -1492,27 +1503,32 @@
     assert_eq!(4294967295.0, test.to_f64());
     assert!(!loses_info);
 
-    let test = Single::snan(None);
-    let x87_snan = X87DoubleExtended::snan(None);
-    let test: X87DoubleExtended = test.convert(&mut loses_info).value;
-    assert!(test.bitwise_eq(x87_snan));
-    assert!(!loses_info);
-
     let test = Single::qnan(None);
     let x87_qnan = X87DoubleExtended::qnan(None);
     let test: X87DoubleExtended = test.convert(&mut loses_info).value;
     assert!(test.bitwise_eq(x87_qnan));
     assert!(!loses_info);
 
-    let test = X87DoubleExtended::snan(None);
-    let test: X87DoubleExtended = test.convert(&mut loses_info).value;
-    assert!(test.bitwise_eq(x87_snan));
+    let test = Single::snan(None);
+    let sta = test.convert(&mut loses_info);
+    let test: X87DoubleExtended = sta.value;
+    assert!(test.is_nan());
+    assert!(!test.is_signaling());
     assert!(!loses_info);
+    assert_eq!(sta.status, Status::INVALID_OP);
 
     let test = X87DoubleExtended::qnan(None);
     let test: X87DoubleExtended = test.convert(&mut loses_info).value;
     assert!(test.bitwise_eq(x87_qnan));
     assert!(!loses_info);
+
+    let test = X87DoubleExtended::snan(None);
+    let sta = test.convert(&mut loses_info);
+    let test: X87DoubleExtended = sta.value;
+    assert!(test.is_nan());
+    assert!(!test.is_signaling());
+    assert!(!loses_info);
+    assert_eq!(sta.status, Status::INVALID_OP);
 }
 
 #[test]
diff --git a/compiler/rustc_arena/Cargo.toml b/compiler/rustc_arena/Cargo.toml
index 41701f3..29caa85 100644
--- a/compiler/rustc_arena/Cargo.toml
+++ b/compiler/rustc_arena/Cargo.toml
@@ -5,5 +5,4 @@
 edition = "2018"
 
 [dependencies]
-rustc_data_structures = { path = "../rustc_data_structures" }
 smallvec = { version = "1.0", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index 166f7f5..b76e1e7 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -16,7 +16,6 @@
 #![feature(maybe_uninit_slice)]
 #![cfg_attr(test, feature(test))]
 
-use rustc_data_structures::cold_path;
 use smallvec::SmallVec;
 
 use std::alloc::Layout;
@@ -27,6 +26,12 @@
 use std::ptr;
 use std::slice;
 
+#[inline(never)]
+#[cold]
+pub fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
+    f()
+}
+
 /// An arena that can hold objects of only one type.
 pub struct TypedArena<T> {
     /// A pointer to the next object to be allocated.
@@ -212,16 +217,18 @@
             let mut chunks = self.chunks.borrow_mut();
             let mut new_cap;
             if let Some(last_chunk) = chunks.last_mut() {
-                let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
-                last_chunk.entries = used_bytes / mem::size_of::<T>();
+                // If a type is `!needs_drop`, we don't need to keep track of how many elements
+                // the chunk stores - the field will be ignored anyway.
+                if mem::needs_drop::<T>() {
+                    let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
+                    last_chunk.entries = used_bytes / mem::size_of::<T>();
+                }
 
                 // If the previous chunk's len is less than HUGE_PAGE
                 // bytes, then this chunk will be at least double the previous
                 // chunk's size.
-                new_cap = last_chunk.storage.len();
-                if new_cap < HUGE_PAGE / elem_size {
-                    new_cap = new_cap.checked_mul(2).unwrap();
-                }
+                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
+                new_cap *= 2;
             } else {
                 new_cap = PAGE / elem_size;
             }
@@ -338,10 +345,8 @@
                 // If the previous chunk's len is less than HUGE_PAGE
                 // bytes, then this chunk will be at least double the previous
                 // chunk's size.
-                new_cap = last_chunk.storage.len();
-                if new_cap < HUGE_PAGE {
-                    new_cap = new_cap.checked_mul(2).unwrap();
-                }
+                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
+                new_cap *= 2;
             } else {
                 new_cap = PAGE;
             }
@@ -528,7 +533,7 @@
         ptr::write(mem, object);
         let result = &mut *mem;
         // Record the destructor after doing the allocation as that may panic
-        // and would cause `object`'s destuctor to run twice if it was recorded before
+        // and would cause `object`'s destructor to run twice if it was recorded before
         self.destructors
             .borrow_mut()
             .push(DropType { drop_fn: drop_for_type::<T>, obj: result as *mut T as *mut u8 });
@@ -555,12 +560,10 @@
         mem::forget(vec.drain(..));
 
         // Record the destructors after doing the allocation as that may panic
-        // and would cause `object`'s destuctor to run twice if it was recorded before
+        // and would cause `object`'s destructor to run twice if it was recorded before
         for i in 0..len {
-            destructors.push(DropType {
-                drop_fn: drop_for_type::<T>,
-                obj: start_ptr.offset(i as isize) as *mut u8,
-            });
+            destructors
+                .push(DropType { drop_fn: drop_for_type::<T>, obj: start_ptr.add(i) as *mut u8 });
         }
 
         slice::from_raw_parts_mut(start_ptr, len)
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index 95abf55..3e95372 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -24,9 +24,10 @@
 
 use crate::ptr::P;
 use crate::token::{self, CommentKind, DelimToken};
-use crate::tokenstream::{DelimSpan, TokenStream, TokenTree};
+use crate::tokenstream::{DelimSpan, LazyTokenStream, TokenStream, TokenTree};
 
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lrc;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_macros::HashStable_Generic;
@@ -96,7 +97,7 @@
     /// The segments in the path: the things separated by `::`.
     /// Global paths begin with `kw::PathRoot`.
     pub segments: Vec<PathSegment>,
-    pub tokens: Option<TokenStream>,
+    pub tokens: Option<LazyTokenStream>,
 }
 
 impl PartialEq<Symbol> for Path {
@@ -166,13 +167,6 @@
 }
 
 impl GenericArgs {
-    pub fn is_parenthesized(&self) -> bool {
-        match *self {
-            Parenthesized(..) => true,
-            _ => false,
-        }
-    }
-
     pub fn is_angle_bracketed(&self) -> bool {
         match *self {
             AngleBracketed(..) => true,
@@ -228,6 +222,15 @@
     Constraint(AssocTyConstraint),
 }
 
+impl AngleBracketedArg {
+    pub fn span(&self) -> Span {
+        match self {
+            AngleBracketedArg::Arg(arg) => arg.span(),
+            AngleBracketedArg::Constraint(constraint) => constraint.span,
+        }
+    }
+}
+
 impl Into<Option<P<GenericArgs>>> for AngleBracketedArgs {
     fn into(self) -> Option<P<GenericArgs>> {
         Some(P(GenericArgs::AngleBracketed(self)))
@@ -541,7 +544,7 @@
     /// Distinguishes between `unsafe { ... }` and `{ ... }`.
     pub rules: BlockCheckMode,
     pub span: Span,
-    pub tokens: Option<TokenStream>,
+    pub tokens: Option<LazyTokenStream>,
 }
 
 /// A match pattern.
@@ -552,7 +555,7 @@
     pub id: NodeId,
     pub kind: PatKind,
     pub span: Span,
-    pub tokens: Option<TokenStream>,
+    pub tokens: Option<LazyTokenStream>,
 }
 
 impl Pat {
@@ -856,13 +859,6 @@
         }
     }
 
-    pub fn is_shift(&self) -> bool {
-        match *self {
-            BinOpKind::Shl | BinOpKind::Shr => true,
-            _ => false,
-        }
-    }
-
     pub fn is_comparison(&self) -> bool {
         use BinOpKind::*;
         // Note for developers: please keep this as is;
@@ -872,11 +868,6 @@
             And | Or | Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Shl | Shr => false,
         }
     }
-
-    /// Returns `true` if the binary operator takes its arguments by value
-    pub fn is_by_value(&self) -> bool {
-        !self.is_comparison()
-    }
 }
 
 pub type BinOp = Spanned<BinOpKind>;
@@ -895,14 +886,6 @@
 }
 
 impl UnOp {
-    /// Returns `true` if the unary operator takes its argument by value
-    pub fn is_by_value(u: UnOp) -> bool {
-        match u {
-            UnOp::Neg | UnOp::Not => true,
-            _ => false,
-        }
-    }
-
     pub fn to_string(op: UnOp) -> &'static str {
         match op {
             UnOp::Deref => "*",
@@ -918,10 +901,17 @@
     pub id: NodeId,
     pub kind: StmtKind,
     pub span: Span,
-    pub tokens: Option<TokenStream>,
+    pub tokens: Option<LazyTokenStream>,
 }
 
 impl Stmt {
+    pub fn has_trailing_semicolon(&self) -> bool {
+        match &self.kind {
+            StmtKind::Semi(_) => true,
+            StmtKind::MacCall(mac) => matches!(mac.style, MacStmtStyle::Semicolon),
+            _ => false,
+        }
+    }
     pub fn add_trailing_semicolon(mut self) -> Self {
         self.kind = match self.kind {
             StmtKind::Expr(expr) => StmtKind::Semi(expr),
@@ -1066,12 +1056,12 @@
     pub kind: ExprKind,
     pub span: Span,
     pub attrs: AttrVec,
-    pub tokens: Option<TokenStream>,
+    pub tokens: Option<LazyTokenStream>,
 }
 
 // `Expr` is used a lot. Make sure it doesn't unintentionally get bigger.
 #[cfg(target_arch = "x86_64")]
-rustc_data_structures::static_assert_size!(Expr, 112);
+rustc_data_structures::static_assert_size!(Expr, 120);
 
 impl Expr {
     /// Returns `true` if this expression would be valid somewhere that expects a value;
@@ -1178,6 +1168,7 @@
         match self.kind {
             ExprKind::Box(_) => ExprPrecedence::Box,
             ExprKind::Array(_) => ExprPrecedence::Array,
+            ExprKind::ConstBlock(_) => ExprPrecedence::ConstBlock,
             ExprKind::Call(..) => ExprPrecedence::Call,
             ExprKind::MethodCall(..) => ExprPrecedence::MethodCall,
             ExprKind::Tup(_) => ExprPrecedence::Tup,
@@ -1228,11 +1219,23 @@
 }
 
 #[derive(Clone, Encodable, Decodable, Debug)]
+pub enum StructRest {
+    /// `..x`.
+    Base(P<Expr>),
+    /// `..`.
+    Rest(Span),
+    /// No trailing `..` or expression.
+    None,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
 pub enum ExprKind {
     /// A `box x` expression.
     Box(P<Expr>),
     /// An array (`[a, b, c, d]`)
     Array(Vec<P<Expr>>),
+    /// Allow anonymous constants from an inline `const` block
+    ConstBlock(AnonConst),
     /// A function call
     ///
     /// The first field resolves to the function itself,
@@ -1319,7 +1322,7 @@
     Field(P<Expr>, Ident),
     /// An indexing operation (e.g., `foo[2]`).
     Index(P<Expr>, P<Expr>),
-    /// A range (e.g., `1..2`, `1..`, `..2`, `1..=2`, `..=2`).
+    /// A range (e.g., `1..2`, `1..`, `..2`, `1..=2`, `..=2`; and `..` in destructuring assignment).
     Range(Option<P<Expr>>, Option<P<Expr>>, RangeLimits),
 
     /// Variable reference, possibly containing `::` and/or type
@@ -1347,9 +1350,8 @@
 
     /// A struct literal expression.
     ///
-    /// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. base}`,
-    /// where `base` is the `Option<Expr>`.
-    Struct(Path, Vec<Field>, Option<P<Expr>>),
+    /// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. rest}`.
+    Struct(Path, Vec<Field>, StructRest),
 
     /// An array literal constructed from one repeated element.
     ///
@@ -1606,7 +1608,7 @@
     /// A string literal (`"foo"`).
     Str(Symbol, StrStyle),
     /// A byte string (`b"foo"`).
-    ByteStr(Lrc<Vec<u8>>),
+    ByteStr(Lrc<[u8]>),
     /// A byte char (`b'f'`).
     Byte(u8),
     /// A character literal (`'a'`).
@@ -1752,13 +1754,6 @@
         }
     }
 
-    pub fn val_to_string(&self, val: i128) -> String {
-        // Cast to a `u128` so we can correctly print `INT128_MIN`. All integral types
-        // are parsed as `u128`, so we wouldn't want to print an extra negative
-        // sign.
-        format!("{}{}", val as u128, self.name_str())
-    }
-
     pub fn bit_width(&self) -> Option<u64> {
         Some(match *self {
             IntTy::Isize => return None,
@@ -1817,10 +1812,6 @@
         }
     }
 
-    pub fn val_to_string(&self, val: u128) -> String {
-        format!("{}{}", val, self.name_str())
-    }
-
     pub fn bit_width(&self) -> Option<u64> {
         Some(match *self {
             UintTy::Usize => return None,
@@ -1864,12 +1855,33 @@
     Bound { bounds: GenericBounds },
 }
 
-#[derive(Clone, Encodable, Decodable, Debug)]
+#[derive(Encodable, Decodable, Debug)]
 pub struct Ty {
     pub id: NodeId,
     pub kind: TyKind,
     pub span: Span,
-    pub tokens: Option<TokenStream>,
+    pub tokens: Option<LazyTokenStream>,
+}
+
+impl Clone for Ty {
+    fn clone(&self) -> Self {
+        ensure_sufficient_stack(|| Self {
+            id: self.id,
+            kind: self.kind.clone(),
+            span: self.span,
+            tokens: self.tokens.clone(),
+        })
+    }
+}
+
+impl Ty {
+    pub fn peel_refs(&self) -> &Self {
+        let mut final_ty = self;
+        while let TyKind::Rptr(_, MutTy { ty, .. }) = &final_ty.kind {
+            final_ty = &ty;
+        }
+        final_ty
+    }
 }
 
 #[derive(Clone, Encodable, Decodable, Debug)]
@@ -2421,7 +2433,7 @@
 pub struct AttrItem {
     pub path: Path,
     pub args: MacArgs,
-    pub tokens: Option<TokenStream>,
+    pub tokens: Option<LazyTokenStream>,
 }
 
 /// A list of attributes.
@@ -2441,7 +2453,7 @@
 #[derive(Clone, Encodable, Decodable, Debug)]
 pub enum AttrKind {
     /// A normal attribute.
-    Normal(AttrItem),
+    Normal(AttrItem, Option<LazyTokenStream>),
 
     /// A doc comment (e.g. `/// ...`, `//! ...`, `/** ... */`, `/*! ... */`).
     /// Doc attributes (e.g. `#[doc="..."]`) are represented with the `Normal`
@@ -2495,7 +2507,7 @@
 pub struct Visibility {
     pub kind: VisibilityKind,
     pub span: Span,
-    pub tokens: Option<TokenStream>,
+    pub tokens: Option<LazyTokenStream>,
 }
 
 #[derive(Clone, Encodable, Decodable, Debug)]
@@ -2582,7 +2594,7 @@
     ///
     /// Note that the tokens here do not include the outer attributes, but will
     /// include inner attributes.
-    pub tokens: Option<TokenStream>,
+    pub tokens: Option<LazyTokenStream>,
 }
 
 impl Item {
diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
index 2782869..2ff6573 100644
--- a/compiler/rustc_ast/src/attr/mod.rs
+++ b/compiler/rustc_ast/src/attr/mod.rs
@@ -8,7 +8,7 @@
 use crate::mut_visit::visit_clobber;
 use crate::ptr::P;
 use crate::token::{self, CommentKind, Token};
-use crate::tokenstream::{DelimSpan, TokenStream, TokenTree, TreeAndSpacing};
+use crate::tokenstream::{DelimSpan, LazyTokenStream, TokenStream, TokenTree, TreeAndSpacing};
 
 use rustc_index::bit_set::GrowableBitSet;
 use rustc_span::source_map::{BytePos, Spanned};
@@ -66,7 +66,7 @@
         self.meta_item().and_then(|meta_item| meta_item.ident())
     }
     pub fn name_or_empty(&self) -> Symbol {
-        self.ident().unwrap_or(Ident::invalid()).name
+        self.ident().unwrap_or_else(Ident::invalid).name
     }
 
     /// Gets the string value if `self` is a `MetaItem` and the `MetaItem` is a
@@ -101,11 +101,6 @@
         self.meta_item().is_some()
     }
 
-    /// Returns `true` if the variant is `Literal`.
-    pub fn is_literal(&self) -> bool {
-        self.literal().is_some()
-    }
-
     /// Returns `true` if `self` is a `MetaItem` and the meta item is a word.
     pub fn is_word(&self) -> bool {
         self.meta_item().map_or(false, |meta_item| meta_item.is_word())
@@ -125,7 +120,7 @@
 impl Attribute {
     pub fn has_name(&self, name: Symbol) -> bool {
         match self.kind {
-            AttrKind::Normal(ref item) => item.path == name,
+            AttrKind::Normal(ref item, _) => item.path == name,
             AttrKind::DocComment(..) => false,
         }
     }
@@ -133,7 +128,7 @@
     /// For a single-segment attribute, returns its name; otherwise, returns `None`.
     pub fn ident(&self) -> Option<Ident> {
         match self.kind {
-            AttrKind::Normal(ref item) => {
+            AttrKind::Normal(ref item, _) => {
                 if item.path.segments.len() == 1 {
                     Some(item.path.segments[0].ident)
                 } else {
@@ -144,19 +139,19 @@
         }
     }
     pub fn name_or_empty(&self) -> Symbol {
-        self.ident().unwrap_or(Ident::invalid()).name
+        self.ident().unwrap_or_else(Ident::invalid).name
     }
 
     pub fn value_str(&self) -> Option<Symbol> {
         match self.kind {
-            AttrKind::Normal(ref item) => item.meta(self.span).and_then(|meta| meta.value_str()),
+            AttrKind::Normal(ref item, _) => item.meta(self.span).and_then(|meta| meta.value_str()),
             AttrKind::DocComment(..) => None,
         }
     }
 
     pub fn meta_item_list(&self) -> Option<Vec<NestedMetaItem>> {
         match self.kind {
-            AttrKind::Normal(ref item) => match item.meta(self.span) {
+            AttrKind::Normal(ref item, _) => match item.meta(self.span) {
                 Some(MetaItem { kind: MetaItemKind::List(list), .. }) => Some(list),
                 _ => None,
             },
@@ -165,7 +160,7 @@
     }
 
     pub fn is_word(&self) -> bool {
-        if let AttrKind::Normal(item) = &self.kind {
+        if let AttrKind::Normal(item, _) = &self.kind {
             matches!(item.args, MacArgs::Empty)
         } else {
             false
@@ -188,7 +183,7 @@
         if self.path.segments.len() == 1 { Some(self.path.segments[0].ident) } else { None }
     }
     pub fn name_or_empty(&self) -> Symbol {
-        self.ident().unwrap_or(Ident::invalid()).name
+        self.ident().unwrap_or_else(Ident::invalid).name
     }
 
     // Example:
@@ -232,10 +227,6 @@
     pub fn is_value_str(&self) -> bool {
         self.value_str().is_some()
     }
-
-    pub fn is_meta_item_list(&self) -> bool {
-        self.meta_item_list().is_some()
-    }
 }
 
 impl AttrItem {
@@ -255,7 +246,7 @@
 impl Attribute {
     pub fn is_doc_comment(&self) -> bool {
         match self.kind {
-            AttrKind::Normal(_) => false,
+            AttrKind::Normal(..) => false,
             AttrKind::DocComment(..) => true,
         }
     }
@@ -263,7 +254,7 @@
     pub fn doc_str(&self) -> Option<Symbol> {
         match self.kind {
             AttrKind::DocComment(.., data) => Some(data),
-            AttrKind::Normal(ref item) if item.path == sym::doc => {
+            AttrKind::Normal(ref item, _) if item.path == sym::doc => {
                 item.meta(self.span).and_then(|meta| meta.value_str())
             }
             _ => None,
@@ -272,14 +263,14 @@
 
     pub fn get_normal_item(&self) -> &AttrItem {
         match self.kind {
-            AttrKind::Normal(ref item) => item,
+            AttrKind::Normal(ref item, _) => item,
             AttrKind::DocComment(..) => panic!("unexpected doc comment"),
         }
     }
 
     pub fn unwrap_normal_item(self) -> AttrItem {
         match self.kind {
-            AttrKind::Normal(item) => item,
+            AttrKind::Normal(item, _) => item,
             AttrKind::DocComment(..) => panic!("unexpected doc comment"),
         }
     }
@@ -287,10 +278,22 @@
     /// Extracts the MetaItem from inside this Attribute.
     pub fn meta(&self) -> Option<MetaItem> {
         match self.kind {
-            AttrKind::Normal(ref item) => item.meta(self.span),
+            AttrKind::Normal(ref item, _) => item.meta(self.span),
             AttrKind::DocComment(..) => None,
         }
     }
+
+    pub fn tokens(&self) -> TokenStream {
+        match self.kind {
+            AttrKind::Normal(_, ref tokens) => tokens
+                .as_ref()
+                .unwrap_or_else(|| panic!("attribute is missing tokens: {:?}", self))
+                .create_token_stream(),
+            AttrKind::DocComment(comment_kind, data) => TokenStream::from(TokenTree::Token(
+                Token::new(token::DocComment(comment_kind, self.style, data), self.span),
+            )),
+        }
+    }
 }
 
 /* Constructors */
@@ -330,11 +333,16 @@
 }
 
 pub fn mk_attr(style: AttrStyle, path: Path, args: MacArgs, span: Span) -> Attribute {
-    mk_attr_from_item(style, AttrItem { path, args, tokens: None }, span)
+    mk_attr_from_item(AttrItem { path, args, tokens: None }, None, style, span)
 }
 
-pub fn mk_attr_from_item(style: AttrStyle, item: AttrItem, span: Span) -> Attribute {
-    Attribute { kind: AttrKind::Normal(item), id: mk_attr_id(), style, span }
+pub fn mk_attr_from_item(
+    item: AttrItem,
+    tokens: Option<LazyTokenStream>,
+    style: AttrStyle,
+    span: Span,
+) -> Attribute {
+    Attribute { kind: AttrKind::Normal(item, tokens), id: mk_attr_id(), style, span }
 }
 
 /// Returns an inner attribute with the given value and span.
@@ -363,7 +371,7 @@
 impl MetaItem {
     fn token_trees_and_spacings(&self) -> Vec<TreeAndSpacing> {
         let mut idents = vec![];
-        let mut last_pos = BytePos(0 as u32);
+        let mut last_pos = BytePos(0_u32);
         for (i, segment) in self.path.segments.iter().enumerate() {
             let is_first = i == 0;
             if !is_first {
@@ -632,7 +640,8 @@
         match *self {
             StmtKind::Local(ref local) => local.attrs(),
             StmtKind::Expr(ref expr) | StmtKind::Semi(ref expr) => expr.attrs(),
-            StmtKind::Empty | StmtKind::Item(..) => &[],
+            StmtKind::Item(ref item) => item.attrs(),
+            StmtKind::Empty => &[],
             StmtKind::MacCall(ref mac) => mac.attrs.attrs(),
         }
     }
@@ -641,7 +650,8 @@
         match self {
             StmtKind::Local(local) => local.visit_attrs(f),
             StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr.visit_attrs(f),
-            StmtKind::Empty | StmtKind::Item(..) => {}
+            StmtKind::Item(item) => item.visit_attrs(f),
+            StmtKind::Empty => {}
             StmtKind::MacCall(mac) => {
                 mac.attrs.visit_attrs(f);
             }
diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs
index 425ef83..2609798 100644
--- a/compiler/rustc_ast/src/mut_visit.rs
+++ b/compiler/rustc_ast/src/mut_visit.rs
@@ -34,6 +34,13 @@
 }
 
 pub trait MutVisitor: Sized {
+    /// Mutable token visiting only exists for the `macro_rules` token marker and should not be
+    /// used otherwise. Token visitor would be entirely separate from the regular visitor if
+    /// the marker didn't have to visit AST fragments in nonterminal tokens.
+    fn token_visiting_enabled(&self) -> bool {
+        false
+    }
+
     // Methods in this trait have one of three forms:
     //
     //   fn visit_t(&mut self, t: &mut T);                      // common
@@ -203,11 +210,8 @@
         noop_visit_local(l, self);
     }
 
-    fn visit_mac(&mut self, _mac: &mut MacCall) {
-        panic!("visit_mac disabled by default");
-        // N.B., see note about macros above. If you really want a visitor that
-        // works on macros, use this definition in your trait impl:
-        //   mut_visit::noop_visit_mac(_mac, self);
+    fn visit_mac_call(&mut self, mac: &mut MacCall) {
+        noop_visit_mac(mac, self);
     }
 
     fn visit_macro_def(&mut self, def: &mut MacroDef) {
@@ -246,22 +250,6 @@
         noop_flat_map_generic_param(param, self)
     }
 
-    fn visit_tt(&mut self, tt: &mut TokenTree) {
-        noop_visit_tt(tt, self);
-    }
-
-    fn visit_tts(&mut self, tts: &mut TokenStream) {
-        noop_visit_tts(tts, self);
-    }
-
-    fn visit_token(&mut self, t: &mut Token) {
-        noop_visit_token(t, self);
-    }
-
-    fn visit_interpolated(&mut self, nt: &mut token::Nonterminal) {
-        noop_visit_interpolated(nt, self);
-    }
-
     fn visit_param_bound(&mut self, tpb: &mut GenericBound) {
         noop_visit_param_bound(tpb, self);
     }
@@ -375,11 +363,30 @@
         MacArgs::Empty => {}
         MacArgs::Delimited(dspan, _delim, tokens) => {
             visit_delim_span(dspan, vis);
-            vis.visit_tts(tokens);
+            visit_tts(tokens, vis);
         }
         MacArgs::Eq(eq_span, tokens) => {
             vis.visit_span(eq_span);
-            vis.visit_tts(tokens);
+            visit_tts(tokens, vis);
+            // The value in `#[key = VALUE]` must be visited as an expression for backward
+            // compatibility, so that macros can be expanded in that position.
+            if !vis.token_visiting_enabled() {
+                if let Some(TokenTree::Token(token)) = tokens.trees_ref().next() {
+                    if let token::Interpolated(..) = token.kind {
+                        // ^^ Do not `make_mut` unless we have to.
+                        match Lrc::make_mut(&mut tokens.0).get_mut(0) {
+                            Some((TokenTree::Token(token), _spacing)) => match &mut token.kind {
+                                token::Interpolated(nt) => match Lrc::make_mut(nt) {
+                                    token::NtExpr(expr) => vis.visit_expr(expr),
+                                    t => panic!("unexpected token in key-value attribute: {:?}", t),
+                                },
+                                t => panic!("unexpected token in key-value attribute: {:?}", t),
+                            },
+                            t => panic!("unexpected token in key-value attribute: {:?}", t),
+                        }
+                    }
+                }
+            }
         }
     }
 }
@@ -451,7 +458,7 @@
 }
 
 pub fn noop_visit_ty<T: MutVisitor>(ty: &mut P<Ty>, vis: &mut T) {
-    let Ty { id, kind, span, tokens: _ } = ty.deref_mut();
+    let Ty { id, kind, span, tokens } = ty.deref_mut();
     vis.visit_id(id);
     match kind {
         TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err | TyKind::Never | TyKind::CVarArgs => {}
@@ -484,9 +491,10 @@
             vis.visit_id(id);
             visit_vec(bounds, |bound| vis.visit_param_bound(bound));
         }
-        TyKind::MacCall(mac) => vis.visit_mac(mac),
+        TyKind::MacCall(mac) => vis.visit_mac_call(mac),
     }
     vis.visit_span(span);
+    visit_lazy_tts(tokens, vis);
 }
 
 pub fn noop_visit_foreign_mod<T: MutVisitor>(foreign_mod: &mut ForeignMod, vis: &mut T) {
@@ -513,13 +521,14 @@
     vis.visit_span(span);
 }
 
-pub fn noop_visit_path<T: MutVisitor>(Path { segments, span, tokens: _ }: &mut Path, vis: &mut T) {
+pub fn noop_visit_path<T: MutVisitor>(Path { segments, span, tokens }: &mut Path, vis: &mut T) {
     vis.visit_span(span);
     for PathSegment { ident, id, args } in segments {
         vis.visit_ident(ident);
         vis.visit_id(id);
         visit_opt(args, |args| vis.visit_generic_args(args));
     }
+    visit_lazy_tts(tokens, vis);
 }
 
 pub fn noop_visit_qself<T: MutVisitor>(qself: &mut Option<QSelf>, vis: &mut T) {
@@ -579,9 +588,11 @@
 pub fn noop_visit_attribute<T: MutVisitor>(attr: &mut Attribute, vis: &mut T) {
     let Attribute { kind, id: _, style: _, span } = attr;
     match kind {
-        AttrKind::Normal(AttrItem { path, args, tokens: _ }) => {
+        AttrKind::Normal(AttrItem { path, args, tokens }, attr_tokens) => {
             vis.visit_path(path);
             visit_mac_args(args, vis);
+            visit_lazy_tts(tokens, vis);
+            visit_lazy_tts(attr_tokens, vis);
         }
         AttrKind::DocComment(..) => {}
     }
@@ -626,28 +637,43 @@
     smallvec![param]
 }
 
-pub fn noop_visit_tt<T: MutVisitor>(tt: &mut TokenTree, vis: &mut T) {
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_tt<T: MutVisitor>(tt: &mut TokenTree, vis: &mut T) {
     match tt {
         TokenTree::Token(token) => {
-            vis.visit_token(token);
+            visit_token(token, vis);
         }
         TokenTree::Delimited(DelimSpan { open, close }, _delim, tts) => {
             vis.visit_span(open);
             vis.visit_span(close);
-            vis.visit_tts(tts);
+            visit_tts(tts, vis);
         }
     }
 }
 
-pub fn noop_visit_tts<T: MutVisitor>(TokenStream(tts): &mut TokenStream, vis: &mut T) {
-    let tts = Lrc::make_mut(tts);
-    visit_vec(tts, |(tree, _is_joint)| vis.visit_tt(tree));
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_tts<T: MutVisitor>(TokenStream(tts): &mut TokenStream, vis: &mut T) {
+    if vis.token_visiting_enabled() && !tts.is_empty() {
+        let tts = Lrc::make_mut(tts);
+        visit_vec(tts, |(tree, _is_joint)| visit_tt(tree, vis));
+    }
 }
 
+pub fn visit_lazy_tts<T: MutVisitor>(lazy_tts: &mut Option<LazyTokenStream>, vis: &mut T) {
+    if vis.token_visiting_enabled() {
+        visit_opt(lazy_tts, |lazy_tts| {
+            let mut tts = lazy_tts.create_token_stream();
+            visit_tts(&mut tts, vis);
+            *lazy_tts = LazyTokenStream::new(tts);
+        })
+    }
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
 // Applies ident visitor if it's an ident; applies other visits to interpolated nodes.
 // In practice the ident part is not actually used by specific visitors right now,
 // but there's a test below checking that it works.
-pub fn noop_visit_token<T: MutVisitor>(t: &mut Token, vis: &mut T) {
+pub fn visit_token<T: MutVisitor>(t: &mut Token, vis: &mut T) {
     let Token { kind, span } = t;
     match kind {
         token::Ident(name, _) | token::Lifetime(name) => {
@@ -659,13 +685,14 @@
         }
         token::Interpolated(nt) => {
             let mut nt = Lrc::make_mut(nt);
-            vis.visit_interpolated(&mut nt);
+            visit_interpolated(&mut nt, vis);
         }
         _ => {}
     }
     vis.visit_span(span);
 }
 
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
 /// Applies the visitor to elements of interpolated nodes.
 //
 // N.B., this can occur only when applying a visitor to partially expanded
@@ -689,7 +716,7 @@
 // contain multiple items, but decided against it when I looked at
 // `parse_item_or_view_item` and tried to figure out what I would do with
 // multiple items there....
-pub fn noop_visit_interpolated<T: MutVisitor>(nt: &mut token::Nonterminal, vis: &mut T) {
+pub fn visit_interpolated<T: MutVisitor>(nt: &mut token::Nonterminal, vis: &mut T) {
     match nt {
         token::NtItem(item) => visit_clobber(item, |item| {
             // This is probably okay, because the only visitors likely to
@@ -709,12 +736,13 @@
         token::NtLifetime(ident) => vis.visit_ident(ident),
         token::NtLiteral(expr) => vis.visit_expr(expr),
         token::NtMeta(item) => {
-            let AttrItem { path, args, tokens: _ } = item.deref_mut();
+            let AttrItem { path, args, tokens } = item.deref_mut();
             vis.visit_path(path);
             visit_mac_args(args, vis);
+            visit_lazy_tts(tokens, vis);
         }
         token::NtPath(path) => vis.visit_path(path),
-        token::NtTT(tt) => vis.visit_tt(tt),
+        token::NtTT(tt) => visit_tt(tt, vis),
         token::NtVis(visib) => vis.visit_vis(visib),
     }
 }
@@ -871,10 +899,11 @@
 }
 
 pub fn noop_visit_block<T: MutVisitor>(block: &mut P<Block>, vis: &mut T) {
-    let Block { id, stmts, rules: _, span, tokens: _ } = block.deref_mut();
+    let Block { id, stmts, rules: _, span, tokens } = block.deref_mut();
     vis.visit_id(id);
     stmts.flat_map_in_place(|stmt| vis.flat_map_stmt(stmt));
     vis.visit_span(span);
+    visit_lazy_tts(tokens, vis);
 }
 
 pub fn noop_visit_item_kind<T: MutVisitor>(kind: &mut ItemKind, vis: &mut T) {
@@ -930,7 +959,7 @@
             vis.visit_generics(generics);
             visit_bounds(bounds, vis);
         }
-        ItemKind::MacCall(m) => vis.visit_mac(m),
+        ItemKind::MacCall(m) => vis.visit_mac_call(m),
         ItemKind::MacroDef(def) => vis.visit_macro_def(def),
     }
 }
@@ -939,7 +968,7 @@
     mut item: P<AssocItem>,
     visitor: &mut T,
 ) -> SmallVec<[P<AssocItem>; 1]> {
-    let Item { id, ident, vis, attrs, kind, span, tokens: _ } = item.deref_mut();
+    let Item { id, ident, vis, attrs, kind, span, tokens } = item.deref_mut();
     visitor.visit_id(id);
     visitor.visit_ident(ident);
     visitor.visit_vis(vis);
@@ -959,9 +988,10 @@
             visit_bounds(bounds, visitor);
             visit_opt(ty, |ty| visitor.visit_ty(ty));
         }
-        AssocItemKind::MacCall(mac) => visitor.visit_mac(mac),
+        AssocItemKind::MacCall(mac) => visitor.visit_mac_call(mac),
     }
     visitor.visit_span(span);
+    visit_lazy_tts(tokens, visitor);
     smallvec![item]
 }
 
@@ -1012,16 +1042,14 @@
     mut item: P<Item>,
     visitor: &mut T,
 ) -> SmallVec<[P<Item>; 1]> {
-    let Item { ident, attrs, id, kind, vis, span, tokens: _ } = item.deref_mut();
+    let Item { ident, attrs, id, kind, vis, span, tokens } = item.deref_mut();
     visitor.visit_ident(ident);
     visit_attrs(attrs, visitor);
     visitor.visit_id(id);
     visitor.visit_item_kind(kind);
     visitor.visit_vis(vis);
     visitor.visit_span(span);
-
-    // FIXME: if `tokens` is modified with a call to `vis.visit_tts` it causes
-    //        an ICE during resolve... odd!
+    visit_lazy_tts(tokens, visitor);
 
     smallvec![item]
 }
@@ -1030,7 +1058,7 @@
     mut item: P<ForeignItem>,
     visitor: &mut T,
 ) -> SmallVec<[P<ForeignItem>; 1]> {
-    let Item { ident, attrs, id, kind, vis, span, tokens: _ } = item.deref_mut();
+    let Item { ident, attrs, id, kind, vis, span, tokens } = item.deref_mut();
     visitor.visit_id(id);
     visitor.visit_ident(ident);
     visitor.visit_vis(vis);
@@ -1050,14 +1078,15 @@
             visit_bounds(bounds, visitor);
             visit_opt(ty, |ty| visitor.visit_ty(ty));
         }
-        ForeignItemKind::MacCall(mac) => visitor.visit_mac(mac),
+        ForeignItemKind::MacCall(mac) => visitor.visit_mac_call(mac),
     }
     visitor.visit_span(span);
+    visit_lazy_tts(tokens, visitor);
     smallvec![item]
 }
 
 pub fn noop_visit_pat<T: MutVisitor>(pat: &mut P<Pat>, vis: &mut T) {
-    let Pat { id, kind, span, tokens: _ } = pat.deref_mut();
+    let Pat { id, kind, span, tokens } = pat.deref_mut();
     vis.visit_id(id);
     match kind {
         PatKind::Wild | PatKind::Rest => {}
@@ -1089,9 +1118,10 @@
             visit_vec(elems, |elem| vis.visit_pat(elem))
         }
         PatKind::Paren(inner) => vis.visit_pat(inner),
-        PatKind::MacCall(mac) => vis.visit_mac(mac),
+        PatKind::MacCall(mac) => vis.visit_mac_call(mac),
     }
     vis.visit_span(span);
+    visit_lazy_tts(tokens, vis);
 }
 
 pub fn noop_visit_anon_const<T: MutVisitor>(AnonConst { id, value }: &mut AnonConst, vis: &mut T) {
@@ -1100,12 +1130,15 @@
 }
 
 pub fn noop_visit_expr<T: MutVisitor>(
-    Expr { kind, id, span, attrs, tokens: _ }: &mut Expr,
+    Expr { kind, id, span, attrs, tokens }: &mut Expr,
     vis: &mut T,
 ) {
     match kind {
         ExprKind::Box(expr) => vis.visit_expr(expr),
         ExprKind::Array(exprs) => visit_exprs(exprs, vis),
+        ExprKind::ConstBlock(anon_const) => {
+            vis.visit_anon_const(anon_const);
+        }
         ExprKind::Repeat(expr, count) => {
             vis.visit_expr(expr);
             vis.visit_anon_const(count);
@@ -1251,11 +1284,15 @@
             }
             visit_vec(inputs, |(_c, expr)| vis.visit_expr(expr));
         }
-        ExprKind::MacCall(mac) => vis.visit_mac(mac),
+        ExprKind::MacCall(mac) => vis.visit_mac_call(mac),
         ExprKind::Struct(path, fields, expr) => {
             vis.visit_path(path);
             fields.flat_map_in_place(|field| vis.flat_map_field(field));
-            visit_opt(expr, |expr| vis.visit_expr(expr));
+            match expr {
+                StructRest::Base(expr) => vis.visit_expr(expr),
+                StructRest::Rest(_span) => {}
+                StructRest::None => {}
+            }
         }
         ExprKind::Paren(expr) => {
             vis.visit_expr(expr);
@@ -1276,6 +1313,7 @@
     vis.visit_id(id);
     vis.visit_span(span);
     visit_thin_attrs(attrs, vis);
+    visit_lazy_tts(tokens, vis);
 }
 
 pub fn noop_filter_map_expr<T: MutVisitor>(mut e: P<Expr>, vis: &mut T) -> Option<P<Expr>> {
@@ -1286,11 +1324,12 @@
 }
 
 pub fn noop_flat_map_stmt<T: MutVisitor>(
-    Stmt { kind, mut span, mut id, tokens }: Stmt,
+    Stmt { kind, mut span, mut id, mut tokens }: Stmt,
     vis: &mut T,
 ) -> SmallVec<[Stmt; 1]> {
     vis.visit_id(&mut id);
     vis.visit_span(&mut span);
+    visit_lazy_tts(&mut tokens, vis);
     noop_flat_map_stmt_kind(kind, vis)
         .into_iter()
         .map(|kind| Stmt { id, kind, span, tokens: tokens.clone() })
@@ -1312,7 +1351,7 @@
         StmtKind::Empty => smallvec![StmtKind::Empty],
         StmtKind::MacCall(mut mac) => {
             let MacCallStmt { mac: mac_, style: _, attrs } = mac.deref_mut();
-            vis.visit_mac(mac_);
+            vis.visit_mac_call(mac_);
             visit_thin_attrs(attrs, vis);
             smallvec![StmtKind::MacCall(mac)]
         }
diff --git a/compiler/rustc_ast/src/node_id.rs b/compiler/rustc_ast/src/node_id.rs
index 1035e94..6e7d2ba 100644
--- a/compiler/rustc_ast/src/node_id.rs
+++ b/compiler/rustc_ast/src/node_id.rs
@@ -13,8 +13,8 @@
 pub const CRATE_NODE_ID: NodeId = NodeId::from_u32(0);
 
 /// When parsing and doing expansions, we initially give all AST nodes this AST
-/// node value. Then later, in the renumber pass, we renumber them to have
-/// small, positive ids.
+/// node value. Then later, during expansion, we renumber them to have small,
+/// positive ids.
 pub const DUMMY_NODE_ID: NodeId = NodeId::MAX;
 
 impl NodeId {
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs
index d5b3e87..2bba7e6 100644
--- a/compiler/rustc_ast/src/token.rs
+++ b/compiler/rustc_ast/src/token.rs
@@ -54,16 +54,6 @@
     NoDelim,
 }
 
-impl DelimToken {
-    pub fn len(self) -> usize {
-        if self == NoDelim { 0 } else { 1 }
-    }
-
-    pub fn is_empty(self) -> bool {
-        self == NoDelim
-    }
-}
-
 #[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
 pub enum LitKind {
     Bool, // AST only, must never appear in a `Token`
@@ -163,6 +153,7 @@
             kw::Do,
             kw::Box,
             kw::Break,
+            kw::Const,
             kw::Continue,
             kw::False,
             kw::For,
@@ -312,6 +303,13 @@
             _ => None,
         }
     }
+
+    pub fn should_end_const_arg(&self) -> bool {
+        match self {
+            Gt | Ge | BinOp(Shr) | BinOpEq(Shr) => true,
+            _ => false,
+        }
+    }
 }
 
 impl Token {
@@ -810,10 +808,10 @@
             if let ExpnKind::Macro(_, macro_name) = orig_span.ctxt().outer_expn_data().kind {
                 let filename = source_map.span_to_filename(orig_span);
                 if let FileName::Real(RealFileName::Named(path)) = filename {
-                    let matches_prefix = |prefix| {
-                        // Check for a path that ends with 'prefix*/src/lib.rs'
+                    let matches_prefix = |prefix, filename| {
+                        // Check for a path that ends with 'prefix*/src/<filename>'
                         let mut iter = path.components().rev();
-                        iter.next().and_then(|p| p.as_os_str().to_str()) == Some("lib.rs")
+                        iter.next().and_then(|p| p.as_os_str().to_str()) == Some(filename)
                             && iter.next().and_then(|p| p.as_os_str().to_str()) == Some("src")
                             && iter
                                 .next()
@@ -821,14 +819,25 @@
                                 .map_or(false, |p| p.starts_with(prefix))
                     };
 
-                    if (macro_name == sym::impl_macros && matches_prefix("time-macros-impl"))
-                        || (macro_name == sym::arrays && matches_prefix("js-sys"))
+                    if (macro_name == sym::impl_macros
+                        && matches_prefix("time-macros-impl", "lib.rs"))
+                        || (macro_name == sym::arrays && matches_prefix("js-sys", "lib.rs"))
                     {
                         let snippet = source_map.span_to_snippet(orig_span);
                         if snippet.as_deref() == Ok("$name") {
                             return Some((*ident, *is_raw));
                         }
                     }
+
+                    if macro_name == sym::tuple_from_req
+                        && (matches_prefix("actix-web", "extract.rs")
+                            || matches_prefix("actori-web", "extract.rs"))
+                    {
+                        let snippet = source_map.span_to_snippet(orig_span);
+                        if snippet.as_deref() == Ok("$T") {
+                            return Some((*ident, *is_raw));
+                        }
+                    }
                 }
             }
         }
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index f201f0b..1e7001c 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -16,12 +16,13 @@
 use crate::token::{self, DelimToken, Token, TokenKind};
 
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::sync::{self, Lrc};
 use rustc_macros::HashStable_Generic;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
 use rustc_span::{Span, DUMMY_SP};
 use smallvec::{smallvec, SmallVec};
 
-use std::{iter, mem};
+use std::{fmt, iter, mem};
 
 /// When the main rust parser encounters a syntax-extension invocation, it
 /// parses the arguments to the invocation as a token-tree. This is a very
@@ -119,13 +120,64 @@
     }
 }
 
+pub trait CreateTokenStream: sync::Send + sync::Sync {
+    fn create_token_stream(&self) -> TokenStream;
+}
+
+impl CreateTokenStream for TokenStream {
+    fn create_token_stream(&self) -> TokenStream {
+        self.clone()
+    }
+}
+
+/// A lazy version of `TokenStream`, which defers creation
+/// of an actual `TokenStream` until it is needed.
+/// `Box` is here only to reduce the structure size.
+#[derive(Clone)]
+pub struct LazyTokenStream(Lrc<Box<dyn CreateTokenStream>>);
+
+impl LazyTokenStream {
+    pub fn new(inner: impl CreateTokenStream + 'static) -> LazyTokenStream {
+        LazyTokenStream(Lrc::new(Box::new(inner)))
+    }
+
+    pub fn create_token_stream(&self) -> TokenStream {
+        self.0.create_token_stream()
+    }
+}
+
+impl fmt::Debug for LazyTokenStream {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt("LazyTokenStream", f)
+    }
+}
+
+impl<S: Encoder> Encodable<S> for LazyTokenStream {
+    fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+        // Used by AST json printing.
+        Encodable::encode(&self.create_token_stream(), s)
+    }
+}
+
+impl<D: Decoder> Decodable<D> for LazyTokenStream {
+    fn decode(_d: &mut D) -> Result<Self, D::Error> {
+        panic!("Attempted to decode LazyTokenStream");
+    }
+}
+
+impl<CTX> HashStable<CTX> for LazyTokenStream {
+    fn hash_stable(&self, _hcx: &mut CTX, _hasher: &mut StableHasher) {
+        panic!("Attempted to compute stable hash for LazyTokenStream");
+    }
+}
+
 /// A `TokenStream` is an abstract sequence of tokens, organized into `TokenTree`s.
 ///
 /// The goal is for procedural macros to work with `TokenStream`s and `TokenTree`s
 /// instead of a representation of the abstract syntax tree.
 /// Today's `TokenTree`s can still contain AST via `token::Interpolated` for back-compat.
 #[derive(Clone, Debug, Default, Encodable, Decodable)]
-pub struct TokenStream(pub Lrc<Vec<TreeAndSpacing>>);
+pub struct TokenStream(pub(crate) Lrc<Vec<TreeAndSpacing>>);
 
 pub type TreeAndSpacing = (TokenTree, Spacing);
 
@@ -266,6 +318,10 @@
         }
     }
 
+    pub fn trees_ref(&self) -> CursorRef<'_> {
+        CursorRef::new(self)
+    }
+
     pub fn trees(&self) -> Cursor {
         self.clone().into_trees()
     }
@@ -286,21 +342,15 @@
         t1.next().is_none() && t2.next().is_none()
     }
 
-    pub fn map_enumerated<F: FnMut(usize, TokenTree) -> TokenTree>(self, mut f: F) -> TokenStream {
+    pub fn map_enumerated<F: FnMut(usize, &TokenTree) -> TokenTree>(self, mut f: F) -> TokenStream {
         TokenStream(Lrc::new(
             self.0
                 .iter()
                 .enumerate()
-                .map(|(i, (tree, is_joint))| (f(i, tree.clone()), *is_joint))
+                .map(|(i, (tree, is_joint))| (f(i, tree), *is_joint))
                 .collect(),
         ))
     }
-
-    pub fn map<F: FnMut(TokenTree) -> TokenTree>(self, mut f: F) -> TokenStream {
-        TokenStream(Lrc::new(
-            self.0.iter().map(|(tree, is_joint)| (f(tree.clone()), *is_joint)).collect(),
-        ))
-    }
 }
 
 // 99.5%+ of the time we have 1 or 2 elements in this vector.
@@ -362,6 +412,36 @@
     }
 }
 
+/// By-reference iterator over a `TokenStream`.
+#[derive(Clone)]
+pub struct CursorRef<'t> {
+    stream: &'t TokenStream,
+    index: usize,
+}
+
+impl<'t> CursorRef<'t> {
+    fn new(stream: &TokenStream) -> CursorRef<'_> {
+        CursorRef { stream, index: 0 }
+    }
+
+    fn next_with_spacing(&mut self) -> Option<&'t TreeAndSpacing> {
+        self.stream.0.get(self.index).map(|tree| {
+            self.index += 1;
+            tree
+        })
+    }
+}
+
+impl<'t> Iterator for CursorRef<'t> {
+    type Item = &'t TokenTree;
+
+    fn next(&mut self) -> Option<&'t TokenTree> {
+        self.next_with_spacing().map(|(tree, _)| tree)
+    }
+}
+
+/// Owning by-value iterator over a `TokenStream`.
+/// FIXME: Many uses of this can be replaced with by-reference iterator to avoid clones.
 #[derive(Clone)]
 pub struct Cursor {
     pub stream: TokenStream,
@@ -400,8 +480,8 @@
         self.index = index;
     }
 
-    pub fn look_ahead(&self, n: usize) -> Option<TokenTree> {
-        self.stream.0[self.index..].get(n).map(|(tree, _)| tree.clone())
+    pub fn look_ahead(&self, n: usize) -> Option<&TokenTree> {
+        self.stream.0[self.index..].get(n).map(|(tree, _)| tree)
     }
 }
 
diff --git a/compiler/rustc_ast/src/util/lev_distance.rs b/compiler/rustc_ast/src/util/lev_distance.rs
index 754b1f1..21c2c92 100644
--- a/compiler/rustc_ast/src/util/lev_distance.rs
+++ b/compiler/rustc_ast/src/util/lev_distance.rs
@@ -54,7 +54,7 @@
     T: Iterator<Item = &'a Symbol>,
 {
     let lookup = &lookup.as_str();
-    let max_dist = dist.map_or_else(|| cmp::max(lookup.len(), 3) / 3, |d| d);
+    let max_dist = dist.unwrap_or_else(|| cmp::max(lookup.len(), 3) / 3);
     let name_vec: Vec<&Symbol> = iter_names.collect();
 
     let (case_insensitive_match, levenshtein_match) = name_vec
diff --git a/compiler/rustc_ast/src/util/literal.rs b/compiler/rustc_ast/src/util/literal.rs
index 597e5b4..f6f1ad0 100644
--- a/compiler/rustc_ast/src/util/literal.rs
+++ b/compiler/rustc_ast/src/util/literal.rs
@@ -4,7 +4,6 @@
 use crate::token::{self, Token};
 use crate::tokenstream::TokenTree;
 
-use rustc_data_structures::sync::Lrc;
 use rustc_lexer::unescape::{unescape_byte, unescape_char};
 use rustc_lexer::unescape::{unescape_byte_literal, unescape_literal, Mode};
 use rustc_span::symbol::{kw, sym, Symbol};
@@ -108,7 +107,7 @@
                 });
                 error?;
                 buf.shrink_to_fit();
-                LitKind::ByteStr(Lrc::new(buf))
+                LitKind::ByteStr(buf.into())
             }
             token::ByteStrRaw(_) => {
                 let s = symbol.as_str();
@@ -128,7 +127,7 @@
                     symbol.to_string().into_bytes()
                 };
 
-                LitKind::ByteStr(Lrc::new(bytes))
+                LitKind::ByteStr(bytes.into())
             }
             token::Err => LitKind::Err(symbol),
         })
diff --git a/compiler/rustc_ast/src/util/parser.rs b/compiler/rustc_ast/src/util/parser.rs
index 2ee9496..078dd4b 100644
--- a/compiler/rustc_ast/src/util/parser.rs
+++ b/compiler/rustc_ast/src/util/parser.rs
@@ -231,7 +231,6 @@
     }
 }
 
-pub const PREC_RESET: i8 = -100;
 pub const PREC_CLOSURE: i8 = -40;
 pub const PREC_JUMP: i8 = -30;
 pub const PREC_RANGE: i8 = -10;
@@ -283,6 +282,7 @@
     ForLoop,
     Loop,
     Match,
+    ConstBlock,
     Block,
     TryBlock,
     Struct,
@@ -347,6 +347,7 @@
             ExprPrecedence::ForLoop |
             ExprPrecedence::Loop |
             ExprPrecedence::Match |
+            ExprPrecedence::ConstBlock |
             ExprPrecedence::Block |
             ExprPrecedence::TryBlock |
             ExprPrecedence::Async |
diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs
index 86fd87f..49b521a 100644
--- a/compiler/rustc_ast/src/visit.rs
+++ b/compiler/rustc_ast/src/visit.rs
@@ -14,8 +14,8 @@
 //! those that are created by the expansion of a macro.
 
 use crate::ast::*;
-use crate::token::Token;
-use crate::tokenstream::{TokenStream, TokenTree};
+use crate::token;
+use crate::tokenstream::TokenTree;
 
 use rustc_span::symbol::{Ident, Symbol};
 use rustc_span::Span;
@@ -176,13 +176,8 @@
     fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) {
         walk_lifetime(self, lifetime)
     }
-    fn visit_mac(&mut self, _mac: &'ast MacCall) {
-        panic!("visit_mac disabled by default");
-        // N.B., see note about macros above.
-        // if you really want a visitor that
-        // works on macros, use this
-        // definition in your trait impl:
-        // visit::walk_mac(self, _mac)
+    fn visit_mac_call(&mut self, mac: &'ast MacCall) {
+        walk_mac(self, mac)
     }
     fn visit_mac_def(&mut self, _mac: &'ast MacroDef, _id: NodeId) {
         // Nothing to do
@@ -200,11 +195,7 @@
         walk_generic_args(self, path_span, generic_args)
     }
     fn visit_generic_arg(&mut self, generic_arg: &'ast GenericArg) {
-        match generic_arg {
-            GenericArg::Lifetime(lt) => self.visit_lifetime(lt),
-            GenericArg::Type(ty) => self.visit_ty(ty),
-            GenericArg::Const(ct) => self.visit_anon_const(ct),
-        }
+        walk_generic_arg(self, generic_arg)
     }
     fn visit_assoc_ty_constraint(&mut self, constraint: &'ast AssocTyConstraint) {
         walk_assoc_ty_constraint(self, constraint)
@@ -212,14 +203,6 @@
     fn visit_attribute(&mut self, attr: &'ast Attribute) {
         walk_attribute(self, attr)
     }
-    fn visit_tt(&mut self, tt: TokenTree) {
-        walk_tt(self, tt)
-    }
-    fn visit_tts(&mut self, tts: TokenStream) {
-        walk_tts(self, tts)
-    }
-    fn visit_token(&mut self, _t: Token) {}
-    // FIXME: add `visit_interpolated` and `walk_interpolated`
     fn visit_vis(&mut self, vis: &'ast Visibility) {
         walk_vis(self, vis)
     }
@@ -358,7 +341,7 @@
             visitor.visit_generics(generics);
             walk_list!(visitor, visit_param_bound, bounds);
         }
-        ItemKind::MacCall(ref mac) => visitor.visit_mac(mac),
+        ItemKind::MacCall(ref mac) => visitor.visit_mac_call(mac),
         ItemKind::MacroDef(ref ts) => visitor.visit_mac_def(ts, item.id),
     }
     walk_list!(visitor, visit_attribute, &item.attrs);
@@ -426,7 +409,7 @@
         }
         TyKind::Typeof(ref expression) => visitor.visit_anon_const(expression),
         TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => {}
-        TyKind::MacCall(ref mac) => visitor.visit_mac(mac),
+        TyKind::MacCall(ref mac) => visitor.visit_mac_call(mac),
         TyKind::Never | TyKind::CVarArgs => {}
     }
 }
@@ -486,6 +469,17 @@
     }
 }
 
+pub fn walk_generic_arg<'a, V>(visitor: &mut V, generic_arg: &'a GenericArg)
+where
+    V: Visitor<'a>,
+{
+    match generic_arg {
+        GenericArg::Lifetime(lt) => visitor.visit_lifetime(lt),
+        GenericArg::Type(ty) => visitor.visit_ty(ty),
+        GenericArg::Const(ct) => visitor.visit_anon_const(ct),
+    }
+}
+
 pub fn walk_assoc_ty_constraint<'a, V: Visitor<'a>>(
     visitor: &mut V,
     constraint: &'a AssocTyConstraint,
@@ -533,7 +527,7 @@
         PatKind::Tuple(ref elems) | PatKind::Slice(ref elems) | PatKind::Or(ref elems) => {
             walk_list!(visitor, visit_pat, elems);
         }
-        PatKind::MacCall(ref mac) => visitor.visit_mac(mac),
+        PatKind::MacCall(ref mac) => visitor.visit_mac_call(mac),
     }
 }
 
@@ -558,7 +552,7 @@
             walk_list!(visitor, visit_ty, ty);
         }
         ForeignItemKind::MacCall(mac) => {
-            visitor.visit_mac(mac);
+            visitor.visit_mac_call(mac);
         }
     }
 }
@@ -663,7 +657,7 @@
             walk_list!(visitor, visit_ty, ty);
         }
         AssocItemKind::MacCall(mac) => {
-            visitor.visit_mac(mac);
+            visitor.visit_mac_call(mac);
         }
     }
 }
@@ -693,7 +687,7 @@
         StmtKind::Empty => {}
         StmtKind::MacCall(ref mac) => {
             let MacCallStmt { ref mac, style: _, ref attrs } = **mac;
-            visitor.visit_mac(mac);
+            visitor.visit_mac_call(mac);
             for attr in attrs.iter() {
                 visitor.visit_attribute(attr);
             }
@@ -717,6 +711,7 @@
         ExprKind::Array(ref subexpressions) => {
             walk_list!(visitor, visit_expr, subexpressions);
         }
+        ExprKind::ConstBlock(ref anon_const) => visitor.visit_anon_const(anon_const),
         ExprKind::Repeat(ref element, ref count) => {
             visitor.visit_expr(element);
             visitor.visit_anon_const(count)
@@ -724,7 +719,11 @@
         ExprKind::Struct(ref path, ref fields, ref optional_base) => {
             visitor.visit_path(path, expression.id);
             walk_list!(visitor, visit_field, fields);
-            walk_list!(visitor, visit_expr, optional_base);
+            match optional_base {
+                StructRest::Base(expr) => visitor.visit_expr(expr),
+                StructRest::Rest(_span) => {}
+                StructRest::None => {}
+            }
         }
         ExprKind::Tup(ref subexpressions) => {
             walk_list!(visitor, visit_expr, subexpressions);
@@ -823,7 +822,7 @@
         ExprKind::Ret(ref optional_expression) => {
             walk_list!(visitor, visit_expr, optional_expression);
         }
-        ExprKind::MacCall(ref mac) => visitor.visit_mac(mac),
+        ExprKind::MacCall(ref mac) => visitor.visit_mac_call(mac),
         ExprKind::Paren(ref subexpression) => visitor.visit_expr(subexpression),
         ExprKind::InlineAsm(ref ia) => {
             for (op, _) in &ia.operands {
@@ -886,7 +885,7 @@
 
 pub fn walk_attribute<'a, V: Visitor<'a>>(visitor: &mut V, attr: &'a Attribute) {
     match attr.kind {
-        AttrKind::Normal(ref item) => walk_mac_args(visitor, &item.args),
+        AttrKind::Normal(ref item, ref _tokens) => walk_mac_args(visitor, &item.args),
         AttrKind::DocComment(..) => {}
     }
 }
@@ -894,20 +893,19 @@
 pub fn walk_mac_args<'a, V: Visitor<'a>>(visitor: &mut V, args: &'a MacArgs) {
     match args {
         MacArgs::Empty => {}
-        MacArgs::Delimited(_dspan, _delim, tokens) => visitor.visit_tts(tokens.clone()),
-        MacArgs::Eq(_eq_span, tokens) => visitor.visit_tts(tokens.clone()),
-    }
-}
-
-pub fn walk_tt<'a, V: Visitor<'a>>(visitor: &mut V, tt: TokenTree) {
-    match tt {
-        TokenTree::Token(token) => visitor.visit_token(token),
-        TokenTree::Delimited(_, _, tts) => visitor.visit_tts(tts),
-    }
-}
-
-pub fn walk_tts<'a, V: Visitor<'a>>(visitor: &mut V, tts: TokenStream) {
-    for tt in tts.trees() {
-        visitor.visit_tt(tt);
+        MacArgs::Delimited(_dspan, _delim, _tokens) => {}
+        // The value in `#[key = VALUE]` must be visited as an expression for backward
+        // compatibility, so that macros can be expanded in that position.
+        MacArgs::Eq(_eq_span, tokens) => match tokens.trees_ref().next() {
+            Some(TokenTree::Token(token)) => match &token.kind {
+                token::Interpolated(nt) => match &**nt {
+                    token::NtExpr(expr) => visitor.visit_expr(expr),
+                    t => panic!("unexpected token in key-value attribute: {:?}", t),
+                },
+                token::Literal(..) | token::Ident(..) => {}
+                t => panic!("unexpected token in key-value attribute: {:?}", t),
+            },
+            t => panic!("unexpected token in key-value attribute: {:?}", t),
+        },
     }
 }
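
For orientation, the three `MacArgs` shapes handled by the new `walk_mac_args` correspond to the attribute forms below; only the `MacArgs::Eq` value is walked as an expression, which is what the `Interpolated`/`NtExpr` arm covers. This is an illustrative sketch of the surface syntax only, not code from this import.

    #[non_exhaustive]             // MacArgs::Empty: no argument tokens at all.
    #[derive(Clone, Debug)]       // MacArgs::Delimited: delimited tokens, no longer walked.
    #[doc = "An example struct."] // MacArgs::Eq: the value is visited as an expression.
    pub struct Example {}

    fn main() {
        let _ = Example {};
    }
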
diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs
index c97f80c..330776f 100644
--- a/compiler/rustc_ast_lowering/src/expr.rs
+++ b/compiler/rustc_ast_lowering/src/expr.rs
@@ -9,6 +9,7 @@
 use rustc_errors::struct_span_err;
 use rustc_hir as hir;
 use rustc_hir::def::Res;
+use rustc_session::parse::feature_err;
 use rustc_span::hygiene::ForLoopLoc;
 use rustc_span::source_map::{respan, DesugaringKind, Span, Spanned};
 use rustc_span::symbol::{sym, Ident, Symbol};
@@ -30,6 +31,10 @@
             let kind = match e.kind {
                 ExprKind::Box(ref inner) => hir::ExprKind::Box(self.lower_expr(inner)),
                 ExprKind::Array(ref exprs) => hir::ExprKind::Array(self.lower_exprs(exprs)),
+                ExprKind::ConstBlock(ref anon_const) => {
+                    let anon_const = self.lower_anon_const(anon_const);
+                    hir::ExprKind::ConstBlock(anon_const)
+                }
                 ExprKind::Repeat(ref expr, ref count) => {
                     let expr = self.lower_expr(expr);
                     let count = self.lower_anon_const(count);
@@ -142,7 +147,7 @@
                     hir::ExprKind::Block(self.lower_block(blk, opt_label.is_some()), opt_label)
                 }
                 ExprKind::Assign(ref el, ref er, span) => {
-                    hir::ExprKind::Assign(self.lower_expr(el), self.lower_expr(er), span)
+                    self.lower_expr_assign(el, er, span, e.span)
                 }
                 ExprKind::AssignOp(op, ref el, ref er) => hir::ExprKind::AssignOp(
                     self.lower_binop(op),
@@ -182,8 +187,18 @@
                 }
                 ExprKind::InlineAsm(ref asm) => self.lower_expr_asm(e.span, asm),
                 ExprKind::LlvmInlineAsm(ref asm) => self.lower_expr_llvm_asm(asm),
-                ExprKind::Struct(ref path, ref fields, ref maybe_expr) => {
-                    let maybe_expr = maybe_expr.as_ref().map(|x| self.lower_expr(x));
+                ExprKind::Struct(ref path, ref fields, ref rest) => {
+                    let rest = match rest {
+                        StructRest::Base(e) => Some(self.lower_expr(e)),
+                        StructRest::Rest(sp) => {
+                            self.sess
+                                .struct_span_err(*sp, "base expression required after `..`")
+                                .span_label(*sp, "add a base expression here")
+                                .emit();
+                            Some(&*self.arena.alloc(self.expr_err(*sp)))
+                        }
+                        StructRest::None => None,
+                    };
                     hir::ExprKind::Struct(
                         self.arena.alloc(self.lower_qpath(
                             e.id,
@@ -193,7 +208,7 @@
                             ImplTraitContext::disallowed(),
                         )),
                         self.arena.alloc_from_iter(fields.iter().map(|x| self.lower_field(x))),
-                        maybe_expr,
+                        rest,
                     )
                 }
                 ExprKind::Yield(ref opt_expr) => self.lower_expr_yield(e.span, opt_expr.as_deref()),
@@ -206,9 +221,9 @@
                         ex.span = e.span;
                     }
                     // Merge attributes into the inner expression.
-                    let mut attrs = e.attrs.clone();
+                    let mut attrs: Vec<_> = e.attrs.iter().map(|a| self.lower_attr(a)).collect();
                     attrs.extend::<Vec<_>>(ex.attrs.into());
-                    ex.attrs = attrs;
+                    ex.attrs = attrs.into();
                     return ex;
                 }
 
@@ -432,17 +447,25 @@
         self.with_catch_scope(body.id, |this| {
             let mut block = this.lower_block_noalloc(body, true);
 
-            let try_span = this.mark_span_with_reason(
-                DesugaringKind::TryBlock,
-                body.span,
-                this.allow_try_trait.clone(),
-            );
-
             // Final expression of the block (if present) or `()` with span at the end of block
-            let tail_expr = block
-                .expr
-                .take()
-                .unwrap_or_else(|| this.expr_unit(this.sess.source_map().end_point(try_span)));
+            let (try_span, tail_expr) = if let Some(expr) = block.expr.take() {
+                (
+                    this.mark_span_with_reason(
+                        DesugaringKind::TryBlock,
+                        expr.span,
+                        this.allow_try_trait.clone(),
+                    ),
+                    expr,
+                )
+            } else {
+                let try_span = this.mark_span_with_reason(
+                    DesugaringKind::TryBlock,
+                    this.sess.source_map().end_point(body.span),
+                    this.allow_try_trait.clone(),
+                );
+
+                (try_span, this.expr_unit(try_span))
+            };
 
             let ok_wrapped_span =
                 this.mark_span_with_reason(DesugaringKind::TryBlock, tail_expr.span, None);
@@ -828,6 +851,236 @@
         })
     }
 
+    /// Destructure the LHS of complex assignments.
+    /// For instance, lower `(a, b) = t` to `{ let (lhs1, lhs2) = t; a = lhs1; b = lhs2; }`.
+    fn lower_expr_assign(
+        &mut self,
+        lhs: &Expr,
+        rhs: &Expr,
+        eq_sign_span: Span,
+        whole_span: Span,
+    ) -> hir::ExprKind<'hir> {
+        // Return early in case of an ordinary assignment.
+        fn is_ordinary(lower_ctx: &mut LoweringContext<'_, '_>, lhs: &Expr) -> bool {
+            match &lhs.kind {
+                ExprKind::Array(..) | ExprKind::Struct(..) | ExprKind::Tup(..) => false,
+                // Check for tuple struct constructor.
+                ExprKind::Call(callee, ..) => lower_ctx.extract_tuple_struct_path(callee).is_none(),
+                ExprKind::Paren(e) => {
+                    match e.kind {
+                        // We special-case `(..)` for consistency with patterns.
+                        ExprKind::Range(None, None, RangeLimits::HalfOpen) => false,
+                        _ => is_ordinary(lower_ctx, e),
+                    }
+                }
+                _ => true,
+            }
+        }
+        if is_ordinary(self, lhs) {
+            return hir::ExprKind::Assign(self.lower_expr(lhs), self.lower_expr(rhs), eq_sign_span);
+        }
+        if !self.sess.features_untracked().destructuring_assignment {
+            feature_err(
+                &self.sess.parse_sess,
+                sym::destructuring_assignment,
+                eq_sign_span,
+                "destructuring assignments are unstable",
+            )
+            .span_label(lhs.span, "cannot assign to this expression")
+            .emit();
+        }
+
+        let mut assignments = vec![];
+
+        // The LHS becomes a pattern: `(lhs1, lhs2)`.
+        let pat = self.destructure_assign(lhs, eq_sign_span, &mut assignments);
+        let rhs = self.lower_expr(rhs);
+
+        // Introduce a `let` for destructuring: `let (lhs1, lhs2) = t`.
+        let destructure_let = self.stmt_let_pat(
+            ThinVec::new(),
+            whole_span,
+            Some(rhs),
+            pat,
+            hir::LocalSource::AssignDesugar(eq_sign_span),
+        );
+
+        // `a = lhs1; b = lhs2;`.
+        let stmts = self
+            .arena
+            .alloc_from_iter(std::iter::once(destructure_let).chain(assignments.into_iter()));
+
+        // Wrap everything in a block.
+        hir::ExprKind::Block(&self.block_all(whole_span, stmts, None), None)
+    }
+
+    /// If the given expression is a path to a tuple struct, returns that path.
+    /// It is not a complete check, but just tries to reject most paths early
+    /// if they are not tuple structs.
+    /// Type checking will take care of the full validation later.
+    fn extract_tuple_struct_path<'a>(&mut self, expr: &'a Expr) -> Option<&'a Path> {
+        // For tuple struct destructuring, it must be a non-qualified path (like in patterns).
+        if let ExprKind::Path(None, path) = &expr.kind {
+            // Does the path resolve to something disallowed in a tuple struct/variant pattern?
+            if let Some(partial_res) = self.resolver.get_partial_res(expr.id) {
+                if partial_res.unresolved_segments() == 0
+                    && !partial_res.base_res().expected_in_tuple_struct_pat()
+                {
+                    return None;
+                }
+            }
+            return Some(path);
+        }
+        None
+    }
+
+    /// Convert the LHS of a destructuring assignment to a pattern.
+    /// Each sub-assignment is recorded in `assignments`.
+    fn destructure_assign(
+        &mut self,
+        lhs: &Expr,
+        eq_sign_span: Span,
+        assignments: &mut Vec<hir::Stmt<'hir>>,
+    ) -> &'hir hir::Pat<'hir> {
+        match &lhs.kind {
+            // Slice patterns.
+            ExprKind::Array(elements) => {
+                let (pats, rest) =
+                    self.destructure_sequence(elements, "slice", eq_sign_span, assignments);
+                let slice_pat = if let Some((i, span)) = rest {
+                    let (before, after) = pats.split_at(i);
+                    hir::PatKind::Slice(
+                        before,
+                        Some(self.pat_without_dbm(span, hir::PatKind::Wild)),
+                        after,
+                    )
+                } else {
+                    hir::PatKind::Slice(pats, None, &[])
+                };
+                return self.pat_without_dbm(lhs.span, slice_pat);
+            }
+            // Tuple structs.
+            ExprKind::Call(callee, args) => {
+                if let Some(path) = self.extract_tuple_struct_path(callee) {
+                    let (pats, rest) = self.destructure_sequence(
+                        args,
+                        "tuple struct or variant",
+                        eq_sign_span,
+                        assignments,
+                    );
+                    let qpath = self.lower_qpath(
+                        callee.id,
+                        &None,
+                        path,
+                        ParamMode::Optional,
+                        ImplTraitContext::disallowed(),
+                    );
+                    // Destructure like a tuple struct.
+                    let tuple_struct_pat =
+                        hir::PatKind::TupleStruct(qpath, pats, rest.map(|r| r.0));
+                    return self.pat_without_dbm(lhs.span, tuple_struct_pat);
+                }
+            }
+            // Structs.
+            ExprKind::Struct(path, fields, rest) => {
+                let field_pats = self.arena.alloc_from_iter(fields.iter().map(|f| {
+                    let pat = self.destructure_assign(&f.expr, eq_sign_span, assignments);
+                    hir::FieldPat {
+                        hir_id: self.next_id(),
+                        ident: f.ident,
+                        pat,
+                        is_shorthand: f.is_shorthand,
+                        span: f.span,
+                    }
+                }));
+                let qpath = self.lower_qpath(
+                    lhs.id,
+                    &None,
+                    path,
+                    ParamMode::Optional,
+                    ImplTraitContext::disallowed(),
+                );
+                let fields_omitted = match rest {
+                    StructRest::Base(e) => {
+                        self.sess
+                            .struct_span_err(
+                                e.span,
+                                "functional record updates are not allowed in destructuring \
+                                    assignments",
+                            )
+                            .span_suggestion(
+                                e.span,
+                                "consider removing the trailing pattern",
+                                String::new(),
+                                rustc_errors::Applicability::MachineApplicable,
+                            )
+                            .emit();
+                        true
+                    }
+                    StructRest::Rest(_) => true,
+                    StructRest::None => false,
+                };
+                let struct_pat = hir::PatKind::Struct(qpath, field_pats, fields_omitted);
+                return self.pat_without_dbm(lhs.span, struct_pat);
+            }
+            // Tuples.
+            ExprKind::Tup(elements) => {
+                let (pats, rest) =
+                    self.destructure_sequence(elements, "tuple", eq_sign_span, assignments);
+                let tuple_pat = hir::PatKind::Tuple(pats, rest.map(|r| r.0));
+                return self.pat_without_dbm(lhs.span, tuple_pat);
+            }
+            ExprKind::Paren(e) => {
+                // We special-case `(..)` for consistency with patterns.
+                if let ExprKind::Range(None, None, RangeLimits::HalfOpen) = e.kind {
+                    let tuple_pat = hir::PatKind::Tuple(&[], Some(0));
+                    return self.pat_without_dbm(lhs.span, tuple_pat);
+                } else {
+                    return self.destructure_assign(e, eq_sign_span, assignments);
+                }
+            }
+            _ => {}
+        }
+        // Treat all other cases as a normal lvalue.
+        let ident = Ident::new(sym::lhs, lhs.span);
+        let (pat, binding) = self.pat_ident(lhs.span, ident);
+        let ident = self.expr_ident(lhs.span, ident, binding);
+        let assign = hir::ExprKind::Assign(self.lower_expr(lhs), ident, eq_sign_span);
+        let expr = self.expr(lhs.span, assign, ThinVec::new());
+        assignments.push(self.stmt_expr(lhs.span, expr));
+        pat
+    }
+
+    /// Destructure a sequence of expressions occurring on the LHS of an assignment.
+    /// Such a sequence occurs in a tuple (struct)/slice.
+    /// Return a sequence of corresponding patterns, and the index and the span of `..` if it
+    /// exists.
+    /// Each sub-assignment is recorded in `assignments`.
+    fn destructure_sequence(
+        &mut self,
+        elements: &[AstP<Expr>],
+        ctx: &str,
+        eq_sign_span: Span,
+        assignments: &mut Vec<hir::Stmt<'hir>>,
+    ) -> (&'hir [&'hir hir::Pat<'hir>], Option<(usize, Span)>) {
+        let mut rest = None;
+        let elements =
+            self.arena.alloc_from_iter(elements.iter().enumerate().filter_map(|(i, e)| {
+                // Check for `..` pattern.
+                if let ExprKind::Range(None, None, RangeLimits::HalfOpen) = e.kind {
+                    if let Some((_, prev_span)) = rest {
+                        self.ban_extra_rest_pat(e.span, prev_span, ctx);
+                    } else {
+                        rest = Some((i, e.span));
+                    }
+                    None
+                } else {
+                    Some(self.destructure_assign(e, eq_sign_span, assignments))
+                }
+            }));
+        (elements, rest)
+    }
+
     /// Desugar `<start>..=<end>` into `std::ops::RangeInclusive::new(<start>, <end>)`.
     fn lower_expr_range_closed(&mut self, span: Span, e1: &Expr, e2: &Expr) -> hir::ExprKind<'hir> {
         let e1 = self.lower_expr_mut(e1);
@@ -977,7 +1230,7 @@
                             asm::InlineAsmReg::parse(
                                 sess.asm_arch?,
                                 |feature| sess.target_features.contains(&Symbol::intern(feature)),
-                                &sess.target.target,
+                                &sess.target,
                                 s,
                             )
                             .map_err(|e| {
@@ -1178,52 +1431,47 @@
                                          input| {
                             match used_regs.entry(r) {
                                 Entry::Occupied(o) => {
-                                    if !skip {
-                                        skip = true;
-
-                                        let idx2 = *o.get();
-                                        let op2 = &operands[idx2];
-                                        let op_sp2 = asm.operands[idx2].1;
-                                        let reg2 = match op2.reg() {
-                                            Some(asm::InlineAsmRegOrRegClass::Reg(r)) => r,
-                                            _ => unreachable!(),
-                                        };
-
-                                        let msg = format!(
-                                            "register `{}` conflicts with register `{}`",
-                                            reg.name(),
-                                            reg2.name()
-                                        );
-                                        let mut err = sess.struct_span_err(op_sp, &msg);
-                                        err.span_label(
-                                            op_sp,
-                                            &format!("register `{}`", reg.name()),
-                                        );
-                                        err.span_label(
-                                            op_sp2,
-                                            &format!("register `{}`", reg2.name()),
-                                        );
-
-                                        match (op, op2) {
-                                            (
-                                                hir::InlineAsmOperand::In { .. },
-                                                hir::InlineAsmOperand::Out { late, .. },
-                                            )
-                                            | (
-                                                hir::InlineAsmOperand::Out { late, .. },
-                                                hir::InlineAsmOperand::In { .. },
-                                            ) => {
-                                                assert!(!*late);
-                                                let out_op_sp = if input { op_sp2 } else { op_sp };
-                                                let msg = "use `lateout` instead of \
-                                                     `out` to avoid conflict";
-                                                err.span_help(out_op_sp, msg);
-                                            }
-                                            _ => {}
-                                        }
-
-                                        err.emit();
+                                    if skip {
+                                        return;
                                     }
+                                    skip = true;
+
+                                    let idx2 = *o.get();
+                                    let op2 = &operands[idx2];
+                                    let op_sp2 = asm.operands[idx2].1;
+                                    let reg2 = match op2.reg() {
+                                        Some(asm::InlineAsmRegOrRegClass::Reg(r)) => r,
+                                        _ => unreachable!(),
+                                    };
+
+                                    let msg = format!(
+                                        "register `{}` conflicts with register `{}`",
+                                        reg.name(),
+                                        reg2.name()
+                                    );
+                                    let mut err = sess.struct_span_err(op_sp, &msg);
+                                    err.span_label(op_sp, &format!("register `{}`", reg.name()));
+                                    err.span_label(op_sp2, &format!("register `{}`", reg2.name()));
+
+                                    match (op, op2) {
+                                        (
+                                            hir::InlineAsmOperand::In { .. },
+                                            hir::InlineAsmOperand::Out { late, .. },
+                                        )
+                                        | (
+                                            hir::InlineAsmOperand::Out { late, .. },
+                                            hir::InlineAsmOperand::In { .. },
+                                        ) => {
+                                            assert!(!*late);
+                                            let out_op_sp = if input { op_sp2 } else { op_sp };
+                                            let msg = "use `lateout` instead of \
+                                                    `out` to avoid conflict";
+                                            err.span_help(out_op_sp, msg);
+                                        }
+                                        _ => {}
+                                    }
+
+                                    err.emit();
                                 }
                                 Entry::Vacant(v) => {
                                     v.insert(idx);
@@ -1464,13 +1712,15 @@
             hir::MatchSource::ForLoopDesugar,
         ));
 
+        let attrs: Vec<_> = e.attrs.iter().map(|a| self.lower_attr(a)).collect();
+
         // This is effectively `{ let _result = ...; _result }`.
         // The construct was introduced in #21984 and is necessary to make sure that
         // temporaries in the `head` expression are dropped and do not leak to the
         // surrounding scope of the `match` since the `match` is not a terminating scope.
         //
         // Also, add the attributes to the outer returned expr node.
-        self.expr_drop_temps_mut(desugared_span, match_expr, e.attrs.clone())
+        self.expr_drop_temps_mut(desugared_span, match_expr, attrs.into())
     }
 
     /// Desugar `ExprKind::Try` from: `<expr>?` into:
@@ -1553,7 +1803,7 @@
                 hir::LangItem::TryFromError,
                 unstable_span,
                 from_expr,
-                try_span,
+                unstable_span,
             );
             let thin_attrs = ThinVec::from(attrs);
             let catch_scope = self.catch_scopes.last().copied();
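
As a concrete illustration of the `lower_expr_assign` desugaring added above, the surface-Rust equivalent of `(a, b) = t;` is sketched below. The real lowering builds the corresponding HIR directly; this block only mirrors its shape.

    fn main() {
        let t = (1, 2);
        let (mut a, mut b) = (0, 0);

        // `(a, b) = t;` is lowered as if the user had written:
        {
            let (lhs1, lhs2) = t; // the LHS rewritten as a pattern
            a = lhs1;             // one ordinary assignment per binding
            b = lhs2;
        }

        assert_eq!((a, b), (1, 2));
    }
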
diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs
index 617cace..d353bc1 100644
--- a/compiler/rustc_ast_lowering/src/item.rs
+++ b/compiler/rustc_ast_lowering/src/item.rs
@@ -1096,8 +1096,18 @@
                 // Check if this is a binding pattern, if so, we can optimize and avoid adding a
                 // `let <pat> = __argN;` statement. In this case, we do not rename the parameter.
                 let (ident, is_simple_parameter) = match parameter.pat.kind {
-                    hir::PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ident, _) => {
-                        (ident, true)
+                    hir::PatKind::Binding(
+                        hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
+                        _,
+                        ident,
+                        _,
+                    ) => (ident, true),
+                    // For `ref mut` or wildcard arguments, we can't reuse the binding, but
+                    // we can keep the same name for the parameter.
+                    // This lets rustdoc render it correctly in documentation.
+                    hir::PatKind::Binding(_, _, ident, _) => (ident, false),
+                    hir::PatKind::Wild => {
+                        (Ident::with_dummy_span(rustc_span::symbol::kw::Underscore), false)
                     }
                     _ => {
                         // Replace the ident for bindings that aren't simple.
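
The widened `is_simple_parameter` check above means plain and `mut` bindings in an `async fn` keep their original names, while other patterns still go through the `__argN` renaming. A rough sketch of the two cases follows; the function names are invented for the example.

    async fn keeps_names(x: u32, mut y: u32) -> u32 {
        // `x` and `mut y` are simple bindings: no extra `let <pat> = __argN;`
        // statement is generated and the parameter names are preserved.
        y += x;
        y
    }

    async fn gets_renamed((a, b): (u32, u32)) -> u32 {
        // A tuple pattern is not a simple binding, so the lowering still
        // introduces a renamed argument and a `let (a, b) = __arg0;` statement.
        a + b
    }

    fn main() {
        // Constructing the futures is enough for the illustration; no executor needed.
        let _ = keeps_names(1, 2);
        let _ = gets_renamed((3, 4));
    }
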
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index a28d022..549b66e 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -148,7 +148,7 @@
     is_collecting_in_band_lifetimes: bool,
 
     /// Currently in-scope lifetimes defined in impl headers, fn headers, or HRTB.
-    /// When `is_collectin_in_band_lifetimes` is true, each lifetime is checked
+    /// When `is_collecting_in_band_lifetimes` is true, each lifetime is checked
     /// against this list to see if it is already in-scope, or if a definition
     /// needs to be created for it.
     ///
@@ -257,7 +257,7 @@
     /// Disallowed in `let` / `const` / `static` bindings.
     Binding,
 
-    /// All other posiitons.
+    /// All other positions.
     Other,
 }
 
@@ -363,7 +363,7 @@
 ///   elided bounds follow special rules. Note that this only covers
 ///   cases where *nothing* is written; the `'_` in `Box<dyn Foo +
 ///   '_>` is a case of "modern" elision.
-/// - **Deprecated** -- this coverse cases like `Ref<T>`, where the lifetime
+/// - **Deprecated** -- this covers cases like `Ref<T>`, where the lifetime
 ///   parameter to ref is completely elided. `Ref<'_, T>` would be the modern,
 ///   non-deprecated equivalent.
 ///
@@ -490,10 +490,7 @@
                         let count = generics
                             .params
                             .iter()
-                            .filter(|param| match param.kind {
-                                ast::GenericParamKind::Lifetime { .. } => true,
-                                _ => false,
-                            })
+                            .filter(|param| matches!(param.kind, ast::GenericParamKind::Lifetime { .. }))
                             .count();
                         self.lctx.type_def_lifetime_params.insert(def_id.to_def_id(), count);
                     }
@@ -538,6 +535,12 @@
                         }
                         self.visit_fn_ret_ty(&f.decl.output)
                     }
+                    TyKind::ImplTrait(def_node_id, _) => {
+                        self.lctx.allocate_hir_id_counter(def_node_id);
+                        self.with_hir_id_owner(Some(def_node_id), |this| {
+                            visit::walk_ty(this, t);
+                        });
+                    }
                     _ => visit::walk_ty(self, t),
                 }
             }
@@ -963,12 +966,16 @@
         // Note that we explicitly do not walk the path. Since we don't really
         // lower attributes (we use the AST version) there is nowhere to keep
         // the `HirId`s. We don't actually need HIR version of attributes anyway.
+        // Tokens are also not needed after macro expansion and parsing.
         let kind = match attr.kind {
-            AttrKind::Normal(ref item) => AttrKind::Normal(AttrItem {
-                path: item.path.clone(),
-                args: self.lower_mac_args(&item.args),
-                tokens: None,
-            }),
+            AttrKind::Normal(ref item, _) => AttrKind::Normal(
+                AttrItem {
+                    path: item.path.clone(),
+                    args: self.lower_mac_args(&item.args),
+                    tokens: None,
+                },
+                None,
+            ),
             AttrKind::DocComment(comment_kind, data) => AttrKind::DocComment(comment_kind, data),
         };
 
@@ -1346,10 +1353,14 @@
                         // Add a definition for the in-band `Param`.
                         let def_id = self.resolver.local_def_id(def_node_id);
 
-                        let hir_bounds = self.lower_param_bounds(
-                            bounds,
-                            ImplTraitContext::Universal(in_band_ty_params),
-                        );
+                        self.allocate_hir_id_counter(def_node_id);
+
+                        let hir_bounds = self.with_hir_id_owner(def_node_id, |this| {
+                            this.lower_param_bounds(
+                                bounds,
+                                ImplTraitContext::Universal(in_band_ty_params),
+                            )
+                        });
                         // Set the name to `impl Bound1 + Bound2`.
                         let ident = Ident::from_str_and_span(&pprust::ty_to_string(t), span);
                         in_band_ty_params.push(hir::GenericParam {
@@ -1713,7 +1724,7 @@
                 pat: self.lower_pat(&l.pat),
                 init,
                 span: l.span,
-                attrs: l.attrs.clone(),
+                attrs: l.attrs.iter().map(|a| self.lower_attr(a)).collect::<Vec<_>>().into(),
                 source: hir::LocalSource::Normal,
             },
             ids,
@@ -2200,7 +2211,7 @@
                         .attrs
                         .iter()
                         .filter(|attr| self.sess.check_name(attr, sym::rustc_synthetic))
-                        .map(|_| hir::SyntheticTyParamKind::ImplTrait)
+                        .map(|_| hir::SyntheticTyParamKind::FromAttr)
                         .next(),
                 };
 
@@ -2523,6 +2534,7 @@
                 hir_id,
                 kind: hir::PatKind::Binding(bm, hir_id, ident.with_span_pos(span), None),
                 span,
+                default_binding_modes: true,
             }),
             hir_id,
         )
@@ -2533,7 +2545,21 @@
     }
 
     fn pat(&mut self, span: Span, kind: hir::PatKind<'hir>) -> &'hir hir::Pat<'hir> {
-        self.arena.alloc(hir::Pat { hir_id: self.next_id(), kind, span })
+        self.arena.alloc(hir::Pat {
+            hir_id: self.next_id(),
+            kind,
+            span,
+            default_binding_modes: true,
+        })
+    }
+
+    fn pat_without_dbm(&mut self, span: Span, kind: hir::PatKind<'hir>) -> &'hir hir::Pat<'hir> {
+        self.arena.alloc(hir::Pat {
+            hir_id: self.next_id(),
+            kind,
+            span,
+            default_binding_modes: false,
+        })
     }
 
     fn ty_path(
diff --git a/compiler/rustc_ast_lowering/src/pat.rs b/compiler/rustc_ast_lowering/src/pat.rs
index cb7b7c0..e4e7b24 100644
--- a/compiler/rustc_ast_lowering/src/pat.rs
+++ b/compiler/rustc_ast_lowering/src/pat.rs
@@ -10,82 +10,90 @@
 use rustc_span::{source_map::Spanned, Span};
 
 impl<'a, 'hir> LoweringContext<'a, 'hir> {
-    crate fn lower_pat(&mut self, p: &Pat) -> &'hir hir::Pat<'hir> {
+    crate fn lower_pat(&mut self, mut pattern: &Pat) -> &'hir hir::Pat<'hir> {
         ensure_sufficient_stack(|| {
-            let node = match p.kind {
-                PatKind::Wild => hir::PatKind::Wild,
-                PatKind::Ident(ref binding_mode, ident, ref sub) => {
-                    let lower_sub = |this: &mut Self| sub.as_ref().map(|s| this.lower_pat(&*s));
-                    let node = self.lower_pat_ident(p, binding_mode, ident, lower_sub);
-                    node
-                }
-                PatKind::Lit(ref e) => hir::PatKind::Lit(self.lower_expr(e)),
-                PatKind::TupleStruct(ref path, ref pats) => {
-                    let qpath = self.lower_qpath(
-                        p.id,
-                        &None,
-                        path,
-                        ParamMode::Optional,
-                        ImplTraitContext::disallowed(),
-                    );
-                    let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple struct");
-                    hir::PatKind::TupleStruct(qpath, pats, ddpos)
-                }
-                PatKind::Or(ref pats) => hir::PatKind::Or(
-                    self.arena.alloc_from_iter(pats.iter().map(|x| self.lower_pat(x))),
-                ),
-                PatKind::Path(ref qself, ref path) => {
-                    let qpath = self.lower_qpath(
-                        p.id,
-                        qself,
-                        path,
-                        ParamMode::Optional,
-                        ImplTraitContext::disallowed(),
-                    );
-                    hir::PatKind::Path(qpath)
-                }
-                PatKind::Struct(ref path, ref fields, etc) => {
-                    let qpath = self.lower_qpath(
-                        p.id,
-                        &None,
-                        path,
-                        ParamMode::Optional,
-                        ImplTraitContext::disallowed(),
-                    );
+            // Loop here instead of recursing, so nested `Paren` patterns are handled iteratively.
+            let node = loop {
+                match pattern.kind {
+                    PatKind::Wild => break hir::PatKind::Wild,
+                    PatKind::Ident(ref binding_mode, ident, ref sub) => {
+                        let lower_sub = |this: &mut Self| sub.as_ref().map(|s| this.lower_pat(&*s));
+                        break self.lower_pat_ident(pattern, binding_mode, ident, lower_sub);
+                    }
+                    PatKind::Lit(ref e) => break hir::PatKind::Lit(self.lower_expr(e)),
+                    PatKind::TupleStruct(ref path, ref pats) => {
+                        let qpath = self.lower_qpath(
+                            pattern.id,
+                            &None,
+                            path,
+                            ParamMode::Optional,
+                            ImplTraitContext::disallowed(),
+                        );
+                        let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple struct");
+                        break hir::PatKind::TupleStruct(qpath, pats, ddpos);
+                    }
+                    PatKind::Or(ref pats) => {
+                        break hir::PatKind::Or(
+                            self.arena.alloc_from_iter(pats.iter().map(|x| self.lower_pat(x))),
+                        );
+                    }
+                    PatKind::Path(ref qself, ref path) => {
+                        let qpath = self.lower_qpath(
+                            pattern.id,
+                            qself,
+                            path,
+                            ParamMode::Optional,
+                            ImplTraitContext::disallowed(),
+                        );
+                        break hir::PatKind::Path(qpath);
+                    }
+                    PatKind::Struct(ref path, ref fields, etc) => {
+                        let qpath = self.lower_qpath(
+                            pattern.id,
+                            &None,
+                            path,
+                            ParamMode::Optional,
+                            ImplTraitContext::disallowed(),
+                        );
 
-                    let fs = self.arena.alloc_from_iter(fields.iter().map(|f| hir::FieldPat {
-                        hir_id: self.next_id(),
-                        ident: f.ident,
-                        pat: self.lower_pat(&f.pat),
-                        is_shorthand: f.is_shorthand,
-                        span: f.span,
-                    }));
-                    hir::PatKind::Struct(qpath, fs, etc)
+                        let fs = self.arena.alloc_from_iter(fields.iter().map(|f| hir::FieldPat {
+                            hir_id: self.next_id(),
+                            ident: f.ident,
+                            pat: self.lower_pat(&f.pat),
+                            is_shorthand: f.is_shorthand,
+                            span: f.span,
+                        }));
+                        break hir::PatKind::Struct(qpath, fs, etc);
+                    }
+                    PatKind::Tuple(ref pats) => {
+                        let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple");
+                        break hir::PatKind::Tuple(pats, ddpos);
+                    }
+                    PatKind::Box(ref inner) => {
+                        break hir::PatKind::Box(self.lower_pat(inner));
+                    }
+                    PatKind::Ref(ref inner, mutbl) => {
+                        break hir::PatKind::Ref(self.lower_pat(inner), mutbl);
+                    }
+                    PatKind::Range(ref e1, ref e2, Spanned { node: ref end, .. }) => {
+                        break hir::PatKind::Range(
+                            e1.as_deref().map(|e| self.lower_expr(e)),
+                            e2.as_deref().map(|e| self.lower_expr(e)),
+                            self.lower_range_end(end, e2.is_some()),
+                        );
+                    }
+                    PatKind::Slice(ref pats) => break self.lower_pat_slice(pats),
+                    PatKind::Rest => {
+                        // If we reach here the `..` pattern is not semantically allowed.
+                        break self.ban_illegal_rest_pat(pattern.span);
+                    }
+                    // Unwrap the parens; the inner pattern is processed in the next loop iteration.
+                    PatKind::Paren(ref inner) => pattern = inner,
+                    PatKind::MacCall(_) => panic!("{:?} shouldn't exist here", pattern.span),
                 }
-                PatKind::Tuple(ref pats) => {
-                    let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple");
-                    hir::PatKind::Tuple(pats, ddpos)
-                }
-                PatKind::Box(ref inner) => hir::PatKind::Box(self.lower_pat(inner)),
-                PatKind::Ref(ref inner, mutbl) => hir::PatKind::Ref(self.lower_pat(inner), mutbl),
-                PatKind::Range(ref e1, ref e2, Spanned { node: ref end, .. }) => {
-                    hir::PatKind::Range(
-                        e1.as_deref().map(|e| self.lower_expr(e)),
-                        e2.as_deref().map(|e| self.lower_expr(e)),
-                        self.lower_range_end(end, e2.is_some()),
-                    )
-                }
-                PatKind::Slice(ref pats) => self.lower_pat_slice(pats),
-                PatKind::Rest => {
-                    // If we reach here the `..` pattern is not semantically allowed.
-                    self.ban_illegal_rest_pat(p.span)
-                }
-                // FIXME: consider not using recursion to lower this.
-                PatKind::Paren(ref inner) => return self.lower_pat(inner),
-                PatKind::MacCall(_) => panic!("{:?} shouldn't exist here", p.span),
             };
 
-            self.pat_with_node_id_of(p, node)
+            self.pat_with_node_id_of(pattern, node)
         })
     }
 
@@ -265,11 +273,16 @@
 
     /// Construct a `Pat` with the `HirId` of `p.id` lowered.
     fn pat_with_node_id_of(&mut self, p: &Pat, kind: hir::PatKind<'hir>) -> &'hir hir::Pat<'hir> {
-        self.arena.alloc(hir::Pat { hir_id: self.lower_node_id(p.id), kind, span: p.span })
+        self.arena.alloc(hir::Pat {
+            hir_id: self.lower_node_id(p.id),
+            kind,
+            span: p.span,
+            default_binding_modes: true,
+        })
     }
 
     /// Emit a friendly error for extra `..` patterns in a tuple/tuple struct/slice pattern.
-    fn ban_extra_rest_pat(&self, sp: Span, prev_sp: Span, ctx: &str) {
+    crate fn ban_extra_rest_pat(&self, sp: Span, prev_sp: Span, ctx: &str) {
         self.diagnostic()
             .struct_span_err(sp, &format!("`..` can only be used once per {} pattern", ctx))
             .span_label(sp, &format!("can only be used once per {} pattern", ctx))
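
The rewritten `lower_pat` peels `Paren` patterns off in the loop instead of recursing. A minimal sketch of the kind of input this affects:

    #[allow(unused_parens)]
    fn main() {
        // Each layer of parentheses is one iteration of the loop in `lower_pat`;
        // the result is the same HIR pattern as `let x = 5;`.
        let ((((x)))) = 5;
        assert_eq!(x, 5);
    }
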
diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs
index cf68dfb..6afed35 100644
--- a/compiler/rustc_ast_lowering/src/path.rs
+++ b/compiler/rustc_ast_lowering/src/path.rs
@@ -262,10 +262,8 @@
             self.lower_angle_bracketed_parameter_data(&Default::default(), param_mode, itctx)
         };
 
-        let has_lifetimes = generic_args.args.iter().any(|arg| match arg {
-            GenericArg::Lifetime(_) => true,
-            _ => false,
-        });
+        let has_lifetimes =
+            generic_args.args.iter().any(|arg| matches!(arg, GenericArg::Lifetime(_)));
         let first_generic_span = generic_args
             .args
             .iter()
@@ -310,8 +308,8 @@
                             E0726,
                             "implicit elided lifetime not allowed here"
                         );
-                        rustc_session::lint::add_elided_lifetime_in_path_suggestion(
-                            &self.sess,
+                        rustc_errors::add_elided_lifetime_in_path_suggestion(
+                            &self.sess.source_map(),
                             &mut err,
                             expected_lifetimes,
                             path_span,
diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs
index 232ee35..bb1d296 100644
--- a/compiler/rustc_ast_passes/src/ast_validation.rs
+++ b/compiler/rustc_ast_passes/src/ast_validation.rs
@@ -287,7 +287,7 @@
     // ```
     fn check_expr_within_pat(&self, expr: &Expr, allow_paths: bool) {
         match expr.kind {
-            ExprKind::Lit(..) | ExprKind::Err => {}
+            ExprKind::Lit(..) | ExprKind::ConstBlock(..) | ExprKind::Err => {}
             ExprKind::Path(..) if allow_paths => {}
             ExprKind::Unary(UnOp::Neg, ref inner) if matches!(inner.kind, ExprKind::Lit(_)) => {}
             _ => self.err_handler().span_err(
@@ -516,7 +516,7 @@
         self.session.source_map().guess_head_span(self.extern_mod.unwrap().span)
     }
 
-    /// An `fn` in `extern { ... }` cannot have qualfiers, e.g. `async fn`.
+    /// An `fn` in `extern { ... }` cannot have qualifiers, e.g. `async fn`.
     fn check_foreign_fn_headerless(&self, ident: Ident, span: Span, header: FnHeader) {
         if header.has_qualifiers() {
             self.err_handler()
@@ -796,7 +796,7 @@
 
     fn visit_expr(&mut self, expr: &'a Expr) {
         match &expr.kind {
-            ExprKind::LlvmInlineAsm(..) if !self.session.target.target.options.allow_asm => {
+            ExprKind::LlvmInlineAsm(..) if !self.session.target.allow_asm => {
                 struct_span_err!(
                     self.session,
                     expr.span,
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 00d3db7..2831675 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -629,6 +629,8 @@
     gate_all!(const_trait_bound_opt_out, "`?const` on trait bounds is experimental");
     gate_all!(const_trait_impl, "const trait impls are experimental");
     gate_all!(half_open_range_patterns, "half-open range patterns are unstable");
+    gate_all!(inline_const, "inline-const is experimental");
+    gate_all!(destructuring_assignment, "destructuring assignments are unstable");
 
     // All uses of `gate_all!` below this point were added in #65742,
     // and subsequently disabled (with the non-early gating readded).
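
The two new `gate_all!` calls guard the syntax sketched below. This is a nightly-only illustration (feature names taken from the gates above) and will not build on a stable 1.49 toolchain.

    #![feature(inline_const)]
    #![feature(destructuring_assignment)]

    fn main() {
        let x = const { 40 + 2 }; // inline-const expression
        let (mut a, mut b) = (0, 0);
        (a, b) = (x, x / 2);      // destructuring assignment
        assert_eq!((a, b), (42, 21));
    }
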
diff --git a/compiler/rustc_ast_passes/src/node_count.rs b/compiler/rustc_ast_passes/src/node_count.rs
index 706dca2..6efc78c 100644
--- a/compiler/rustc_ast_passes/src/node_count.rs
+++ b/compiler/rustc_ast_passes/src/node_count.rs
@@ -114,9 +114,9 @@
         self.count += 1;
         walk_lifetime(self, lifetime)
     }
-    fn visit_mac(&mut self, _mac: &MacCall) {
+    fn visit_mac_call(&mut self, mac: &MacCall) {
         self.count += 1;
-        walk_mac(self, _mac)
+        walk_mac(self, mac)
     }
     fn visit_path(&mut self, path: &Path, _id: NodeId) {
         self.count += 1;
diff --git a/compiler/rustc_ast_passes/src/show_span.rs b/compiler/rustc_ast_passes/src/show_span.rs
index 053aba8..6cef26a 100644
--- a/compiler/rustc_ast_passes/src/show_span.rs
+++ b/compiler/rustc_ast_passes/src/show_span.rs
@@ -54,10 +54,6 @@
         }
         visit::walk_ty(self, t);
     }
-
-    fn visit_mac(&mut self, mac: &'a ast::MacCall) {
-        visit::walk_mac(self, mac);
-    }
 }
 
 pub fn run(span_diagnostic: &rustc_errors::Handler, mode: &str, krate: &ast::Crate) {
diff --git a/compiler/rustc_ast_pretty/src/pp.rs b/compiler/rustc_ast_pretty/src/pp.rs
index ca7f127..56e769b 100644
--- a/compiler/rustc_ast_pretty/src/pp.rs
+++ b/compiler/rustc_ast_pretty/src/pp.rs
@@ -170,17 +170,11 @@
 
 impl Token {
     crate fn is_eof(&self) -> bool {
-        match *self {
-            Token::Eof => true,
-            _ => false,
-        }
+        matches!(self, Token::Eof)
     }
 
     pub fn is_hardbreak_tok(&self) -> bool {
-        match *self {
-            Token::Break(BreakToken { offset: 0, blank_space: bs }) if bs == SIZE_INFINITY => true,
-            _ => false,
-        }
+        matches!(self, Token::Break(BreakToken { offset: 0, blank_space: SIZE_INFINITY }))
     }
 }
 
@@ -396,7 +390,7 @@
         self.scan_stack.pop_front().unwrap()
     }
 
-    fn scan_top(&mut self) -> usize {
+    fn scan_top(&self) -> usize {
         *self.scan_stack.front().unwrap()
     }
 
@@ -490,13 +484,10 @@
         self.pending_indentation += amount;
     }
 
-    fn get_top(&mut self) -> PrintStackElem {
-        match self.print_stack.last() {
-            Some(el) => *el,
-            None => {
-                PrintStackElem { offset: 0, pbreak: PrintStackBreak::Broken(Breaks::Inconsistent) }
-            }
-        }
+    fn get_top(&self) -> PrintStackElem {
+        *self.print_stack.last().unwrap_or({
+            &PrintStackElem { offset: 0, pbreak: PrintStackBreak::Broken(Breaks::Inconsistent) }
+        })
     }
 
     fn print_begin(&mut self, b: BeginToken, l: isize) {
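
Several hunks in this import replace hand-rolled match-to-bool helpers with `matches!`, as in `is_eof` and `is_hardbreak_tok` above. A standalone before/after sketch, with a made-up `Tok` type:

    enum Tok {
        Eof,
        Word(&'static str),
    }

    // Before: an explicit match that only produces a boolean.
    fn is_eof_old(t: &Tok) -> bool {
        match *t {
            Tok::Eof => true,
            _ => false,
        }
    }

    // After: the same check via `matches!`.
    fn is_eof_new(t: &Tok) -> bool {
        matches!(t, Tok::Eof)
    }

    fn main() {
        assert_eq!(is_eof_old(&Tok::Eof), is_eof_new(&Tok::Eof));
        assert_eq!(is_eof_old(&Tok::Word("x")), is_eof_new(&Tok::Word("x")));
    }
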
diff --git a/compiler/rustc_ast_pretty/src/pprust/mod.rs b/compiler/rustc_ast_pretty/src/pprust/mod.rs
new file mode 100644
index 0000000..b34ea41
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pprust/mod.rs
@@ -0,0 +1,104 @@
+#[cfg(test)]
+mod tests;
+
+pub mod state;
+pub use state::{print_crate, AnnNode, Comments, PpAnn, PrintState, State};
+
+use rustc_ast as ast;
+use rustc_ast::token::{Nonterminal, Token, TokenKind};
+use rustc_ast::tokenstream::{TokenStream, TokenTree};
+
+pub fn nonterminal_to_string_no_extra_parens(nt: &Nonterminal) -> String {
+    let state = State::without_insert_extra_parens();
+    state.nonterminal_to_string(nt)
+}
+
+pub fn nonterminal_to_string(nt: &Nonterminal) -> String {
+    State::new().nonterminal_to_string(nt)
+}
+
+/// Print the token kind precisely, without converting `$crate` into its respective crate name.
+pub fn token_kind_to_string(tok: &TokenKind) -> String {
+    State::new().token_kind_to_string(tok)
+}
+
+/// Print the token precisely, without converting `$crate` into its respective crate name.
+pub fn token_to_string(token: &Token) -> String {
+    State::new().token_to_string(token)
+}
+
+pub fn token_to_string_ext(token: &Token, convert_dollar_crate: bool) -> String {
+    State::new().token_to_string_ext(token, convert_dollar_crate)
+}
+
+pub fn ty_to_string(ty: &ast::Ty) -> String {
+    State::new().ty_to_string(ty)
+}
+
+pub fn bounds_to_string(bounds: &[ast::GenericBound]) -> String {
+    State::new().bounds_to_string(bounds)
+}
+
+pub fn pat_to_string(pat: &ast::Pat) -> String {
+    State::new().pat_to_string(pat)
+}
+
+pub fn expr_to_string(e: &ast::Expr) -> String {
+    State::new().expr_to_string(e)
+}
+
+pub fn tt_to_string(tt: &TokenTree) -> String {
+    State::new().tt_to_string(tt)
+}
+
+pub fn tts_to_string(tokens: &TokenStream) -> String {
+    State::new().tts_to_string(tokens)
+}
+
+pub fn stmt_to_string(stmt: &ast::Stmt) -> String {
+    State::new().stmt_to_string(stmt)
+}
+
+pub fn item_to_string(i: &ast::Item) -> String {
+    State::new().item_to_string(i)
+}
+
+pub fn generic_params_to_string(generic_params: &[ast::GenericParam]) -> String {
+    State::new().generic_params_to_string(generic_params)
+}
+
+pub fn path_to_string(p: &ast::Path) -> String {
+    State::new().path_to_string(p)
+}
+
+pub fn path_segment_to_string(p: &ast::PathSegment) -> String {
+    State::new().path_segment_to_string(p)
+}
+
+pub fn vis_to_string(v: &ast::Visibility) -> String {
+    State::new().vis_to_string(v)
+}
+
+pub fn block_to_string(blk: &ast::Block) -> String {
+    State::new().block_to_string(blk)
+}
+
+pub fn meta_list_item_to_string(li: &ast::NestedMetaItem) -> String {
+    State::new().meta_list_item_to_string(li)
+}
+
+pub fn attr_item_to_string(ai: &ast::AttrItem) -> String {
+    State::new().attr_item_to_string(ai)
+}
+
+pub fn attribute_to_string(attr: &ast::Attribute) -> String {
+    State::new().attribute_to_string(attr)
+}
+
+pub fn param_to_string(arg: &ast::Param) -> String {
+    State::new().param_to_string(arg)
+}
+
+pub fn to_string(f: impl FnOnce(&mut State<'_>)) -> String {
+    State::new().to_string(f)
+}
diff --git a/compiler/rustc_ast_pretty/src/pprust.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs
similarity index 89%
rename from compiler/rustc_ast_pretty/src/pprust.rs
rename to compiler/rustc_ast_pretty/src/pprust/state.rs
index d16b541..a566200 100644
--- a/compiler/rustc_ast_pretty/src/pprust.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state.rs
@@ -20,9 +20,6 @@
 
 use std::borrow::Cow;
 
-#[cfg(test)]
-mod tests;
-
 pub enum MacHeader<'a> {
     Path(&'a ast::Path),
     Keyword(&'static str),
@@ -66,7 +63,7 @@
     }
 
     pub fn trailing_comment(
-        &mut self,
+        &self,
         span: rustc_span::Span,
         next_pos: Option<BytePos>,
     ) -> Option<Comment> {
@@ -91,6 +88,13 @@
     comments: Option<Comments<'a>>,
     ann: &'a (dyn PpAnn + 'a),
     is_expanded: bool,
+    // If `true`, additional parentheses (separate from `ExprKind::Paren`)
+    // are inserted to ensure that proper precedence is preserved
+    // in the pretty-printed output.
+    //
+    // This is usually `true`, except when performing the pretty-print/reparse
+    // check in `nt_to_tokenstream`
+    insert_extra_parens: bool,
 }
 
 crate const INDENT_UNIT: usize = 4;
@@ -112,6 +116,7 @@
         comments: Some(Comments::new(sm, filename, input)),
         ann,
         is_expanded,
+        insert_extra_parens: true,
     };
 
     if is_expanded && has_injected_crate {
@@ -142,13 +147,6 @@
     s.s.eof()
 }
 
-pub fn to_string(f: impl FnOnce(&mut State<'_>)) -> String {
-    let mut printer =
-        State { s: pp::mk_printer(), comments: None, ann: &NoAnn, is_expanded: false };
-    f(&mut printer);
-    printer.s.eof()
-}
-
 // This makes printed token streams look slightly nicer,
 // and also addresses some specific regressions described in #63896 and #73345.
 fn tt_prepend_space(tt: &TokenTree, prev: &TokenTree) -> bool {
@@ -158,24 +156,13 @@
         }
     }
     match tt {
-        TokenTree::Token(token) => match token.kind {
-            token::Comma => false,
-            _ => true,
-        },
-        TokenTree::Delimited(_, DelimToken::Paren, _) => match prev {
-            TokenTree::Token(token) => match token.kind {
-                token::Ident(_, _) => false,
-                _ => true,
-            },
-            _ => true,
-        },
-        TokenTree::Delimited(_, DelimToken::Bracket, _) => match prev {
-            TokenTree::Token(token) => match token.kind {
-                token::Pound => false,
-                _ => true,
-            },
-            _ => true,
-        },
+        TokenTree::Token(token) => token.kind != token::Comma,
+        TokenTree::Delimited(_, DelimToken::Paren, _) => {
+            !matches!(prev, TokenTree::Token(Token { kind: token::Ident(..), .. }))
+        }
+        TokenTree::Delimited(_, DelimToken::Bracket, _) => {
+            !matches!(prev, TokenTree::Token(Token { kind: token::Pound, .. }))
+        }
         TokenTree::Delimited(..) => true,
     }
 }
@@ -231,173 +218,8 @@
     out
 }
 
-/// Print the token kind precisely, without converting `$crate` into its respective crate name.
-pub fn token_kind_to_string(tok: &TokenKind) -> String {
-    token_kind_to_string_ext(tok, None)
-}
-
-fn token_kind_to_string_ext(tok: &TokenKind, convert_dollar_crate: Option<Span>) -> String {
-    match *tok {
-        token::Eq => "=".to_string(),
-        token::Lt => "<".to_string(),
-        token::Le => "<=".to_string(),
-        token::EqEq => "==".to_string(),
-        token::Ne => "!=".to_string(),
-        token::Ge => ">=".to_string(),
-        token::Gt => ">".to_string(),
-        token::Not => "!".to_string(),
-        token::Tilde => "~".to_string(),
-        token::OrOr => "||".to_string(),
-        token::AndAnd => "&&".to_string(),
-        token::BinOp(op) => binop_to_string(op).to_string(),
-        token::BinOpEq(op) => format!("{}=", binop_to_string(op)),
-
-        /* Structural symbols */
-        token::At => "@".to_string(),
-        token::Dot => ".".to_string(),
-        token::DotDot => "..".to_string(),
-        token::DotDotDot => "...".to_string(),
-        token::DotDotEq => "..=".to_string(),
-        token::Comma => ",".to_string(),
-        token::Semi => ";".to_string(),
-        token::Colon => ":".to_string(),
-        token::ModSep => "::".to_string(),
-        token::RArrow => "->".to_string(),
-        token::LArrow => "<-".to_string(),
-        token::FatArrow => "=>".to_string(),
-        token::OpenDelim(token::Paren) => "(".to_string(),
-        token::CloseDelim(token::Paren) => ")".to_string(),
-        token::OpenDelim(token::Bracket) => "[".to_string(),
-        token::CloseDelim(token::Bracket) => "]".to_string(),
-        token::OpenDelim(token::Brace) => "{".to_string(),
-        token::CloseDelim(token::Brace) => "}".to_string(),
-        token::OpenDelim(token::NoDelim) | token::CloseDelim(token::NoDelim) => "".to_string(),
-        token::Pound => "#".to_string(),
-        token::Dollar => "$".to_string(),
-        token::Question => "?".to_string(),
-        token::SingleQuote => "'".to_string(),
-
-        /* Literals */
-        token::Literal(lit) => literal_to_string(lit),
-
-        /* Name components */
-        token::Ident(s, is_raw) => IdentPrinter::new(s, is_raw, convert_dollar_crate).to_string(),
-        token::Lifetime(s) => s.to_string(),
-
-        /* Other */
-        token::DocComment(comment_kind, attr_style, data) => {
-            doc_comment_to_string(comment_kind, attr_style, data)
-        }
-        token::Eof => "<eof>".to_string(),
-
-        token::Interpolated(ref nt) => nonterminal_to_string(nt),
-    }
-}
-
-/// Print the token precisely, without converting `$crate` into its respective crate name.
-pub fn token_to_string(token: &Token) -> String {
-    token_to_string_ext(token, false)
-}
-
-fn token_to_string_ext(token: &Token, convert_dollar_crate: bool) -> String {
-    let convert_dollar_crate = convert_dollar_crate.then_some(token.span);
-    token_kind_to_string_ext(&token.kind, convert_dollar_crate)
-}
-
-pub fn nonterminal_to_string(nt: &Nonterminal) -> String {
-    match *nt {
-        token::NtExpr(ref e) => expr_to_string(e),
-        token::NtMeta(ref e) => attr_item_to_string(e),
-        token::NtTy(ref e) => ty_to_string(e),
-        token::NtPath(ref e) => path_to_string(e),
-        token::NtItem(ref e) => item_to_string(e),
-        token::NtBlock(ref e) => block_to_string(e),
-        token::NtStmt(ref e) => stmt_to_string(e),
-        token::NtPat(ref e) => pat_to_string(e),
-        token::NtIdent(e, is_raw) => IdentPrinter::for_ast_ident(e, is_raw).to_string(),
-        token::NtLifetime(e) => e.to_string(),
-        token::NtLiteral(ref e) => expr_to_string(e),
-        token::NtTT(ref tree) => tt_to_string(tree),
-        token::NtVis(ref e) => vis_to_string(e),
-    }
-}
-
-pub fn ty_to_string(ty: &ast::Ty) -> String {
-    to_string(|s| s.print_type(ty))
-}
-
-pub fn bounds_to_string(bounds: &[ast::GenericBound]) -> String {
-    to_string(|s| s.print_type_bounds("", bounds))
-}
-
-pub fn pat_to_string(pat: &ast::Pat) -> String {
-    to_string(|s| s.print_pat(pat))
-}
-
-pub fn expr_to_string(e: &ast::Expr) -> String {
-    to_string(|s| s.print_expr(e))
-}
-
-pub fn tt_to_string(tt: &TokenTree) -> String {
-    to_string(|s| s.print_tt(tt, false))
-}
-
-pub fn tts_to_string(tokens: &TokenStream) -> String {
-    to_string(|s| s.print_tts(tokens, false))
-}
-
-pub fn stmt_to_string(stmt: &ast::Stmt) -> String {
-    to_string(|s| s.print_stmt(stmt))
-}
-
-pub fn item_to_string(i: &ast::Item) -> String {
-    to_string(|s| s.print_item(i))
-}
-
-pub fn generic_params_to_string(generic_params: &[ast::GenericParam]) -> String {
-    to_string(|s| s.print_generic_params(generic_params))
-}
-
-pub fn path_to_string(p: &ast::Path) -> String {
-    to_string(|s| s.print_path(p, false, 0))
-}
-
-pub fn path_segment_to_string(p: &ast::PathSegment) -> String {
-    to_string(|s| s.print_path_segment(p, false))
-}
-
-pub fn vis_to_string(v: &ast::Visibility) -> String {
-    to_string(|s| s.print_visibility(v))
-}
-
-fn block_to_string(blk: &ast::Block) -> String {
-    to_string(|s| {
-        // Containing cbox, will be closed by `print_block` at `}`.
-        s.cbox(INDENT_UNIT);
-        // Head-ibox, will be closed by `print_block` after `{`.
-        s.ibox(0);
-        s.print_block(blk)
-    })
-}
-
-pub fn meta_list_item_to_string(li: &ast::NestedMetaItem) -> String {
-    to_string(|s| s.print_meta_list_item(li))
-}
-
-fn attr_item_to_string(ai: &ast::AttrItem) -> String {
-    to_string(|s| s.print_attr_item(ai, ai.path.span))
-}
-
-pub fn attribute_to_string(attr: &ast::Attribute) -> String {
-    to_string(|s| s.print_attribute(attr))
-}
-
-pub fn param_to_string(arg: &ast::Param) -> String {
-    to_string(|s| s.print_param(arg, false))
-}
-
 fn visibility_qualified(vis: &ast::Visibility, s: &str) -> String {
-    format!("{}{}", to_string(|s| s.print_visibility(vis)), s)
+    format!("{}{}", State::new().to_string(|s| s.print_visibility(vis)), s)
 }
 
 impl std::ops::Deref for State<'_> {
@@ -414,6 +236,7 @@
 }
 
 pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::DerefMut {
+    fn insert_extra_parens(&self) -> bool;
     fn comments(&mut self) -> &mut Option<Comments<'a>>;
     fn print_ident(&mut self, ident: Ident);
     fn print_generic_args(&mut self, args: &ast::GenericArgs, colons_before_params: bool);
@@ -603,7 +426,7 @@
         }
         self.maybe_print_comment(attr.span.lo());
         match attr.kind {
-            ast::AttrKind::Normal(ref item) => {
+            ast::AttrKind::Normal(ref item, _) => {
                 match attr.style {
                     ast::AttrStyle::Inner => self.word("#!["),
                     ast::AttrStyle::Outer => self.word("#["),
@@ -679,7 +502,8 @@
     fn print_tt(&mut self, tt: &TokenTree, convert_dollar_crate: bool) {
         match tt {
             TokenTree::Token(token) => {
-                self.word(token_to_string_ext(&token, convert_dollar_crate));
+                let token_str = self.token_to_string_ext(&token, convert_dollar_crate);
+                self.word(token_str);
                 if let token::DocComment(..) = token.kind {
                     self.hardbreak()
                 }
@@ -745,14 +569,20 @@
                     self.space();
                 }
             }
-            _ => self.word(token_kind_to_string(&token::OpenDelim(delim))),
+            _ => {
+                let token_str = self.token_kind_to_string(&token::OpenDelim(delim));
+                self.word(token_str)
+            }
         }
         self.ibox(0);
         self.print_tts(tts, convert_dollar_crate);
         self.end();
         match delim {
             DelimToken::Brace => self.bclose(span),
-            _ => self.word(token_kind_to_string(&token::CloseDelim(delim))),
+            _ => {
+                let token_str = self.token_kind_to_string(&token::CloseDelim(delim));
+                self.word(token_str)
+            }
         }
     }
 
@@ -809,18 +639,197 @@
     fn break_offset_if_not_bol(&mut self, n: usize, off: isize) {
         if !self.is_beginning_of_line() {
             self.break_offset(n, off)
-        } else {
-            if off != 0 && self.last_token().is_hardbreak_tok() {
-                // We do something pretty sketchy here: tuck the nonzero
-                // offset-adjustment we were going to deposit along with the
-                // break into the previous hardbreak.
-                self.replace_last_token(pp::Printer::hardbreak_tok_offset(off));
-            }
+        } else if off != 0 && self.last_token().is_hardbreak_tok() {
+            // We do something pretty sketchy here: tuck the nonzero
+            // offset-adjustment we were going to deposit along with the
+            // break into the previous hardbreak.
+            self.replace_last_token(pp::Printer::hardbreak_tok_offset(off));
         }
     }
+
+    fn nonterminal_to_string(&self, nt: &Nonterminal) -> String {
+        match *nt {
+            token::NtExpr(ref e) => self.expr_to_string(e),
+            token::NtMeta(ref e) => self.attr_item_to_string(e),
+            token::NtTy(ref e) => self.ty_to_string(e),
+            token::NtPath(ref e) => self.path_to_string(e),
+            token::NtItem(ref e) => self.item_to_string(e),
+            token::NtBlock(ref e) => self.block_to_string(e),
+            token::NtStmt(ref e) => self.stmt_to_string(e),
+            token::NtPat(ref e) => self.pat_to_string(e),
+            token::NtIdent(e, is_raw) => IdentPrinter::for_ast_ident(e, is_raw).to_string(),
+            token::NtLifetime(e) => e.to_string(),
+            token::NtLiteral(ref e) => self.expr_to_string(e),
+            token::NtTT(ref tree) => self.tt_to_string(tree),
+            token::NtVis(ref e) => self.vis_to_string(e),
+        }
+    }
+
+    /// Print the token kind precisely, without converting `$crate` into its respective crate name.
+    fn token_kind_to_string(&self, tok: &TokenKind) -> String {
+        self.token_kind_to_string_ext(tok, None)
+    }
+
+    fn token_kind_to_string_ext(
+        &self,
+        tok: &TokenKind,
+        convert_dollar_crate: Option<Span>,
+    ) -> String {
+        match *tok {
+            token::Eq => "=".to_string(),
+            token::Lt => "<".to_string(),
+            token::Le => "<=".to_string(),
+            token::EqEq => "==".to_string(),
+            token::Ne => "!=".to_string(),
+            token::Ge => ">=".to_string(),
+            token::Gt => ">".to_string(),
+            token::Not => "!".to_string(),
+            token::Tilde => "~".to_string(),
+            token::OrOr => "||".to_string(),
+            token::AndAnd => "&&".to_string(),
+            token::BinOp(op) => binop_to_string(op).to_string(),
+            token::BinOpEq(op) => format!("{}=", binop_to_string(op)),
+
+            /* Structural symbols */
+            token::At => "@".to_string(),
+            token::Dot => ".".to_string(),
+            token::DotDot => "..".to_string(),
+            token::DotDotDot => "...".to_string(),
+            token::DotDotEq => "..=".to_string(),
+            token::Comma => ",".to_string(),
+            token::Semi => ";".to_string(),
+            token::Colon => ":".to_string(),
+            token::ModSep => "::".to_string(),
+            token::RArrow => "->".to_string(),
+            token::LArrow => "<-".to_string(),
+            token::FatArrow => "=>".to_string(),
+            token::OpenDelim(token::Paren) => "(".to_string(),
+            token::CloseDelim(token::Paren) => ")".to_string(),
+            token::OpenDelim(token::Bracket) => "[".to_string(),
+            token::CloseDelim(token::Bracket) => "]".to_string(),
+            token::OpenDelim(token::Brace) => "{".to_string(),
+            token::CloseDelim(token::Brace) => "}".to_string(),
+            token::OpenDelim(token::NoDelim) | token::CloseDelim(token::NoDelim) => "".to_string(),
+            token::Pound => "#".to_string(),
+            token::Dollar => "$".to_string(),
+            token::Question => "?".to_string(),
+            token::SingleQuote => "'".to_string(),
+
+            /* Literals */
+            token::Literal(lit) => literal_to_string(lit),
+
+            /* Name components */
+            token::Ident(s, is_raw) => {
+                IdentPrinter::new(s, is_raw, convert_dollar_crate).to_string()
+            }
+            token::Lifetime(s) => s.to_string(),
+
+            /* Other */
+            token::DocComment(comment_kind, attr_style, data) => {
+                doc_comment_to_string(comment_kind, attr_style, data)
+            }
+            token::Eof => "<eof>".to_string(),
+
+            token::Interpolated(ref nt) => self.nonterminal_to_string(nt),
+        }
+    }
+
+    /// Print the token precisely, without converting `$crate` into its respective crate name.
+    fn token_to_string(&self, token: &Token) -> String {
+        self.token_to_string_ext(token, false)
+    }
+
+    fn token_to_string_ext(&self, token: &Token, convert_dollar_crate: bool) -> String {
+        let convert_dollar_crate = convert_dollar_crate.then_some(token.span);
+        self.token_kind_to_string_ext(&token.kind, convert_dollar_crate)
+    }
+
+    fn ty_to_string(&self, ty: &ast::Ty) -> String {
+        self.to_string(|s| s.print_type(ty))
+    }
+
+    fn bounds_to_string(&self, bounds: &[ast::GenericBound]) -> String {
+        self.to_string(|s| s.print_type_bounds("", bounds))
+    }
+
+    fn pat_to_string(&self, pat: &ast::Pat) -> String {
+        self.to_string(|s| s.print_pat(pat))
+    }
+
+    fn expr_to_string(&self, e: &ast::Expr) -> String {
+        self.to_string(|s| s.print_expr(e))
+    }
+
+    fn tt_to_string(&self, tt: &TokenTree) -> String {
+        self.to_string(|s| s.print_tt(tt, false))
+    }
+
+    fn tts_to_string(&self, tokens: &TokenStream) -> String {
+        self.to_string(|s| s.print_tts(tokens, false))
+    }
+
+    fn stmt_to_string(&self, stmt: &ast::Stmt) -> String {
+        self.to_string(|s| s.print_stmt(stmt))
+    }
+
+    fn item_to_string(&self, i: &ast::Item) -> String {
+        self.to_string(|s| s.print_item(i))
+    }
+
+    fn generic_params_to_string(&self, generic_params: &[ast::GenericParam]) -> String {
+        self.to_string(|s| s.print_generic_params(generic_params))
+    }
+
+    fn path_to_string(&self, p: &ast::Path) -> String {
+        self.to_string(|s| s.print_path(p, false, 0))
+    }
+
+    fn path_segment_to_string(&self, p: &ast::PathSegment) -> String {
+        self.to_string(|s| s.print_path_segment(p, false))
+    }
+
+    fn vis_to_string(&self, v: &ast::Visibility) -> String {
+        self.to_string(|s| s.print_visibility(v))
+    }
+
+    fn block_to_string(&self, blk: &ast::Block) -> String {
+        self.to_string(|s| {
+            // Containing cbox, will be closed by `print_block` at `}`.
+            s.cbox(INDENT_UNIT);
+            // Head-ibox, will be closed by `print_block` after `{`.
+            s.ibox(0);
+            s.print_block(blk)
+        })
+    }
+
+    fn meta_list_item_to_string(&self, li: &ast::NestedMetaItem) -> String {
+        self.to_string(|s| s.print_meta_list_item(li))
+    }
+
+    fn attr_item_to_string(&self, ai: &ast::AttrItem) -> String {
+        self.to_string(|s| s.print_attr_item(ai, ai.path.span))
+    }
+
+    fn attribute_to_string(&self, attr: &ast::Attribute) -> String {
+        self.to_string(|s| s.print_attribute(attr))
+    }
+
+    fn param_to_string(&self, arg: &ast::Param) -> String {
+        self.to_string(|s| s.print_param(arg, false))
+    }
+
+    fn to_string(&self, f: impl FnOnce(&mut State<'_>)) -> String {
+        let mut printer = State::new();
+        printer.insert_extra_parens = self.insert_extra_parens();
+        f(&mut printer);
+        printer.s.eof()
+    }
 }
 
 impl<'a> PrintState<'a> for State<'a> {
+    fn insert_extra_parens(&self) -> bool {
+        self.insert_extra_parens
+    }
     fn comments(&mut self) -> &mut Option<Comments<'a>> {
         &mut self.comments
     }
@@ -856,6 +865,20 @@
 }
 
 impl<'a> State<'a> {
+    pub fn new() -> State<'a> {
+        State {
+            s: pp::mk_printer(),
+            comments: None,
+            ann: &NoAnn,
+            is_expanded: false,
+            insert_extra_parens: true,
+        }
+    }
+
+    pub(super) fn without_insert_extra_parens() -> State<'a> {
+        State { insert_extra_parens: false, ..State::new() }
+    }
+
     // Synthesizes a comment that was not textually present in the original source
     // file.
     pub fn synth_comment(&mut self, text: String) {
@@ -1139,7 +1162,7 @@
                 self.print_fn_full(sig, item.ident, gen, &item.vis, def, body, &item.attrs);
             }
             ast::ItemKind::Mod(ref _mod) => {
-                self.head(to_string(|s| {
+                self.head(self.to_string(|s| {
                     s.print_visibility(&item.vis);
                     s.print_unsafety(_mod.unsafety);
                     s.word("mod");
@@ -1158,7 +1181,7 @@
                 }
             }
             ast::ItemKind::ForeignMod(ref nmod) => {
-                self.head(to_string(|s| {
+                self.head(self.to_string(|s| {
                     s.print_unsafety(nmod.unsafety);
                     s.word("extern");
                 }));
@@ -1366,7 +1389,7 @@
                 ast::CrateSugar::JustCrate => self.word_nbsp("crate"),
             },
             ast::VisibilityKind::Restricted { ref path, .. } => {
-                let path = to_string(|s| s.print_path(path, false, 0));
+                let path = self.to_string(|s| s.print_path(path, false, 0));
                 if path == "self" || path == "super" {
                     self.word_nbsp(format!("pub({})", path))
                 } else {
@@ -1658,7 +1681,8 @@
     }
 
     /// Prints `expr` or `(expr)` when `needs_par` holds.
-    fn print_expr_cond_paren(&mut self, expr: &ast::Expr, needs_par: bool) {
+    fn print_expr_cond_paren(&mut self, expr: &ast::Expr, mut needs_par: bool) {
+        needs_par &= self.insert_extra_parens;
         if needs_par {
             self.popen();
         }
@@ -1677,6 +1701,14 @@
         self.end();
     }
 
+    fn print_expr_anon_const(&mut self, expr: &ast::AnonConst, attrs: &[ast::Attribute]) {
+        self.ibox(INDENT_UNIT);
+        self.s.word("const");
+        self.print_inner_attributes_inline(attrs);
+        self.print_expr(&expr.value);
+        self.end();
+    }
+
     fn print_expr_repeat(
         &mut self,
         element: &ast::Expr,
@@ -1697,7 +1729,7 @@
         &mut self,
         path: &ast::Path,
         fields: &[ast::Field],
-        wth: &Option<P<ast::Expr>>,
+        rest: &ast::StructRest,
         attrs: &[ast::Attribute],
     ) {
         self.print_path(path, true, 0);
@@ -1718,22 +1750,21 @@
             },
             |f| f.span,
         );
-        match *wth {
-            Some(ref expr) => {
+        match rest {
+            ast::StructRest::Base(_) | ast::StructRest::Rest(_) => {
                 self.ibox(INDENT_UNIT);
                 if !fields.is_empty() {
                     self.s.word(",");
                     self.s.space();
                 }
                 self.s.word("..");
-                self.print_expr(expr);
+                if let ast::StructRest::Base(ref expr) = *rest {
+                    self.print_expr(expr);
+                }
                 self.end();
             }
-            _ => {
-                if !fields.is_empty() {
-                    self.s.word(",")
-                }
-            }
+            ast::StructRest::None if !fields.is_empty() => self.s.word(","),
+            _ => {}
         }
         self.s.word("}");
     }
@@ -1853,11 +1884,14 @@
             ast::ExprKind::Array(ref exprs) => {
                 self.print_expr_vec(&exprs[..], attrs);
             }
+            ast::ExprKind::ConstBlock(ref anon_const) => {
+                self.print_expr_anon_const(anon_const, attrs);
+            }
             ast::ExprKind::Repeat(ref element, ref count) => {
                 self.print_expr_repeat(element, count, attrs);
             }
-            ast::ExprKind::Struct(ref path, ref fields, ref wth) => {
-                self.print_expr_struct(path, &fields[..], wth, attrs);
+            ast::ExprKind::Struct(ref path, ref fields, ref rest) => {
+                self.print_expr_struct(path, &fields[..], rest, attrs);
             }
             ast::ExprKind::Tup(ref exprs) => {
                 self.print_expr_tup(&exprs[..], attrs);
diff --git a/compiler/rustc_attr/src/builtin.rs b/compiler/rustc_attr/src/builtin.rs
index 94e2a40..2fd625c 100644
--- a/compiler/rustc_attr/src/builtin.rs
+++ b/compiler/rustc_attr/src/builtin.rs
@@ -76,6 +76,12 @@
 }
 
 #[derive(Clone, Encodable, Decodable)]
+pub enum InstructionSetAttr {
+    ArmA32,
+    ArmT32,
+}
+
+#[derive(Clone, Encodable, Decodable)]
 pub enum OptimizeAttr {
     None,
     Speed,
@@ -148,7 +154,7 @@
 }
 
 /// The available stability levels.
-#[derive(Encodable, Decodable, PartialEq, PartialOrd, Copy, Clone, Debug, Eq, Hash)]
+#[derive(Encodable, Decodable, PartialEq, Copy, Clone, Debug, Eq, Hash)]
 #[derive(HashStable_Generic)]
 pub enum StabilityLevel {
     // Reason for the current stability level and the relevant rust-lang issue
@@ -631,19 +637,15 @@
 }
 
 /// Finds the deprecation attribute. `None` if none exists.
-pub fn find_deprecation(sess: &Session, attrs: &[Attribute], item_sp: Span) -> Option<Deprecation> {
-    find_deprecation_generic(sess, attrs.iter(), item_sp)
+pub fn find_deprecation(sess: &Session, attrs: &[Attribute]) -> Option<(Deprecation, Span)> {
+    find_deprecation_generic(sess, attrs.iter())
 }
 
-fn find_deprecation_generic<'a, I>(
-    sess: &Session,
-    attrs_iter: I,
-    item_sp: Span,
-) -> Option<Deprecation>
+fn find_deprecation_generic<'a, I>(sess: &Session, attrs_iter: I) -> Option<(Deprecation, Span)>
 where
     I: Iterator<Item = &'a Attribute>,
 {
-    let mut depr: Option<Deprecation> = None;
+    let mut depr: Option<(Deprecation, Span)> = None;
     let diagnostic = &sess.parse_sess.span_diagnostic;
 
     'outer: for attr in attrs_iter {
@@ -652,8 +654,11 @@
             continue;
         }
 
-        if depr.is_some() {
-            struct_span_err!(diagnostic, item_sp, E0550, "multiple deprecated attributes").emit();
+        if let Some((_, span)) = &depr {
+            struct_span_err!(diagnostic, attr.span, E0550, "multiple deprecated attributes")
+                .span_label(attr.span, "repeated deprecation attribute")
+                .span_label(*span, "first deprecation attribute")
+                .emit();
             break;
         }
 
@@ -774,7 +779,7 @@
         sess.mark_attr_used(&attr);
 
         let is_since_rustc_version = sess.check_name(attr, sym::rustc_deprecated);
-        depr = Some(Deprecation { since, note, suggestion, is_since_rustc_version });
+        depr = Some((Deprecation { since, note, suggestion, is_since_rustc_version }, attr.span));
     }
 
     depr
@@ -895,38 +900,36 @@
                         )
                         .emit();
                     }
-                } else {
-                    if let Some(meta_item) = item.meta_item() {
-                        if meta_item.has_name(sym::align) {
-                            if let MetaItemKind::NameValue(ref value) = meta_item.kind {
-                                recognised = true;
-                                let mut err = struct_span_err!(
-                                    diagnostic,
-                                    item.span(),
-                                    E0693,
-                                    "incorrect `repr(align)` attribute format"
-                                );
-                                match value.kind {
-                                    ast::LitKind::Int(int, ast::LitIntType::Unsuffixed) => {
-                                        err.span_suggestion(
-                                            item.span(),
-                                            "use parentheses instead",
-                                            format!("align({})", int),
-                                            Applicability::MachineApplicable,
-                                        );
-                                    }
-                                    ast::LitKind::Str(s, _) => {
-                                        err.span_suggestion(
-                                            item.span(),
-                                            "use parentheses instead",
-                                            format!("align({})", s),
-                                            Applicability::MachineApplicable,
-                                        );
-                                    }
-                                    _ => {}
+                } else if let Some(meta_item) = item.meta_item() {
+                    if meta_item.has_name(sym::align) {
+                        if let MetaItemKind::NameValue(ref value) = meta_item.kind {
+                            recognised = true;
+                            let mut err = struct_span_err!(
+                                diagnostic,
+                                item.span(),
+                                E0693,
+                                "incorrect `repr(align)` attribute format"
+                            );
+                            match value.kind {
+                                ast::LitKind::Int(int, ast::LitIntType::Unsuffixed) => {
+                                    err.span_suggestion(
+                                        item.span(),
+                                        "use parentheses instead",
+                                        format!("align({})", int),
+                                        Applicability::MachineApplicable,
+                                    );
                                 }
-                                err.emit();
+                                ast::LitKind::Str(s, _) => {
+                                    err.span_suggestion(
+                                        item.span(),
+                                        "use parentheses instead",
+                                        format!("align({})", s),
+                                        Applicability::MachineApplicable,
+                                    );
+                                }
+                                _ => {}
                             }
+                            err.emit();
                         }
                     }
                 }
@@ -1007,13 +1010,28 @@
     sess: &'a Session,
     attrs: &'a [Attribute],
 ) -> Option<impl Iterator<Item = Symbol> + 'a> {
-    let attrs = sess.filter_by_name(attrs, sym::allow_internal_unstable);
+    allow_unstable(sess, attrs, sym::allow_internal_unstable)
+}
+
+pub fn rustc_allow_const_fn_unstable<'a>(
+    sess: &'a Session,
+    attrs: &'a [Attribute],
+) -> Option<impl Iterator<Item = Symbol> + 'a> {
+    allow_unstable(sess, attrs, sym::rustc_allow_const_fn_unstable)
+}
+
+fn allow_unstable<'a>(
+    sess: &'a Session,
+    attrs: &'a [Attribute],
+    symbol: Symbol,
+) -> Option<impl Iterator<Item = Symbol> + 'a> {
+    let attrs = sess.filter_by_name(attrs, symbol);
     let list = attrs
         .filter_map(move |attr| {
             attr.meta_item_list().or_else(|| {
                 sess.diagnostic().span_err(
                     attr.span,
-                    "`allow_internal_unstable` expects a list of feature names",
+                    &format!("`{}` expects a list of feature names", symbol.to_ident_string()),
                 );
                 None
             })
@@ -1023,8 +1041,10 @@
     Some(list.into_iter().filter_map(move |it| {
         let name = it.ident().map(|ident| ident.name);
         if name.is_none() {
-            sess.diagnostic()
-                .span_err(it.span(), "`allow_internal_unstable` expects feature names");
+            sess.diagnostic().span_err(
+                it.span(),
+                &format!("`{}` expects feature names", symbol.to_ident_string()),
+            );
         }
         name
     }))
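
A small self-contained sketch of the duplicate-attribute reporting strategy used in the `find_deprecation` hunk above (plain data instead of rustc's `Session` and diagnostics; all names are made up): remembering where the first `deprecated` attribute was seen lets the error for a repeated one point at both occurrences.

    fn find_first_duplicate(attrs: &[(&str, usize)]) -> Option<(usize, usize)> {
        // Track the "span" (here just a line number) of the first match.
        let mut first: Option<usize> = None;
        for &(name, line) in attrs {
            if name != "deprecated" {
                continue;
            }
            match first {
                // A second occurrence: report both the first and the repeated one.
                Some(prev) => return Some((prev, line)),
                None => first = Some(line),
            }
        }
        None
    }

    fn main() {
        let attrs = [("inline", 1), ("deprecated", 3), ("deprecated", 7)];
        assert_eq!(find_first_duplicate(&attrs), Some((3, 7)));
    }
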
diff --git a/compiler/rustc_builtin_macros/src/asm.rs b/compiler/rustc_builtin_macros/src/asm.rs
index 0998595..36cd6c2 100644
--- a/compiler/rustc_builtin_macros/src/asm.rs
+++ b/compiler/rustc_builtin_macros/src/asm.rs
@@ -81,7 +81,7 @@
         } // accept trailing commas
 
         // Parse options
-        if p.eat(&token::Ident(sym::options, false)) {
+        if p.eat_keyword(sym::options) {
             parse_options(&mut p, &mut args)?;
             allow_templates = false;
             continue;
@@ -101,19 +101,19 @@
         };
 
         let mut explicit_reg = false;
-        let op = if p.eat(&token::Ident(kw::In, false)) {
+        let op = if p.eat_keyword(kw::In) {
             let reg = parse_reg(&mut p, &mut explicit_reg)?;
             let expr = p.parse_expr()?;
             ast::InlineAsmOperand::In { reg, expr }
-        } else if p.eat(&token::Ident(sym::out, false)) {
+        } else if p.eat_keyword(sym::out) {
             let reg = parse_reg(&mut p, &mut explicit_reg)?;
             let expr = if p.eat_keyword(kw::Underscore) { None } else { Some(p.parse_expr()?) };
             ast::InlineAsmOperand::Out { reg, expr, late: false }
-        } else if p.eat(&token::Ident(sym::lateout, false)) {
+        } else if p.eat_keyword(sym::lateout) {
             let reg = parse_reg(&mut p, &mut explicit_reg)?;
             let expr = if p.eat_keyword(kw::Underscore) { None } else { Some(p.parse_expr()?) };
             ast::InlineAsmOperand::Out { reg, expr, late: true }
-        } else if p.eat(&token::Ident(sym::inout, false)) {
+        } else if p.eat_keyword(sym::inout) {
             let reg = parse_reg(&mut p, &mut explicit_reg)?;
             let expr = p.parse_expr()?;
             if p.eat(&token::FatArrow) {
@@ -123,7 +123,7 @@
             } else {
                 ast::InlineAsmOperand::InOut { reg, expr, late: false }
             }
-        } else if p.eat(&token::Ident(sym::inlateout, false)) {
+        } else if p.eat_keyword(sym::inlateout) {
             let reg = parse_reg(&mut p, &mut explicit_reg)?;
             let expr = p.parse_expr()?;
             if p.eat(&token::FatArrow) {
@@ -133,10 +133,10 @@
             } else {
                 ast::InlineAsmOperand::InOut { reg, expr, late: true }
             }
-        } else if p.eat(&token::Ident(kw::Const, false)) {
+        } else if p.eat_keyword(kw::Const) {
             let expr = p.parse_expr()?;
             ast::InlineAsmOperand::Const { expr }
-        } else if p.eat(&token::Ident(sym::sym, false)) {
+        } else if p.eat_keyword(sym::sym) {
             let expr = p.parse_expr()?;
             match expr.kind {
                 ast::ExprKind::Path(..) => {}
@@ -164,7 +164,7 @@
             args.templates.push(template);
             continue;
         } else {
-            return Err(p.expect_one_of(&[], &[]).unwrap_err());
+            return p.unexpected();
         };
 
         allow_templates = false;
@@ -333,21 +333,22 @@
     p.expect(&token::OpenDelim(token::DelimToken::Paren))?;
 
     while !p.eat(&token::CloseDelim(token::DelimToken::Paren)) {
-        if p.eat(&token::Ident(sym::pure, false)) {
+        if p.eat_keyword(sym::pure) {
             try_set_option(p, args, sym::pure, ast::InlineAsmOptions::PURE);
-        } else if p.eat(&token::Ident(sym::nomem, false)) {
+        } else if p.eat_keyword(sym::nomem) {
             try_set_option(p, args, sym::nomem, ast::InlineAsmOptions::NOMEM);
-        } else if p.eat(&token::Ident(sym::readonly, false)) {
+        } else if p.eat_keyword(sym::readonly) {
             try_set_option(p, args, sym::readonly, ast::InlineAsmOptions::READONLY);
-        } else if p.eat(&token::Ident(sym::preserves_flags, false)) {
+        } else if p.eat_keyword(sym::preserves_flags) {
             try_set_option(p, args, sym::preserves_flags, ast::InlineAsmOptions::PRESERVES_FLAGS);
-        } else if p.eat(&token::Ident(sym::noreturn, false)) {
+        } else if p.eat_keyword(sym::noreturn) {
             try_set_option(p, args, sym::noreturn, ast::InlineAsmOptions::NORETURN);
-        } else if p.eat(&token::Ident(sym::nostack, false)) {
+        } else if p.eat_keyword(sym::nostack) {
             try_set_option(p, args, sym::nostack, ast::InlineAsmOptions::NOSTACK);
-        } else {
-            p.expect(&token::Ident(sym::att_syntax, false))?;
+        } else if p.eat_keyword(sym::att_syntax) {
             try_set_option(p, args, sym::att_syntax, ast::InlineAsmOptions::ATT_SYNTAX);
+        } else {
+            return p.unexpected();
         }
 
         // Allow trailing commas
diff --git a/compiler/rustc_builtin_macros/src/assert.rs b/compiler/rustc_builtin_macros/src/assert.rs
index 2518171..5bfd8a2 100644
--- a/compiler/rustc_builtin_macros/src/assert.rs
+++ b/compiler/rustc_builtin_macros/src/assert.rs
@@ -120,8 +120,7 @@
         };
 
     if parser.token != token::Eof {
-        parser.expect_one_of(&[], &[])?;
-        unreachable!();
+        return parser.unexpected();
     }
 
     Ok(Assert { cond_expr, custom_message })
diff --git a/compiler/rustc_builtin_macros/src/cmdline_attrs.rs b/compiler/rustc_builtin_macros/src/cmdline_attrs.rs
index 5ed8b69..747e48e 100644
--- a/compiler/rustc_builtin_macros/src/cmdline_attrs.rs
+++ b/compiler/rustc_builtin_macros/src/cmdline_attrs.rs
@@ -15,7 +15,7 @@
         );
 
         let start_span = parser.token.span;
-        let AttrItem { path, args, tokens: _ } = match parser.parse_attr_item() {
+        let AttrItem { path, args, tokens: _ } = match parser.parse_attr_item(false) {
             Ok(ai) => ai,
             Err(mut err) => {
                 err.emit();
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index f492499..0642edf 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -358,7 +358,7 @@
             visit::walk_ty(self, ty)
         }
 
-        fn visit_mac(&mut self, mac: &ast::MacCall) {
+        fn visit_mac_call(&mut self, mac: &ast::MacCall) {
             self.cx.span_err(mac.span(), "`derive` cannot be used on items with type macros");
         }
     }
@@ -1137,12 +1137,9 @@
     /// for each of the self-args, carried in precomputed variables.
 
     /// ```{.text}
-    /// let __self0_vi = unsafe {
-    ///     std::intrinsics::discriminant_value(&self) };
-    /// let __self1_vi = unsafe {
-    ///     std::intrinsics::discriminant_value(&arg1) };
-    /// let __self2_vi = unsafe {
-    ///     std::intrinsics::discriminant_value(&arg2) };
+    /// let __self0_vi = std::intrinsics::discriminant_value(&self);
+    /// let __self1_vi = std::intrinsics::discriminant_value(&arg1);
+    /// let __self2_vi = std::intrinsics::discriminant_value(&arg2);
     ///
     /// if __self0_vi == __self1_vi && __self0_vi == __self2_vi && ... {
     ///     match (...) {
@@ -1325,7 +1322,7 @@
                 // Since we know that all the arguments will match if we reach
                 // the match expression we add the unreachable intrinsics as the
                 // result of the catch all which should help llvm in optimizing it
-                Some(deriving::call_intrinsic(cx, sp, sym::unreachable, vec![]))
+                Some(deriving::call_unreachable(cx, sp))
             }
             _ => None,
         };
@@ -1356,12 +1353,9 @@
             // with three Self args, builds three statements:
             //
             // ```
-            // let __self0_vi = unsafe {
-            //     std::intrinsics::discriminant_value(&self) };
-            // let __self1_vi = unsafe {
-            //     std::intrinsics::discriminant_value(&arg1) };
-            // let __self2_vi = unsafe {
-            //     std::intrinsics::discriminant_value(&arg2) };
+            // let __self0_vi = std::intrinsics::discriminant_value(&self);
+            // let __self1_vi = std::intrinsics::discriminant_value(&arg1);
+            // let __self2_vi = std::intrinsics::discriminant_value(&arg2);
             // ```
             let mut index_let_stmts: Vec<ast::Stmt> = Vec::with_capacity(vi_idents.len() + 1);
 
@@ -1474,7 +1468,7 @@
             // derive Debug on such a type could here generate code
             // that needs the feature gate enabled.)
 
-            deriving::call_intrinsic(cx, sp, sym::unreachable, vec![])
+            deriving::call_unreachable(cx, sp)
         } else {
             // Final wrinkle: the self_args are expressions that deref
             // down to desired places, but we cannot actually deref
diff --git a/compiler/rustc_builtin_macros/src/deriving/mod.rs b/compiler/rustc_builtin_macros/src/deriving/mod.rs
index 9c8e0fc..bf95093 100644
--- a/compiler/rustc_builtin_macros/src/deriving/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/mod.rs
@@ -68,7 +68,14 @@
 ) -> P<ast::Expr> {
     let span = cx.with_def_site_ctxt(span);
     let path = cx.std_path(&[sym::intrinsics, intrinsic]);
-    let call = cx.expr_call_global(span, path, args);
+    cx.expr_call_global(span, path, args)
+}
+
+/// Constructs an expression that calls the `unreachable` intrinsic.
+fn call_unreachable(cx: &ExtCtxt<'_>, span: Span) -> P<ast::Expr> {
+    let span = cx.with_def_site_ctxt(span);
+    let path = cx.std_path(&[sym::intrinsics, sym::unreachable]);
+    let call = cx.expr_call_global(span, path, vec![]);
 
     cx.expr_block(P(ast::Block {
         stmts: vec![cx.stmt_expr(call)],
diff --git a/compiler/rustc_builtin_macros/src/format_foreign.rs b/compiler/rustc_builtin_macros/src/format_foreign.rs
index ff81b5e..b69b00d 100644
--- a/compiler/rustc_builtin_macros/src/format_foreign.rs
+++ b/compiler/rustc_builtin_macros/src/format_foreign.rs
@@ -385,7 +385,7 @@
         if let Start = state {
             match c {
                 '1'..='9' => {
-                    let end = at_next_cp_while(next, is_digit);
+                    let end = at_next_cp_while(next, char::is_ascii_digit);
                     match end.next_cp() {
                         // Yes, this *is* the parameter.
                         Some(('$', end2)) => {
@@ -427,7 +427,7 @@
                     move_to!(next);
                 }
                 '1'..='9' => {
-                    let end = at_next_cp_while(next, is_digit);
+                    let end = at_next_cp_while(next, char::is_ascii_digit);
                     state = Prec;
                     width = Some(Num::from_str(at.slice_between(end).unwrap(), None));
                     move_to!(end);
@@ -441,7 +441,7 @@
         }
 
         if let WidthArg = state {
-            let end = at_next_cp_while(at, is_digit);
+            let end = at_next_cp_while(at, char::is_ascii_digit);
             match end.next_cp() {
                 Some(('$', end2)) => {
                     state = Prec;
@@ -473,7 +473,7 @@
         if let PrecInner = state {
             match c {
                 '*' => {
-                    let end = at_next_cp_while(next, is_digit);
+                    let end = at_next_cp_while(next, char::is_ascii_digit);
                     match end.next_cp() {
                         Some(('$', end2)) => {
                             state = Length;
@@ -488,7 +488,7 @@
                     }
                 }
                 '0'..='9' => {
-                    let end = at_next_cp_while(next, is_digit);
+                    let end = at_next_cp_while(next, char::is_ascii_digit);
                     state = Length;
                     precision = Some(Num::from_str(at.slice_between(end).unwrap(), None));
                     move_to!(end);
@@ -563,12 +563,12 @@
 
     fn at_next_cp_while<F>(mut cur: Cur<'_>, mut pred: F) -> Cur<'_>
     where
-        F: FnMut(char) -> bool,
+        F: FnMut(&char) -> bool,
     {
         loop {
             match cur.next_cp() {
                 Some((c, next)) => {
-                    if pred(c) {
+                    if pred(&c) {
                         cur = next;
                     } else {
                         return cur;
@@ -579,14 +579,7 @@
         }
     }
 
-    fn is_digit(c: char) -> bool {
-        match c {
-            '0'..='9' => true,
-            _ => false,
-        }
-    }
-
-    fn is_flag(c: char) -> bool {
+    fn is_flag(c: &char) -> bool {
         match c {
             '0' | '-' | '+' | ' ' | '#' | '\'' => true,
             _ => false,
@@ -723,17 +716,11 @@
     }
 
     fn is_ident_head(c: char) -> bool {
-        match c {
-            'a'..='z' | 'A'..='Z' | '_' => true,
-            _ => false,
-        }
+        c.is_ascii_alphabetic() || c == '_'
     }
 
     fn is_ident_tail(c: char) -> bool {
-        match c {
-            '0'..='9' => true,
-            c => is_ident_head(c),
-        }
+        c.is_ascii_alphanumeric() || c == '_'
     }
 
     #[cfg(test)]
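
The `format_foreign` change above passes `char::is_ascii_digit` directly as the predicate; since that method takes `&self`, it is usable as a `fn(&char) -> bool`, which is why the helper's bound becomes `FnMut(&char) -> bool`. A runnable sketch of the same idea outside the compiler (the helper name here is made up):

    fn count_leading(s: &str, mut pred: impl FnMut(&char) -> bool) -> usize {
        // `take_while` hands the predicate a `&char`, matching the bound.
        s.chars().take_while(|c| pred(c)).count()
    }

    fn main() {
        // `char::is_ascii_digit` can be passed by name, as in the patched
        // `at_next_cp_while` calls.
        assert_eq!(count_leading("123$rest", char::is_ascii_digit), 3);
        assert_eq!(count_leading("abc1", |c: &char| c.is_ascii_alphabetic()), 3);
    }
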
diff --git a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
index c6ab3fa..4e91436 100644
--- a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
+++ b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
@@ -344,10 +344,6 @@
         visit::walk_item(self, item);
         self.in_root = prev_in_root;
     }
-
-    fn visit_mac(&mut self, mac: &'a ast::MacCall) {
-        visit::walk_mac(self, mac)
-    }
 }
 
 // Creates a new module which looks like:
diff --git a/compiler/rustc_builtin_macros/src/source_util.rs b/compiler/rustc_builtin_macros/src/source_util.rs
index 7075320..f76bbd8 100644
--- a/compiler/rustc_builtin_macros/src/source_util.rs
+++ b/compiler/rustc_builtin_macros/src/source_util.rs
@@ -13,8 +13,6 @@
 use smallvec::SmallVec;
 use std::rc::Rc;
 
-use rustc_data_structures::sync::Lrc;
-
 // These macros all relate to the file system; they either return
 // the column/row/filename of the expression, or they include
 // a given file into the current one.
@@ -216,7 +214,7 @@
         }
     };
     match cx.source_map().load_binary_file(&file) {
-        Ok(bytes) => base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(Lrc::new(bytes)))),
+        Ok(bytes) => base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(bytes.into()))),
         Err(e) => {
             cx.span_err(sp, &format!("couldn't read {}: {}", file.display(), e));
             DummyResult::any(sp)
diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs
index 0a60ca8..9976140 100644
--- a/compiler/rustc_builtin_macros/src/test_harness.rs
+++ b/compiler/rustc_builtin_macros/src/test_harness.rs
@@ -37,7 +37,7 @@
 pub fn inject(sess: &Session, resolver: &mut dyn ResolverExpand, krate: &mut ast::Crate) {
     let span_diagnostic = sess.diagnostic();
     let panic_strategy = sess.panic_strategy();
-    let platform_panic_strategy = sess.target.target.options.panic_strategy;
+    let platform_panic_strategy = sess.target.panic_strategy;
 
     // Check for #![reexport_test_harness_main = "some_name"] which gives the
     // main test function the name `some_name` without hygiene. This needs to be
@@ -130,10 +130,6 @@
         }
         smallvec![P(item)]
     }
-
-    fn visit_mac(&mut self, _mac: &mut ast::MacCall) {
-        // Do nothing.
-    }
 }
 
 // Beware, this is duplicated in librustc_passes/entry.rs (with
@@ -201,10 +197,6 @@
 
         smallvec![item]
     }
-
-    fn visit_mac(&mut self, _mac: &mut ast::MacCall) {
-        // Do nothing.
-    }
 }
 
 /// Crawl over the crate, inserting test reexports and the test main function
@@ -290,7 +282,7 @@
     let mut test_runner = cx
         .test_runner
         .clone()
-        .unwrap_or(ecx.path(sp, vec![test_id, Ident::from_str_and_span(runner_name, sp)]));
+        .unwrap_or_else(|| ecx.path(sp, vec![test_id, Ident::from_str_and_span(runner_name, sp)]));
 
     test_runner.span = sp;
 
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/bootstrap_rustc.yml b/compiler/rustc_codegen_cranelift/.github/workflows/bootstrap_rustc.yml
new file mode 100644
index 0000000..8c94a0a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/bootstrap_rustc.yml
@@ -0,0 +1,44 @@
+name: Bootstrap rustc using cg_clif
+
+on:
+  - push
+
+jobs:
+  bootstrap_rustc:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Cache cargo installed crates
+      uses: actions/cache@v2
+      with:
+        path: ~/.cargo/bin
+        key: ${{ runner.os }}-cargo-installed-crates
+
+    - name: Cache cargo registry and index
+      uses: actions/cache@v2
+      with:
+        path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+    - name: Cache cargo target dir
+      uses: actions/cache@v2
+      with:
+        path: target
+        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+    - name: Prepare dependencies
+      run: |
+        git config --global user.email "[email protected]"
+        git config --global user.name "User"
+        ./prepare.sh
+
+    - name: Test
+      run: |
+        # Enable backtraces for easier debugging
+        export RUST_BACKTRACE=1
+
+        ./scripts/test_bootstrap.sh
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/main.yml b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
new file mode 100644
index 0000000..e6d3375
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
@@ -0,0 +1,63 @@
+name: CI
+
+on:
+  - push
+  - pull_request
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, macos-latest]
+
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Cache cargo installed crates
+      uses: actions/cache@v2
+      with:
+        path: ~/.cargo/bin
+        key: ${{ runner.os }}-cargo-installed-crates
+
+    - name: Cache cargo registry and index
+      uses: actions/cache@v2
+      with:
+        path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+    - name: Cache cargo target dir
+      uses: actions/cache@v2
+      with:
+        path: target
+        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+    - name: Prepare dependencies
+      run: |
+        git config --global user.email "[email protected]"
+        git config --global user.name "User"
+        ./prepare.sh
+
+    - name: Test
+      run: |
+        # Enable backtraces for easier debugging
+        export RUST_BACKTRACE=1
+
+        # Reduce amount of benchmark runs as they are slow
+        export COMPILE_RUNS=2
+        export RUN_RUNS=2
+
+        ./test.sh
+
+    - name: Package prebuilt cg_clif
+      run: tar cvfJ cg_clif.tar.xz build
+
+    - name: Upload prebuilt cg_clif
+      uses: actions/upload-artifact@v2
+      with:
+        name: cg_clif-${{ runner.os }}
+        path: cg_clif.tar.xz
diff --git a/compiler/rustc_codegen_cranelift/.vscode/settings.json b/compiler/rustc_codegen_cranelift/.vscode/settings.json
new file mode 100644
index 0000000..04ab508
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.vscode/settings.json
@@ -0,0 +1,53 @@
+{
+    // source for rustc_* is not included in the rust-src component; disable the errors about this
+    "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate"],
+    "rust-analyzer.cargo.loadOutDirsFromCheck": true,
+    "rust-analyzer.linkedProjects": [
+        "./Cargo.toml",
+        //"./build_sysroot/sysroot_src/src/libstd/Cargo.toml",
+        {
+            "roots": [
+                "./example/mini_core.rs",
+                "./example/mini_core_hello_world.rs",
+                "./example/mod_bench.rs"
+            ],
+            "crates": [
+                {
+                    "root_module": "./example/mini_core.rs",
+                    "edition": "2018",
+                    "deps": [],
+                    "cfg": [],
+                },
+                {
+                    "root_module": "./example/mini_core_hello_world.rs",
+                    "edition": "2018",
+                    "deps": [{ "crate": 0, "name": "mini_core" }],
+                    "cfg": [],
+                },
+                {
+                    "root_module": "./example/mod_bench.rs",
+                    "edition": "2018",
+                    "deps": [],
+                    "cfg": [],
+                },
+            ]
+        },
+        {
+            "roots": ["./scripts/filter_profile.rs"],
+            "crates": [
+                {
+                    "root_module": "./scripts/filter_profile.rs",
+                    "edition": "2018",
+                    "deps": [{ "crate": 1, "name": "std" }],
+                    "cfg": [],
+                },
+                {
+                    "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
+                    "edition": "2018",
+                    "deps": [],
+                    "cfg": [],
+                },
+            ]
+        }
+    ]
+}
diff --git a/compiler/rustc_codegen_cranelift/Cargo.lock b/compiler/rustc_codegen_cranelift/Cargo.lock
new file mode 100644
index 0000000..2889fac
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/Cargo.lock
@@ -0,0 +1,425 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "anyhow"
+version = "1.0.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1fd36ffbb1fb7c834eac128ea8d0e310c5aeb635548f9d58861e1308d46e71c"
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "bitflags"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+
+[[package]]
+name = "byteorder"
+version = "1.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+
+[[package]]
+name = "cc"
+version = "1.0.61"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+
+[[package]]
+name = "cranelift-bforest"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+dependencies = [
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+dependencies = [
+ "byteorder",
+ "cranelift-bforest",
+ "cranelift-codegen-meta",
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+ "gimli",
+ "log",
+ "regalloc",
+ "smallvec",
+ "target-lexicon",
+ "thiserror",
+]
+
+[[package]]
+name = "cranelift-codegen-meta"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+dependencies = [
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen-shared"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+
+[[package]]
+name = "cranelift-entity"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+
+[[package]]
+name = "cranelift-frontend"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+dependencies = [
+ "cranelift-codegen",
+ "log",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-module"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-entity",
+ "log",
+ "thiserror",
+]
+
+[[package]]
+name = "cranelift-native"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+dependencies = [
+ "cranelift-codegen",
+ "raw-cpuid",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-object"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-module",
+ "log",
+ "object",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-simplejit"
+version = "0.67.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
+dependencies = [
+ "cranelift-codegen",
+ "cranelift-entity",
+ "cranelift-module",
+ "cranelift-native",
+ "errno",
+ "libc",
+ "log",
+ "region",
+ "target-lexicon",
+ "winapi",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "errno"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6eab5ee3df98a279d9b316b1af6ac95422127b1290317e6d18c1743c99418b01"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067"
+dependencies = [
+ "gcc",
+ "libc",
+]
+
+[[package]]
+name = "gcc"
+version = "0.3.55"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
+
+[[package]]
+name = "gimli"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724"
+dependencies = [
+ "indexmap",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+
+[[package]]
+name = "indexmap"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743"
+
+[[package]]
+name = "libloading"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3557c9384f7f757f6d139cd3a4c62ef4e850696c16bf27924a5538c8a09717a1"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "log"
+version = "0.4.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "mach"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "object"
+version = "0.21.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37fd5004feb2ce328a52b0b3d01dbf4ffff72583493900ed15f22d4111c51693"
+dependencies = [
+ "crc32fast",
+ "indexmap",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "raw-cpuid"
+version = "7.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4a349ca83373cfa5d6dbb66fd76e58b2cca08da71a5f6400de0a0a6a9bceeaf"
+dependencies = [
+ "bitflags",
+ "cc",
+ "rustc_version",
+]
+
+[[package]]
+name = "regalloc"
+version = "0.0.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5"
+dependencies = [
+ "log",
+ "rustc-hash",
+ "smallvec",
+]
+
+[[package]]
+name = "region"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
+dependencies = [
+ "bitflags",
+ "libc",
+ "mach",
+ "winapi",
+]
+
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
+[[package]]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "cranelift-codegen",
+ "cranelift-frontend",
+ "cranelift-module",
+ "cranelift-object",
+ "cranelift-simplejit",
+ "gimli",
+ "indexmap",
+ "libloading",
+ "object",
+ "target-lexicon",
+]
+
+[[package]]
+name = "rustc_version"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+dependencies = [
+ "semver",
+]
+
+[[package]]
+name = "semver"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
+dependencies = [
+ "semver-parser",
+]
+
+[[package]]
+name = "semver-parser"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
+
+[[package]]
+name = "smallvec"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252"
+
+[[package]]
+name = "syn"
+version = "1.0.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e03e57e4fcbfe7749842d53e24ccb9aa12b7252dbe5e91d2acad31834c8b8fdd"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "target-lexicon"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe2635952a442a01fd4cb53d98858b5e4bb461b02c0d111f22f31772e3e7a8b2"
+
+[[package]]
+name = "thiserror"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/compiler/rustc_codegen_cranelift/Cargo.toml b/compiler/rustc_codegen_cranelift/Cargo.toml
new file mode 100644
index 0000000..1c8e350
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/Cargo.toml
@@ -0,0 +1,76 @@
+[package]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+authors = ["bjorn3 <[email protected]>"]
+edition = "2018"
+
+[lib]
+crate-type = ["dylib"]
+
+[dependencies]
+# These have to be in sync with each other
+cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", features = ["unwind"] }
+cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
+cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
+cranelift-simplejit = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", optional = true }
+cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
+target-lexicon = "0.11.0"
+gimli = { version = "0.22.0", default-features = false, features = ["write"]}
+object = { version = "0.21.1", default-features = false, features = ["std", "read_core", "write", "coff", "elf", "macho", "pe"] }
+
+ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
+indexmap = "1.0.2"
+libloading = { version = "0.6.0", optional = true }
+
+# Uncomment to use local checkout of cranelift
+#[patch."https://github.com/bytecodealliance/wasmtime/"]
+#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
+#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
+#cranelift-module = { path = "../wasmtime/cranelift/module" }
+#cranelift-simplejit = { path = "../wasmtime/cranelift/simplejit" }
+#cranelift-object = { path = "../wasmtime/cranelift/object" }
+
+#[patch.crates-io]
+#gimli = { path = "../" }
+
+[features]
+default = ["jit", "inline_asm"]
+jit = ["cranelift-simplejit", "libloading"]
+inline_asm = []
+
+[profile.dev]
+# Compiling dependencies with optimizations makes running the tests much faster.
+opt-level = 3
+
+[profile.dev.package.rustc_codegen_cranelift]
+# Disabling optimizations for cg_clif itself makes compilation after a change faster.
+opt-level = 0
+
+[profile.release.package.rustc_codegen_cranelift]
+incremental = true
+
+# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the
+# execution time of build scripts is so fast that optimizing them slows down the total build time.
+[profile.dev.build-override]
+opt-level = 0
+debug = false
+
+[profile.release.build-override]
+opt-level = 0
+debug = false
+
+[profile.dev.package.cranelift-codegen-meta]
+opt-level = 0
+debug = false
+
+[profile.release.package.cranelift-codegen-meta]
+opt-level = 0
+debug = false
+
+[profile.dev.package.syn]
+opt-level = 0
+debug = false
+
+[profile.release.package.syn]
+opt-level = 0
+debug = false
diff --git a/compiler/rustc_codegen_cranelift/LICENSE-APACHE b/compiler/rustc_codegen_cranelift/LICENSE-APACHE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/compiler/rustc_codegen_cranelift/LICENSE-MIT b/compiler/rustc_codegen_cranelift/LICENSE-MIT
new file mode 100644
index 0000000..31aa793
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/compiler/rustc_codegen_cranelift/Readme.md b/compiler/rustc_codegen_cranelift/Readme.md
new file mode 100644
index 0000000..f8a5e13
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/Readme.md
@@ -0,0 +1,103 @@
+# WIP Cranelift codegen backend for rust
+
+> ⚠⚠⚠ Certain kinds of FFI don't work yet. ⚠⚠⚠
+
+The goal of this project is to create an alternative codegen backend for the rust compiler based on [Cranelift](https://github.com/bytecodealliance/wasmtime/blob/master/cranelift).
+This has the potential to improve compilation times in debug mode.
+If your project doesn't use any of the things listed under "Not yet supported", it should work fine.
+If it doesn't, please open an issue.
+
+## Building and testing
+
+```bash
+$ git clone https://github.com/bjorn3/rustc_codegen_cranelift.git
+$ cd rustc_codegen_cranelift
+$ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking
+$ ./build.sh
+```
+
+To run the test suite replace the last command with:
+
+```bash
+$ ./test.sh
+```
+
+This will implicitly build cg_clif too. Both `build.sh` and `test.sh` accept a `--debug` argument to
+build in debug mode.
+
+Alternatively you can download a pre-built version from [GHA]. It is listed in the artifacts section
+of workflow runs. Unfortunately due to GHA restrictions you need to be logged in to access it.
+
+[GHA]: https://github.com/bjorn3/rustc_codegen_cranelift/actions?query=branch%3Amaster+event%3Apush+is%3Asuccess
+
+## Usage
+
+rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
+
+The following assumes that `$cg_clif_dir` is the directory you cloned this repo into and that you followed the instructions (`prepare.sh` and `build.sh` or `test.sh`).
+
+### Cargo
+
+In the directory with your project (where you can do the usual `cargo build`), run:
+
+```bash
+$ $cg_clif_dir/build/cargo.sh run
+```
+
+This should build and run your project with rustc_codegen_cranelift instead of the usual LLVM backend.
+
+### Rustc
+
+> You should prefer using the Cargo method.
+
+```bash
+$ $cg_clif_dir/build/cg_clif my_crate.rs
+```
+
+### Jit mode
+
+In jit mode cg_clif will immediately execute your code without creating an executable file.
+
+> This requires all dependencies to be available as dynamic libraries.
+> The jit mode will probably need cargo integration to make this possible.
+
+```bash
+$ $cg_clif_dir/build/cargo.sh jit
+```
+
+or
+
+```bash
+$ $cg_clif_dir/build/cg_clif --jit my_crate.rs
+```
+
+### Shell
+
+These are a few functions that allow you to easily run Rust code from the shell using cg_clif as a JIT.
+
+```bash
+function jit_naked() {
+    echo "$@" | $cg_clif_dir/build/cg_clif - --jit
+}
+
+function jit() {
+    jit_naked "fn main() { $@ }"
+}
+
+function jit_calc() {
+    jit 'println!("0x{:x}", ' $@ ');';
+}
+```
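+
+For example, with the functions above loaded, `jit_calc '2 * 21'` should print `0x2a`.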
+
+## Env vars
+
+[see env_vars.md](docs/env_vars.md)
+
+## Not yet supported
+
+* Good non-rust abi support ([several problems](https://github.com/bjorn3/rustc_codegen_cranelift/issues/10))
+* Inline assembly ([no cranelift support](https://github.com/bytecodealliance/wasmtime/issues/1041))
+    * On Linux there is support for invoking an external assembler for `global_asm!` and `asm!`.
+      `llvm_asm!` will remain unimplemented forever. `asm!` doesn't yet support reg classes. You
+      have to specify specific registers instead.
+* SIMD ([tracked here](https://github.com/bjorn3/rustc_codegen_cranelift/issues/171), some basic things work)
diff --git a/compiler/rustc_codegen_cranelift/build.sh b/compiler/rustc_codegen_cranelift/build.sh
new file mode 100755
index 0000000..f9a87e6
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -e
+
+# Settings
+export CHANNEL="release"
+build_sysroot=1
+target_dir='build'
+while [[ $# != 0 ]]; do
+    case $1 in
+        "--debug")
+            export CHANNEL="debug"
+            ;;
+        "--without-sysroot")
+            build_sysroot=0
+            ;;
+        "--target-dir")
+            target_dir=$2
+            shift
+            ;;
+        *)
+            echo "Unknown flag '$1'"
+            echo "Usage: ./build.sh [--debug] [--without-sysroot] [--target-dir DIR]"
+            exit 1
+            ;;
+    esac
+    shift
+done
+
+# Build cg_clif
+export RUSTFLAGS="-Zrun_dsymutil=no"
+if [[ "$CHANNEL" == "release" ]]; then
+    cargo build --release
+else
+    cargo build
+fi
+
+rm -rf $target_dir
+mkdir $target_dir
+cp -a target/$CHANNEL/cg_clif{,_build_sysroot} target/$CHANNEL/*rustc_codegen_cranelift* $target_dir/
+cp -a rust-toolchain scripts/config.sh scripts/cargo.sh $target_dir
+
+if [[ "$build_sysroot" == "1" ]]; then
+    echo "[BUILD] sysroot"
+    export CG_CLIF_INCR_CACHE_DISABLED=1
+    dir=$(pwd)
+    cd $target_dir
+    time $dir/build_sysroot/build_sysroot.sh
+fi
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
new file mode 100644
index 0000000..03ba5b5
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
@@ -0,0 +1,324 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "addr2line"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072"
+dependencies = [
+ "compiler_builtins",
+ "gimli",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "adler"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "alloc"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+]
+
+[[package]]
+name = "alloc_system"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "cc"
+version = "1.0.61"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "compiler_builtins"
+version = "0.1.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7cd0782e0a7da7598164153173e5a5d4d9b1da094473c98dce0ff91406112369"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "core"
+version = "0.0.0"
+
+[[package]]
+name = "dlmalloc"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35055b1021724f4eb5262eb49130eebff23fc59fc5a14160e05faad8eeb36673"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "fortanix-sgx-abi"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+ "unicode-width",
+]
+
+[[package]]
+name = "gimli"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.80"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
+dependencies = [
+ "adler",
+ "autocfg",
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "object"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "panic_abort"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+ "unwind",
+]
+
+[[package]]
+name = "proc_macro"
+version = "0.0.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "rustc-std-workspace-alloc"
+version = "1.99.0"
+dependencies = [
+ "alloc",
+]
+
+[[package]]
+name = "rustc-std-workspace-core"
+version = "1.99.0"
+dependencies = [
+ "core",
+]
+
+[[package]]
+name = "rustc-std-workspace-std"
+version = "1.99.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "std"
+version = "0.0.0"
+dependencies = [
+ "addr2line",
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "dlmalloc",
+ "fortanix-sgx-abi",
+ "hashbrown",
+ "hermit-abi",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "panic_abort",
+ "panic_unwind",
+ "rustc-demangle",
+ "unwind",
+ "wasi",
+]
+
+[[package]]
+name = "sysroot"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "alloc_system",
+ "compiler_builtins",
+ "core",
+ "std",
+ "test",
+]
+
+[[package]]
+name = "term"
+version = "0.0.0"
+dependencies = [
+ "core",
+ "std",
+]
+
+[[package]]
+name = "test"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "core",
+ "getopts",
+ "libc",
+ "panic_abort",
+ "panic_unwind",
+ "proc_macro",
+ "std",
+ "term",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+]
+
+[[package]]
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "wasi"
+version = "0.9.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml
new file mode 100644
index 0000000..e562ded
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+authors = ["bjorn3 <[email protected]>"]
+name = "sysroot"
+version = "0.0.0"
+
+[dependencies]
+core = { path = "./sysroot_src/library/core" }
+compiler_builtins = "0.1"
+alloc = { path = "./sysroot_src/library/alloc" }
+std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
+test = { path = "./sysroot_src/library/test" }
+
+alloc_system = { path = "./alloc_system" }
+
+[patch.crates-io]
+rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
+rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
+rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
+
+[profile.dev]
+lto = "off"
+
+[profile.release]
+debug = true
+incremental = true
+lto = "off"
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/Cargo.toml b/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/Cargo.toml
new file mode 100644
index 0000000..9fffca8
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+authors = ["The Rust Project Developers", "bjorn3 (edited to be usable outside the rust source)"]
+name = "alloc_system"
+version = "0.0.0"
+[lib]
+name = "alloc_system"
+path = "lib.rs"
+test = false
+doc = false
+[dependencies]
+core = { path = "../sysroot_src/library/core" }
+libc = { version = "0.2.43", features = ['rustc-dep-of-std'], default-features = false }
+compiler_builtins = "0.1"
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/lib.rs b/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/lib.rs
new file mode 100644
index 0000000..ca145e4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/lib.rs
@@ -0,0 +1,342 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![no_std]
+#![allow(unused_attributes)]
+#![unstable(feature = "alloc_system",
+            reason = "this library is unlikely to be stabilized in its current \
+                      form or name",
+            issue = "32838")]
+#![feature(allocator_api)]
+#![feature(core_intrinsics)]
+#![feature(nll)]
+#![feature(staged_api)]
+#![feature(rustc_attrs)]
+#![feature(alloc_layout_extra)]
+#![cfg_attr(
+    all(target_arch = "wasm32", not(target_os = "emscripten")),
+    feature(integer_atomics, stdsimd)
+)]
+#![cfg_attr(any(unix, target_os = "cloudabi", target_os = "redox"), feature(libc))]
+// The minimum alignment guaranteed by the architecture. This value is used to
+// add fast paths for low alignment values.
+#[cfg(all(any(target_arch = "x86",
+              target_arch = "arm",
+              target_arch = "mips",
+              target_arch = "powerpc",
+              target_arch = "powerpc64",
+              target_arch = "asmjs",
+              target_arch = "wasm32")))]
+#[allow(dead_code)]
+const MIN_ALIGN: usize = 8;
+#[cfg(all(any(target_arch = "x86_64",
+              target_arch = "aarch64",
+              target_arch = "mips64",
+              target_arch = "s390x",
+              target_arch = "sparc64")))]
+#[allow(dead_code)]
+const MIN_ALIGN: usize = 16;
+
+/// The default memory allocator provided by the operating system.
+///
+/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows,
+/// plus related functions.
+///
+/// This type can be used in a `static` item
+/// with the `#[global_allocator]` attribute
+/// to force the global allocator to be the system’s one.
+/// (The default is jemalloc for executables, on some platforms.)
+///
+/// ```rust
+/// use std::alloc::System;
+///
+/// #[global_allocator]
+/// static A: System = System;
+///
+/// fn main() {
+///     let a = Box::new(4); // Allocates from the system allocator.
+///     println!("{}", a);
+/// }
+/// ```
+///
+/// It can also be used directly to allocate memory
+/// independently of the standard library’s global allocator.
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+pub struct System;
+#[cfg(any(windows, unix, target_os = "cloudabi", target_os = "redox"))]
+mod realloc_fallback {
+    use core::alloc::{GlobalAlloc, Layout};
+    use core::cmp;
+    use core::ptr;
+    impl super::System {
+        pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout,
+                                              new_size: usize) -> *mut u8 {
+            // Docs for GlobalAlloc::realloc require this to be valid:
+            let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
+            let new_ptr = GlobalAlloc::alloc(self, new_layout);
+            if !new_ptr.is_null() {
+                let size = cmp::min(old_layout.size(), new_size);
+                ptr::copy_nonoverlapping(ptr, new_ptr, size);
+                GlobalAlloc::dealloc(self, ptr, old_layout);
+            }
+            new_ptr
+        }
+    }
+}
+#[cfg(any(unix, target_os = "cloudabi", target_os = "redox"))]
+mod platform {
+    extern crate libc;
+    use core::ptr;
+    use MIN_ALIGN;
+    use System;
+    use core::alloc::{GlobalAlloc, Layout};
+    #[stable(feature = "alloc_system_type", since = "1.28.0")]
+    unsafe impl GlobalAlloc for System {
+        #[inline]
+        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+                libc::malloc(layout.size()) as *mut u8
+            } else {
+                #[cfg(target_os = "macos")]
+                {
+                    if layout.align() > (1 << 31) {
+                        return ptr::null_mut()
+                    }
+                }
+                aligned_malloc(&layout)
+            }
+        }
+        #[inline]
+        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+                libc::calloc(layout.size(), 1) as *mut u8
+            } else {
+                let ptr = self.alloc(layout.clone());
+                if !ptr.is_null() {
+                    ptr::write_bytes(ptr, 0, layout.size());
+                }
+                ptr
+            }
+        }
+        #[inline]
+        unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+            libc::free(ptr as *mut libc::c_void)
+        }
+        #[inline]
+        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
+                libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
+            } else {
+                self.realloc_fallback(ptr, layout, new_size)
+            }
+        }
+    }
+    #[cfg(any(target_os = "android",
+              target_os = "hermit",
+              target_os = "redox",
+              target_os = "solaris"))]
+    #[inline]
+    unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+        // On android we currently target API level 9 which unfortunately
+        // doesn't have the `posix_memalign` API used below. Instead we use
+        // `memalign`, but this unfortunately has the property on some systems
+        // where the memory returned cannot be deallocated by `free`!
+        //
+        // Upon closer inspection, however, this appears to work just fine with
+        // Android, so for this platform we should be fine to call `memalign`
+        // (which is present in API level 9). Some helpful references could
+        // possibly be chromium using memalign [1], attempts at documenting that
+        // memalign + free is ok [2] [3], or the current source of chromium
+        // which still uses memalign on android [4].
+        //
+        // [1]: https://codereview.chromium.org/10796020/
+        // [2]: https://code.google.com/p/android/issues/detail?id=35391
+        // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
+        // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
+        //                                       /memory/aligned_memory.cc
+        libc::memalign(layout.align(), layout.size()) as *mut u8
+    }
+    #[cfg(not(any(target_os = "android",
+                  target_os = "hermit",
+                  target_os = "redox",
+                  target_os = "solaris")))]
+    #[inline]
+    unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+        let mut out = ptr::null_mut();
+        let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
+        if ret != 0 {
+            ptr::null_mut()
+        } else {
+            out as *mut u8
+        }
+    }
+}
+#[cfg(windows)]
+#[allow(nonstandard_style)]
+mod platform {
+    use MIN_ALIGN;
+    use System;
+    use core::alloc::{GlobalAlloc, Layout};
+    type LPVOID = *mut u8;
+    type HANDLE = LPVOID;
+    type SIZE_T = usize;
+    type DWORD = u32;
+    type BOOL = i32;
+    extern "system" {
+        fn GetProcessHeap() -> HANDLE;
+        fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
+        fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
+        fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
+        fn GetLastError() -> DWORD;
+    }
+    #[repr(C)]
+    struct Header(*mut u8);
+    const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
+    unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
+        &mut *(ptr as *mut Header).offset(-1)
+    }
+    unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
+        let aligned = ptr.add(align - (ptr as usize & (align - 1)));
+        *get_header(aligned) = Header(ptr);
+        aligned
+    }
+    #[inline]
+    unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
+        let ptr = if layout.align() <= MIN_ALIGN {
+            HeapAlloc(GetProcessHeap(), flags, layout.size())
+        } else {
+            let size = layout.size() + layout.align();
+            let ptr = HeapAlloc(GetProcessHeap(), flags, size);
+            if ptr.is_null() {
+                ptr
+            } else {
+                align_ptr(ptr, layout.align())
+            }
+        };
+        ptr as *mut u8
+    }
+    #[stable(feature = "alloc_system_type", since = "1.28.0")]
+    unsafe impl GlobalAlloc for System {
+        #[inline]
+        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+            allocate_with_flags(layout, 0)
+        }
+        #[inline]
+        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+            allocate_with_flags(layout, HEAP_ZERO_MEMORY)
+        }
+        #[inline]
+        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+            if layout.align() <= MIN_ALIGN {
+                let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
+                debug_assert!(err != 0, "Failed to free heap memory: {}",
+                              GetLastError());
+            } else {
+                let header = get_header(ptr);
+                let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
+                debug_assert!(err != 0, "Failed to free heap memory: {}",
+                              GetLastError());
+            }
+        }
+        #[inline]
+        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN {
+                HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
+            } else {
+                self.realloc_fallback(ptr, layout, new_size)
+            }
+        }
+    }
+}
+// This is an implementation of a global allocator on the wasm32 platform when
+// emscripten is not in use. In that situation there's no actual runtime for us
+// to lean on for allocation, so instead we provide our own!
+//
+// The wasm32 instruction set has two instructions for getting the current
+// amount of memory and growing the amount of memory. These instructions are the
+// foundation on which we're able to build an allocator, so we do so! Note that
+// the instructions are also pretty "global" and this is the "global" allocator
+// after all!
+//
+// The current allocator here is the `dlmalloc` crate which we've got included
+// in the rust-lang/rust repository as a submodule. The crate is a port of
+// dlmalloc.c from C to Rust and is basically just so we can have "pure Rust"
+// for now which is currently technically required (can't link with C yet).
+//
+// The crate itself provides a global allocator which on wasm has no
+// synchronization as there are no threads!
+#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+mod platform {
+    extern crate dlmalloc;
+    use core::alloc::{GlobalAlloc, Layout};
+    use System;
+    static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::DLMALLOC_INIT;
+    #[stable(feature = "alloc_system_type", since = "1.28.0")]
+    unsafe impl GlobalAlloc for System {
+        #[inline]
+        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+            let _lock = lock::lock();
+            DLMALLOC.malloc(layout.size(), layout.align())
+        }
+        #[inline]
+        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+            let _lock = lock::lock();
+            DLMALLOC.calloc(layout.size(), layout.align())
+        }
+        #[inline]
+        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+            let _lock = lock::lock();
+            DLMALLOC.free(ptr, layout.size(), layout.align())
+        }
+        #[inline]
+        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+            let _lock = lock::lock();
+            DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size)
+        }
+    }
+    #[cfg(target_feature = "atomics")]
+    mod lock {
+        use core::arch::wasm32;
+        use core::sync::atomic::{AtomicI32, Ordering::SeqCst};
+        static LOCKED: AtomicI32 = AtomicI32::new(0);
+        pub struct DropLock;
+        pub fn lock() -> DropLock {
+            loop {
+                if LOCKED.swap(1, SeqCst) == 0 {
+                    return DropLock
+                }
+                unsafe {
+                    let r = wasm32::atomic::wait_i32(
+                        &LOCKED as *const AtomicI32 as *mut i32,
+                        1,  // expected value
+                        -1, // timeout
+                    );
+                    debug_assert!(r == 0 || r == 1);
+                }
+            }
+        }
+        impl Drop for DropLock {
+            fn drop(&mut self) {
+                let r = LOCKED.swap(0, SeqCst);
+                debug_assert_eq!(r, 1);
+                unsafe {
+                    wasm32::atomic::wake(
+                        &LOCKED as *const AtomicI32 as *mut i32,
+                        1, // only one thread
+                    );
+                }
+            }
+        }
+    }
+    #[cfg(not(target_feature = "atomics"))]
+    mod lock {
+        #[inline]
+        pub fn lock() {} // no atomics, no threads, that's easy!
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/build_sysroot.sh b/compiler/rustc_codegen_cranelift/build_sysroot/build_sysroot.sh
new file mode 100755
index 0000000..eba15c0
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/build_sysroot.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Requires the CHANNEL env var to be set to `debug` or `release`.
+
+set -e
+
+source ./config.sh
+
+dir=$(pwd)
+
+# Use rustc with cg_clif as a hotpluggable backend instead of the custom cg_clif driver so that
+# build scripts are still compiled using cg_llvm.
+export RUSTC=$dir"/cg_clif_build_sysroot"
+export RUSTFLAGS=$RUSTFLAGS" --clif"
+
+cd $(dirname "$0")
+
+# Cleanup for previous run
+#     v Clean target dir except for build scripts and incremental cache
+rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
+
+# We expect the target dir in the default location. Guard against the user changing it.
+export CARGO_TARGET_DIR=target
+
+# Build libs
+export RUSTFLAGS="$RUSTFLAGS -Zforce-unstable-if-unmarked -Cpanic=abort"
+if [[ "$1" != "--debug" ]]; then
+    sysroot_channel='release'
+    # FIXME Enable incremental again once rust-lang/rust#74946 is fixed
+    # FIXME Enable -Zmir-opt-level=2 again once it doesn't ice anymore
+    CARGO_INCREMENTAL=0 RUSTFLAGS="$RUSTFLAGS" cargo build --target $TARGET_TRIPLE --release
+else
+    sysroot_channel='debug'
+    cargo build --target $TARGET_TRIPLE
+fi
+
+# Copy files to sysroot
+mkdir -p $dir/sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
+cp -a target/$TARGET_TRIPLE/$sysroot_channel/deps/* $dir/sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/prepare_sysroot_src.sh b/compiler/rustc_codegen_cranelift/build_sysroot/prepare_sysroot_src.sh
new file mode 100755
index 0000000..d0fb09c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/prepare_sysroot_src.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -e
+cd $(dirname "$0")
+
+SRC_DIR=$(dirname $(rustup which rustc))"/../lib/rustlib/src/rust/"
+DST_DIR="sysroot_src"
+
+if [ ! -e $SRC_DIR ]; then
+    echo "Please install rust-src component"
+    exit 1
+fi
+
+rm -rf $DST_DIR
+mkdir -p $DST_DIR/library
+cp -a $SRC_DIR/library $DST_DIR/
+
+pushd $DST_DIR
+echo "[GIT] init"
+git init
+echo "[GIT] add"
+git add .
+echo "[GIT] commit"
+git commit -m "Initial commit" -q
+for file in $(ls ../../patches/ | grep -v patcha); do
+echo "[GIT] apply" $file
+git apply ../../patches/$file
+git add -A
+git commit --no-gpg-sign -m "Patch $file"
+done
+popd
+
+echo "Successfully prepared libcore for building"
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs b/compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs
new file mode 100644
index 0000000..0c9ac1a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs
@@ -0,0 +1 @@
+#![no_std]
diff --git a/compiler/rustc_codegen_cranelift/clean_all.sh b/compiler/rustc_codegen_cranelift/clean_all.sh
new file mode 100755
index 0000000..5a69c86
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/clean_all.sh
@@ -0,0 +1,5 @@
+#!/bin/bash --verbose
+set -e
+
+rm -rf target/ build/ build_sysroot/{sysroot_src/,target/} perf.data{,.old}
+rm -rf rand/ regex/ simple-raytracer/
diff --git a/compiler/rustc_codegen_cranelift/crate_patches/0001-rand-Enable-c2-chacha-simd-feature.patch b/compiler/rustc_codegen_cranelift/crate_patches/0001-rand-Enable-c2-chacha-simd-feature.patch
new file mode 100644
index 0000000..01dc0fc
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/crate_patches/0001-rand-Enable-c2-chacha-simd-feature.patch
@@ -0,0 +1,23 @@
+From 9c5663e36391fa20becf84f3af2e82afa5bb720b Mon Sep 17 00:00:00 2001
+From: bjorn3 <[email protected]>
+Date: Sat, 15 Aug 2020 19:56:03 +0200
+Subject: [PATCH] [rand] Enable c2-chacha simd feature
+
+---
+ rand_chacha/Cargo.toml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rand_chacha/Cargo.toml b/rand_chacha/Cargo.toml
+index 9190b7f..872cca2 100644
+--- a/rand_chacha/Cargo.toml
++++ b/rand_chacha/Cargo.toml
+@@ -24,5 +24,5 @@ ppv-lite86 = { version = "0.2.8", default-features = false }
+ 
+ [features]
+ default = ["std"]
+-std = ["ppv-lite86/std"]
++std = ["ppv-lite86/std", "ppv-lite86/simd"]
+ simd = [] # deprecated
+-- 
+2.20.1
+
diff --git a/compiler/rustc_codegen_cranelift/crate_patches/0002-rand-Disable-failing-test.patch b/compiler/rustc_codegen_cranelift/crate_patches/0002-rand-Disable-failing-test.patch
new file mode 100644
index 0000000..19fd20d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/crate_patches/0002-rand-Disable-failing-test.patch
@@ -0,0 +1,33 @@
+From a8fb97120d71252538b6b026695df40d02696bdb Mon Sep 17 00:00:00 2001
+From: bjorn3 <[email protected]>
+Date: Sat, 15 Aug 2020 20:04:38 +0200
+Subject: [PATCH] [rand] Disable failing test
+
+---
+ src/distributions/uniform.rs | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/src/distributions/uniform.rs b/src/distributions/uniform.rs
+index 480b859..c80bb6f 100644
+--- a/src/distributions/uniform.rs
++++ b/src/distributions/uniform.rs
+@@ -1085,7 +1085,7 @@ mod tests {
+             _ => panic!("`UniformDurationMode` was not serialized/deserialized correctly")
+         }
+     }
+-    
++
+     #[test]
+     #[cfg(feature = "serde1")]
+     fn test_uniform_serialization() {
+@@ -1314,6 +1314,7 @@ mod tests {
+         not(target_arch = "wasm32"),
+         not(target_arch = "asmjs")
+     ))]
++    #[ignore] // FIXME
+     fn test_float_assertions() {
+         use super::SampleUniform;
+         use std::panic::catch_unwind;
+-- 
+2.20.1
+
diff --git a/compiler/rustc_codegen_cranelift/docs/dwarf.md b/compiler/rustc_codegen_cranelift/docs/dwarf.md
new file mode 100644
index 0000000..502b1b0
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/docs/dwarf.md
@@ -0,0 +1,153 @@
+# Line number information
+
+Line number information maps between machine code instructions and the source level location.
+
+## Encoding
+
+The line number information is stored in the `.debug_line` section for ELF and `__debug_line`
+section of the `__DWARF` segment for Mach-O object files. The line number information contains a
+header followed by the line program. The line program is a program for a virtual machine with
+instructions such as "set the line number for the current machine code instruction" and "advance
+the current machine code instruction".
+
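+As a mental model (the real encoding uses a header and compact special opcodes), the line program
+can be thought of as a tiny state machine that appends one row to the line table whenever it is
+told to. Below is a minimal sketch of that idea in Rust; it is purely illustrative and is not the
+actual DWARF opcode set or this backend's implementation:
+
+```rust
+/// A drastically simplified model of the DWARF line program state machine.
+/// Real programs use compact special opcodes; this keeps only the two
+/// registers that matter for the mapping: address and line.
+struct LineMachine {
+    address: u64,
+    line: u64,
+    rows: Vec<(u64, u64)>, // (machine code address, source line)
+}
+
+enum Op {
+    SetLine(u64),
+    AdvanceAddress(u64),
+    EmitRow, // "copy" in DWARF terms: record the current state as a row
+}
+
+impl LineMachine {
+    fn new() -> Self {
+        LineMachine { address: 0, line: 1, rows: Vec::new() }
+    }
+
+    fn run(&mut self, program: &[Op]) {
+        for op in program {
+            match *op {
+                Op::SetLine(line) => self.line = line,
+                Op::AdvanceAddress(delta) => self.address += delta,
+                Op::EmitRow => self.rows.push((self.address, self.line)),
+            }
+        }
+    }
+}
+
+fn main() {
+    let mut machine = LineMachine::new();
+    machine.run(&[Op::SetLine(3), Op::EmitRow, Op::AdvanceAddress(4), Op::SetLine(4), Op::EmitRow]);
+    // Prints [(0, 3), (4, 4)]: address 0 maps to line 3, address 4 to line 4.
+    println!("{:?}", machine.rows);
+}
+```
+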
+## Tips
+
+You need to set either `DW_AT_low_pc` and `DW_AT_high_pc` **or** `DW_AT_ranges` of a
+`DW_TAG_compilation_unit` to the range of addresses in the compilation unit. After that you need
+to set `DW_AT_stmt_list` to the `.debug_line` section offset of the line program. Otherwise a
+debugger won't find the line number information. On macOS the debuginfo relocations **must** be
+section relative and not symbol relative.
+See [#303 (comment)](https://github.com/bjorn3/rustc_codegen_cranelift/issues/303#issuecomment-457825535)
+for more information.
+
+# Function debuginfo
+
+## Tips
+
+`DW_TAG_subprogram` requires `DW_AT_name`, `DW_AT_low_pc` and `DW_AT_high_pc` **or** `DW_AT_ranges`.
+Otherwise gdb will silently skip it. When `DW_AT_high_pc` is a length instead of an address, the
+DWARF version must be at least 4.
+
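+Below is a rough sketch, using the `write` half of the `gimli` crate that this backend already
+depends on, of a unit and a `DW_TAG_subprogram` carrying the attributes described above. The
+names, addresses and sizes are made up, and the exact calls are written from memory of gimli 0.22,
+so treat it as an illustration rather than a verified recipe:
+
+```rust
+use gimli::write::{Address, AttributeValue, DwarfUnit, EndianVec, Sections};
+use gimli::{Encoding, Format, RunTimeEndian};
+
+fn main() -> gimli::write::Result<()> {
+    // DWARF version 4, so DW_AT_high_pc may be a length (udata) instead of an address.
+    let encoding = Encoding { format: Format::Dwarf32, version: 4, address_size: 8 };
+    let mut dwarf = DwarfUnit::new(encoding);
+
+    // The compilation unit needs an address range (low_pc/high_pc or ranges),
+    // otherwise debuggers ignore the line number information it points to.
+    let root = dwarf.unit.root();
+    let cu = dwarf.unit.get_mut(root);
+    cu.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0x1000)));
+    cu.set(gimli::DW_AT_high_pc, AttributeValue::Udata(0x200)); // length form, DWARF >= 4
+
+    // gdb silently skips a DW_TAG_subprogram that has no DW_AT_name or no address range.
+    let func_id = dwarf.unit.add(root, gimli::DW_TAG_subprogram);
+    let func = dwarf.unit.get_mut(func_id);
+    func.set(gimli::DW_AT_name, AttributeValue::String(b"main".to_vec()));
+    func.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0x1000)));
+    func.set(gimli::DW_AT_high_pc, AttributeValue::Udata(0x40));
+
+    // Serialize all .debug_* sections into in-memory buffers.
+    let mut sections = Sections::new(EndianVec::new(RunTimeEndian::Little));
+    dwarf.write(&mut sections)
+}
+```
+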
+<details>
+<summary>IRC log of #gdb on irc.freenode.org at 2020-04-23</summary>
+
+```
+(13:46:11) bjorn3: i am writing a backend for a compiler that uses DWARF for debuginfo. for some reason gdb seems to completely ignore all DW_TAG_subprogram, while lldb works fine. any idea what the problem could be?
+(13:47:49) bjorn3: this is the output of llvm-dwarfdump: https://gist.github.com/bjorn3/8a34e333c80f13cb048381e94b4a3756
+(13:47:50) osa1: luispm: why is that problem not exists in 'commands'? (the target vs. host)
+(13:52:16) luispm: osa1, commands is a bit more high level. It executes isolated commands. Breakpoint conditions need to be evaluated in the context of a valid expression. That expression may involve variables, symbols etc.
+(13:52:36) luispm: osa1, Oh, i see your point now. Commands is only executed on the host.
+(13:53:18) luispm: osa1, The commands are not tied to the execution context of the debugged program. The breakpoint conditions determine if execution must stop or continue etc.
+(13:55:00) luispm: bjorn3, Likely something GDB thinks is wrong. Does enabling "set debug dwarf*" show anything?
+(13:56:01) bjorn3: luispm: no
+(13:56:12) bjorn3: for more context: https://github.com/bjorn3/rustc_codegen_cranelift/pull/978
+(13:58:16) osa1 left the room (quit: Quit: osa1).
+(13:58:28) bjorn3: luispm: wait, for b m<TAB> it shows nothing, but when stepping into a new function it does
+(13:58:45) bjorn3: it still doesn't show anything for `info args` though
+(13:58:50) bjorn3: No symbol table info available.
+(14:00:50) luispm: bjorn3, Is that expected given the nature of the binary?
+(14:01:17) bjorn3: b main<TAB> may show nothing as I only set DW_AT_linkage_name and not DW_AT_name
+(14:01:24) bjorn3: info args should work though
+(14:03:26) luispm: Sorry, I'm not sure what's up. There may be a genuine bug there.
+(14:03:41) luispm: tromey (not currently in the channel, but maybe later today) may have more input.
+(14:04:08) bjorn3: okay, thanks luispm!
+(14:04:27) luispm: In the worst case, reporting a bug may prompt someone to look into that as well.
+(14:04:48) luispm: Or send an e-mail to the [email protected] mailing list.
+(14:05:11) bjorn3: I don't know if it is a bug in gdb, or just me producing (slightly) wrong DWARF
+(14:39:40) irker749: gdb: tom binutils-gdb.git:master * 740480b88af / gdb/ChangeLog gdb/darwin-nat.c gdb/inferior.c gdb/inferior.h: Remove iterate_over_inferiors
+(15:22:45) irker749: gdb: tromey binutils-gdb.git:master * ecc6c6066b5 / gdb/ChangeLog gdb/dwarf2/read.c gdb/unittests/lookup_name_info-selftests.c: Fix Ada crash with .debug_names
+(15:23:13) bjorn3: tromey: ping
+(15:23:29) tromey: bjorn3: hey
+(15:24:16) bjorn3: I am writing a backend for a compiler which uses DWARF for debuginfo. I unfortunately can't get gdb to show arguments. lldb works fine.
+(15:25:13) bjorn3: it just says: No symbol table info available.
+(15:25:21) bjorn3: any idea what it could be?
+(15:25:34) bjorn3: dwarfdump output: https://gist.github.com/bjorn3/8a34e333c80f13cb048381e94b4a3756
+(15:26:48) bjorn3: more context: https://github.com/bjorn3/rustc_codegen_cranelift/pull/978
+(15:28:05) tromey: offhand I don't know, but if you can send me an executable I can look
+(15:28:17) bjorn3: how should I send it?
+(15:29:26) tromey: good question
+(15:29:41) tromey: you could try emailing it to tromey at adacore.com
+(15:29:47) tromey: dunno if that will work or not
+(15:30:26) bjorn3: i will try
+(15:37:27) bjorn3: tromey: i sent an email with the subject "gdb args not showing"
+(15:38:29) tromey: will check now
+(15:38:40) bjorn3: thanks!
+(15:42:51) irker749: gdb: tdevries binutils-gdb.git:master * de82891ce5b / gdb/ChangeLog gdb/block.c gdb/block.h gdb/symtab.c gdb/testsuite/ChangeLog gdb/testsuite/gdb.base/decl-before-def-decl.c gdb/testsuite/gdb.base/decl-before-def-def.c gdb/testsuite/gdb.base/decl-before-def.exp: [gdb/symtab] Prefer def over decl (inter-CU case)
+(15:42:52) irker749: gdb: tdevries binutils-gdb.git:master * 70bc38f5138 / gdb/ChangeLog gdb/symtab.c gdb/testsuite/ChangeLog gdb/testsuite/gdb.base/decl-before-def.exp: [gdb/symtab] Prefer def over decl (inter-CU case, with context)
+(15:43:36) tromey: bjorn3: sorry, got distracted.  I have the file now
+(15:45:35) tromey: my first thing when investigating was to enable complaints
+(15:45:37) tromey: so I did
+(15:45:40) tromey: set complaints 1000
+(15:45:42) tromey: then
+(15:45:51) tromey: file -readnow mini_core_hello_world
+(15:46:00) tromey: gdb printed just one style of complaint
+(15:46:07) tromey: During symbol reading: missing name for subprogram DIE at 0x3f7
+(15:46:18) tromey: (which is really pretty good, most compilers manage to generate a bunch)
+(15:46:29) tromey: and then the gdb DWARF reader says
+(15:46:34) tromey:   /* Ignore functions with missing or empty names.  These are actually
+(15:46:34) tromey:      illegal according to the DWARF standard.  */
+(15:46:34) tromey:   if (name == NULL)
+(15:46:34) tromey:     {
+(15:46:37) tromey:       complaint (_("missing name for subprogram DIE at %s"),
+(15:46:40) tromey: 		 sect_offset_str (die->sect_off));
+(15:46:47) tromey: I wonder if that comment is correct though
+(15:47:34) tromey: I guess pedantically maybe it is, DWARF 5 3.3.1 says
+(15:47:43) tromey: The subroutine or entry point entry has a DW_AT_name attribute whose value is
+(15:47:43) tromey: a null-terminated string containing the subroutine or entry point name.
+(15:48:14) bjorn3: i tried set complaints, but it returned complaints for system files. i didn't know about file -readnow.
+(15:48:21) tromey: cool
+(15:48:26) bjorn3: i will try adding DW_AT_name
+(15:48:45) tromey: without readnow unfortunately you get less stuff, because for whatever reason gdb has 2 separate DWARF scanners
+(15:49:02) tromey: sort of anyway
+(15:49:43) tromey: this seems kind of pedantic of gdb, like if there's a linkage name but no DW_AT_name, then why bail?
+(15:50:01) tromey: also what about anonymous functions
+(15:50:17) tromey: but anyway this explains the current situation and if you don't mind adding DW_AT_name, then that's probably simplest
+(15:51:47) bjorn3: i added DW_AT_name.
+(15:51:54) bjorn3: now it says cannot get low and high bounds for subprogram DIE at ...
+(15:52:01) tromey: ugh
+(15:52:10) bjorn3: i will add DW_AT_low_pc and DW_AT_high_pc
+(15:52:15) tromey:   /* Ignore functions with missing or invalid low and high pc attributes.  */
+(15:52:37) tromey: you can also use DW_AT_ranges
+(15:52:55) tromey: if you'd prefer
+(15:53:08) bjorn3: already using DW_AT_ranges for DW_TAG_compilation_unit
+(15:53:19) bjorn3: for individual functions, there are no gaps
+(15:57:07) bjorn3: still the same error with DW_AT_low_pc and DW_AT_high_pc
+(15:57:24) bjorn3: tromey: ^
+(15:58:08) tromey: hmmm
+(15:58:30) bjorn3: should i send the new executable?
+(15:58:31) tromey: send me another executable & I will debug
+(15:58:33) tromey: yep
+(15:59:23) bjorn3: sent as repy of the previous mail
+(16:03:23) tromey: the low PC has DW_FORM_addr, but the high PC has DW_FORM_udata, which seems weird
+(16:03:50) mjw: no
+(16:03:54) tromey: no?
+(16:04:00) mjw: I suggested that for the DWARF standard...
+(16:04:05) mjw: sorry
+(16:04:58) mjw: The idea was that instead of two relocations and two address wide fields, you have one address and a constant offset.
+(16:05:05) tromey: ahh, I see the code now
+(16:05:07) tromey: I forgot about this
+(16:05:18) tromey: 	  if (cu->header.version >= 4 && attr_high->form_is_constant ())
+(16:05:18) tromey: 	    high += low;
+(16:05:36) mjw: that second offset doesn't need a relocation and can often be packed in something small, like an uleb128
+(16:05:51) mjw: using udata might not be ideal though, but is allowed
+(16:05:51) tromey: bjorn3: the problem is that this CU claims to be DWARF 3 but is using a DWARF 4 feature
+(16:05:58) mjw: aha
+(16:05:59) bjorn3: which one?
+(16:06:03) ryoshu: hi
+(16:06:08) tromey:              high_pc              (udata) 107 (+0x00000000000011b0 <_ZN21mini_core_hello_world5start17hec55b7ca64fc434eE>)
+(16:06:08) tromey:
+(16:06:12) ryoshu: just soft ping, I have a queue of patches :)
+(16:06:22) tromey: using this as a length requires DWARF 4
+(16:06:36) tromey: for gdb at least it's fine to always emit DWARF 4
+(16:06:44) bjorn3: trying dwarf 4 now
+(16:06:48) tromey: I think there are some DWARF 5 features still in the works but DWARF 4 should be solid AFAIK
+(16:07:03) tromey: fini
+(16:07:08) tromey: lol wrong window
+(16:07:56) mjw: Maybe you can accept it for DWARF < 4. But if I remember correctly it might be that people might have been using udata as if it was an address...
+(16:08:13) tromey: yeah, I vaguely recall this as well, though I'd expect there to be a comment
+(16:08:21) mjw: Cannot really remember why it needed version >= 4. Maybe there was no good reason?
+(16:08:32) bjorn3: tromey: it works!!!! thanks for all the help!
+(16:08:41) tromey: my pleasure bjorn3
+```
+
+</details>
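+
+To summarize the takeaways from the log above: gdb skips `DW_TAG_subprogram` DIEs that carry only a `DW_AT_linkage_name` but no `DW_AT_name`, it also needs usable `DW_AT_low_pc`/`DW_AT_high_pc` (or `DW_AT_ranges`), and a constant-class `DW_AT_high_pc` (an offset from `DW_AT_low_pc` rather than a second address) is only accepted when the unit declares DWARF version 4 or later. Below is a minimal, hypothetical sketch of emitting such a subprogram DIE with the `gimli` crate; the name, address and offset are placeholders taken from the log, not the code cg_clif actually uses:
+
+```rust
+use gimli::write::{Address, AttributeValue, DwarfUnit};
+
+fn emit_subprogram_die() {
+    // Declare DWARF 4 so that a constant-class DW_AT_high_pc is valid.
+    let encoding =
+        gimli::Encoding { format: gimli::Format::Dwarf32, version: 4, address_size: 8 };
+    let mut dwarf = DwarfUnit::new(encoding);
+
+    let root = dwarf.unit.root();
+    let subprogram = dwarf.unit.add(root, gimli::DW_TAG_subprogram);
+    let die = dwarf.unit.get_mut(subprogram);
+
+    // Without DW_AT_name gdb ignores the subprogram DIE entirely.
+    die.set(gimli::DW_AT_name, AttributeValue::String(b"start".to_vec()));
+
+    // Low PC is an address; high PC is a constant offset from it
+    // (the constant form requires DWARF version >= 4).
+    die.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0x11b0)));
+    die.set(gimli::DW_AT_high_pc, AttributeValue::Udata(107));
+}
+```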
diff --git a/compiler/rustc_codegen_cranelift/docs/env_vars.md b/compiler/rustc_codegen_cranelift/docs/env_vars.md
new file mode 100644
index 0000000..f0a0a6a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/docs/env_vars.md
@@ -0,0 +1,12 @@
+# List of env vars recognized by cg_clif
+
+<dl>
+    <dt>CG_CLIF_JIT_ARGS</dt>
+    <dd>When JIT mode is enabled, pass these arguments to the program.</dd>
+    <dt>CG_CLIF_INCR_CACHE_DISABLED</dt>
+    <dd>Don't cache object files in the incremental cache. Useful during development of cg_clif
+    to make it possible to use incremental mode for all analyses performed by rustc without
+    reusing cached object files whose content would be stale after a change to cg_clif.</dd>
+    <dt>CG_CLIF_DISPLAY_CG_TIME</dt>
+    <dd>If "1", display the time it took to perform codegen for a crate.</dd>
+</dl>
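+
+As a rough, hypothetical sketch of how such variables are typically consumed (not the exact code used by cg_clif), a boolean flag like `CG_CLIF_DISPLAY_CG_TIME` is compared against the string "1", and `CG_CLIF_JIT_ARGS` is split on whitespace before being handed to the JITed program:
+
+```rust
+/// Arguments to pass to the program when running in JIT mode.
+fn jit_args_from_env() -> Vec<String> {
+    std::env::var("CG_CLIF_JIT_ARGS")
+        .unwrap_or_default()
+        .split_whitespace()
+        .map(|arg| arg.to_string())
+        .collect()
+}
+
+/// The flag counts as enabled only when set to exactly "1".
+fn display_cg_time() -> bool {
+    std::env::var("CG_CLIF_DISPLAY_CG_TIME").as_deref() == Ok("1")
+}
+```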
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_example.rs b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
new file mode 100644
index 0000000..dc2ad4c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
@@ -0,0 +1,37 @@
+#![feature(start, box_syntax, alloc_system, core_intrinsics, alloc_prelude, alloc_error_handler)]
+#![no_std]
+
+extern crate alloc;
+extern crate alloc_system;
+
+use alloc::prelude::v1::*;
+
+use alloc_system::System;
+
+#[global_allocator]
+static ALLOC: System = System;
+
+#[link(name = "c")]
+extern "C" {
+    fn puts(s: *const u8) -> i32;
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+    core::intrinsics::abort();
+}
+
+#[alloc_error_handler]
+fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
+    core::intrinsics::abort();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+    let world: Box<&str> = box "Hello World!\0";
+    unsafe {
+        puts(*world as *const str as *const u8);
+    }
+
+    0
+}
diff --git a/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
new file mode 100644
index 0000000..0b0039a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
@@ -0,0 +1,82 @@
+// Adapted from rustc run-pass test suite
+
+#![feature(no_core, arbitrary_self_types, box_syntax)]
+#![feature(rustc_attrs)]
+
+#![feature(start, lang_items)]
+#![no_core]
+
+extern crate mini_core;
+
+use mini_core::*;
+
+macro_rules! assert_eq {
+    ($l:expr, $r: expr) => {
+        if $l != $r {
+            panic(stringify!($l != $r));
+        }
+    }
+}
+
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &*self.0
+    }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.0
+    }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+trait Trait {
+    // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+    // without unsized_locals), but wrappers around `Self` currently are not.
+    // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+    // fn wrapper(self: Wrapper<Self>) -> i32;
+    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+        **self
+    }
+    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+        **self
+    }
+    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+        ***self
+    }
+}
+
+#[start]
+fn main(_: isize, _: *const *const u8) -> isize {
+    let pw = Ptr(box Wrapper(5)) as Ptr<Wrapper<dyn Trait>>;
+    assert_eq!(pw.ptr_wrapper(), 5);
+
+    let wp = Wrapper(Ptr(box 6)) as Wrapper<Ptr<dyn Trait>>;
+    assert_eq!(wp.wrapper_ptr(), 6);
+
+    let wpw = Wrapper(Ptr(box Wrapper(7))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+    assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+
+    0
+}
diff --git a/compiler/rustc_codegen_cranelift/example/dst-field-align.rs b/compiler/rustc_codegen_cranelift/example/dst-field-align.rs
new file mode 100644
index 0000000..6c338e9
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/dst-field-align.rs
@@ -0,0 +1,67 @@
+// run-pass
+#![allow(dead_code)]
+struct Foo<T: ?Sized> {
+    a: u16,
+    b: T
+}
+
+trait Bar {
+    fn get(&self) -> usize;
+}
+
+impl Bar for usize {
+    fn get(&self) -> usize { *self }
+}
+
+struct Baz<T: ?Sized> {
+    a: T
+}
+
+struct HasDrop<T: ?Sized> {
+    ptr: Box<usize>,
+    data: T
+}
+
+fn main() {
+    // Test that zero-offset works properly
+    let b : Baz<usize> = Baz { a: 7 };
+    assert_eq!(b.a.get(), 7);
+    let b : &Baz<dyn Bar> = &b;
+    assert_eq!(b.a.get(), 7);
+
+    // Test that the field is aligned properly
+    let f : Foo<usize> = Foo { a: 0, b: 11 };
+    assert_eq!(f.b.get(), 11);
+    let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+
+    let f : &Foo<dyn Bar> = &f;
+    let ptr2 : *const u8 = &f.b as *const _ as *const u8;
+    assert_eq!(f.b.get(), 11);
+
+    // The pointers should be the same
+    assert_eq!(ptr1, ptr2);
+
+    // Test that nested DSTs work properly
+    let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+    assert_eq!(f.b.b.get(), 17);
+    let f : &Foo<Foo<dyn Bar>> = &f;
+    assert_eq!(f.b.b.get(), 17);
+
+    // Test that get the pointer via destructuring works
+
+    let f : Foo<usize> = Foo { a: 0, b: 11 };
+    let f : &Foo<dyn Bar> = &f;
+    let &Foo { a: _, b: ref bar } = f;
+    assert_eq!(bar.get(), 11);
+
+    // Make sure that drop flags don't screw things up
+
+    let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+        ptr: Box::new(0),
+        data: Baz { a: [1,2,3,4] }
+    };
+    assert_eq!([1,2,3,4], d.data.a);
+
+    let d : &HasDrop<Baz<[i32]>> = &d;
+    assert_eq!(&[1,2,3,4], &d.data.a);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/example.rs b/compiler/rustc_codegen_cranelift/example/example.rs
new file mode 100644
index 0000000..d5c122b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/example.rs
@@ -0,0 +1,208 @@
+#![feature(no_core, unboxed_closures)]
+#![no_core]
+#![allow(dead_code)]
+
+extern crate mini_core;
+
+use mini_core::*;
+
+pub fn abc(a: u8) -> u8 {
+    a * 2
+}
+
+pub fn bcd(b: bool, a: u8) -> u8 {
+    if b {
+        a * 2
+    } else {
+        a * 3
+    }
+}
+
+pub fn call() {
+    abc(42);
+}
+
+pub fn indirect_call() {
+    let f: fn() = call;
+    f();
+}
+
+pub enum BoolOption {
+    Some(bool),
+    None,
+}
+
+pub fn option_unwrap_or(o: BoolOption, d: bool) -> bool {
+    match o {
+        BoolOption::Some(b) => b,
+        BoolOption::None => d,
+    }
+}
+
+pub fn ret_42() -> u8 {
+    42
+}
+
+pub fn return_str() -> &'static str {
+    "hello world"
+}
+
+pub fn promoted_val() -> &'static u8 {
+    &(1 * 2)
+}
+
+pub fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 {
+    abc as *const u8
+}
+
+pub fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool {
+    a == b
+}
+
+pub fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+    (
+        a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+        b as u32,
+    )
+}
+
+pub fn char_cast(c: char) -> u8 {
+    c as u8
+}
+
+pub struct DebugTuple(());
+
+pub fn debug_tuple() -> DebugTuple {
+    DebugTuple(())
+}
+
+pub fn size_of<T>() -> usize {
+    intrinsics::size_of::<T>()
+}
+
+pub fn use_size_of() -> usize {
+    size_of::<u64>()
+}
+
+pub unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) {
+    intrinsics::copy::<u8>(src, dst, 1);
+}
+
+pub unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) {
+    let copy2 = &intrinsics::copy::<u8>;
+    copy2(src, dst, 1);
+}
+
+pub const ABC: u8 = 6 * 7;
+
+pub fn use_const() -> u8 {
+    ABC
+}
+
+pub fn call_closure_3arg() {
+    (|_, _, _| {})(0u8, 42u16, 0u8)
+}
+
+pub fn call_closure_2arg() {
+    (|_, _| {})(0u8, 42u16)
+}
+
+pub struct IsNotEmpty;
+
+impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty {
+    type Output = (u8, u8);
+
+    #[inline]
+    extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) {
+        self.call_mut(arg)
+    }
+}
+
+impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty {
+    #[inline]
+    extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) {
+        (0, 42)
+    }
+}
+
+pub fn call_is_not_empty() {
+    IsNotEmpty.call_once((&(&[0u16] as &[_]),));
+}
+
+pub fn eq_char(a: char, b: char) -> bool {
+    a == b
+}
+
+pub unsafe fn transmute(c: char) -> u32 {
+    intrinsics::transmute(c)
+}
+
+pub unsafe fn deref_str_ptr(s: *const str) -> &'static str {
+    &*s
+}
+
+pub fn use_array(arr: [u8; 3]) -> u8 {
+    arr[1]
+}
+
+pub fn repeat_array() -> [u8; 3] {
+    [0; 3]
+}
+
+pub fn array_as_slice(arr: &[u8; 3]) -> &[u8] {
+    arr
+}
+
+pub unsafe fn use_ctlz_nonzero(a: u16) -> u16 {
+    intrinsics::ctlz_nonzero(a)
+}
+
+pub fn ptr_as_usize(ptr: *const u8) -> usize {
+    ptr as usize
+}
+
+pub fn float_cast(a: f32, b: f64) -> (f64, f32) {
+    (a as f64, b as f32)
+}
+
+pub fn int_to_float(a: u8, b: i32) -> (f64, f32) {
+    (a as f64, b as f32)
+}
+
+pub fn make_array() -> [u8; 3] {
+    [42, 0, 5]
+}
+
+pub fn some_promoted_tuple() -> &'static (&'static str, &'static str) {
+    &("abc", "some")
+}
+
+pub fn index_slice(s: &[u8]) -> u8 {
+    s[2]
+}
+
+pub struct StrWrapper {
+    s: str,
+}
+
+pub fn str_wrapper_get(w: &StrWrapper) -> &str {
+    &w.s
+}
+
+pub fn i16_as_i8(a: i16) -> i8 {
+    a as i8
+}
+
+pub struct Unsized(u8, str);
+
+pub fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 {
+    &u.0
+}
+
+pub fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str {
+    &u.1
+}
+
+pub fn reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 {
+    a.0
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core.rs b/compiler/rustc_codegen_cranelift/example/mini_core.rs
new file mode 100644
index 0000000..ce07fe8
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mini_core.rs
@@ -0,0 +1,613 @@
+#![feature(
+    no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
+    untagged_unions, decl_macro, rustc_attrs, transparent_unions, optin_builtin_traits,
+    thread_local,
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {}
+
+// &T -> &U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+#[lang = "receiver"]
+pub trait Receiver {}
+
+impl<T: ?Sized> Receiver for &T {}
+impl<T: ?Sized> Receiver for &mut T {}
+impl<T: ?Sized> Receiver for Box<T> {}
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for u128 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for char {}
+unsafe impl<'a, T: ?Sized> Copy for &'a T {}
+unsafe impl<T: ?Sized> Copy for *const T {}
+unsafe impl<T: ?Sized> Copy for *mut T {}
+unsafe impl<T: Copy> Copy for Option<T> {}
+
+#[lang = "sync"]
+pub unsafe trait Sync {}
+
+unsafe impl Sync for bool {}
+unsafe impl Sync for u8 {}
+unsafe impl Sync for u16 {}
+unsafe impl Sync for u32 {}
+unsafe impl Sync for u64 {}
+unsafe impl Sync for usize {}
+unsafe impl Sync for i8 {}
+unsafe impl Sync for i16 {}
+unsafe impl Sync for i32 {}
+unsafe impl Sync for isize {}
+unsafe impl Sync for char {}
+unsafe impl<'a, T: ?Sized> Sync for &'a T {}
+unsafe impl Sync for [u8; 16] {}
+
+#[lang = "freeze"]
+unsafe auto trait Freeze {}
+
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "not"]
+pub trait Not {
+    type Output;
+
+    fn not(self) -> Self::Output;
+}
+
+impl Not for bool {
+    type Output = bool;
+
+    fn not(self) -> bool {
+        !self
+    }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+    type Output;
+
+    #[must_use]
+    fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        self * rhs
+    }
+}
+
+impl Mul for usize {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        self * rhs
+    }
+}
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for u8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i16 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+#[lang = "rem"]
+pub trait Rem<RHS = Self> {
+    type Output;
+
+    fn rem(self, rhs: RHS) -> Self::Output;
+}
+
+impl Rem for usize {
+    type Output = Self;
+
+    fn rem(self, rhs: Self) -> Self {
+        self % rhs
+    }
+}
+
+#[lang = "bitor"]
+pub trait BitOr<RHS = Self> {
+    type Output;
+
+    #[must_use]
+    fn bitor(self, rhs: RHS) -> Self::Output;
+}
+
+impl BitOr for bool {
+    type Output = bool;
+
+    fn bitor(self, rhs: bool) -> bool {
+        self | rhs
+    }
+}
+
+impl<'a> BitOr<bool> for &'a bool {
+    type Output = bool;
+
+    fn bitor(self, rhs: bool) -> bool {
+        *self | rhs
+    }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+    fn eq(&self, other: &Rhs) -> bool;
+    fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+    fn eq(&self, other: &u8) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u8) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u16 {
+    fn eq(&self, other: &u16) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u16) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u32 {
+    fn eq(&self, other: &u32) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u32) -> bool {
+        (*self) != (*other)
+    }
+}
+
+
+impl PartialEq for u64 {
+    fn eq(&self, other: &u64) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u64) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u128 {
+    fn eq(&self, other: &u128) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u128) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for usize {
+    fn eq(&self, other: &usize) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &usize) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for i8 {
+    fn eq(&self, other: &i8) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &i8) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for i32 {
+    fn eq(&self, other: &i32) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &i32) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for isize {
+    fn eq(&self, other: &isize) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &isize) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for char {
+    fn eq(&self, other: &char) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &char) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl<T: ?Sized> PartialEq for *const T {
+    fn eq(&self, other: &*const T) -> bool {
+        *self == *other
+    }
+    fn ne(&self, other: &*const T) -> bool {
+        *self != *other
+    }
+}
+
+impl <T: PartialEq> PartialEq for Option<T> {
+    fn eq(&self, other: &Self) -> bool {
+        match (self, other) {
+            (Some(lhs), Some(rhs)) => *lhs == *rhs,
+            (None, None) => true,
+            _ => false,
+        }
+    }
+
+    fn ne(&self, other: &Self) -> bool {
+        match (self, other) {
+            (Some(lhs), Some(rhs)) => *lhs != *rhs,
+            (None, None) => false,
+            _ => true,
+        }
+    }
+}
+
+#[lang = "neg"]
+pub trait Neg {
+    type Output;
+
+    fn neg(self) -> Self::Output;
+}
+
+impl Neg for i8 {
+    type Output = i8;
+
+    fn neg(self) -> i8 {
+        -self
+    }
+}
+
+impl Neg for i16 {
+    type Output = i16;
+
+    fn neg(self) -> i16 {
+        -self
+    }
+}
+
+impl Neg for isize {
+    type Output = isize;
+
+    fn neg(self) -> isize {
+        -self
+    }
+}
+
+impl Neg for f32 {
+    type Output = f32;
+
+    fn neg(self) -> f32 {
+        -self
+    }
+}
+
+pub enum Option<T> {
+    Some(T),
+    None,
+}
+
+pub use Option::*;
+
+#[lang = "phantom_data"]
+pub struct PhantomData<T: ?Sized>;
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+    #[lang = "fn_once_output"]
+    type Output;
+
+    extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+    extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "panic"]
+#[track_caller]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\n\0" as *const str as *const i8);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() -> ! {
+    loop {}
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+#[lang = "deref"]
+pub trait Deref {
+    type Target: ?Sized;
+
+    fn deref(&self) -> &Self::Target;
+}
+
+#[lang = "owned_box"]
+pub struct Box<T: ?Sized>(*mut T);
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+impl<T: ?Sized> Drop for Box<T> {
+    fn drop(&mut self) {
+        // drop is currently performed by compiler.
+    }
+}
+
+impl<T> Deref for Box<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &**self
+    }
+}
+
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
+    libc::malloc(size)
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
+    libc::free(ptr as *mut u8);
+}
+
+#[lang = "drop"]
+pub trait Drop {
+    fn drop(&mut self);
+}
+
+#[lang = "manually_drop"]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+    pub value: T,
+}
+
+#[lang = "maybe_uninit"]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+    pub uninit: (),
+    pub value: ManuallyDrop<T>,
+}
+
+pub mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+        pub fn size_of<T>() -> usize;
+        pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
+        pub fn min_align_of<T>() -> usize;
+        pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
+        pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+        pub fn transmute<T, U>(e: T) -> U;
+        pub fn ctlz_nonzero<T>(x: T) -> T;
+        pub fn needs_drop<T>() -> bool;
+        pub fn bitreverse<T>(x: T) -> T;
+        pub fn bswap<T>(x: T) -> T;
+        pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+    }
+}
+
+pub mod libc {
+    #[cfg_attr(not(windows), link(name = "c"))]
+    #[cfg_attr(windows, link(name = "msvcrt"))]
+    extern "C" {
+        pub fn puts(s: *const i8) -> i32;
+        pub fn printf(format: *const i8, ...) -> i32;
+        pub fn malloc(size: usize) -> *mut u8;
+        pub fn free(ptr: *mut u8);
+        pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
+        pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
+        pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
+    }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+    type Output: ?Sized;
+    fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+impl<T> Index<usize> for [T] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+extern {
+    type VaListImpl;
+}
+
+#[lang = "va_list"]
+#[repr(transparent)]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro stringify($($t:tt)*) { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro file() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro line() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro cfg() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro global_asm() { /* compiler built-in */ }
+
+pub static A_STATIC: u8 = 42;
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+#[no_mangle]
+pub fn get_tls() -> u8 {
+    #[thread_local]
+    static A: u8 = 42;
+
+    A
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
new file mode 100644
index 0000000..4a8375a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
@@ -0,0 +1,470 @@
+#![feature(
+    no_core, start, lang_items, box_syntax, never_type, linkage,
+    extern_types, thread_local
+)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+unsafe extern "C" fn my_puts(s: *const i8) {
+    puts(s);
+}
+
+#[lang = "termination"]
+trait Termination {
+    fn report(self) -> i32;
+}
+
+impl Termination for () {
+    fn report(self) -> i32 {
+        unsafe {
+            NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+            *NUM_REF as i32
+        }
+    }
+}
+
+trait SomeTrait {
+    fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+    fn object_safe(&self) {
+        unsafe {
+            puts(*self as *const str as *const i8);
+        }
+    }
+}
+
+struct NoisyDrop {
+    text: &'static str,
+    inner: NoisyDropInner,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+    fn drop(&mut self) {
+        unsafe {
+            puts(self.text as *const str as *const i8);
+        }
+    }
+}
+
+impl Drop for NoisyDropInner {
+    fn drop(&mut self) {
+        unsafe {
+            puts("Inner got dropped!\0" as *const str as *const i8);
+        }
+    }
+}
+
+impl SomeTrait for NoisyDrop {
+    fn object_safe(&self) {}
+}
+
+enum Ordering {
+    Less = -1,
+    Equal = 0,
+    Greater = 1,
+}
+
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+    main: fn() -> T,
+    argc: isize,
+    argv: *const *const u8,
+) -> isize {
+    if argc == 3 {
+        unsafe { puts(*argv as *const i8); }
+        unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+        unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+    }
+
+    main().report();
+    0
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
+macro_rules! assert {
+    ($e:expr) => {
+        if !$e {
+            panic(stringify!(! $e));
+        }
+    };
+}
+
+macro_rules! assert_eq {
+    ($l:expr, $r: expr) => {
+        if $l != $r {
+            panic(stringify!($l != $r));
+        }
+    }
+}
+
+struct Unique<T: ?Sized> {
+    pointer: *const T,
+    _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+unsafe fn zeroed<T>() -> T {
+    let mut uninit = MaybeUninit { uninit: () };
+    intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+    uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+    (0, 0)
+}
+
+fn call_return_u128_pair() {
+    return_u128_pair();
+}
+
+fn main() {
+    take_unique(Unique {
+        pointer: 0 as *const (),
+        _marker: PhantomData,
+    });
+    take_f32(0.1);
+
+    call_return_u128_pair();
+
+    let slice = &[0, 1] as &[i32];
+    let slice_ptr = slice as *const [i32] as *const i32;
+
+    assert_eq!(slice_ptr as usize % 4, 0);
+
+    //return;
+
+    unsafe {
+        printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+        let hello: &[u8] = b"Hello\0" as &[u8; 6];
+        let ptr: *const i8 = hello as *const [u8] as *const i8;
+        puts(ptr);
+
+        let world: Box<&str> = box "World!\0";
+        puts(*world as *const str as *const i8);
+        world as Box<dyn SomeTrait>;
+
+        assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+        assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+        assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+        assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+        assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+        assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+        let chars = &['C', 'h', 'a', 'r', 's'];
+        let chars = chars as &[char];
+        assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+        let a: &dyn SomeTrait = &"abc\0";
+        a.object_safe();
+
+        assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+        assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+        assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+        assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+        assert!(!intrinsics::needs_drop::<u8>());
+        assert!(intrinsics::needs_drop::<NoisyDrop>());
+
+        Unique {
+            pointer: 0 as *const &str,
+            _marker: PhantomData,
+        } as Unique<dyn SomeTrait>;
+
+        struct MyDst<T: ?Sized>(T);
+
+        intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+        struct Foo {
+            x: u8,
+            y: !,
+        }
+
+        unsafe fn uninitialized<T>() -> T {
+            MaybeUninit { uninit: () }.value.value
+        }
+
+        zeroed::<(u8, u8)>();
+        #[allow(unreachable_code)]
+        {
+            if false {
+                zeroed::<!>();
+                zeroed::<Foo>();
+                uninitialized::<Foo>();
+            }
+        }
+    }
+
+    let _ = box NoisyDrop {
+        text: "Boxed outer got dropped!\0",
+        inner: NoisyDropInner,
+    } as Box<dyn SomeTrait>;
+
+    const FUNC_REF: Option<fn()> = Some(main);
+    match FUNC_REF {
+        Some(_) => {},
+        None => assert!(false),
+    }
+
+    match Ordering::Less {
+        Ordering::Less => {},
+        _ => assert!(false),
+    }
+
+    [NoisyDropInner, NoisyDropInner];
+
+    let x = &[0u32, 42u32] as &[u32];
+    match x {
+        [] => assert_eq!(0u32, 1),
+        [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+    }
+
+    assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+    #[cfg(not(jit))]
+    {
+        extern {
+            #[linkage = "extern_weak"]
+            static ABC: *const u8;
+        }
+
+        {
+            extern {
+                #[linkage = "extern_weak"]
+                static ABC: *const u8;
+            }
+        }
+
+        unsafe { assert_eq!(ABC as usize, 0); }
+    }
+
+    &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
+    let f = 1000.0;
+    assert_eq!(f as u8, 255);
+    let f2 = -1000.0;
+    assert_eq!(f2 as i8, -128);
+    assert_eq!(f2 as u8, 0);
+
+    static ANOTHER_STATIC: &u8 = &A_STATIC;
+    assert_eq!(*ANOTHER_STATIC, 42);
+
+    check_niche_behavior();
+
+    extern "C" {
+        type ExternType;
+    }
+
+    struct ExternTypeWrapper {
+        _a: ExternType,
+    }
+
+    let nullptr = 0 as *const ();
+    let extern_nullptr = nullptr as *const ExternTypeWrapper;
+    extern_nullptr as *const ();
+    let slice_ptr = &[] as *const [u8];
+    slice_ptr as *const u8;
+
+    let repeat = [Some(42); 2];
+    assert_eq!(repeat[0], Some(42));
+    assert_eq!(repeat[1], Some(42));
+
+    from_decimal_string();
+
+    #[cfg(not(jit))]
+    test_tls();
+
+    #[cfg(all(not(jit), target_os = "linux"))]
+    unsafe {
+        global_asm_test();
+    }
+}
+
+#[cfg(all(not(jit), target_os = "linux"))]
+extern "C" {
+    fn global_asm_test();
+}
+
+#[cfg(all(not(jit), target_os = "linux"))]
+global_asm! {
+    "
+    .global global_asm_test
+    global_asm_test:
+    // comment that would normally be removed by LLVM
+    ret
+    "
+}
+
+#[repr(C)]
+enum c_void {
+    _1,
+    _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+    __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+extern "C" {
+    fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+    fn pthread_create(
+        native: *mut pthread_t,
+        attr: *const pthread_attr_t,
+        f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+        value: *mut c_void
+    ) -> c_int;
+
+    fn pthread_join(
+        native: pthread_t,
+        value: *mut *mut c_void
+    ) -> c_int;
+}
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+    unsafe { TLS = 0; }
+    0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+    unsafe {
+        let mut attr: pthread_attr_t = zeroed();
+        let mut thread: pthread_t = 0;
+
+        assert_eq!(TLS, 42);
+
+        if pthread_attr_init(&mut attr) != 0 {
+            assert!(false);
+        }
+
+        if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
+            assert!(false);
+        }
+
+        let mut res = 0 as *mut c_void;
+        pthread_join(thread, &mut res);
+
+        // TLS of main thread must not have been changed by the other thread.
+        assert_eq!(TLS, 42);
+
+        puts("TLS works!\n\0" as *const str as *const i8);
+    }
+}
+
+// Copied ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+    V1 { f: bool },
+    V2 { f: Infallible },
+    V3,
+    V4,
+}
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+    V1 { f: bool },
+
+    /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+    _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+    _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+    _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+    _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+    _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+    _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+    _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+    _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+    _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+    _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+    _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+    _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+    _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+    _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+    _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+    _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+    _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+    _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+    _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+    _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+    _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+    _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+    _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+    _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+    _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+    _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+    _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+    _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+    _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+    _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+    _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+    V3,
+    V4,
+}
+
+fn check_niche_behavior () {
+    if let E1::V2 { .. } = (E1::V1 { f: true }) {
+        intrinsics::abort();
+    }
+
+    if let E2::V1 { .. } = E2::V3::<Infallible> {
+        intrinsics::abort();
+    }
+}
+
+fn from_decimal_string() {
+    loop {
+        let multiplier = 1;
+
+        take_multiplier_ref(&multiplier);
+
+        if multiplier == 1 {
+            break;
+        }
+
+        unreachable();
+    }
+}
+
+fn take_multiplier_ref(_multiplier: &u128) {}
+
+fn unreachable() -> ! {
+    panic("unreachable")
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mod_bench.rs b/compiler/rustc_codegen_cranelift/example/mod_bench.rs
new file mode 100644
index 0000000..bc65221
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mod_bench.rs
@@ -0,0 +1,35 @@
+#![feature(start, box_syntax, core_intrinsics, lang_items)]
+#![no_std]
+
+#[link(name = "c")]
+extern {}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+    core::intrinsics::abort();
+}
+
+#[lang="eh_personality"]
+fn eh_personality(){}
+
+// Required for rustc_codegen_llvm
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+    core::intrinsics::unreachable();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+    for i in 2..10_000_000 {
+        black_box((i + 1) % i);
+    }
+
+    0
+}
+
+#[inline(never)]
+fn black_box(i: u32) {
+    if i != 1 {
+        core::intrinsics::abort();
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/std_example.rs b/compiler/rustc_codegen_cranelift/example/std_example.rs
new file mode 100644
index 0000000..cb512a4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/std_example.rs
@@ -0,0 +1,343 @@
+#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
+
+#[cfg(target_arch = "x86_64")]
+use std::arch::x86_64::*;
+use std::io::Write;
+use std::ops::Generator;
+
+fn main() {
+    println!("{:?}", std::env::args().collect::<Vec<_>>());
+
+    let mutex = std::sync::Mutex::new(());
+    let _guard = mutex.lock().unwrap();
+
+    let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
+    let stderr = ::std::io::stderr();
+    let mut stderr = stderr.lock();
+
+    std::thread::spawn(move || {
+        println!("Hello from another thread!");
+    });
+
+    writeln!(stderr, "some {} text", "<unknown>").unwrap();
+
+    let _ = std::process::Command::new("true").env("c", "d").spawn();
+
+    println!("cargo:rustc-link-lib=z");
+
+    static ONCE: std::sync::Once = std::sync::Once::new();
+    ONCE.call_once(|| {});
+
+    let _eq = LoopState::Continue(()) == LoopState::Break(());
+
+    // Make sure ByValPair values with differently sized components are correctly passed
+    map(None::<(u8, Box<Instruction>)>);
+
+    println!("{}", 2.3f32.exp());
+    println!("{}", 2.3f32.exp2());
+    println!("{}", 2.3f32.abs());
+    println!("{}", 2.3f32.sqrt());
+    println!("{}", 2.3f32.floor());
+    println!("{}", 2.3f32.ceil());
+    println!("{}", 2.3f32.min(1.0));
+    println!("{}", 2.3f32.max(1.0));
+    println!("{}", 2.3f32.powi(2));
+    println!("{}", 2.3f32.log2());
+    assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
+    println!("{}", 2.3f32.powf(2.0));
+
+    assert_eq!(-128i8, (-128i8).saturating_sub(1));
+    assert_eq!(127i8, 127i8.saturating_sub(-128));
+    assert_eq!(-128i8, (-128i8).saturating_add(-128));
+    assert_eq!(127i8, 127i8.saturating_add(1));
+
+    assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+    assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+
+    let _d = 0i128.checked_div(2i128);
+    let _d = 0u128.checked_div(2u128);
+    assert_eq!(1u128 + 2, 3);
+
+    assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
+    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
+    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
+
+    let tmp = 353985398u128;
+    assert_eq!(tmp * 932490u128, 330087843781020u128);
+
+    let tmp = -0x1234_5678_9ABC_DEF0i64;
+    assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
+
+    // Check that all u/i128 <-> float casts work correctly.
+    let hundred_u128 = 100u128;
+    let hundred_i128 = 100i128;
+    let hundred_f32 = 100.0f32;
+    let hundred_f64 = 100.0f64;
+    assert_eq!(hundred_u128 as f32, 100.0);
+    assert_eq!(hundred_u128 as f64, 100.0);
+    assert_eq!(hundred_f32 as u128, 100);
+    assert_eq!(hundred_f64 as u128, 100);
+    assert_eq!(hundred_i128 as f32, 100.0);
+    assert_eq!(hundred_i128 as f64, 100.0);
+    assert_eq!(hundred_f32 as i128, 100);
+    assert_eq!(hundred_f64 as i128, 100);
+
+    // Test signed 128-bit comparisons
+    let max = usize::MAX as i128;
+    if 100i128 < 0i128 || 100i128 > max {
+        panic!();
+    }
+
+    test_checked_mul();
+
+    let _a = 1u32 << 2u8;
+
+    let empty: [i32; 0] = [];
+    assert!(empty.is_sorted());
+
+    println!("{:?}", std::intrinsics::caller_location());
+
+    #[cfg(target_arch = "x86_64")]
+    unsafe {
+        test_simd();
+    }
+
+    Box::pin(move |mut _task_context| {
+        yield ();
+    }).as_mut().resume(0);
+
+    #[derive(Copy, Clone)]
+    enum Nums {
+        NegOne = -1,
+    }
+
+    let kind = Nums::NegOne;
+    assert_eq!(-1i128, kind as i128);
+
+    let options = [1u128];
+    match options[0] {
+        1 => (),
+        0 => loop {},
+        v => panic(v),
+    };
+}
+
+fn panic(_: u128) {
+    panic!();
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_simd() {
+    assert!(is_x86_feature_detected!("sse2"));
+
+    let x = _mm_setzero_si128();
+    let y = _mm_set1_epi16(7);
+    let or = _mm_or_si128(x, y);
+    let cmp_eq = _mm_cmpeq_epi8(y, y);
+    let cmp_lt = _mm_cmplt_epi8(y, y);
+
+    assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
+    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
+    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
+
+    test_mm_slli_si128();
+    test_mm_movemask_epi8();
+    test_mm256_movemask_epi8();
+    test_mm_add_epi8();
+    test_mm_add_pd();
+    test_mm_cvtepi8_epi16();
+    test_mm_cvtsi128_si64();
+
+    test_mm_extract_epi8();
+    test_mm_insert_epi16();
+
+    let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
+    assert_eq!(mask1, 1);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_slli_si128() {
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, 1);
+    let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+    assert_eq_m128i(r, e);
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, 15);
+    let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
+    assert_eq_m128i(r, e);
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, 16);
+    assert_eq_m128i(r, _mm_set1_epi8(0));
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, -1);
+    assert_eq_m128i(_mm_set1_epi8(0), r);
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, -0x80000000);
+    assert_eq_m128i(r, _mm_set1_epi8(0));
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_movemask_epi8() {
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
+        0b0101, 0b1111_0000u8 as i8, 0, 0,
+        0, 0, 0b1111_0000u8 as i8, 0b0101,
+        0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
+    );
+    let r = _mm_movemask_epi8(a);
+    assert_eq!(r, 0b10100100_00100101);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "avx2")]
+unsafe fn test_mm256_movemask_epi8() {
+    let a = _mm256_set1_epi8(-1);
+    let r = _mm256_movemask_epi8(a);
+    let e = -1;
+    assert_eq!(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_epi8() {
+    let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+    #[rustfmt::skip]
+    let b = _mm_setr_epi8(
+        16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+    );
+    let r = _mm_add_epi8(a, b);
+    #[rustfmt::skip]
+    let e = _mm_setr_epi8(
+        16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
+    );
+    assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_pd() {
+    let a = _mm_setr_pd(1.0, 2.0);
+    let b = _mm_setr_pd(5.0, 10.0);
+    let r = _mm_add_pd(a, b);
+    assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
+}
+
+#[cfg(target_arch = "x86_64")]
+fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
+    unsafe {
+        assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
+    }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
+    if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
+        panic!("{:?} != {:?}", a, b);
+    }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_cvtsi128_si64() {
+    let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
+    assert_eq!(r, 5);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_cvtepi8_epi16() {
+    let a = _mm_set1_epi8(10);
+    let r = _mm_cvtepi8_epi16(a);
+    let e = _mm_set1_epi16(10);
+    assert_eq_m128i(r, e);
+    let a = _mm_set1_epi8(-10);
+    let r = _mm_cvtepi8_epi16(a);
+    let e = _mm_set1_epi16(-10);
+    assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_extract_epi8() {
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        -1, 1, 2, 3, 4, 5, 6, 7,
+        8, 9, 10, 11, 12, 13, 14, 15
+    );
+    let r1 = _mm_extract_epi8(a, 0);
+    let r2 = _mm_extract_epi8(a, 19);
+    assert_eq!(r1, 0xFF);
+    assert_eq!(r2, 3);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_insert_epi16() {
+    let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+    let r = _mm_insert_epi16(a, 9, 0);
+    let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
+    assert_eq_m128i(r, e);
+}
+
+fn test_checked_mul() {
+    let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
+    assert_eq!(u, None);
+
+    assert_eq!(1u8.checked_mul(255u8), Some(255u8));
+    assert_eq!(255u8.checked_mul(255u8), None);
+    assert_eq!(1i8.checked_mul(127i8), Some(127i8));
+    assert_eq!(127i8.checked_mul(127i8), None);
+    assert_eq!((-1i8).checked_mul(-127i8), Some(127i8));
+    assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));
+    assert_eq!((-128i8).checked_mul(-128i8), None);
+
+    assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));
+    assert_eq!(u64::MAX.checked_mul(u64::MAX), None);
+    assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));
+    assert_eq!(i64::MAX.checked_mul(i64::MAX), None);
+    assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));
+    assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));
+    assert_eq!(i64::MIN.checked_mul(i64::MIN), None);
+}
+
+#[derive(PartialEq)]
+enum LoopState {
+    Continue(()),
+    Break(())
+}
+
+pub enum Instruction {
+    Increment,
+    Loop,
+}
+
+fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
+    match a {
+        None => None,
+        Some((_, instr)) => Some(instr),
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs b/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs
new file mode 100644
index 0000000..2cb8478
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs
@@ -0,0 +1,97 @@
+// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs
+
+// Test that array subslice patterns are correctly handled in const evaluation.
+
+// run-pass
+
+#[derive(PartialEq, Debug, Clone)]
+struct N(u8);
+
+#[derive(PartialEq, Debug, Clone)]
+struct Z;
+
+macro_rules! n {
+    ($($e:expr),* $(,)?) => {
+        [$(N($e)),*]
+    }
+}
+
+// This macro has an unused variable so that it can be repeated based on the
+// number of times a repeated variable (`$e` in `z`) occurs.
+macro_rules! zed {
+    ($e:expr) => { Z }
+}
+
+macro_rules! z {
+    ($($e:expr),* $(,)?) => {
+        [$(zed!($e)),*]
+    }
+}
+
+// Compare constant evaluation and runtime evaluation of a given expression.
+macro_rules! compare_evaluation {
+    ($e:expr, $t:ty $(,)?) => {{
+        const CONST_EVAL: $t = $e;
+        const fn const_eval() -> $t { $e }
+        static CONST_EVAL2: $t = const_eval();
+        let runtime_eval = $e;
+        assert_eq!(CONST_EVAL, runtime_eval);
+        assert_eq!(CONST_EVAL2, runtime_eval);
+    }}
+}
+
+// Repeat `$test`, substituting the given macro variables with the given
+// identifiers.
+//
+// For example:
+//
+// repeat! {
+//     ($name); X; Y:
+//     struct $name;
+// }
+//
+// Expands to:
+//
+// struct X; struct Y;
+//
+// This is used to repeat the tests using both the `N` and `Z`
+// types.
+macro_rules! repeat {
+    (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => {
+        macro_rules! single {
+            ($($dollar $placeholder:ident),*) => { $($test)* }
+        }
+        $(single!($($values),+);)*
+    }
+}
+
+fn main() {
+    repeat! {
+        ($arr $Ty); n, N; z, Z:
+        compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]);
+        compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+        compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+
+        compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]);
+        compare_evaluation!(
+            { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x },
+            &'static [$Ty; 0],
+        );
+        compare_evaluation!(
+            { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x },
+            &'static [$Ty; 0],
+        );
+
+        compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty);
+        compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty);
+        compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty);
+    }
+
+    compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8);
+    compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8);
+    compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8);
+
+    compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8);
+    compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8);
+    compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs b/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs
new file mode 100644
index 0000000..93bab17
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs
@@ -0,0 +1,40 @@
+// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs
+
+// run-pass
+
+use std::panic::Location;
+
+#[track_caller]
+fn tracked() -> &'static Location<'static> {
+    Location::caller()
+}
+
+fn nested_intrinsic() -> &'static Location<'static> {
+    Location::caller()
+}
+
+fn nested_tracked() -> &'static Location<'static> {
+    tracked()
+}
+
+fn main() {
+    let location = Location::caller();
+    assert_eq!(location.file(), file!());
+    assert_eq!(location.line(), 21);
+    assert_eq!(location.column(), 20);
+
+    let tracked = tracked();
+    assert_eq!(tracked.file(), file!());
+    assert_eq!(tracked.line(), 26);
+    assert_eq!(tracked.column(), 19);
+
+    let nested = nested_intrinsic();
+    assert_eq!(nested.file(), file!());
+    assert_eq!(nested.line(), 13);
+    assert_eq!(nested.column(), 5);
+
+    let contained = nested_tracked();
+    assert_eq!(contained.file(), file!());
+    assert_eq!(contained.line(), 17);
+    assert_eq!(contained.column(), 5);
+}
diff --git a/compiler/rustc_codegen_cranelift/patches/0022-core-Disable-not-compiling-tests.patch b/compiler/rustc_codegen_cranelift/patches/0022-core-Disable-not-compiling-tests.patch
new file mode 100644
index 0000000..ee85487
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0022-core-Disable-not-compiling-tests.patch
@@ -0,0 +1,123 @@
+From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
+From: bjorn3 <[email protected]>
+Date: Sun, 24 Nov 2019 15:10:23 +0100
+Subject: [PATCH] [core] Disable not compiling tests
+
+---
+ library/core/tests/Cargo.toml         | 8 ++++++++
+ library/core/tests/num/flt2dec/mod.rs | 1 -
+ library/core/tests/num/int_macros.rs  | 2 ++
+ library/core/tests/num/uint_macros.rs | 2 ++
+ library/core/tests/ptr.rs             | 2 ++
+ library/core/tests/slice.rs           | 2 ++
+ 6 files changed, 16 insertions(+), 1 deletion(-)
+ create mode 100644 library/core/tests/Cargo.toml
+
+diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
+new file mode 100644
+index 0000000..46fd999
+--- /dev/null
++++ b/library/core/tests/Cargo.toml
+@@ -0,0 +1,8 @@
++[package]
++name = "core"
++version = "0.0.0"
++edition = "2018"
++
++[lib]
++name = "coretests"
++path = "lib.rs"
+diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
+index a35897e..f0bf645 100644
+--- a/library/core/tests/num/flt2dec/mod.rs
++++ b/library/core/tests/num/flt2dec/mod.rs
+@@ -13,7 +13,6 @@ mod strategy {
+     mod dragon;
+     mod grisu;
+ }
+-mod random;
+ 
+ pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+     match decode(v).1 {
+diff --git a/library/core/tests/num/int_macros.rs b/library/core/tests/num/int_macros.rs
+index 0475aeb..9558198 100644
+--- a/library/core/tests/num/int_macros.rs
++++ b/library/core/tests/num/int_macros.rs
+@@ -88,6 +88,7 @@ mod tests {
+                 assert_eq!(x.trailing_ones(), 0);
+             }
+ 
++            /*
+             #[test]
+             fn test_rotate() {
+                 assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
+@@ -112,6 +113,7 @@ mod tests {
+                 assert_eq!(B.rotate_left(64), B);
+                 assert_eq!(C.rotate_left(64), C);
+             }
++            */
+ 
+             #[test]
+             fn test_swap_bytes() {
+diff --git a/library/core/tests/num/uint_macros.rs b/library/core/tests/num/uint_macros.rs
+index 04ed14f..a6e372e 100644
+--- a/library/core/tests/num/uint_macros.rs
++++ b/library/core/tests/num/uint_macros.rs
+@@ -52,6 +52,7 @@ mod tests {
+                 assert_eq!(x.trailing_ones(), 0);
+             }
+ 
++            /*
+             #[test]
+             fn test_rotate() {
+                 assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
+@@ -76,6 +77,7 @@ mod tests {
+                 assert_eq!(B.rotate_left(64), B);
+                 assert_eq!(C.rotate_left(64), C);
+             }
++            */
+ 
+             #[test]
+             fn test_swap_bytes() {
+diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
+index 1a6be3a..42dbd59 100644
+--- a/library/core/tests/ptr.rs
++++ b/library/core/tests/ptr.rs
+@@ -250,6 +250,7 @@ fn test_unsized_nonnull() {
+     assert!(ys == zs);
+ }
+ 
++/*
+ #[test]
+ #[allow(warnings)]
+ // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
+@@ -289,6 +290,7 @@ fn write_unaligned_drop() {
+     }
+     DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
+ }
++*/
+ 
+ #[test]
+ fn align_offset_zst() {
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 6609bc3..241b497 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
+     }
+ }
+ 
++/*
+ #[test]
+ #[cfg(not(target_arch = "wasm32"))]
+ fn sort_unstable() {
+@@ -1394,6 +1395,7 @@ fn partition_at_index() {
+     v.select_nth_unstable(0);
+     assert!(v == [0xDEADBEEF]);
+ }
++*/
+ 
+ #[test]
+ #[should_panic(expected = "index 0 greater than length of slice")]
+--
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_cranelift/patches/0023-core-Ignore-failing-tests.patch b/compiler/rustc_codegen_cranelift/patches/0023-core-Ignore-failing-tests.patch
new file mode 100644
index 0000000..5d2c304
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0023-core-Ignore-failing-tests.patch
@@ -0,0 +1,90 @@
+From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001
+From: bjorn3 <[email protected]>
+Date: Sun, 24 Nov 2019 15:34:06 +0100
+Subject: [PATCH] [core] Ignore failing tests
+
+---
+ library/core/tests/iter.rs       |  4 ++++
+ library/core/tests/num/bignum.rs | 10 ++++++++++
+ library/core/tests/num/mod.rs    |  5 +++--
+ library/core/tests/time.rs       |  1 +
+ 4 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
+index 4bc44e9..8e3c7a4 100644
+--- a/library/core/tests/array.rs
++++ b/library/core/tests/array.rs
+@@ -242,6 +242,7 @@ fn iterator_drops() {
+     assert_eq!(i.get(), 5);
+ }
+ 
++/*
+ // This test does not work on targets without panic=unwind support.
+ // To work around this problem, test is marked is should_panic, so it will
+ // be automagically skipped on unsuitable targets, such as
+@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() {
+     assert_eq!(COUNTER.load(Relaxed), 0);
+     panic!("test succeeded")
+ }
++*/
+ 
+ #[test]
+ fn empty_array_is_always_default() {
+@@ -304,6 +304,7 @@ fn array_map() {
+     assert_eq!(b, [1, 2, 3]);
+ }
+ 
++/*
+ // See note on above test for why `should_panic` is used.
+ #[test]
+ #[should_panic(expected = "test succeeded")]
+@@ -332,6 +333,7 @@ fn array_map_drop_safety() {
+     assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
+     panic!("test succeeded")
+ }
++*/
+ 
+ #[test]
+ fn cell_allows_array_cycle() {
+diff --git a/library/core/tests/num/mod.rs b/library/core/tests/num/mod.rs
+index a17c094..5bb11d2 100644
+--- a/library/core/tests/num/mod.rs
++++ b/library/core/tests/num/mod.rs
+@@ -651,11 +651,12 @@ macro_rules! test_float {
+                 assert_eq!((9.0 as $fty).min($neginf), $neginf);
+                 assert_eq!(($neginf as $fty).min(-9.0), $neginf);
+                 assert_eq!((-9.0 as $fty).min($neginf), $neginf);
+-                assert_eq!(($nan as $fty).min(9.0), 9.0);
+-                assert_eq!(($nan as $fty).min(-9.0), -9.0);
+-                assert_eq!((9.0 as $fty).min($nan), 9.0);
+-                assert_eq!((-9.0 as $fty).min($nan), -9.0);
+-                assert!(($nan as $fty).min($nan).is_nan());
++                // Cranelift fmin has NaN propagation
++                //assert_eq!(($nan as $fty).min(9.0), 9.0);
++                //assert_eq!(($nan as $fty).min(-9.0), -9.0);
++                //assert_eq!((9.0 as $fty).min($nan), 9.0);
++                //assert_eq!((-9.0 as $fty).min($nan), -9.0);
++                //assert!(($nan as $fty).min($nan).is_nan());
+             }
+             #[test]
+             fn max() {
+@@ -673,11 +674,12 @@ macro_rules! test_float {
+                 assert_eq!((9.0 as $fty).max($neginf), 9.0);
+                 assert_eq!(($neginf as $fty).max(-9.0), -9.0);
+                 assert_eq!((-9.0 as $fty).max($neginf), -9.0);
+-                assert_eq!(($nan as $fty).max(9.0), 9.0);
+-                assert_eq!(($nan as $fty).max(-9.0), -9.0);
+-                assert_eq!((9.0 as $fty).max($nan), 9.0);
+-                assert_eq!((-9.0 as $fty).max($nan), -9.0);
+-                assert!(($nan as $fty).max($nan).is_nan());
++                // Cranelift fmax has NaN propagation
++                //assert_eq!(($nan as $fty).max(9.0), 9.0);
++                //assert_eq!(($nan as $fty).max(-9.0), -9.0);
++                //assert_eq!((9.0 as $fty).max($nan), 9.0);
++                //assert_eq!((-9.0 as $fty).max($nan), -9.0);
++                //assert!(($nan as $fty).max($nan).is_nan());
+             }
+             #[test]
+             fn rem_euclid() {
+-- 
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_cranelift/prepare.sh b/compiler/rustc_codegen_cranelift/prepare.sh
new file mode 100755
index 0000000..87f96f5
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/prepare.sh
@@ -0,0 +1,29 @@
+#!/bin/bash --verbose
+set -e
+
+rustup component add rust-src rustc-dev llvm-tools-preview
+./build_sysroot/prepare_sysroot_src.sh
+cargo install hyperfine || echo "Skipping hyperfine install"
+
+git clone https://github.com/rust-random/rand.git || echo "rust-random/rand has already been cloned"
+pushd rand
+git checkout -- .
+git checkout 0f933f9c7176e53b2a3c7952ded484e1783f0bf1
+git am ../crate_patches/*-rand-*.patch
+popd
+
+git clone https://github.com/rust-lang/regex.git || echo "rust-lang/regex has already been cloned"
+pushd regex
+git checkout -- .
+git checkout 341f207c1071f7290e3f228c710817c280c8dca1
+popd
+
+git clone https://github.com/ebobby/simple-raytracer || echo "ebobby/simple-raytracer has already been cloned"
+pushd simple-raytracer
+git checkout -- .
+git checkout 804a7a21b9e673a482797aa289a18ed480e4d813
+
+# build with cg_llvm for perf comparison
+cargo build
+mv target/debug/main raytracer_cg_llvm
+popd
diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain
new file mode 100644
index 0000000..0ca96be
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/rust-toolchain
@@ -0,0 +1 @@
+nightly-2020-10-31
diff --git a/compiler/rustc_codegen_cranelift/scripts/Readme.md b/compiler/rustc_codegen_cranelift/scripts/Readme.md
new file mode 100644
index 0000000..83cec9c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/Readme.md
@@ -0,0 +1,2 @@
+This directory is for scripts that are either never directly invoked or are not used very often.
+Scripts that are frequently used should be kept at the project root.
diff --git a/compiler/rustc_codegen_cranelift/scripts/cargo.sh b/compiler/rustc_codegen_cranelift/scripts/cargo.sh
new file mode 100755
index 0000000..947b4a2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/cargo.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
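+# A thin wrapper around cargo (sketch of the intent, inferred from the dispatch below):
+#   ./cargo.sh <cmd> ...   -> cargo +<toolchain from rust-toolchain> <cmd> ...
+#   ./cargo.sh jit foo.rs  -> cargo +<toolchain from rust-toolchain> rustc foo.rs -- --jit
+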
+dir=$(dirname "$0")
+source $dir/config.sh
+
+# read nightly compiler from rust-toolchain file
+TOOLCHAIN=$(cat $dir/rust-toolchain)
+
+cmd=$1
+shift || true
+
+if [[ "$cmd" = "jit" ]]; then
+cargo +${TOOLCHAIN} rustc "$@" -- --jit
+else
+cargo +${TOOLCHAIN} $cmd "$@"
+fi
diff --git a/compiler/rustc_codegen_cranelift/scripts/config.sh b/compiler/rustc_codegen_cranelift/scripts/config.sh
new file mode 100644
index 0000000..6120a55
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/config.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+set -e
+
+unamestr=`uname`
+if [[ "$unamestr" == 'Linux' ]]; then
+   dylib_ext='so'
+elif [[ "$unamestr" == 'Darwin' ]]; then
+   dylib_ext='dylib'
+else
+   echo "Unsupported os"
+   exit 1
+fi
+
+HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
+TARGET_TRIPLE=$HOST_TRIPLE
+#TARGET_TRIPLE="x86_64-pc-windows-gnu"
+#TARGET_TRIPLE="aarch64-unknown-linux-gnu"
+
+linker=''
+RUN_WRAPPER=''
+export JIT_SUPPORTED=1
+if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
+   export JIT_SUPPORTED=0
+   if [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
+      # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+      linker='-Clinker=aarch64-linux-gnu-gcc'
+      RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
+   elif [[ "$TARGET_TRIPLE" == "x86_64-pc-windows-gnu" ]]; then
+      # We are cross-compiling for Windows. Run tests in wine.
+      RUN_WRAPPER='wine'
+   else
+      echo "Unknown non-native platform"
+   fi
+fi
+
+if echo "$RUSTC_WRAPPER" | grep sccache; then
+echo
+echo -e "\x1b[1;93m=== Warning: Unset RUSTC_WRAPPER to prevent interference with sccache ===\x1b[0m"
+echo
+export RUSTC_WRAPPER=
+fi
+
+dir=$(cd $(dirname "$BASH_SOURCE"); pwd)
+
+export RUSTC=$dir"/cg_clif"
+export RUSTFLAGS=$linker
+export RUSTDOCFLAGS=$linker' -Ztrim-diagnostic-paths=no -Cpanic=abort -Zpanic-abort-tests '\
+'-Zcodegen-backend='$dir'/librustc_codegen_cranelift.'$dylib_ext' --sysroot '$dir'/sysroot'
+
+# FIXME remove once the atomic shim is gone
+if [[ `uname` == 'Darwin' ]]; then
+   export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
+fi
+
+export LD_LIBRARY_PATH="$dir:$(rustc --print sysroot)/lib:$dir/target/out:$dir/sysroot/lib/rustlib/"$TARGET_TRIPLE"/lib"
+export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
+
+export CG_CLIF_DISPLAY_CG_TIME=1
diff --git a/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
new file mode 100755
index 0000000..3327c10
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
@@ -0,0 +1,125 @@
+#!/bin/bash
+#![forbid(unsafe_code)]/* This line is ignored by bash
+# This block is ignored by rustc
+pushd $(dirname "$0")/../
+source build/config.sh
+popd
+PROFILE=$1 OUTPUT=$2 exec $RUSTC $RUSTFLAGS --jit $0
+#*/
+
+//! This program filters away uninteresting samples and trims uninteresting frames for stackcollapse
+//! profiles.
+//!
+//! Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>
+//!
+//! This file is specially crafted to be both a valid bash script and a valid Rust source
+//! file. If executed as a bash script, it runs the Rust source using cg_clif in JIT mode.
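+//!
+//! Each input line is assumed to be in the usual stackcollapse form
+//! `frame1;frame2;...;frameN <count>`; the code below only relies on the count being
+//! the last space-separated token and on frame names being plain substrings.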
+
+use std::io::Write;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let profile_name = std::env::var("PROFILE").unwrap();
+    let output_name = std::env::var("OUTPUT").unwrap();
+    if profile_name.is_empty() || output_name.is_empty() {
+        println!("Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>");
+        std::process::exit(1);
+    }
+    let profile = std::fs::read_to_string(profile_name)
+        .map_err(|err| format!("Failed to read profile {}", err))?;
+    let mut output = std::fs::OpenOptions::new()
+        .create(true)
+        .write(true)
+        .truncate(true)
+        .open(output_name)?;
+
+    for line in profile.lines() {
+        let mut stack = &line[..line.rfind(" ").unwrap()];
+        let count = &line[line.rfind(" ").unwrap() + 1..];
+
+        // Filter away uninteresting samples
+        if !stack.contains("rustc_codegen_cranelift") {
+            continue;
+        }
+
+        if stack.contains("rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items")
+            || stack.contains("rustc_incremental::assert_dep_graph::assert_dep_graph")
+            || stack.contains("rustc_symbol_mangling::test::report_symbol_names")
+        {
+            continue;
+        }
+
+        // Trim start
+        if let Some(index) = stack.find("rustc_interface::passes::configure_and_expand") {
+            stack = &stack[index..];
+        } else if let Some(index) = stack.find("rustc_interface::passes::analysis") {
+            stack = &stack[index..];
+        } else if let Some(index) = stack.find("rustc_interface::passes::start_codegen") {
+            stack = &stack[index..];
+        } else if let Some(index) = stack.find("rustc_interface::queries::Linker::link") {
+            stack = &stack[index..];
+        }
+
+        if let Some(index) = stack.find("rustc_codegen_cranelift::driver::aot::module_codegen") {
+            stack = &stack[index..];
+        }
+
+        // Trim end
+        const MALLOC: &str = "malloc";
+        if let Some(index) = stack.find(MALLOC) {
+            stack = &stack[..index + MALLOC.len()];
+        }
+
+        const FREE: &str = "free";
+        if let Some(index) = stack.find(FREE) {
+            stack = &stack[..index + FREE.len()];
+        }
+
+        const TYPECK_ITEM_BODIES: &str = "rustc_typeck::check::typeck_item_bodies";
+        if let Some(index) = stack.find(TYPECK_ITEM_BODIES) {
+            stack = &stack[..index + TYPECK_ITEM_BODIES.len()];
+        }
+
+        const COLLECT_AND_PARTITION_MONO_ITEMS: &str =
+            "rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items";
+        if let Some(index) = stack.find(COLLECT_AND_PARTITION_MONO_ITEMS) {
+            stack = &stack[..index + COLLECT_AND_PARTITION_MONO_ITEMS.len()];
+        }
+
+        const ASSERT_DEP_GRAPH: &str = "rustc_incremental::assert_dep_graph::assert_dep_graph";
+        if let Some(index) = stack.find(ASSERT_DEP_GRAPH) {
+            stack = &stack[..index + ASSERT_DEP_GRAPH.len()];
+        }
+
+        const REPORT_SYMBOL_NAMES: &str = "rustc_symbol_mangling::test::report_symbol_names";
+        if let Some(index) = stack.find(REPORT_SYMBOL_NAMES) {
+            stack = &stack[..index + REPORT_SYMBOL_NAMES.len()];
+        }
+
+        const ENCODE_METADATA: &str = "rustc_middle::ty::context::TyCtxt::encode_metadata";
+        if let Some(index) = stack.find(ENCODE_METADATA) {
+            stack = &stack[..index + ENCODE_METADATA.len()];
+        }
+
+        const SUBST_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::subst_and_normalize_erasing_regions";
+        if let Some(index) = stack.find(SUBST_AND_NORMALIZE_ERASING_REGIONS) {
+            stack = &stack[..index + SUBST_AND_NORMALIZE_ERASING_REGIONS.len()];
+        }
+
+        const NORMALIZE_ERASING_LATE_BOUND_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::normalize_erasing_late_bound_regions";
+        if let Some(index) = stack.find(NORMALIZE_ERASING_LATE_BOUND_REGIONS) {
+            stack = &stack[..index + NORMALIZE_ERASING_LATE_BOUND_REGIONS.len()];
+        }
+
+        const INST_BUILD: &str = "<cranelift_frontend::frontend::FuncInstBuilder as cranelift_codegen::ir::builder::InstBuilderBase>::build";
+        if let Some(index) = stack.find(INST_BUILD) {
+            stack = &stack[..index + INST_BUILD.len()];
+        }
+
+        output.write_all(stack.as_bytes())?;
+        output.write_all(&*b" ")?;
+        output.write_all(count.as_bytes())?;
+        output.write_all(&*b"\n")?;
+    }
+
+    Ok(())
+}
diff --git a/compiler/rustc_codegen_cranelift/scripts/rustup.sh b/compiler/rustc_codegen_cranelift/scripts/rustup.sh
new file mode 100755
index 0000000..541b3c6
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/rustup.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -e
+
+case $1 in
+    "prepare")
+        TOOLCHAIN=$(date +%Y-%m-%d)
+
+        echo "=> Installing new nightly"
+        rustup toolchain install --profile minimal nightly-${TOOLCHAIN} # Sanity check to see if the nightly exists
+        echo nightly-${TOOLCHAIN} > rust-toolchain
+        rustup component add rustfmt || true
+
+        echo "=> Uninstalling all old nighlies"
+        for nightly in $(rustup toolchain list | grep nightly | grep -v $TOOLCHAIN | grep -v nightly-x86_64); do
+            rustup toolchain uninstall $nightly
+        done
+
+        ./clean_all.sh
+        ./prepare.sh
+
+        (cd build_sysroot && cargo update)
+
+        ;;
+    "commit")
+        git add rust-toolchain build_sysroot/Cargo.lock
+        git commit -m "Rustup to $(rustc -V)"
+        ;;
+    "push")
+	cg_clif=$(pwd)
+	pushd ../rust
+	branch=update_cg_clif-$(date +%Y-%m-%d)
+	git checkout -b $branch
+	git subtree pull --prefix=compiler/rustc_codegen_cranelift/ https://github.com/bjorn3/rustc_codegen_cranelift.git master
+	git push -u my $branch
+	popd
+	;;
+    *)
+        echo "Unknown command '$1'"
+        echo "Usage: ./rustup.sh prepare|commit"
+        ;;
+esac
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh b/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
new file mode 100755
index 0000000..7f43f81
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+set -e
+
+cd $(dirname "$0")/../
+
+./build.sh
+source build/config.sh
+
+echo "[TEST] Bootstrap of rustc"
+git clone https://github.com/rust-lang/rust.git || true
+pushd rust
+git fetch
+git checkout -- .
+git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
+
+git apply - <<EOF
+diff --git a/.gitmodules b/.gitmodules
+index 984113151de..c1e9d960d56 100644
+--- a/.gitmodules
++++ b/.gitmodules
+@@ -34,10 +34,6 @@
+ [submodule "src/doc/edition-guide"]
+ 	path = src/doc/edition-guide
+ 	url = https://github.com/rust-lang/edition-guide.git
+-[submodule "src/llvm-project"]
+-	path = src/llvm-project
+-	url = https://github.com/rust-lang/llvm-project.git
+-	branch = rustc/11.0-2020-10-12
+ [submodule "src/doc/embedded-book"]
+ 	path = src/doc/embedded-book
+ 	url = https://github.com/rust-embedded/book.git
+diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
+index 23e689fcae7..5f077b765b6 100644
+--- a/compiler/rustc_data_structures/Cargo.toml
++++ b/compiler/rustc_data_structures/Cargo.toml
+@@ -32,7 +32,6 @@ tempfile = "3.0.5"
+
+ [dependencies.parking_lot]
+ version = "0.11"
+-features = ["nightly"]
+
+ [target.'cfg(windows)'.dependencies]
+ winapi = { version = "0.3", features = ["fileapi", "psapi"] }
+EOF
+
+cat > config.toml <<EOF
+[llvm]
+ninja = false
+
+[build]
+rustc = "$(pwd)/../build/cg_clif"
+cargo = "$(rustup which cargo)"
+full-bootstrap = true
+local-rebuild = true
+
+[rust]
+codegen-backends = ["cranelift"]
+EOF
+
+rm -r compiler/rustc_codegen_cranelift/{Cargo.*,src}
+cp ../Cargo.* compiler/rustc_codegen_cranelift/
+cp -r ../src compiler/rustc_codegen_cranelift/src
+
+./x.py build --stage 1 library/std
+popd
diff --git a/compiler/rustc_codegen_cranelift/scripts/tests.sh b/compiler/rustc_codegen_cranelift/scripts/tests.sh
new file mode 100755
index 0000000..d941b73c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/tests.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+set -e
+
+source build/config.sh
+export CG_CLIF_INCR_CACHE_DISABLED=1
+MY_RUSTC=$RUSTC" "$RUSTFLAGS" -L crate=target/out --out-dir target/out -Cdebuginfo=2"
+
+function no_sysroot_tests() {
+    echo "[BUILD] mini_core"
+    $MY_RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE
+
+    echo "[BUILD] example"
+    $MY_RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
+
+    if [[ "$JIT_SUPPORTED" = "1" ]]; then
+        echo "[JIT] mini_core_hello_world"
+        CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC --jit example/mini_core_hello_world.rs --cfg jit --target $HOST_TRIPLE
+    else
+        echo "[JIT] mini_core_hello_world (skipped)"
+    fi
+
+    echo "[AOT] mini_core_hello_world"
+    $MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
+    # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
+
+    echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+    $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
+}
+
+function base_sysroot_tests() {
+    echo "[AOT] alloc_example"
+    $MY_RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/alloc_example
+
+    if [[ "$JIT_SUPPORTED" = "1" ]]; then
+        echo "[JIT] std_example"
+        $MY_RUSTC --jit example/std_example.rs --target $HOST_TRIPLE
+    else
+        echo "[JIT] std_example (skipped)"
+    fi
+
+    echo "[AOT] dst_field_align"
+    # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
+    $MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
+
+    echo "[AOT] std_example"
+    $MY_RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/std_example arg
+
+    echo "[AOT] subslice-patterns-const-eval"
+    $MY_RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
+
+    echo "[AOT] track-caller-attribute"
+    $MY_RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/track-caller-attribute
+
+    echo "[AOT] mod_bench"
+    $MY_RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/mod_bench
+
+    pushd rand
+    rm -r ./target || true
+    ../build/cargo.sh test --workspace
+    popd
+}
+
+function extended_sysroot_tests() {
+    pushd simple-raytracer
+    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+        echo "[BENCH COMPILE] ebobby/simple-raytracer"
+        hyperfine --runs ${RUN_RUNS:-10} --warmup 1 --prepare "cargo clean" \
+        "RUSTC=rustc RUSTFLAGS='' cargo build" \
+        "../build/cargo.sh build"
+
+        echo "[BENCH RUN] ebobby/simple-raytracer"
+        cp ./target/debug/main ./raytracer_cg_clif
+        hyperfine --runs ${RUN_RUNS:-10} ./raytracer_cg_llvm ./raytracer_cg_clif
+    else
+        echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
+        echo "[COMPILE] ebobby/simple-raytracer"
+        ../cargo.sh build
+        echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
+    fi
+    popd
+
+    pushd build_sysroot/sysroot_src/library/core/tests
+    echo "[TEST] libcore"
+    rm -r ./target || true
+    ../../../../../build/cargo.sh test
+    popd
+
+    pushd regex
+    echo "[TEST] rust-lang/regex example shootout-regex-dna"
+    ../build/cargo.sh clean
+    # Make sure `[codegen mono items] start` doesn't poison the diff
+    ../build/cargo.sh build --example shootout-regex-dna
+    cat examples/regexdna-input.txt | ../build/cargo.sh run --example shootout-regex-dna | grep -v "Spawned thread" > res.txt
+    diff -u res.txt examples/regexdna-output.txt
+
+    echo "[TEST] rust-lang/regex tests"
+    ../build/cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
+    popd
+}
+
+case "$1" in
+    "no_sysroot")
+        no_sysroot_tests
+        ;;
+    "base_sysroot")
+        base_sysroot_tests
+        ;;
+    "extended_sysroot")
+        extended_sysroot_tests
+        ;;
+    *)
+        echo "unknown test suite"
+        ;;
+esac
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
new file mode 100644
index 0000000..01073d2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -0,0 +1,130 @@
+//! Annotate the clif ir with comments describing how arguments are passed into the current function
+//! and where all locals are stored.
+
+use std::borrow::Cow;
+
+use rustc_middle::mir;
+
+use cranelift_codegen::entity::EntityRef;
+
+use crate::abi::pass_mode::*;
+use crate::prelude::*;
+
+pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, impl Module>) {
+    fx.add_global_comment(
+        "kind  loc.idx   param    pass mode                            ty".to_string(),
+    );
+}
+
+pub(super) fn add_arg_comment<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    kind: &str,
+    local: Option<mir::Local>,
+    local_field: Option<usize>,
+    params: EmptySinglePair<Value>,
+    pass_mode: PassMode,
+    ty: Ty<'tcx>,
+) {
+    let local = if let Some(local) = local {
+        Cow::Owned(format!("{:?}", local))
+    } else {
+        Cow::Borrowed("???")
+    };
+    let local_field = if let Some(local_field) = local_field {
+        Cow::Owned(format!(".{}", local_field))
+    } else {
+        Cow::Borrowed("")
+    };
+
+    let params = match params {
+        Empty => Cow::Borrowed("-"),
+        Single(param) => Cow::Owned(format!("= {:?}", param)),
+        Pair(param_a, param_b) => Cow::Owned(format!("= {:?}, {:?}", param_a, param_b)),
+    };
+
+    let pass_mode = format!("{:?}", pass_mode);
+    fx.add_global_comment(format!(
+        "{kind:5}{local:>3}{local_field:<5} {params:10} {pass_mode:36} {ty:?}",
+        kind = kind,
+        local = local,
+        local_field = local_field,
+        params = params,
+        pass_mode = pass_mode,
+        ty = ty,
+    ));
+}
+
+pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, impl Module>) {
+    fx.add_global_comment(String::new());
+    fx.add_global_comment(
+        "kind  local ty                              size align (abi,pref)".to_string(),
+    );
+}
+
+pub(super) fn add_local_place_comments<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    place: CPlace<'tcx>,
+    local: Local,
+) {
+    let TyAndLayout { ty, layout } = place.layout();
+    let rustc_target::abi::Layout {
+        size,
+        align,
+        abi: _,
+        variants: _,
+        fields: _,
+        largest_niche: _,
+    } = layout;
+
+    let (kind, extra) = match *place.inner() {
+        CPlaceInner::Var(place_local, var) => {
+            assert_eq!(local, place_local);
+            ("ssa", Cow::Owned(format!(",var={}", var.index())))
+        }
+        CPlaceInner::VarPair(place_local, var1, var2) => {
+            assert_eq!(local, place_local);
+            (
+                "ssa",
+                Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())),
+            )
+        }
+        CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
+        CPlaceInner::Addr(ptr, meta) => {
+            let meta = if let Some(meta) = meta {
+                Cow::Owned(format!(",meta={}", meta))
+            } else {
+                Cow::Borrowed("")
+            };
+            match ptr.base_and_offset() {
+                (crate::pointer::PointerBase::Addr(addr), offset) => (
+                    "reuse",
+                    format!("storage={}{}{}", addr, offset, meta).into(),
+                ),
+                (crate::pointer::PointerBase::Stack(stack_slot), offset) => (
+                    "stack",
+                    format!("storage={}{}{}", stack_slot, offset, meta).into(),
+                ),
+                (crate::pointer::PointerBase::Dangling(align), offset) => (
+                    "zst",
+                    format!("align={},offset={}", align.bytes(), offset).into(),
+                ),
+            }
+        }
+    };
+
+    fx.add_global_comment(format!(
+        "{:<5} {:5} {:30} {:4}b {}, {}{}{}",
+        kind,
+        format!("{:?}", local),
+        format!("{:?}", ty),
+        size.bytes(),
+        align.abi.bytes(),
+        align.pref.bytes(),
+        if extra.is_empty() {
+            ""
+        } else {
+            "              "
+        },
+        extra,
+    ));
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
new file mode 100644
index 0000000..8109172
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -0,0 +1,763 @@
+//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
+
+#[cfg(debug_assertions)]
+mod comments;
+mod pass_mode;
+mod returning;
+
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_target::spec::abi::Abi;
+
+use cranelift_codegen::ir::{AbiParam, ArgumentPurpose};
+
+use self::pass_mode::*;
+use crate::prelude::*;
+
+pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return};
+
+// Copied from https://github.com/rust-lang/rust/blob/f52c72948aa1dd718cc1f168d21c91c584c0a662/src/librustc_middle/ty/layout.rs#L2301
+#[rustfmt::skip]
+pub(crate) fn fn_sig_for_fn_abi<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty::PolyFnSig<'tcx> {
+    use rustc_middle::ty::subst::Subst;
+
+    // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
+    let ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+    match *ty.kind() {
+        ty::FnDef(..) => {
+            // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
+            // parameters unused if they show up in the signature, but not in the `mir::Body`
+            // (i.e. due to being inside a projection that got normalized, see
+            // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
+            // track of a polymorphization `ParamEnv` to allow normalizing later.
+            let mut sig = match *ty.kind() {
+                ty::FnDef(def_id, substs) => tcx
+                    .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
+                    .subst(tcx, substs),
+                _ => unreachable!(),
+            };
+
+            if let ty::InstanceDef::VtableShim(..) = instance.def {
+                // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
+                sig = sig.map_bound(|mut sig| {
+                    let mut inputs_and_output = sig.inputs_and_output.to_vec();
+                    inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
+                    sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
+                    sig
+                });
+            }
+            sig
+        }
+        ty::Closure(def_id, substs) => {
+            let sig = substs.as_closure().sig();
+
+            let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
+            sig.map_bound(|sig| {
+                tcx.mk_fn_sig(
+                    std::iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
+                    sig.output(),
+                    sig.c_variadic,
+                    sig.unsafety,
+                    sig.abi,
+                )
+            })
+        }
+        ty::Generator(_, substs, _) => {
+            let sig = substs.as_generator().poly_sig();
+
+            let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
+            let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
+
+            let pin_did = tcx.require_lang_item(rustc_hir::LangItem::Pin, None);
+            let pin_adt_ref = tcx.adt_def(pin_did);
+            let pin_substs = tcx.intern_substs(&[env_ty.into()]);
+            let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
+
+            sig.map_bound(|sig| {
+                let state_did = tcx.require_lang_item(rustc_hir::LangItem::GeneratorState, None);
+                let state_adt_ref = tcx.adt_def(state_did);
+                let state_substs =
+                    tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
+                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
+
+                tcx.mk_fn_sig(
+                    [env_ty, sig.resume_ty].iter(),
+                    &ret_ty,
+                    false,
+                    rustc_hir::Unsafety::Normal,
+                    rustc_target::spec::abi::Abi::Rust,
+                )
+            })
+        }
+        _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
+    }
+}
+
+fn clif_sig_from_fn_sig<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    triple: &target_lexicon::Triple,
+    sig: FnSig<'tcx>,
+    span: Span,
+    is_vtable_fn: bool,
+    requires_caller_location: bool,
+) -> Signature {
+    let abi = match sig.abi {
+        Abi::System => Abi::C,
+        abi => abi,
+    };
+    let (call_conv, inputs, output): (CallConv, Vec<Ty<'tcx>>, Ty<'tcx>) = match abi {
+        Abi::Rust => (
+            CallConv::triple_default(triple),
+            sig.inputs().to_vec(),
+            sig.output(),
+        ),
+        Abi::C | Abi::Unadjusted => (
+            CallConv::triple_default(triple),
+            sig.inputs().to_vec(),
+            sig.output(),
+        ),
+        Abi::SysV64 => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
+        Abi::RustCall => {
+            assert_eq!(sig.inputs().len(), 2);
+            let extra_args = match sig.inputs().last().unwrap().kind() {
+                ty::Tuple(ref tupled_arguments) => tupled_arguments,
+                _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
+            };
+            let mut inputs: Vec<Ty<'tcx>> = vec![sig.inputs()[0]];
+            inputs.extend(extra_args.types());
+            (CallConv::triple_default(triple), inputs, sig.output())
+        }
+        Abi::System => unreachable!(),
+        Abi::RustIntrinsic => (
+            CallConv::triple_default(triple),
+            sig.inputs().to_vec(),
+            sig.output(),
+        ),
+        _ => unimplemented!("unsupported abi {:?}", sig.abi),
+    };
+
+    let inputs = inputs
+        .into_iter()
+        .enumerate()
+        .map(|(i, ty)| {
+            let mut layout = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
+            if i == 0 && is_vtable_fn {
+                // Virtual calls turn their self param into a thin pointer.
+                // See https://github.com/rust-lang/rust/blob/37b6a5e5e82497caf5353d9d856e4eb5d14cbe06/src/librustc/ty/layout.rs#L2519-L2572 for more info
+                layout = tcx
+                    .layout_of(ParamEnv::reveal_all().and(tcx.mk_mut_ptr(tcx.mk_unit())))
+                    .unwrap();
+            }
+            let pass_mode = get_pass_mode(tcx, layout);
+            if abi != Abi::Rust && abi != Abi::RustCall && abi != Abi::RustIntrinsic {
+                match pass_mode {
+                    PassMode::NoPass | PassMode::ByVal(_) => {}
+                    PassMode::ByRef { size: Some(size) } => {
+                        let purpose = ArgumentPurpose::StructArgument(u32::try_from(size.bytes()).expect("struct too big to pass on stack"));
+                        return EmptySinglePair::Single(AbiParam::special(pointer_ty(tcx), purpose)).into_iter();
+                    }
+                    PassMode::ByValPair(_, _) | PassMode::ByRef { size: None } => {
+                        tcx.sess.span_warn(
+                            span,
+                            &format!(
+                                "Argument of type `{:?}` with pass mode `{:?}` is not yet supported \
+                                for non-rust abi `{}`. Calling this function may result in a crash.",
+                                layout.ty,
+                                pass_mode,
+                                abi,
+                            ),
+                        );
+                    }
+                }
+            }
+            pass_mode.get_param_ty(tcx).map(AbiParam::new).into_iter()
+        })
+        .flatten();
+
+    let (mut params, returns): (Vec<_>, Vec<_>) = match get_pass_mode(
+        tcx,
+        tcx.layout_of(ParamEnv::reveal_all().and(output)).unwrap(),
+    ) {
+        PassMode::NoPass => (inputs.collect(), vec![]),
+        PassMode::ByVal(ret_ty) => (inputs.collect(), vec![AbiParam::new(ret_ty)]),
+        PassMode::ByValPair(ret_ty_a, ret_ty_b) => (
+            inputs.collect(),
+            vec![AbiParam::new(ret_ty_a), AbiParam::new(ret_ty_b)],
+        ),
+        PassMode::ByRef { size: Some(_) } => {
+            (
+                Some(pointer_ty(tcx)) // First param is place to put return val
+                    .into_iter()
+                    .map(|ty| AbiParam::special(ty, ArgumentPurpose::StructReturn))
+                    .chain(inputs)
+                    .collect(),
+                vec![],
+            )
+        }
+        PassMode::ByRef { size: None } => todo!(),
+    };
+
+    if requires_caller_location {
+        params.push(AbiParam::new(pointer_ty(tcx)));
+    }
+
+    Signature {
+        params,
+        returns,
+        call_conv,
+    }
+}
+
+pub(crate) fn get_function_name_and_sig<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    triple: &target_lexicon::Triple,
+    inst: Instance<'tcx>,
+    support_vararg: bool,
+) -> (String, Signature) {
+    assert!(!inst.substs.needs_infer());
+    let fn_sig = tcx.normalize_erasing_late_bound_regions(
+        ParamEnv::reveal_all(),
+        &fn_sig_for_fn_abi(tcx, inst),
+    );
+    if fn_sig.c_variadic && !support_vararg {
+        tcx.sess.span_fatal(
+            tcx.def_span(inst.def_id()),
+            "Variadic function definitions are not yet supported",
+        );
+    }
+    let sig = clif_sig_from_fn_sig(
+        tcx,
+        triple,
+        fn_sig,
+        tcx.def_span(inst.def_id()),
+        false,
+        inst.def.requires_caller_location(tcx),
+    );
+    (tcx.symbol_name(inst).name.to_string(), sig)
+}
+
+/// Instance must be monomorphized
+pub(crate) fn import_function<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    module: &mut impl Module,
+    inst: Instance<'tcx>,
+) -> FuncId {
+    let (name, sig) = get_function_name_and_sig(tcx, module.isa().triple(), inst, true);
+    module
+        .declare_function(&name, Linkage::Import, &sig)
+        .unwrap()
+}
+
+impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
+    /// Instance must be monomorphized
+    pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
+        let func_id = import_function(self.tcx, &mut self.cx.module, inst);
+        let func_ref = self
+            .cx
+            .module
+            .declare_func_in_func(func_id, &mut self.bcx.func);
+
+        #[cfg(debug_assertions)]
+        self.add_comment(func_ref, format!("{:?}", inst));
+
+        func_ref
+    }
+
+    pub(crate) fn lib_call(
+        &mut self,
+        name: &str,
+        input_tys: Vec<types::Type>,
+        output_tys: Vec<types::Type>,
+        args: &[Value],
+    ) -> &[Value] {
+        let sig = Signature {
+            params: input_tys.iter().cloned().map(AbiParam::new).collect(),
+            returns: output_tys.iter().cloned().map(AbiParam::new).collect(),
+            call_conv: CallConv::triple_default(self.triple()),
+        };
+        let func_id = self
+            .cx
+            .module
+            .declare_function(&name, Linkage::Import, &sig)
+            .unwrap();
+        let func_ref = self
+            .cx
+            .module
+            .declare_func_in_func(func_id, &mut self.bcx.func);
+        let call_inst = self.bcx.ins().call(func_ref, args);
+        #[cfg(debug_assertions)]
+        {
+            self.add_comment(call_inst, format!("easy_call {}", name));
+        }
+        let results = self.bcx.inst_results(call_inst);
+        assert!(results.len() <= 2, "{}", results.len());
+        results
+    }
+
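+    /// Convenience wrapper around `lib_call`: loads every `CValue` argument to a scalar,
+    /// derives the Clif parameter and return types from the given Rust types and re-packs
+    /// the returned scalar(s) into a `CValue` of `return_ty`.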
+    pub(crate) fn easy_call(
+        &mut self,
+        name: &str,
+        args: &[CValue<'tcx>],
+        return_ty: Ty<'tcx>,
+    ) -> CValue<'tcx> {
+        let (input_tys, args): (Vec<_>, Vec<_>) = args
+            .iter()
+            .map(|arg| {
+                (
+                    self.clif_type(arg.layout().ty).unwrap(),
+                    arg.load_scalar(self),
+                )
+            })
+            .unzip();
+        let return_layout = self.layout_of(return_ty);
+        let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
+            tup.types().map(|ty| self.clif_type(ty).unwrap()).collect()
+        } else {
+            vec![self.clif_type(return_ty).unwrap()]
+        };
+        let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
+        match *ret_vals {
+            [] => CValue::by_ref(
+                Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
+                return_layout,
+            ),
+            [val] => CValue::by_val(val, return_layout),
+            [val, extra] => CValue::by_val_pair(val, extra, return_layout),
+            _ => unreachable!(),
+        }
+    }
+}
+
+/// Make a [`CPlace`] capable of holding value of the specified type.
+fn make_local_place<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    local: Local,
+    layout: TyAndLayout<'tcx>,
+    is_ssa: bool,
+) -> CPlace<'tcx> {
+    let place = if is_ssa {
+        if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
+            CPlace::new_var_pair(fx, local, layout)
+        } else {
+            CPlace::new_var(fx, local, layout)
+        }
+    } else {
+        CPlace::new_stack_slot(fx, layout)
+    };
+
+    #[cfg(debug_assertions)]
+    self::comments::add_local_place_comments(fx, place, local);
+
+    place
+}
+
+pub(crate) fn codegen_fn_prelude<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    start_block: Block,
+) {
+    let ssa_analyzed = crate::analyze::analyze(fx);
+
+    #[cfg(debug_assertions)]
+    self::comments::add_args_header_comment(fx);
+
+    let ret_place = self::returning::codegen_return_param(fx, &ssa_analyzed, start_block);
+    assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
+
+    // None means pass_mode == NoPass
+    enum ArgKind<'tcx> {
+        Normal(Option<CValue<'tcx>>),
+        Spread(Vec<Option<CValue<'tcx>>>),
+    }
+
+    let func_params = fx
+        .mir
+        .args_iter()
+        .map(|local| {
+            let arg_ty = fx.monomorphize(&fx.mir.local_decls[local].ty);
+
+            // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
+            if Some(local) == fx.mir.spread_arg {
+                // This argument (e.g. the last argument in the "rust-call" ABI)
+                // is a tuple that was spread at the ABI level and now we have
+                // to reconstruct it into a tuple local variable, from multiple
+                // individual function arguments.
+
+                let tupled_arg_tys = match arg_ty.kind() {
+                    ty::Tuple(ref tys) => tys,
+                    _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
+                };
+
+                let mut params = Vec::new();
+                for (i, arg_ty) in tupled_arg_tys.types().enumerate() {
+                    let param = cvalue_for_param(fx, start_block, Some(local), Some(i), arg_ty);
+                    params.push(param);
+                }
+
+                (local, ArgKind::Spread(params), arg_ty)
+            } else {
+                let param = cvalue_for_param(fx, start_block, Some(local), None, arg_ty);
+                (local, ArgKind::Normal(param), arg_ty)
+            }
+        })
+        .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
+
+    assert!(fx.caller_location.is_none());
+    if fx.instance.def.requires_caller_location(fx.tcx) {
+        // Store caller location for `#[track_caller]`.
+        fx.caller_location = Some(
+            cvalue_for_param(fx, start_block, None, None, fx.tcx.caller_location_ty()).unwrap(),
+        );
+    }
+
+    fx.bcx.switch_to_block(start_block);
+    fx.bcx.ins().nop();
+
+    #[cfg(debug_assertions)]
+    self::comments::add_locals_header_comment(fx);
+
+    for (local, arg_kind, ty) in func_params {
+        let layout = fx.layout_of(ty);
+
+        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+        // While this is normally an optimization to prevent an unnecessary copy when an argument is
+        // not mutated by the current function, this is necessary to support unsized arguments.
+        if let ArgKind::Normal(Some(val)) = arg_kind {
+            if let Some((addr, meta)) = val.try_to_ptr() {
+                let local_decl = &fx.mir.local_decls[local];
+                //                       v this ! is important
+                let internally_mutable = !val.layout().ty.is_freeze(
+                    fx.tcx.at(local_decl.source_info.span),
+                    ParamEnv::reveal_all(),
+                );
+                if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
+                    // We won't mutate this argument, so it is fine to borrow the backing storage
+                    // of this argument, to prevent a copy.
+
+                    let place = if let Some(meta) = meta {
+                        CPlace::for_ptr_with_extra(addr, meta, val.layout())
+                    } else {
+                        CPlace::for_ptr(addr, val.layout())
+                    };
+
+                    #[cfg(debug_assertions)]
+                    self::comments::add_local_place_comments(fx, place, local);
+
+                    assert_eq!(fx.local_map.push(place), local);
+                    continue;
+                }
+            }
+        }
+
+        let place = make_local_place(fx, local, layout, is_ssa);
+        assert_eq!(fx.local_map.push(place), local);
+
+        match arg_kind {
+            ArgKind::Normal(param) => {
+                if let Some(param) = param {
+                    place.write_cvalue(fx, param);
+                }
+            }
+            ArgKind::Spread(params) => {
+                for (i, param) in params.into_iter().enumerate() {
+                    if let Some(param) = param {
+                        place
+                            .place_field(fx, mir::Field::new(i))
+                            .write_cvalue(fx, param);
+                    }
+                }
+            }
+        }
+    }
+
+    for local in fx.mir.vars_and_temps_iter() {
+        let ty = fx.monomorphize(&fx.mir.local_decls[local].ty);
+        let layout = fx.layout_of(ty);
+
+        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+        let place = make_local_place(fx, local, layout, is_ssa);
+        assert_eq!(fx.local_map.push(place), local);
+    }
+
+    fx.bcx
+        .ins()
+        .jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
+}
+
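+/// Codegen a `Call` terminator: resolve the callee (LLVM intrinsics, rustc intrinsics,
+/// empty drop glue, virtual calls, direct and indirect calls), adjust the arguments for
+/// the ABI and finally jump to the destination block, or trap if the call diverges.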
+pub(crate) fn codegen_terminator_call<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    span: Span,
+    current_block: Block,
+    func: &Operand<'tcx>,
+    args: &[Operand<'tcx>],
+    destination: Option<(Place<'tcx>, BasicBlock)>,
+) {
+    let fn_ty = fx.monomorphize(&func.ty(fx.mir, fx.tcx));
+    let fn_sig = fx
+        .tcx
+        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &fn_ty.fn_sig(fx.tcx));
+
+    let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb));
+
+    // Handle special calls like intrinsics and empty drop glue.
+    let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
+        let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+            .unwrap()
+            .unwrap()
+            .polymorphize(fx.tcx);
+
+        if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
+            crate::intrinsics::codegen_llvm_intrinsic_call(
+                fx,
+                &fx.tcx.symbol_name(instance).name,
+                substs,
+                args,
+                destination,
+            );
+            return;
+        }
+
+        match instance.def {
+            InstanceDef::Intrinsic(_) => {
+                crate::intrinsics::codegen_intrinsic_call(fx, instance, args, destination, span);
+                return;
+            }
+            InstanceDef::DropGlue(_, None) => {
+                // empty drop glue - a nop.
+                let (_, dest) = destination.expect("Non terminating drop_in_place_real???");
+                let ret_block = fx.get_block(dest);
+                fx.bcx.ins().jump(ret_block, &[]);
+                return;
+            }
+            _ => Some(instance),
+        }
+    } else {
+        None
+    };
+
+    let is_cold = instance
+        .map(|inst| {
+            fx.tcx
+                .codegen_fn_attrs(inst.def_id())
+                .flags
+                .contains(CodegenFnAttrFlags::COLD)
+        })
+        .unwrap_or(false);
+    if is_cold {
+        fx.cold_blocks.insert(current_block);
+    }
+
+    // Unpack arguments tuple for closures
+    let args = if fn_sig.abi == Abi::RustCall {
+        assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
+        let self_arg = codegen_operand(fx, &args[0]);
+        let pack_arg = codegen_operand(fx, &args[1]);
+
+        let tupled_arguments = match pack_arg.layout().ty.kind() {
+            ty::Tuple(ref tupled_arguments) => tupled_arguments,
+            _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
+        };
+
+        let mut args = Vec::with_capacity(1 + tupled_arguments.len());
+        args.push(self_arg);
+        for i in 0..tupled_arguments.len() {
+            args.push(pack_arg.value_field(fx, mir::Field::new(i)));
+        }
+        args
+    } else {
+        args.iter()
+            .map(|arg| codegen_operand(fx, arg))
+            .collect::<Vec<_>>()
+    };
+
+    //   | indirect call target
+    //   |         | the first argument to be passed
+    //   v         v          v virtual calls are special cased below
+    let (func_ref, first_arg, is_virtual_call) = match instance {
+        // Trait object call
+        Some(Instance {
+            def: InstanceDef::Virtual(_, idx),
+            ..
+        }) => {
+            #[cfg(debug_assertions)]
+            {
+                let nop_inst = fx.bcx.ins().nop();
+                fx.add_comment(
+                    nop_inst,
+                    format!(
+                        "virtual call; self arg pass mode: {:?}",
+                        get_pass_mode(fx.tcx, args[0].layout())
+                    ),
+                );
+            }
+            let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
+            (Some(method), Single(ptr), true)
+        }
+
+        // Normal call
+        Some(_) => (
+            None,
+            args.get(0)
+                .map(|arg| adjust_arg_for_abi(fx, *arg))
+                .unwrap_or(Empty),
+            false,
+        ),
+
+        // Indirect call
+        None => {
+            #[cfg(debug_assertions)]
+            {
+                let nop_inst = fx.bcx.ins().nop();
+                fx.add_comment(nop_inst, "indirect call");
+            }
+            let func = codegen_operand(fx, func).load_scalar(fx);
+            (
+                Some(func),
+                args.get(0)
+                    .map(|arg| adjust_arg_for_abi(fx, *arg))
+                    .unwrap_or(Empty),
+                false,
+            )
+        }
+    };
+
+    let ret_place = destination.map(|(place, _)| place);
+    let (call_inst, call_args) =
+        self::returning::codegen_with_call_return_arg(fx, fn_sig, ret_place, |fx, return_ptr| {
+            let mut call_args: Vec<Value> = return_ptr
+                .into_iter()
+                .chain(first_arg.into_iter())
+                .chain(
+                    args.into_iter()
+                        .skip(1)
+                        .map(|arg| adjust_arg_for_abi(fx, arg).into_iter())
+                        .flatten(),
+                )
+                .collect::<Vec<_>>();
+
+            if instance
+                .map(|inst| inst.def.requires_caller_location(fx.tcx))
+                .unwrap_or(false)
+            {
+                // Pass the caller location for `#[track_caller]`.
+                let caller_location = fx.get_caller_location(span);
+                call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
+            }
+
+            let call_inst = if let Some(func_ref) = func_ref {
+                let sig = clif_sig_from_fn_sig(
+                    fx.tcx,
+                    fx.triple(),
+                    fn_sig,
+                    span,
+                    is_virtual_call,
+                    false, // calls through function pointers never pass the caller location
+                );
+                let sig = fx.bcx.import_signature(sig);
+                fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
+            } else {
+                let func_ref =
+                    fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
+                fx.bcx.ins().call(func_ref, &call_args)
+            };
+
+            (call_inst, call_args)
+        });
+
+    // FIXME find a cleaner way to support varargs
+    if fn_sig.c_variadic {
+        if fn_sig.abi != Abi::C {
+            fx.tcx.sess.span_fatal(
+                span,
+                &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
+            );
+        }
+        let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
+        let abi_params = call_args
+            .into_iter()
+            .map(|arg| {
+                let ty = fx.bcx.func.dfg.value_type(arg);
+                if !ty.is_int() {
+                    // FIXME set %al to upperbound on float args once floats are supported
+                    fx.tcx
+                        .sess
+                        .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
+                }
+                AbiParam::new(ty)
+            })
+            .collect::<Vec<AbiParam>>();
+        fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
+    }
+
+    if let Some((_, dest)) = destination {
+        let ret_block = fx.get_block(dest);
+        fx.bcx.ins().jump(ret_block, &[]);
+    } else {
+        trap_unreachable(fx, "[corruption] Diverging function returned");
+    }
+}
+
+pub(crate) fn codegen_drop<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    span: Span,
+    drop_place: CPlace<'tcx>,
+) {
+    let ty = drop_place.layout().ty;
+    let drop_fn = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);
+
+    if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
+        // we don't actually need to drop anything
+    } else {
+        let drop_fn_ty = drop_fn.ty(fx.tcx, ParamEnv::reveal_all());
+        let fn_sig = fx.tcx.normalize_erasing_late_bound_regions(
+            ParamEnv::reveal_all(),
+            &drop_fn_ty.fn_sig(fx.tcx),
+        );
+        assert_eq!(fn_sig.output(), fx.tcx.mk_unit());
+
+        match ty.kind() {
+            ty::Dynamic(..) => {
+                let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
+                let ptr = ptr.get_addr(fx);
+                let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
+
+                let sig = clif_sig_from_fn_sig(
+                    fx.tcx,
+                    fx.triple(),
+                    fn_sig,
+                    span,
+                    true,
+                    false, // `drop_in_place` is never `#[track_caller]`
+                );
+                let sig = fx.bcx.import_signature(sig);
+                fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
+            }
+            _ => {
+                assert!(!matches!(drop_fn.def, InstanceDef::Virtual(_, _)));
+
+                let arg_value = drop_place.place_ref(
+                    fx,
+                    fx.layout_of(fx.tcx.mk_ref(
+                        &ty::RegionKind::ReErased,
+                        TypeAndMut {
+                            ty,
+                            mutbl: crate::rustc_hir::Mutability::Mut,
+                        },
+                    )),
+                );
+                let arg_value = adjust_arg_for_abi(fx, arg_value);
+
+                let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
+
+                if drop_fn.def.requires_caller_location(fx.tcx) {
+                    // Pass the caller location for `#[track_caller]`.
+                    let caller_location = fx.get_caller_location(span);
+                    call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
+                }
+
+                let func_ref = fx.get_function_ref(drop_fn);
+                fx.bcx.ins().call(func_ref, &call_args);
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
new file mode 100644
index 0000000..8e3682c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -0,0 +1,188 @@
+//! Argument passing
+
+use crate::prelude::*;
+
+pub(super) use EmptySinglePair::*;
+
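+/// How a single value is passed to or returned from a function: not at all (e.g. for ZSTs), as
+/// one or two scalar SSA values, or by reference through a pointer (with a known size for sized
+/// types, and as a pointer/metadata pair for unsized ones).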
+#[derive(Copy, Clone, Debug)]
+pub(super) enum PassMode {
+    NoPass,
+    ByVal(Type),
+    ByValPair(Type, Type),
+    ByRef { size: Option<Size> },
+}
+
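+/// A collection of zero, one or two values, matching the number of SSA values an argument
+/// occupies for a given [`PassMode`].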
+#[derive(Copy, Clone, Debug)]
+pub(super) enum EmptySinglePair<T> {
+    Empty,
+    Single(T),
+    Pair(T, T),
+}
+
+impl<T> EmptySinglePair<T> {
+    pub(super) fn into_iter(self) -> EmptySinglePairIter<T> {
+        EmptySinglePairIter(self)
+    }
+
+    pub(super) fn map<U>(self, mut f: impl FnMut(T) -> U) -> EmptySinglePair<U> {
+        match self {
+            Empty => Empty,
+            Single(v) => Single(f(v)),
+            Pair(a, b) => Pair(f(a), f(b)),
+        }
+    }
+}
+
+pub(super) struct EmptySinglePairIter<T>(EmptySinglePair<T>);
+
+impl<T> Iterator for EmptySinglePairIter<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        match std::mem::replace(&mut self.0, Empty) {
+            Empty => None,
+            Single(v) => Some(v),
+            Pair(a, b) => {
+                self.0 = Single(b);
+                Some(a)
+            }
+        }
+    }
+}
+
+impl<T: std::fmt::Debug> EmptySinglePair<T> {
+    pub(super) fn assert_single(self) -> T {
+        match self {
+            Single(v) => v,
+            _ => panic!("Called assert_single on {:?}", self),
+        }
+    }
+
+    pub(super) fn assert_pair(self) -> (T, T) {
+        match self {
+            Pair(a, b) => (a, b),
+            _ => panic!("Called assert_pair on {:?}", self),
+        }
+    }
+}
+
+impl PassMode {
+    pub(super) fn get_param_ty(self, tcx: TyCtxt<'_>) -> EmptySinglePair<Type> {
+        match self {
+            PassMode::NoPass => Empty,
+            PassMode::ByVal(clif_type) => Single(clif_type),
+            PassMode::ByValPair(a, b) => Pair(a, b),
+            PassMode::ByRef { size: Some(_) } => Single(pointer_ty(tcx)),
+            PassMode::ByRef { size: None } => Pair(pointer_ty(tcx), pointer_ty(tcx)),
+        }
+    }
+}
+
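+/// Pick the [`PassMode`] for a value of the given layout.
+///
+/// A rough, illustrative sketch of the mapping (not exhaustive): a scalar such as `u8` becomes
+/// `ByVal(types::I8)`, a scalar pair such as `(u32, u32)` or `&[u8]` becomes `ByValPair(..)`, a
+/// large sized struct becomes `ByRef { size: Some(..) }`, and an unsized value such as `[u8]` or
+/// `dyn Trait` becomes `ByRef { size: None }`.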
+pub(super) fn get_pass_mode<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> PassMode {
+    if layout.is_zst() {
+        // WARNING zst arguments must never be passed, as that will break CastKind::ClosureFnPointer
+        PassMode::NoPass
+    } else {
+        match &layout.abi {
+            Abi::Uninhabited => PassMode::NoPass,
+            Abi::Scalar(scalar) => PassMode::ByVal(scalar_to_clif_type(tcx, scalar.clone())),
+            Abi::ScalarPair(a, b) => {
+                let a = scalar_to_clif_type(tcx, a.clone());
+                let b = scalar_to_clif_type(tcx, b.clone());
+                if a == types::I128 && b == types::I128 {
+                    // Returning (i128, i128) by-val-pair would take 4 regs, while only 3 are
+                    // available on x86_64. Cranelift gets confused when too many return params
+                    // are used.
+                    PassMode::ByRef {
+                        size: Some(layout.size),
+                    }
+                } else {
+                    PassMode::ByValPair(a, b)
+                }
+            }
+
+            // FIXME implement Vector Abi in a cg_llvm compatible way
+            Abi::Vector { .. } => {
+                if let Some(vector_ty) = crate::intrinsics::clif_vector_type(tcx, layout) {
+                    PassMode::ByVal(vector_ty)
+                } else {
+                    PassMode::ByRef {
+                        size: Some(layout.size),
+                    }
+                }
+            }
+
+            Abi::Aggregate { sized: true } => PassMode::ByRef {
+                size: Some(layout.size),
+            },
+            Abi::Aggregate { sized: false } => PassMode::ByRef { size: None },
+        }
+    }
+}
+
+/// Get a set of values to be passed as function arguments.
+pub(super) fn adjust_arg_for_abi<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    arg: CValue<'tcx>,
+) -> EmptySinglePair<Value> {
+    match get_pass_mode(fx.tcx, arg.layout()) {
+        PassMode::NoPass => Empty,
+        PassMode::ByVal(_) => Single(arg.load_scalar(fx)),
+        PassMode::ByValPair(_, _) => {
+            let (a, b) = arg.load_scalar_pair(fx);
+            Pair(a, b)
+        }
+        PassMode::ByRef { size: _ } => match arg.force_stack(fx) {
+            (ptr, None) => Single(ptr.get_addr(fx)),
+            (ptr, Some(meta)) => Pair(ptr.get_addr(fx), meta),
+        },
+    }
+}
+
+/// Create a [`CValue`] containing the value of a function parameter, adding clif function parameters
+/// as necessary.
+pub(super) fn cvalue_for_param<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    start_block: Block,
+    #[cfg_attr(not(debug_assertions), allow(unused_variables))] local: Option<mir::Local>,
+    #[cfg_attr(not(debug_assertions), allow(unused_variables))] local_field: Option<usize>,
+    arg_ty: Ty<'tcx>,
+) -> Option<CValue<'tcx>> {
+    let layout = fx.layout_of(arg_ty);
+    let pass_mode = get_pass_mode(fx.tcx, layout);
+
+    if let PassMode::NoPass = pass_mode {
+        return None;
+    }
+
+    let clif_types = pass_mode.get_param_ty(fx.tcx);
+    let block_params = clif_types.map(|t| fx.bcx.append_block_param(start_block, t));
+
+    #[cfg(debug_assertions)]
+    crate::abi::comments::add_arg_comment(
+        fx,
+        "arg",
+        local,
+        local_field,
+        block_params,
+        pass_mode,
+        arg_ty,
+    );
+
+    match pass_mode {
+        PassMode::NoPass => unreachable!(),
+        PassMode::ByVal(_) => Some(CValue::by_val(block_params.assert_single(), layout)),
+        PassMode::ByValPair(_, _) => {
+            let (a, b) = block_params.assert_pair();
+            Some(CValue::by_val_pair(a, b, layout))
+        }
+        PassMode::ByRef { size: Some(_) } => Some(CValue::by_ref(
+            Pointer::new(block_params.assert_single()),
+            layout,
+        )),
+        PassMode::ByRef { size: None } => {
+            let (ptr, meta) = block_params.assert_pair();
+            Some(CValue::by_ref_unsized(Pointer::new(ptr), meta, layout))
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
new file mode 100644
index 0000000..f6d40c8
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -0,0 +1,130 @@
+//! Return value handling
+
+use crate::abi::pass_mode::*;
+use crate::prelude::*;
+
+fn return_layout<'a, 'tcx>(fx: &mut FunctionCx<'a, 'tcx, impl Module>) -> TyAndLayout<'tcx> {
+    fx.layout_of(fx.monomorphize(&fx.mir.local_decls[RETURN_PLACE].ty))
+}
+
+/// Returns whether the given type can be returned into an ssa var or needs to be returned on the stack.
+pub(crate) fn can_return_to_ssa_var<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    dest_layout: TyAndLayout<'tcx>,
+) -> bool {
+    match get_pass_mode(tcx, dest_layout) {
+        PassMode::NoPass | PassMode::ByVal(_) | PassMode::ByValPair(_, _) => true,
+        // FIXME Make it possible to return ByRef to an ssa var.
+        PassMode::ByRef { size: _ } => false,
+    }
+}
+
+/// Return a place where the return value of the current function can be written to. If necessary,
+/// this adds an extra parameter pointing to where the return value needs to be stored.
+pub(super) fn codegen_return_param<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    ssa_analyzed: &rustc_index::vec::IndexVec<Local, crate::analyze::SsaKind>,
+    start_block: Block,
+) -> CPlace<'tcx> {
+    let ret_layout = return_layout(fx);
+    let ret_pass_mode = get_pass_mode(fx.tcx, ret_layout);
+    let (ret_place, ret_param) = match ret_pass_mode {
+        PassMode::NoPass => (CPlace::no_place(ret_layout), Empty),
+        PassMode::ByVal(_) | PassMode::ByValPair(_, _) => {
+            let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
+            (
+                super::make_local_place(fx, RETURN_PLACE, ret_layout, is_ssa),
+                Empty,
+            )
+        }
+        PassMode::ByRef { size: Some(_) } => {
+            let ret_param = fx.bcx.append_block_param(start_block, fx.pointer_type);
+            (
+                CPlace::for_ptr(Pointer::new(ret_param), ret_layout),
+                Single(ret_param),
+            )
+        }
+        PassMode::ByRef { size: None } => todo!(),
+    };
+
+    #[cfg(not(debug_assertions))]
+    let _ = ret_param;
+
+    #[cfg(debug_assertions)]
+    crate::abi::comments::add_arg_comment(
+        fx,
+        "ret",
+        Some(RETURN_PLACE),
+        None,
+        ret_param,
+        ret_pass_mode,
+        ret_layout.ty,
+    );
+
+    ret_place
+}
+
+/// Invokes the closure, passing a value representing the return pointer if one is needed. When the
+/// closure returns, the call's return value(s), if any, are written to the correct place.
+pub(super) fn codegen_with_call_return_arg<'tcx, M: Module, T>(
+    fx: &mut FunctionCx<'_, 'tcx, M>,
+    fn_sig: FnSig<'tcx>,
+    ret_place: Option<CPlace<'tcx>>,
+    f: impl FnOnce(&mut FunctionCx<'_, 'tcx, M>, Option<Value>) -> (Inst, T),
+) -> (Inst, T) {
+    let ret_layout = fx.layout_of(fn_sig.output());
+
+    let output_pass_mode = get_pass_mode(fx.tcx, ret_layout);
+    let return_ptr = match output_pass_mode {
+        PassMode::NoPass => None,
+        PassMode::ByRef { size: Some(_) } => match ret_place {
+            Some(ret_place) => Some(ret_place.to_ptr().get_addr(fx)),
+            None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)), // FIXME allocate temp stack slot
+        },
+        PassMode::ByRef { size: None } => todo!(),
+        PassMode::ByVal(_) | PassMode::ByValPair(_, _) => None,
+    };
+
+    let (call_inst, meta) = f(fx, return_ptr);
+
+    match output_pass_mode {
+        PassMode::NoPass => {}
+        PassMode::ByVal(_) => {
+            if let Some(ret_place) = ret_place {
+                let ret_val = fx.bcx.inst_results(call_inst)[0];
+                ret_place.write_cvalue(fx, CValue::by_val(ret_val, ret_layout));
+            }
+        }
+        PassMode::ByValPair(_, _) => {
+            if let Some(ret_place) = ret_place {
+                let ret_val_a = fx.bcx.inst_results(call_inst)[0];
+                let ret_val_b = fx.bcx.inst_results(call_inst)[1];
+                ret_place.write_cvalue(fx, CValue::by_val_pair(ret_val_a, ret_val_b, ret_layout));
+            }
+        }
+        PassMode::ByRef { size: Some(_) } => {}
+        PassMode::ByRef { size: None } => todo!(),
+    }
+
+    (call_inst, meta)
+}
+
+/// Codegen a return instruction with the right return value(s) if any.
+pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, impl Module>) {
+    match get_pass_mode(fx.tcx, return_layout(fx)) {
+        PassMode::NoPass | PassMode::ByRef { size: Some(_) } => {
+            fx.bcx.ins().return_(&[]);
+        }
+        PassMode::ByRef { size: None } => todo!(),
+        PassMode::ByVal(_) => {
+            let place = fx.get_local_place(RETURN_PLACE);
+            let ret_val = place.to_cvalue(fx).load_scalar(fx);
+            fx.bcx.ins().return_(&[ret_val]);
+        }
+        PassMode::ByValPair(_, _) => {
+            let place = fx.get_local_place(RETURN_PLACE);
+            let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
+            fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
new file mode 100644
index 0000000..6c59165
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -0,0 +1,153 @@
+//! Allocator shim
+// Adapted from rustc
+
+use crate::prelude::*;
+
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_span::symbol::sym;
+
+/// Returns whether an allocator shim was created
+pub(crate) fn codegen(
+    tcx: TyCtxt<'_>,
+    module: &mut impl Module,
+    unwind_context: &mut UnwindContext<'_>,
+) -> bool {
+    let any_dynamic_crate = tcx.dependency_formats(LOCAL_CRATE).iter().any(|(_, list)| {
+        use rustc_middle::middle::dependency_format::Linkage;
+        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+    });
+    if any_dynamic_crate {
+        false
+    } else if let Some(kind) = tcx.allocator_kind() {
+        codegen_inner(module, unwind_context, kind);
+        true
+    } else {
+        false
+    }
+}
+
+fn codegen_inner(
+    module: &mut impl Module,
+    unwind_context: &mut UnwindContext<'_>,
+    kind: AllocatorKind,
+) {
+    let usize_ty = module.target_config().pointer_type();
+
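+    // For every allocator method, generate a `__rust_*` shim (e.g. `__rust_alloc`) that simply
+    // forwards its arguments to the function provided by the selected allocator kind (presumably
+    // `__rdl_*` for the default allocator or `__rg_*` for a `#[global_allocator]`).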
+    for method in ALLOCATOR_METHODS {
+        let mut arg_tys = Vec::with_capacity(method.inputs.len());
+        for ty in method.inputs.iter() {
+            match *ty {
+                AllocatorTy::Layout => {
+                    arg_tys.push(usize_ty); // size
+                    arg_tys.push(usize_ty); // align
+                }
+                AllocatorTy::Ptr => arg_tys.push(usize_ty),
+                AllocatorTy::Usize => arg_tys.push(usize_ty),
+
+                AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+            }
+        }
+        let output = match method.output {
+            AllocatorTy::ResultPtr => Some(usize_ty),
+            AllocatorTy::Unit => None,
+
+            AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+                panic!("invalid allocator output")
+            }
+        };
+
+        let sig = Signature {
+            call_conv: CallConv::triple_default(module.isa().triple()),
+            params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
+            returns: output.into_iter().map(AbiParam::new).collect(),
+        };
+
+        let caller_name = format!("__rust_{}", method.name);
+        let callee_name = kind.fn_name(method.name);
+        //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
+
+        let func_id = module
+            .declare_function(&caller_name, Linkage::Export, &sig)
+            .unwrap();
+
+        let callee_func_id = module
+            .declare_function(&callee_name, Linkage::Import, &sig)
+            .unwrap();
+
+        let mut ctx = Context::new();
+        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
+        {
+            let mut func_ctx = FunctionBuilderContext::new();
+            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+            let block = bcx.create_block();
+            bcx.switch_to_block(block);
+            let args = arg_tys
+                .into_iter()
+                .map(|ty| bcx.append_block_param(block, ty))
+                .collect::<Vec<Value>>();
+
+            let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+            let call_inst = bcx.ins().call(callee_func_ref, &args);
+            let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
+
+            bcx.ins().return_(&results);
+            bcx.seal_all_blocks();
+            bcx.finalize();
+        }
+        module
+            .define_function(
+                func_id,
+                &mut ctx,
+                &mut cranelift_codegen::binemit::NullTrapSink {},
+            )
+            .unwrap();
+        unwind_context.add_function(func_id, &ctx, module.isa());
+    }
+
+    let sig = Signature {
+        call_conv: CallConv::triple_default(module.isa().triple()),
+        params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
+        returns: vec![],
+    };
+
+    let callee_name = kind.fn_name(sym::oom);
+    //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
+
+    let func_id = module
+        .declare_function("__rust_alloc_error_handler", Linkage::Export, &sig)
+        .unwrap();
+
+    let callee_func_id = module
+        .declare_function(&callee_name, Linkage::Import, &sig)
+        .unwrap();
+
+    let mut ctx = Context::new();
+    ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+    {
+        let mut func_ctx = FunctionBuilderContext::new();
+        let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+        let block = bcx.create_block();
+        bcx.switch_to_block(block);
+        let args = (&[usize_ty, usize_ty])
+            .iter()
+            .map(|&ty| bcx.append_block_param(block, ty))
+            .collect::<Vec<Value>>();
+
+        let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+        bcx.ins().call(callee_func_ref, &args);
+
+        bcx.ins().trap(TrapCode::UnreachableCodeReached);
+        bcx.seal_all_blocks();
+        bcx.finalize();
+    }
+    module
+        .define_function(
+            func_id,
+            &mut ctx,
+            &mut cranelift_codegen::binemit::NullTrapSink {},
+        )
+        .unwrap();
+    unwind_context.add_function(func_id, &ctx, module.isa());
+}
diff --git a/compiler/rustc_codegen_cranelift/src/analyze.rs b/compiler/rustc_codegen_cranelift/src/analyze.rs
new file mode 100644
index 0000000..fd25b19
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/analyze.rs
@@ -0,0 +1,61 @@
+//! SSA analysis
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::StatementKind::*;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) enum SsaKind {
+    NotSsa,
+    Ssa,
+}
+
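+/// Determine for every local whether it can be kept in an SSA variable. A local qualifies if it
+/// has a clif scalar or scalar pair type, its address is never taken (`Ref`/`AddressOf`), and it
+/// is never used as a call destination that must be returned by reference.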
+pub(crate) fn analyze(fx: &FunctionCx<'_, '_, impl Module>) -> IndexVec<Local, SsaKind> {
+    let mut flag_map = fx
+        .mir
+        .local_decls
+        .iter()
+        .map(|local_decl| {
+            let ty = fx.monomorphize(&local_decl.ty);
+            if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
+                SsaKind::Ssa
+            } else {
+                SsaKind::NotSsa
+            }
+        })
+        .collect::<IndexVec<Local, SsaKind>>();
+
+    for bb in fx.mir.basic_blocks().iter() {
+        for stmt in bb.statements.iter() {
+            match &stmt.kind {
+                Assign(place_and_rval) => match &place_and_rval.1 {
+                    Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+                        not_ssa(&mut flag_map, place.local)
+                    }
+                    _ => {}
+                },
+                _ => {}
+            }
+        }
+
+        match &bb.terminator().kind {
+            TerminatorKind::Call { destination, .. } => {
+                if let Some((dest_place, _dest_bb)) = destination {
+                    let dest_layout = fx
+                        .layout_of(fx.monomorphize(&dest_place.ty(&fx.mir.local_decls, fx.tcx).ty));
+                    if !crate::abi::can_return_to_ssa_var(fx.tcx, dest_layout) {
+                        not_ssa(&mut flag_map, dest_place.local)
+                    }
+                }
+            }
+            _ => {}
+        }
+    }
+
+    flag_map
+}
+
+fn not_ssa(flag_map: &mut IndexVec<Local, SsaKind>, local: Local) {
+    flag_map[local] = SsaKind::NotSsa;
+}
diff --git a/compiler/rustc_codegen_cranelift/src/archive.rs b/compiler/rustc_codegen_cranelift/src/archive.rs
new file mode 100644
index 0000000..daf9fa6
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/archive.rs
@@ -0,0 +1,309 @@
+//! Creation of ar archives, e.g. for the lib and staticlib crate types
+
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::path::{Path, PathBuf};
+
+use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
+use rustc_codegen_ssa::METADATA_FILENAME;
+use rustc_session::Session;
+
+use object::{Object, SymbolKind};
+
+#[derive(Debug)]
+enum ArchiveEntry {
+    FromArchive {
+        archive_index: usize,
+        entry_index: usize,
+    },
+    File(PathBuf),
+}
+
+pub(crate) struct ArArchiveBuilder<'a> {
+    sess: &'a Session,
+    dst: PathBuf,
+    lib_search_paths: Vec<PathBuf>,
+    use_gnu_style_archive: bool,
+    no_builtin_ranlib: bool,
+
+    src_archives: Vec<(PathBuf, ar::Archive<File>)>,
+    // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
+    // the end of an archive for linkers to not get confused.
+    entries: Vec<(String, ArchiveEntry)>,
+    update_symbols: bool,
+}
+
+impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
+    fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self {
+        use rustc_codegen_ssa::back::link::archive_search_paths;
+
+        let (src_archives, entries) = if let Some(input) = input {
+            let mut archive = ar::Archive::new(File::open(input).unwrap());
+            let mut entries = Vec::new();
+
+            let mut i = 0;
+            while let Some(entry) = archive.next_entry() {
+                let entry = entry.unwrap();
+                entries.push((
+                    String::from_utf8(entry.header().identifier().to_vec()).unwrap(),
+                    ArchiveEntry::FromArchive {
+                        archive_index: 0,
+                        entry_index: i,
+                    },
+                ));
+                i += 1;
+            }
+
+            (vec![(input.to_owned(), archive)], entries)
+        } else {
+            (vec![], Vec::new())
+        };
+
+        ArArchiveBuilder {
+            sess,
+            dst: output.to_path_buf(),
+            lib_search_paths: archive_search_paths(sess),
+            use_gnu_style_archive: sess.target.archive_format == "gnu",
+            // FIXME fix builtin ranlib on macOS
+            no_builtin_ranlib: sess.target.is_like_osx,
+
+            src_archives,
+            entries,
+            update_symbols: false,
+        }
+    }
+
+    fn src_files(&mut self) -> Vec<String> {
+        self.entries.iter().map(|(name, _)| name.clone()).collect()
+    }
+
+    fn remove_file(&mut self, name: &str) {
+        let index = self
+            .entries
+            .iter()
+            .position(|(entry_name, _)| entry_name == name)
+            .expect("Tried to remove file not existing in src archive");
+        self.entries.remove(index);
+    }
+
+    fn add_file(&mut self, file: &Path) {
+        self.entries.push((
+            file.file_name().unwrap().to_str().unwrap().to_string(),
+            ArchiveEntry::File(file.to_owned()),
+        ));
+    }
+
+    fn add_native_library(&mut self, name: rustc_span::symbol::Symbol) {
+        let location = find_library(name, &self.lib_search_paths, self.sess);
+        self.add_archive(location.clone(), |_| false)
+            .unwrap_or_else(|e| {
+                panic!(
+                    "failed to add native library {}: {}",
+                    location.to_string_lossy(),
+                    e
+                );
+            });
+    }
+
+    fn add_rlib(
+        &mut self,
+        rlib: &Path,
+        name: &str,
+        lto: bool,
+        skip_objects: bool,
+    ) -> std::io::Result<()> {
+        let obj_start = name.to_owned();
+
+        self.add_archive(rlib.to_owned(), move |fname: &str| {
+            // Ignore metadata files, no matter the name.
+            if fname == METADATA_FILENAME {
+                return true;
+            }
+
+            // Don't include Rust objects if LTO is enabled
+            if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") {
+                return true;
+            }
+
+            // Otherwise if this is *not* a rust object and we're skipping
+            // objects then skip this file
+            if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
+                return true;
+            }
+
+            // ok, don't skip this
+            false
+        })
+    }
+
+    fn update_symbols(&mut self) {
+        self.update_symbols = true;
+    }
+
+    fn build(mut self) {
+        enum BuilderKind {
+            Bsd(ar::Builder<File>),
+            Gnu(ar::GnuBuilder<File>),
+        }
+
+        let sess = self.sess;
+
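+        // First read every entry into memory and, unless the system ranlib is used, collect the
+        // defined text/data/TLS symbols of each object file for the archive's symbol table.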
+        let mut symbol_table = BTreeMap::new();
+
+        let mut entries = Vec::new();
+
+        for (entry_name, entry) in self.entries {
+            // FIXME only read the symbol table of the object files to avoid having to keep all
+            // object files in memory at once, or read them twice.
+            let data = match entry {
+                ArchiveEntry::FromArchive {
+                    archive_index,
+                    entry_index,
+                } => {
+                    // FIXME read symbols from symtab
+                    use std::io::Read;
+                    let (ref _src_archive_path, ref mut src_archive) =
+                        self.src_archives[archive_index];
+                    let mut entry = src_archive.jump_to_entry(entry_index).unwrap();
+                    let mut data = Vec::new();
+                    entry.read_to_end(&mut data).unwrap();
+                    data
+                }
+                ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
+                    sess.fatal(&format!(
+                        "error while reading object file during archive building: {}",
+                        err
+                    ));
+                }),
+            };
+
+            if !self.no_builtin_ranlib {
+                match object::File::parse(&data) {
+                    Ok(object) => {
+                        symbol_table.insert(
+                            entry_name.as_bytes().to_vec(),
+                            object
+                                .symbols()
+                                .filter_map(|(_index, symbol)| {
+                                    if symbol.is_undefined()
+                                        || symbol.is_local()
+                                        || symbol.kind() != SymbolKind::Data
+                                            && symbol.kind() != SymbolKind::Text
+                                            && symbol.kind() != SymbolKind::Tls
+                                    {
+                                        None
+                                    } else {
+                                        symbol.name().map(|name| name.as_bytes().to_vec())
+                                    }
+                                })
+                                .collect::<Vec<_>>(),
+                        );
+                    }
+                    Err(err) => {
+                        let err = err.to_string();
+                        if err == "Unknown file magic" {
+                            // Not an object file; skip it.
+                        } else {
+                            sess.fatal(&format!(
+                                "error parsing `{}` during archive creation: {}",
+                                entry_name, err
+                            ));
+                        }
+                    }
+                }
+            }
+
+            entries.push((entry_name, data));
+        }
+
+        let mut builder = if self.use_gnu_style_archive {
+            BuilderKind::Gnu(
+                ar::GnuBuilder::new(
+                    File::create(&self.dst).unwrap_or_else(|err| {
+                        sess.fatal(&format!(
+                            "error opening destination during archive building: {}",
+                            err
+                        ));
+                    }),
+                    entries
+                        .iter()
+                        .map(|(name, _)| name.as_bytes().to_vec())
+                        .collect(),
+                    ar::GnuSymbolTableFormat::Size32,
+                    symbol_table,
+                )
+                .unwrap(),
+            )
+        } else {
+            BuilderKind::Bsd(
+                ar::Builder::new(
+                    File::create(&self.dst).unwrap_or_else(|err| {
+                        sess.fatal(&format!(
+                            "error opening destination during archive building: {}",
+                            err
+                        ));
+                    }),
+                    symbol_table,
+                )
+                .unwrap(),
+            )
+        };
+
+        // Add all files
+        for (entry_name, data) in entries.into_iter() {
+            let header = ar::Header::new(entry_name.into_bytes(), data.len() as u64);
+            match builder {
+                BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+                BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+            }
+        }
+
+        // Finalize archive
+        std::mem::drop(builder);
+
+        if self.no_builtin_ranlib {
+            let ranlib = crate::toolchain::get_toolchain_binary(self.sess, "ranlib");
+
+            // Run ranlib to be able to link the archive
+            let status = std::process::Command::new(ranlib)
+                .arg(self.dst)
+                .status()
+                .expect("Couldn't run ranlib");
+
+            if !status.success() {
+                self.sess
+                    .fatal(&format!("Ranlib exited with code {:?}", status.code()));
+            }
+        }
+    }
+}
+
+impl<'a> ArArchiveBuilder<'a> {
+    fn add_archive<F>(&mut self, archive_path: PathBuf, mut skip: F) -> std::io::Result<()>
+    where
+        F: FnMut(&str) -> bool + 'static,
+    {
+        let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?);
+        let archive_index = self.src_archives.len();
+
+        let mut i = 0;
+        while let Some(entry) = archive.next_entry() {
+            let entry = entry?;
+            let file_name = String::from_utf8(entry.header().identifier().to_vec())
+                .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
+            if !skip(&file_name) {
+                self.entries.push((
+                    file_name,
+                    ArchiveEntry::FromArchive {
+                        archive_index,
+                        entry_index: i,
+                    },
+                ));
+            }
+            i += 1;
+        }
+
+        self.src_archives.push((archive_path, archive));
+        Ok(())
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/atomic_shim.rs b/compiler/rustc_codegen_cranelift/src/atomic_shim.rs
new file mode 100644
index 0000000..2f0157c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/atomic_shim.rs
@@ -0,0 +1,186 @@
+//! Atomic intrinsics are implemented using a global lock for now, as Cranelift doesn't support
+//! atomic operations yet.
+
+// FIXME implement atomic instructions in Cranelift.
+
+use crate::prelude::*;
+
+#[cfg(all(feature = "jit", unix))]
+#[no_mangle]
+static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t =
+    libc::PTHREAD_MUTEX_INITIALIZER;
+
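+/// Emit code that initializes the global atomic mutex. When compiling ahead of time this defines
+/// the `__cg_clif_global_atomic_mutex` data object and calls `pthread_mutex_init` on it; when
+/// JITing, the statically initialized mutex defined above in the cg_clif dylib is used instead.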
+pub(crate) fn init_global_lock(
+    module: &mut impl Module,
+    bcx: &mut FunctionBuilder<'_>,
+    use_jit: bool,
+) {
+    if use_jit {
+        // When using JIT, dylibs won't find the __cg_clif_global_atomic_mutex data object defined here,
+        // so instead we define it in the cg_clif dylib.
+
+        return;
+    }
+
+    let mut data_ctx = DataContext::new();
+    data_ctx.define_zeroinit(1024); // 1024 bytes should be big enough on all platforms.
+    data_ctx.set_align(16);
+    let atomic_mutex = module
+        .declare_data(
+            "__cg_clif_global_atomic_mutex",
+            Linkage::Export,
+            true,
+            false,
+        )
+        .unwrap();
+    module.define_data(atomic_mutex, &data_ctx).unwrap();
+
+    let pthread_mutex_init = module
+        .declare_function(
+            "pthread_mutex_init",
+            Linkage::Import,
+            &cranelift_codegen::ir::Signature {
+                call_conv: module.target_config().default_call_conv,
+                params: vec![
+                    AbiParam::new(
+                        module.target_config().pointer_type(), /* *mut pthread_mutex_t */
+                    ),
+                    AbiParam::new(
+                        module.target_config().pointer_type(), /* *const pthread_mutex_attr_t */
+                    ),
+                ],
+                returns: vec![AbiParam::new(types::I32 /* c_int */)],
+            },
+        )
+        .unwrap();
+
+    let pthread_mutex_init = module.declare_func_in_func(pthread_mutex_init, bcx.func);
+
+    let atomic_mutex = module.declare_data_in_func(atomic_mutex, bcx.func);
+    let atomic_mutex = bcx
+        .ins()
+        .global_value(module.target_config().pointer_type(), atomic_mutex);
+
+    let nullptr = bcx.ins().iconst(module.target_config().pointer_type(), 0);
+
+    bcx.ins().call(pthread_mutex_init, &[atomic_mutex, nullptr]);
+}
+
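+/// Create an exported constructor function with the given name that runs [`init_global_lock`];
+/// it is presumably registered in `.init_array` (see `AddConstructor` in backend.rs) so the mutex
+/// is initialized before `main` runs.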
+pub(crate) fn init_global_lock_constructor(
+    module: &mut impl Module,
+    constructor_name: &str,
+) -> FuncId {
+    let sig = Signature::new(CallConv::SystemV);
+    let init_func_id = module
+        .declare_function(constructor_name, Linkage::Export, &sig)
+        .unwrap();
+
+    let mut ctx = Context::new();
+    ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+    {
+        let mut func_ctx = FunctionBuilderContext::new();
+        let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+        let block = bcx.create_block();
+        bcx.switch_to_block(block);
+
+        crate::atomic_shim::init_global_lock(module, &mut bcx, false);
+
+        bcx.ins().return_(&[]);
+        bcx.seal_all_blocks();
+        bcx.finalize();
+    }
+    module
+        .define_function(
+            init_func_id,
+            &mut ctx,
+            &mut cranelift_codegen::binemit::NullTrapSink {},
+        )
+        .unwrap();
+
+    init_func_id
+}
+
+pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Module>) {
+    let atomic_mutex = fx
+        .cx
+        .module
+        .declare_data(
+            "__cg_clif_global_atomic_mutex",
+            Linkage::Import,
+            true,
+            false,
+        )
+        .unwrap();
+
+    let pthread_mutex_lock = fx
+        .cx
+        .module
+        .declare_function(
+            "pthread_mutex_lock",
+            Linkage::Import,
+            &cranelift_codegen::ir::Signature {
+                call_conv: fx.cx.module.target_config().default_call_conv,
+                params: vec![AbiParam::new(
+                    fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
+                )],
+                returns: vec![AbiParam::new(types::I32 /* c_int */)],
+            },
+        )
+        .unwrap();
+
+    let pthread_mutex_lock = fx
+        .cx
+        .module
+        .declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
+
+    let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
+    let atomic_mutex = fx
+        .bcx
+        .ins()
+        .global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
+
+    fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]);
+}
+
+pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Module>) {
+    let atomic_mutex = fx
+        .cx
+        .module
+        .declare_data(
+            "__cg_clif_global_atomic_mutex",
+            Linkage::Import,
+            true,
+            false,
+        )
+        .unwrap();
+
+    let pthread_mutex_unlock = fx
+        .cx
+        .module
+        .declare_function(
+            "pthread_mutex_unlock",
+            Linkage::Import,
+            &cranelift_codegen::ir::Signature {
+                call_conv: fx.cx.module.target_config().default_call_conv,
+                params: vec![AbiParam::new(
+                    fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
+                )],
+                returns: vec![AbiParam::new(types::I32 /* c_int */)],
+            },
+        )
+        .unwrap();
+
+    let pthread_mutex_unlock = fx
+        .cx
+        .module
+        .declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
+
+    let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
+    let atomic_mutex = fx
+        .bcx
+        .ins()
+        .global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
+
+    fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/backend.rs b/compiler/rustc_codegen_cranelift/src/backend.rs
new file mode 100644
index 0000000..9e32259
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/backend.rs
@@ -0,0 +1,206 @@
+//! Abstraction around the object writing crate
+
+use std::convert::{TryFrom, TryInto};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_session::Session;
+
+use cranelift_module::FuncId;
+
+use object::write::*;
+use object::{RelocationEncoding, RelocationKind, SectionKind, SymbolFlags};
+
+use cranelift_object::{ObjectBuilder, ObjectModule, ObjectProduct};
+
+use gimli::SectionId;
+
+use crate::debuginfo::{DebugReloc, DebugRelocName};
+
+pub(crate) trait WriteMetadata {
+    fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, is_like_osx: bool);
+}
+
+impl WriteMetadata for object::write::Object {
+    fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) {
+        let segment = self
+            .segment_name(object::write::StandardSegment::Data)
+            .to_vec();
+        let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data);
+        let offset = self.append_section_data(section_id, &data, 1);
+        // For MachO and probably PE this is necessary to prevent the linker from throwing away the
+        // .rustc section. For ELF this isn't necessary, but it doesn't hurt either.
+        self.add_symbol(object::write::Symbol {
+            name: symbol_name.into_bytes(),
+            value: offset,
+            size: data.len() as u64,
+            kind: object::SymbolKind::Data,
+            scope: object::SymbolScope::Dynamic,
+            weak: false,
+            section: SymbolSection::Section(section_id),
+            flags: SymbolFlags::None,
+        });
+    }
+}
+
+pub(crate) trait WriteDebugInfo {
+    type SectionId: Copy;
+
+    fn add_debug_section(&mut self, name: SectionId, data: Vec<u8>) -> Self::SectionId;
+    fn add_debug_reloc(
+        &mut self,
+        section_map: &FxHashMap<SectionId, Self::SectionId>,
+        from: &Self::SectionId,
+        reloc: &DebugReloc,
+    );
+}
+
+impl WriteDebugInfo for ObjectProduct {
+    type SectionId = (object::write::SectionId, object::write::SymbolId);
+
+    fn add_debug_section(
+        &mut self,
+        id: SectionId,
+        data: Vec<u8>,
+    ) -> (object::write::SectionId, object::write::SymbolId) {
+        let name = if self.object.format() == object::BinaryFormat::MachO {
+            id.name().replace('.', "__") // machO expects __debug_info instead of .debug_info
+        } else {
+            id.name().to_string()
+        }
+        .into_bytes();
+
+        let segment = self.object.segment_name(StandardSegment::Debug).to_vec();
+        // FIXME use SHT_X86_64_UNWIND for .eh_frame
+        let section_id = self.object.add_section(
+            segment,
+            name,
+            if id == SectionId::EhFrame {
+                SectionKind::ReadOnlyData
+            } else {
+                SectionKind::Debug
+            },
+        );
+        self.object
+            .section_mut(section_id)
+            .set_data(data, if id == SectionId::EhFrame { 8 } else { 1 });
+        let symbol_id = self.object.section_symbol(section_id);
+        (section_id, symbol_id)
+    }
+
+    fn add_debug_reloc(
+        &mut self,
+        section_map: &FxHashMap<SectionId, Self::SectionId>,
+        from: &Self::SectionId,
+        reloc: &DebugReloc,
+    ) {
+        let (symbol, symbol_offset) = match reloc.name {
+            DebugRelocName::Section(id) => (section_map.get(&id).unwrap().1, 0),
+            DebugRelocName::Symbol(id) => {
+                let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap()));
+                self.object
+                    .symbol_section_and_offset(symbol_id)
+                    .expect("Debug reloc for undef sym???")
+            }
+        };
+        self.object
+            .add_relocation(
+                from.0,
+                Relocation {
+                    offset: u64::from(reloc.offset),
+                    symbol,
+                    kind: reloc.kind,
+                    encoding: RelocationEncoding::Generic,
+                    size: reloc.size * 8,
+                    addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
+                },
+            )
+            .unwrap();
+    }
+}
+
+// FIXME remove once atomic instructions are implemented in Cranelift.
+pub(crate) trait AddConstructor {
+    fn add_constructor(&mut self, func_id: FuncId);
+}
+
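+// Registering a constructor works by emitting a pointer-sized, zero-filled entry in the
+// `.init_array` section and adding an absolute relocation against the function symbol, so the
+// loader calls the function before `main` runs.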
+impl AddConstructor for ObjectProduct {
+    fn add_constructor(&mut self, func_id: FuncId) {
+        let symbol = self.function_symbol(func_id);
+        let segment = self
+            .object
+            .segment_name(object::write::StandardSegment::Data);
+        let init_array_section =
+            self.object
+                .add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data);
+        let address_size = self
+            .object
+            .architecture()
+            .address_size()
+            .expect("address_size must be known")
+            .bytes();
+        self.object.append_section_data(
+            init_array_section,
+            &std::iter::repeat(0)
+                .take(address_size.into())
+                .collect::<Vec<u8>>(),
+            8,
+        );
+        self.object
+            .add_relocation(
+                init_array_section,
+                object::write::Relocation {
+                    offset: 0,
+                    size: address_size * 8,
+                    kind: RelocationKind::Absolute,
+                    encoding: RelocationEncoding::Generic,
+                    symbol,
+                    addend: 0,
+                },
+            )
+            .unwrap();
+    }
+}
+
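+/// Build a fresh object file for the current target, hand it to `f` to be filled (e.g. with the
+/// `.rustc` metadata section above), and return the serialized bytes.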
+pub(crate) fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object)) -> Vec<u8> {
+    let triple = crate::build_isa(sess, true).triple().clone();
+
+    let binary_format = match triple.binary_format {
+        target_lexicon::BinaryFormat::Elf => object::BinaryFormat::Elf,
+        target_lexicon::BinaryFormat::Coff => object::BinaryFormat::Coff,
+        target_lexicon::BinaryFormat::Macho => object::BinaryFormat::MachO,
+        binary_format => sess.fatal(&format!("binary format {} is unsupported", binary_format)),
+    };
+    let architecture = match triple.architecture {
+        target_lexicon::Architecture::X86_32(_) => object::Architecture::I386,
+        target_lexicon::Architecture::X86_64 => object::Architecture::X86_64,
+        target_lexicon::Architecture::Arm(_) => object::Architecture::Arm,
+        target_lexicon::Architecture::Aarch64(_) => object::Architecture::Aarch64,
+        architecture => sess.fatal(&format!(
+            "target architecture {:?} is unsupported",
+            architecture,
+        )),
+    };
+    let endian = match triple.endianness().unwrap() {
+        target_lexicon::Endianness::Little => object::Endianness::Little,
+        target_lexicon::Endianness::Big => object::Endianness::Big,
+    };
+
+    let mut metadata_object = object::write::Object::new(binary_format, architecture, endian);
+    metadata_object.add_file_symbol(name.as_bytes().to_vec());
+    f(&mut metadata_object);
+    metadata_object.write().unwrap()
+}
+
+pub(crate) fn make_module(sess: &Session, name: String) -> ObjectModule {
+    let mut builder = ObjectBuilder::new(
+        crate::build_isa(sess, true),
+        name + ".o",
+        cranelift_module::default_libcall_names(),
+    )
+    .unwrap();
+    // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm binary size
+    // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
+    // can easily double the amount of time necessary to perform linking.
+    builder.per_function_section(sess.opts.debugging_opts.function_sections.unwrap_or(false));
+    ObjectModule::new(builder)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
new file mode 100644
index 0000000..bfe5514
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -0,0 +1,1018 @@
+//! Codegen of a single function
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
+
+use crate::prelude::*;
+
+pub(crate) fn codegen_fn<'tcx>(
+    cx: &mut crate::CodegenCx<'tcx, impl Module>,
+    instance: Instance<'tcx>,
+    linkage: Linkage,
+) {
+    let tcx = cx.tcx;
+
+    let mir = tcx.instance_mir(instance.def);
+
+    // Declare function
+    let (name, sig) = get_function_name_and_sig(tcx, cx.module.isa().triple(), instance, false);
+    let func_id = cx.module.declare_function(&name, linkage, &sig).unwrap();
+
+    cx.cached_context.clear();
+
+    // Make the FunctionBuilder
+    let mut func_ctx = FunctionBuilderContext::new();
+    let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
+    func.name = ExternalName::user(0, func_id.as_u32());
+    func.signature = sig;
+    func.collect_debug_info();
+
+    let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
+
+    // Predefine blocks
+    let start_block = bcx.create_block();
+    let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len())
+        .map(|_| bcx.create_block())
+        .collect();
+
+    // Make FunctionCx
+    let pointer_type = cx.module.target_config().pointer_type();
+    let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+
+    let mut fx = FunctionCx {
+        cx,
+        tcx,
+        pointer_type,
+
+        instance,
+        mir,
+
+        bcx,
+        block_map,
+        local_map: IndexVec::with_capacity(mir.local_decls.len()),
+        caller_location: None, // set by `codegen_fn_prelude`
+        cold_blocks: EntitySet::new(),
+
+        clif_comments,
+        source_info_set: indexmap::IndexSet::new(),
+        next_ssa_var: 0,
+
+        inline_asm_index: 0,
+    };
+
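+    // If any argument has an uninhabited type the function can never actually be called, so its
+    // body is replaced by a single trap below.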
+    let arg_uninhabited = fx.mir.args_iter().any(|arg| {
+        fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty))
+            .abi
+            .is_uninhabited()
+    });
+
+    if arg_uninhabited {
+        fx.bcx
+            .append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+        crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
+    } else {
+        tcx.sess.time("codegen clif ir", || {
+            tcx.sess.time("codegen prelude", || {
+                crate::abi::codegen_fn_prelude(&mut fx, start_block)
+            });
+            codegen_fn_content(&mut fx);
+        });
+    }
+
+    // Recover all necessary data from fx, as accessing func afterwards will prevent any further access to fx.
+    let instance = fx.instance;
+    let mut clif_comments = fx.clif_comments;
+    let source_info_set = fx.source_info_set;
+    let local_map = fx.local_map;
+    let cold_blocks = fx.cold_blocks;
+
+    // Store function in context
+    let context = &mut cx.cached_context;
+    context.func = func;
+
+    crate::pretty_clif::write_clif_file(tcx, "unopt", None, instance, &context, &clif_comments);
+
+    // Verify function
+    verify_func(tcx, &clif_comments, &context.func);
+
+    // Perform rust specific optimizations
+    tcx.sess.time("optimize clif ir", || {
+        crate::optimize::optimize_function(
+            tcx,
+            instance,
+            context,
+            &cold_blocks,
+            &mut clif_comments,
+        );
+    });
+
+    // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
+    // instruction, which doesn't have an encoding.
+    context.compute_cfg();
+    context.compute_domtree();
+    context.eliminate_unreachable_code(cx.module.isa()).unwrap();
+    context.dce(cx.module.isa()).unwrap();
+
+    // Define function
+    let module = &mut cx.module;
+    tcx.sess.time("define function", || {
+        module
+            .define_function(
+                func_id,
+                context,
+                &mut cranelift_codegen::binemit::NullTrapSink {},
+            )
+            .unwrap()
+    });
+
+    // Write optimized function to file for debugging
+    crate::pretty_clif::write_clif_file(
+        tcx,
+        "opt",
+        Some(cx.module.isa()),
+        instance,
+        &context,
+        &clif_comments,
+    );
+
+    // Define debuginfo for function
+    let isa = cx.module.isa();
+    let debug_context = &mut cx.debug_context;
+    let unwind_context = &mut cx.unwind_context;
+    tcx.sess.time("generate debug info", || {
+        if let Some(debug_context) = debug_context {
+            debug_context.define_function(
+                instance,
+                func_id,
+                &name,
+                isa,
+                context,
+                &source_info_set,
+                local_map,
+            );
+        }
+        unwind_context.add_function(func_id, &context, isa);
+    });
+
+    // Clear context to make it usable for the next function
+    context.clear();
+}
+
+pub(crate) fn verify_func(
+    tcx: TyCtxt<'_>,
+    writer: &crate::pretty_clif::CommentWriter,
+    func: &Function,
+) {
+    tcx.sess.time("verify clif ir", || {
+        let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
+        match cranelift_codegen::verify_function(&func, &flags) {
+            Ok(_) => {}
+            Err(err) => {
+                tcx.sess.err(&format!("{:?}", err));
+                let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
+                    &func,
+                    None,
+                    Some(Box::new(writer)),
+                    err,
+                );
+                tcx.sess
+                    .fatal(&format!("cranelift verify error:\n{}", pretty_error));
+            }
+        }
+    });
+}
+
+fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Module>) {
+    crate::constant::check_constants(fx);
+
+    for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
+        let block = fx.get_block(bb);
+        fx.bcx.switch_to_block(block);
+
+        if bb_data.is_cleanup {
+            // Unwinding after panicking is not supported
+            continue;
+
+            // FIXME once unwinding is supported uncomment next lines
+            // // Unwinding is unlikely to happen, so mark cleanup block's as cold.
+            // fx.cold_blocks.insert(block);
+        }
+
+        fx.bcx.ins().nop();
+        for stmt in &bb_data.statements {
+            fx.set_debug_loc(stmt.source_info);
+            codegen_stmt(fx, block, stmt);
+        }
+
+        #[cfg(debug_assertions)]
+        {
+            let mut terminator_head = "\n".to_string();
+            bb_data
+                .terminator()
+                .kind
+                .fmt_head(&mut terminator_head)
+                .unwrap();
+            let inst = fx.bcx.func.layout.last_inst(block).unwrap();
+            fx.add_comment(inst, terminator_head);
+        }
+
+        fx.set_debug_loc(bb_data.terminator().source_info);
+
+        match &bb_data.terminator().kind {
+            TerminatorKind::Goto { target } => {
+                if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
+                    let mut can_immediately_return = true;
+                    for stmt in &fx.mir[*target].statements {
+                        if let StatementKind::StorageDead(_) = stmt.kind {
+                        } else {
+                            // FIXME Can sometimes happen, see rust-lang/rust#70531
+                            can_immediately_return = false;
+                            break;
+                        }
+                    }
+
+                    if can_immediately_return {
+                        crate::abi::codegen_return(fx);
+                        continue;
+                    }
+                }
+
+                let block = fx.get_block(*target);
+                fx.bcx.ins().jump(block, &[]);
+            }
+            TerminatorKind::Return => {
+                crate::abi::codegen_return(fx);
+            }
+            TerminatorKind::Assert {
+                cond,
+                expected,
+                msg,
+                target,
+                cleanup: _,
+            } => {
+                if !fx.tcx.sess.overflow_checks() {
+                    if let mir::AssertKind::OverflowNeg(_) = *msg {
+                        let target = fx.get_block(*target);
+                        fx.bcx.ins().jump(target, &[]);
+                        continue;
+                    }
+                }
+                let cond = codegen_operand(fx, cond).load_scalar(fx);
+
+                let target = fx.get_block(*target);
+                let failure = fx.bcx.create_block();
+                fx.cold_blocks.insert(failure);
+
+                if *expected {
+                    fx.bcx.ins().brz(cond, failure, &[]);
+                } else {
+                    fx.bcx.ins().brnz(cond, failure, &[]);
+                };
+                fx.bcx.ins().jump(target, &[]);
+
+                fx.bcx.switch_to_block(failure);
+                fx.bcx.ins().nop();
+
+                match msg {
+                    AssertKind::BoundsCheck { ref len, ref index } => {
+                        let len = codegen_operand(fx, len).load_scalar(fx);
+                        let index = codegen_operand(fx, index).load_scalar(fx);
+                        let location = fx
+                            .get_caller_location(bb_data.terminator().source_info.span)
+                            .load_scalar(fx);
+
+                        codegen_panic_inner(
+                            fx,
+                            rustc_hir::LangItem::PanicBoundsCheck,
+                            &[index, len, location],
+                            bb_data.terminator().source_info.span,
+                        );
+                    }
+                    _ => {
+                        let msg_str = msg.description();
+                        codegen_panic(fx, msg_str, bb_data.terminator().source_info.span);
+                    }
+                }
+            }
+
+            TerminatorKind::SwitchInt {
+                discr,
+                switch_ty,
+                targets,
+            } => {
+                let discr = codegen_operand(fx, discr).load_scalar(fx);
+
+                if switch_ty.kind() == fx.tcx.types.bool.kind() {
+                    assert_eq!(targets.iter().count(), 1);
+                    let (then_value, then_block) = targets.iter().next().unwrap();
+                    let then_block = fx.get_block(then_block);
+                    let else_block = fx.get_block(targets.otherwise());
+                    let test_zero = match then_value {
+                        0 => true,
+                        1 => false,
+                        _ => unreachable!("{:?}", targets),
+                    };
+
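+                    // Peephole-simplify the condition before branching: strip a `bint`
+                    // wrapping the bool, fold a `bnot` by flipping `test_zero`, and
+                    // ensure the final value is something Cranelift can branch on.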
+                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+                    let (discr, is_inverted) =
+                        crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
+                    let test_zero = if is_inverted { !test_zero } else { test_zero };
+                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+                    let discr =
+                        crate::optimize::peephole::make_branchable_value(&mut fx.bcx, discr);
+                    if test_zero {
+                        fx.bcx.ins().brz(discr, then_block, &[]);
+                        fx.bcx.ins().jump(else_block, &[]);
+                    } else {
+                        fx.bcx.ins().brnz(discr, then_block, &[]);
+                        fx.bcx.ins().jump(else_block, &[]);
+                    }
+                } else {
+                    let mut switch = ::cranelift_frontend::Switch::new();
+                    for (value, block) in targets.iter() {
+                        let block = fx.get_block(block);
+                        switch.set_entry(value, block);
+                    }
+                    let otherwise_block = fx.get_block(targets.otherwise());
+                    switch.emit(&mut fx.bcx, discr, otherwise_block);
+                }
+            }
+            TerminatorKind::Call {
+                func,
+                args,
+                destination,
+                fn_span,
+                cleanup: _,
+                from_hir_call: _,
+            } => {
+                fx.tcx.sess.time("codegen call", || {
+                    crate::abi::codegen_terminator_call(
+                        fx,
+                        *fn_span,
+                        block,
+                        func,
+                        args,
+                        *destination,
+                    )
+                });
+            }
+            TerminatorKind::InlineAsm {
+                template,
+                operands,
+                options,
+                destination,
+                line_spans: _,
+            } => {
+                crate::inline_asm::codegen_inline_asm(
+                    fx,
+                    bb_data.terminator().source_info.span,
+                    template,
+                    operands,
+                    *options,
+                );
+
+                match *destination {
+                    Some(destination) => {
+                        let destination_block = fx.get_block(destination);
+                        fx.bcx.ins().jump(destination_block, &[]);
+                    }
+                    None => {
+                        crate::trap::trap_unreachable(
+                            fx,
+                            "[corruption] Returned from noreturn inline asm",
+                        );
+                    }
+                }
+            }
+            TerminatorKind::Resume | TerminatorKind::Abort => {
+                trap_unreachable(fx, "[corruption] Unwinding bb reached.");
+            }
+            TerminatorKind::Unreachable => {
+                trap_unreachable(fx, "[corruption] Hit unreachable code.");
+            }
+            TerminatorKind::Yield { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::GeneratorDrop => {
+                bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
+            }
+            TerminatorKind::Drop {
+                place,
+                target,
+                unwind: _,
+            } => {
+                let drop_place = codegen_place(fx, *place);
+                crate::abi::codegen_drop(fx, bb_data.terminator().source_info.span, drop_place);
+
+                let target_block = fx.get_block(*target);
+                fx.bcx.ins().jump(target_block, &[]);
+            }
+        };
+    }
+
+    fx.bcx.seal_all_blocks();
+    fx.bcx.finalize();
+}
+
+fn codegen_stmt<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    #[allow(unused_variables)] cur_block: Block,
+    stmt: &Statement<'tcx>,
+) {
+    let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
+
+    fx.set_debug_loc(stmt.source_info);
+
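+    // Note: `false_debug_assertions` is not a cfg that is normally set, so the block below
+    // is always compiled out; presumably it is kept so the per-statement comments can easily
+    // be re-enabled by renaming it back to `debug_assertions`.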
+    #[cfg(false_debug_assertions)]
+    match &stmt.kind {
+        StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
+        _ => {
+            let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
+            fx.add_comment(inst, format!("{:?}", stmt));
+        }
+    }
+
+    match &stmt.kind {
+        StatementKind::SetDiscriminant {
+            place,
+            variant_index,
+        } => {
+            let place = codegen_place(fx, **place);
+            crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
+        }
+        StatementKind::Assign(to_place_and_rval) => {
+            let lval = codegen_place(fx, to_place_and_rval.0);
+            let dest_layout = lval.layout();
+            match &to_place_and_rval.1 {
+                Rvalue::Use(operand) => {
+                    let val = codegen_operand(fx, operand);
+                    lval.write_cvalue(fx, val);
+                }
+                Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+                    let place = codegen_place(fx, *place);
+                    let ref_ = place.place_ref(fx, lval.layout());
+                    lval.write_cvalue(fx, ref_);
+                }
+                Rvalue::ThreadLocalRef(def_id) => {
+                    let val = crate::constant::codegen_tls_ref(fx, *def_id, lval.layout());
+                    lval.write_cvalue(fx, val);
+                }
+                Rvalue::BinaryOp(bin_op, lhs, rhs) => {
+                    let lhs = codegen_operand(fx, lhs);
+                    let rhs = codegen_operand(fx, rhs);
+
+                    let res = crate::num::codegen_binop(fx, *bin_op, lhs, rhs);
+                    lval.write_cvalue(fx, res);
+                }
+                Rvalue::CheckedBinaryOp(bin_op, lhs, rhs) => {
+                    let lhs = codegen_operand(fx, lhs);
+                    let rhs = codegen_operand(fx, rhs);
+
+                    let res = if !fx.tcx.sess.overflow_checks() {
+                        let val =
+                            crate::num::codegen_int_binop(fx, *bin_op, lhs, rhs).load_scalar(fx);
+                        let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
+                        CValue::by_val_pair(val, is_overflow, lval.layout())
+                    } else {
+                        crate::num::codegen_checked_int_binop(fx, *bin_op, lhs, rhs)
+                    };
+
+                    lval.write_cvalue(fx, res);
+                }
+                Rvalue::UnaryOp(un_op, operand) => {
+                    let operand = codegen_operand(fx, operand);
+                    let layout = operand.layout();
+                    let val = operand.load_scalar(fx);
+                    let res = match un_op {
+                        UnOp::Not => match layout.ty.kind() {
+                            ty::Bool => {
+                                let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+                                CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
+                            }
+                            ty::Uint(_) | ty::Int(_) => {
+                                CValue::by_val(fx.bcx.ins().bnot(val), layout)
+                            }
+                            _ => unreachable!("un op Not for {:?}", layout.ty),
+                        },
+                        UnOp::Neg => match layout.ty.kind() {
+                            ty::Int(IntTy::I128) => {
+                                // FIXME remove this case once ineg.i128 works
+                                let zero = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+                                crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
+                            }
+                            ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+                            ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
+                            _ => unreachable!("un op Neg for {:?}", layout.ty),
+                        },
+                    };
+                    lval.write_cvalue(fx, res);
+                }
+                Rvalue::Cast(CastKind::Pointer(PointerCast::ReifyFnPointer), operand, to_ty) => {
+                    let from_ty = fx.monomorphize(&operand.ty(&fx.mir.local_decls, fx.tcx));
+                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+                    match *from_ty.kind() {
+                        ty::FnDef(def_id, substs) => {
+                            let func_ref = fx.get_function_ref(
+                                Instance::resolve_for_fn_ptr(
+                                    fx.tcx,
+                                    ParamEnv::reveal_all(),
+                                    def_id,
+                                    substs,
+                                )
+                                .unwrap()
+                                .polymorphize(fx.tcx),
+                            );
+                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+                            lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
+                        }
+                        _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
+                    }
+                }
+                Rvalue::Cast(CastKind::Pointer(PointerCast::UnsafeFnPointer), operand, to_ty)
+                | Rvalue::Cast(CastKind::Pointer(PointerCast::MutToConstPointer), operand, to_ty)
+                | Rvalue::Cast(CastKind::Pointer(PointerCast::ArrayToPointer), operand, to_ty) => {
+                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+                    let operand = codegen_operand(fx, operand);
+                    lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
+                }
+                Rvalue::Cast(CastKind::Misc, operand, to_ty) => {
+                    let operand = codegen_operand(fx, operand);
+                    let from_ty = operand.layout().ty;
+                    let to_ty = fx.monomorphize(to_ty);
+
+                    fn is_fat_ptr<'tcx>(
+                        fx: &FunctionCx<'_, 'tcx, impl Module>,
+                        ty: Ty<'tcx>,
+                    ) -> bool {
+                        ty.builtin_deref(true)
+                            .map(
+                                |ty::TypeAndMut {
+                                     ty: pointee_ty,
+                                     mutbl: _,
+                                 }| {
+                                    has_ptr_meta(fx.tcx, pointee_ty)
+                                },
+                            )
+                            .unwrap_or(false)
+                    }
+
+                    if is_fat_ptr(fx, from_ty) {
+                        if is_fat_ptr(fx, to_ty) {
+                            // fat-ptr -> fat-ptr
+                            lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
+                        } else {
+                            // fat-ptr -> thin-ptr
+                            let (ptr, _extra) = operand.load_scalar_pair(fx);
+                            lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
+                        }
+                    } else if let ty::Adt(adt_def, _substs) = from_ty.kind() {
+                        // enum -> discriminant value
+                        assert!(adt_def.is_enum());
+                        match to_ty.kind() {
+                            ty::Uint(_) | ty::Int(_) => {}
+                            _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
+                        }
+
+                        use rustc_target::abi::{Int, TagEncoding, Variants};
+
+                        match &operand.layout().variants {
+                            Variants::Single { index } => {
+                                let discr = operand
+                                    .layout()
+                                    .ty
+                                    .discriminant_for_variant(fx.tcx, *index)
+                                    .unwrap();
+                                let discr = if discr.ty.is_signed() {
+                                    fx.layout_of(discr.ty).size.sign_extend(discr.val)
+                                } else {
+                                    discr.val
+                                };
+                                let discr = discr.into();
+
+                                let discr = CValue::const_val(fx, fx.layout_of(to_ty), discr);
+                                lval.write_cvalue(fx, discr);
+                            }
+                            Variants::Multiple {
+                                tag,
+                                tag_field,
+                                tag_encoding: TagEncoding::Direct,
+                                variants: _,
+                            } => {
+                                let cast_to = fx.clif_type(dest_layout.ty).unwrap();
+
+                                // Read the tag/niche-encoded discriminant from memory.
+                                let encoded_discr =
+                                    operand.value_field(fx, mir::Field::new(*tag_field));
+                                let encoded_discr = encoded_discr.load_scalar(fx);
+
+                                // Decode the discriminant (specifically if it's niche-encoded).
+                                let signed = match tag.value {
+                                    Int(_, signed) => signed,
+                                    _ => false,
+                                };
+                                let val = clif_intcast(fx, encoded_discr, cast_to, signed);
+                                let val = CValue::by_val(val, dest_layout);
+                                lval.write_cvalue(fx, val);
+                            }
+                            Variants::Multiple { .. } => unreachable!(),
+                        }
+                    } else {
+                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
+                        let from = operand.load_scalar(fx);
+
+                        let res = clif_int_or_float_cast(
+                            fx,
+                            from,
+                            type_sign(from_ty),
+                            to_clif_ty,
+                            type_sign(to_ty),
+                        );
+                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+                    }
+                }
+                Rvalue::Cast(
+                    CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+                    operand,
+                    _to_ty,
+                ) => {
+                    let operand = codegen_operand(fx, operand);
+                    match *operand.layout().ty.kind() {
+                        ty::Closure(def_id, substs) => {
+                            let instance = Instance::resolve_closure(
+                                fx.tcx,
+                                def_id,
+                                substs,
+                                ty::ClosureKind::FnOnce,
+                            )
+                            .polymorphize(fx.tcx);
+                            let func_ref = fx.get_function_ref(instance);
+                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+                            lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
+                        }
+                        _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
+                    }
+                }
+                Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), operand, _to_ty) => {
+                    let operand = codegen_operand(fx, operand);
+                    operand.unsize_value(fx, lval);
+                }
+                Rvalue::Discriminant(place) => {
+                    let place = codegen_place(fx, *place);
+                    let value = place.to_cvalue(fx);
+                    let discr =
+                        crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
+                    lval.write_cvalue(fx, discr);
+                }
+                Rvalue::Repeat(operand, times) => {
+                    let operand = codegen_operand(fx, operand);
+                    let times = fx
+                        .monomorphize(times)
+                        .eval(fx.tcx, ParamEnv::reveal_all())
+                        .val
+                        .try_to_bits(fx.tcx.data_layout.pointer_size)
+                        .unwrap();
+                    if fx.clif_type(operand.layout().ty) == Some(types::I8) {
+                        let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
+                        // FIXME use emit_small_memset where possible
+                        let addr = lval.to_ptr().get_addr(fx);
+                        let val = operand.load_scalar(fx);
+                        fx.bcx
+                            .call_memset(fx.cx.module.target_config(), addr, val, times);
+                    } else {
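+                        // General case: emit a counted loop. `loop_block` takes the current
+                        // index as a block parameter and exits once it equals `times`;
+                        // `loop_block2` writes one element and jumps back with index + 1.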
+                        let loop_block = fx.bcx.create_block();
+                        let loop_block2 = fx.bcx.create_block();
+                        let done_block = fx.bcx.create_block();
+                        let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
+                        let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+                        fx.bcx.ins().jump(loop_block, &[zero]);
+
+                        fx.bcx.switch_to_block(loop_block);
+                        let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
+                        fx.bcx.ins().brnz(done, done_block, &[]);
+                        fx.bcx.ins().jump(loop_block2, &[]);
+
+                        fx.bcx.switch_to_block(loop_block2);
+                        let to = lval.place_index(fx, index);
+                        to.write_cvalue(fx, operand);
+                        let index = fx.bcx.ins().iadd_imm(index, 1);
+                        fx.bcx.ins().jump(loop_block, &[index]);
+
+                        fx.bcx.switch_to_block(done_block);
+                        fx.bcx.ins().nop();
+                    }
+                }
+                Rvalue::Len(place) => {
+                    let place = codegen_place(fx, *place);
+                    let usize_layout = fx.layout_of(fx.tcx.types.usize);
+                    let len = codegen_array_len(fx, place);
+                    lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
+                }
+                Rvalue::NullaryOp(NullOp::Box, content_ty) => {
+                    let usize_type = fx.clif_type(fx.tcx.types.usize).unwrap();
+                    let content_ty = fx.monomorphize(content_ty);
+                    let layout = fx.layout_of(content_ty);
+                    let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
+                    let llalign = fx
+                        .bcx
+                        .ins()
+                        .iconst(usize_type, layout.align.abi.bytes() as i64);
+                    let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
+
+                    // Allocate space:
+                    let def_id = match fx
+                        .tcx
+                        .lang_items()
+                        .require(rustc_hir::LangItem::ExchangeMalloc)
+                    {
+                        Ok(id) => id,
+                        Err(s) => {
+                            fx.tcx
+                                .sess
+                                .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+                        }
+                    };
+                    let instance = ty::Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+                    let func_ref = fx.get_function_ref(instance);
+                    let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
+                    let ptr = fx.bcx.inst_results(call)[0];
+                    lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
+                }
+                Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
+                    assert!(lval
+                        .layout()
+                        .ty
+                        .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
+                    let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
+                    let val =
+                        CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
+                    lval.write_cvalue(fx, val);
+                }
+                Rvalue::Aggregate(kind, operands) => match **kind {
+                    AggregateKind::Array(_ty) => {
+                        for (i, operand) in operands.iter().enumerate() {
+                            let operand = codegen_operand(fx, operand);
+                            let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
+                            let to = lval.place_index(fx, index);
+                            to.write_cvalue(fx, operand);
+                        }
+                    }
+                    _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
+                },
+            }
+        }
+        StatementKind::StorageLive(_)
+        | StatementKind::StorageDead(_)
+        | StatementKind::Nop
+        | StatementKind::FakeRead(..)
+        | StatementKind::Retag { .. }
+        | StatementKind::AscribeUserType(..) => {}
+
+        StatementKind::LlvmInlineAsm(asm) => {
+            use rustc_span::symbol::Symbol;
+            let LlvmInlineAsm {
+                asm,
+                outputs,
+                inputs,
+            } = &**asm;
+            let rustc_hir::LlvmInlineAsmInner {
+                asm: asm_code,         // Name
+                outputs: output_names, // Vec<LlvmInlineAsmOutput>
+                inputs: input_names,   // Vec<Name>
+                clobbers,              // Vec<Name>
+                volatile,              // bool
+                alignstack,            // bool
+                dialect: _,
+                asm_str_style: _,
+            } = asm;
+            match asm_code.as_str().trim() {
+                "" => {
+                    // Black box
+                }
+                "mov %rbx, %rsi\n                  cpuid\n                  xchg %rbx, %rsi" => {
+                    assert_eq!(
+                        input_names,
+                        &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]
+                    );
+                    assert_eq!(output_names.len(), 4);
+                    for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"])
+                        .iter()
+                        .enumerate()
+                    {
+                        assert_eq!(&output_names[i].constraint.as_str(), c);
+                        assert!(!output_names[i].is_rw);
+                        assert!(!output_names[i].is_indirect);
+                    }
+
+                    assert_eq!(clobbers, &[]);
+
+                    assert!(!volatile);
+                    assert!(!alignstack);
+
+                    assert_eq!(inputs.len(), 2);
+                    let leaf = codegen_operand(fx, &inputs[0].1).load_scalar(fx); // %eax
+                    let subleaf = codegen_operand(fx, &inputs[1].1).load_scalar(fx); // %ecx
+
+                    let (eax, ebx, ecx, edx) =
+                        crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf);
+
+                    assert_eq!(outputs.len(), 4);
+                    codegen_place(fx, outputs[0])
+                        .write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
+                    codegen_place(fx, outputs[1])
+                        .write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
+                    codegen_place(fx, outputs[2])
+                        .write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
+                    codegen_place(fx, outputs[3])
+                        .write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+                }
+                "xgetbv" => {
+                    assert_eq!(input_names, &[Symbol::intern("{ecx}")]);
+
+                    assert_eq!(output_names.len(), 2);
+                    for (i, c) in (&["={eax}", "={edx}"]).iter().enumerate() {
+                        assert_eq!(&output_names[i].constraint.as_str(), c);
+                        assert!(!output_names[i].is_rw);
+                        assert!(!output_names[i].is_indirect);
+                    }
+
+                    assert_eq!(clobbers, &[]);
+
+                    assert!(!volatile);
+                    assert!(!alignstack);
+
+                    crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported");
+                }
+                // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
+                _ if fx
+                    .tcx
+                    .symbol_name(fx.instance)
+                    .name
+                    .starts_with("___chkstk") =>
+                {
+                    crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
+                }
+                _ if fx.tcx.symbol_name(fx.instance).name == "__alloca" => {
+                    crate::trap::trap_unimplemented(fx, "Alloca is not supported");
+                }
+                // Used in sys::windows::abort_internal
+                "int $$0x29" => {
+                    crate::trap::trap_unimplemented(fx, "Windows abort");
+                }
+                _ => fx
+                    .tcx
+                    .sess
+                    .span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
+            }
+        }
+        StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+    }
+}
+
+fn codegen_array_len<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    place: CPlace<'tcx>,
+) -> Value {
+    match *place.layout().ty.kind() {
+        ty::Array(_elem_ty, len) => {
+            let len = fx
+                .monomorphize(&len)
+                .eval(fx.tcx, ParamEnv::reveal_all())
+                .eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+            fx.bcx.ins().iconst(fx.pointer_type, len)
+        }
+        ty::Slice(_elem_ty) => place
+            .to_ptr_maybe_unsized()
+            .1
+            .expect("Length metadata for slice place"),
+        _ => bug!("Rvalue::Len({:?})", place),
+    }
+}
+
+pub(crate) fn codegen_place<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    place: Place<'tcx>,
+) -> CPlace<'tcx> {
+    let mut cplace = fx.get_local_place(place.local);
+
+    for elem in place.projection {
+        match elem {
+            PlaceElem::Deref => {
+                cplace = cplace.place_deref(fx);
+            }
+            PlaceElem::Field(field, _ty) => {
+                cplace = cplace.place_field(fx, field);
+            }
+            PlaceElem::Index(local) => {
+                let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
+                cplace = cplace.place_index(fx, index);
+            }
+            PlaceElem::ConstantIndex {
+                offset,
+                min_length: _,
+                from_end,
+            } => {
+                let offset: u64 = offset;
+                let index = if !from_end {
+                    fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
+                } else {
+                    let len = codegen_array_len(fx, cplace);
+                    fx.bcx.ins().iadd_imm(len, -(offset as i64))
+                };
+                cplace = cplace.place_index(fx, index);
+            }
+            PlaceElem::Subslice { from, to, from_end } => {
+                // These indices are generated by slice patterns.
+                // slice[from:-to] in Python terms.
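+                // E.g. the slice pattern `[first, .., last]` projects the middle part as
+                // `from == 1`, `to == 1`, `from_end == true`: one element dropped from each end.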
+
+                let from: u64 = from;
+                let to: u64 = to;
+
+                match cplace.layout().ty.kind() {
+                    ty::Array(elem_ty, _len) => {
+                        assert!(!from_end, "array subslices are never `from_end`");
+                        let elem_layout = fx.layout_of(elem_ty);
+                        let ptr = cplace.to_ptr();
+                        cplace = CPlace::for_ptr(
+                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+                            fx.layout_of(fx.tcx.mk_array(elem_ty, to - from)),
+                        );
+                    }
+                    ty::Slice(elem_ty) => {
+                        assert!(from_end, "slice subslices should be `from_end`");
+                        let elem_layout = fx.layout_of(elem_ty);
+                        let (ptr, len) = cplace.to_ptr_maybe_unsized();
+                        let len = len.unwrap();
+                        cplace = CPlace::for_ptr_with_extra(
+                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+                            fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
+                            cplace.layout(),
+                        );
+                    }
+                    _ => unreachable!(),
+                }
+            }
+            PlaceElem::Downcast(_adt_def, variant) => {
+                cplace = cplace.downcast_variant(fx, variant);
+            }
+        }
+    }
+
+    cplace
+}
+
+pub(crate) fn codegen_operand<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    operand: &Operand<'tcx>,
+) -> CValue<'tcx> {
+    match operand {
+        Operand::Move(place) | Operand::Copy(place) => {
+            let cplace = codegen_place(fx, *place);
+            cplace.to_cvalue(fx)
+        }
+        Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
+    }
+}
+
+pub(crate) fn codegen_panic<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    msg_str: &str,
+    span: Span,
+) {
+    let location = fx.get_caller_location(span).load_scalar(fx);
+
+    let msg_ptr = fx.anonymous_str("assert", msg_str);
+    let msg_len = fx
+        .bcx
+        .ins()
+        .iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+    let args = [msg_ptr, msg_len, location];
+
+    codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, span);
+}
+
+pub(crate) fn codegen_panic_inner<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    lang_item: rustc_hir::LangItem,
+    args: &[Value],
+    span: Span,
+) {
+    let def_id = fx
+        .tcx
+        .lang_items()
+        .require(lang_item)
+        .unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
+
+    let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+    let symbol_name = fx.tcx.symbol_name(instance).name;
+
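+    // Every caller in this file passes exactly three pointer-sized arguments
+    // (msg_ptr/msg_len/&Location for Panic, index/len/&Location for PanicBoundsCheck),
+    // which is why the signature below is hardcoded.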
+    fx.lib_call(
+        &*symbol_name,
+        vec![fx.pointer_type, fx.pointer_type, fx.pointer_type],
+        vec![],
+        args,
+    );
+
+    crate::trap::trap_unreachable(fx, "panic lang item returned");
+}
diff --git a/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs b/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs
new file mode 100644
index 0000000..cd01acc
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs
@@ -0,0 +1,82 @@
+#![feature(rustc_private)]
+
+extern crate rustc_data_structures;
+extern crate rustc_driver;
+extern crate rustc_interface;
+extern crate rustc_session;
+extern crate rustc_target;
+
+use rustc_data_structures::profiling::print_time_passes_entry;
+use rustc_interface::interface;
+use rustc_session::config::ErrorOutputType;
+use rustc_session::early_error;
+use rustc_target::spec::PanicStrategy;
+
+#[derive(Default)]
+pub struct CraneliftPassesCallbacks {
+    time_passes: bool,
+}
+
+impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
+    fn config(&mut self, config: &mut interface::Config) {
+        // If a --prints=... option has been given, we don't print the "total"
+        // time because it will mess up the --prints output. See #64339.
+        self.time_passes = config.opts.prints.is_empty()
+            && (config.opts.debugging_opts.time_passes || config.opts.debugging_opts.time);
+
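+        // cg_clif does not support unwinding, so force panic=abort and make
+        // tests abort on panic as well.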
+        config.opts.cg.panic = Some(PanicStrategy::Abort);
+        config.opts.debugging_opts.panic_abort_tests = true;
+        config.opts.maybe_sysroot = Some(
+            config.opts.maybe_sysroot.clone().unwrap_or_else(
+                || std::env::current_exe()
+                    .unwrap()
+                    .parent()
+                    .unwrap()
+                    .join("sysroot"),
+            ),
+        );
+    }
+}
+
+fn main() {
+    let start = std::time::Instant::now();
+    rustc_driver::init_rustc_env_logger();
+    let mut callbacks = CraneliftPassesCallbacks::default();
+    rustc_driver::install_ice_hook();
+    let exit_code = rustc_driver::catch_with_exit_code(|| {
+        let mut use_jit = false;
+
+        let mut args = std::env::args_os()
+            .enumerate()
+            .map(|(i, arg)| {
+                arg.into_string().unwrap_or_else(|arg| {
+                    early_error(
+                        ErrorOutputType::default(),
+                        &format!("Argument {} is not valid Unicode: {:?}", i, arg),
+                    )
+                })
+            })
+            .filter(|arg| {
+                if arg == "--jit" {
+                    use_jit = true;
+                    false
+                } else {
+                    true
+                }
+            })
+            .collect::<Vec<_>>();
+        if use_jit {
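+            // Presumably needed so that dependencies are built as dylibs the JIT can load.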
+            args.push("-Cprefer-dynamic".to_string());
+        }
+        let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
+        run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
+            Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend {
+                config: rustc_codegen_cranelift::BackendConfig { use_jit },
+            })
+        })));
+        run_compiler.run()
+    });
+    // The extra `\t` is necessary to align this label with the others.
+    print_time_passes_entry(callbacks.time_passes, "\ttotal", start.elapsed());
+    std::process::exit(exit_code)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs b/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs
new file mode 100644
index 0000000..165d33d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs
@@ -0,0 +1,103 @@
+//! The only difference between this and cg_clif.rs is that this binary defaults to using cg_llvm
+//! instead of cg_clif (requiring `--clif` to select cg_clif) and that it doesn't have JIT support.
+//!
+//! This is necessary because with Cargo, `RUSTC` applies to both target crates and host crates.
+//! The host crates must be built with cg_llvm, as we are currently building a sysroot for cg_clif.
+//! `RUSTFLAGS`, however, is only applied to target crates, so `--clif` is only passed to the
+//! target crates.
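+//!
+//! As an illustration only (paths are hypothetical), a sysroot build would therefore invoke
+//! something like `RUSTC=path/to/cg_clif_build_sysroot RUSTFLAGS="--clif" cargo build`, so that
+//! only the target crates are compiled with cg_clif.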
+
+#![feature(rustc_private)]
+
+extern crate rustc_data_structures;
+extern crate rustc_driver;
+extern crate rustc_interface;
+extern crate rustc_session;
+extern crate rustc_target;
+
+use std::path::PathBuf;
+
+use rustc_interface::interface;
+use rustc_session::config::ErrorOutputType;
+use rustc_session::early_error;
+use rustc_target::spec::PanicStrategy;
+
+fn find_sysroot() -> String {
+    // Taken from https://github.com/Manishearth/rust-clippy/pull/911.
+    let home = option_env!("RUSTUP_HOME").or(option_env!("MULTIRUST_HOME"));
+    let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN"));
+    match (home, toolchain) {
+        (Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain),
+        _ => option_env!("RUST_SYSROOT")
+            .expect("need to specify RUST_SYSROOT env var or use rustup or multirust")
+            .to_owned(),
+    }
+}
+
+pub struct CraneliftPassesCallbacks {
+    use_clif: bool,
+}
+
+impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
+    fn config(&mut self, config: &mut interface::Config) {
+        if !self.use_clif {
+            config.opts.maybe_sysroot = Some(PathBuf::from(find_sysroot()));
+            return;
+        }
+
+        config.opts.cg.panic = Some(PanicStrategy::Abort);
+        config.opts.debugging_opts.panic_abort_tests = true;
+        config.opts.maybe_sysroot = Some(
+            std::env::current_exe()
+                .unwrap()
+                .parent()
+                .unwrap()
+                .parent()
+                .unwrap()
+                .parent()
+                .unwrap()
+                .join("build_sysroot")
+                .join("sysroot"),
+        );
+    }
+}
+
+fn main() {
+    rustc_driver::init_rustc_env_logger();
+    rustc_driver::install_ice_hook();
+    let exit_code = rustc_driver::catch_with_exit_code(|| {
+        let mut use_clif = false;
+
+        let args = std::env::args_os()
+            .enumerate()
+            .map(|(i, arg)| {
+                arg.into_string().unwrap_or_else(|arg| {
+                    early_error(
+                        ErrorOutputType::default(),
+                        &format!("Argument {} is not valid Unicode: {:?}", i, arg),
+                    )
+                })
+            })
+            .filter(|arg| {
+                if arg == "--clif" {
+                    use_clif = true;
+                    false
+                } else {
+                    true
+                }
+            })
+            .collect::<Vec<_>>();
+
+        let mut callbacks = CraneliftPassesCallbacks { use_clif };
+
+        let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
+        if use_clif {
+            run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
+                Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend {
+                    config: rustc_codegen_cranelift::BackendConfig { use_jit: false },
+                })
+            })));
+        }
+        run_compiler.run()
+    });
+    std::process::exit(exit_code)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/cast.rs b/compiler/rustc_codegen_cranelift/src/cast.rs
new file mode 100644
index 0000000..57204de
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/cast.rs
@@ -0,0 +1,199 @@
+//! Various number casting functions
+
+use crate::prelude::*;
+
+pub(crate) fn clif_intcast(
+    fx: &mut FunctionCx<'_, '_, impl Module>,
+    val: Value,
+    to: Type,
+    signed: bool,
+) -> Value {
+    let from = fx.bcx.func.dfg.value_type(val);
+    match (from, to) {
+        // equal
+        (_, _) if from == to => val,
+
+        // extend
+        (_, types::I128) => {
+            let lo = if from == types::I64 {
+                val
+            } else if signed {
+                fx.bcx.ins().sextend(types::I64, val)
+            } else {
+                fx.bcx.ins().uextend(types::I64, val)
+            };
+            let hi = if signed {
+                fx.bcx.ins().sshr_imm(lo, 63)
+            } else {
+                fx.bcx.ins().iconst(types::I64, 0)
+            };
+            fx.bcx.ins().iconcat(lo, hi)
+        }
+        (_, _) if to.wider_or_equal(from) => {
+            if signed {
+                fx.bcx.ins().sextend(to, val)
+            } else {
+                fx.bcx.ins().uextend(to, val)
+            }
+        }
+
+        // reduce
+        (types::I128, _) => {
+            let (lsb, _msb) = fx.bcx.ins().isplit(val);
+            if to == types::I64 {
+                lsb
+            } else {
+                fx.bcx.ins().ireduce(to, lsb)
+            }
+        }
+        (_, _) => fx.bcx.ins().ireduce(to, val),
+    }
+}
+
+pub(crate) fn clif_int_or_float_cast(
+    fx: &mut FunctionCx<'_, '_, impl Module>,
+    from: Value,
+    from_signed: bool,
+    to_ty: Type,
+    to_signed: bool,
+) -> Value {
+    let from_ty = fx.bcx.func.dfg.value_type(from);
+
+    if from_ty.is_int() && to_ty.is_int() {
+        // int-like -> int-like
+        clif_intcast(
+            fx,
+            from,
+            to_ty,
+            // This is correct as either from_signed == to_signed (=> this is trivially correct)
+            // Or from_clif_ty == to_clif_ty, which means this is a no-op.
+            from_signed,
+        )
+    } else if from_ty.is_int() && to_ty.is_float() {
+        if from_ty == types::I128 {
+            // These libcalls follow the pattern __float{sign}ti{flt}f:
+            // __floattisf:   i128 -> f32
+            // __floattidf:   i128 -> f64
+            // __floatuntisf: u128 -> f32
+            // __floatuntidf: u128 -> f64
+
+            let name = format!(
+                "__float{sign}ti{flt}f",
+                sign = if from_signed { "" } else { "un" },
+                flt = match to_ty {
+                    types::F32 => "s",
+                    types::F64 => "d",
+                    _ => unreachable!("{:?}", to_ty),
+                },
+            );
+
+            let from_rust_ty = if from_signed {
+                fx.tcx.types.i128
+            } else {
+                fx.tcx.types.u128
+            };
+
+            let to_rust_ty = match to_ty {
+                types::F32 => fx.tcx.types.f32,
+                types::F64 => fx.tcx.types.f64,
+                _ => unreachable!(),
+            };
+
+            return fx
+                .easy_call(
+                    &name,
+                    &[CValue::by_val(from, fx.layout_of(from_rust_ty))],
+                    to_rust_ty,
+                )
+                .load_scalar(fx);
+        }
+
+        // int-like -> float
+        if from_signed {
+            fx.bcx.ins().fcvt_from_sint(to_ty, from)
+        } else {
+            fx.bcx.ins().fcvt_from_uint(to_ty, from)
+        }
+    } else if from_ty.is_float() && to_ty.is_int() {
+        if to_ty == types::I128 {
+            // These libcalls follow the pattern __fix{sign}{flt}fti:
+            // __fixsfti:    f32 -> i128
+            // __fixdfti:    f64 -> i128
+            // __fixunssfti: f32 -> u128
+            // __fixunsdfti: f64 -> u128
+
+            let name = format!(
+                "__fix{sign}{flt}fti",
+                sign = if to_signed { "" } else { "uns" },
+                flt = match from_ty {
+                    types::F32 => "s",
+                    types::F64 => "d",
+                    _ => unreachable!("{:?}", from_ty),
+                },
+            );
+
+            let from_rust_ty = match from_ty {
+                types::F32 => fx.tcx.types.f32,
+                types::F64 => fx.tcx.types.f64,
+                _ => unreachable!(),
+            };
+
+            let to_rust_ty = if to_signed {
+                fx.tcx.types.i128
+            } else {
+                fx.tcx.types.u128
+            };
+
+            return fx
+                .easy_call(
+                    &name,
+                    &[CValue::by_val(from, fx.layout_of(from_rust_ty))],
+                    to_rust_ty,
+                )
+                .load_scalar(fx);
+        }
+
+        // float -> int-like
+        if to_ty == types::I8 || to_ty == types::I16 {
+            // FIXME implement fcvt_to_*int_sat.i8/i16
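+            // Workaround: saturate into an I32 first, clamp to the i8/i16 range with
+            // explicit compares and selects, then `ireduce` to the final type.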
+            let val = if to_signed {
+                fx.bcx.ins().fcvt_to_sint_sat(types::I32, from)
+            } else {
+                fx.bcx.ins().fcvt_to_uint_sat(types::I32, from)
+            };
+            let (min, max) = match (to_ty, to_signed) {
+                (types::I8, false) => (0, i64::from(u8::MAX)),
+                (types::I16, false) => (0, i64::from(u16::MAX)),
+                (types::I8, true) => (i64::from(i8::MIN), i64::from(i8::MAX)),
+                (types::I16, true) => (i64::from(i16::MIN), i64::from(i16::MAX)),
+                _ => unreachable!(),
+            };
+            let min_val = fx.bcx.ins().iconst(types::I32, min);
+            let max_val = fx.bcx.ins().iconst(types::I32, max);
+
+            let val = if to_signed {
+                let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, min);
+                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, max);
+                let bottom_capped = fx.bcx.ins().select(has_underflow, min_val, val);
+                fx.bcx.ins().select(has_overflow, max_val, bottom_capped)
+            } else {
+                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, max);
+                fx.bcx.ins().select(has_overflow, max_val, val)
+            };
+            fx.bcx.ins().ireduce(to_ty, val)
+        } else if to_signed {
+            fx.bcx.ins().fcvt_to_sint_sat(to_ty, from)
+        } else {
+            fx.bcx.ins().fcvt_to_uint_sat(to_ty, from)
+        }
+    } else if from_ty.is_float() && to_ty.is_float() {
+        // float -> float
+        match (from_ty, to_ty) {
+            (types::F32, types::F64) => fx.bcx.ins().fpromote(types::F64, from),
+            (types::F64, types::F32) => fx.bcx.ins().fdemote(types::F32, from),
+            _ => from,
+        }
+    } else {
+        unreachable!("cast value from {:?} to {:?}", from_ty, to_ty);
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
new file mode 100644
index 0000000..d6a38bd
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
@@ -0,0 +1,165 @@
+//! Replaces 128-bit operators with lang item calls where necessary
+
+use crate::prelude::*;
+
+pub(crate) fn maybe_codegen<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    bin_op: BinOp,
+    checked: bool,
+    lhs: CValue<'tcx>,
+    rhs: CValue<'tcx>,
+) -> Option<CValue<'tcx>> {
+    if lhs.layout().ty != fx.tcx.types.u128 && lhs.layout().ty != fx.tcx.types.i128 {
+        return None;
+    }
+
+    let lhs_val = lhs.load_scalar(fx);
+    let rhs_val = rhs.load_scalar(fx);
+
+    let is_signed = type_sign(lhs.layout().ty);
+
+    match bin_op {
+        BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => {
+            assert!(!checked);
+            None
+        }
+        BinOp::Add | BinOp::Sub if !checked => None,
+        BinOp::Add => {
+            let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+            return Some(if is_signed {
+                fx.easy_call("__rust_i128_addo", &[lhs, rhs], out_ty)
+            } else {
+                fx.easy_call("__rust_u128_addo", &[lhs, rhs], out_ty)
+            });
+        }
+        BinOp::Sub => {
+            let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+            return Some(if is_signed {
+                fx.easy_call("__rust_i128_subo", &[lhs, rhs], out_ty)
+            } else {
+                fx.easy_call("__rust_u128_subo", &[lhs, rhs], out_ty)
+            });
+        }
+        BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
+        BinOp::Mul => {
+            let res = if checked {
+                let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+                if is_signed {
+                    fx.easy_call("__rust_i128_mulo", &[lhs, rhs], out_ty)
+                } else {
+                    fx.easy_call("__rust_u128_mulo", &[lhs, rhs], out_ty)
+                }
+            } else {
+                let val_ty = if is_signed {
+                    fx.tcx.types.i128
+                } else {
+                    fx.tcx.types.u128
+                };
+                fx.easy_call("__multi3", &[lhs, rhs], val_ty)
+            };
+            Some(res)
+        }
+        BinOp::Div => {
+            assert!(!checked);
+            if is_signed {
+                Some(fx.easy_call("__divti3", &[lhs, rhs], fx.tcx.types.i128))
+            } else {
+                Some(fx.easy_call("__udivti3", &[lhs, rhs], fx.tcx.types.u128))
+            }
+        }
+        BinOp::Rem => {
+            assert!(!checked);
+            if is_signed {
+                Some(fx.easy_call("__modti3", &[lhs, rhs], fx.tcx.types.i128))
+            } else {
+                Some(fx.easy_call("__umodti3", &[lhs, rhs], fx.tcx.types.u128))
+            }
+        }
+        BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
+            assert!(!checked);
+            None
+        }
+        BinOp::Shl | BinOp::Shr => {
+            let is_overflow = if checked {
+                // rhs >= 128
+
+                // FIXME support non 128bit rhs
+                /*let (rhs_lsb, rhs_msb) = fx.bcx.ins().isplit(rhs_val);
+                let rhs_msb_gt_0 = fx.bcx.ins().icmp_imm(IntCC::NotEqual, rhs_msb, 0);
+                let rhs_lsb_ge_128 = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, rhs_lsb, 127);
+                let is_overflow = fx.bcx.ins().bor(rhs_msb_gt_0, rhs_lsb_ge_128);*/
+                let is_overflow = fx.bcx.ins().bconst(types::B1, false);
+
+                Some(fx.bcx.ins().bint(types::I8, is_overflow))
+            } else {
+                None
+            };
+
+            // Optimize `val >> 64`, because compiler_builtins uses it to deconstruct a 128bit
+            // integer into its lsb and msb.
+            // https://github.com/rust-lang-nursery/compiler-builtins/blob/79a6a1603d5672cbb9187ff41ff4d9b5048ac1cb/src/int/mod.rs#L217
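+            // For example, for an unsigned `x`, `x >> 64` is just `iconcat(msb_of_x, 0)`,
+            // so no libcall is needed.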
+            if resolve_value_imm(fx.bcx.func, rhs_val) == Some(64) {
+                let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs_val);
+                let all_zeros = fx.bcx.ins().iconst(types::I64, 0);
+                let val = match (bin_op, is_signed) {
+                    (BinOp::Shr, false) => {
+                        let val = fx.bcx.ins().iconcat(lhs_msb, all_zeros);
+                        Some(CValue::by_val(val, fx.layout_of(fx.tcx.types.u128)))
+                    }
+                    (BinOp::Shr, true) => {
+                        let sign = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, lhs_msb, 0);
+                        let all_ones = fx.bcx.ins().iconst(types::I64, u64::MAX as i64);
+                        // Negative values must sign-extend with ones in the upper half.
+                        let all_sign_bits = fx.bcx.ins().select(sign, all_ones, all_zeros);
+
+                        let val = fx.bcx.ins().iconcat(lhs_msb, all_sign_bits);
+                        Some(CValue::by_val(val, fx.layout_of(fx.tcx.types.i128)))
+                    }
+                    (BinOp::Shl, _) => {
+                        let val_ty = if is_signed {
+                            fx.tcx.types.i128
+                        } else {
+                            fx.tcx.types.u128
+                        };
+                        let val = fx.bcx.ins().iconcat(all_zeros, lhs_lsb);
+                        Some(CValue::by_val(val, fx.layout_of(val_ty)))
+                    }
+                    _ => None,
+                };
+                if let Some(val) = val {
+                    if let Some(is_overflow) = is_overflow {
+                        let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+                        let val = val.load_scalar(fx);
+                        return Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)));
+                    } else {
+                        return Some(val);
+                    }
+                }
+            }
+
+            let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
+            let truncated_rhs = CValue::by_val(truncated_rhs, fx.layout_of(fx.tcx.types.u32));
+            let val = match (bin_op, is_signed) {
+                (BinOp::Shl, false) => {
+                    fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.tcx.types.u128)
+                }
+                (BinOp::Shl, true) => {
+                    fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.tcx.types.i128)
+                }
+                (BinOp::Shr, false) => {
+                    fx.easy_call("__lshrti3", &[lhs, truncated_rhs], fx.tcx.types.u128)
+                }
+                (BinOp::Shr, true) => {
+                    fx.easy_call("__ashrti3", &[lhs, truncated_rhs], fx.tcx.types.i128)
+                }
+                (_, _) => unreachable!(),
+            };
+            if let Some(is_overflow) = is_overflow {
+                let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+                let val = val.load_scalar(fx);
+                Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)))
+            } else {
+                Some(val)
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
new file mode 100644
index 0000000..466758f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -0,0 +1,444 @@
+use rustc_index::vec::IndexVec;
+use rustc_target::abi::{Integer, Primitive};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
+
+use crate::prelude::*;
+
+pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
+    match tcx.data_layout.pointer_size.bits() {
+        16 => types::I16,
+        32 => types::I32,
+        64 => types::I64,
+        bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits),
+    }
+}
+
+pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
+    match scalar.value {
+        Primitive::Int(int, _sign) => match int {
+            Integer::I8 => types::I8,
+            Integer::I16 => types::I16,
+            Integer::I32 => types::I32,
+            Integer::I64 => types::I64,
+            Integer::I128 => types::I128,
+        },
+        Primitive::F32 => types::F32,
+        Primitive::F64 => types::F64,
+        Primitive::Pointer => pointer_ty(tcx),
+    }
+}
+
+fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Type> {
+    Some(match ty.kind() {
+        ty::Bool => types::I8,
+        ty::Uint(size) => match size {
+            UintTy::U8 => types::I8,
+            UintTy::U16 => types::I16,
+            UintTy::U32 => types::I32,
+            UintTy::U64 => types::I64,
+            UintTy::U128 => types::I128,
+            UintTy::Usize => pointer_ty(tcx),
+        },
+        ty::Int(size) => match size {
+            IntTy::I8 => types::I8,
+            IntTy::I16 => types::I16,
+            IntTy::I32 => types::I32,
+            IntTy::I64 => types::I64,
+            IntTy::I128 => types::I128,
+            IntTy::Isize => pointer_ty(tcx),
+        },
+        ty::Char => types::I32,
+        ty::Float(size) => match size {
+            FloatTy::F32 => types::F32,
+            FloatTy::F64 => types::F64,
+        },
+        ty::FnPtr(_) => pointer_ty(tcx),
+        ty::RawPtr(TypeAndMut {
+            ty: pointee_ty,
+            mutbl: _,
+        })
+        | ty::Ref(_, pointee_ty, _) => {
+            if has_ptr_meta(tcx, pointee_ty) {
+                return None;
+            } else {
+                pointer_ty(tcx)
+            }
+        }
+        ty::Adt(adt_def, _) if adt_def.repr.simd() => {
+            let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
+            {
+                Abi::Vector { element, count } => (element.clone(), *count),
+                _ => unreachable!(),
+            };
+
+            match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+                // Cranelift currently only implements icmp for 128bit vectors.
+                Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
+                _ => return None,
+            }
+        }
+        ty::Param(_) => bug!("ty param {:?}", ty),
+        _ => return None,
+    })
+}
+
+fn clif_pair_type_from_ty<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    ty: Ty<'tcx>,
+) -> Option<(types::Type, types::Type)> {
+    Some(match ty.kind() {
+        ty::Tuple(substs) if substs.len() == 2 => {
+            let mut types = substs.types();
+            let a = clif_type_from_ty(tcx, types.next().unwrap())?;
+            let b = clif_type_from_ty(tcx, types.next().unwrap())?;
+            if a.is_vector() || b.is_vector() {
+                return None;
+            }
+            (a, b)
+        }
+        ty::RawPtr(TypeAndMut {
+            ty: pointee_ty,
+            mutbl: _,
+        })
+        | ty::Ref(_, pointee_ty, _) => {
+            if has_ptr_meta(tcx, pointee_ty) {
+                (pointer_ty(tcx), pointer_ty(tcx))
+            } else {
+                return None;
+            }
+        }
+        _ => return None,
+    })
+}
+
+/// Is a pointer to this type a fat ptr?
+pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+    let ptr_ty = tcx.mk_ptr(TypeAndMut {
+        ty,
+        mutbl: rustc_hir::Mutability::Not,
+    });
+    match &tcx
+        .layout_of(ParamEnv::reveal_all().and(ptr_ty))
+        .unwrap()
+        .abi
+    {
+        Abi::Scalar(_) => false,
+        Abi::ScalarPair(_, _) => true,
+        abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
+    }
+}
+
+pub(crate) fn codegen_icmp_imm(
+    fx: &mut FunctionCx<'_, '_, impl Module>,
+    intcc: IntCC,
+    lhs: Value,
+    rhs: i128,
+) -> Value {
+    let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+    if lhs_ty == types::I128 {
+        // FIXME legalize `icmp_imm.i128` in Cranelift
+
+        let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs);
+        let (rhs_lsb, rhs_msb) = (rhs as u128 as u64 as i64, (rhs as u128 >> 64) as u64 as i64);
+
+        match intcc {
+            IntCC::Equal => {
+                let lsb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_lsb, rhs_lsb);
+                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+                fx.bcx.ins().band(lsb_eq, msb_eq)
+            }
+            IntCC::NotEqual => {
+                let lsb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_lsb, rhs_lsb);
+                let msb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_msb, rhs_msb);
+                fx.bcx.ins().bor(lsb_ne, msb_ne)
+            }
+            _ => {
+                // if msb_eq {
+                //     lsb_cc
+                // } else {
+                //     msb_cc
+                // }
+
+                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+                let lsb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_lsb, rhs_lsb);
+                let msb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_msb, rhs_msb);
+
+                fx.bcx.ins().select(msb_eq, lsb_cc, msb_cc)
+            }
+        }
+    } else {
+        let rhs = i64::try_from(rhs).expect("codegen_icmp_imm rhs out of range for <128bit int");
+        fx.bcx.ins().icmp_imm(intcc, lhs, rhs)
+    }
+}
+
+fn resolve_normal_value_imm(func: &Function, val: Value) -> Option<i64> {
+    if let ValueDef::Result(inst, 0 /*param*/) = func.dfg.value_def(val) {
+        if let InstructionData::UnaryImm {
+            opcode: Opcode::Iconst,
+            imm,
+        } = func.dfg[inst]
+        {
+            Some(imm.into())
+        } else {
+            None
+        }
+    } else {
+        None
+    }
+}
+
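+/// Resolve a 128-bit value to a constant, if it was built as `iconcat(lsb, msb)` of two
+/// 64-bit `iconst`s. Both halves must themselves be constants.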
+fn resolve_128bit_value_imm(func: &Function, val: Value) -> Option<u128> {
+    let (lsb, msb) = if let ValueDef::Result(inst, 0 /*param*/) = func.dfg.value_def(val) {
+        if let InstructionData::Binary {
+            opcode: Opcode::Iconcat,
+            args: [lsb, msb],
+        } = func.dfg[inst]
+        {
+            (lsb, msb)
+        } else {
+            return None;
+        }
+    } else {
+        return None;
+    };
+
+    let lsb = u128::from(resolve_normal_value_imm(func, lsb)? as u64);
+    let msb = u128::from(resolve_normal_value_imm(func, msb)? as u64);
+
+    Some(msb << 64 | lsb)
+}
+
+pub(crate) fn resolve_value_imm(func: &Function, val: Value) -> Option<u128> {
+    if func.dfg.value_type(val) == types::I128 {
+        resolve_128bit_value_imm(func, val)
+    } else {
+        resolve_normal_value_imm(func, val).map(|imm| u128::from(imm as u64))
+    }
+}
+
+pub(crate) fn type_min_max_value(
+    bcx: &mut FunctionBuilder<'_>,
+    ty: Type,
+    signed: bool,
+) -> (Value, Value) {
+    assert!(ty.is_int());
+
+    if ty == types::I128 {
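+        // The 128-bit constants are assembled from two 64-bit halves joined with `iconcat`.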
+        if signed {
+            let min = i128::MIN as u128;
+            let min_lsb = bcx.ins().iconst(types::I64, min as u64 as i64);
+            let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
+            let min = bcx.ins().iconcat(min_lsb, min_msb);
+
+            let max = i128::MAX as u128;
+            let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
+            let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
+            let max = bcx.ins().iconcat(max_lsb, max_msb);
+
+            return (min, max);
+        } else {
+            let min_half = bcx.ins().iconst(types::I64, 0);
+            let min = bcx.ins().iconcat(min_half, min_half);
+
+            let max_half = bcx.ins().iconst(types::I64, u64::MAX as i64);
+            let max = bcx.ins().iconcat(max_half, max_half);
+
+            return (min, max);
+        }
+    }
+
+    let min = match (ty, signed) {
+        (types::I8, false) | (types::I16, false) | (types::I32, false) | (types::I64, false) => {
+            0i64
+        }
+        (types::I8, true) => i64::from(i8::MIN),
+        (types::I16, true) => i64::from(i16::MIN),
+        (types::I32, true) => i64::from(i32::MIN),
+        (types::I64, true) => i64::MIN,
+        _ => unreachable!(),
+    };
+
+    let max = match (ty, signed) {
+        (types::I8, false) => i64::from(u8::MAX),
+        (types::I16, false) => i64::from(u16::MAX),
+        (types::I32, false) => i64::from(u32::MAX),
+        (types::I64, false) => u64::MAX as i64,
+        (types::I8, true) => i64::from(i8::MAX),
+        (types::I16, true) => i64::from(i16::MAX),
+        (types::I32, true) => i64::from(i32::MAX),
+        (types::I64, true) => i64::MAX,
+        _ => unreachable!(),
+    };
+
+    let (min, max) = (bcx.ins().iconst(ty, min), bcx.ins().iconst(ty, max));
+
+    (min, max)
+}
+
+pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
+    match ty.kind() {
+        ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
+        ty::Int(..) => true,
+        ty::Float(..) => false, // `signed` is unused for floats
+        _ => panic!("{}", ty),
+    }
+}
+
+pub(crate) struct FunctionCx<'clif, 'tcx, M: Module> {
+    pub(crate) cx: &'clif mut crate::CodegenCx<'tcx, M>,
+    pub(crate) tcx: TyCtxt<'tcx>,
+    pub(crate) pointer_type: Type, // Cached from module
+
+    pub(crate) instance: Instance<'tcx>,
+    pub(crate) mir: &'tcx Body<'tcx>,
+
+    pub(crate) bcx: FunctionBuilder<'clif>,
+    pub(crate) block_map: IndexVec<BasicBlock, Block>,
+    pub(crate) local_map: IndexVec<Local, CPlace<'tcx>>,
+
+    /// When `#[track_caller]` is used, the implicit caller location is stored in this variable.
+    pub(crate) caller_location: Option<CValue<'tcx>>,
+
+    /// See [`crate::optimize::code_layout`] for more information.
+    pub(crate) cold_blocks: EntitySet<Block>,
+
+    pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
+    pub(crate) source_info_set: indexmap::IndexSet<SourceInfo>,
+
+    /// This should only be accessed by `CPlace::new_var`.
+    pub(crate) next_ssa_var: u32,
+
+    pub(crate) inline_asm_index: u32,
+}
+
+impl<'tcx, M: Module> LayoutOf for FunctionCx<'_, 'tcx, M> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = TyAndLayout<'tcx>;
+
+    fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
+        assert!(!ty.still_further_specializable());
+        self.tcx
+            .layout_of(ParamEnv::reveal_all().and(&ty))
+            .unwrap_or_else(|e| {
+                if let layout::LayoutError::SizeOverflow(_) = e {
+                    self.tcx.sess.fatal(&e.to_string())
+                } else {
+                    bug!("failed to get layout for `{}`: {}", ty, e)
+                }
+            })
+    }
+}
+
+impl<'tcx, M: Module> layout::HasTyCtxt<'tcx> for FunctionCx<'_, 'tcx, M> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+}
+
+impl<'tcx, M: Module> rustc_target::abi::HasDataLayout for FunctionCx<'_, 'tcx, M> {
+    fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'tcx, M: Module> layout::HasParamEnv<'tcx> for FunctionCx<'_, 'tcx, M> {
+    fn param_env(&self) -> ParamEnv<'tcx> {
+        ParamEnv::reveal_all()
+    }
+}
+
+impl<'tcx, M: Module> HasTargetSpec for FunctionCx<'_, 'tcx, M> {
+    fn target_spec(&self) -> &Target {
+        &self.tcx.sess.target
+    }
+}
+
+impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
+    pub(crate) fn monomorphize<T>(&self, value: &T) -> T
+    where
+        T: TypeFoldable<'tcx> + Copy,
+    {
+        self.instance.subst_mir_and_normalize_erasing_regions(
+            self.tcx,
+            ty::ParamEnv::reveal_all(),
+            value
+        )
+    }
+
+    pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
+        clif_type_from_ty(self.tcx, ty)
+    }
+
+    pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
+        clif_pair_type_from_ty(self.tcx, ty)
+    }
+
+    pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
+        *self.block_map.get(bb).unwrap()
+    }
+
+    pub(crate) fn get_local_place(&mut self, local: Local) -> CPlace<'tcx> {
+        *self.local_map.get(local).unwrap_or_else(|| {
+            panic!("Local {:?} doesn't exist", local);
+        })
+    }
+
+    pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
+        let (index, _) = self.source_info_set.insert_full(source_info);
+        self.bcx.set_srcloc(SourceLoc::new(index as u32));
+    }
+
+    pub(crate) fn get_caller_location(&mut self, span: Span) -> CValue<'tcx> {
+        if let Some(loc) = self.caller_location {
+            // `#[track_caller]` is used; return caller location instead of current location.
+            return loc;
+        }
+
+        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+        let const_loc = self.tcx.const_caller_location((
+            rustc_span::symbol::Symbol::intern(&caller.file.name.to_string()),
+            caller.line as u32,
+            caller.col_display as u32 + 1,
+        ));
+        crate::constant::codegen_const_value(self, const_loc, self.tcx.caller_location_ty())
+    }
+
+    pub(crate) fn triple(&self) -> &target_lexicon::Triple {
+        self.cx.module.isa().triple()
+    }
+
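+    /// Defines an anonymous read-only data object containing `msg` and returns a pointer to it.
+    /// The symbol name embeds a hash of the contents, so identical messages share one definition.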
+    pub(crate) fn anonymous_str(&mut self, prefix: &str, msg: &str) -> Value {
+        use std::collections::hash_map::DefaultHasher;
+        use std::hash::{Hash, Hasher};
+
+        let mut hasher = DefaultHasher::new();
+        msg.hash(&mut hasher);
+        let msg_hash = hasher.finish();
+        let mut data_ctx = DataContext::new();
+        data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
+        let msg_id = self
+            .cx
+            .module
+            .declare_data(
+                &format!("__{}_{:08x}", prefix, msg_hash),
+                Linkage::Local,
+                false,
+                false,
+            )
+            .unwrap();
+
+        // Ignore DuplicateDefinition error, as the data will be the same
+        let _ = self.cx.module.define_data(msg_id, &data_ctx);
+
+        let local_msg_id = self.cx.module.declare_data_in_func(msg_id, self.bcx.func);
+        #[cfg(debug_assertions)]
+        {
+            self.add_comment(local_msg_id, msg);
+        }
+        self.bcx.ins().global_value(self.pointer_type, local_msg_id)
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
new file mode 100644
index 0000000..41cfae4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -0,0 +1,473 @@
+//! Handling of `static`s, `const`s and promoted allocations
+
+use rustc_span::DUMMY_SP;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::ErrorReported;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::interpret::{
+    read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Pointer, Scalar,
+};
+use rustc_middle::ty::{Const, ConstKind};
+
+use cranelift_codegen::ir::GlobalValueData;
+use cranelift_module::*;
+
+use crate::prelude::*;
+
+#[derive(Default)]
+pub(crate) struct ConstantCx {
+    todo: Vec<TodoItem>,
+    done: FxHashSet<DataId>,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum TodoItem {
+    Alloc(AllocId),
+    Static(DefId),
+}
+
+impl ConstantCx {
+    pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut impl Module) {
+        //println!("todo {:?}", self.todo);
+        define_all_allocs(tcx, module, &mut self);
+        //println!("done {:?}", self.done);
+        self.done.clear();
+    }
+}
+
+pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, impl Module>) {
+    for constant in &fx.mir.required_consts {
+        let const_ = fx.monomorphize(&constant.literal);
+        match const_.val {
+            ConstKind::Value(_) => {}
+            ConstKind::Unevaluated(def, ref substs, promoted) => {
+                if let Err(err) =
+                    fx.tcx
+                        .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
+                {
+                    match err {
+                        ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
+                            fx.tcx
+                                .sess
+                                .span_err(constant.span, "erroneous constant encountered");
+                        }
+                        ErrorHandled::TooGeneric => {
+                            span_bug!(
+                                constant.span,
+                                "codegen encountered polymorphic constant: {:?}",
+                                err
+                            );
+                        }
+                    }
+                }
+            }
+            ConstKind::Param(_)
+            | ConstKind::Infer(_)
+            | ConstKind::Bound(_, _)
+            | ConstKind::Placeholder(_)
+            | ConstKind::Error(_) => unreachable!("{:?}", const_),
+        }
+    }
+}
+
+pub(crate) fn codegen_static(constants_cx: &mut ConstantCx, def_id: DefId) {
+    constants_cx.todo.push(TodoItem::Static(def_id));
+}
+
+pub(crate) fn codegen_tls_ref<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    def_id: DefId,
+    layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+    let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+    let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+    #[cfg(debug_assertions)]
+    fx.add_comment(local_data_id, format!("tls {:?}", def_id));
+    let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
+    CValue::by_val(tls_ptr, layout)
+}
+
+fn codegen_static_ref<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    def_id: DefId,
+    layout: TyAndLayout<'tcx>,
+) -> CPlace<'tcx> {
+    let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+    let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+    #[cfg(debug_assertions)]
+    fx.add_comment(local_data_id, format!("{:?}", def_id));
+    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+    assert!(!layout.is_unsized(), "unsized statics aren't supported");
+    assert!(
+        matches!(fx.bcx.func.global_values[local_data_id], GlobalValueData::Symbol { tls: false, ..}),
+        "tls static referenced without Rvalue::ThreadLocalRef"
+    );
+    CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
+}
+
+pub(crate) fn codegen_constant<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    constant: &Constant<'tcx>,
+) -> CValue<'tcx> {
+    let const_ = fx.monomorphize(&constant.literal);
+    let const_val = match const_.val {
+        ConstKind::Value(const_val) => const_val,
+        ConstKind::Unevaluated(def, ref substs, promoted) if fx.tcx.is_static(def.did) => {
+            assert!(substs.is_empty());
+            assert!(promoted.is_none());
+
+            return codegen_static_ref(
+                fx,
+                def.did,
+                fx.layout_of(fx.monomorphize(&constant.literal.ty)),
+            )
+            .to_cvalue(fx);
+        }
+        ConstKind::Unevaluated(def, ref substs, promoted) => {
+            match fx
+                .tcx
+                .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
+            {
+                Ok(const_val) => const_val,
+                Err(_) => {
+                    if promoted.is_none() {
+                        fx.tcx
+                            .sess
+                            .span_err(constant.span, "erroneous constant encountered");
+                    }
+                    return crate::trap::trap_unreachable_ret_value(
+                        fx,
+                        fx.layout_of(const_.ty),
+                        "erroneous constant encountered",
+                    );
+                }
+            }
+        }
+        ConstKind::Param(_)
+        | ConstKind::Infer(_)
+        | ConstKind::Bound(_, _)
+        | ConstKind::Placeholder(_)
+        | ConstKind::Error(_) => unreachable!("{:?}", const_),
+    };
+
+    codegen_const_value(fx, const_val, const_.ty)
+}
+
+pub(crate) fn codegen_const_value<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    const_val: ConstValue<'tcx>,
+    ty: Ty<'tcx>,
+) -> CValue<'tcx> {
+    let layout = fx.layout_of(ty);
+    assert!(!layout.is_unsized(), "unsized const values are not supported");
+
+    if layout.is_zst() {
+        return CValue::by_ref(
+            crate::Pointer::dangling(layout.align.pref),
+            layout,
+        );
+    }
+
+    match const_val {
+        ConstValue::Scalar(x) => {
+            if fx.clif_type(layout.ty).is_none() {
+                let (size, align) = (layout.size, layout.align.pref);
+                let mut alloc = Allocation::from_bytes(
+                    std::iter::repeat(0)
+                        .take(size.bytes_usize())
+                        .collect::<Vec<u8>>(),
+                    align,
+                );
+                let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
+                alloc.write_scalar(fx, ptr, x.into(), size).unwrap();
+                let alloc = fx.tcx.intern_const_alloc(alloc);
+                return CValue::by_ref(pointer_for_allocation(fx, alloc), layout);
+            }
+
+            match x {
+                Scalar::Int(int) => {
+                    CValue::const_val(fx, layout, int)
+                }
+                Scalar::Ptr(ptr) => {
+                    let alloc_kind = fx.tcx.get_global_alloc(ptr.alloc_id);
+                    let base_addr = match alloc_kind {
+                        Some(GlobalAlloc::Memory(alloc)) => {
+                            fx.cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
+                            let data_id = data_id_for_alloc_id(
+                                &mut fx.cx.module,
+                                ptr.alloc_id,
+                                alloc.mutability,
+                            );
+                            let local_data_id =
+                                fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+                            #[cfg(debug_assertions)]
+                            fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
+                            fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+                        }
+                        Some(GlobalAlloc::Function(instance)) => {
+                            let func_id =
+                                crate::abi::import_function(fx.tcx, &mut fx.cx.module, instance);
+                            let local_func_id =
+                                fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+                            fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
+                        }
+                        Some(GlobalAlloc::Static(def_id)) => {
+                            assert!(fx.tcx.is_static(def_id));
+                            let data_id =
+                                data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+                            let local_data_id =
+                                fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+                            #[cfg(debug_assertions)]
+                            fx.add_comment(local_data_id, format!("{:?}", def_id));
+                            fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+                        }
+                        None => bug!("missing allocation {:?}", ptr.alloc_id),
+                    };
+                    let val = if ptr.offset.bytes() != 0 {
+                        fx.bcx
+                            .ins()
+                            .iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap())
+                    } else {
+                        base_addr
+                    };
+                    CValue::by_val(val, layout)
+                }
+            }
+        }
+        ConstValue::ByRef { alloc, offset } => CValue::by_ref(
+            pointer_for_allocation(fx, alloc)
+                .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
+            layout,
+        ),
+        ConstValue::Slice { data, start, end } => {
+            let ptr = pointer_for_allocation(fx, data)
+                .offset_i64(fx, i64::try_from(start).unwrap())
+                .get_addr(fx);
+            let len = fx.bcx.ins().iconst(
+                fx.pointer_type,
+                i64::try_from(end.checked_sub(start).unwrap()).unwrap(),
+            );
+            CValue::by_val_pair(ptr, len, layout)
+        }
+    }
+}
+
+fn pointer_for_allocation<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    alloc: &'tcx Allocation,
+) -> crate::pointer::Pointer {
+    let alloc_id = fx.tcx.create_memory_alloc(alloc);
+    fx.cx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
+    let data_id = data_id_for_alloc_id(&mut fx.cx.module, alloc_id, alloc.mutability);
+
+    let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+    #[cfg(debug_assertions)]
+    fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+    crate::pointer::Pointer::new(global_ptr)
+}
+
+fn data_id_for_alloc_id(
+    module: &mut impl Module,
+    alloc_id: AllocId,
+    mutability: rustc_hir::Mutability,
+) -> DataId {
+    module
+        .declare_data(
+            &format!(".L__alloc_{:x}", alloc_id.0),
+            Linkage::Local,
+            mutability == rustc_hir::Mutability::Mut,
+            false,
+        )
+        .unwrap()
+}
+
+fn data_id_for_static(
+    tcx: TyCtxt<'_>,
+    module: &mut impl Module,
+    def_id: DefId,
+    definition: bool,
+) -> DataId {
+    let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
+    let linkage = if definition {
+        crate::linkage::get_static_linkage(tcx, def_id)
+    } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
+        || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
+    {
+        Linkage::Preemptible
+    } else {
+        Linkage::Import
+    };
+
+    let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
+    let symbol_name = tcx.symbol_name(instance).name;
+    let ty = instance.ty(tcx, ParamEnv::reveal_all());
+    let is_mutable = if tcx.is_mutable_static(def_id) {
+        true
+    } else {
+        !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
+    };
+    let align = tcx
+        .layout_of(ParamEnv::reveal_all().and(ty))
+        .unwrap()
+        .align
+        .pref
+        .bytes();
+
+    let attrs = tcx.codegen_fn_attrs(def_id);
+
+    let data_id = module
+        .declare_data(
+            &*symbol_name,
+            linkage,
+            is_mutable,
+            attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
+        )
+        .unwrap();
+
+    if rlinkage.is_some() {
+        // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
+        // Declare an internal global `extern_with_linkage_foo` which
+        // is initialized with the address of `foo`.  If `foo` is
+        // discarded during linking (for example, if `foo` has weak
+        // linkage and there are no definitions), then
+        // `extern_with_linkage_foo` will instead be initialized to
+        // zero.
+
+        let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
+        let ref_data_id = module
+            .declare_data(&ref_name, Linkage::Local, false, false)
+            .unwrap();
+        let mut data_ctx = DataContext::new();
+        data_ctx.set_align(align);
+        let data = module.declare_data_in_data(data_id, &mut data_ctx);
+        data_ctx.define(
+            std::iter::repeat(0)
+                .take(pointer_ty(tcx).bytes() as usize)
+                .collect(),
+        );
+        data_ctx.write_data_addr(0, data, 0);
+        match module.define_data(ref_data_id, &data_ctx) {
+            // Every time the static is referenced there will be another definition of this global,
+            // so duplicate definitions are expected and allowed.
+            Err(ModuleError::DuplicateDefinition(_)) => {}
+            res => res.unwrap(),
+        }
+        ref_data_id
+    } else {
+        data_id
+    }
+}
+
+fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut impl Module, cx: &mut ConstantCx) {
+    while let Some(todo_item) = cx.todo.pop() {
+        let (data_id, alloc, section_name) = match todo_item {
+            TodoItem::Alloc(alloc_id) => {
+                //println!("alloc_id {}", alloc_id);
+                let alloc = match tcx.get_global_alloc(alloc_id).unwrap() {
+                    GlobalAlloc::Memory(alloc) => alloc,
+                    GlobalAlloc::Function(_) | GlobalAlloc::Static(_) => unreachable!(),
+                };
+                let data_id = data_id_for_alloc_id(module, alloc_id, alloc.mutability);
+                (data_id, alloc, None)
+            }
+            TodoItem::Static(def_id) => {
+                //println!("static {:?}", def_id);
+
+                let section_name = tcx
+                    .codegen_fn_attrs(def_id)
+                    .link_section
+                    .map(|s| s.as_str());
+
+                let alloc = tcx.eval_static_initializer(def_id).unwrap();
+
+                let data_id = data_id_for_static(tcx, module, def_id, true);
+                (data_id, alloc, section_name)
+            }
+        };
+
+        //println!("data_id {}", data_id);
+        if cx.done.contains(&data_id) {
+            continue;
+        }
+
+        let mut data_ctx = DataContext::new();
+        data_ctx.set_align(alloc.align.bytes());
+
+        if let Some(section_name) = section_name {
+            // FIXME set correct segment for Mach-O files
+            data_ctx.set_segment_section("", &*section_name);
+        }
+
+        let bytes = alloc
+            .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
+            .to_vec();
+        data_ctx.define(bytes.into_boxed_slice());
+
+        for &(offset, (_tag, reloc)) in alloc.relocations().iter() {
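+            // The bytes at the relocation offset hold the addend, i.e. the offset into the
+            // target allocation that the relocation points at.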
+            let addend = {
+                let endianness = tcx.data_layout.endian;
+                let offset = offset.bytes() as usize;
+                let ptr_size = tcx.data_layout.pointer_size;
+                let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+                    offset..offset + ptr_size.bytes() as usize,
+                );
+                read_target_uint(endianness, bytes).unwrap()
+            };
+
+            let reloc_target_alloc = tcx.get_global_alloc(reloc).unwrap();
+            let data_id = match reloc_target_alloc {
+                GlobalAlloc::Function(instance) => {
+                    assert_eq!(addend, 0);
+                    let func_id = crate::abi::import_function(tcx, module, instance);
+                    let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
+                    data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
+                    continue;
+                }
+                GlobalAlloc::Memory(target_alloc) => {
+                    cx.todo.push(TodoItem::Alloc(reloc));
+                    data_id_for_alloc_id(module, reloc, target_alloc.mutability)
+                }
+                GlobalAlloc::Static(def_id) => {
+                    if tcx
+                        .codegen_fn_attrs(def_id)
+                        .flags
+                        .contains(CodegenFnAttrFlags::THREAD_LOCAL)
+                    {
+                        tcx.sess.fatal(&format!(
+                            "Allocation {:?} contains reference to TLS value {:?}",
+                            alloc, def_id
+                        ));
+                    }
+
+                    // Don't push a `TodoItem::Static` here, as it will cause statics used by
+                    // multiple crates to be duplicated between them. It isn't necessary anyway,
+                    // as it will get pushed by `codegen_static` when necessary.
+                    data_id_for_static(tcx, module, def_id, false)
+                }
+            };
+
+            let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
+            data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
+        }
+
+        module.define_data(data_id, &data_ctx).unwrap();
+        cx.done.insert(data_id);
+    }
+
+    assert!(cx.todo.is_empty(), "{:?}", cx.todo);
+}
+
+pub(crate) fn mir_operand_get_const_val<'tcx>(
+    fx: &FunctionCx<'_, 'tcx, impl Module>,
+    operand: &Operand<'tcx>,
+) -> Option<&'tcx Const<'tcx>> {
+    match operand {
+        Operand::Copy(_) | Operand::Move(_) => None,
+        Operand::Constant(const_) => Some(
+            fx.monomorphize(&const_.literal)
+                .eval(fx.tcx, ParamEnv::reveal_all()),
+        ),
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
new file mode 100644
index 0000000..f6f795e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
@@ -0,0 +1,202 @@
+//! Write the debuginfo into an object file.
+
+use rustc_data_structures::fx::FxHashMap;
+
+use gimli::write::{Address, AttributeValue, EndianVec, Result, Sections, Writer};
+use gimli::{RunTimeEndian, SectionId};
+
+use crate::backend::WriteDebugInfo;
+
+use super::DebugContext;
+
+impl DebugContext<'_> {
+    pub(crate) fn emit<P: WriteDebugInfo>(&mut self, product: &mut P) {
+        let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
+        let root = self.dwarf.unit.root();
+        let root = self.dwarf.unit.get_mut(root);
+        root.set(
+            gimli::DW_AT_ranges,
+            AttributeValue::RangeListRef(unit_range_list_id),
+        );
+
+        let mut sections = Sections::new(WriterRelocate::new(self.endian));
+        self.dwarf.write(&mut sections).unwrap();
+
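+        // First add every non-empty debug section to the object and record its id, then emit
+        // the relocations, which may reference any of those sections.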
+        let mut section_map = FxHashMap::default();
+        let _: Result<()> = sections.for_each_mut(|id, section| {
+            if !section.writer.slice().is_empty() {
+                let section_id = product.add_debug_section(id, section.writer.take());
+                section_map.insert(id, section_id);
+            }
+            Ok(())
+        });
+
+        let _: Result<()> = sections.for_each(|id, section| {
+            if let Some(section_id) = section_map.get(&id) {
+                for reloc in &section.relocs {
+                    product.add_debug_reloc(&section_map, section_id, reloc);
+                }
+            }
+            Ok(())
+        });
+    }
+}
+
+#[derive(Clone)]
+pub(crate) struct DebugReloc {
+    pub(crate) offset: u32,
+    pub(crate) size: u8,
+    pub(crate) name: DebugRelocName,
+    pub(crate) addend: i64,
+    pub(crate) kind: object::RelocationKind,
+}
+
+#[derive(Clone)]
+pub(crate) enum DebugRelocName {
+    Section(SectionId),
+    Symbol(usize),
+}
+
+/// A [`Writer`] that collects all necessary relocations.
+#[derive(Clone)]
+pub(super) struct WriterRelocate {
+    pub(super) relocs: Vec<DebugReloc>,
+    pub(super) writer: EndianVec<RunTimeEndian>,
+}
+
+impl WriterRelocate {
+    pub(super) fn new(endian: RunTimeEndian) -> Self {
+        WriterRelocate {
+            relocs: Vec::new(),
+            writer: EndianVec::new(endian),
+        }
+    }
+
+    /// Apply the collected relocations so that the debuginfo can be used directly by the JIT.
+    #[cfg(feature = "jit")]
+    pub(super) fn relocate_for_jit(
+        mut self,
+        jit_product: &cranelift_simplejit::SimpleJITProduct,
+    ) -> Vec<u8> {
+        use std::convert::TryInto;
+
+        for reloc in self.relocs.drain(..) {
+            match reloc.name {
+                super::DebugRelocName::Section(_) => unreachable!(),
+                super::DebugRelocName::Symbol(sym) => {
+                    let addr = jit_product
+                        .lookup_func(cranelift_module::FuncId::from_u32(sym.try_into().unwrap()));
+                    let val = (addr as u64 as i64 + reloc.addend) as u64;
+                    self.writer
+                        .write_udata_at(reloc.offset as usize, val, reloc.size)
+                        .unwrap();
+                }
+            }
+        }
+        self.writer.into_vec()
+    }
+}
+
+impl Writer for WriterRelocate {
+    type Endian = RunTimeEndian;
+
+    fn endian(&self) -> Self::Endian {
+        self.writer.endian()
+    }
+
+    fn len(&self) -> usize {
+        self.writer.len()
+    }
+
+    fn write(&mut self, bytes: &[u8]) -> Result<()> {
+        self.writer.write(bytes)
+    }
+
+    fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> {
+        self.writer.write_at(offset, bytes)
+    }
+
+    fn write_address(&mut self, address: Address, size: u8) -> Result<()> {
+        match address {
+            Address::Constant(val) => self.write_udata(val, size),
+            Address::Symbol { symbol, addend } => {
+                let offset = self.len() as u64;
+                self.relocs.push(DebugReloc {
+                    offset: offset as u32,
+                    size,
+                    name: DebugRelocName::Symbol(symbol),
+                    addend: addend as i64,
+                    kind: object::RelocationKind::Absolute,
+                });
+                self.write_udata(0, size)
+            }
+        }
+    }
+
+    fn write_offset(&mut self, val: usize, section: SectionId, size: u8) -> Result<()> {
+        let offset = self.len() as u32;
+        self.relocs.push(DebugReloc {
+            offset,
+            size,
+            name: DebugRelocName::Section(section),
+            addend: val as i64,
+            kind: object::RelocationKind::Absolute,
+        });
+        self.write_udata(0, size)
+    }
+
+    fn write_offset_at(
+        &mut self,
+        offset: usize,
+        val: usize,
+        section: SectionId,
+        size: u8,
+    ) -> Result<()> {
+        self.relocs.push(DebugReloc {
+            offset: offset as u32,
+            size,
+            name: DebugRelocName::Section(section),
+            addend: val as i64,
+            kind: object::RelocationKind::Absolute,
+        });
+        self.write_udata_at(offset, 0, size)
+    }
+
+    fn write_eh_pointer(&mut self, address: Address, eh_pe: gimli::DwEhPe, size: u8) -> Result<()> {
+        match address {
+            // Address::Constant arm copied from gimli
+            Address::Constant(val) => {
+                // Indirect doesn't matter here.
+                let val = match eh_pe.application() {
+                    gimli::DW_EH_PE_absptr => val,
+                    gimli::DW_EH_PE_pcrel => {
+                        // TODO: better handling of sign
+                        let offset = self.len() as u64;
+                        offset.wrapping_sub(val)
+                    }
+                    _ => {
+                        return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe));
+                    }
+                };
+                self.write_eh_pointer_data(val, eh_pe.format(), size)
+            }
+            Address::Symbol { symbol, addend } => match eh_pe.application() {
+                gimli::DW_EH_PE_pcrel => {
+                    let size = match eh_pe.format() {
+                        gimli::DW_EH_PE_sdata4 => 4,
+                        _ => return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+                    };
+                    self.relocs.push(DebugReloc {
+                        offset: self.len() as u32,
+                        size,
+                        name: DebugRelocName::Symbol(symbol),
+                        addend,
+                        kind: object::RelocationKind::Relative,
+                    });
+                    self.write_udata(0, size)
+                }
+                _ => Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+            },
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
new file mode 100644
index 0000000..d226755
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -0,0 +1,258 @@
+//! Line info generation (`.debug_line`)
+
+use std::ffi::OsStr;
+use std::path::{Component, Path};
+
+use crate::prelude::*;
+
+use rustc_span::{
+    FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
+};
+
+use cranelift_codegen::binemit::CodeOffset;
+use cranelift_codegen::machinst::MachSrcLoc;
+
+use gimli::write::{
+    Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
+    UnitEntryId,
+};
+
+// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
+fn split_path_dir_and_file(path: &Path) -> (&Path, &OsStr) {
+    let mut iter = path.components();
+    let file_name = match iter.next_back() {
+        Some(Component::Normal(p)) => p,
+        component => {
+            panic!(
+                "Path component {:?} of path {} is an invalid filename",
+                component,
+                path.display()
+            );
+        }
+    };
+    let parent = iter.as_path();
+    (parent, file_name)
+}
+
+// OPTIMIZATION: Avoid UTF-8 validation on UNIX.
+fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
+    #[cfg(unix)]
+    {
+        use std::os::unix::ffi::OsStrExt;
+        return path.as_bytes();
+    }
+    #[cfg(not(unix))]
+    {
+        return path.to_str().unwrap().as_bytes();
+    }
+}
+
+pub(crate) const MD5_LEN: usize = 16;
+
+pub(crate) fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
+    if hash.kind == SourceFileHashAlgorithm::Md5 {
+        let mut buf = [0u8; MD5_LEN];
+        buf.copy_from_slice(hash.hash_bytes());
+        Some(FileInfo {
+            timestamp: 0,
+            size: 0,
+            md5: buf,
+        })
+    } else {
+        None
+    }
+}
+
+fn line_program_add_file(
+    line_program: &mut LineProgram,
+    line_strings: &mut LineStringTable,
+    file: &SourceFile,
+) -> FileId {
+    match &file.name {
+        FileName::Real(path) => {
+            let (dir_path, file_name) = split_path_dir_and_file(path.stable_name());
+            let dir_name = osstr_as_utf8_bytes(dir_path.as_os_str());
+            let file_name = osstr_as_utf8_bytes(file_name);
+
+            let dir_id = if !dir_name.is_empty() {
+                let dir_name = LineString::new(dir_name, line_program.encoding(), line_strings);
+                line_program.add_directory(dir_name)
+            } else {
+                line_program.default_directory()
+            };
+            let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
+
+            let info = make_file_info(file.src_hash);
+
+            line_program.file_has_md5 &= info.is_some();
+            line_program.add_file(file_name, dir_id, info)
+        }
+        // FIXME give more appropriate file names
+        filename => {
+            let dir_id = line_program.default_directory();
+            let dummy_file_name = LineString::new(
+                filename.to_string().into_bytes(),
+                line_program.encoding(),
+                line_strings,
+            );
+            line_program.add_file(dummy_file_name, dir_id, None)
+        }
+    }
+}
+
+impl<'tcx> DebugContext<'tcx> {
+    pub(super) fn emit_location(&mut self, entry_id: UnitEntryId, span: Span) {
+        let loc = self.tcx.sess.source_map().lookup_char_pos(span.lo());
+
+        let file_id = line_program_add_file(
+            &mut self.dwarf.unit.line_program,
+            &mut self.dwarf.line_strings,
+            &loc.file,
+        );
+
+        let entry = self.dwarf.unit.get_mut(entry_id);
+
+        entry.set(
+            gimli::DW_AT_decl_file,
+            AttributeValue::FileIndex(Some(file_id)),
+        );
+        entry.set(
+            gimli::DW_AT_decl_line,
+            AttributeValue::Udata(loc.line as u64),
+        );
+        // FIXME: probably omit this
+        entry.set(
+            gimli::DW_AT_decl_column,
+            AttributeValue::Udata(loc.col.to_usize() as u64),
+        );
+    }
+
+    pub(super) fn create_debug_lines(
+        &mut self,
+        isa: &dyn cranelift_codegen::isa::TargetIsa,
+        symbol: usize,
+        entry_id: UnitEntryId,
+        context: &Context,
+        function_span: Span,
+        source_info_set: &indexmap::IndexSet<SourceInfo>,
+    ) -> CodeOffset {
+        let tcx = self.tcx;
+        let line_program = &mut self.dwarf.unit.line_program;
+        let func = &context.func;
+
+        let line_strings = &mut self.dwarf.line_strings;
+        let mut last_span = None;
+        let mut last_file = None;
+        let mut create_row_for_span = |line_program: &mut LineProgram, span: Span| {
+            if let Some(last_span) = last_span {
+                if span == last_span {
+                    line_program.generate_row();
+                    return;
+                }
+            }
+            last_span = Some(span);
+
+            // Based on https://github.com/rust-lang/rust/blob/e369d87b015a84653343032833d65d0545fd3f26/src/librustc_codegen_ssa/mir/mod.rs#L116-L131
+            // In order to have good line stepping behavior in the debugger, we overwrite the debug
+            // locations of macro expansions with that of the outermost expansion site
+            // (unless the crate is being compiled with `-Z debug-macros`).
+            let span = if !span.from_expansion() || tcx.sess.opts.debugging_opts.debug_macros {
+                span
+            } else {
+                // Walk up the macro expansion chain until we reach a non-expanded span.
+                // We also stop at the function body level because no line stepping can occur
+                // at the level above that.
+                rustc_span::hygiene::walk_chain(span, function_span.ctxt())
+            };
+
+            let (file, line, col) = match tcx.sess.source_map().lookup_line(span.lo()) {
+                Ok(SourceFileAndLine { sf: file, line }) => {
+                    let line_pos = file.line_begin_pos(span.lo());
+
+                    (
+                        file,
+                        u64::try_from(line).unwrap() + 1,
+                        u64::from((span.lo() - line_pos).to_u32()) + 1,
+                    )
+                }
+                Err(file) => (file, 0, 0),
+            };
+
+            // line_program_add_file is very slow.
+            // Optimize for the common case of the current file not being changed.
+            let current_file_changed = if let Some(last_file) = &last_file {
+                // If the allocations are not equal, then the files may still be equal, but that
+                // is not a problem, as this is just an optimization.
+                !rustc_data_structures::sync::Lrc::ptr_eq(last_file, &file)
+            } else {
+                true
+            };
+            if current_file_changed {
+                let file_id = line_program_add_file(line_program, line_strings, &file);
+                line_program.row().file = file_id;
+                last_file = Some(file);
+            }
+
+            line_program.row().line = line;
+            line_program.row().column = col;
+            line_program.generate_row();
+        };
+
+        line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
+
+        let mut func_end = 0;
+
+        if let Some(ref mcr) = &context.mach_compile_result {
+            for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
+                line_program.row().address_offset = u64::from(start);
+                if !loc.is_default() {
+                    let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
+                    create_row_for_span(line_program, source_info.span);
+                } else {
+                    create_row_for_span(line_program, function_span);
+                }
+                func_end = end;
+            }
+
+            line_program.end_sequence(u64::from(func_end));
+
+            func_end = mcr.buffer.total_size();
+        } else {
+            let encinfo = isa.encoding_info();
+            let mut blocks = func.layout.blocks().collect::<Vec<_>>();
+            blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase
+
+            for block in blocks {
+                for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
+                    let srcloc = func.srclocs[inst];
+                    line_program.row().address_offset = u64::from(offset);
+                    if !srcloc.is_default() {
+                        let source_info =
+                            *source_info_set.get_index(srcloc.bits() as usize).unwrap();
+                        create_row_for_span(line_program, source_info.span);
+                    } else {
+                        create_row_for_span(line_program, function_span);
+                    }
+                    func_end = offset + size;
+                }
+            }
+            line_program.end_sequence(u64::from(func_end));
+        }
+
+        assert_ne!(func_end, 0);
+
+        let entry = self.dwarf.unit.get_mut(entry_id);
+        entry.set(
+            gimli::DW_AT_low_pc,
+            AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+        );
+        entry.set(
+            gimli::DW_AT_high_pc,
+            AttributeValue::Udata(u64::from(func_end)),
+        );
+
+        self.emit_location(entry_id, function_span);
+
+        func_end
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
new file mode 100644
index 0000000..85e8158
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -0,0 +1,487 @@
+//! Handling of everything related to debuginfo.
+
+mod emit;
+mod line_info;
+mod unwind;
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+
+use cranelift_codegen::entity::EntityRef;
+use cranelift_codegen::ir::{StackSlots, ValueLabel, ValueLoc};
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::ValueLocRange;
+
+use gimli::write::{
+    Address, AttributeValue, DwarfUnit, Expression, LineProgram, LineString, Location,
+    LocationList, Range, RangeList, UnitEntryId,
+};
+use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64};
+
+pub(crate) use emit::{DebugReloc, DebugRelocName};
+pub(crate) use unwind::UnwindContext;
+
+fn target_endian(tcx: TyCtxt<'_>) -> RunTimeEndian {
+    use rustc_target::abi::Endian;
+
+    match tcx.data_layout.endian {
+        Endian::Big => RunTimeEndian::Big,
+        Endian::Little => RunTimeEndian::Little,
+    }
+}
+
+pub(crate) struct DebugContext<'tcx> {
+    tcx: TyCtxt<'tcx>,
+
+    endian: RunTimeEndian,
+
+    dwarf: DwarfUnit,
+    unit_range_list: RangeList,
+
+    clif_types: FxHashMap<Type, UnitEntryId>,
+    types: FxHashMap<Ty<'tcx>, UnitEntryId>,
+}
+
+impl<'tcx> DebugContext<'tcx> {
+    pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
+        let encoding = Encoding {
+            format: Format::Dwarf32,
+            // TODO: this should be configurable
+            // macOS doesn't seem to support DWARF > 3
+            // DWARF version 5 is required for the md5 file hash
+            version: if tcx.sess.target.is_like_osx {
+                3
+            } else {
+                // FIXME change to version 5 once the gdb and lldb shipping with the latest debian
+                // support it.
+                4
+            },
+            address_size: isa.frontend_config().pointer_bytes(),
+        };
+
+        let mut dwarf = DwarfUnit::new(encoding);
+
+        // FIXME: how to get version when building out of tree?
+        // Normally this would use option_env!("CFG_VERSION").
+        let producer = format!("cg_clif (rustc {})", "unknown version");
+        let comp_dir = tcx.sess.working_dir.0.to_string_lossy().into_owned();
+        let (name, file_info) = match tcx.sess.local_crate_source_file.clone() {
+            Some(path) => {
+                let name = path.to_string_lossy().into_owned();
+                (name, None)
+            }
+            None => (tcx.crate_name(LOCAL_CRATE).to_string(), None),
+        };
+
+        let mut line_program = LineProgram::new(
+            encoding,
+            LineEncoding::default(),
+            LineString::new(comp_dir.as_bytes(), encoding, &mut dwarf.line_strings),
+            LineString::new(name.as_bytes(), encoding, &mut dwarf.line_strings),
+            file_info,
+        );
+        line_program.file_has_md5 = file_info.is_some();
+
+        dwarf.unit.line_program = line_program;
+
+        {
+            let name = dwarf.strings.add(name);
+            let comp_dir = dwarf.strings.add(comp_dir);
+
+            let root = dwarf.unit.root();
+            let root = dwarf.unit.get_mut(root);
+            root.set(
+                gimli::DW_AT_producer,
+                AttributeValue::StringRef(dwarf.strings.add(producer)),
+            );
+            root.set(
+                gimli::DW_AT_language,
+                AttributeValue::Language(gimli::DW_LANG_Rust),
+            );
+            root.set(gimli::DW_AT_name, AttributeValue::StringRef(name));
+            root.set(gimli::DW_AT_comp_dir, AttributeValue::StringRef(comp_dir));
+            root.set(
+                gimli::DW_AT_low_pc,
+                AttributeValue::Address(Address::Constant(0)),
+            );
+        }
+
+        DebugContext {
+            tcx,
+
+            endian: target_endian(tcx),
+
+            dwarf,
+            unit_range_list: RangeList(Vec::new()),
+
+            clif_types: FxHashMap::default(),
+            types: FxHashMap::default(),
+        }
+    }
+
+    fn dwarf_ty_for_clif_ty(&mut self, ty: Type) -> UnitEntryId {
+        if let Some(type_id) = self.clif_types.get(&ty) {
+            return *type_id;
+        }
+
+        let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
+
+        let primitive = |dwarf: &mut DwarfUnit, ate| {
+            let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
+            let type_entry = dwarf.unit.get_mut(type_id);
+            type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
+            type_id
+        };
+
+        let type_id = if ty.is_bool() {
+            primitive(&mut self.dwarf, gimli::DW_ATE_boolean)
+        } else if ty.is_int() {
+            primitive(&mut self.dwarf, gimli::DW_ATE_address)
+        } else if ty.is_float() {
+            primitive(&mut self.dwarf, gimli::DW_ATE_float)
+        } else {
+            new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type)
+        };
+
+        let type_entry = self.dwarf.unit.get_mut(type_id);
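+        // Cranelift types carry no signedness, so report e.g. `i64` as `u64` in the name.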
+        type_entry.set(
+            gimli::DW_AT_name,
+            AttributeValue::String(format!("{}", ty).replace('i', "u").into_bytes()),
+        );
+        type_entry.set(
+            gimli::DW_AT_byte_size,
+            AttributeValue::Udata(u64::from(ty.bytes())),
+        );
+
+        type_id
+    }
+
+    fn dwarf_ty(&mut self, ty: Ty<'tcx>) -> UnitEntryId {
+        if let Some(type_id) = self.types.get(ty) {
+            return *type_id;
+        }
+
+        let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
+
+        let primitive = |dwarf: &mut DwarfUnit, ate| {
+            let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
+            let type_entry = dwarf.unit.get_mut(type_id);
+            type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
+            type_id
+        };
+
+        let name = format!("{}", ty);
+        let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
+
+        let type_id = match ty.kind() {
+            ty::Bool => primitive(&mut self.dwarf, gimli::DW_ATE_boolean),
+            ty::Char => primitive(&mut self.dwarf, gimli::DW_ATE_UTF),
+            ty::Uint(_) => primitive(&mut self.dwarf, gimli::DW_ATE_unsigned),
+            ty::Int(_) => primitive(&mut self.dwarf, gimli::DW_ATE_signed),
+            ty::Float(_) => primitive(&mut self.dwarf, gimli::DW_ATE_float),
+            ty::Ref(_, pointee_ty, _mutbl)
+            | ty::RawPtr(ty::TypeAndMut {
+                ty: pointee_ty,
+                mutbl: _mutbl,
+            }) => {
+                let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_pointer_type);
+
+                // Ensure that the type is inserted before recursing, to avoid duplicates
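+                // and to break the cycle for self-referential types (for example a
+                // `struct Node { next: *const Node }`, whose pointee leads back to this pointer type).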
+                self.types.insert(ty, type_id);
+
+                let pointee = self.dwarf_ty(pointee_ty);
+
+                let type_entry = self.dwarf.unit.get_mut(type_id);
+
+                //type_entry.set(gimli::DW_AT_mutable, AttributeValue::Flag(mutbl == rustc_hir::Mutability::Mut));
+                type_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(pointee));
+
+                type_id
+            }
+            ty::Adt(adt_def, _substs) if adt_def.is_struct() && !layout.is_unsized() => {
+                let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type);
+
+                // Ensure that the type is inserted before recursing, to avoid duplicates
+                self.types.insert(ty, type_id);
+
+                let variant = adt_def.non_enum_variant();
+
+                for (field_idx, field_def) in variant.fields.iter().enumerate() {
+                    let field_offset = layout.fields.offset(field_idx);
+                    let field_layout = layout
+                        .field(
+                            &layout::LayoutCx {
+                                tcx: self.tcx,
+                                param_env: ParamEnv::reveal_all(),
+                            },
+                            field_idx,
+                        )
+                        .unwrap();
+
+                    let field_type = self.dwarf_ty(field_layout.ty);
+
+                    let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member);
+                    let field_entry = self.dwarf.unit.get_mut(field_id);
+
+                    field_entry.set(
+                        gimli::DW_AT_name,
+                        AttributeValue::String(field_def.ident.as_str().to_string().into_bytes()),
+                    );
+                    field_entry.set(
+                        gimli::DW_AT_data_member_location,
+                        AttributeValue::Udata(field_offset.bytes()),
+                    );
+                    field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type));
+                }
+
+                type_id
+            }
+            _ => new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type),
+        };
+
+        let type_entry = self.dwarf.unit.get_mut(type_id);
+
+        type_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+        type_entry.set(
+            gimli::DW_AT_byte_size,
+            AttributeValue::Udata(layout.size.bytes()),
+        );
+
+        self.types.insert(ty, type_id);
+
+        type_id
+    }
+
+    fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId {
+        let dw_ty = self.dwarf_ty(ty);
+
+        let var_id = self.dwarf.unit.add(scope, gimli::DW_TAG_variable);
+        let var_entry = self.dwarf.unit.get_mut(var_id);
+
+        var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+        var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
+
+        var_id
+    }
+
+    pub(crate) fn define_function(
+        &mut self,
+        instance: Instance<'tcx>,
+        func_id: FuncId,
+        name: &str,
+        isa: &dyn TargetIsa,
+        context: &Context,
+        source_info_set: &indexmap::IndexSet<SourceInfo>,
+        local_map: IndexVec<mir::Local, CPlace<'tcx>>,
+    ) {
+        let symbol = func_id.as_u32() as usize;
+        let mir = self.tcx.instance_mir(instance.def);
+
+        // FIXME: add to appropriate scope instead of root
+        let scope = self.dwarf.unit.root();
+
+        let entry_id = self.dwarf.unit.add(scope, gimli::DW_TAG_subprogram);
+        let entry = self.dwarf.unit.get_mut(entry_id);
+        let name_id = self.dwarf.strings.add(name);
+        // Gdb requires DW_AT_name. Otherwise the DW_TAG_subprogram is skipped.
+        entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
+        entry.set(
+            gimli::DW_AT_linkage_name,
+            AttributeValue::StringRef(name_id),
+        );
+
+        let end =
+            self.create_debug_lines(isa, symbol, entry_id, context, mir.span, source_info_set);
+
+        self.unit_range_list.0.push(Range::StartLength {
+            begin: Address::Symbol { symbol, addend: 0 },
+            length: u64::from(end),
+        });
+
+        if isa.get_mach_backend().is_some() {
+            return; // Not yet implemented for the AArch64 backend.
+        }
+
+        let func_entry = self.dwarf.unit.get_mut(entry_id);
+        // Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped.
+        func_entry.set(
+            gimli::DW_AT_low_pc,
+            AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+        );
+        // Using Udata for DW_AT_high_pc requires at least DWARF4
+        func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
+
+        // FIXME Remove once actual debuginfo for locals works.
+        for (i, (param, &val)) in context
+            .func
+            .signature
+            .params
+            .iter()
+            .zip(
+                context
+                    .func
+                    .dfg
+                    .block_params(context.func.layout.entry_block().unwrap()),
+            )
+            .enumerate()
+        {
+            use cranelift_codegen::ir::ArgumentPurpose;
+            let base_name = match param.purpose {
+                ArgumentPurpose::Normal => "arg",
+                ArgumentPurpose::StructArgument(_) => "struct_arg",
+                ArgumentPurpose::StructReturn => "sret",
+                ArgumentPurpose::Link
+                | ArgumentPurpose::FramePointer
+                | ArgumentPurpose::CalleeSaved => continue,
+                ArgumentPurpose::VMContext
+                | ArgumentPurpose::SignatureId
+                | ArgumentPurpose::CallerTLS
+                | ArgumentPurpose::CalleeTLS
+                | ArgumentPurpose::StackLimit => unreachable!(),
+            };
+            let name = format!("{}{}", base_name, i);
+
+            let dw_ty = self.dwarf_ty_for_clif_ty(param.value_type);
+            let loc =
+                translate_loc(isa, context.func.locations[val], &context.func.stack_slots).unwrap();
+
+            let arg_id = self
+                .dwarf
+                .unit
+                .add(entry_id, gimli::DW_TAG_formal_parameter);
+            let var_entry = self.dwarf.unit.get_mut(arg_id);
+
+            var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+            var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
+            var_entry.set(gimli::DW_AT_location, AttributeValue::Exprloc(loc));
+        }
+
+        // FIXME make it more reliable and implement scopes before re-enabling this.
+        if false {
+            let value_labels_ranges = context.build_value_labels_ranges(isa).unwrap();
+
+            for (local, _local_decl) in mir.local_decls.iter_enumerated() {
+                let ty = self.tcx.subst_and_normalize_erasing_regions(
+                    instance.substs,
+                    ty::ParamEnv::reveal_all(),
+                    &mir.local_decls[local].ty,
+                );
+                let var_id = self.define_local(entry_id, format!("{:?}", local), ty);
+
+                let location = place_location(
+                    self,
+                    isa,
+                    symbol,
+                    context,
+                    &local_map,
+                    &value_labels_ranges,
+                    Place {
+                        local,
+                        projection: ty::List::empty(),
+                    },
+                );
+
+                let var_entry = self.dwarf.unit.get_mut(var_id);
+                var_entry.set(gimli::DW_AT_location, location);
+            }
+        }
+
+        // FIXME create locals for all entries in mir.var_debug_info
+    }
+}
+
+fn place_location<'tcx>(
+    debug_context: &mut DebugContext<'tcx>,
+    isa: &dyn TargetIsa,
+    symbol: usize,
+    context: &Context,
+    local_map: &IndexVec<mir::Local, CPlace<'tcx>>,
+    #[allow(rustc::default_hash_types)] value_labels_ranges: &std::collections::HashMap<
+        ValueLabel,
+        Vec<ValueLocRange>,
+    >,
+    place: Place<'tcx>,
+) -> AttributeValue {
+    assert!(place.projection.is_empty()); // FIXME implement them
+
+    match local_map[place.local].inner() {
+        CPlaceInner::Var(_local, var) => {
+            let value_label = cranelift_codegen::ir::ValueLabel::new(var.index());
+            if let Some(value_loc_ranges) = value_labels_ranges.get(&value_label) {
+                let loc_list = LocationList(
+                    value_loc_ranges
+                        .iter()
+                        .map(|value_loc_range| Location::StartEnd {
+                            begin: Address::Symbol {
+                                symbol,
+                                addend: i64::from(value_loc_range.start),
+                            },
+                            end: Address::Symbol {
+                                symbol,
+                                addend: i64::from(value_loc_range.end),
+                            },
+                            data: translate_loc(
+                                isa,
+                                value_loc_range.loc,
+                                &context.func.stack_slots,
+                            )
+                            .unwrap(),
+                        })
+                        .collect(),
+                );
+                let loc_list_id = debug_context.dwarf.unit.locations.add(loc_list);
+
+                AttributeValue::LocationListRef(loc_list_id)
+            } else {
+                // FIXME set value labels for unused locals
+
+                AttributeValue::Exprloc(Expression::new())
+            }
+        }
+        CPlaceInner::VarPair(_, _, _) => {
+            // FIXME implement this
+
+            AttributeValue::Exprloc(Expression::new())
+        }
+        CPlaceInner::VarLane(_, _, _) => {
+            // FIXME implement this
+
+            AttributeValue::Exprloc(Expression::new())
+        }
+        CPlaceInner::Addr(_, _) => {
+            // FIXME implement this (used by arguments and returns)
+
+            AttributeValue::Exprloc(Expression::new())
+
+            // For PointerBase::Stack:
+            //AttributeValue::Exprloc(translate_loc(ValueLoc::Stack(*stack_slot), &context.func.stack_slots).unwrap())
+        }
+    }
+}
+
+// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
+fn translate_loc(
+    isa: &dyn TargetIsa,
+    loc: ValueLoc,
+    stack_slots: &StackSlots,
+) -> Option<Expression> {
+    match loc {
+        ValueLoc::Reg(reg) => {
+            let machine_reg = isa.map_dwarf_register(reg).unwrap();
+            let mut expr = Expression::new();
+            expr.op_reg(gimli::Register(machine_reg));
+            Some(expr)
+        }
+        ValueLoc::Stack(ss) => {
+            if let Some(ss_offset) = stack_slots[ss].offset {
+                let mut expr = Expression::new();
+                expr.op_breg(X86_64::RBP, i64::from(ss_offset) + 16);
+                Some(expr)
+            } else {
+                None
+            }
+        }
+        _ => None,
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
new file mode 100644
index 0000000..6813840
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
@@ -0,0 +1,168 @@
+//! Unwind info generation (`.eh_frame`)
+
+use crate::prelude::*;
+
+use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
+
+use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
+
+use crate::backend::WriteDebugInfo;
+
+pub(crate) struct UnwindContext<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    frame_table: FrameTable,
+    cie_id: Option<CieId>,
+}
+
+impl<'tcx> UnwindContext<'tcx> {
+    pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
+        let mut frame_table = FrameTable::default();
+
+        let cie_id = if let Some(mut cie) = isa.create_systemv_cie() {
+            if isa.flags().is_pic() {
+                cie.fde_address_encoding =
+                    gimli::DwEhPe(gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0);
+            }
+            Some(frame_table.add_cie(cie))
+        } else {
+            None
+        };
+
+        UnwindContext {
+            tcx,
+            frame_table,
+            cie_id,
+        }
+    }
+
+    pub(crate) fn add_function(&mut self, func_id: FuncId, context: &Context, isa: &dyn TargetIsa) {
+        let unwind_info = if let Some(unwind_info) = context.create_unwind_info(isa).unwrap() {
+            unwind_info
+        } else {
+            return;
+        };
+
+        match unwind_info {
+            UnwindInfo::SystemV(unwind_info) => {
+                self.frame_table.add_fde(
+                    self.cie_id.unwrap(),
+                    unwind_info.to_fde(Address::Symbol {
+                        symbol: func_id.as_u32() as usize,
+                        addend: 0,
+                    }),
+                );
+            }
+            UnwindInfo::WindowsX64(_) => {
+                // FIXME implement this
+            }
+            unwind_info => unimplemented!("{:?}", unwind_info),
+        }
+    }
+
+    pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) {
+        let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
+            self.tcx,
+        )));
+        self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+        if !eh_frame.0.writer.slice().is_empty() {
+            let id = eh_frame.id();
+            let section_id = product.add_debug_section(id, eh_frame.0.writer.into_vec());
+            let mut section_map = FxHashMap::default();
+            section_map.insert(id, section_id);
+
+            for reloc in &eh_frame.0.relocs {
+                product.add_debug_reloc(&section_map, &section_id, reloc);
+            }
+        }
+    }
+
+    #[cfg(feature = "jit")]
+    pub(crate) unsafe fn register_jit(
+        self,
+        jit_product: &cranelift_simplejit::SimpleJITProduct,
+    ) -> Option<UnwindRegistry> {
+        let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
+            self.tcx,
+        )));
+        self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+        if eh_frame.0.writer.slice().is_empty() {
+            return None;
+        }
+
+        let mut eh_frame = eh_frame.0.relocate_for_jit(jit_product);
+
+        // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
+        eh_frame.extend(&[0, 0, 0, 0]);
+
+        let mut registrations = Vec::new();
+
+        // =======================================================================
+        // Everything after this line up to the end of the file is loosely based on
+        // https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs
+        #[cfg(target_os = "macos")]
+        {
+            // On macOS, `__register_frame` takes a pointer to a single FDE
+            let start = eh_frame.as_ptr();
+            let end = start.add(eh_frame.len());
+            let mut current = start;
+
+            // Walk all of the entries in the frame table and register them
+            while current < end {
+                let len = std::ptr::read::<u32>(current as *const u32) as usize;
+
+                // Skip over the CIE
+                if current != start {
+                    __register_frame(current);
+                    registrations.push(current as usize);
+                }
+
+                // Move to the next table entry (+4 because the length itself is not inclusive)
+                current = current.add(len + 4);
+            }
+        }
+        #[cfg(not(target_os = "macos"))]
+        {
+            // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
+            let ptr = eh_frame.as_ptr();
+            __register_frame(ptr);
+            registrations.push(ptr as usize);
+        }
+
+        Some(UnwindRegistry {
+            _frame_table: eh_frame,
+            registrations,
+        })
+    }
+}
+
+/// Represents a registry of function unwind information for System V ABI.
+pub(crate) struct UnwindRegistry {
+    _frame_table: Vec<u8>,
+    registrations: Vec<usize>,
+}
+
+extern "C" {
+    // libunwind import
+    fn __register_frame(fde: *const u8);
+    fn __deregister_frame(fde: *const u8);
+}
+
+impl Drop for UnwindRegistry {
+    fn drop(&mut self) {
+        unsafe {
+            // libgcc stores the frame entries as a linked list in decreasing sort order
+            // based on the PC value of the registered entry.
+            //
+            // As we store the registrations in increasing order, it would be O(N^2) to
+            // deregister in that order.
+            //
+            // To ensure that we just pop off the first element in the list upon every
+            // deregistration, walk our list of registrations backwards.
+            for fde in self.registrations.iter().rev() {
+                __deregister_frame(*fde as *const _);
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs
new file mode 100644
index 0000000..1e8e86a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs
@@ -0,0 +1,171 @@
+//! Handling of enum discriminants
+//!
+//! Adapted from <https://github.com/rust-lang/rust/blob/d760df5aea483aae041c9a241e7acacf48f75035/src/librustc_codegen_ssa/mir/place.rs>
+
+use rustc_target::abi::{Int, TagEncoding, Variants};
+
+use crate::prelude::*;
+
+pub(crate) fn codegen_set_discriminant<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    place: CPlace<'tcx>,
+    variant_index: VariantIdx,
+) {
+    let layout = place.layout();
+    if layout.for_variant(fx, variant_index).abi.is_uninhabited() {
+        return;
+    }
+    match layout.variants {
+        Variants::Single { index } => {
+            assert_eq!(index, variant_index);
+        }
+        Variants::Multiple {
+            tag: _,
+            tag_field,
+            tag_encoding: TagEncoding::Direct,
+            variants: _,
+        } => {
+            let ptr = place.place_field(fx, mir::Field::new(tag_field));
+            let to = layout
+                .ty
+                .discriminant_for_variant(fx.tcx, variant_index)
+                .unwrap()
+                .val
+                .into();
+            let discr = CValue::const_val(fx, ptr.layout(), to);
+            ptr.write_cvalue(fx, discr);
+        }
+        Variants::Multiple {
+            tag: _,
+            tag_field,
+            tag_encoding:
+                TagEncoding::Niche {
+                    dataful_variant,
+                    ref niche_variants,
+                    niche_start,
+                },
+            variants: _,
+        } => {
+            if variant_index != dataful_variant {
+                let niche = place.place_field(fx, mir::Field::new(tag_field));
+                let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+                let niche_value = u128::from(niche_value).wrapping_add(niche_start);
+                let niche_llval = CValue::const_val(fx, niche.layout(), niche_value.into());
+                niche.write_cvalue(fx, niche_llval);
+            }
+        }
+    }
+}
+
+pub(crate) fn codegen_get_discriminant<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    value: CValue<'tcx>,
+    dest_layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+    let layout = value.layout();
+
+    if layout.abi == Abi::Uninhabited {
+        return trap_unreachable_ret_value(
+            fx,
+            dest_layout,
+            "[panic] Tried to get discriminant for uninhabited type.",
+        );
+    }
+
+    let (tag_scalar, tag_field, tag_encoding) = match &layout.variants {
+        Variants::Single { index } => {
+            let discr_val = layout
+                .ty
+                .discriminant_for_variant(fx.tcx, *index)
+                .map_or(u128::from(index.as_u32()), |discr| discr.val);
+            return CValue::const_val(fx, dest_layout, discr_val.into());
+        }
+        Variants::Multiple {
+            tag,
+            tag_field,
+            tag_encoding,
+            variants: _,
+        } => (tag, *tag_field, tag_encoding),
+    };
+
+    let cast_to = fx.clif_type(dest_layout.ty).unwrap();
+
+    // Read the tag/niche-encoded discriminant from memory.
+    let tag = value.value_field(fx, mir::Field::new(tag_field));
+    let tag = tag.load_scalar(fx);
+
+    // Decode the discriminant (specifically if it's niche-encoded).
+    match *tag_encoding {
+        TagEncoding::Direct => {
+            let signed = match tag_scalar.value {
+                Int(_, signed) => signed,
+                _ => false,
+            };
+            let val = clif_intcast(fx, tag, cast_to, signed);
+            CValue::by_val(val, dest_layout)
+        }
+        TagEncoding::Niche {
+            dataful_variant,
+            ref niche_variants,
+            niche_start,
+        } => {
+            // Rebase from niche values to discriminants, and check
+            // whether the result is in range for the niche variants.
+
+            // We first compute the "relative discriminant" (wrt `niche_variants`),
+            // that is, if `n = niche_variants.end() - niche_variants.start()`,
+            // we remap `niche_start..=niche_start + n` (which may wrap around)
+            // to (non-wrap-around) `0..=n`, to be able to check whether the
+            // discriminant corresponds to a niche variant with one comparison.
+            // We also can't go directly to the (variant index) discriminant
+            // and check that it is in the range `niche_variants`, because
+            // that might not fit in the same type, on top of needing an extra
+            // comparison (see also the comment on `let niche_discr`).
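+            //
+            // As a concrete illustration, take an `Option<&T>`-style layout where the
+            // niche is the null pointer: `niche_start == 0`, `niche_variants == 0..=0`
+            // (the index of `None`) and `dataful_variant == 1` (`Some`). A tag of 0 gives
+            // `relative_discr == 0 <= relative_max == 0`, so the decoded discriminant is 0;
+            // any non-null pointer fails the range check and decodes to `dataful_variant`.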
+            let relative_discr = if niche_start == 0 {
+                tag
+            } else {
+                // FIXME handle niche_start > i64::MAX
+                fx.bcx
+                    .ins()
+                    .iadd_imm(tag, -i64::try_from(niche_start).unwrap())
+            };
+            let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+            let is_niche = {
+                codegen_icmp_imm(
+                    fx,
+                    IntCC::UnsignedLessThanOrEqual,
+                    relative_discr,
+                    i128::from(relative_max),
+                )
+            };
+
+            // NOTE(eddyb) this addition needs to be performed on the final
+            // type, in case the niche itself can't represent all variant
+            // indices (e.g. `u8` niche with more than `256` variants,
+            // but enough uninhabited variants so that the remaining variants
+            // fit in the niche).
+            // In other words, `niche_variants.end - niche_variants.start`
+            // is representable in the niche, but `niche_variants.end`
+            // might not be, in extreme cases.
+            let niche_discr = {
+                let relative_discr = if relative_max == 0 {
+                    // HACK(eddyb) since we have only one niche, we know which
+                    // one it is, and we can avoid having a dynamic value here.
+                    fx.bcx.ins().iconst(cast_to, 0)
+                } else {
+                    clif_intcast(fx, relative_discr, cast_to, false)
+                };
+                fx.bcx
+                    .ins()
+                    .iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
+            };
+
+            let dataful_variant = fx
+                .bcx
+                .ins()
+                .iconst(cast_to, i64::from(dataful_variant.as_u32()));
+            let discr = fx.bcx.ins().select(is_niche, niche_discr, dataful_variant);
+            CValue::by_val(discr, dest_layout)
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
new file mode 100644
index 0000000..c0245aa
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -0,0 +1,450 @@
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::path::PathBuf;
+
+use rustc_codegen_ssa::back::linker::LinkerInfo;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{DebugInfo, OutputType};
+
+use cranelift_object::{ObjectModule, ObjectProduct};
+
+use crate::prelude::*;
+
+use crate::backend::AddConstructor;
+
+fn new_module(tcx: TyCtxt<'_>, name: String) -> ObjectModule {
+    let module = crate::backend::make_module(tcx.sess, name);
+    assert_eq!(pointer_ty(tcx), module.target_config().pointer_type());
+    module
+}
+
+struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
+
+impl<HCX> HashStable<HCX> for ModuleCodegenResult {
+    fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+        // do nothing
+    }
+}
+
+fn emit_module(
+    tcx: TyCtxt<'_>,
+    name: String,
+    kind: ModuleKind,
+    module: ObjectModule,
+    debug: Option<DebugContext<'_>>,
+    unwind_context: UnwindContext<'_>,
+    map_product: impl FnOnce(ObjectProduct) -> ObjectProduct,
+) -> ModuleCodegenResult {
+    let mut product = module.finish();
+
+    if let Some(mut debug) = debug {
+        debug.emit(&mut product);
+    }
+
+    unwind_context.emit(&mut product);
+
+    let product = map_product(product);
+
+    let tmp_file = tcx
+        .output_filenames(LOCAL_CRATE)
+        .temp_path(OutputType::Object, Some(&name));
+    let obj = product.object.write().unwrap();
+    if let Err(err) = std::fs::write(&tmp_file, obj) {
+        tcx.sess
+            .fatal(&format!("error writing object file: {}", err));
+    }
+
+    let work_product = if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() {
+        None
+    } else {
+        rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+            tcx.sess,
+            &name,
+            &Some(tmp_file.clone()),
+        )
+    };
+
+    ModuleCodegenResult(
+        CompiledModule {
+            name,
+            kind,
+            object: Some(tmp_file),
+            bytecode: None,
+        },
+        work_product,
+    )
+}
+
+fn reuse_workproduct_for_cgu(
+    tcx: TyCtxt<'_>,
+    cgu: &CodegenUnit<'_>,
+    work_products: &mut FxHashMap<WorkProductId, WorkProduct>,
+) -> CompiledModule {
+    let incr_comp_session_dir = tcx.sess.incr_comp_session_dir();
+    let mut object = None;
+    let work_product = cgu.work_product(tcx);
+    if let Some(saved_file) = &work_product.saved_file {
+        let obj_out = tcx
+            .output_filenames(LOCAL_CRATE)
+            .temp_path(OutputType::Object, Some(&cgu.name().as_str()));
+        object = Some(obj_out.clone());
+        let source_file = rustc_incremental::in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
+        if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
+            tcx.sess.err(&format!(
+                "unable to copy {} to {}: {}",
+                source_file.display(),
+                obj_out.display(),
+                err
+            ));
+        }
+    }
+
+    work_products.insert(cgu.work_product_id(), work_product);
+
+    CompiledModule {
+        name: cgu.name().to_string(),
+        kind: ModuleKind::Regular,
+        object,
+        bytecode: None,
+    }
+}
+
+fn module_codegen(tcx: TyCtxt<'_>, cgu_name: rustc_span::Symbol) -> ModuleCodegenResult {
+    let cgu = tcx.codegen_unit(cgu_name);
+    let mono_items = cgu.items_in_deterministic_order(tcx);
+
+    let mut module = new_module(tcx, cgu_name.as_str().to_string());
+
+    // Initialize the global atomic mutex using a constructor for proc-macros.
+    // FIXME implement atomic instructions in Cranelift.
+    let mut init_atomics_mutex_from_constructor = None;
+    if tcx
+        .sess
+        .crate_types()
+        .contains(&rustc_session::config::CrateType::ProcMacro)
+    {
+        if mono_items.iter().any(|(mono_item, _)| match mono_item {
+            rustc_middle::mir::mono::MonoItem::Static(def_id) => tcx
+                .symbol_name(Instance::mono(tcx, *def_id))
+                .name
+                .contains("__rustc_proc_macro_decls_"),
+            _ => false,
+        }) {
+            init_atomics_mutex_from_constructor =
+                Some(crate::atomic_shim::init_global_lock_constructor(
+                    &mut module,
+                    &format!("{}_init_atomics_mutex", cgu_name.as_str()),
+                ));
+        }
+    }
+
+    let mut cx = crate::CodegenCx::new(tcx, module, tcx.sess.opts.debuginfo != DebugInfo::None);
+    super::codegen_mono_items(&mut cx, mono_items);
+    let (mut module, global_asm, debug, mut unwind_context) =
+        tcx.sess.time("finalize CodegenCx", || cx.finalize());
+    crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context, false);
+
+    let codegen_result = emit_module(
+        tcx,
+        cgu.name().as_str().to_string(),
+        ModuleKind::Regular,
+        module,
+        debug,
+        unwind_context,
+        |mut product| {
+            if let Some(func_id) = init_atomics_mutex_from_constructor {
+                product.add_constructor(func_id);
+            }
+
+            product
+        },
+    );
+
+    codegen_global_asm(tcx, &cgu.name().as_str(), &global_asm);
+
+    codegen_result
+}
+
+pub(super) fn run_aot(
+    tcx: TyCtxt<'_>,
+    metadata: EncodedMetadata,
+    need_metadata_module: bool,
+) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
+    let mut work_products = FxHashMap::default();
+
+    let cgus = if tcx.sess.opts.output_types.should_codegen() {
+        tcx.collect_and_partition_mono_items(LOCAL_CRATE).1
+    } else {
+        // If only `--emit metadata` is used, we shouldn't perform any codegen.
+        // Also `tcx.collect_and_partition_mono_items` may panic in that case.
+        &[]
+    };
+
+    if tcx.dep_graph.is_fully_enabled() {
+        for cgu in &*cgus {
+            tcx.ensure().codegen_unit(cgu.name());
+        }
+    }
+
+    let modules = super::time(tcx, "codegen mono items", || {
+        cgus.iter()
+            .map(|cgu| {
+                let cgu_reuse = determine_cgu_reuse(tcx, cgu);
+                tcx.sess
+                    .cgu_reuse_tracker
+                    .set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
+
+                match cgu_reuse {
+                    _ if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() => {}
+                    CguReuse::No => {}
+                    CguReuse::PreLto => {
+                        return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+                    }
+                    CguReuse::PostLto => unreachable!(),
+                }
+
+                let dep_node = cgu.codegen_dep_node(tcx);
+                let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
+                    dep_node,
+                    tcx,
+                    cgu.name(),
+                    module_codegen,
+                    rustc_middle::dep_graph::hash_result,
+                );
+
+                if let Some((id, product)) = work_product {
+                    work_products.insert(id, product);
+                }
+
+                module
+            })
+            .collect::<Vec<_>>()
+    });
+
+    tcx.sess.abort_if_errors();
+
+    let mut allocator_module = new_module(tcx, "allocator_shim".to_string());
+    let mut allocator_unwind_context = UnwindContext::new(tcx, allocator_module.isa());
+    let created_alloc_shim =
+        crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
+
+    let allocator_module = if created_alloc_shim {
+        let ModuleCodegenResult(module, work_product) = emit_module(
+            tcx,
+            "allocator_shim".to_string(),
+            ModuleKind::Allocator,
+            allocator_module,
+            None,
+            allocator_unwind_context,
+            |product| product,
+        );
+        if let Some((id, product)) = work_product {
+            work_products.insert(id, product);
+        }
+        Some(module)
+    } else {
+        None
+    };
+
+    rustc_incremental::assert_dep_graph(tcx);
+    rustc_incremental::save_dep_graph(tcx);
+
+    let metadata_module = if need_metadata_module {
+        let _timer = tcx.prof.generic_activity("codegen crate metadata");
+        let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
+            use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+
+            let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+            let metadata_cgu_name = cgu_name_builder
+                .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
+                .as_str()
+                .to_string();
+
+            let tmp_file = tcx
+                .output_filenames(LOCAL_CRATE)
+                .temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+
+            let obj = crate::backend::with_object(tcx.sess, &metadata_cgu_name, |object| {
+                crate::metadata::write_metadata(tcx, object);
+            });
+
+            if let Err(err) = std::fs::write(&tmp_file, obj) {
+                tcx.sess
+                    .fatal(&format!("error writing metadata object file: {}", err));
+            }
+
+            (metadata_cgu_name, tmp_file)
+        });
+
+        Some(CompiledModule {
+            name: metadata_cgu_name,
+            kind: ModuleKind::Metadata,
+            object: Some(tmp_file),
+            bytecode: None,
+        })
+    } else {
+        None
+    };
+
+    if tcx.sess.opts.output_types.should_codegen() {
+        rustc_incremental::assert_module_sources::assert_module_sources(tcx);
+    }
+
+    Box::new((
+        CodegenResults {
+            crate_name: tcx.crate_name(LOCAL_CRATE),
+            modules,
+            allocator_module,
+            metadata_module,
+            metadata,
+            windows_subsystem: None, // Windows is not yet supported
+            linker_info: LinkerInfo::new(tcx),
+            crate_info: CrateInfo::new(tcx),
+        },
+        work_products,
+    ))
+}
+
+fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
+    use std::io::Write;
+    use std::process::{Command, Stdio};
+
+    if global_asm.is_empty() {
+        return;
+    }
+
+    if cfg!(not(feature = "inline_asm"))
+        || tcx.sess.target.is_like_osx
+        || tcx.sess.target.is_like_windows
+    {
+        if global_asm.contains("__rust_probestack") {
+            return;
+        }
+
+        // FIXME fix linker error on macOS
+        if cfg!(not(feature = "inline_asm")) {
+            tcx.sess.fatal(
+                "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
+            );
+        } else {
+            tcx.sess
+                .fatal("asm! and global_asm! are not yet supported on macOS and Windows");
+        }
+    }
+
+    let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");
+    let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
+
+    // Remove all LLVM-style comments
+    let global_asm = global_asm
+        .lines()
+        .map(|line| {
+            if let Some(index) = line.find("//") {
+                &line[0..index]
+            } else {
+                line
+            }
+        })
+        .collect::<Vec<_>>()
+        .join("\n");
+
+    let output_object_file = tcx
+        .output_filenames(LOCAL_CRATE)
+        .temp_path(OutputType::Object, Some(cgu_name));
+
+    // Assemble `global_asm`
+    let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+    let mut child = Command::new(assembler)
+        .arg("-o")
+        .arg(&global_asm_object_file)
+        .stdin(Stdio::piped())
+        .spawn()
+        .expect("Failed to spawn `as`.");
+    child
+        .stdin
+        .take()
+        .unwrap()
+        .write_all(global_asm.as_bytes())
+        .unwrap();
+    let status = child.wait().expect("Failed to wait for `as`.");
+    if !status.success() {
+        tcx.sess
+            .fatal(&format!("Failed to assemble `{}`", global_asm));
+    }
+
+    // Link the global asm and main object file together
+    let main_object_file = add_file_stem_postfix(output_object_file.clone(), ".main");
+    std::fs::rename(&output_object_file, &main_object_file).unwrap();
+    let status = Command::new(linker)
+        .arg("-r") // Create a new object file
+        .arg("-o")
+        .arg(output_object_file)
+        .arg(&main_object_file)
+        .arg(&global_asm_object_file)
+        .status()
+        .unwrap();
+    if !status.success() {
+        tcx.sess.fatal(&format!(
+            "Failed to link `{}` and `{}` together",
+            main_object_file.display(),
+            global_asm_object_file.display(),
+        ));
+    }
+
+    std::fs::remove_file(global_asm_object_file).unwrap();
+    std::fs::remove_file(main_object_file).unwrap();
+}
+
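+/// Insert `postfix` between the file stem and the extension,
+/// e.g. `crate_name.o` with postfix `".asm"` becomes `crate_name.asm.o`.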
+fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
+    let mut new_filename = path.file_stem().unwrap().to_owned();
+    new_filename.push(postfix);
+    if let Some(extension) = path.extension() {
+        new_filename.push(".");
+        new_filename.push(extension);
+    }
+    path.set_file_name(new_filename);
+    path
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+    if !tcx.dep_graph.is_fully_enabled() {
+        return CguReuse::No;
+    }
+
+    let work_product_id = &cgu.work_product_id();
+    if tcx
+        .dep_graph
+        .previous_work_product(work_product_id)
+        .is_none()
+    {
+        // We don't have anything cached for this CGU. This can happen
+        // if the CGU did not exist in the previous session.
+        return CguReuse::No;
+    }
+
+    // Try to mark the CGU as green. If we can do so, it means that nothing
+    // affecting the LLVM module has changed and we can re-use a cached version.
+    // If we compile with any kind of LTO, this means we can re-use the bitcode
+    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+    // know that later). If we are not doing LTO, there is only one optimized
+    // version of each module, so we re-use that.
+    let dep_node = cgu.codegen_dep_node(tcx);
+    assert!(
+        !tcx.dep_graph.dep_node_exists(&dep_node),
+        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+        cgu.name()
+    );
+
+    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
+        CguReuse::PreLto
+    } else {
+        CguReuse::No
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
new file mode 100644
index 0000000..3f47df7
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -0,0 +1,169 @@
+//! The JIT driver uses [`cranelift_simplejit`] to JIT execute programs without writing any object
+//! files.
+
+use std::ffi::CString;
+use std::os::raw::{c_char, c_int};
+
+use rustc_codegen_ssa::CrateInfo;
+
+use cranelift_simplejit::{SimpleJITBuilder, SimpleJITModule};
+
+use crate::prelude::*;
+
+pub(super) fn run_jit(tcx: TyCtxt<'_>) -> ! {
+    if !tcx.sess.opts.output_types.should_codegen() {
+        tcx.sess.fatal("JIT mode doesn't work with `cargo check`.");
+    }
+
+    #[cfg(unix)]
+    unsafe {
+        // When not using our custom driver, rustc will open us without the RTLD_GLOBAL flag, so
+        // __cg_clif_global_atomic_mutex will not be exported. We fix this by opening ourselves again
+        // as global.
+        // FIXME remove once atomic_shim is gone
+
+        let mut dl_info: libc::Dl_info = std::mem::zeroed();
+        assert_ne!(
+            libc::dladdr(run_jit as *const libc::c_void, &mut dl_info),
+            0
+        );
+        assert_ne!(
+            libc::dlopen(dl_info.dli_fname, libc::RTLD_NOW | libc::RTLD_GLOBAL),
+            std::ptr::null_mut(),
+        );
+    }
+
+    let imported_symbols = load_imported_symbols_for_jit(tcx);
+
+    let mut jit_builder = SimpleJITBuilder::with_isa(
+        crate::build_isa(tcx.sess, false),
+        cranelift_module::default_libcall_names(),
+    );
+    jit_builder.symbols(imported_symbols);
+    let mut jit_module = SimpleJITModule::new(jit_builder);
+    assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type());
+
+    let sig = Signature {
+        params: vec![
+            AbiParam::new(jit_module.target_config().pointer_type()),
+            AbiParam::new(jit_module.target_config().pointer_type()),
+        ],
+        returns: vec![AbiParam::new(
+            jit_module.target_config().pointer_type(), /*isize*/
+        )],
+        call_conv: CallConv::triple_default(&crate::target_triple(tcx.sess)),
+    };
+    let main_func_id = jit_module
+        .declare_function("main", Linkage::Import, &sig)
+        .unwrap();
+
+    let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+    let mono_items = cgus
+        .iter()
+        .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
+        .flatten()
+        .collect::<FxHashMap<_, (_, _)>>()
+        .into_iter()
+        .collect::<Vec<(_, (_, _))>>();
+
+    let mut cx = crate::CodegenCx::new(tcx, jit_module, false);
+
+    let (mut jit_module, global_asm, _debug, mut unwind_context) =
+        super::time(tcx, "codegen mono items", || {
+            super::codegen_mono_items(&mut cx, mono_items);
+            tcx.sess.time("finalize CodegenCx", || cx.finalize())
+        });
+    if !global_asm.is_empty() {
+        tcx.sess.fatal("Global asm is not supported in JIT mode");
+    }
+    crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context, true);
+    crate::allocator::codegen(tcx, &mut jit_module, &mut unwind_context);
+
+    tcx.sess.abort_if_errors();
+
+    let jit_product = jit_module.finish();
+
+    let _unwind_register_guard = unsafe { unwind_context.register_jit(&jit_product) };
+
+    let finalized_main: *const u8 = jit_product.lookup_func(main_func_id);
+
+    println!("Rustc codegen cranelift will JIT run the executable, because --jit was passed");
+
+    let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
+        unsafe { ::std::mem::transmute(finalized_main) };
+
+    let args = ::std::env::var("CG_CLIF_JIT_ARGS").unwrap_or_else(|_| String::new());
+    let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
+        .chain(args.split(' '))
+        .map(|arg| CString::new(arg).unwrap())
+        .collect::<Vec<_>>();
+    let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
+
+    // Push a null pointer as a terminating argument. This is required by POSIX and
+    // useful as some dynamic linkers use it as a marker to jump over.
+    argv.push(std::ptr::null());
+
+    let ret = f(args.len() as c_int, argv.as_ptr());
+
+    std::process::exit(ret);
+}
+
+fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
+    use rustc_middle::middle::dependency_format::Linkage;
+
+    let mut dylib_paths = Vec::new();
+
+    let crate_info = CrateInfo::new(tcx);
+    let formats = tcx.dependency_formats(LOCAL_CRATE);
+    let data = &formats
+        .iter()
+        .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
+        .unwrap()
+        .1;
+    for &(cnum, _) in &crate_info.used_crates_dynamic {
+        let src = &crate_info.used_crate_source[&cnum];
+        match data[cnum.as_usize() - 1] {
+            Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+            Linkage::Static => {
+                let name = tcx.crate_name(cnum);
+                let mut err = tcx
+                    .sess
+                    .struct_err(&format!("Can't load static lib {}", name.as_str()));
+                err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
+                err.emit();
+            }
+            Linkage::Dynamic => {
+                dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
+            }
+        }
+    }
+
+    let mut imported_symbols = Vec::new();
+    for path in dylib_paths {
+        use object::Object;
+        let lib = libloading::Library::new(&path).unwrap();
+        let obj = std::fs::read(path).unwrap();
+        let obj = object::File::parse(&obj).unwrap();
+        imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| {
+            let name = symbol.name().unwrap().to_string();
+            if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
+                return None;
+            }
+            let dlsym_name = if cfg!(target_os = "macos") {
+                // On macOS `dlsym` expects the name without leading `_`.
+                assert!(name.starts_with('_'), "{:?}", name);
+                &name[1..]
+            } else {
+                &name
+            };
+            let symbol: libloading::Symbol<'_, *const u8> =
+                unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
+            Some((name, *symbol))
+        }));
+        std::mem::forget(lib)
+    }
+
+    tcx.sess.abort_if_errors();
+
+    imported_symbols
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/mod.rs b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
new file mode 100644
index 0000000..a11dc57
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
@@ -0,0 +1,120 @@
+//! Drivers are responsible for calling [`codegen_mono_items`] and performing any further actions
+//! like JIT executing or writing object files.
+
+use std::any::Any;
+
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+
+mod aot;
+#[cfg(feature = "jit")]
+mod jit;
+
+pub(crate) fn codegen_crate(
+    tcx: TyCtxt<'_>,
+    metadata: EncodedMetadata,
+    need_metadata_module: bool,
+    config: crate::BackendConfig,
+) -> Box<dyn Any> {
+    tcx.sess.abort_if_errors();
+
+    if config.use_jit {
+        let is_executable = tcx
+            .sess
+            .crate_types()
+            .contains(&rustc_session::config::CrateType::Executable);
+        if !is_executable {
+            tcx.sess.fatal("can't jit non-executable crate");
+        }
+
+        #[cfg(feature = "jit")]
+        let _: ! = jit::run_jit(tcx);
+
+        #[cfg(not(feature = "jit"))]
+        tcx.sess
+            .fatal("jit support was disabled when compiling rustc_codegen_cranelift");
+    }
+
+    aot::run_aot(tcx, metadata, need_metadata_module)
+}
+
+fn codegen_mono_items<'tcx>(
+    cx: &mut crate::CodegenCx<'tcx, impl Module>,
+    mono_items: Vec<(MonoItem<'tcx>, (RLinkage, Visibility))>,
+) {
+    cx.tcx.sess.time("predefine functions", || {
+        for &(mono_item, (linkage, visibility)) in &mono_items {
+            match mono_item {
+                MonoItem::Fn(instance) => {
+                    let (name, sig) = get_function_name_and_sig(
+                        cx.tcx,
+                        cx.module.isa().triple(),
+                        instance,
+                        false,
+                    );
+                    let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
+                    cx.module.declare_function(&name, linkage, &sig).unwrap();
+                }
+                MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
+            }
+        }
+    });
+
+    for (mono_item, (linkage, visibility)) in mono_items {
+        let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
+        codegen_mono_item(cx, mono_item, linkage);
+    }
+}
+
+fn codegen_mono_item<'tcx, M: Module>(
+    cx: &mut crate::CodegenCx<'tcx, M>,
+    mono_item: MonoItem<'tcx>,
+    linkage: Linkage,
+) {
+    let tcx = cx.tcx;
+    match mono_item {
+        MonoItem::Fn(inst) => {
+            let _inst_guard =
+                crate::PrintOnPanic(|| format!("{:?} {}", inst, tcx.symbol_name(inst).name));
+            debug_assert!(!inst.substs.needs_infer());
+            tcx.sess
+                .time("codegen fn", || crate::base::codegen_fn(cx, inst, linkage));
+        }
+        MonoItem::Static(def_id) => {
+            crate::constant::codegen_static(&mut cx.constants_cx, def_id);
+        }
+        MonoItem::GlobalAsm(hir_id) => {
+            let item = tcx.hir().expect_item(hir_id);
+            if let rustc_hir::ItemKind::GlobalAsm(rustc_hir::GlobalAsm { asm }) = item.kind {
+                cx.global_asm.push_str(&*asm.as_str());
+                cx.global_asm.push_str("\n\n");
+            } else {
+                bug!("Expected GlobalAsm found {:?}", item);
+            }
+        }
+    }
+}
+
+fn time<R>(tcx: TyCtxt<'_>, name: &'static str, f: impl FnOnce() -> R) -> R {
+    if std::env::var("CG_CLIF_DISPLAY_CG_TIME")
+        .as_ref()
+        .map(|val| &**val)
+        == Ok("1")
+    {
+        println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
+        let before = std::time::Instant::now();
+        let res = tcx.sess.time(name, f);
+        let after = std::time::Instant::now();
+        println!(
+            "[{:<30}: {}] end time: {:?}",
+            tcx.crate_name(LOCAL_CRATE),
+            name,
+            after - before
+        );
+        res
+    } else {
+        tcx.sess.time(name, f)
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
new file mode 100644
index 0000000..04aac78
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -0,0 +1,293 @@
+//! Codegen of [`asm!`] invocations.
+
+use crate::prelude::*;
+
+use std::fmt::Write;
+
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_middle::mir::InlineAsmOperand;
+use rustc_target::asm::*;
+
+pub(crate) fn codegen_inline_asm<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    _span: Span,
+    template: &[InlineAsmTemplatePiece],
+    operands: &[InlineAsmOperand<'tcx>],
+    options: InlineAsmOptions,
+) {
+    // FIXME add .eh_frame unwind info directives
+
+    if template.is_empty() {
+        // Black box
+        return;
+    }
+
+    let mut slot_size = Size::from_bytes(0);
+    let mut clobbered_regs = Vec::new();
+    let mut inputs = Vec::new();
+    let mut outputs = Vec::new();
+
+    let mut new_slot = |reg_class: InlineAsmRegClass| {
+        let reg_size = reg_class
+            .supported_types(InlineAsmArch::X86_64)
+            .iter()
+            .map(|(ty, _)| ty.size())
+            .max()
+            .unwrap();
+        let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
+        slot_size = slot_size.align_to(align);
+        let offset = slot_size;
+        slot_size += reg_size;
+        offset
+    };
+
+    // FIXME overlap input and output slots to save stack space
+    for operand in operands {
+        match *operand {
+            InlineAsmOperand::In { reg, ref value } => {
+                let reg = expect_reg(reg);
+                clobbered_regs.push((reg, new_slot(reg.reg_class())));
+                inputs.push((
+                    reg,
+                    new_slot(reg.reg_class()),
+                    crate::base::codegen_operand(fx, value).load_scalar(fx),
+                ));
+            }
+            InlineAsmOperand::Out {
+                reg,
+                late: _,
+                place,
+            } => {
+                let reg = expect_reg(reg);
+                clobbered_regs.push((reg, new_slot(reg.reg_class())));
+                if let Some(place) = place {
+                    outputs.push((
+                        reg,
+                        new_slot(reg.reg_class()),
+                        crate::base::codegen_place(fx, place),
+                    ));
+                }
+            }
+            InlineAsmOperand::InOut {
+                reg,
+                late: _,
+                ref in_value,
+                out_place,
+            } => {
+                let reg = expect_reg(reg);
+                clobbered_regs.push((reg, new_slot(reg.reg_class())));
+                inputs.push((
+                    reg,
+                    new_slot(reg.reg_class()),
+                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+                ));
+                if let Some(out_place) = out_place {
+                    outputs.push((
+                        reg,
+                        new_slot(reg.reg_class()),
+                        crate::base::codegen_place(fx, out_place),
+                    ));
+                }
+            }
+            InlineAsmOperand::Const { value: _ } => todo!(),
+            InlineAsmOperand::SymFn { value: _ } => todo!(),
+            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
+        }
+    }
+
+    let inline_asm_index = fx.inline_asm_index;
+    fx.inline_asm_index += 1;
+    let asm_name = format!(
+        "{}__inline_asm_{}",
+        fx.tcx.symbol_name(fx.instance).name,
+        inline_asm_index
+    );
+
+    let generated_asm = generate_asm_wrapper(
+        &asm_name,
+        InlineAsmArch::X86_64,
+        options,
+        template,
+        clobbered_regs,
+        &inputs,
+        &outputs,
+    );
+    fx.cx.global_asm.push_str(&generated_asm);
+
+    call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
+}
+
+fn generate_asm_wrapper(
+    asm_name: &str,
+    arch: InlineAsmArch,
+    options: InlineAsmOptions,
+    template: &[InlineAsmTemplatePiece],
+    clobbered_regs: Vec<(InlineAsmReg, Size)>,
+    inputs: &[(InlineAsmReg, Size, Value)],
+    outputs: &[(InlineAsmReg, Size, CPlace<'_>)],
+) -> String {
+    let mut generated_asm = String::new();
+    writeln!(generated_asm, ".globl {}", asm_name).unwrap();
+    writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
+    writeln!(
+        generated_asm,
+        ".section .text.{},\"ax\",@progbits",
+        asm_name
+    )
+    .unwrap();
+    writeln!(generated_asm, "{}:", asm_name).unwrap();
+
+    generated_asm.push_str(".intel_syntax noprefix\n");
+    generated_asm.push_str("    push rbp\n");
+    generated_asm.push_str("    mov rbp,rdi\n");
+
+    // Save clobbered registers
+    if !options.contains(InlineAsmOptions::NORETURN) {
+        // FIXME skip registers saved by the calling convention
+        for &(reg, offset) in &clobbered_regs {
+            save_register(&mut generated_asm, arch, reg, offset);
+        }
+    }
+
+    // Write input registers
+    for &(reg, offset, _value) in inputs {
+        restore_register(&mut generated_asm, arch, reg, offset);
+    }
+
+    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
+        generated_asm.push_str(".att_syntax\n");
+    }
+
+    // The actual inline asm
+    for piece in template {
+        match piece {
+            InlineAsmTemplatePiece::String(s) => {
+                generated_asm.push_str(s);
+            }
+            InlineAsmTemplatePiece::Placeholder {
+                operand_idx: _,
+                modifier: _,
+                span: _,
+            } => todo!(),
+        }
+    }
+    generated_asm.push('\n');
+
+    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
+        generated_asm.push_str(".intel_syntax noprefix\n");
+    }
+
+    if !options.contains(InlineAsmOptions::NORETURN) {
+        // Read output registers
+        for &(reg, offset, _place) in outputs {
+            save_register(&mut generated_asm, arch, reg, offset);
+        }
+
+        // Restore clobbered registers
+        for &(reg, offset) in clobbered_regs.iter().rev() {
+            restore_register(&mut generated_asm, arch, reg, offset);
+        }
+
+        generated_asm.push_str("    pop rbp\n");
+        generated_asm.push_str("    ret\n");
+    } else {
+        generated_asm.push_str("    ud2\n");
+    }
+
+    generated_asm.push_str(".att_syntax\n");
+    writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
+    generated_asm.push_str(".text\n");
+    generated_asm.push_str("\n\n");
+
+    generated_asm
+}
+
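+/// Emits the call to a previously generated inline asm wrapper: a scratch
+/// stack slot of `slot_size` bytes is created, the input values are spilled
+/// into it at their assigned offsets, the wrapper is called with the address
+/// of the slot as its only argument, and the outputs are reloaded from the
+/// slot into their destination places afterwards.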
+fn call_inline_asm<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    asm_name: &str,
+    slot_size: Size,
+    inputs: Vec<(InlineAsmReg, Size, Value)>,
+    outputs: Vec<(InlineAsmReg, Size, CPlace<'tcx>)>,
+) {
+    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
+        kind: StackSlotKind::ExplicitSlot,
+        offset: None,
+        size: u32::try_from(slot_size.bytes()).unwrap(),
+    });
+    #[cfg(debug_assertions)]
+    fx.add_comment(stack_slot, "inline asm scratch slot");
+
+    let inline_asm_func = fx
+        .cx
+        .module
+        .declare_function(
+            asm_name,
+            Linkage::Import,
+            &Signature {
+                call_conv: CallConv::SystemV,
+                params: vec![AbiParam::new(fx.pointer_type)],
+                returns: vec![],
+            },
+        )
+        .unwrap();
+    let inline_asm_func = fx
+        .cx
+        .module
+        .declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+    #[cfg(debug_assertions)]
+    fx.add_comment(inline_asm_func, asm_name);
+
+    for (_reg, offset, value) in inputs {
+        fx.bcx
+            .ins()
+            .stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
+    }
+
+    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
+
+    for (_reg, offset, place) in outputs {
+        let ty = fx.clif_type(place.layout().ty).unwrap();
+        let value = fx
+            .bcx
+            .ins()
+            .stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
+        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
+    }
+}
+
+fn expect_reg(reg_or_class: InlineAsmRegOrRegClass) -> InlineAsmReg {
+    match reg_or_class {
+        InlineAsmRegOrRegClass::Reg(reg) => reg,
+        InlineAsmRegOrRegClass::RegClass(class) => unimplemented!("{:?}", class),
+    }
+}
+
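+/// Emits a `mov [rbp+offset], <reg>` instruction, storing the register into
+/// the scratch slot.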
+fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
+    match arch {
+        InlineAsmArch::X86_64 => {
+            write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
+            reg.emit(generated_asm, InlineAsmArch::X86_64, None)
+                .unwrap();
+            generated_asm.push('\n');
+        }
+        _ => unimplemented!("save_register for {:?}", arch),
+    }
+}
+
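+/// Emits a `mov <reg>, [rbp+offset]` instruction, reloading the register from
+/// the scratch slot.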
+fn restore_register(
+    generated_asm: &mut String,
+    arch: InlineAsmArch,
+    reg: InlineAsmReg,
+    offset: Size,
+) {
+    match arch {
+        InlineAsmArch::X86_64 => {
+            generated_asm.push_str("    mov ");
+            reg.emit(generated_asm, InlineAsmArch::X86_64, None)
+                .unwrap();
+            writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
+        }
+        _ => unimplemented!("restore_register for {:?}", arch),
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
new file mode 100644
index 0000000..c1a1cdb
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
@@ -0,0 +1,93 @@
+//! Emulation of a subset of the cpuid x86 instruction.
+
+use crate::prelude::*;
+
+/// Emulates a subset of the cpuid x86 instruction.
+///
+/// This emulates an Intel CPU with SSE and SSE2 support, but no other features.
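+///
+/// Only leaves 0, 1, 0x8000_0000 and 0x8000_0001 are handled; any other leaf
+/// traps at runtime. Leaf 0 reports a `GenuineIntel` vendor string, and leaf 1
+/// reports only the SSE and SSE2 feature bits in EDX.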
+pub(crate) fn codegen_cpuid_call<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    leaf: Value,
+    _subleaf: Value,
+) -> (Value, Value, Value, Value) {
+    let leaf_0 = fx.bcx.create_block();
+    let leaf_1 = fx.bcx.create_block();
+    let leaf_8000_0000 = fx.bcx.create_block();
+    let leaf_8000_0001 = fx.bcx.create_block();
+    let unsupported_leaf = fx.bcx.create_block();
+
+    let dest = fx.bcx.create_block();
+    let eax = fx.bcx.append_block_param(dest, types::I32);
+    let ebx = fx.bcx.append_block_param(dest, types::I32);
+    let ecx = fx.bcx.append_block_param(dest, types::I32);
+    let edx = fx.bcx.append_block_param(dest, types::I32);
+
+    let mut switch = cranelift_frontend::Switch::new();
+    switch.set_entry(0, leaf_0);
+    switch.set_entry(1, leaf_1);
+    switch.set_entry(0x8000_0000, leaf_8000_0000);
+    switch.set_entry(0x8000_0001, leaf_8000_0001);
+    switch.emit(&mut fx.bcx, leaf, unsupported_leaf);
+
+    fx.bcx.switch_to_block(leaf_0);
+    let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1);
+    let vend0 = fx
+        .bcx
+        .ins()
+        .iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
+    let vend2 = fx
+        .bcx
+        .ins()
+        .iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
+    let vend1 = fx
+        .bcx
+        .ins()
+        .iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
+    fx.bcx
+        .ins()
+        .jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
+
+    fx.bcx.switch_to_block(leaf_1);
+    let cpu_signature = fx.bcx.ins().iconst(types::I32, 0);
+    let additional_information = fx.bcx.ins().iconst(types::I32, 0);
+    let ecx_features = fx.bcx.ins().iconst(types::I32, 0);
+    let edx_features = fx
+        .bcx
+        .ins()
+        .iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
+    fx.bcx.ins().jump(
+        dest,
+        &[
+            cpu_signature,
+            additional_information,
+            ecx_features,
+            edx_features,
+        ],
+    );
+
+    fx.bcx.switch_to_block(leaf_8000_0000);
+    let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0);
+    let zero = fx.bcx.ins().iconst(types::I32, 0);
+    fx.bcx
+        .ins()
+        .jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
+
+    fx.bcx.switch_to_block(leaf_8000_0001);
+    let zero = fx.bcx.ins().iconst(types::I32, 0);
+    let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0);
+    let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0);
+    fx.bcx
+        .ins()
+        .jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
+
+    fx.bcx.switch_to_block(unsupported_leaf);
+    crate::trap::trap_unreachable(
+        fx,
+        "__cpuid_count arch intrinsic doesn't yet support specified leaf",
+    );
+
+    fx.bcx.switch_to_block(dest);
+    fx.bcx.ins().nop();
+
+    (eax, ebx, ecx, edx)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
new file mode 100644
index 0000000..171445f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -0,0 +1,123 @@
+//! Emulate LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    intrinsic: &str,
+    substs: SubstsRef<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    destination: Option<(CPlace<'tcx>, BasicBlock)>,
+) {
+    let ret = destination.unwrap().0;
+
+    intrinsic_match! {
+        fx, intrinsic, substs, args,
+        _ => {
+            fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+            crate::trap::trap_unimplemented(fx, intrinsic);
+        };
+
+        // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
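+        // The lanes are visited from the highest index down: each lane's sign
+        // bit is extracted and shifted into the low end of `res`, so lane 0
+        // ends up in bit 0 of the resulting mask.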
+        llvm.x86.sse2.pmovmskb.128 | llvm.x86.avx2.pmovmskb | llvm.x86.sse2.movmsk.pd, (c a) {
+            let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, a.layout());
+            let lane_ty = fx.clif_type(lane_layout.ty).unwrap();
+            assert!(lane_count <= 32);
+
+            let mut res = fx.bcx.ins().iconst(types::I32, 0);
+
+            for lane in (0..lane_count).rev() {
+                let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+
+                // cast float to int
+                let a_lane = match lane_ty {
+                    types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
+                    types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+                    _ => a_lane,
+                };
+
+                // extract sign bit of an int
+                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
+
+                // shift sign bit into result
+                let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
+                res = fx.bcx.ins().ishl_imm(res, 1);
+                res = fx.bcx.ins().bor(res, a_lane_sign);
+            }
+
+            let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
+            ret.write_cvalue(fx, res);
+        };
+        llvm.x86.sse2.cmp.ps | llvm.x86.sse2.cmp.pd, (c x, c y, o kind) {
+            let kind_const = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
+            let flt_cc = match kind_const.val.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
+                0 => FloatCC::Equal,
+                1 => FloatCC::LessThan,
+                2 => FloatCC::LessThanOrEqual,
+                7 => {
+                    unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
+                }
+                3 => {
+                    unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
+                }
+                4 => FloatCC::NotEqual,
+                5 => {
+                    unimplemented!("not less than");
+                }
+                6 => {
+                    unimplemented!("not less than or equal");
+                }
+                kind => unreachable!("kind {:?}", kind),
+            };
+
+            simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+                let res_lane = match lane_layout.ty.kind() {
+                    ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
+                    _ => unreachable!("{:?}", lane_layout.ty),
+                };
+                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+            });
+        };
+        llvm.x86.sse2.psrli.d, (c a, o imm8) {
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
+            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
+                let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+                    imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+                    _ => fx.bcx.ins().iconst(types::I32, 0),
+                };
+                CValue::by_val(res_lane, res_lane_layout)
+            });
+        };
+        llvm.x86.sse2.pslli.d, (c a, o imm8) {
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.pslli.d imm8 not const");
+            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
+                let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+                    imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+                    _ => fx.bcx.ins().iconst(types::I32, 0),
+                };
+                CValue::by_val(res_lane, res_lane_layout)
+            });
+        };
+        llvm.x86.sse2.storeu.dq, (v mem_addr, c a) {
+            // FIXME correctly handle the possibly unaligned destination
+            let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
+            dest.write_cvalue(fx, a);
+        };
+    }
+
+    if let Some((_, dest)) = destination {
+        let ret_block = fx.get_block(dest);
+        fx.bcx.ins().jump(ret_block, &[]);
+    } else {
+        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+    }
+}
+
+// llvm.x86.avx2.vperm2i128
+// llvm.x86.ssse3.pshuf.b.128
+// llvm.x86.avx2.pshuf.b
+// llvm.x86.avx2.psrli.w
+// llvm.x86.sse2.psrli.w
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
new file mode 100644
index 0000000..ab16fab
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -0,0 +1,1101 @@
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use crate::prelude::*;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+
+macro intrinsic_pat {
+    (_) => {
+        _
+    },
+    ($name:ident) => {
+        stringify!($name)
+    },
+    ($name:literal) => {
+        stringify!($name)
+    },
+    ($x:ident . $($xs:tt).*) => {
+        concat!(stringify!($x), ".", intrinsic_pat!($($xs).*))
+    }
+}
+
+macro intrinsic_arg {
+    (o $fx:expr, $arg:ident) => {
+        $arg
+    },
+    (c $fx:expr, $arg:ident) => {
+        codegen_operand($fx, $arg)
+    },
+    (v $fx:expr, $arg:ident) => {
+        codegen_operand($fx, $arg).load_scalar($fx)
+    }
+}
+
+macro intrinsic_substs {
+    ($substs:expr, $index:expr,) => {},
+    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
+        let $first = $substs.type_at($index);
+        intrinsic_substs!($substs, $index+1, $($rest),*);
+    }
+}
+
+macro intrinsic_match {
+    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
+    _ => $unknown:block;
+    $(
+        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
+    )*) => {
+        let _ = $substs; // Silence warning when substs is unused.
+        match $intrinsic {
+            $(
+                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
+                    #[allow(unused_parens, non_snake_case)]
+                    {
+                        $(
+                            intrinsic_substs!($substs, 0, $($subst),*);
+                        )?
+                        if let [$($arg),*] = $args {
+                            let ($($arg,)*) = (
+                                $(intrinsic_arg!($a $fx, $arg),)*
+                            );
+                            #[warn(unused_parens, non_snake_case)]
+                            {
+                                $content
+                            }
+                        } else {
+                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
+                        }
+                    }
+                }
+            )*
+            _ => $unknown,
+        }
+    }
+}
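+
+// Arms of `intrinsic_match!` look roughly like
+// `name, <T> (c x, v y, o z) { /* body */ };`: the `c`/`v`/`o` prefix on each
+// argument selects whether it is passed to the body as a `CValue`, as a
+// loaded scalar `Value`, or as the raw MIR operand (see `intrinsic_arg!`
+// above).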
+
+macro call_intrinsic_match {
+    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
+        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
+    )*) => {
+        match $intrinsic {
+            $(
+                stringify!($name) => {
+                    assert!($substs.is_noop());
+                    if let [$(ref $arg),*] = *$args {
+                        let ($($arg,)*) = (
+                            $(codegen_operand($fx, $arg),)*
+                        );
+                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
+                        $ret.write_cvalue($fx, res);
+
+                        if let Some((_, dest)) = $destination {
+                            let ret_block = $fx.get_block(dest);
+                            $fx.bcx.ins().jump(ret_block, &[]);
+                            return;
+                        } else {
+                            unreachable!();
+                        }
+                    } else {
+                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
+                    }
+                }
+            )*
+            _ => {}
+        }
+    }
+}
+
+macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
+    crate::atomic_shim::lock_global_lock($fx);
+
+    let clif_ty = $fx.clif_type($T).unwrap();
+    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
+    let new = $fx.bcx.ins().$op(old, $src);
+    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
+    $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
+
+    crate::atomic_shim::unlock_global_lock($fx);
+}
+
+macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
+    crate::atomic_shim::lock_global_lock($fx);
+
+    // Read old
+    let clif_ty = $fx.clif_type($T).unwrap();
+    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
+
+    // Compare
+    let is_eq = $fx.bcx.ins().icmp(IntCC::SignedGreaterThan, old, $src);
+    let new = $fx.bcx.ins().select(is_eq, old, $src);
+
+    // Write new
+    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
+
+    let ret_val = CValue::by_val(old, $ret.layout());
+    $ret.write_cvalue($fx, ret_val);
+
+    crate::atomic_shim::unlock_global_lock($fx);
+}
+
+macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+    match $ty.kind() {
+        ty::Uint(_) | ty::Int(_) => {}
+        _ => {
+            $fx.tcx.sess.span_err(
+                $span,
+                &format!(
+                    "`{}` intrinsic: expected basic integer type, found `{:?}`",
+                    $intrinsic, $ty
+                ),
+            );
+            // Prevent verifier error
+            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+            return;
+        }
+    }
+}
+
+macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+    if !$ty.is_simd() {
+        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
+        // Prevent verifier error
+        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+        return;
+    }
+}
+
+fn lane_type_and_count<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    layout: TyAndLayout<'tcx>,
+) -> (TyAndLayout<'tcx>, u16) {
+    assert!(layout.ty.is_simd());
+    let lane_count = match layout.fields {
+        rustc_target::abi::FieldsShape::Array { stride: _, count } => u16::try_from(count).unwrap(),
+        _ => unreachable!("lane_type_and_count({:?})", layout),
+    };
+    let lane_layout = layout
+        .field(
+            &ty::layout::LayoutCx {
+                tcx,
+                param_env: ParamEnv::reveal_all(),
+            },
+            0,
+        )
+        .unwrap();
+    (lane_layout, lane_count)
+}
+
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+    let (element, count) = match &layout.abi {
+        Abi::Vector { element, count } => (element.clone(), *count),
+        _ => unreachable!(),
+    };
+
+    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+        // Cranelift currently only implements icmp for 128-bit vectors.
+        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+        _ => None,
+    }
+}
+
+fn simd_for_each_lane<'tcx, M: Module>(
+    fx: &mut FunctionCx<'_, 'tcx, M>,
+    val: CValue<'tcx>,
+    ret: CPlace<'tcx>,
+    f: impl Fn(
+        &mut FunctionCx<'_, 'tcx, M>,
+        TyAndLayout<'tcx>,
+        TyAndLayout<'tcx>,
+        Value,
+    ) -> CValue<'tcx>,
+) {
+    let layout = val.layout();
+
+    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
+    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+    assert_eq!(lane_count, ret_lane_count);
+
+    for lane_idx in 0..lane_count {
+        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
+        let lane = val.value_field(fx, lane_idx).load_scalar(fx);
+
+        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
+
+        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
+    }
+}
+
+fn simd_pair_for_each_lane<'tcx, M: Module>(
+    fx: &mut FunctionCx<'_, 'tcx, M>,
+    x: CValue<'tcx>,
+    y: CValue<'tcx>,
+    ret: CPlace<'tcx>,
+    f: impl Fn(
+        &mut FunctionCx<'_, 'tcx, M>,
+        TyAndLayout<'tcx>,
+        TyAndLayout<'tcx>,
+        Value,
+        Value,
+    ) -> CValue<'tcx>,
+) {
+    assert_eq!(x.layout(), y.layout());
+    let layout = x.layout();
+
+    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
+    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+    assert_eq!(lane_count, ret_lane_count);
+
+    for lane in 0..lane_count {
+        let lane = mir::Field::new(lane.try_into().unwrap());
+        let x_lane = x.value_field(fx, lane).load_scalar(fx);
+        let y_lane = y.value_field(fx, lane).load_scalar(fx);
+
+        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
+
+        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+    }
+}
+
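+/// Turns a boolean `Value` into 0 or "all ones" in the given layout: `bint`
+/// yields 0 or 1, and negating that yields 0 or -1 (all bits set). Float
+/// lanes are computed in the integer type of the same width and bitcast back.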
+fn bool_to_zero_or_max_uint<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    layout: TyAndLayout<'tcx>,
+    val: Value,
+) -> CValue<'tcx> {
+    let ty = fx.clif_type(layout.ty).unwrap();
+
+    let int_ty = match ty {
+        types::F32 => types::I32,
+        types::F64 => types::I64,
+        ty => ty,
+    };
+
+    let val = fx.bcx.ins().bint(int_ty, val);
+    let mut res = fx.bcx.ins().ineg(val);
+
+    if ty.is_float() {
+        res = fx.bcx.ins().bitcast(ty, res);
+    }
+
+    CValue::by_val(res, layout)
+}
+
+macro simd_cmp {
+    ($fx:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
+        let vector_ty = clif_vector_type($fx.tcx, $x.layout());
+
+        if let Some(vector_ty) = vector_ty {
+            let x = $x.load_scalar($fx);
+            let y = $y.load_scalar($fx);
+            let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);
+
+            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
+            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
+
+            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
+        } else {
+            simd_pair_for_each_lane(
+                $fx,
+                $x,
+                $y,
+                $ret,
+                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+                    let res_lane = match lane_layout.ty.kind() {
+                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
+                        _ => unreachable!("{:?}", lane_layout.ty),
+                    };
+                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+                },
+            );
+        }
+    },
+    ($fx:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
+        // FIXME use vector icmp when possible
+        simd_pair_for_each_lane(
+            $fx,
+            $x,
+            $y,
+            $ret,
+            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+                let res_lane = match lane_layout.ty.kind() {
+                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
+                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
+                    _ => unreachable!("{:?}", lane_layout.ty),
+                };
+                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+            },
+        );
+    },
+}
+
+macro simd_int_binop {
+    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
+    },
+    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_pair_for_each_lane(
+            $fx,
+            $x,
+            $y,
+            $ret,
+            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+                let res_lane = match lane_layout.ty.kind() {
+                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+                    _ => unreachable!("{:?}", lane_layout.ty),
+                };
+                CValue::by_val(res_lane, ret_lane_layout)
+            },
+        );
+    },
+}
+
+macro simd_int_flt_binop {
+    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
+    },
+    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+        simd_pair_for_each_lane(
+            $fx,
+            $x,
+            $y,
+            $ret,
+            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+                let res_lane = match lane_layout.ty.kind() {
+                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
+                    _ => unreachable!("{:?}", lane_layout.ty),
+                };
+                CValue::by_val(res_lane, ret_lane_layout)
+            },
+        );
+    },
+}
+
+macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
+    simd_pair_for_each_lane(
+        $fx,
+        $x,
+        $y,
+        $ret,
+        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+            let res_lane = match lane_layout.ty.kind() {
+                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
+                _ => unreachable!("{:?}", lane_layout.ty),
+            };
+            CValue::by_val(res_lane, ret_lane_layout)
+        },
+    );
+}
+
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    instance: Instance<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    destination: Option<(CPlace<'tcx>, BasicBlock)>,
+    span: Span,
+) {
+    let def_id = instance.def_id();
+    let substs = instance.substs;
+
+    let intrinsic = fx.tcx.item_name(def_id).as_str();
+    let intrinsic = &intrinsic[..];
+
+    let ret = match destination {
+        Some((place, _)) => place,
+        None => {
+            // Insert non-returning intrinsics here
+            match intrinsic {
+                "abort" => {
+                    trap_abort(fx, "Called intrinsic::abort.");
+                }
+                "unreachable" => {
+                    trap_unreachable(fx, "[corruption] Called intrinsic::unreachable.");
+                }
+                "transmute" => {
+                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
+                }
+                _ => unimplemented!("unsupported instrinsic {}", intrinsic),
+            }
+            return;
+        }
+    };
+
+    if intrinsic.starts_with("simd_") {
+        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
+        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+        fx.bcx.ins().jump(ret_block, &[]);
+        return;
+    }
+
+    let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
+    call_intrinsic_match! {
+        fx, intrinsic, substs, ret, destination, args,
+        expf32(flt) -> f32 => expf,
+        expf64(flt) -> f64 => exp,
+        exp2f32(flt) -> f32 => exp2f,
+        exp2f64(flt) -> f64 => exp2,
+        sqrtf32(flt) -> f32 => sqrtf,
+        sqrtf64(flt) -> f64 => sqrt,
+        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
+        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
+        powf32(a, x) -> f32 => powf,
+        powf64(a, x) -> f64 => pow,
+        logf32(flt) -> f32 => logf,
+        logf64(flt) -> f64 => log,
+        log2f32(flt) -> f32 => log2f,
+        log2f64(flt) -> f64 => log2,
+        log10f32(flt) -> f32 => log10f,
+        log10f64(flt) -> f64 => log10,
+        fabsf32(flt) -> f32 => fabsf,
+        fabsf64(flt) -> f64 => fabs,
+        fmaf32(x, y, z) -> f32 => fmaf,
+        fmaf64(x, y, z) -> f64 => fma,
+        copysignf32(x, y) -> f32 => copysignf,
+        copysignf64(x, y) -> f64 => copysign,
+
+        // rounding variants
+        // FIXME use clif insts
+        floorf32(flt) -> f32 => floorf,
+        floorf64(flt) -> f64 => floor,
+        ceilf32(flt) -> f32 => ceilf,
+        ceilf64(flt) -> f64 => ceil,
+        truncf32(flt) -> f32 => truncf,
+        truncf64(flt) -> f64 => trunc,
+        roundf32(flt) -> f32 => roundf,
+        roundf64(flt) -> f64 => round,
+
+        // trigonometry
+        sinf32(flt) -> f32 => sinf,
+        sinf64(flt) -> f64 => sin,
+        cosf32(flt) -> f32 => cosf,
+        cosf64(flt) -> f64 => cos,
+        tanf32(flt) -> f32 => tanf,
+        tanf64(flt) -> f64 => tan,
+    }
+
+    intrinsic_match! {
+        fx, intrinsic, substs, args,
+        _ => {
+            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
+        };
+
+        assume, (c _a) {};
+        likely | unlikely, (c a) {
+            ret.write_cvalue(fx, a);
+        };
+        breakpoint, () {
+            fx.bcx.ins().debugtrap();
+        };
+        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
+            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+            let elem_size = fx
+                .bcx
+                .ins()
+                .iconst(fx.pointer_type, elem_size as i64);
+            assert_eq!(args.len(), 3);
+            let byte_amount = fx.bcx.ins().imul(count, elem_size);
+
+            if intrinsic.contains("nonoverlapping") {
+                // FIXME emit_small_memcpy
+                fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
+            } else {
+                // FIXME emit_small_memmove
+                fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
+            }
+        };
+        // NOTE: the volatile variants have src and dst swapped
+        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
+            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+            let elem_size = fx
+                .bcx
+                .ins()
+                .iconst(fx.pointer_type, elem_size as i64);
+            assert_eq!(args.len(), 3);
+            let byte_amount = fx.bcx.ins().imul(count, elem_size);
+
+            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+            if intrinsic.contains("nonoverlapping") {
+                // FIXME emit_small_memcpy
+                fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
+            } else {
+                // FIXME emit_small_memmove
+                fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
+            }
+        };
+        discriminant_value, (c ptr) {
+            let pointee_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), pointee_layout);
+            let discr = crate::discriminant::codegen_get_discriminant(fx, val, ret.layout());
+            ret.write_cvalue(fx, discr);
+        };
+        size_of_val, <T> (c ptr) {
+            let layout = fx.layout_of(T);
+            let size = if layout.is_unsized() {
+                let (_ptr, info) = ptr.load_scalar_pair(fx);
+                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+                size
+            } else {
+                fx
+                    .bcx
+                    .ins()
+                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
+            };
+            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+        };
+        min_align_of_val, <T> (c ptr) {
+            let layout = fx.layout_of(T);
+            let align = if layout.is_unsized() {
+                let (_ptr, info) = ptr.load_scalar_pair(fx);
+                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+                align
+            } else {
+                fx
+                    .bcx
+                    .ins()
+                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+            };
+            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+        };
+
+        _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
+            // FIXME trap on overflow
+            let bin_op = match intrinsic {
+                "unchecked_add" => BinOp::Add,
+                "unchecked_sub" => BinOp::Sub,
+                "unchecked_div" | "exact_div" => BinOp::Div,
+                "unchecked_rem" => BinOp::Rem,
+                "unchecked_shl" => BinOp::Shl,
+                "unchecked_shr" => BinOp::Shr,
+                _ => unreachable!("intrinsic {}", intrinsic),
+            };
+            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+            ret.write_cvalue(fx, res);
+        };
+        _ if intrinsic.ends_with("_with_overflow"), (c x, c y) {
+            assert_eq!(x.layout().ty, y.layout().ty);
+            let bin_op = match intrinsic {
+                "add_with_overflow" => BinOp::Add,
+                "sub_with_overflow" => BinOp::Sub,
+                "mul_with_overflow" => BinOp::Mul,
+                _ => unreachable!("intrinsic {}", intrinsic),
+            };
+
+            let res = crate::num::codegen_checked_int_binop(
+                fx,
+                bin_op,
+                x,
+                y,
+            );
+            ret.write_cvalue(fx, res);
+        };
+        _ if intrinsic.starts_with("wrapping_"), (c x, c y) {
+            assert_eq!(x.layout().ty, y.layout().ty);
+            let bin_op = match intrinsic {
+                "wrapping_add" => BinOp::Add,
+                "wrapping_sub" => BinOp::Sub,
+                "wrapping_mul" => BinOp::Mul,
+                _ => unreachable!("intrinsic {}", intrinsic),
+            };
+            let res = crate::num::codegen_int_binop(
+                fx,
+                bin_op,
+                x,
+                y,
+            );
+            ret.write_cvalue(fx, res);
+        };
+        _ if intrinsic.starts_with("saturating_"), <T> (c lhs, c rhs) {
+            assert_eq!(lhs.layout().ty, rhs.layout().ty);
+            let bin_op = match intrinsic {
+                "saturating_add" => BinOp::Add,
+                "saturating_sub" => BinOp::Sub,
+                _ => unreachable!("intrinsic {}", intrinsic),
+            };
+
+            let signed = type_sign(T);
+
+            let checked_res = crate::num::codegen_checked_int_binop(
+                fx,
+                bin_op,
+                lhs,
+                rhs,
+            );
+
+            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
+            let clif_ty = fx.clif_type(T).unwrap();
+
+            // `select.i8` is not implemented by Cranelift.
+            let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
+
+            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
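+            // On overflow, the unsigned variants clamp to `max` for
+            // `saturating_add` and to `min` for `saturating_sub`. For the
+            // signed variants the clamp direction depends on the sign of
+            // `rhs`: adding a non-negative value can only overflow towards
+            // `max` and subtracting one can only overflow towards `min`, and
+            // vice versa for a negative `rhs`.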
+            let val = match (intrinsic, signed) {
+                ("saturating_add", false) => fx.bcx.ins().select(has_overflow, max, val),
+                ("saturating_sub", false) => fx.bcx.ins().select(has_overflow, min, val),
+                ("saturating_add", true) => {
+                    let rhs = rhs.load_scalar(fx);
+                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+                    fx.bcx.ins().select(has_overflow, sat_val, val)
+                }
+                ("saturating_sub", true) => {
+                    let rhs = rhs.load_scalar(fx);
+                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+                    fx.bcx.ins().select(has_overflow, sat_val, val)
+                }
+                _ => unreachable!(),
+            };
+
+            let res = CValue::by_val(val, fx.layout_of(T));
+
+            ret.write_cvalue(fx, res);
+        };
+        rotate_left, <T>(v x, v y) {
+            let layout = fx.layout_of(T);
+            let res = fx.bcx.ins().rotl(x, y);
+            ret.write_cvalue(fx, CValue::by_val(res, layout));
+        };
+        rotate_right, <T>(v x, v y) {
+            let layout = fx.layout_of(T);
+            let res = fx.bcx.ins().rotr(x, y);
+            ret.write_cvalue(fx, CValue::by_val(res, layout));
+        };
+
+        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
+        // doesn't have a concept of UB, both are codegen'ed the same way.
+        offset | arith_offset, (c base, v offset) {
+            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+            let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
+            let base_val = base.load_scalar(fx);
+            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
+        };
+
+        transmute, (c from) {
+            ret.write_cvalue_transmute(fx, from);
+        };
+        write_bytes | volatile_set_memory, (c dst, v val, v count) {
+            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+            let count = fx.bcx.ins().imul_imm(count, pointee_size as i64);
+            let dst_ptr = dst.load_scalar(fx);
+            // FIXME make the memset actually volatile when switching to emit_small_memset
+            // FIXME use emit_small_memset
+            fx.bcx.call_memset(fx.cx.module.target_config(), dst_ptr, val, count);
+        };
+        ctlz | ctlz_nonzero, <T> (v arg) {
+            // FIXME trap on `ctlz_nonzero` with zero arg.
+            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
+                // FIXME verify this algorithm is correct
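+                // Split the 128-bit value into two 64-bit halves: if the high
+                // half is zero, the leading zero count is 64 plus the count
+                // of the low half, otherwise it is the count of the high half.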
+                let (lsb, msb) = fx.bcx.ins().isplit(arg);
+                let lsb_lz = fx.bcx.ins().clz(lsb);
+                let msb_lz = fx.bcx.ins().clz(msb);
+                let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
+                let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
+                let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
+                fx.bcx.ins().uextend(types::I128, res)
+            } else {
+                fx.bcx.ins().clz(arg)
+            };
+            let res = CValue::by_val(res, fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        cttz | cttz_nonzero, <T> (v arg) {
+            // FIXME trap on `cttz_nonzero` with zero arg.
+            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
+                // FIXME verify this algorithm is correct
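+                // Split the 128-bit value into two 64-bit halves: if the low
+                // half is zero, the trailing zero count is 64 plus the count
+                // of the high half, otherwise it is the count of the low half.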
+                let (lsb, msb) = fx.bcx.ins().isplit(arg);
+                let lsb_tz = fx.bcx.ins().ctz(lsb);
+                let msb_tz = fx.bcx.ins().ctz(msb);
+                let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
+                let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
+                let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
+                fx.bcx.ins().uextend(types::I128, res)
+            } else {
+                fx.bcx.ins().ctz(arg)
+            };
+            let res = CValue::by_val(res, fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        ctpop, <T> (v arg) {
+            let res = fx.bcx.ins().popcnt(arg);
+            let res = CValue::by_val(res, fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        bitreverse, <T> (v arg) {
+            let res = fx.bcx.ins().bitrev(arg);
+            let res = CValue::by_val(res, fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        bswap, <T> (v arg) {
+            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
+            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
+                match bcx.func.dfg.value_type(v) {
+                    types::I8 => v,
+
+                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
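+                    // E.g. for I16, 0x1234 becomes
+                    // ((0x1234 << 8) & 0xFF00) | ((0x1234 >> 8) & 0x00FF) = 0x3412;
+                    // the wider types apply the same shift-and-mask pattern
+                    // byte by byte.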
+                    types::I16 => {
+                        let tmp1 = bcx.ins().ishl_imm(v, 8);
+                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
+
+                        let tmp2 = bcx.ins().ushr_imm(v, 8);
+                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
+
+                        bcx.ins().bor(n1, n2)
+                    }
+                    types::I32 => {
+                        let tmp1 = bcx.ins().ishl_imm(v, 24);
+                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
+
+                        let tmp2 = bcx.ins().ishl_imm(v, 8);
+                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
+
+                        let tmp3 = bcx.ins().ushr_imm(v, 8);
+                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
+
+                        let tmp4 = bcx.ins().ushr_imm(v, 24);
+                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
+
+                        let or_tmp1 = bcx.ins().bor(n1, n2);
+                        let or_tmp2 = bcx.ins().bor(n3, n4);
+                        bcx.ins().bor(or_tmp1, or_tmp2)
+                    }
+                    types::I64 => {
+                        let tmp1 = bcx.ins().ishl_imm(v, 56);
+                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
+
+                        let tmp2 = bcx.ins().ishl_imm(v, 40);
+                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
+
+                        let tmp3 = bcx.ins().ishl_imm(v, 24);
+                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
+
+                        let tmp4 = bcx.ins().ishl_imm(v, 8);
+                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
+
+                        let tmp5 = bcx.ins().ushr_imm(v, 8);
+                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
+
+                        let tmp6 = bcx.ins().ushr_imm(v, 24);
+                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
+
+                        let tmp7 = bcx.ins().ushr_imm(v, 40);
+                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
+
+                        let tmp8 = bcx.ins().ushr_imm(v, 56);
+                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
+
+                        let or_tmp1 = bcx.ins().bor(n1, n2);
+                        let or_tmp2 = bcx.ins().bor(n3, n4);
+                        let or_tmp3 = bcx.ins().bor(n5, n6);
+                        let or_tmp4 = bcx.ins().bor(n7, n8);
+
+                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
+                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
+                        bcx.ins().bor(or_tmp5, or_tmp6)
+                    }
+                    types::I128 => {
+                        let (lo, hi) = bcx.ins().isplit(v);
+                        let lo = swap(bcx, lo);
+                        let hi = swap(bcx, hi);
+                        bcx.ins().iconcat(hi, lo)
+                    }
+                    ty => unreachable!("bswap {}", ty),
+                }
+            };
+            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
+            let layout = fx.layout_of(T);
+            if layout.abi.is_uninhabited() {
+                with_no_trimmed_paths(|| crate::base::codegen_panic(
+                    fx,
+                    &format!("attempted to instantiate uninhabited type `{}`", T),
+                    span,
+                ));
+                return;
+            }
+
+            if intrinsic == "assert_zero_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
+                with_no_trimmed_paths(|| crate::base::codegen_panic(
+                    fx,
+                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
+                    span,
+                ));
+                return;
+            }
+
+            if intrinsic == "assert_uninit_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
+                with_no_trimmed_paths(|| crate::base::codegen_panic(
+                    fx,
+                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
+                    span,
+                ));
+                return;
+            }
+        };
+
+        volatile_load | unaligned_volatile_load, (c ptr) {
+            // Cranelift treats loads as volatile by default
+            // FIXME ignore during stack2reg optimization
+            // FIXME correctly handle unaligned_volatile_load
+            let inner_layout =
+                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+            ret.write_cvalue(fx, val);
+        };
+        volatile_store | unaligned_volatile_store, (v ptr, c val) {
+            // Cranelift treats stores as volatile by default
+            // FIXME ignore during stack2reg optimization
+            // FIXME correctly handle unaligned_volatile_store
+            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+            dest.write_cvalue(fx, val);
+        };
+
+        size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
+            let const_val =
+                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+            let val = crate::constant::codegen_const_value(
+                fx,
+                const_val,
+                ret.layout().ty,
+            );
+            ret.write_cvalue(fx, val);
+        };
+
+        ptr_offset_from, <T> (v ptr, v base) {
+            let isize_layout = fx.layout_of(fx.tcx.types.isize);
+
+            let pointee_size: u64 = fx.layout_of(T).size.bytes();
+            let diff = fx.bcx.ins().isub(ptr, base);
+            // FIXME this can be an exact division.
+            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
+            ret.write_cvalue(fx, val);
+        };
+
+        ptr_guaranteed_eq, (c a, c b) {
+            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+            ret.write_cvalue(fx, val);
+        };
+
+        ptr_guaranteed_ne, (c a, c b) {
+            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
+            ret.write_cvalue(fx, val);
+        };
+
+        caller_location, () {
+            let caller_location = fx.get_caller_location(span);
+            ret.write_cvalue(fx, caller_location);
+        };
+
+        _ if intrinsic.starts_with("atomic_fence"), () {
+            crate::atomic_shim::lock_global_lock(fx);
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
+        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
+            crate::atomic_shim::lock_global_lock(fx);
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
+        _ if intrinsic.starts_with("atomic_load"), (c ptr) {
+            crate::atomic_shim::lock_global_lock(fx);
+
+            let inner_layout =
+                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+            validate_atomic_type!(fx, intrinsic, span, inner_layout.ty);
+            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+            ret.write_cvalue(fx, val);
+
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
+        _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
+            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
+
+            crate::atomic_shim::lock_global_lock(fx);
+
+            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+            dest.write_cvalue(fx, val);
+
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
+        _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, T);
+
+            crate::atomic_shim::lock_global_lock(fx);
+
+            // Read old
+            let clif_ty = fx.clif_type(T).unwrap();
+            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
+            ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+
+            // Write new
+            let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
+            dest.write_cvalue(fx, src);
+
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
+        _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+            validate_atomic_type!(fx, intrinsic, span, T);
+
+            let test_old = test_old.load_scalar(fx);
+            let new = new.load_scalar(fx);
+
+            crate::atomic_shim::lock_global_lock(fx);
+
+            // Read old
+            let clif_ty = fx.clif_type(T).unwrap();
+            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
+
+            // Compare
+            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
+            let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
+
+            // Write new
+            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
+
+            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+            ret.write_cvalue(fx, ret_val);
+
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
+
+        _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, c amount) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let amount = amount.load_scalar(fx);
+            atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
+        };
+        _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, c amount) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let amount = amount.load_scalar(fx);
+            atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
+        };
+        _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
+            atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
+        };
+        _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, T);
+
+            let src = src.load_scalar(fx);
+
+            crate::atomic_shim::lock_global_lock(fx);
+
+            let clif_ty = fx.clif_type(T).unwrap();
+            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
+            let and = fx.bcx.ins().band(old, src);
+            let new = fx.bcx.ins().bnot(and);
+            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
+            ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
+        _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
+            atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
+        };
+        _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
+            atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
+        };
+
+        _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
+            atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
+        };
+        _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
+            atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
+        };
+        _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
+            atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
+        };
+        _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
+            atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
+        };
+
+        minnumf32, (v a, v b) {
+            let val = fx.bcx.ins().fmin(a, b);
+            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+            ret.write_cvalue(fx, val);
+        };
+        minnumf64, (v a, v b) {
+            let val = fx.bcx.ins().fmin(a, b);
+            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+            ret.write_cvalue(fx, val);
+        };
+        maxnumf32, (v a, v b) {
+            let val = fx.bcx.ins().fmax(a, b);
+            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+            ret.write_cvalue(fx, val);
+        };
+        maxnumf64, (v a, v b) {
+            let val = fx.bcx.ins().fmax(a, b);
+            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+            ret.write_cvalue(fx, val);
+        };
+
+        try, (v f, v data, v _catch_fn) {
+            // FIXME once unwinding is supported, change this to actually catch panics
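+            // `try` is expected to return zero when the closure runs to
+            // completion without panicking; as unwinding isn't implemented,
+            // the closure is simply called and a zeroed (success) value is
+            // returned unconditionally.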
+            let f_sig = fx.bcx.func.import_signature(Signature {
+                call_conv: CallConv::triple_default(fx.triple()),
+                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+                returns: vec![],
+            });
+
+            fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
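+            // Since unwinding is not supported yet (see the FIXME above), unconditionally
+            // report "no panic occurred" by returning zero.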
+            let layout = ret.layout();
+            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+            ret.write_cvalue(fx, ret_val);
+        };
+
+        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
+            let res = crate::num::codegen_float_binop(fx, match intrinsic {
+                "fadd_fast" => BinOp::Add,
+                "fsub_fast" => BinOp::Sub,
+                "fmul_fast" => BinOp::Mul,
+                "fdiv_fast" => BinOp::Div,
+                "frem_fast" => BinOp::Rem,
+                _ => unreachable!(),
+            }, x, y);
+            ret.write_cvalue(fx, res);
+        };
+        float_to_int_unchecked, (v f) {
+            let res = crate::cast::clif_int_or_float_cast(
+                fx,
+                f,
+                false,
+                fx.clif_type(ret.layout().ty).unwrap(),
+                type_sign(ret.layout().ty),
+            );
+            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+        };
+    }
+
+    if let Some((_, dest)) = destination {
+        let ret_block = fx.get_block(dest);
+        fx.bcx.ins().jump(ret_block, &[]);
+    } else {
+        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
new file mode 100644
index 0000000..2e31c46
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -0,0 +1,238 @@
+//! Codegen `extern "platform-intrinsic"` intrinsics.
+
+use super::*;
+use crate::prelude::*;
+
+pub(super) fn codegen_simd_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    instance: Instance<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    ret: CPlace<'tcx>,
+    span: Span,
+) {
+    let def_id = instance.def_id();
+    let substs = instance.substs;
+
+    let intrinsic = fx.tcx.item_name(def_id).as_str();
+    let intrinsic = &intrinsic[..];
+
+    intrinsic_match! {
+        fx, intrinsic, substs, args,
+        _ => {
+            fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+        };
+
+        simd_cast, (c a) {
+            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
+                let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
+
+                let from_signed = type_sign(lane_layout.ty);
+                let to_signed = type_sign(ret_lane_layout.ty);
+
+                let ret_lane = clif_int_or_float_cast(fx, lane, from_signed, ret_lane_ty, to_signed);
+                CValue::by_val(ret_lane, ret_lane_layout)
+            });
+        };
+
+        // FIXME support float comparisons
+        simd_eq, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, Equal(x, y) -> ret);
+        };
+        simd_ne, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, NotEqual(x, y) -> ret);
+        };
+        simd_lt, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, UnsignedLessThan|SignedLessThan(x, y) -> ret);
+        };
+        simd_le, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual(x, y) -> ret);
+        };
+        simd_gt, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan(x, y) -> ret);
+        };
+        simd_ge, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual(x, y) -> ret);
+        };
+
+        // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
+        _ if intrinsic.starts_with("simd_shuffle"), (c x, c y, o idx) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+
+            let n: u16 = intrinsic["simd_shuffle".len()..].parse().unwrap();
+
+            assert_eq!(x.layout(), y.layout());
+            let layout = x.layout();
+
+            let (lane_type, lane_count) = lane_type_and_count(fx.tcx, layout);
+            let (ret_lane_type, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+
+            assert_eq!(lane_type, ret_lane_type);
+            assert_eq!(n, ret_lane_count);
+
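+            // Shuffle indices may pick lanes from either input vector, so they range over the
+            // combined lane count of `x` and `y`.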
+            let total_len = lane_count * 2;
+
+            let indexes = {
+                use rustc_middle::mir::interpret::*;
+                let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
+
+                let idx_bytes = match idx_const.val {
+                    ty::ConstKind::Value(ConstValue::ByRef { alloc, offset }) => {
+                        let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
+                        let size = Size::from_bytes(4 * u64::from(ret_lane_count) /* size_of([u32; ret_lane_count]) */);
+                        alloc.get_bytes(fx, ptr, size).unwrap()
+                    }
+                    _ => unreachable!("{:?}", idx_const),
+                };
+
+                (0..ret_lane_count).map(|i| {
+                    let i = usize::try_from(i).unwrap();
+                    let idx = rustc_middle::mir::interpret::read_target_uint(
+                        fx.tcx.data_layout.endian,
+                        &idx_bytes[4*i.. 4*i + 4],
+                    ).expect("read_target_uint");
+                    u16::try_from(idx).expect("try_from u32")
+                }).collect::<Vec<u16>>()
+            };
+
+            for &idx in &indexes {
+                assert!(idx < total_len, "idx {} out of range 0..{}", idx, total_len);
+            }
+
+            for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+                let in_lane = if in_idx < lane_count {
+                    x.value_field(fx, mir::Field::new(in_idx.try_into().unwrap()))
+                } else {
+                    y.value_field(fx, mir::Field::new((in_idx - lane_count).try_into().unwrap()))
+                };
+                let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
+                out_lane.write_cvalue(fx, in_lane);
+            }
+        };
+
+        simd_insert, (c base, o idx, c val) {
+            // FIXME validate
+            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+                idx_const
+            } else {
+                fx.tcx.sess.span_fatal(
+                    span,
+                    "Index argument for `simd_insert` is not a constant",
+                );
+            };
+
+            let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32 */)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+            let (_lane_type, lane_count) = lane_type_and_count(fx.tcx, base.layout());
+            if idx >= lane_count.into() {
+                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
+            }
+
+            ret.write_cvalue(fx, base);
+            let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+            ret_lane.write_cvalue(fx, val);
+        };
+
+        simd_extract, (c v, o idx) {
+            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+                idx_const
+            } else {
+                fx.tcx.sess.span_fatal(
+                    span,
+                    "Index argument for `simd_extract` is not a constant",
+                );
+            };
+
+            let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32 */)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+            let (_lane_type, lane_count) = lane_type_and_count(fx.tcx, v.layout());
+            if idx >= lane_count.into() {
+                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
+            }
+
+            let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
+            ret.write_cvalue(fx, ret_lane);
+        };
+
+        simd_add, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
+        };
+        simd_sub, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
+        };
+        simd_mul, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
+        };
+        simd_div, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
+        };
+        simd_shl, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, ishl(x, y) -> ret);
+        };
+        simd_shr, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
+        };
+        simd_and, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, band(x, y) -> ret);
+        };
+        simd_or, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, bor(x, y) -> ret);
+        };
+        simd_xor, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, bxor(x, y) -> ret);
+        };
+
+        simd_fma, (c a, c b, c c) {
+            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+            assert_eq!(a.layout(), b.layout());
+            assert_eq!(a.layout(), c.layout());
+            let layout = a.layout();
+
+            let (_lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
+            let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+            assert_eq!(lane_count, ret_lane_count);
+
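+            // FMA is emulated with a separate multiply and add per lane, so unlike a true fused
+            // multiply-add the intermediate product is rounded.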
+            for lane in 0..lane_count {
+                let lane = mir::Field::new(lane.try_into().unwrap());
+                let a_lane = a.value_field(fx, lane).load_scalar(fx);
+                let b_lane = b.value_field(fx, lane).load_scalar(fx);
+                let c_lane = c.value_field(fx, lane).load_scalar(fx);
+
+                let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
+                let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);
+
+                ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+            }
+        };
+
+        simd_fmin, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_flt_binop!(fx, fmin(x, y) -> ret);
+        };
+        simd_fmax, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_flt_binop!(fx, fmax(x, y) -> ret);
+        };
+
+        // simd_fabs
+        // simd_saturating_add
+        // simd_bitmask
+        // simd_select
+        // simd_reduce_add_{,un}ordered
+        // simd_rem
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
new file mode 100644
index 0000000..ba9ee0d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -0,0 +1,317 @@
+#![feature(
+    rustc_private,
+    decl_macro,
+    type_alias_impl_trait,
+    associated_type_bounds,
+    never_type,
+    try_blocks,
+    hash_drain_filter
+)]
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+#![warn(unreachable_pub)]
+
+#[cfg(feature = "jit")]
+extern crate libc;
+extern crate snap;
+#[macro_use]
+extern crate rustc_middle;
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_fs_util;
+extern crate rustc_hir;
+extern crate rustc_incremental;
+extern crate rustc_index;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_symbol_mangling;
+extern crate rustc_target;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+use std::any::Any;
+
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_codegen_ssa::CodegenResults;
+use rustc_errors::ErrorReported;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
+use rustc_middle::ty::query::Providers;
+use rustc_session::config::OutputFilenames;
+use rustc_session::Session;
+
+use cranelift_codegen::settings::{self, Configurable};
+
+use crate::constant::ConstantCx;
+use crate::prelude::*;
+
+mod abi;
+mod allocator;
+mod analyze;
+mod archive;
+mod atomic_shim;
+mod backend;
+mod base;
+mod cast;
+mod codegen_i128;
+mod common;
+mod constant;
+mod debuginfo;
+mod discriminant;
+mod driver;
+mod inline_asm;
+mod intrinsics;
+mod linkage;
+mod main_shim;
+mod metadata;
+mod num;
+mod optimize;
+mod pointer;
+mod pretty_clif;
+mod toolchain;
+mod trap;
+mod unsize;
+mod value_and_place;
+mod vtable;
+
+mod prelude {
+    pub(crate) use std::convert::{TryFrom, TryInto};
+
+    pub(crate) use rustc_ast::ast::{FloatTy, IntTy, UintTy};
+    pub(crate) use rustc_span::Span;
+
+    pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+    pub(crate) use rustc_middle::bug;
+    pub(crate) use rustc_middle::mir::{self, *};
+    pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout};
+    pub(crate) use rustc_middle::ty::{
+        self, FnSig, Instance, InstanceDef, ParamEnv, Ty, TyCtxt, TypeAndMut, TypeFoldable,
+    };
+    pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
+
+    pub(crate) use rustc_data_structures::fx::FxHashMap;
+
+    pub(crate) use rustc_index::vec::Idx;
+
+    pub(crate) use cranelift_codegen::entity::EntitySet;
+    pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
+    pub(crate) use cranelift_codegen::ir::function::Function;
+    pub(crate) use cranelift_codegen::ir::types;
+    pub(crate) use cranelift_codegen::ir::{
+        AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
+        StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+    };
+    pub(crate) use cranelift_codegen::isa::{self, CallConv};
+    pub(crate) use cranelift_codegen::Context;
+    pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
+    pub(crate) use cranelift_module::{self, DataContext, DataId, FuncId, Linkage, Module};
+
+    pub(crate) use crate::abi::*;
+    pub(crate) use crate::base::{codegen_operand, codegen_place};
+    pub(crate) use crate::cast::*;
+    pub(crate) use crate::common::*;
+    pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
+    pub(crate) use crate::pointer::Pointer;
+    pub(crate) use crate::trap::*;
+    pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
+}
+
+struct PrintOnPanic<F: Fn() -> String>(F);
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+    fn drop(&mut self) {
+        if ::std::thread::panicking() {
+            println!("{}", (self.0)());
+        }
+    }
+}
+
+struct CodegenCx<'tcx, M: Module> {
+    tcx: TyCtxt<'tcx>,
+    module: M,
+    global_asm: String,
+    constants_cx: ConstantCx,
+    cached_context: Context,
+    vtables: FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), DataId>,
+    debug_context: Option<DebugContext<'tcx>>,
+    unwind_context: UnwindContext<'tcx>,
+}
+
+impl<'tcx, M: Module> CodegenCx<'tcx, M> {
+    fn new(tcx: TyCtxt<'tcx>, module: M, debug_info: bool) -> Self {
+        let unwind_context = UnwindContext::new(tcx, module.isa());
+        let debug_context = if debug_info {
+            Some(DebugContext::new(tcx, module.isa()))
+        } else {
+            None
+        };
+        CodegenCx {
+            tcx,
+            module,
+            global_asm: String::new(),
+            constants_cx: ConstantCx::default(),
+            cached_context: Context::new(),
+            vtables: FxHashMap::default(),
+            debug_context,
+            unwind_context,
+        }
+    }
+
+    fn finalize(mut self) -> (M, String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
+        self.constants_cx.finalize(self.tcx, &mut self.module);
+        (
+            self.module,
+            self.global_asm,
+            self.debug_context,
+            self.unwind_context,
+        )
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct BackendConfig {
+    pub use_jit: bool,
+}
+
+pub struct CraneliftCodegenBackend {
+    pub config: BackendConfig,
+}
+
+impl CodegenBackend for CraneliftCodegenBackend {
+    fn init(&self, sess: &Session) {
+        if sess.lto() != rustc_session::config::Lto::No && sess.opts.cg.embed_bitcode {
+            sess.warn("LTO is not supported. You may get a linker error.");
+        }
+    }
+
+    fn metadata_loader(&self) -> Box<dyn MetadataLoader + Sync> {
+        Box::new(crate::metadata::CraneliftMetadataLoader)
+    }
+
+    fn provide(&self, _providers: &mut Providers) {}
+    fn provide_extern(&self, _providers: &mut Providers) {}
+
+    fn target_features(&self, _sess: &Session) -> Vec<rustc_span::Symbol> {
+        vec![]
+    }
+
+    fn codegen_crate<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        metadata: EncodedMetadata,
+        need_metadata_module: bool,
+    ) -> Box<dyn Any> {
+        let res = driver::codegen_crate(tcx, metadata, need_metadata_module, self.config);
+
+        rustc_symbol_mangling::test::report_symbol_names(tcx);
+
+        res
+    }
+
+    fn join_codegen(
+        &self,
+        ongoing_codegen: Box<dyn Any>,
+        _sess: &Session,
+    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
+        Ok(*ongoing_codegen
+            .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
+            .unwrap())
+    }
+
+    fn link(
+        &self,
+        sess: &Session,
+        codegen_results: CodegenResults,
+        outputs: &OutputFilenames,
+    ) -> Result<(), ErrorReported> {
+        use rustc_codegen_ssa::back::link::link_binary;
+
+        let _timer = sess.prof.generic_activity("link_crate");
+
+        sess.time("linking", || {
+            let target_cpu = crate::target_triple(sess).to_string();
+            link_binary::<crate::archive::ArArchiveBuilder<'_>>(
+                sess,
+                &codegen_results,
+                outputs,
+                &codegen_results.crate_name.as_str(),
+                &target_cpu,
+            );
+        });
+
+        Ok(())
+    }
+}
+
+fn target_triple(sess: &Session) -> target_lexicon::Triple {
+    sess.target.llvm_target.parse().unwrap()
+}
+
+fn build_isa(sess: &Session, enable_pic: bool) -> Box<dyn isa::TargetIsa + 'static> {
+    use target_lexicon::BinaryFormat;
+
+    let target_triple = crate::target_triple(sess);
+
+    let mut flags_builder = settings::builder();
+    if enable_pic {
+        flags_builder.enable("is_pic").unwrap();
+    } else {
+        flags_builder.set("is_pic", "false").unwrap();
+    }
+    flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
+    flags_builder
+        .set(
+            "enable_verifier",
+            if cfg!(debug_assertions) {
+                "true"
+            } else {
+                "false"
+            },
+        )
+        .unwrap();
+
+    let tls_model = match target_triple.binary_format {
+        BinaryFormat::Elf => "elf_gd",
+        BinaryFormat::Macho => "macho",
+        BinaryFormat::Coff => "coff",
+        _ => "none",
+    };
+    flags_builder.set("tls_model", tls_model).unwrap();
+
+    flags_builder.set("enable_simd", "true").unwrap();
+
+    // FIXME(CraneStation/cranelift#732) fix LICM in presence of jump tables
+    /*
+    use rustc_session::config::OptLevel;
+    match sess.opts.optimize {
+        OptLevel::No => {
+            flags_builder.set("opt_level", "none").unwrap();
+        }
+        OptLevel::Less | OptLevel::Default => {}
+        OptLevel::Aggressive => {
+            flags_builder.set("opt_level", "speed_and_size").unwrap();
+        }
+        OptLevel::Size | OptLevel::SizeMin => {
+            sess.warn("Optimizing for size is not supported. Just ignoring the request");
+        }
+    }*/
+
+    let flags = settings::Flags::new(flags_builder);
+
+    let mut isa_builder = cranelift_codegen::isa::lookup(target_triple).unwrap();
+    // Don't use "haswell", as it implies `has_lzcnt`. macOS CI is still at Ivy Bridge EP, so
+    // `lzcnt` is interpreted as `bsr`.
+    isa_builder.enable("nehalem").unwrap();
+    isa_builder.finish(flags)
+}
+
+/// This is the entrypoint for a hot-plugged rustc_codegen_cranelift.
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+    Box::new(CraneliftCodegenBackend {
+        config: BackendConfig { use_jit: false },
+    })
+}
diff --git a/compiler/rustc_codegen_cranelift/src/linkage.rs b/compiler/rustc_codegen_cranelift/src/linkage.rs
new file mode 100644
index 0000000..dc1e210
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/linkage.rs
@@ -0,0 +1,33 @@
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+
+pub(crate) fn get_clif_linkage(
+    mono_item: MonoItem<'_>,
+    linkage: RLinkage,
+    visibility: Visibility,
+) -> Linkage {
+    match (linkage, visibility) {
+        (RLinkage::External, Visibility::Default) => Linkage::Export,
+        (RLinkage::Internal, Visibility::Default) => Linkage::Local,
+        (RLinkage::External, Visibility::Hidden) => Linkage::Hidden,
+        _ => panic!("{:?} = {:?} {:?}", mono_item, linkage, visibility),
+    }
+}
+
+pub(crate) fn get_static_linkage(tcx: TyCtxt<'_>, def_id: DefId) -> Linkage {
+    let fn_attrs = tcx.codegen_fn_attrs(def_id);
+
+    if let Some(linkage) = fn_attrs.linkage {
+        match linkage {
+            RLinkage::External => Linkage::Export,
+            RLinkage::Internal => Linkage::Local,
+            RLinkage::ExternalWeak | RLinkage::WeakAny => Linkage::Preemptible,
+            _ => panic!("{:?}", linkage),
+        }
+    } else if tcx.is_reachable_non_generic(def_id) {
+        Linkage::Export
+    } else {
+        Linkage::Hidden
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
new file mode 100644
index 0000000..10f515e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -0,0 +1,130 @@
+use rustc_hir::LangItem;
+use rustc_session::config::EntryFnType;
+
+use crate::prelude::*;
+
+/// Create the `main` function which will initialize the Rust runtime and call
+/// the user's main function.
+pub(crate) fn maybe_create_entry_wrapper(
+    tcx: TyCtxt<'_>,
+    module: &mut impl Module,
+    unwind_context: &mut UnwindContext<'_>,
+    use_jit: bool,
+) {
+    let (main_def_id, use_start_lang_item) = match tcx.entry_fn(LOCAL_CRATE) {
+        Some((def_id, entry_ty)) => (
+            def_id.to_def_id(),
+            match entry_ty {
+                EntryFnType::Main => true,
+                EntryFnType::Start => false,
+            },
+        ),
+        None => return,
+    };
+
+    let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
+    if module.get_name(&*tcx.symbol_name(instance).name).is_none() {
+        return;
+    }
+
+    create_entry_fn(
+        tcx,
+        module,
+        unwind_context,
+        main_def_id,
+        use_start_lang_item,
+        use_jit,
+    );
+
+    fn create_entry_fn(
+        tcx: TyCtxt<'_>,
+        m: &mut impl Module,
+        unwind_context: &mut UnwindContext<'_>,
+        rust_main_def_id: DefId,
+        use_start_lang_item: bool,
+        use_jit: bool,
+    ) {
+        let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
+        // Given that `main()` has no arguments,
+        // its return type cannot have
+        // late-bound regions, since late-bound
+        // regions must appear in the argument
+        // listing.
+        let main_ret_ty = tcx.erase_regions(&main_ret_ty.no_bound_vars().unwrap());
+
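+        // Signature of the C-level `main(argc, argv)`: two pointer-sized arguments and a
+        // pointer-sized return value holding the exit code.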
+        let cmain_sig = Signature {
+            params: vec![
+                AbiParam::new(m.target_config().pointer_type()),
+                AbiParam::new(m.target_config().pointer_type()),
+            ],
+            returns: vec![AbiParam::new(
+                m.target_config().pointer_type(), /*isize*/
+            )],
+            call_conv: CallConv::triple_default(m.isa().triple()),
+        };
+
+        let cmain_func_id = m
+            .declare_function("main", Linkage::Export, &cmain_sig)
+            .unwrap();
+
+        let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
+
+        let (main_name, main_sig) =
+            get_function_name_and_sig(tcx, m.isa().triple(), instance, false);
+        let main_func_id = m
+            .declare_function(&main_name, Linkage::Import, &main_sig)
+            .unwrap();
+
+        let mut ctx = Context::new();
+        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
+        {
+            let mut func_ctx = FunctionBuilderContext::new();
+            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+            let block = bcx.create_block();
+            bcx.switch_to_block(block);
+            let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
+            let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
+
+            crate::atomic_shim::init_global_lock(m, &mut bcx, use_jit);
+
+            let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
+
+            let call_inst = if use_start_lang_item {
+                let start_def_id = tcx.require_lang_item(LangItem::Start, None);
+                let start_instance = Instance::resolve(
+                    tcx,
+                    ParamEnv::reveal_all(),
+                    start_def_id,
+                    tcx.intern_substs(&[main_ret_ty.into()]),
+                )
+                .unwrap()
+                .unwrap()
+                .polymorphize(tcx);
+                let start_func_id = import_function(tcx, m, start_instance);
+
+                let main_val = bcx
+                    .ins()
+                    .func_addr(m.target_config().pointer_type(), main_func_ref);
+
+                let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
+                bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv])
+            } else {
+                // using user-defined start fn
+                bcx.ins().call(main_func_ref, &[arg_argc, arg_argv])
+            };
+
+            let result = bcx.inst_results(call_inst)[0];
+            bcx.ins().return_(&[result]);
+            bcx.seal_all_blocks();
+            bcx.finalize();
+        }
+        m.define_function(
+            cmain_func_id,
+            &mut ctx,
+            &mut cranelift_codegen::binemit::NullTrapSink {},
+        )
+        .unwrap();
+        unwind_context.add_function(cmain_func_id, &ctx, m.isa());
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/metadata.rs b/compiler/rustc_codegen_cranelift/src/metadata.rs
new file mode 100644
index 0000000..2e3b9fb
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/metadata.rs
@@ -0,0 +1,108 @@
+//! Reading and writing of the rustc metadata for rlibs and dylibs
+
+use std::convert::TryFrom;
+use std::fs::File;
+use std::path::Path;
+
+use rustc_codegen_ssa::METADATA_FILENAME;
+use rustc_data_structures::owning_ref::OwningRef;
+use rustc_data_structures::rustc_erase_owner;
+use rustc_data_structures::sync::MetadataRef;
+use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config;
+use rustc_target::spec::Target;
+
+use crate::backend::WriteMetadata;
+
+pub(crate) struct CraneliftMetadataLoader;
+
+impl MetadataLoader for CraneliftMetadataLoader {
+    fn get_rlib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
+        let mut archive = ar::Archive::new(File::open(path).map_err(|e| format!("{:?}", e))?);
+        // Iterate over all entries in the archive:
+        while let Some(entry_result) = archive.next_entry() {
+            let mut entry = entry_result.map_err(|e| format!("{:?}", e))?;
+            if entry.header().identifier() == METADATA_FILENAME.as_bytes() {
+                let mut buf = Vec::with_capacity(
+                    usize::try_from(entry.header().size())
+                        .expect("Rlib metadata file too big to load into memory."),
+                );
+                ::std::io::copy(&mut entry, &mut buf).map_err(|e| format!("{:?}", e))?;
+                let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf);
+                return Ok(rustc_erase_owner!(buf.map_owner_box()));
+            }
+        }
+
+        Err("couldn't find metadata entry".to_string())
+    }
+
+    fn get_dylib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
+        use object::{Object, ObjectSection};
+        let file = std::fs::read(path).map_err(|e| format!("read:{:?}", e))?;
+        let file = object::File::parse(&file).map_err(|e| format!("parse: {:?}", e))?;
+        let buf = file
+            .section_by_name(".rustc")
+            .ok_or("no .rustc section")?
+            .data()
+            .map_err(|e| format!("failed to read .rustc section: {:?}", e))?
+            .to_owned();
+        let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf);
+        Ok(rustc_erase_owner!(buf.map_owner_box()))
+    }
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/da573206f87b5510de4b0ee1a9c044127e409bd3/src/librustc_codegen_llvm/base.rs#L47-L112
+pub(crate) fn write_metadata<P: WriteMetadata>(
+    tcx: TyCtxt<'_>,
+    product: &mut P,
+) -> EncodedMetadata {
+    use snap::write::FrameEncoder;
+    use std::io::Write;
+
+    #[derive(PartialEq, Eq, PartialOrd, Ord)]
+    enum MetadataKind {
+        None,
+        Uncompressed,
+        Compressed,
+    }
+
+    let kind = tcx
+        .sess
+        .crate_types()
+        .iter()
+        .map(|ty| match *ty {
+            config::CrateType::Executable
+            | config::CrateType::Staticlib
+            | config::CrateType::Cdylib => MetadataKind::None,
+
+            config::CrateType::Rlib => MetadataKind::Uncompressed,
+
+            config::CrateType::Dylib | config::CrateType::ProcMacro => MetadataKind::Compressed,
+        })
+        .max()
+        .unwrap_or(MetadataKind::None);
+
+    if kind == MetadataKind::None {
+        return EncodedMetadata::new();
+    }
+
+    let metadata = tcx.encode_metadata();
+    if kind == MetadataKind::Uncompressed {
+        return metadata;
+    }
+
+    assert!(kind == MetadataKind::Compressed);
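+    // The compressed form starts with the metadata encoding version bytes, followed by the
+    // snappy-compressed metadata itself.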
+    let mut compressed = tcx.metadata_encoding_version();
+    FrameEncoder::new(&mut compressed)
+        .write_all(&metadata.raw_data)
+        .unwrap();
+
+    product.add_rustc_section(
+        rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx),
+        compressed,
+        tcx.sess.target.is_like_osx,
+    );
+
+    metadata
+}
diff --git a/compiler/rustc_codegen_cranelift/src/num.rs b/compiler/rustc_codegen_cranelift/src/num.rs
new file mode 100644
index 0000000..41f4a9b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/num.rs
@@ -0,0 +1,475 @@
+//! Various operations on integer and floating-point numbers
+
+use crate::prelude::*;
+
+pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
+    use BinOp::*;
+    use IntCC::*;
+    Some(match bin_op {
+        Eq => Equal,
+        Lt => {
+            if signed {
+                SignedLessThan
+            } else {
+                UnsignedLessThan
+            }
+        }
+        Le => {
+            if signed {
+                SignedLessThanOrEqual
+            } else {
+                UnsignedLessThanOrEqual
+            }
+        }
+        Ne => NotEqual,
+        Ge => {
+            if signed {
+                SignedGreaterThanOrEqual
+            } else {
+                UnsignedGreaterThanOrEqual
+            }
+        }
+        Gt => {
+            if signed {
+                SignedGreaterThan
+            } else {
+                UnsignedGreaterThan
+            }
+        }
+        _ => return None,
+    })
+}
+
+fn codegen_compare_bin_op<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    bin_op: BinOp,
+    signed: bool,
+    lhs: Value,
+    rhs: Value,
+) -> CValue<'tcx> {
+    let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
+    let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
+    let val = fx.bcx.ins().bint(types::I8, val);
+    CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_binop<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    match bin_op {
+        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+            match in_lhs.layout().ty.kind() {
+                ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
+                    let signed = type_sign(in_lhs.layout().ty);
+                    let lhs = in_lhs.load_scalar(fx);
+                    let rhs = in_rhs.load_scalar(fx);
+
+                    let (lhs, rhs) = if (bin_op == BinOp::Eq || bin_op == BinOp::Ne)
+                        && (in_lhs.layout().ty.kind() == fx.tcx.types.i8.kind()
+                            || in_lhs.layout().ty.kind() == fx.tcx.types.i16.kind())
+                    {
+                        // FIXME(CraneStation/cranelift#896) icmp_imm.i8/i16 with eq/ne for signed ints is implemented wrong.
+                        (
+                            fx.bcx.ins().sextend(types::I32, lhs),
+                            fx.bcx.ins().sextend(types::I32, rhs),
+                        )
+                    } else {
+                        (lhs, rhs)
+                    };
+
+                    return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
+                }
+                _ => {}
+            }
+        }
+        _ => {}
+    }
+
+    match in_lhs.layout().ty.kind() {
+        ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
+        ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
+        ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
+        ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
+        _ => unreachable!(
+            "{:?}({:?}, {:?})",
+            bin_op,
+            in_lhs.layout().ty,
+            in_rhs.layout().ty
+        ),
+    }
+}
+
+pub(crate) fn codegen_bool_binop<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    let lhs = in_lhs.load_scalar(fx);
+    let rhs = in_rhs.load_scalar(fx);
+
+    let b = fx.bcx.ins();
+    let res = match bin_op {
+        BinOp::BitXor => b.bxor(lhs, rhs),
+        BinOp::BitAnd => b.band(lhs, rhs),
+        BinOp::BitOr => b.bor(lhs, rhs),
+        // Compare binops are handled by `codegen_binop`.
+        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+    };
+
+    CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_int_binop<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+        assert_eq!(
+            in_lhs.layout().ty,
+            in_rhs.layout().ty,
+            "int binop requires lhs and rhs of same type"
+        );
+    }
+
+    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
+        return res;
+    }
+
+    let signed = type_sign(in_lhs.layout().ty);
+
+    let lhs = in_lhs.load_scalar(fx);
+    let rhs = in_rhs.load_scalar(fx);
+
+    let b = fx.bcx.ins();
+    let val = match bin_op {
+        BinOp::Add => b.iadd(lhs, rhs),
+        BinOp::Sub => b.isub(lhs, rhs),
+        BinOp::Mul => b.imul(lhs, rhs),
+        BinOp::Div => {
+            if signed {
+                b.sdiv(lhs, rhs)
+            } else {
+                b.udiv(lhs, rhs)
+            }
+        }
+        BinOp::Rem => {
+            if signed {
+                b.srem(lhs, rhs)
+            } else {
+                b.urem(lhs, rhs)
+            }
+        }
+        BinOp::BitXor => b.bxor(lhs, rhs),
+        BinOp::BitAnd => b.band(lhs, rhs),
+        BinOp::BitOr => b.bor(lhs, rhs),
+        BinOp::Shl => {
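+            // Mask the shift amount to the bit width of the value, so over-long shifts wrap
+            // instead of producing an undefined result.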
+            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+            fx.bcx.ins().ishl(lhs, actual_shift)
+        }
+        BinOp::Shr => {
+            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+            if signed {
+                fx.bcx.ins().sshr(lhs, actual_shift)
+            } else {
+                fx.bcx.ins().ushr(lhs, actual_shift)
+            }
+        }
+        // Compare binops are handled by `codegen_binop`.
+        _ => unreachable!(
+            "{:?}({:?}, {:?})",
+            bin_op,
+            in_lhs.layout().ty,
+            in_rhs.layout().ty
+        ),
+    };
+
+    CValue::by_val(val, in_lhs.layout())
+}
+
+pub(crate) fn codegen_checked_int_binop<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+        assert_eq!(
+            in_lhs.layout().ty,
+            in_rhs.layout().ty,
+            "checked int binop requires lhs and rhs of same type"
+        );
+    }
+
+    let lhs = in_lhs.load_scalar(fx);
+    let rhs = in_rhs.load_scalar(fx);
+
+    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
+        return res;
+    }
+
+    let signed = type_sign(in_lhs.layout().ty);
+
+    let (res, has_overflow) = match bin_op {
+        BinOp::Add => {
+            /*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
+            (val, c_out)*/
+            // FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
+            let val = fx.bcx.ins().iadd(lhs, rhs);
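+            // Unsigned: overflow iff the result wrapped below `lhs`. Signed: overflow iff
+            // whether the result decreased disagrees with the sign of `rhs`, i.e.
+            // `(rhs < 0) ^ (val < lhs)`.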
+            let has_overflow = if !signed {
+                fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
+            } else {
+                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+                let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
+                fx.bcx.ins().bxor(rhs_is_negative, slt)
+            };
+            (val, has_overflow)
+        }
+        BinOp::Sub => {
+            /*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
+            (val, b_out)*/
+            // FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
+            let val = fx.bcx.ins().isub(lhs, rhs);
+            let has_overflow = if !signed {
+                fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
+            } else {
+                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+                let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
+                fx.bcx.ins().bxor(rhs_is_negative, sgt)
+            };
+            (val, has_overflow)
+        }
+        BinOp::Mul => {
+            let ty = fx.bcx.func.dfg.value_type(lhs);
+            match ty {
+                types::I8 | types::I16 | types::I32 if !signed => {
+                    let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
+                    let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
+                    let val = fx.bcx.ins().imul(lhs, rhs);
+                    let has_overflow = fx.bcx.ins().icmp_imm(
+                        IntCC::UnsignedGreaterThan,
+                        val,
+                        (1 << ty.bits()) - 1,
+                    );
+                    let val = fx.bcx.ins().ireduce(ty, val);
+                    (val, has_overflow)
+                }
+                types::I8 | types::I16 | types::I32 if signed => {
+                    let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
+                    let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
+                    let val = fx.bcx.ins().imul(lhs, rhs);
+                    let has_underflow =
+                        fx.bcx
+                            .ins()
+                            .icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
+                    let has_overflow = fx.bcx.ins().icmp_imm(
+                        IntCC::SignedGreaterThan,
+                        val,
+                        (1 << (ty.bits() - 1)) - 1,
+                    );
+                    let val = fx.bcx.ins().ireduce(ty, val);
+                    (val, fx.bcx.ins().bor(has_underflow, has_overflow))
+                }
+                types::I64 => {
+                    //let val = fx.easy_call("__mulodi4", &[lhs, rhs, overflow_ptr], types::I64);
+                    let val = fx.bcx.ins().imul(lhs, rhs);
+                    let has_overflow = if !signed {
+                        let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
+                        fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
+                    } else {
+                        let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
+                        let not_all_zero = fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0);
+                        let not_all_ones = fx.bcx.ins().icmp_imm(
+                            IntCC::NotEqual,
+                            val_hi,
+                            u64::try_from((1u128 << ty.bits()) - 1).unwrap() as i64,
+                        );
+                        fx.bcx.ins().band(not_all_zero, not_all_ones)
+                    };
+                    (val, has_overflow)
+                }
+                types::I128 => {
+                    unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
+                }
+                _ => unreachable!("invalid non-integer type {}", ty),
+            }
+        }
+        BinOp::Shl => {
+            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+            let val = fx.bcx.ins().ishl(lhs, actual_shift);
+            let ty = fx.bcx.func.dfg.value_type(val);
+            let max_shift = i64::from(ty.bits()) - 1;
+            let has_overflow = fx
+                .bcx
+                .ins()
+                .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+            (val, has_overflow)
+        }
+        BinOp::Shr => {
+            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+            let val = if !signed {
+                fx.bcx.ins().ushr(lhs, actual_shift)
+            } else {
+                fx.bcx.ins().sshr(lhs, actual_shift)
+            };
+            let ty = fx.bcx.func.dfg.value_type(val);
+            let max_shift = i64::from(ty.bits()) - 1;
+            let has_overflow = fx
+                .bcx
+                .ins()
+                .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+            (val, has_overflow)
+        }
+        _ => bug!(
+            "binop {:?} on checked int/uint lhs: {:?} rhs: {:?}",
+            bin_op,
+            in_lhs,
+            in_rhs
+        ),
+    };
+
+    let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
+
+    // FIXME directly write to result place instead
+    let out_place = CPlace::new_stack_slot(
+        fx,
+        fx.layout_of(
+            fx.tcx
+                .mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()),
+        ),
+    );
+    let out_layout = out_place.layout();
+    out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));
+
+    out_place.to_cvalue(fx)
+}
+
+pub(crate) fn codegen_float_binop<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
+
+    let lhs = in_lhs.load_scalar(fx);
+    let rhs = in_rhs.load_scalar(fx);
+
+    let b = fx.bcx.ins();
+    let res = match bin_op {
+        BinOp::Add => b.fadd(lhs, rhs),
+        BinOp::Sub => b.fsub(lhs, rhs),
+        BinOp::Mul => b.fmul(lhs, rhs),
+        BinOp::Div => b.fdiv(lhs, rhs),
+        BinOp::Rem => {
+            let name = match in_lhs.layout().ty.kind() {
+                ty::Float(FloatTy::F32) => "fmodf",
+                ty::Float(FloatTy::F64) => "fmod",
+                _ => bug!(),
+            };
+            return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
+        }
+        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+            let fltcc = match bin_op {
+                BinOp::Eq => FloatCC::Equal,
+                BinOp::Lt => FloatCC::LessThan,
+                BinOp::Le => FloatCC::LessThanOrEqual,
+                BinOp::Ne => FloatCC::NotEqual,
+                BinOp::Ge => FloatCC::GreaterThanOrEqual,
+                BinOp::Gt => FloatCC::GreaterThan,
+                _ => unreachable!(),
+            };
+            let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
+            let val = fx.bcx.ins().bint(types::I8, val);
+            return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
+        }
+        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+    };
+
+    CValue::by_val(res, in_lhs.layout())
+}
+
+pub(crate) fn codegen_ptr_binop<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    let is_thin_ptr = in_lhs
+        .layout()
+        .ty
+        .builtin_deref(true)
+        .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
+        .unwrap_or(true);
+
+    if is_thin_ptr {
+        match bin_op {
+            BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+                let lhs = in_lhs.load_scalar(fx);
+                let rhs = in_rhs.load_scalar(fx);
+
+                return codegen_compare_bin_op(fx, bin_op, false, lhs, rhs);
+            }
+            BinOp::Offset => {
+                let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
+                let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
+                let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+                let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
+                let base_val = base.load_scalar(fx);
+                let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+                return CValue::by_val(res, base.layout());
+            }
+            _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+        };
+    } else {
+        let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
+        let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
+
+        let res = match bin_op {
+            BinOp::Eq => {
+                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+                let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
+                fx.bcx.ins().band(ptr_eq, extra_eq)
+            }
+            BinOp::Ne => {
+                let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
+                let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
+                fx.bcx.ins().bor(ptr_ne, extra_ne)
+            }
+            BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
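+                // Compare fat pointers lexicographically: order by the data pointer, and fall
+                // back to the metadata only when the data pointers are equal.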
+                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+
+                let ptr_cmp =
+                    fx.bcx
+                        .ins()
+                        .icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
+                let extra_cmp = fx.bcx.ins().icmp(
+                    bin_op_to_intcc(bin_op, false).unwrap(),
+                    lhs_extra,
+                    rhs_extra,
+                );
+
+                fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
+            }
+            _ => panic!("bin_op {:?} on ptr", bin_op),
+        };
+
+        CValue::by_val(
+            fx.bcx.ins().bint(types::I8, res),
+            fx.layout_of(fx.tcx.types.bool),
+        )
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/code_layout.rs b/compiler/rustc_codegen_cranelift/src/optimize/code_layout.rs
new file mode 100644
index 0000000..f027320
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/code_layout.rs
@@ -0,0 +1,40 @@
+//! This optimization moves cold code to the end of the function.
+//!
+//! Some code is executed much less often than other code, for example panic handling or the
+//! landing pads used for unwinding. Moving this cold code to the end of the function reduces the
+//! average number of jumps and improves code locality.
+//!
+//! # Undefined behaviour
+//!
+//! This optimization doesn't assume anything that isn't already assumed by Cranelift itself.
+
+use crate::prelude::*;
+
+pub(super) fn optimize_function(ctx: &mut Context, cold_blocks: &EntitySet<Block>) {
+    // FIXME Move the block in place instead of remove and append once
+    // bytecodealliance/cranelift#1339 is implemented.
+
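+    // First detach every cold block and its instructions from the layout, remembering the
+    // instructions so they can be re-appended below.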
+    let mut block_insts = FxHashMap::default();
+    for block in cold_blocks
+        .keys()
+        .filter(|&block| cold_blocks.contains(block))
+    {
+        let insts = ctx.func.layout.block_insts(block).collect::<Vec<_>>();
+        for &inst in &insts {
+            ctx.func.layout.remove_inst(inst);
+        }
+        block_insts.insert(block, insts);
+        ctx.func.layout.remove_block(block);
+    }
+
+    // And then append them at the back again.
+    for block in cold_blocks
+        .keys()
+        .filter(|&block| cold_blocks.contains(block))
+    {
+        ctx.func.layout.append_block(block);
+        for inst in block_insts.remove(&block).unwrap() {
+            ctx.func.layout.append_inst(inst, block);
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/mod.rs b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
new file mode 100644
index 0000000..3ce7f8c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
@@ -0,0 +1,25 @@
+//! Various optimizations specific to cg_clif
+
+use crate::prelude::*;
+
+mod code_layout;
+pub(crate) mod peephole;
+mod stack2reg;
+
+pub(crate) fn optimize_function<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance: Instance<'tcx>,
+    ctx: &mut Context,
+    cold_blocks: &EntitySet<Block>,
+    clif_comments: &mut crate::pretty_clif::CommentWriter,
+) {
+    // The code_layout optimization is very cheap.
+    self::code_layout::optimize_function(ctx, cold_blocks);
+
+    if tcx.sess.opts.optimize == rustc_session::config::OptLevel::No {
+        return; // FIXME classify optimizations over opt levels
+    }
+    self::stack2reg::optimize_function(ctx, clif_comments);
+    crate::pretty_clif::write_clif_file(tcx, "stack2reg", None, instance, &ctx, &*clif_comments);
+    crate::base::verify_func(tcx, &*clif_comments, &ctx.func);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
new file mode 100644
index 0000000..f8e0f3a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
@@ -0,0 +1,83 @@
+//! Peephole optimizations that can be performed while creating clif ir.
+
+use cranelift_codegen::ir::{
+    condcodes::IntCC, types, InstBuilder, InstructionData, Opcode, Value, ValueDef,
+};
+use cranelift_frontend::FunctionBuilder;
+
+/// If the given value was produced by a `bint` instruction, return its input, otherwise return the
+/// given value.
+pub(crate) fn maybe_unwrap_bint(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+    if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+        match bcx.func.dfg[arg_inst] {
+            InstructionData::Unary {
+                opcode: Opcode::Bint,
+                arg,
+            } => arg,
+            _ => arg,
+        }
+    } else {
+        arg
+    }
+}
+
+/// If the given value was produced by the lowering of `Rvalue::Not` return the input and true,
+/// otherwise return the given value and false.
+pub(crate) fn maybe_unwrap_bool_not(bcx: &mut FunctionBuilder<'_>, arg: Value) -> (Value, bool) {
+    if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+        match bcx.func.dfg[arg_inst] {
+            // This is the lowering of `Rvalue::Not`
+            InstructionData::IntCompareImm {
+                opcode: Opcode::IcmpImm,
+                cond: IntCC::Equal,
+                arg,
+                imm,
+            } if imm.bits() == 0 => (arg, true),
+            _ => (arg, false),
+        }
+    } else {
+        (arg, false)
+    }
+}
+
+pub(crate) fn make_branchable_value(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+    if bcx.func.dfg.value_type(arg).is_bool() {
+        return arg;
+    }
+
+    (|| {
+        let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+            arg_inst
+        } else {
+            return None;
+        };
+
+        match bcx.func.dfg[arg_inst] {
+            // If the value comes from a small integer load, widen the load itself (see below).
+            InstructionData::Load {
+                opcode: Opcode::Load,
+                arg: ptr,
+                flags,
+                offset,
+            } => {
+                // Using `load.i8 + uextend.i32` would legalize to `uload8 + ireduce.i8 +
+                // uextend.i32`. Just `uload8` is much faster.
+                match bcx.func.dfg.ctrl_typevar(arg_inst) {
+                    types::I8 => Some(bcx.ins().uload8(types::I32, flags, ptr, offset)),
+                    types::I16 => Some(bcx.ins().uload16(types::I32, flags, ptr, offset)),
+                    _ => None,
+                }
+            }
+            _ => None,
+        }
+    })()
+    .unwrap_or_else(|| {
+        match bcx.func.dfg.value_type(arg) {
+            types::I8 | types::I32 => {
+                // WORKAROUND for brz.i8 and brnz.i8 not yet being implemented
+                bcx.ins().uextend(types::I32, arg)
+            }
+            _ => arg,
+        }
+    })
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/stack2reg.rs b/compiler/rustc_codegen_cranelift/src/optimize/stack2reg.rs
new file mode 100644
index 0000000..3c939d5
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/stack2reg.rs
@@ -0,0 +1,508 @@
+//! This optimization replaces stack accesses with SSA variables and removes dead stores when possible.
+//!
+//! # Undefined behaviour
+//!
+//! This optimization is based on the assumption that stack slots which don't have their address
+//! leaked through `stack_addr` are only accessed using `stack_load` and `stack_store` in the
+//! function which has the stack slots. This optimization also assumes that stack slot accesses
+//! are never out of bounds. If these assumptions are not correct, then this optimization may
+//! incorrectly remove a `stack_store` instruction, or incorrectly use a previously stored value as
+//! the value being loaded by a `stack_load`.
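+//!
+//! # Example
+//!
+//! An illustrative sketch (not taken from real output) of what the pass does to a slot that is
+//! only used via `stack_load`/`stack_store`:
+//!
+//! ```clif
+//! ss0 = explicit_slot 4
+//! v1 = iconst.i32 42
+//! stack_store v1, ss0
+//! v2 = stack_load.i32 ss0
+//! ```
+//!
+//! The load is turned into an alias of `v1`, the now dead store is replaced by a `nop` and `ss0`
+//! is shrunk to size 0.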
+
+use std::collections::BTreeMap;
+use std::fmt;
+use std::ops::Not;
+
+use rustc_data_structures::fx::FxHashSet;
+
+use cranelift_codegen::cursor::{Cursor, FuncCursor};
+use cranelift_codegen::ir::immediates::Offset32;
+use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
+
+use crate::prelude::*;
+
+/// Workaround for `StackSlot` not implementing `Ord`.
+#[derive(Copy, Clone, PartialEq, Eq)]
+struct OrdStackSlot(StackSlot);
+
+impl fmt::Debug for OrdStackSlot {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self.0)
+    }
+}
+
+impl PartialOrd for OrdStackSlot {
+    fn partial_cmp(&self, rhs: &Self) -> Option<std::cmp::Ordering> {
+        self.0.as_u32().partial_cmp(&rhs.0.as_u32())
+    }
+}
+
+impl Ord for OrdStackSlot {
+    fn cmp(&self, rhs: &Self) -> std::cmp::Ordering {
+        self.0.as_u32().cmp(&rhs.0.as_u32())
+    }
+}
+
+#[derive(Debug, Default)]
+struct StackSlotUsage {
+    stack_addr: FxHashSet<Inst>,
+    stack_load: FxHashSet<Inst>,
+    stack_store: FxHashSet<Inst>,
+}
+
+impl StackSlotUsage {
+    fn potential_stores_for_load(&self, ctx: &Context, load: Inst) -> Vec<Inst> {
+        self.stack_store
+            .iter()
+            .cloned()
+            .filter(|&store| {
+                match spatial_overlap(&ctx.func, store, load) {
+                    SpatialOverlap::No => false, // Can never be the source of the loaded value.
+                    SpatialOverlap::Partial | SpatialOverlap::Full => true,
+                }
+            })
+            .filter(|&store| {
+                match temporal_order(ctx, store, load) {
+                    TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
+                    TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
+                }
+            })
+            .collect::<Vec<Inst>>()
+    }
+
+    fn potential_loads_of_store(&self, ctx: &Context, store: Inst) -> Vec<Inst> {
+        self.stack_load
+            .iter()
+            .cloned()
+            .filter(|&load| {
+                match spatial_overlap(&ctx.func, store, load) {
+                    SpatialOverlap::No => false, // Can never observe the stored value.
+                    SpatialOverlap::Partial | SpatialOverlap::Full => true,
+                }
+            })
+            .filter(|&load| {
+                match temporal_order(ctx, store, load) {
+                    TemporalOrder::NeverBefore => false, // Can never observe the stored value.
+                    TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
+                }
+            })
+            .collect::<Vec<Inst>>()
+    }
+
+    fn remove_unused_stack_addr(func: &mut Function, inst: Inst) {
+        func.dfg.detach_results(inst);
+        func.dfg.replace(inst).nop();
+    }
+
+    fn remove_unused_load(func: &mut Function, load: Inst) {
+        func.dfg.detach_results(load);
+        func.dfg.replace(load).nop();
+    }
+
+    fn remove_dead_store(&mut self, func: &mut Function, store: Inst) {
+        func.dfg.replace(store).nop();
+        self.stack_store.remove(&store);
+    }
+
+    fn change_load_to_alias(&mut self, func: &mut Function, load: Inst, value: Value) {
+        let loaded_value = func.dfg.inst_results(load)[0];
+        let loaded_type = func.dfg.value_type(loaded_value);
+
+        if func.dfg.value_type(value) == loaded_type {
+            func.dfg.detach_results(load);
+            func.dfg.replace(load).nop();
+            func.dfg.change_to_alias(loaded_value, value);
+        } else {
+            func.dfg.replace(load).bitcast(loaded_type, value);
+        }
+
+        self.stack_load.remove(&load);
+    }
+}
+
+struct OptimizeContext<'a> {
+    ctx: &'a mut Context,
+    stack_slot_usage_map: BTreeMap<OrdStackSlot, StackSlotUsage>,
+}
+
+impl<'a> OptimizeContext<'a> {
+    fn for_context(ctx: &'a mut Context) -> Self {
+        ctx.flowgraph(); // Compute cfg and domtree.
+
+        // Record all stack_addr, stack_load and stack_store instructions.
+        let mut stack_slot_usage_map = BTreeMap::<OrdStackSlot, StackSlotUsage>::new();
+
+        let mut cursor = FuncCursor::new(&mut ctx.func);
+        while let Some(_block) = cursor.next_block() {
+            while let Some(inst) = cursor.next_inst() {
+                match cursor.func.dfg[inst] {
+                    InstructionData::StackLoad {
+                        opcode: Opcode::StackAddr,
+                        stack_slot,
+                        offset: _,
+                    } => {
+                        stack_slot_usage_map
+                            .entry(OrdStackSlot(stack_slot))
+                            .or_insert_with(StackSlotUsage::default)
+                            .stack_addr
+                            .insert(inst);
+                    }
+                    InstructionData::StackLoad {
+                        opcode: Opcode::StackLoad,
+                        stack_slot,
+                        offset: _,
+                    } => {
+                        stack_slot_usage_map
+                            .entry(OrdStackSlot(stack_slot))
+                            .or_insert_with(StackSlotUsage::default)
+                            .stack_load
+                            .insert(inst);
+                    }
+                    InstructionData::StackStore {
+                        opcode: Opcode::StackStore,
+                        arg: _,
+                        stack_slot,
+                        offset: _,
+                    } => {
+                        stack_slot_usage_map
+                            .entry(OrdStackSlot(stack_slot))
+                            .or_insert_with(StackSlotUsage::default)
+                            .stack_store
+                            .insert(inst);
+                    }
+                    _ => {}
+                }
+            }
+        }
+
+        OptimizeContext {
+            ctx,
+            stack_slot_usage_map,
+        }
+    }
+}
+
+pub(super) fn optimize_function(
+    ctx: &mut Context,
+    #[cfg_attr(not(debug_assertions), allow(unused_variables))] clif_comments: &mut crate::pretty_clif::CommentWriter,
+) {
+    combine_stack_addr_with_load_store(&mut ctx.func);
+
+    let mut opt_ctx = OptimizeContext::for_context(ctx);
+
+    // FIXME Repeat the following optimizations until a fixpoint is reached.
+
+    remove_unused_stack_addr_and_stack_load(&mut opt_ctx);
+
+    #[cfg(debug_assertions)]
+    {
+        for (&OrdStackSlot(stack_slot), usage) in &opt_ctx.stack_slot_usage_map {
+            clif_comments.add_comment(stack_slot, format!("used by: {:?}", usage));
+        }
+    }
+
+    for (stack_slot, users) in opt_ctx.stack_slot_usage_map.iter_mut() {
+        if users.stack_addr.is_empty().not() {
+            // Stack addr leaked; there may be unknown loads and stores.
+            // FIXME use stacked borrows to optimize
+            continue;
+        }
+
+        for load in users.stack_load.clone().into_iter() {
+            let potential_stores = users.potential_stores_for_load(&opt_ctx.ctx, load);
+
+            #[cfg(debug_assertions)]
+            for &store in &potential_stores {
+                clif_comments.add_comment(
+                    load,
+                    format!(
+                        "Potential store -> load forwarding {} -> {} ({:?}, {:?})",
+                        opt_ctx.ctx.func.dfg.display_inst(store, None),
+                        opt_ctx.ctx.func.dfg.display_inst(load, None),
+                        spatial_overlap(&opt_ctx.ctx.func, store, load),
+                        temporal_order(&opt_ctx.ctx, store, load),
+                    ),
+                );
+            }
+
+            match *potential_stores {
+                [] => {
+                    #[cfg(debug_assertions)]
+                    clif_comments
+                        .add_comment(load, "[BUG?] Reading uninitialized memory".to_string());
+                }
+                [store]
+                    if spatial_overlap(&opt_ctx.ctx.func, store, load) == SpatialOverlap::Full
+                        && temporal_order(&opt_ctx.ctx, store, load)
+                            == TemporalOrder::DefinitivelyBefore =>
+                {
+                    // Only one store could have been the origin of the value.
+                    let stored_value = opt_ctx.ctx.func.dfg.inst_args(store)[0];
+
+                    #[cfg(debug_assertions)]
+                    clif_comments
+                        .add_comment(load, format!("Store to load forward {} -> {}", store, load));
+
+                    users.change_load_to_alias(&mut opt_ctx.ctx.func, load, stored_value);
+                }
+                _ => {} // FIXME implement this
+            }
+        }
+
+        for store in users.stack_store.clone().into_iter() {
+            let potential_loads = users.potential_loads_of_store(&opt_ctx.ctx, store);
+
+            #[cfg(debug_assertions)]
+            for &load in &potential_loads {
+                clif_comments.add_comment(
+                    store,
+                    format!(
+                        "Potential load from store {} <- {} ({:?}, {:?})",
+                        opt_ctx.ctx.func.dfg.display_inst(load, None),
+                        opt_ctx.ctx.func.dfg.display_inst(store, None),
+                        spatial_overlap(&opt_ctx.ctx.func, store, load),
+                        temporal_order(&opt_ctx.ctx, store, load),
+                    ),
+                );
+            }
+
+            if potential_loads.is_empty() {
+                // Never loaded; can safely remove all stores and the stack slot.
+                // FIXME also remove stores when there is always a next store before a load.
+
+                #[cfg(debug_assertions)]
+                clif_comments.add_comment(
+                    store,
+                    format!(
+                        "Remove dead stack store {} of {}",
+                        opt_ctx.ctx.func.dfg.display_inst(store, None),
+                        stack_slot.0
+                    ),
+                );
+
+                users.remove_dead_store(&mut opt_ctx.ctx.func, store);
+            }
+        }
+
+        if users.stack_store.is_empty() && users.stack_load.is_empty() {
+            opt_ctx.ctx.func.stack_slots[stack_slot.0].size = 0;
+        }
+    }
+}
+
+fn combine_stack_addr_with_load_store(func: &mut Function) {
+    // Turn load and store into stack_load and stack_store when possible.
+    let mut cursor = FuncCursor::new(func);
+    while let Some(_block) = cursor.next_block() {
+        while let Some(inst) = cursor.next_inst() {
+            match cursor.func.dfg[inst] {
+                InstructionData::Load {
+                    opcode: Opcode::Load,
+                    arg: addr,
+                    flags: _,
+                    offset,
+                } => {
+                    if cursor.func.dfg.ctrl_typevar(inst) == types::I128
+                        || cursor.func.dfg.ctrl_typevar(inst).is_vector()
+                    {
+                        continue; // WORKAROUND: stack_load.i128 and vector loads not yet implemented
+                    }
+                    if let Some((stack_slot, stack_addr_offset)) =
+                        try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
+                    {
+                        if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
+                        {
+                            let ty = cursor.func.dfg.ctrl_typevar(inst);
+                            cursor.func.dfg.replace(inst).stack_load(
+                                ty,
+                                stack_slot,
+                                combined_offset,
+                            );
+                        }
+                    }
+                }
+                InstructionData::Store {
+                    opcode: Opcode::Store,
+                    args: [value, addr],
+                    flags: _,
+                    offset,
+                } => {
+                    if cursor.func.dfg.ctrl_typevar(inst) == types::I128
+                        || cursor.func.dfg.ctrl_typevar(inst).is_vector()
+                    {
+                        continue; // WORKAROUND: stack_store.i128 not yet implemented
+                    }
+                    if let Some((stack_slot, stack_addr_offset)) =
+                        try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
+                    {
+                        if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
+                        {
+                            cursor.func.dfg.replace(inst).stack_store(
+                                value,
+                                stack_slot,
+                                combined_offset,
+                            );
+                        }
+                    }
+                }
+                _ => {}
+            }
+        }
+    }
+}
+
+fn remove_unused_stack_addr_and_stack_load(opt_ctx: &mut OptimizeContext<'_>) {
+    // FIXME incrementally rebuild on each call?
+    let mut stack_addr_load_insts_users = FxHashMap::<Inst, FxHashSet<Inst>>::default();
+
+    let mut cursor = FuncCursor::new(&mut opt_ctx.ctx.func);
+    while let Some(_block) = cursor.next_block() {
+        while let Some(inst) = cursor.next_inst() {
+            for &arg in cursor.func.dfg.inst_args(inst) {
+                if let ValueDef::Result(arg_origin, 0) = cursor.func.dfg.value_def(arg) {
+                    match cursor.func.dfg[arg_origin].opcode() {
+                        Opcode::StackAddr | Opcode::StackLoad => {
+                            stack_addr_load_insts_users
+                                .entry(arg_origin)
+                                .or_insert_with(FxHashSet::default)
+                                .insert(inst);
+                        }
+                        _ => {}
+                    }
+                }
+            }
+        }
+    }
+
+    #[cfg(debug_assertions)]
+    for inst in stack_addr_load_insts_users.keys() {
+        let mut is_recorded_stack_addr_or_stack_load = false;
+        for stack_slot_users in opt_ctx.stack_slot_usage_map.values() {
+            is_recorded_stack_addr_or_stack_load |= stack_slot_users.stack_addr.contains(inst)
+                || stack_slot_users.stack_load.contains(inst);
+        }
+        assert!(is_recorded_stack_addr_or_stack_load);
+    }
+
+    // Replace all unused stack_addr and stack_load instructions with nop.
+    let mut func = &mut opt_ctx.ctx.func;
+
+    for stack_slot_users in opt_ctx.stack_slot_usage_map.values_mut() {
+        stack_slot_users
+            .stack_addr
+            .drain_filter(|inst| {
+                stack_addr_load_insts_users
+                    .get(inst)
+                    .map(|users| users.is_empty())
+                    .unwrap_or(true)
+            })
+            .for_each(|inst| StackSlotUsage::remove_unused_stack_addr(&mut func, inst));
+
+        stack_slot_users
+            .stack_load
+            .drain_filter(|inst| {
+                stack_addr_load_insts_users
+                    .get(inst)
+                    .map(|users| users.is_empty())
+                    .unwrap_or(true)
+            })
+            .for_each(|inst| StackSlotUsage::remove_unused_load(&mut func, inst));
+    }
+}
+
+fn try_get_stack_slot_and_offset_for_addr(
+    func: &Function,
+    addr: Value,
+) -> Option<(StackSlot, Offset32)> {
+    if let ValueDef::Result(addr_inst, 0) = func.dfg.value_def(addr) {
+        if let InstructionData::StackLoad {
+            opcode: Opcode::StackAddr,
+            stack_slot,
+            offset,
+        } = func.dfg[addr_inst]
+        {
+            return Some((stack_slot, offset));
+        }
+    }
+    None
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum SpatialOverlap {
+    No,
+    Partial,
+    Full,
+}
+
+fn spatial_overlap(func: &Function, src: Inst, dest: Inst) -> SpatialOverlap {
+    fn inst_info(func: &Function, inst: Inst) -> (StackSlot, Offset32, u32) {
+        match func.dfg[inst] {
+            InstructionData::StackLoad {
+                opcode: Opcode::StackAddr,
+                stack_slot,
+                offset,
+            }
+            | InstructionData::StackLoad {
+                opcode: Opcode::StackLoad,
+                stack_slot,
+                offset,
+            }
+            | InstructionData::StackStore {
+                opcode: Opcode::StackStore,
+                stack_slot,
+                offset,
+                arg: _,
+            } => (stack_slot, offset, func.dfg.ctrl_typevar(inst).bytes()),
+            _ => unreachable!("{:?}", func.dfg[inst]),
+        }
+    }
+
+    debug_assert_ne!(src, dest);
+
+    let (src_ss, src_offset, src_size) = inst_info(func, src);
+    let (dest_ss, dest_offset, dest_size) = inst_info(func, dest);
+
+    if src_ss != dest_ss {
+        return SpatialOverlap::No;
+    }
+
+    if src_offset == dest_offset && src_size == dest_size {
+        return SpatialOverlap::Full;
+    }
+
+    let src_end: i64 = src_offset.try_add_i64(i64::from(src_size)).unwrap().into();
+    let dest_end: i64 = dest_offset
+        .try_add_i64(i64::from(dest_size))
+        .unwrap()
+        .into();
+    if src_end <= dest_offset.into() || dest_end <= src_offset.into() {
+        return SpatialOverlap::No;
+    }
+
+    SpatialOverlap::Partial
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum TemporalOrder {
+    /// `src` will never be executed before `dest`.
+    NeverBefore,
+
+    /// `src` may be executed before `dest`.
+    MaybeBefore,
+
+    /// `src` will always be executed before `dest`.
+    /// There may still be other instructions in between.
+    DefinitivelyBefore,
+}
+
+fn temporal_order(ctx: &Context, src: Inst, dest: Inst) -> TemporalOrder {
+    debug_assert_ne!(src, dest);
+
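+    // FIXME The second check below is identical to the first, so `NeverBefore` is never returned
+    // and the analysis conservatively falls back to `MaybeBefore`.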
+    if ctx.domtree.dominates(src, dest, &ctx.func.layout) {
+        TemporalOrder::DefinitivelyBefore
+    } else if ctx.domtree.dominates(src, dest, &ctx.func.layout) {
+        TemporalOrder::NeverBefore
+    } else {
+        TemporalOrder::MaybeBefore
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/pointer.rs b/compiler/rustc_codegen_cranelift/src/pointer.rs
new file mode 100644
index 0000000..b2036d7b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/pointer.rs
@@ -0,0 +1,206 @@
+//! Defines [`Pointer`] which is used to improve the quality of the generated clif ir for pointer
+//! operations.
+
+use crate::prelude::*;
+
+use rustc_target::abi::Align;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+/// A pointer pointing either to a certain address, a certain stack slot, or nothing (a dangling
+/// pointer with a known alignment).
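+///
+/// Offsets are kept symbolic where possible: as an illustrative sketch, `Pointer::new(addr).offset_i64(fx, 8)`
+/// only records the offset instead of emitting an `iadd_imm`, so a later `load`/`store` can fold it
+/// into the memory instruction's immediate offset.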
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct Pointer {
+    base: PointerBase,
+    offset: Offset32,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum PointerBase {
+    Addr(Value),
+    Stack(StackSlot),
+    Dangling(Align),
+}
+
+impl Pointer {
+    pub(crate) fn new(addr: Value) -> Self {
+        Pointer {
+            base: PointerBase::Addr(addr),
+            offset: Offset32::new(0),
+        }
+    }
+
+    pub(crate) fn stack_slot(stack_slot: StackSlot) -> Self {
+        Pointer {
+            base: PointerBase::Stack(stack_slot),
+            offset: Offset32::new(0),
+        }
+    }
+
+    pub(crate) fn const_addr<'a, 'tcx>(
+        fx: &mut FunctionCx<'a, 'tcx, impl Module>,
+        addr: i64,
+    ) -> Self {
+        let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
+        Pointer {
+            base: PointerBase::Addr(addr),
+            offset: Offset32::new(0),
+        }
+    }
+
+    pub(crate) fn dangling(align: Align) -> Self {
+        Pointer {
+            base: PointerBase::Dangling(align),
+            offset: Offset32::new(0),
+        }
+    }
+
+    #[cfg(debug_assertions)]
+    pub(crate) fn base_and_offset(self) -> (PointerBase, Offset32) {
+        (self.base, self.offset)
+    }
+
+    pub(crate) fn get_addr<'a, 'tcx>(self, fx: &mut FunctionCx<'a, 'tcx, impl Module>) -> Value {
+        match self.base {
+            PointerBase::Addr(base_addr) => {
+                let offset: i64 = self.offset.into();
+                if offset == 0 {
+                    base_addr
+                } else {
+                    fx.bcx.ins().iadd_imm(base_addr, offset)
+                }
+            }
+            PointerBase::Stack(stack_slot) => {
+                fx.bcx
+                    .ins()
+                    .stack_addr(fx.pointer_type, stack_slot, self.offset)
+            }
+            PointerBase::Dangling(align) => fx
+                .bcx
+                .ins()
+                .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
+        }
+    }
+
+    pub(crate) fn offset<'a, 'tcx>(
+        self,
+        fx: &mut FunctionCx<'a, 'tcx, impl Module>,
+        extra_offset: Offset32,
+    ) -> Self {
+        self.offset_i64(fx, extra_offset.into())
+    }
+
+    pub(crate) fn offset_i64<'a, 'tcx>(
+        self,
+        fx: &mut FunctionCx<'a, 'tcx, impl Module>,
+        extra_offset: i64,
+    ) -> Self {
+        if let Some(new_offset) = self.offset.try_add_i64(extra_offset) {
+            Pointer {
+                base: self.base,
+                offset: new_offset,
+            }
+        } else {
+            let base_offset: i64 = self.offset.into();
+            if let Some(new_offset) = base_offset.checked_add(extra_offset) {
+                let base_addr = match self.base {
+                    PointerBase::Addr(addr) => addr,
+                    PointerBase::Stack(stack_slot) => {
+                        fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
+                    }
+                    PointerBase::Dangling(align) => fx
+                        .bcx
+                        .ins()
+                        .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
+                };
+                let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset);
+                Pointer {
+                    base: PointerBase::Addr(addr),
+                    offset: Offset32::new(0),
+                }
+            } else {
+                panic!(
+                    "self.offset ({}) + extra_offset ({}) not representable in i64",
+                    base_offset, extra_offset
+                );
+            }
+        }
+    }
+
+    pub(crate) fn offset_value<'a, 'tcx>(
+        self,
+        fx: &mut FunctionCx<'a, 'tcx, impl Module>,
+        extra_offset: Value,
+    ) -> Self {
+        match self.base {
+            PointerBase::Addr(addr) => Pointer {
+                base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+                offset: self.offset,
+            },
+            PointerBase::Stack(stack_slot) => {
+                let base_addr = fx
+                    .bcx
+                    .ins()
+                    .stack_addr(fx.pointer_type, stack_slot, self.offset);
+                Pointer {
+                    base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)),
+                    offset: Offset32::new(0),
+                }
+            }
+            PointerBase::Dangling(align) => {
+                let addr = fx
+                    .bcx
+                    .ins()
+                    .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
+                Pointer {
+                    base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+                    offset: self.offset,
+                }
+            }
+        }
+    }
+
+    pub(crate) fn load<'a, 'tcx>(
+        self,
+        fx: &mut FunctionCx<'a, 'tcx, impl Module>,
+        ty: Type,
+        flags: MemFlags,
+    ) -> Value {
+        match self.base {
+            PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset),
+            PointerBase::Stack(stack_slot) => {
+                if ty == types::I128 || ty.is_vector() {
+                    // WORKAROUND for stack_load.i128 and stack_load.iXxY not being implemented
+                    let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+                    fx.bcx.ins().load(ty, flags, base_addr, self.offset)
+                } else {
+                    fx.bcx.ins().stack_load(ty, stack_slot, self.offset)
+                }
+            }
+            PointerBase::Dangling(_align) => unreachable!(),
+        }
+    }
+
+    pub(crate) fn store<'a, 'tcx>(
+        self,
+        fx: &mut FunctionCx<'a, 'tcx, impl Module>,
+        value: Value,
+        flags: MemFlags,
+    ) {
+        match self.base {
+            PointerBase::Addr(base_addr) => {
+                fx.bcx.ins().store(flags, value, base_addr, self.offset);
+            }
+            PointerBase::Stack(stack_slot) => {
+                let val_ty = fx.bcx.func.dfg.value_type(value);
+                if val_ty == types::I128 || val_ty.is_vector() {
+                    // WORKAROUND for stack_store.i128 and stack_store.iXxY not being implemented
+                    let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+                    fx.bcx.ins().store(flags, value, base_addr, self.offset);
+                } else {
+                    fx.bcx.ins().stack_store(value, stack_slot, self.offset);
+                }
+            }
+            PointerBase::Dangling(_align) => unreachable!(),
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
new file mode 100644
index 0000000..ff878af
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
@@ -0,0 +1,287 @@
+//! This module provides the [CommentWriter] which makes it possible
+//! to add comments to the written cranelift ir.
+//!
+//! # Example
+//!
+//! ```clif
+//! test compile
+//! target x86_64
+//!
+//! function u0:0(i64, i64, i64) system_v {
+//! ; symbol _ZN119_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$u27$a$u20$$RF$$u27$b$u20$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17he85059d5e6a760a0E
+//! ; instance Instance { def: Item(DefId(0/0:29 ~ example[8787]::{{impl}}[0]::call_once[0])), substs: [ReErased, ReErased] }
+//! ; sig ([IsNotEmpty, (&&[u16],)]; c_variadic: false)->(u8, u8)
+//!
+//! ; ssa {_2: NOT_SSA, _4: NOT_SSA, _0: NOT_SSA, _3: (empty), _1: NOT_SSA}
+//! ; msg   loc.idx    param    pass mode            ssa flags  ty
+//! ; ret    _0      = v0       ByRef                NOT_SSA    (u8, u8)
+//! ; arg    _1      = v1       ByRef                NOT_SSA    IsNotEmpty
+//! ; arg    _2.0    = v2       ByVal(types::I64)    NOT_SSA    &&[u16]
+//!
+//!     ss0 = explicit_slot 0 ; _1: IsNotEmpty size=0 align=1,8
+//!     ss1 = explicit_slot 8 ; _2: (&&[u16],) size=8 align=8,8
+//!     ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
+//!     sig0 = (i64, i64, i64) system_v
+//!     sig1 = (i64, i64, i64) system_v
+//!     fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
+//!
+//! block0(v0: i64, v1: i64, v2: i64):
+//!     v3 = stack_addr.i64 ss0
+//!     v4 = stack_addr.i64 ss1
+//!     store v2, v4
+//!     v5 = stack_addr.i64 ss2
+//!     jump block1
+//!
+//! block1:
+//!     nop
+//! ; _3 = &mut _1
+//! ; _4 = _2
+//!     v6 = load.i64 v4
+//!     store v6, v5
+//! ;
+//! ; _0 = const mini_core::FnMut::call_mut(move _3, move _4)
+//!     v7 = load.i64 v5
+//!     call fn0(v0, v3, v7)
+//!     jump block2
+//!
+//! block2:
+//!     nop
+//! ;
+//! ; return
+//!     return
+//! }
+//! ```
+
+use std::fmt;
+
+use cranelift_codegen::{
+    entity::SecondaryMap,
+    ir::{entities::AnyEntity, function::DisplayFunctionAnnotations},
+    write::{FuncWriter, PlainWriter},
+};
+
+use rustc_session::config::OutputType;
+
+use crate::prelude::*;
+
+#[derive(Debug)]
+pub(crate) struct CommentWriter {
+    global_comments: Vec<String>,
+    entity_comments: FxHashMap<AnyEntity, String>,
+}
+
+impl CommentWriter {
+    pub(crate) fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+        let global_comments = if cfg!(debug_assertions) {
+            vec![
+                format!("symbol {}", tcx.symbol_name(instance).name),
+                format!("instance {:?}", instance),
+                format!(
+                    "sig {:?}",
+                    tcx.normalize_erasing_late_bound_regions(
+                        ParamEnv::reveal_all(),
+                        &crate::abi::fn_sig_for_fn_abi(tcx, instance)
+                    )
+                ),
+                String::new(),
+            ]
+        } else {
+            vec![]
+        };
+
+        CommentWriter {
+            global_comments,
+            entity_comments: FxHashMap::default(),
+        }
+    }
+}
+
+#[cfg(debug_assertions)]
+impl CommentWriter {
+    pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+        self.global_comments.push(comment.into());
+    }
+
+    pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+        &mut self,
+        entity: E,
+        comment: S,
+    ) {
+        use std::collections::hash_map::Entry;
+        match self.entity_comments.entry(entity.into()) {
+            Entry::Occupied(mut occ) => {
+                occ.get_mut().push('\n');
+                occ.get_mut().push_str(comment.as_ref());
+            }
+            Entry::Vacant(vac) => {
+                vac.insert(comment.into());
+            }
+        }
+    }
+}
+
+impl FuncWriter for &'_ CommentWriter {
+    fn write_preamble(
+        &mut self,
+        w: &mut dyn fmt::Write,
+        func: &Function,
+        reg_info: Option<&isa::RegInfo>,
+    ) -> Result<bool, fmt::Error> {
+        for comment in &self.global_comments {
+            if !comment.is_empty() {
+                writeln!(w, "; {}", comment)?;
+            } else {
+                writeln!(w)?;
+            }
+        }
+        if !self.global_comments.is_empty() {
+            writeln!(w)?;
+        }
+
+        self.super_preamble(w, func, reg_info)
+    }
+
+    fn write_entity_definition(
+        &mut self,
+        w: &mut dyn fmt::Write,
+        _func: &Function,
+        entity: AnyEntity,
+        value: &dyn fmt::Display,
+    ) -> fmt::Result {
+        write!(w, "    {} = {}", entity, value)?;
+
+        if let Some(comment) = self.entity_comments.get(&entity) {
+            writeln!(w, " ; {}", comment.replace('\n', "\n; "))
+        } else {
+            writeln!(w)
+        }
+    }
+
+    fn write_block_header(
+        &mut self,
+        w: &mut dyn fmt::Write,
+        func: &Function,
+        isa: Option<&dyn isa::TargetIsa>,
+        block: Block,
+        indent: usize,
+    ) -> fmt::Result {
+        PlainWriter.write_block_header(w, func, isa, block, indent)
+    }
+
+    fn write_instruction(
+        &mut self,
+        w: &mut dyn fmt::Write,
+        func: &Function,
+        aliases: &SecondaryMap<Value, Vec<Value>>,
+        isa: Option<&dyn isa::TargetIsa>,
+        inst: Inst,
+        indent: usize,
+    ) -> fmt::Result {
+        PlainWriter.write_instruction(w, func, aliases, isa, inst, indent)?;
+        if let Some(comment) = self.entity_comments.get(&inst.into()) {
+            writeln!(w, "; {}", comment.replace('\n', "\n; "))?;
+        }
+        Ok(())
+    }
+}
+
+#[cfg(debug_assertions)]
+impl<M: Module> FunctionCx<'_, '_, M> {
+    pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+        self.clif_comments.add_global_comment(comment);
+    }
+
+    pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+        &mut self,
+        entity: E,
+        comment: S,
+    ) {
+        self.clif_comments.add_comment(entity, comment);
+    }
+}
+
+pub(crate) fn write_clif_file<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    postfix: &str,
+    isa: Option<&dyn cranelift_codegen::isa::TargetIsa>,
+    instance: Instance<'tcx>,
+    context: &cranelift_codegen::Context,
+    mut clif_comments: &CommentWriter,
+) {
+    use std::io::Write;
+
+    if !cfg!(debug_assertions)
+        && !tcx
+            .sess
+            .opts
+            .output_types
+            .contains_key(&OutputType::LlvmAssembly)
+    {
+        return;
+    }
+
+    let value_ranges = isa.map(|isa| {
+        context
+            .build_value_labels_ranges(isa)
+            .expect("value location ranges")
+    });
+
+    let clif_output_dir = tcx.output_filenames(LOCAL_CRATE).with_extension("clif");
+
+    match std::fs::create_dir(&clif_output_dir) {
+        Ok(()) => {}
+        Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {}
+        res @ Err(_) => res.unwrap(),
+    }
+
+    let clif_file_name = clif_output_dir.join(format!(
+        "{}.{}.clif",
+        tcx.symbol_name(instance).name,
+        postfix
+    ));
+
+    let mut clif = String::new();
+    cranelift_codegen::write::decorate_function(
+        &mut clif_comments,
+        &mut clif,
+        &context.func,
+        &DisplayFunctionAnnotations {
+            isa: Some(&*crate::build_isa(
+                tcx.sess, true, /* PIC doesn't matter here */
+            )),
+            value_ranges: value_ranges.as_ref(),
+        },
+    )
+    .unwrap();
+
+    let res: std::io::Result<()> = try {
+        let mut file = std::fs::File::create(clif_file_name)?;
+        let target_triple = crate::target_triple(tcx.sess);
+        writeln!(file, "test compile")?;
+        writeln!(file, "set is_pic")?;
+        writeln!(file, "set enable_simd")?;
+        writeln!(file, "target {} haswell", target_triple)?;
+        writeln!(file)?;
+        file.write_all(clif.as_bytes())?;
+    };
+    if let Err(err) = res {
+        tcx.sess.warn(&format!("err writing clif file: {}", err));
+    }
+}
+
+impl<M: Module> fmt::Debug for FunctionCx<'_, '_, M> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        writeln!(f, "{:?}", self.instance.substs)?;
+        writeln!(f, "{:?}", self.local_map)?;
+
+        let mut clif = String::new();
+        ::cranelift_codegen::write::decorate_function(
+            &mut &self.clif_comments,
+            &mut clif,
+            &self.bcx.func,
+            &DisplayFunctionAnnotations::default(),
+        )
+        .unwrap();
+        writeln!(f, "\n{}", clif)
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/toolchain.rs b/compiler/rustc_codegen_cranelift/src/toolchain.rs
new file mode 100644
index 0000000..735c59d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/toolchain.rs
@@ -0,0 +1,125 @@
+//! Locating the various executables that are part of a C toolchain.
+
+use std::path::PathBuf;
+
+use rustc_middle::bug;
+use rustc_session::Session;
+use rustc_target::spec::LinkerFlavor;
+
+/// Tries to infer the path of a binary for the target toolchain from the linker name.
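+///
+/// For example (illustrative): if the configured linker is `aarch64-linux-gnu-gcc`, requesting the
+/// tool `"ar"` yields a path ending in `aarch64-linux-gnu-ar`.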
+pub(crate) fn get_toolchain_binary(sess: &Session, tool: &str) -> PathBuf {
+    let (mut linker, _linker_flavor) = linker_and_flavor(sess);
+    let linker_file_name = linker
+        .file_name()
+        .and_then(|name| name.to_str())
+        .unwrap_or_else(|| sess.fatal("couldn't extract file name from specified linker"));
+
+    if linker_file_name == "ld.lld" {
+        if tool != "ld" {
+            linker.set_file_name(tool)
+        }
+    } else {
+        let tool_file_name = linker_file_name
+            .replace("ld", tool)
+            .replace("gcc", tool)
+            .replace("clang", tool)
+            .replace("cc", tool);
+
+        linker.set_file_name(tool_file_name)
+    }
+
+    linker
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/5db778affee7c6600c8e7a177c48282dab3f6292/src/librustc_codegen_ssa/back/link.rs#L848-L931
+fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
+    fn infer_from(
+        sess: &Session,
+        linker: Option<PathBuf>,
+        flavor: Option<LinkerFlavor>,
+    ) -> Option<(PathBuf, LinkerFlavor)> {
+        match (linker, flavor) {
+            (Some(linker), Some(flavor)) => Some((linker, flavor)),
+            // only the linker flavor is known; use the default linker for the selected flavor
+            (None, Some(flavor)) => Some((
+                PathBuf::from(match flavor {
+                    LinkerFlavor::Em => {
+                        if cfg!(windows) {
+                            "emcc.bat"
+                        } else {
+                            "emcc"
+                        }
+                    }
+                    LinkerFlavor::Gcc => {
+                        if cfg!(any(target_os = "solaris", target_os = "illumos")) {
+                            // On historical Solaris systems, "cc" may have
+                            // been Sun Studio, which is not flag-compatible
+                            // with "gcc".  This history casts a long shadow,
+                            // and many modern illumos distributions today
+                            // ship GCC as "gcc" without also making it
+                            // available as "cc".
+                            "gcc"
+                        } else {
+                            "cc"
+                        }
+                    }
+                    LinkerFlavor::Ld => "ld",
+                    LinkerFlavor::Msvc => "link.exe",
+                    LinkerFlavor::Lld(_) => "lld",
+                    LinkerFlavor::PtxLinker => "rust-ptx-linker",
+                }),
+                flavor,
+            )),
+            (Some(linker), None) => {
+                let stem = linker
+                    .file_stem()
+                    .and_then(|stem| stem.to_str())
+                    .unwrap_or_else(|| {
+                        sess.fatal("couldn't extract file stem from specified linker")
+                    });
+
+                let flavor = if stem == "emcc" {
+                    LinkerFlavor::Em
+                } else if stem == "gcc"
+                    || stem.ends_with("-gcc")
+                    || stem == "clang"
+                    || stem.ends_with("-clang")
+                {
+                    LinkerFlavor::Gcc
+                } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") {
+                    LinkerFlavor::Ld
+                } else if stem == "link" || stem == "lld-link" {
+                    LinkerFlavor::Msvc
+                } else if stem == "lld" || stem == "rust-lld" {
+                    LinkerFlavor::Lld(sess.target.lld_flavor)
+                } else {
+                    // fall back to the value in the target spec
+                    sess.target.linker_flavor
+                };
+
+                Some((linker, flavor))
+            }
+            (None, None) => None,
+        }
+    }
+
+    // The linker and linker flavor specified on the command line take precedence over what the
+    // target specification provides.
+    if let Some(ret) = infer_from(
+        sess,
+        sess.opts.cg.linker.clone(),
+        sess.opts.cg.linker_flavor,
+    ) {
+        return ret;
+    }
+
+    if let Some(ret) = infer_from(
+        sess,
+        sess.target.linker.clone().map(PathBuf::from),
+        Some(sess.target.linker_flavor),
+    ) {
+        return ret;
+    }
+
+    bug!("Not enough information provided to determine how to invoke the linker");
+}
diff --git a/compiler/rustc_codegen_cranelift/src/trap.rs b/compiler/rustc_codegen_cranelift/src/trap.rs
new file mode 100644
index 0000000..690d967
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/trap.rs
@@ -0,0 +1,69 @@
+//! Helpers used to print a message and abort in case of certain panics and some detected UB.
+
+use crate::prelude::*;
+
+fn codegen_print(fx: &mut FunctionCx<'_, '_, impl Module>, msg: &str) {
+    let puts = fx
+        .cx
+        .module
+        .declare_function(
+            "puts",
+            Linkage::Import,
+            &Signature {
+                call_conv: CallConv::triple_default(fx.triple()),
+                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
+                returns: vec![AbiParam::new(types::I32)],
+            },
+        )
+        .unwrap();
+    let puts = fx.cx.module.declare_func_in_func(puts, &mut fx.bcx.func);
+    #[cfg(debug_assertions)]
+    {
+        fx.add_comment(puts, "puts");
+    }
+
+    let symbol_name = fx.tcx.symbol_name(fx.instance);
+    let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, symbol_name, msg);
+    let msg_ptr = fx.anonymous_str("trap", &real_msg);
+    fx.bcx.ins().call(puts, &[msg_ptr]);
+}
+
+/// Trap code: user1
+pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
+    codegen_print(fx, msg.as_ref());
+    fx.bcx.ins().trap(TrapCode::User(1));
+}
+
+/// Use this for example when a function call should never return. This will fill the current block,
+/// so you can **not** add instructions to it afterwards.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
+    codegen_print(fx, msg.as_ref());
+    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
+
+/// Like `trap_unreachable` but returns a fake value of the specified type.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unreachable_ret_value<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    dest_layout: TyAndLayout<'tcx>,
+    msg: impl AsRef<str>,
+) -> CValue<'tcx> {
+    codegen_print(fx, msg.as_ref());
+    let true_ = fx.bcx.ins().iconst(types::I32, 1);
+    fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
+    CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
+}
+
+/// Use this when something is unimplemented, but `libcore` or `libstd` requires it to codegen.
+/// Unlike `trap_unreachable` this will not fill the current block, so you **must** add instructions
+/// to it afterwards.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
+    codegen_print(fx, msg.as_ref());
+    let true_ = fx.bcx.ins().iconst(types::I32, 1);
+    fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
+}
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
new file mode 100644
index 0000000..c77ff5d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -0,0 +1,238 @@
+//! Codegen of the [`PointerCast::Unsize`] operation.
+//!
+//! [`PointerCast::Unsize`]: `rustc_middle::ty::adjustment::PointerCast::Unsize`
+
+use crate::prelude::*;
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/base.rs#L159-L307
+
+/// Retrieve the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit funny. It is intended for use
+/// in an upcast, where the new vtable for an object will be derived
+/// from the old one.
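+///
+/// Illustrative examples: unsizing `&[u8; 4]` to `&[u8]` yields the constant length `4` as the
+/// info, while unsizing `&T` to `&dyn Trait` yields a pointer to the vtable of `T` for `Trait`.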
+pub(crate) fn unsized_info<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    source: Ty<'tcx>,
+    target: Ty<'tcx>,
+    old_info: Option<Value>,
+) -> Value {
+    let (source, target) =
+        fx.tcx
+            .struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
+    match (&source.kind(), &target.kind()) {
+        (&ty::Array(_, len), &ty::Slice(_)) => fx.bcx.ins().iconst(
+            fx.pointer_type,
+            len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64,
+        ),
+        (&ty::Dynamic(..), &ty::Dynamic(..)) => {
+            // For now, upcasts are limited to changes in marker
+            // traits, and hence never require an actual change to
+            // the vtable.
+            old_info.expect("unsized_info: missing old info for trait upcast")
+        }
+        (_, &ty::Dynamic(ref data, ..)) => {
+            crate::vtable::get_vtable(fx, fx.layout_of(source), data.principal())
+        }
+        _ => bug!(
+            "unsized_info: invalid unsizing {:?} -> {:?}",
+            source,
+            target
+        ),
+    }
+}
+
+/// Coerce the thin pointer `src` with layout `src_layout` to a fat pointer with layout
+/// `dst_layout`, returning the pointer together with the unsize metadata.
+fn unsize_thin_ptr<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    src: Value,
+    src_layout: TyAndLayout<'tcx>,
+    dst_layout: TyAndLayout<'tcx>,
+) -> (Value, Value) {
+    match (&src_layout.ty.kind(), &dst_layout.ty.kind()) {
+        (&ty::Ref(_, a, _), &ty::Ref(_, b, _))
+        | (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+            assert!(!fx.layout_of(a).is_unsized());
+            (src, unsized_info(fx, a, b, None))
+        }
+        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+            let (a, b) = (src_layout.ty.boxed_ty(), dst_layout.ty.boxed_ty());
+            assert!(!fx.layout_of(a).is_unsized());
+            (src, unsized_info(fx, a, b, None))
+        }
+        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+            assert_eq!(def_a, def_b);
+
+            let mut result = None;
+            for i in 0..src_layout.fields.count() {
+                let src_f = src_layout.field(fx, i);
+                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+                if src_f.is_zst() {
+                    continue;
+                }
+                assert_eq!(src_layout.size, src_f.size);
+
+                let dst_f = dst_layout.field(fx, i);
+                assert_ne!(src_f.ty, dst_f.ty);
+                assert_eq!(result, None);
+                result = Some(unsize_thin_ptr(fx, src, src_f, dst_f));
+            }
+            result.unwrap()
+        }
+        _ => bug!("unsize_thin_ptr: called on bad types"),
+    }
+}
+
+/// Coerce `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty` and store the result in `dst`
+pub(crate) fn coerce_unsized_into<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    src: CValue<'tcx>,
+    dst: CPlace<'tcx>,
+) {
+    let src_ty = src.layout().ty;
+    let dst_ty = dst.layout().ty;
+    let mut coerce_ptr = || {
+        let (base, info) = if fx
+            .layout_of(src.layout().ty.builtin_deref(true).unwrap().ty)
+            .is_unsized()
+        {
+            // fat-ptr to fat-ptr unsize preserves the vtable
+            // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
+            src.load_scalar_pair(fx)
+        } else {
+            let base = src.load_scalar(fx);
+            unsize_thin_ptr(fx, base, src.layout(), dst.layout())
+        };
+        dst.write_cvalue(fx, CValue::by_val_pair(base, info, dst.layout()));
+    };
+    match (&src_ty.kind(), &dst_ty.kind()) {
+        (&ty::Ref(..), &ty::Ref(..))
+        | (&ty::Ref(..), &ty::RawPtr(..))
+        | (&ty::RawPtr(..), &ty::RawPtr(..)) => coerce_ptr(),
+        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+            assert_eq!(def_a, def_b);
+
+            for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
+                let src_f = src.value_field(fx, mir::Field::new(i));
+                let dst_f = dst.place_field(fx, mir::Field::new(i));
+
+                if dst_f.layout().is_zst() {
+                    continue;
+                }
+
+                if src_f.layout().ty == dst_f.layout().ty {
+                    dst_f.write_cvalue(fx, src_f);
+                } else {
+                    coerce_unsized_into(fx, src_f, dst_f);
+                }
+            }
+        }
+        _ => bug!(
+            "coerce_unsized_into: invalid coercion {:?} -> {:?}",
+            src_ty,
+            dst_ty
+        ),
+    }
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
+
+pub(crate) fn size_and_align_of_dst<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    layout: TyAndLayout<'tcx>,
+    info: Value,
+) -> (Value, Value) {
+    if !layout.is_unsized() {
+        let size = fx
+            .bcx
+            .ins()
+            .iconst(fx.pointer_type, layout.size.bytes() as i64);
+        let align = fx
+            .bcx
+            .ins()
+            .iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
+        return (size, align);
+    }
+    match layout.ty.kind() {
+        ty::Dynamic(..) => {
+            // load size/align from vtable
+            (
+                crate::vtable::size_of_obj(fx, info),
+                crate::vtable::min_align_of_obj(fx, info),
+            )
+        }
+        ty::Slice(_) | ty::Str => {
+            let unit = layout.field(fx, 0);
+            // The info in this case is the length of the str, so the size is that
+            // times the unit size.
+            (
+                fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
+                fx.bcx
+                    .ins()
+                    .iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
+            )
+        }
+        _ => {
+            // First get the size of all statically known fields.
+            // Don't use size_of because it also rounds up to alignment, which we
+            // want to avoid, as the unsized field's alignment could be smaller.
+            assert!(!layout.ty.is_simd());
+
+            let i = layout.fields.count() - 1;
+            let sized_size = layout.fields.offset(i).bytes();
+            let sized_align = layout.align.abi.bytes();
+            let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);
+
+            // Recurse to get the size of the dynamically sized field (must be
+            // the last field).
+            let field_layout = layout.field(fx, i);
+            let (unsized_size, mut unsized_align) = size_and_align_of_dst(fx, field_layout, info);
+
+            // FIXME (#26403, #27023): We should be adding padding
+            // to `sized_size` (to accommodate the `unsized_align`
+            // required of the unsized field that follows) before
+            // summing it with `sized_size`. (Note that since #26403
+            // is unfixed, we do not yet add the necessary padding
+            // here. But this is where the add would go.)
+
+            // Return the sum of sizes and max of aligns.
+            let size = fx.bcx.ins().iadd_imm(unsized_size, sized_size as i64);
+
+            // Packed types ignore the alignment of their fields.
+            if let ty::Adt(def, _) = layout.ty.kind() {
+                if def.repr.packed() {
+                    unsized_align = sized_align;
+                }
+            }
+
+            // Choose max of two known alignments (combined value must
+            // be aligned according to more restrictive of the two).
+            let cmp = fx
+                .bcx
+                .ins()
+                .icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
+            let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
+
+            // Issue #27023: must add any necessary padding to `size`
+            // (to make it a multiple of `align`) before returning it.
+            //
+            // Namely, the returned size should be, in C notation:
+            //
+            //   `size + ((size & (align-1)) ? align : 0)`
+            //
+            // emulated via the semi-standard fast bit trick:
+            //
+            //   `(size + (align-1)) & -align`
+            let addend = fx.bcx.ins().iadd_imm(align, -1);
+            let add = fx.bcx.ins().iadd(size, addend);
+            let neg = fx.bcx.ins().ineg(align);
+            let size = fx.bcx.ins().band(add, neg);
+
+            (size, align)
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
new file mode 100644
index 0000000..0000866
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -0,0 +1,794 @@
+//! Definition of [`CValue`] and [`CPlace`]
+
+use crate::prelude::*;
+
+use cranelift_codegen::entity::EntityRef;
+use cranelift_codegen::ir::immediates::Offset32;
+
+fn codegen_field<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    base: Pointer,
+    extra: Option<Value>,
+    layout: TyAndLayout<'tcx>,
+    field: mir::Field,
+) -> (Pointer, TyAndLayout<'tcx>) {
+    let field_offset = layout.fields.offset(field.index());
+    let field_layout = layout.field(&*fx, field.index());
+
+    let simple = |fx: &mut FunctionCx<'_, '_, _>| {
+        (
+            base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()),
+            field_layout,
+        )
+    };
+
+    if let Some(extra) = extra {
+        if !field_layout.is_unsized() {
+            return simple(fx);
+        }
+        match field_layout.ty.kind() {
+            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
+            ty::Adt(def, _) if def.repr.packed() => {
+                assert_eq!(layout.align.abi.bytes(), 1);
+                simple(fx)
+            }
+            _ => {
+                // We have to align the offset for DSTs.
+                let unaligned_offset = field_offset.bytes();
+                let (_, unsized_align) =
+                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
+
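+                // Round `unaligned_offset` up to a multiple of `unsized_align`:
+                // offset = (unaligned_offset + unsized_align - 1) & -unsized_align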
+                let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
+                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
+                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
+                let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
+                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
+                let offset = fx.bcx.ins().band(and_lhs, and_rhs);
+
+                (base.offset_value(fx, offset), field_layout)
+            }
+        }
+    } else {
+        simple(fx)
+    }
+}
+
+fn scalar_pair_calculate_b_offset(
+    tcx: TyCtxt<'_>,
+    a_scalar: &Scalar,
+    b_scalar: &Scalar,
+) -> Offset32 {
+    let b_offset = a_scalar
+        .value
+        .size(&tcx)
+        .align_to(b_scalar.value.align(&tcx).abi);
+    Offset32::new(b_offset.bytes().try_into().unwrap())
+}
+
+/// A read-only value
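+///
+/// Illustrative sketch of the three representations: a value either lives behind a pointer
+/// (`ByRef`, optionally with extra metadata for unsized values), in a single SSA `Value`
+/// (`ByVal`, e.g. an `i32`), or in two SSA `Value`s (`ByValPair`, e.g. a fat pointer).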
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
+
+#[derive(Debug, Copy, Clone)]
+enum CValueInner {
+    ByRef(Pointer, Option<Value>),
+    ByVal(Value),
+    ByValPair(Value, Value),
+}
+
+impl<'tcx> CValue<'tcx> {
+    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+        CValue(CValueInner::ByRef(ptr, None), layout)
+    }
+
+    pub(crate) fn by_ref_unsized(
+        ptr: Pointer,
+        meta: Value,
+        layout: TyAndLayout<'tcx>,
+    ) -> CValue<'tcx> {
+        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
+    }
+
+    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+        CValue(CValueInner::ByVal(value), layout)
+    }
+
+    pub(crate) fn by_val_pair(
+        value: Value,
+        extra: Value,
+        layout: TyAndLayout<'tcx>,
+    ) -> CValue<'tcx> {
+        CValue(CValueInner::ByValPair(value, extra), layout)
+    }
+
+    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+        self.1
+    }
+
+    // FIXME remove
+    pub(crate) fn force_stack(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    ) -> (Pointer, Option<Value>) {
+        let layout = self.1;
+        match self.0 {
+            CValueInner::ByRef(ptr, meta) => (ptr, meta),
+            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
+                let cplace = CPlace::new_stack_slot(fx, layout);
+                cplace.write_cvalue(fx, self);
+                (cplace.to_ptr(), None)
+            }
+        }
+    }
+
+    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
+        match self.0 {
+            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
+            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
+        }
+    }
+
+    /// Load a value whose layout ABI is `Abi::Scalar` (or `Abi::Vector`)
+    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, 'tcx, impl Module>) -> Value {
+        let layout = self.1;
+        match self.0 {
+            CValueInner::ByRef(ptr, None) => {
+                let clif_ty = match layout.abi {
+                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
+                    Abi::Vector { ref element, count } => {
+                        scalar_to_clif_type(fx.tcx, element.clone())
+                            .by(u16::try_from(count).unwrap())
+                            .unwrap()
+                    }
+                    _ => unreachable!("{:?}", layout.ty),
+                };
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                ptr.load(fx, clif_ty, flags)
+            }
+            CValueInner::ByVal(value) => value,
+            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
+            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
+        }
+    }
+
+    /// Load a value pair whose layout ABI is `Abi::ScalarPair`
+    pub(crate) fn load_scalar_pair(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    ) -> (Value, Value) {
+        let layout = self.1;
+        match self.0 {
+            CValueInner::ByRef(ptr, None) => {
+                let (a_scalar, b_scalar) = match &layout.abi {
+                    Abi::ScalarPair(a, b) => (a, b),
+                    _ => unreachable!("load_scalar_pair({:?})", self),
+                };
+                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
+                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                let val1 = ptr.load(fx, clif_ty1, flags);
+                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+                (val1, val2)
+            }
+            CValueInner::ByRef(_, Some(_)) => {
+                bug!("load_scalar_pair for unsized value not allowed")
+            }
+            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
+            CValueInner::ByValPair(val1, val2) => (val1, val2),
+        }
+    }
+
+    pub(crate) fn value_field(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        field: mir::Field,
+    ) -> CValue<'tcx> {
+        let layout = self.1;
+        match self.0 {
+            CValueInner::ByVal(val) => match layout.abi {
+                Abi::Vector { element: _, count } => {
+                    let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
+                    let field = u8::try_from(field.index()).unwrap();
+                    assert!(field < count);
+                    let lane = fx.bcx.ins().extractlane(val, field);
+                    let field_layout = layout.field(&*fx, usize::from(field));
+                    CValue::by_val(lane, field_layout)
+                }
+                _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+            },
+            CValueInner::ByValPair(val1, val2) => match layout.abi {
+                Abi::ScalarPair(_, _) => {
+                    let val = match field.as_u32() {
+                        0 => val1,
+                        1 => val2,
+                        _ => bug!("field should be 0 or 1"),
+                    };
+                    let field_layout = layout.field(&*fx, usize::from(field));
+                    CValue::by_val(val, field_layout)
+                }
+                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+            },
+            CValueInner::ByRef(ptr, None) => {
+                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
+                CValue::by_ref(field_ptr, field_layout)
+            }
+            CValueInner::ByRef(_, Some(_)) => todo!(),
+        }
+    }
+
+    pub(crate) fn unsize_value(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        dest: CPlace<'tcx>,
+    ) {
+        crate::unsize::coerce_unsized_into(fx, self, dest);
+    }
+
+    /// If `ty` is signed, `const_val` must already be sign extended.
+    pub(crate) fn const_val(
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        layout: TyAndLayout<'tcx>,
+        const_val: ty::ScalarInt,
+    ) -> CValue<'tcx> {
+        assert_eq!(const_val.size(), layout.size);
+        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
+
+        let clif_ty = fx.clif_type(layout.ty).unwrap();
+
+        if let ty::Bool = layout.ty.kind() {
+            assert!(
+                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
+                "Invalid bool 0x{:032X}",
+                const_val
+            );
+        }
+
+        let val = match layout.ty.kind() {
+            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
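+                // Cranelift has no 128-bit immediates, so the constant is built by
+                // concatenating two 64-bit halves.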
+                let const_val = const_val.to_bits(layout.size).unwrap();
+                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
+                let msb = fx
+                    .bcx
+                    .ins()
+                    .iconst(types::I64, (const_val >> 64) as u64 as i64);
+                fx.bcx.ins().iconcat(lsb, msb)
+            }
+            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
+                fx.bcx
+                    .ins()
+                    .iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
+            }
+            ty::Float(FloatTy::F32) => {
+                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
+            }
+            ty::Float(FloatTy::F64) => {
+                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
+            }
+            _ => panic!(
+                "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
+                layout.ty
+            ),
+        };
+
+        CValue::by_val(val, layout)
+    }
+
+    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
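+        // Pointer-to-pointer casts don't change the runtime representation, so only
+        // the layout is replaced; both sides must have the same ABI.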
+        assert!(matches!(
+            self.layout().ty.kind(),
+            ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
+        ));
+        assert!(matches!(
+            layout.ty.kind(),
+            ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
+        ));
+        assert_eq!(self.layout().abi, layout.abi);
+        CValue(self.0, layout)
+    }
+}
+
+/// A place where a value can be written to or read from
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CPlace<'tcx> {
+    inner: CPlaceInner,
+    layout: TyAndLayout<'tcx>,
+}
+
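+/// How a `CPlace` is represented: as one or two Cranelift variables (`Var`,
+/// `VarPair`), as a single lane of a vector variable (`VarLane`), or as a memory
+/// address with optional metadata for unsized places (`Addr`).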
+#[derive(Debug, Copy, Clone)]
+pub(crate) enum CPlaceInner {
+    Var(Local, Variable),
+    VarPair(Local, Variable, Variable),
+    VarLane(Local, Variable, u8),
+    Addr(Pointer, Option<Value>),
+}
+
+impl<'tcx> CPlace<'tcx> {
+    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    pub(crate) fn inner(&self) -> &CPlaceInner {
+        &self.inner
+    }
+
+    pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
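+        // A place without usable backing storage (e.g. for zero-sized types): the
+        // dangling pointer is suitably aligned but never meant to be dereferenced.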
+        CPlace {
+            inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
+            layout,
+        }
+    }
+
+    pub(crate) fn new_stack_slot(
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        layout: TyAndLayout<'tcx>,
+    ) -> CPlace<'tcx> {
+        assert!(!layout.is_unsized());
+        if layout.size.bytes() == 0 {
+            return CPlace::no_place(layout);
+        }
+
+        let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+            kind: StackSlotKind::ExplicitSlot,
+            size: u32::try_from(layout.size.bytes()).unwrap(),
+            offset: None,
+        });
+        CPlace {
+            inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None),
+            layout,
+        }
+    }
+
+    pub(crate) fn new_var(
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        local: Local,
+        layout: TyAndLayout<'tcx>,
+    ) -> CPlace<'tcx> {
+        let var = Variable::with_u32(fx.next_ssa_var);
+        fx.next_ssa_var += 1;
+        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
+        CPlace {
+            inner: CPlaceInner::Var(local, var),
+            layout,
+        }
+    }
+
+    pub(crate) fn new_var_pair(
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        local: Local,
+        layout: TyAndLayout<'tcx>,
+    ) -> CPlace<'tcx> {
+        let var1 = Variable::with_u32(fx.next_ssa_var);
+        fx.next_ssa_var += 1;
+        let var2 = Variable::with_u32(fx.next_ssa_var);
+        fx.next_ssa_var += 1;
+
+        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
+        fx.bcx.declare_var(var1, ty1);
+        fx.bcx.declare_var(var2, ty2);
+        CPlace {
+            inner: CPlaceInner::VarPair(local, var1, var2),
+            layout,
+        }
+    }
+
+    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
+        CPlace {
+            inner: CPlaceInner::Addr(ptr, None),
+            layout,
+        }
+    }
+
+    pub(crate) fn for_ptr_with_extra(
+        ptr: Pointer,
+        extra: Value,
+        layout: TyAndLayout<'tcx>,
+    ) -> CPlace<'tcx> {
+        CPlace {
+            inner: CPlaceInner::Addr(ptr, Some(extra)),
+            layout,
+        }
+    }
+
+    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, 'tcx, impl Module>) -> CValue<'tcx> {
+        let layout = self.layout();
+        match self.inner {
+            CPlaceInner::Var(_local, var) => {
+                let val = fx.bcx.use_var(var);
+                fx.bcx
+                    .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                CValue::by_val(val, layout)
+            }
+            CPlaceInner::VarPair(_local, var1, var2) => {
+                let val1 = fx.bcx.use_var(var1);
+                fx.bcx
+                    .set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
+                let val2 = fx.bcx.use_var(var2);
+                fx.bcx
+                    .set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
+                CValue::by_val_pair(val1, val2, layout)
+            }
+            CPlaceInner::VarLane(_local, var, lane) => {
+                let val = fx.bcx.use_var(var);
+                fx.bcx
+                    .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                let val = fx.bcx.ins().extractlane(val, lane);
+                CValue::by_val(val, layout)
+            }
+            CPlaceInner::Addr(ptr, extra) => {
+                if let Some(extra) = extra {
+                    CValue::by_ref_unsized(ptr, extra, layout)
+                } else {
+                    CValue::by_ref(ptr, layout)
+                }
+            }
+        }
+    }
+
+    pub(crate) fn to_ptr(self) -> Pointer {
+        match self.to_ptr_maybe_unsized() {
+            (ptr, None) => ptr,
+            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
+        }
+    }
+
+    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
+        match self.inner {
+            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
+            CPlaceInner::Var(_, _)
+            | CPlaceInner::VarPair(_, _, _)
+            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
+        }
+    }
+
+    pub(crate) fn write_cvalue(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        from: CValue<'tcx>,
+    ) {
+        fn assert_assignable<'tcx>(
+            fx: &FunctionCx<'_, 'tcx, impl Module>,
+            from_ty: Ty<'tcx>,
+            to_ty: Ty<'tcx>,
+        ) {
+            match (&from_ty.kind(), &to_ty.kind()) {
+                (ty::Ref(_, a, _), ty::Ref(_, b, _))
+                | (
+                    ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
+                    ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
+                ) => {
+                    assert_assignable(fx, a, b);
+                }
+                (ty::FnPtr(_), ty::FnPtr(_)) => {
+                    let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
+                        ParamEnv::reveal_all(),
+                        &from_ty.fn_sig(fx.tcx),
+                    );
+                    let to_sig = fx.tcx.normalize_erasing_late_bound_regions(
+                        ParamEnv::reveal_all(),
+                        &to_ty.fn_sig(fx.tcx),
+                    );
+                    assert_eq!(
+                        from_sig, to_sig,
+                        "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+                        from_sig, to_sig, fx,
+                    );
+                    // fn(&T) -> for<'l> fn(&'l T) is allowed
+                }
+                (ty::Dynamic(from_traits, _), ty::Dynamic(to_traits, _)) => {
+                    let from_traits = fx
+                        .tcx
+                        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from_traits);
+                    let to_traits = fx
+                        .tcx
+                        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_traits);
+                    assert_eq!(
+                        from_traits, to_traits,
+                        "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
+                        from_traits, to_traits, fx,
+                    );
+                    // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
+                }
+                _ => {
+                    assert_eq!(
+                        from_ty,
+                        to_ty,
+                        "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
+                        from_ty,
+                        to_ty,
+                        fx,
+                    );
+                }
+            }
+        }
+
+        assert_assignable(fx, from.layout().ty, self.layout().ty);
+
+        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
+    }
+
+    pub(crate) fn write_cvalue_transmute(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        from: CValue<'tcx>,
+    ) {
+        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
+    }
+
+    fn write_cvalue_maybe_transmute(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        from: CValue<'tcx>,
+        #[cfg_attr(not(debug_assertions), allow(unused_variables))] method: &'static str,
+    ) {
+        fn transmute_value<'tcx>(
+            fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+            var: Variable,
+            data: Value,
+            dst_ty: Type,
+        ) {
+            let src_ty = fx.bcx.func.dfg.value_type(data);
+            assert_eq!(
+                src_ty.bytes(),
+                dst_ty.bytes(),
+                "write_cvalue_transmute: {:?} -> {:?}",
+                src_ty,
+                dst_ty,
+            );
+            let data = match (src_ty, dst_ty) {
+                (_, _) if src_ty == dst_ty => data,
+
+                // Only reachable through `write_cvalue_transmute`; reinterpreting the
+                // bits between same-sized integer and float types is intended here.
+                (types::I32, types::F32)
+                | (types::F32, types::I32)
+                | (types::I64, types::F64)
+                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
+                _ if src_ty.is_vector() && dst_ty.is_vector() => {
+                    fx.bcx.ins().raw_bitcast(dst_ty, data)
+                }
+                _ if src_ty.is_vector() || dst_ty.is_vector() => {
+                    // FIXME do something more efficient for transmutes between vectors and integers.
+                    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+                        kind: StackSlotKind::ExplicitSlot,
+                        size: src_ty.bytes(),
+                        offset: None,
+                    });
+                    let ptr = Pointer::stack_slot(stack_slot);
+                    ptr.store(fx, data, MemFlags::trusted());
+                    ptr.load(fx, dst_ty, MemFlags::trusted())
+                }
+                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
+            };
+            fx.bcx
+                .set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
+            fx.bcx.def_var(var, data);
+        }
+
+        assert_eq!(self.layout().size, from.layout().size);
+
+        #[cfg(debug_assertions)]
+        {
+            use cranelift_codegen::cursor::{Cursor, CursorPosition};
+            let cur_block = match fx.bcx.cursor().position() {
+                CursorPosition::After(block) => block,
+                _ => unreachable!(),
+            };
+            fx.add_comment(
+                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
+                format!(
+                    "{}: {:?}: {:?} <- {:?}: {:?}",
+                    method,
+                    self.inner(),
+                    self.layout().ty,
+                    from.0,
+                    from.layout().ty
+                ),
+            );
+        }
+
+        let dst_layout = self.layout();
+        let to_ptr = match self.inner {
+            CPlaceInner::Var(_local, var) => {
+                let data = CValue(from.0, dst_layout).load_scalar(fx);
+                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
+                transmute_value(fx, var, data, dst_ty);
+                return;
+            }
+            CPlaceInner::VarPair(_local, var1, var2) => {
+                let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
+                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
+                transmute_value(fx, var1, data1, dst_ty1);
+                transmute_value(fx, var2, data2, dst_ty2);
+                return;
+            }
+            CPlaceInner::VarLane(_local, var, lane) => {
+                let data = from.load_scalar(fx);
+
+                // First get the old vector
+                let vector = fx.bcx.use_var(var);
+                fx.bcx
+                    .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+
+                // Next insert the written lane into the vector
+                let vector = fx.bcx.ins().insertlane(vector, data, lane);
+
+                // Finally write the new vector
+                fx.bcx
+                    .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                fx.bcx.def_var(var, vector);
+
+                return;
+            }
+            CPlaceInner::Addr(ptr, None) => {
+                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
+                    return;
+                }
+                ptr
+            }
+            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
+        };
+
+        let mut flags = MemFlags::new();
+        flags.set_notrap();
+        match from.layout().abi {
+            // FIXME make Abi::Vector work too
+            Abi::Scalar(_) => {
+                let val = from.load_scalar(fx);
+                to_ptr.store(fx, val, flags);
+                return;
+            }
+            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
+                let (value, extra) = from.load_scalar_pair(fx);
+                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+                to_ptr.store(fx, value, flags);
+                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
+                return;
+            }
+            _ => {}
+        }
+
+        match from.0 {
+            CValueInner::ByVal(val) => {
+                to_ptr.store(fx, val, flags);
+            }
+            CValueInner::ByValPair(_, _) => {
+                bug!(
+                    "Non ScalarPair abi {:?} for ByValPair CValue",
+                    dst_layout.abi
+                );
+            }
+            CValueInner::ByRef(from_ptr, None) => {
+                let from_addr = from_ptr.get_addr(fx);
+                let to_addr = to_ptr.get_addr(fx);
+                let src_layout = from.1;
+                let size = dst_layout.size.bytes();
+                let src_align = src_layout.align.abi.bytes() as u8;
+                let dst_align = dst_layout.align.abi.bytes() as u8;
+                fx.bcx.emit_small_memory_copy(
+                    fx.cx.module.target_config(),
+                    to_addr,
+                    from_addr,
+                    size,
+                    dst_align,
+                    src_align,
+                    true,
+                );
+            }
+            CValueInner::ByRef(_, Some(_)) => todo!(),
+        }
+    }
+
+    pub(crate) fn place_field(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        field: mir::Field,
+    ) -> CPlace<'tcx> {
+        let layout = self.layout();
+
+        match self.inner {
+            CPlaceInner::Var(local, var) => {
+                if let Abi::Vector { .. } = layout.abi {
+                    return CPlace {
+                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
+                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+                    };
+                }
+            }
+            CPlaceInner::VarPair(local, var1, var2) => {
+                let layout = layout.field(&*fx, field.index());
+
+                match field.as_u32() {
+                    0 => {
+                        return CPlace {
+                            inner: CPlaceInner::Var(local, var1),
+                            layout,
+                        }
+                    }
+                    1 => {
+                        return CPlace {
+                            inner: CPlaceInner::Var(local, var2),
+                            layout,
+                        }
+                    }
+                    _ => unreachable!("field should be 0 or 1"),
+                }
+            }
+            _ => {}
+        }
+
+        let (base, extra) = self.to_ptr_maybe_unsized();
+
+        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
+        if field_layout.is_unsized() {
+            CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
+        } else {
+            CPlace::for_ptr(field_ptr, field_layout)
+        }
+    }
+
+    pub(crate) fn place_index(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        index: Value,
+    ) -> CPlace<'tcx> {
+        let (elem_layout, ptr) = match self.layout().ty.kind() {
+            ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr()),
+            ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized().0),
+            _ => bug!("place_index({:?})", self.layout().ty),
+        };
+
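+        // Byte offset of element `index`: `index * size_of::<Elem>()`.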
+        let offset = fx
+            .bcx
+            .ins()
+            .imul_imm(index, elem_layout.size.bytes() as i64);
+
+        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
+    }
+
+    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, 'tcx, impl Module>) -> CPlace<'tcx> {
+        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
+        if has_ptr_meta(fx.tcx, inner_layout.ty) {
+            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
+            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
+        } else {
+            CPlace::for_ptr(
+                Pointer::new(self.to_cvalue(fx).load_scalar(fx)),
+                inner_layout,
+            )
+        }
+    }
+
+    pub(crate) fn place_ref(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+        layout: TyAndLayout<'tcx>,
+    ) -> CValue<'tcx> {
+        if has_ptr_meta(fx.tcx, self.layout().ty) {
+            let (ptr, extra) = self.to_ptr_maybe_unsized();
+            CValue::by_val_pair(
+                ptr.get_addr(fx),
+                extra.expect("unsized type without metadata"),
+                layout,
+            )
+        } else {
+            CValue::by_val(self.to_ptr().get_addr(fx), layout)
+        }
+    }
+
+    pub(crate) fn downcast_variant(
+        self,
+        fx: &FunctionCx<'_, 'tcx, impl Module>,
+        variant: VariantIdx,
+    ) -> Self {
+        assert!(!self.layout().is_unsized());
+        let layout = self.layout().for_variant(fx, variant);
+        CPlace {
+            inner: self.inner,
+            layout,
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
new file mode 100644
index 0000000..238abc0
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -0,0 +1,186 @@
+//! Codegen vtables and vtable accesses.
+//!
+//! See `librustc_codegen_llvm/meth.rs` for reference.
+// FIXME dedup this logic between miri, cg_llvm and cg_clif
+
+use crate::prelude::*;
+
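+// A vtable is laid out as [drop_in_place, size, align, method 0, method 1, ...],
+// with each entry one pointer (usize) wide.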
+const DROP_FN_INDEX: usize = 0;
+const SIZE_INDEX: usize = 1;
+const ALIGN_INDEX: usize = 2;
+
+fn vtable_memflags() -> MemFlags {
+    let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
+    flags.set_readonly(); // A vtable is always read-only.
+    flags
+}
+
+pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
+    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+    fx.bcx.ins().load(
+        pointer_ty(fx.tcx),
+        vtable_memflags(),
+        vtable,
+        (DROP_FN_INDEX * usize_size) as i32,
+    )
+}
+
+pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
+    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+    fx.bcx.ins().load(
+        pointer_ty(fx.tcx),
+        vtable_memflags(),
+        vtable,
+        (SIZE_INDEX * usize_size) as i32,
+    )
+}
+
+pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
+    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+    fx.bcx.ins().load(
+        pointer_ty(fx.tcx),
+        vtable_memflags(),
+        vtable,
+        (ALIGN_INDEX * usize_size) as i32,
+    )
+}
+
+pub(crate) fn get_ptr_and_method_ref<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    arg: CValue<'tcx>,
+    idx: usize,
+) -> (Value, Value) {
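+    // A trait object is either passed as an SSA scalar pair (data pointer, vtable
+    // pointer) or spilled to memory, in which case the vtable pointer is the
+    // unsized metadata of the place.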
+    let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
+        arg.load_scalar_pair(fx)
+    } else {
+        let (ptr, vtable) = arg.try_to_ptr().unwrap();
+        (ptr.get_addr(fx), vtable.unwrap())
+    };
+
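+    // The first three vtable slots hold drop_in_place, size and align; trait
+    // methods start at index 3, hence the `idx + 3` below.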
+    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
+    let func_ref = fx.bcx.ins().load(
+        pointer_ty(fx.tcx),
+        vtable_memflags(),
+        vtable,
+        ((idx + 3) * usize_size as usize) as i32,
+    );
+    (ptr, func_ref)
+}
+
+pub(crate) fn get_vtable<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    layout: TyAndLayout<'tcx>,
+    trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> Value {
+    let data_id = if let Some(data_id) = fx.cx.vtables.get(&(layout.ty, trait_ref)) {
+        *data_id
+    } else {
+        let data_id = build_vtable(fx, layout, trait_ref);
+        fx.cx.vtables.insert((layout.ty, trait_ref), data_id);
+        data_id
+    };
+
+    let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+    fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+}
+
+fn build_vtable<'tcx>(
+    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+    layout: TyAndLayout<'tcx>,
+    trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> DataId {
+    let tcx = fx.tcx;
+    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+
+    let drop_in_place_fn = import_function(
+        tcx,
+        &mut fx.cx.module,
+        Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.tcx),
+    );
+
+    let mut components: Vec<_> = vec![Some(drop_in_place_fn), None, None];
+
+    let methods_root;
+    let methods = if let Some(trait_ref) = trait_ref {
+        methods_root = tcx.vtable_methods(trait_ref.with_self_ty(tcx, layout.ty));
+        methods_root.iter()
+    } else {
+        (&[]).iter()
+    };
+    let methods = methods.cloned().map(|opt_mth| {
+        opt_mth.map(|(def_id, substs)| {
+            import_function(
+                tcx,
+                &mut fx.cx.module,
+                Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs)
+                    .unwrap()
+                    .polymorphize(fx.tcx),
+            )
+        })
+    });
+    components.extend(methods);
+
+    let mut data_ctx = DataContext::new();
+    let mut data = ::std::iter::repeat(0u8)
+        .take(components.len() * usize_size)
+        .collect::<Vec<u8>>()
+        .into_boxed_slice();
+
+    write_usize(fx.tcx, &mut data, SIZE_INDEX, layout.size.bytes());
+    write_usize(fx.tcx, &mut data, ALIGN_INDEX, layout.align.abi.bytes());
+    data_ctx.define(data);
+
+    for (i, component) in components.into_iter().enumerate() {
+        if let Some(func_id) = component {
+            let func_ref = fx.cx.module.declare_func_in_data(func_id, &mut data_ctx);
+            data_ctx.write_function_addr((i * usize_size) as u32, func_ref);
+        }
+    }
+
+    data_ctx.set_align(fx.tcx.data_layout.pointer_align.pref.bytes());
+
+    let data_id = fx
+        .cx
+        .module
+        .declare_data(
+            &format!(
+                "__vtable.{}.for.{:?}.{}",
+                trait_ref
+                    .as_ref()
+                    .map(|trait_ref| format!("{:?}", trait_ref.skip_binder()).into())
+                    .unwrap_or(std::borrow::Cow::Borrowed("???")),
+                layout.ty,
+                fx.cx.vtables.len(),
+            ),
+            Linkage::Local,
+            false,
+            false,
+        )
+        .unwrap();
+
+    fx.cx.module.define_data(data_id, &data_ctx).unwrap();
+
+    data_id
+}
+
+fn write_usize(tcx: TyCtxt<'_>, buf: &mut [u8], idx: usize, num: u64) {
+    let pointer_size = tcx
+        .layout_of(ParamEnv::reveal_all().and(tcx.types.usize))
+        .unwrap()
+        .size
+        .bytes() as usize;
+    let target = &mut buf[idx * pointer_size..(idx + 1) * pointer_size];
+
+    match tcx.data_layout.endian {
+        rustc_target::abi::Endian::Little => match pointer_size {
+            4 => target.copy_from_slice(&(num as u32).to_le_bytes()),
+            8 => target.copy_from_slice(&(num as u64).to_le_bytes()),
+            _ => todo!("pointer size {} is not yet supported", pointer_size),
+        },
+        rustc_target::abi::Endian::Big => match pointer_size {
+            4 => target.copy_from_slice(&(num as u32).to_be_bytes()),
+            8 => target.copy_from_slice(&(num as u64).to_be_bytes()),
+            _ => todo!("pointer size {} is not yet supported", pointer_size),
+        },
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/test.sh b/compiler/rustc_codegen_cranelift/test.sh
new file mode 100755
index 0000000..c6c4956
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/test.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+export RUSTFLAGS="-Zrun_dsymutil=no"
+
+./build.sh --without-sysroot "$@"
+
+rm -r target/out || true
+
+scripts/tests.sh no_sysroot
+
+./build.sh "$@"
+
+scripts/tests.sh base_sysroot
+scripts/tests.sh extended_sysroot
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
index 04792b3..f937364 100644
--- a/compiler/rustc_codegen_llvm/Cargo.toml
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -11,11 +11,11 @@
 [dependencies]
 bitflags = "1.0"
 libc = "0.2"
-measureme = "0.7.1"
+measureme = "9.0.0"
 snap = "1"
 tracing = "0.1"
 rustc_middle = { path = "../rustc_middle" }
-rustc-demangle = "0.1"
+rustc-demangle = "0.1.18"
 rustc_attr = { path = "../rustc_attr" }
 rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
 rustc_data_structures = { path = "../rustc_data_structures" }
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index bc1d9e1..a5ea0b2 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -3,17 +3,23 @@
 use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
 use rustc_middle::bug;
 use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
 
 use crate::llvm::{self, False, True};
 use crate::ModuleLlvm;
 
-pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut ModuleLlvm, kind: AllocatorKind) {
+pub(crate) unsafe fn codegen(
+    tcx: TyCtxt<'_>,
+    mods: &mut ModuleLlvm,
+    kind: AllocatorKind,
+    has_alloc_error_handler: bool,
+) {
     let llcx = &*mods.llcx;
     let llmod = mods.llmod();
-    let usize = match &tcx.sess.target.target.target_pointer_width[..] {
-        "16" => llvm::LLVMInt16TypeInContext(llcx),
-        "32" => llvm::LLVMInt32TypeInContext(llcx),
-        "64" => llvm::LLVMInt64TypeInContext(llcx),
+    let usize = match tcx.sess.target.pointer_width {
+        16 => llvm::LLVMInt16TypeInContext(llcx),
+        32 => llvm::LLVMInt32TypeInContext(llcx),
+        64 => llvm::LLVMInt64TypeInContext(llcx),
         tws => bug!("Unsupported target word size for int: {}", tws),
     };
     let i8 = llvm::LLVMInt8TypeInContext(llcx);
@@ -51,7 +57,7 @@
         let name = format!("__rust_{}", method.name);
         let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
 
-        if tcx.sess.target.target.options.default_hidden_visibility {
+        if tcx.sess.target.default_hidden_visibility {
             llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
         }
         if tcx.sess.must_emit_unwind_tables() {
@@ -82,4 +88,41 @@
         }
         llvm::LLVMDisposeBuilder(llbuilder);
     }
+
+    // rust alloc error handler
+    let args = [usize, usize]; // size, align
+
+    let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
+    let name = "__rust_alloc_error_handler".to_string();
+    let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
+    // -> ! DIFlagNoReturn
+    llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
+
+    if tcx.sess.target.default_hidden_visibility {
+        llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+    }
+    if tcx.sess.must_emit_unwind_tables() {
+        attributes::emit_uwtable(llfn, true);
+    }
+
+    let kind = if has_alloc_error_handler { AllocatorKind::Global } else { AllocatorKind::Default };
+    let callee = kind.fn_name(sym::oom);
+    let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
+    // -> ! DIFlagNoReturn
+    llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, callee);
+    llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+    let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+    let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+    llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+    let args = args
+        .iter()
+        .enumerate()
+        .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+        .collect::<Vec<_>>();
+    let ret = llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
+    llvm::LLVMSetTailCall(ret, True);
+    llvm::LLVMBuildRetVoid(llbuilder);
+    llvm::LLVMDisposeBuilder(llbuilder);
 }
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index f801f84..b5d279e 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -12,8 +12,8 @@
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir as hir;
-use rustc_middle::span_bug;
 use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::{bug, span_bug};
 use rustc_span::{Pos, Span};
 use rustc_target::abi::*;
 use rustc_target::asm::*;
@@ -60,7 +60,7 @@
 
         // Default per-arch clobbers
         // Basically what clang does
-        let arch_clobbers = match &self.sess().target.target.arch[..] {
+        let arch_clobbers = match &self.sess().target.arch[..] {
             "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
             "mips" | "mips64" => vec!["~{$1}"],
             _ => Vec::new(),
@@ -259,7 +259,8 @@
                 InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
                 InlineAsmArch::Nvptx64 => {}
                 InlineAsmArch::Hexagon => {}
-                InlineAsmArch::Mips => {}
+                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
+                InlineAsmArch::SpirV => {}
             }
         }
         if !options.contains(InlineAsmOptions::NOMEM) {
@@ -302,13 +303,11 @@
             } else if options.contains(InlineAsmOptions::READONLY) {
                 llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
             }
+        } else if options.contains(InlineAsmOptions::NOMEM) {
+            llvm::Attribute::InaccessibleMemOnly
+                .apply_callsite(llvm::AttributePlace::Function, result);
         } else {
-            if options.contains(InlineAsmOptions::NOMEM) {
-                llvm::Attribute::InaccessibleMemOnly
-                    .apply_callsite(llvm::AttributePlace::Function, result);
-            } else {
-                // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
-            }
+            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
         }
 
         // Write results to outputs
@@ -520,6 +519,9 @@
             | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
             InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
             InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
+            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+                bug!("LLVM backend does not support SPIR-V")
+            }
         }
         .to_string(),
     }
@@ -582,6 +584,9 @@
             _ => unreachable!(),
         },
         InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("LLVM backend does not support SPIR-V")
+        }
     }
 }
 
@@ -621,6 +626,9 @@
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
         InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("LLVM backend does not support SPIR-V")
+        }
     }
 }
 
@@ -710,6 +718,7 @@
             // MIPS only supports register-length arithmetics.
             Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
             Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
             _ => value,
         },
         _ => value,
@@ -785,6 +794,7 @@
             Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
             Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
             Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
             _ => value,
         },
         _ => value,
@@ -854,6 +864,7 @@
             // MIPS only supports register-length arithmetics.
             Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
             Primitive::F32 => cx.type_i32(),
+            Primitive::F64 => cx.type_i64(),
             _ => layout.llvm_type(cx),
         },
         _ => layout.llvm_type(cx),
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 73c3481..87bcce0 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -6,7 +6,7 @@
 use rustc_data_structures::const_cstr;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::small_c_str::SmallCStr;
-use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::query::Providers;
@@ -18,7 +18,7 @@
 use crate::llvm::AttributePlace::Function;
 use crate::llvm::{self, Attribute};
 use crate::llvm_util;
-pub use rustc_attr::{InlineAttr, OptimizeAttr};
+pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
 
 use crate::context::CodegenCx;
 use crate::value::Value;
@@ -31,7 +31,7 @@
         Hint => Attribute::InlineHint.apply_llfn(Function, val),
         Always => Attribute::AlwaysInline.apply_llfn(Function, val),
         Never => {
-            if cx.tcx().sess.target.target.arch != "amdgpu" {
+            if cx.tcx().sess.target.arch != "amdgpu" {
                 Attribute::NoInline.apply_llfn(Function, val);
             }
         }
@@ -90,9 +90,7 @@
 
         // The function name varies on platforms.
         // See test/CodeGen/mcount.c in clang.
-        let mcount_name =
-            CString::new(cx.sess().target.target.options.target_mcount.as_str().as_bytes())
-                .unwrap();
+        let mcount_name = CString::new(cx.sess().target.mcount.as_str().as_bytes()).unwrap();
 
         llvm::AddFunctionAttrStringValue(
             llfn,
@@ -106,7 +104,7 @@
 fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
     // Only use stack probes if the target specification indicates that we
     // should be using stack probes
-    if !cx.sess().target.target.options.stack_probes {
+    if !cx.sess().target.stack_probes {
         return;
     }
 
@@ -175,8 +173,6 @@
         .split(',')
         .filter(|f| !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)));
     sess.target
-        .target
-        .options
         .features
         .split(',')
         .chain(cmdline)
@@ -194,6 +190,18 @@
     );
 }
 
+pub fn apply_tune_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+    if let Some(tune) = llvm_util::tune_cpu(cx.tcx.sess) {
+        let tune_cpu = SmallCStr::new(tune);
+        llvm::AddFunctionAttrStringValue(
+            llfn,
+            llvm::AttributePlace::Function,
+            const_cstr!("tune-cpu"),
+            tune_cpu.as_c_str(),
+        );
+    }
+}
+
 /// Sets the `NonLazyBind` LLVM attribute on a given function,
 /// assuming the codegen options allow skipping the PLT.
 pub fn non_lazy_bind(sess: &Session, llfn: &'ll Value) {
@@ -303,6 +311,9 @@
     // Without this, ThinLTO won't inline Rust functions into Clang generated
     // functions (because Clang annotates functions this way too).
     apply_target_cpu_attr(cx, llfn);
+    // tune-cpu is only conveyed through the attribute for our purpose.
+    // The target doesn't care; the subtarget reads our attribute.
+    apply_tune_cpu_attr(cx, llfn);
 
     let features = llvm_target_features(cx.tcx.sess)
         .map(|s| s.to_string())
@@ -310,6 +321,10 @@
             let feature = &f.as_str();
             format!("+{}", llvm_util::to_llvm_feature(cx.tcx.sess, feature))
         }))
+        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
+            InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
+            InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
+        }))
         .collect::<Vec<String>>()
         .join(",");
 
@@ -326,7 +341,7 @@
     // Note that currently the `wasm-import-module` doesn't do anything, but
     // eventually LLVM 7 should read this and ferry the appropriate import
     // module to the output file.
-    if cx.tcx.sess.target.target.arch == "wasm32" {
+    if cx.tcx.sess.target.arch == "wasm32" {
         if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
             llvm::AddFunctionAttrStringValue(
                 llfn,
@@ -348,25 +363,7 @@
     }
 }
 
-pub fn provide(providers: &mut Providers) {
-    providers.supported_target_features = |tcx, cnum| {
-        assert_eq!(cnum, LOCAL_CRATE);
-        if tcx.sess.opts.actually_rustdoc {
-            // rustdoc needs to be able to document functions that use all the features, so
-            // provide them all.
-            llvm_util::all_known_features().map(|(a, b)| (a.to_string(), b)).collect()
-        } else {
-            llvm_util::supported_target_features(tcx.sess)
-                .iter()
-                .map(|&(a, b)| (a.to_string(), b))
-                .collect()
-        }
-    };
-
-    provide_extern(providers);
-}
-
-pub fn provide_extern(providers: &mut Providers) {
+pub fn provide_both(providers: &mut Providers) {
     providers.wasm_import_module_map = |tcx, cnum| {
         // Build up a map from DefId to a `NativeLib` structure, where
         // `NativeLib` internally contains information about
@@ -379,8 +376,8 @@
             .collect::<FxHashMap<_, _>>();
 
         let mut ret = FxHashMap::default();
-        for lib in tcx.foreign_modules(cnum).iter() {
-            let module = def_id_to_native_lib.get(&lib.def_id).and_then(|s| s.wasm_import_module);
+        for (def_id, lib) in tcx.foreign_modules(cnum).iter() {
+            let module = def_id_to_native_lib.get(&def_id).and_then(|s| s.wasm_import_module);
             let module = match module {
                 Some(s) => s,
                 None => continue,
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index a115a1e..4e72138 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -206,7 +206,7 @@
     }
 
     fn llvm_archive_kind(&self) -> Result<ArchiveKind, &str> {
-        let kind = &*self.config.sess.target.target.options.archive_format;
+        let kind = &*self.config.sess.target.archive_format;
         kind.parse().map_err(|_| kind)
     }
 
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 4b2d590..64fd1d0 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -2,14 +2,14 @@
     self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
 };
 use crate::llvm::archive_ro::ArchiveRO;
-use crate::llvm::{self, False, True};
+use crate::llvm::{self, build_string, False, True};
 use crate::{LlvmCodegenBackend, ModuleLlvm};
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
 use rustc_codegen_ssa::back::symbol_export;
 use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{FatalError, Handler};
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::bug;
@@ -22,16 +22,14 @@
 use std::ffi::{CStr, CString};
 use std::fs::File;
 use std::io;
-use std::mem;
 use std::path::Path;
 use std::ptr;
 use std::slice;
 use std::sync::Arc;
 
-/// We keep track of past LTO imports that were used to produce the current set
-/// of compiled object files that we might choose to reuse during this
-/// compilation session.
-pub const THIN_LTO_IMPORTS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-imports.bin";
+/// We keep track of the computed LTO cache keys from the previous
+/// session to determine which CGUs we can reuse.
+pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
 
 pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
     match crate_type {
@@ -485,31 +483,31 @@
         )
         .ok_or_else(|| write::llvm_err(&diag_handler, "failed to prepare thin LTO context"))?;
 
+        let data = ThinData(data);
+
         info!("thin LTO data created");
 
-        let (import_map_path, prev_import_map, curr_import_map) =
-            if let Some(ref incr_comp_session_dir) = cgcx.incr_comp_session_dir {
-                let path = incr_comp_session_dir.join(THIN_LTO_IMPORTS_INCR_COMP_FILE_NAME);
-                // If previous imports have been deleted, or we get an IO error
-                // reading the file storing them, then we'll just use `None` as the
-                // prev_import_map, which will force the code to be recompiled.
-                let prev = if path.exists() {
-                    ThinLTOImportMaps::load_from_file(&path).ok()
-                } else {
-                    None
-                };
-                let curr = ThinLTOImportMaps::from_thin_lto_data(data);
-                (Some(path), prev, curr)
-            } else {
-                // If we don't compile incrementally, we don't need to load the
-                // import data from LLVM.
-                assert!(green_modules.is_empty());
-                let curr = ThinLTOImportMaps::default();
-                (None, None, curr)
-            };
-        info!("thin LTO import map loaded");
-
-        let data = ThinData(data);
+        let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
+            cgcx.incr_comp_session_dir
+        {
+            let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
+            // If the previous file was deleted, or we get an IO error
+            // reading the file, then we'll just use `None` as the
+            // prev_key_map, which will force the code to be recompiled.
+            let prev =
+                if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
+            let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
+            (Some(path), prev, curr)
+        } else {
+            // If we don't compile incrementally, we don't need to load the
+            // import data from LLVM.
+            assert!(green_modules.is_empty());
+            let curr = ThinLTOKeysMap::default();
+            (None, None, curr)
+        };
+        info!("thin LTO cache key map loaded");
+        info!("prev_key_map: {:#?}", prev_key_map);
+        info!("curr_key_map: {:#?}", curr_key_map);
 
         // Throw our data in an `Arc` as we'll be sharing it across threads. We
         // also put all memory referenced by the C++ data (buffers, ids, etc)
@@ -528,60 +526,14 @@
         info!("checking which modules can be-reused and which have to be re-optimized.");
         for (module_index, module_name) in shared.module_names.iter().enumerate() {
             let module_name = module_name_to_str(module_name);
-
-            // If (1.) the module hasn't changed, and (2.) none of the modules
-            // it imports from have changed, *and* (3.) the import and export
-            // sets themselves have not changed from the previous compile when
-            // it was last ThinLTO'ed, then we can re-use the post-ThinLTO
-            // version of the module. Otherwise, freshly perform LTO
-            // optimization.
-            //
-            // (Note that globally, the export set is just the inverse of the
-            // import set.)
-            //
-            // For further justification of why the above is necessary and sufficient,
-            // see the LLVM blog post on ThinLTO:
-            //
-            // http://blog.llvm.org/2016/06/thinlto-scalable-and-incremental-lto.html
-            //
-            // which states the following:
-            //
-            // ```quote
-            // any particular ThinLTO backend must be redone iff:
-            //
-            // 1. The corresponding (primary) module’s bitcode changed
-            // 2. The list of imports into or exports from the module changed
-            // 3. The bitcode for any module being imported from has changed
-            // 4. Any global analysis result affecting either the primary module
-            //    or anything it imports has changed.
-            // ```
-            //
-            // This strategy means we can always save the computed imports as
-            // canon: when we reuse the post-ThinLTO version, condition (3.)
-            // ensures that the current import set is the same as the previous
-            // one. (And of course, when we don't reuse the post-ThinLTO
-            // version, the current import set *is* the correct one, since we
-            // are doing the ThinLTO in this current compilation cycle.)
-            //
-            // For more discussion, see rust-lang/rust#59535 (where the import
-            // issue was discovered) and rust-lang/rust#69798 (where the
-            // analogous export issue was discovered).
-            if let (Some(prev_import_map), true) =
-                (prev_import_map.as_ref(), green_modules.contains_key(module_name))
+            if let (Some(prev_key_map), true) =
+                (prev_key_map.as_ref(), green_modules.contains_key(module_name))
             {
                 assert!(cgcx.incr_comp_session_dir.is_some());
 
-                let prev_imports = prev_import_map.imports_of(module_name);
-                let curr_imports = curr_import_map.imports_of(module_name);
-                let prev_exports = prev_import_map.exports_of(module_name);
-                let curr_exports = curr_import_map.exports_of(module_name);
-                let imports_all_green = curr_imports
-                    .iter()
-                    .all(|imported_module| green_modules.contains_key(imported_module));
-                if imports_all_green
-                    && equivalent_as_sets(prev_imports, curr_imports)
-                    && equivalent_as_sets(prev_exports, curr_exports)
-                {
+                // If a module exists in both the current and the previous session,
+                // and has the same LTO cache key in both sessions, then we can re-use it
+                if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
                     let work_product = green_modules[module_name].clone();
                     copy_jobs.push(work_product);
                     info!(" - {}: re-used", module_name);
@@ -599,10 +551,10 @@
         }
 
         // Save the current ThinLTO import information for the next compilation
-        // session, overwriting the previous serialized imports (if any).
-        if let Some(path) = import_map_path {
-            if let Err(err) = curr_import_map.save_to_file(&path) {
-                let msg = format!("Error while writing ThinLTO import data: {}", err);
+        // session, overwriting the previous serialized data (if any).
+        if let Some(path) = key_map_path {
+            if let Err(err) = curr_key_map.save_to_file(&path) {
+                let msg = format!("Error while writing ThinLTO key data: {}", err);
                 return Err(write::llvm_err(&diag_handler, &msg));
             }
         }
@@ -611,24 +563,6 @@
     }
 }
 
-/// Given two slices, each with no repeat elements. returns true if and only if
-/// the two slices have the same contents when considered as sets (i.e. when
-/// element order is disregarded).
-fn equivalent_as_sets(a: &[String], b: &[String]) -> bool {
-    // cheap path: unequal lengths means cannot possibly be set equivalent.
-    if a.len() != b.len() {
-        return false;
-    }
-    // fast path: before building new things, check if inputs are equivalent as is.
-    if a == b {
-        return true;
-    }
-    // slow path: general set comparison.
-    let a: FxHashSet<&str> = a.iter().map(|s| s.as_str()).collect();
-    let b: FxHashSet<&str> = b.iter().map(|s| s.as_str()).collect();
-    a == b
-}
-
 pub(crate) fn run_pass_manager(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
     module: &ModuleCodegen<ModuleLlvm>,
@@ -942,113 +876,56 @@
     Ok(module)
 }
 
-/// Summarizes module import/export relationships used by LLVM's ThinLTO pass.
-///
-/// Note that we tend to have two such instances of `ThinLTOImportMaps` in use:
-/// one loaded from a file that represents the relationships used during the
-/// compilation associated with the incremetnal build artifacts we are
-/// attempting to reuse, and another constructed via `from_thin_lto_data`, which
-/// captures the relationships of ThinLTO in the current compilation.
+/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
 #[derive(Debug, Default)]
-pub struct ThinLTOImportMaps {
-    // key = llvm name of importing module, value = list of modules it imports from
-    imports: FxHashMap<String, Vec<String>>,
-    // key = llvm name of exporting module, value = list of modules it exports to
-    exports: FxHashMap<String, Vec<String>>,
+pub struct ThinLTOKeysMap {
+    // key = llvm name of the module, value = LLVM LTO cache key
+    keys: FxHashMap<String, String>,
 }
 
-impl ThinLTOImportMaps {
-    /// Returns modules imported by `llvm_module_name` during some ThinLTO pass.
-    fn imports_of(&self, llvm_module_name: &str) -> &[String] {
-        self.imports.get(llvm_module_name).map(|v| &v[..]).unwrap_or(&[])
-    }
-
-    /// Returns modules exported by `llvm_module_name` during some ThinLTO pass.
-    fn exports_of(&self, llvm_module_name: &str) -> &[String] {
-        self.exports.get(llvm_module_name).map(|v| &v[..]).unwrap_or(&[])
-    }
-
+impl ThinLTOKeysMap {
     fn save_to_file(&self, path: &Path) -> io::Result<()> {
         use std::io::Write;
         let file = File::create(path)?;
         let mut writer = io::BufWriter::new(file);
-        for (importing_module_name, imported_modules) in &self.imports {
-            writeln!(writer, "{}", importing_module_name)?;
-            for imported_module in imported_modules {
-                writeln!(writer, " {}", imported_module)?;
-            }
-            writeln!(writer)?;
+        for (module, key) in &self.keys {
+            writeln!(writer, "{} {}", module, key)?;
         }
         Ok(())
     }
 
-    fn load_from_file(path: &Path) -> io::Result<ThinLTOImportMaps> {
+    fn load_from_file(path: &Path) -> io::Result<Self> {
         use std::io::BufRead;
-        let mut imports = FxHashMap::default();
-        let mut exports: FxHashMap<_, Vec<_>> = FxHashMap::default();
-        let mut current_module: Option<String> = None;
-        let mut current_imports: Vec<String> = vec![];
+        let mut keys = FxHashMap::default();
         let file = File::open(path)?;
         for line in io::BufReader::new(file).lines() {
             let line = line?;
-            if line.is_empty() {
-                let importing_module = current_module.take().expect("Importing module not set");
-                for imported in &current_imports {
-                    exports.entry(imported.clone()).or_default().push(importing_module.clone());
-                }
-                imports.insert(importing_module, mem::replace(&mut current_imports, vec![]));
-            } else if line.starts_with(' ') {
-                // Space marks an imported module
-                assert_ne!(current_module, None);
-                current_imports.push(line.trim().to_string());
-            } else {
-                // Otherwise, beginning of a new module (must be start or follow empty line)
-                assert_eq!(current_module, None);
-                current_module = Some(line.trim().to_string());
-            }
+            let mut split = line.split(' ');
+            let module = split.next().unwrap();
+            let key = split.next().unwrap();
+            assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
+            keys.insert(module.to_string(), key.to_string());
         }
-        Ok(ThinLTOImportMaps { imports, exports })
+        Ok(Self { keys })
     }
 
-    /// Loads the ThinLTO import map from ThinLTOData.
-    unsafe fn from_thin_lto_data(data: *const llvm::ThinLTOData) -> ThinLTOImportMaps {
-        unsafe extern "C" fn imported_module_callback(
-            payload: *mut libc::c_void,
-            importing_module_name: *const libc::c_char,
-            imported_module_name: *const libc::c_char,
-        ) {
-            let map = &mut *(payload as *mut ThinLTOImportMaps);
-            let importing_module_name = CStr::from_ptr(importing_module_name);
-            let importing_module_name = module_name_to_str(&importing_module_name);
-            let imported_module_name = CStr::from_ptr(imported_module_name);
-            let imported_module_name = module_name_to_str(&imported_module_name);
-
-            if !map.imports.contains_key(importing_module_name) {
-                map.imports.insert(importing_module_name.to_owned(), vec![]);
-            }
-
-            map.imports
-                .get_mut(importing_module_name)
-                .unwrap()
-                .push(imported_module_name.to_owned());
-
-            if !map.exports.contains_key(imported_module_name) {
-                map.exports.insert(imported_module_name.to_owned(), vec![]);
-            }
-
-            map.exports
-                .get_mut(imported_module_name)
-                .unwrap()
-                .push(importing_module_name.to_owned());
-        }
-
-        let mut map = ThinLTOImportMaps::default();
-        llvm::LLVMRustGetThinLTOModuleImports(
-            data,
-            imported_module_callback,
-            &mut map as *mut _ as *mut libc::c_void,
-        );
-        map
+    fn from_thin_lto_modules(
+        data: &ThinData,
+        modules: &[llvm::ThinLTOModule],
+        names: &[CString],
+    ) -> Self {
+        let keys = modules
+            .iter()
+            .zip(names.iter())
+            .map(|(module, name)| {
+                let key = build_string(|rust_str| unsafe {
+                    llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
+                })
+                .expect("Invalid ThinLTO module key");
+                (name.clone().into_string().unwrap(), key)
+            })
+            .collect();
+        Self { keys }
     }
 }
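
The save/load pair above stores the key map as plain text, one "module key" pair per line separated by a single space. A self-contained sketch of the same round trip, assuming nothing from the compiler (std collections only, illustrative module and key names):

    use std::collections::HashMap;
    use std::io::{self, BufRead, Write};

    // Serialize a module -> cache-key map, one "module key" pair per line.
    fn save_keys(mut out: impl Write, keys: &HashMap<String, String>) -> io::Result<()> {
        for (module, key) in keys {
            writeln!(out, "{} {}", module, key)?;
        }
        Ok(())
    }

    // Parse it back; each line must contain exactly two space-separated fields.
    fn load_keys(input: impl BufRead) -> io::Result<HashMap<String, String>> {
        let mut keys = HashMap::new();
        for line in input.lines() {
            let line = line?;
            let mut split = line.split(' ');
            let module = split.next().unwrap();
            let key = split.next().unwrap();
            assert_eq!(split.next(), None, "expected two fields, found {:?}", line);
            keys.insert(module.to_string(), key.to_string());
        }
        Ok(keys)
    }

    fn main() -> io::Result<()> {
        let mut keys = HashMap::new();
        keys.insert("mylib.xxxx-cgu.0".to_string(), "3d2b3c0e9f".to_string());
        let mut buf = Vec::new();
        save_keys(&mut buf, &keys)?;
        let round_tripped = load_keys(&buf[..])?;
        assert_eq!(keys, round_tripped); // loading what save_keys wrote gives the same map
        Ok(())
    }
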
 
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index f35c101..e6acb68 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -128,40 +128,39 @@
     let (opt_level, _) = to_llvm_opt_settings(optlvl);
     let use_softfp = sess.opts.cg.soft_float;
 
-    let ffunction_sections = sess.target.target.options.function_sections;
+    let ffunction_sections =
+        sess.opts.debugging_opts.function_sections.unwrap_or(sess.target.function_sections);
     let fdata_sections = ffunction_sections;
 
     let code_model = to_llvm_code_model(sess.code_model());
 
     let features = attributes::llvm_target_features(sess).collect::<Vec<_>>();
-    let mut singlethread = sess.target.target.options.singlethread;
+    let mut singlethread = sess.target.singlethread;
 
     // On the wasm target once the `atomics` feature is enabled that means that
     // we're no longer single-threaded, or otherwise we don't want LLVM to
     // lower atomic operations to single-threaded operations.
     if singlethread
-        && sess.target.target.llvm_target.contains("wasm32")
+        && sess.target.llvm_target.contains("wasm32")
         && sess.target_features.contains(&sym::atomics)
     {
         singlethread = false;
     }
 
-    let triple = SmallCStr::new(&sess.target.target.llvm_target);
+    let triple = SmallCStr::new(&sess.target.llvm_target);
     let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
     let features = features.join(",");
     let features = CString::new(features).unwrap();
-    let abi = SmallCStr::new(&sess.target.target.options.llvm_abiname);
-    let trap_unreachable = sess.target.target.options.trap_unreachable;
+    let abi = SmallCStr::new(&sess.target.llvm_abiname);
+    let trap_unreachable = sess.target.trap_unreachable;
     let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;
 
     let asm_comments = sess.asm_comments();
-    let relax_elf_relocations = sess.target.target.options.relax_elf_relocations;
+    let relax_elf_relocations =
+        sess.opts.debugging_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);
 
-    let use_init_array = !sess
-        .opts
-        .debugging_opts
-        .use_ctors_section
-        .unwrap_or(sess.target.target.options.use_ctors_section);
+    let use_init_array =
+        !sess.opts.debugging_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section);
 
     Arc::new(move || {
         let tm = unsafe {
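
Several settings above (function_sections, relax_elf_relocations, use_ctors_section) now follow one pattern: an optional debugging flag, when present, overrides the default taken from the target spec via Option::unwrap_or. A minimal illustration of that precedence with made-up names:

    // The command-line flag, if given, wins; otherwise fall back to the
    // default baked into the target specification.
    fn effective_function_sections(cli_flag: Option<bool>, target_default: bool) -> bool {
        cli_flag.unwrap_or(target_default)
    }

    fn main() {
        assert_eq!(effective_function_sections(None, true), true); // target default used
        assert_eq!(effective_function_sections(Some(false), true), false); // flag overrides
    }
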
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index f35708b..1090d4a 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -60,7 +60,7 @@
         unsafe { llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr()) };
     unsafe {
         llvm::LLVMSetInitializer(llglobal, llconst);
-        let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
+        let section_name = metadata::metadata_section_name(&tcx.sess.target);
         let name = SmallCStr::new(section_name);
         llvm::LLVMSetSection(llglobal, name.as_ptr());
 
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 0c172dc..f122fa1 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -16,7 +16,7 @@
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::sym;
+use rustc_span::{sym, Span};
 use rustc_target::abi::{self, Align, Size};
 use rustc_target::spec::{HasTargetSpec, Target};
 use std::borrow::Cow;
@@ -56,6 +56,7 @@
     type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
 
     type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
+    type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation;
     type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
 }
 
@@ -139,6 +140,8 @@
         unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
     }
 
+    fn set_span(&mut self, _span: Span) {}
+
     fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
         unsafe {
             llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
@@ -306,8 +309,8 @@
         use rustc_middle::ty::{Int, Uint};
 
         let new_kind = match ty.kind() {
-            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.ptr_width)),
-            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.ptr_width)),
+            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
             t @ (Uint(_) | Int(_)) => t.clone(),
             _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
         };
@@ -539,7 +542,7 @@
     }
 
     fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
-        if self.sess().target.target.arch == "amdgpu" {
+        if self.sess().target.arch == "amdgpu" {
             // amdgpu/LLVM does something weird and thinks a i64 value is
             // split into a v2i32, halving the bitwidth LLVM expects,
             // tripping an assertion. So, for now, just disable this
@@ -669,7 +672,7 @@
         // WebAssembly has saturating floating point to integer casts if the
         // `nontrapping-fptoint` target feature is activated. We'll use those if
         // they are available.
-        if self.sess().target.target.arch == "wasm32"
+        if self.sess().target.arch == "wasm32"
             && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
         {
             let src_ty = self.cx.val_ty(val);
@@ -694,7 +697,7 @@
         // WebAssembly has saturating floating point to integer casts if the
         // `nontrapping-fptoint` target feature is activated. We'll use those if
         // they are available.
-        if self.sess().target.target.arch == "wasm32"
+        if self.sess().target.arch == "wasm32"
             && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
         {
             let src_ty = self.cx.val_ty(val);
@@ -729,10 +732,7 @@
         let src_ty = self.cx.val_ty(val);
         let float_width = self.cx.float_width(src_ty);
         let int_width = self.cx.int_width(dest_ty);
-        match (int_width, float_width) {
-            (32, 32) | (32, 64) | (64, 32) | (64, 64) => true,
-            _ => false,
-        }
+        matches!((int_width, float_width), (32, 32) | (32, 64) | (64, 32) | (64, 64))
     }
 
     fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
@@ -1425,7 +1425,7 @@
     }
 
     fn wasm_and_missing_nontrapping_fptoint(&self) -> bool {
-        self.sess().target.target.arch == "wasm32"
+        self.sess().target.arch == "wasm32"
             && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
     }
 }
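
The matches! rewrite above keeps the same behaviour: only the four listed (integer width, float width) combinations are handled by this cast path. A standalone sketch of the predicate, with an invented function name:

    // Pairs of (integer width, float width) accepted by this cast path;
    // the pattern mirrors the matches! expression above.
    fn width_pair_supported(int_width: u64, float_width: u64) -> bool {
        matches!((int_width, float_width), (32, 32) | (32, 64) | (64, 32) | (64, 64))
    }

    fn main() {
        assert!(width_pair_supported(64, 32));
        assert!(!width_pair_supported(128, 64));
    }
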
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 4afd906..367c1f4 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -176,7 +176,7 @@
         // should use dllimport for functions.
         if cx.use_dll_storage_attrs
             && tcx.is_dllimport_foreign_item(instance_def_id)
-            && tcx.sess.target.target.target_env != "gnu"
+            && tcx.sess.target.env != "gnu"
         {
             unsafe {
                 llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 0992410a..34e1b7a 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -12,7 +12,7 @@
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{layout::TyAndLayout, ScalarInt};
 use rustc_span::symbol::Symbol;
 use rustc_target::abi::{self, AddressSpace, HasDataLayout, LayoutOf, Pointer, Size};
 
@@ -80,6 +80,7 @@
 
 impl BackendTypes for CodegenCx<'ll, 'tcx> {
     type Value = &'ll Value;
+    // FIXME(eddyb) replace this with a `Function` "subclass" of `Value`.
     type Function = &'ll Value;
 
     type BasicBlock = &'ll BasicBlock;
@@ -87,6 +88,7 @@
     type Funclet = Funclet<'ll>;
 
     type DIScope = &'ll llvm::debuginfo::DIScope;
+    type DILocation = &'ll llvm::debuginfo::DILocation;
     type DIVariable = &'ll llvm::debuginfo::DIVariable;
 }
 
@@ -228,12 +230,12 @@
     fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
         let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
         match cv {
-            Scalar::Raw { size: 0, .. } => {
+            Scalar::Int(ScalarInt::ZST) => {
                 assert_eq!(0, layout.value.size(self).bytes());
                 self.const_undef(self.type_ix(0))
             }
-            Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, layout.value.size(self).bytes());
+            Scalar::Int(int) => {
+                let data = int.assert_bits(layout.value.size(self));
                 let llval = self.const_uint_big(self.type_ix(bitsize), data);
                 if layout.value == Pointer {
                     unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 6d3582d..14dd245 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -19,7 +19,6 @@
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::symbol::sym;
-use rustc_span::Span;
 use rustc_target::abi::{AddressSpace, Align, HasDataLayout, LayoutOf, Primitive, Scalar, Size};
 use tracing::debug;
 
@@ -92,7 +91,7 @@
     // The target may require greater alignment for globals than the type does.
     // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
     // which can force it to be smaller.  Rust doesn't support this yet.
-    if let Some(min) = cx.sess().target.target.options.min_global_align {
+    if let Some(min) = cx.sess().target.min_global_align {
         match Align::from_bits(min) {
             Ok(min) => align = align.max(min),
             Err(err) => {
@@ -110,7 +109,7 @@
     attrs: &CodegenFnAttrs,
     ty: Ty<'tcx>,
     sym: &str,
-    span: Span,
+    span_def_id: DefId,
 ) -> &'ll Value {
     let llty = cx.layout_of(ty).llvm_type(cx);
     if let Some(linkage) = attrs.linkage {
@@ -125,7 +124,7 @@
             cx.layout_of(mt.ty).llvm_type(cx)
         } else {
             cx.sess().span_fatal(
-                span,
+                cx.tcx.def_span(span_def_id),
                 "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
             )
         };
@@ -143,7 +142,10 @@
             let mut real_name = "_rust_extern_with_linkage_".to_string();
             real_name.push_str(&sym);
             let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
-                cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
+                cx.sess().span_fatal(
+                    cx.tcx.def_span(span_def_id),
+                    &format!("symbol `{}` is already defined", &sym),
+                )
             });
             llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
             llvm::LLVMSetInitializer(g2, g1);
@@ -210,21 +212,21 @@
 
         debug!("get_static: sym={} instance={:?}", sym, instance);
 
-        let g = if let Some(def_id) = def_id.as_local() {
-            let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+        let g = if let Some(local_def_id) = def_id.as_local() {
+            let id = self.tcx.hir().local_def_id_to_hir_id(local_def_id);
             let llty = self.layout_of(ty).llvm_type(self);
             // FIXME: refactor this to work without accessing the HIR
             let (g, attrs) = match self.tcx.hir().get(id) {
-                Node::Item(&hir::Item { attrs, span, kind: hir::ItemKind::Static(..), .. }) => {
+                Node::Item(&hir::Item { attrs, kind: hir::ItemKind::Static(..), .. }) => {
                     if let Some(g) = self.get_declared_value(sym) {
                         if self.val_ty(g) != self.type_ptr_to(llty) {
-                            span_bug!(span, "Conflicting types for static");
+                            span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
                         }
                     }
 
                     let g = self.declare_global(sym, llty);
 
-                    if !self.tcx.is_reachable_non_generic(def_id) {
+                    if !self.tcx.is_reachable_non_generic(local_def_id) {
                         unsafe {
                             llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
                         }
@@ -235,12 +237,11 @@
 
                 Node::ForeignItem(&hir::ForeignItem {
                     ref attrs,
-                    span,
                     kind: hir::ForeignItemKind::Static(..),
                     ..
                 }) => {
-                    let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
-                    (check_and_apply_linkage(&self, &fn_attrs, ty, sym, span), &**attrs)
+                    let fn_attrs = self.tcx.codegen_fn_attrs(local_def_id);
+                    (check_and_apply_linkage(&self, &fn_attrs, ty, sym, def_id), &**attrs)
                 }
 
                 item => bug!("get_static: expected static, found {:?}", item),
@@ -260,8 +261,7 @@
             debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
 
             let attrs = self.tcx.codegen_fn_attrs(def_id);
-            let span = self.tcx.def_span(def_id);
-            let g = check_and_apply_linkage(&self, &attrs, ty, sym, span);
+            let g = check_and_apply_linkage(&self, &attrs, ty, sym, def_id);
 
             // Thread-local statics in some other crate need to *always* be linked
             // against in a thread-local fashion, so we need to be sure to apply the
@@ -283,7 +283,7 @@
             // argument validation.
             debug_assert!(
                 !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
-                    && self.tcx.sess.target.target.options.is_like_windows
+                    && self.tcx.sess.target.is_like_windows
                     && self.tcx.sess.opts.cg.prefer_dynamic)
             );
 
@@ -397,10 +397,8 @@
 
             // As an optimization, all shared statics which do not have interior
             // mutability are placed into read-only memory.
-            if !is_mutable {
-                if self.type_is_freeze(ty) {
-                    llvm::LLVMSetGlobalConstant(g, llvm::True);
-                }
+            if !is_mutable && self.type_is_freeze(ty) {
+                llvm::LLVMSetGlobalConstant(g, llvm::True);
             }
 
             debuginfo::create_global_var_metadata(&self, def_id, g);
@@ -437,7 +435,7 @@
                 // will use load-unaligned instructions instead, and thus avoiding the crash.
                 //
                 // We could remove this hack whenever we decide to drop macOS 10.10 support.
-                if self.tcx.sess.target.target.options.is_like_osx {
+                if self.tcx.sess.target.is_like_osx {
                     // The `inspect` method is okay here because we checked relocations, and
                     // because we are doing this access to inspect the final interpreter state
                     // (not as part of the interpreter execution).
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 1696f35..b6e922c 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -118,18 +118,18 @@
     let mod_name = SmallCStr::new(mod_name);
     let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
 
-    let mut target_data_layout = sess.target.target.data_layout.clone();
+    let mut target_data_layout = sess.target.data_layout.clone();
     if llvm_util::get_major_version() < 9 {
         target_data_layout = strip_function_ptr_alignment(target_data_layout);
     }
-    if llvm_util::get_major_version() < 10 {
-        if sess.target.target.arch == "x86" || sess.target.target.arch == "x86_64" {
-            target_data_layout = strip_x86_address_spaces(target_data_layout);
-        }
+    if llvm_util::get_major_version() < 10
+        && (sess.target.arch == "x86" || sess.target.arch == "x86_64")
+    {
+        target_data_layout = strip_x86_address_spaces(target_data_layout);
     }
 
     // Ensure the data-layout values hardcoded remain the defaults.
-    if sess.target.target.options.is_builtin {
+    if sess.target.is_builtin {
         let tm = crate::back::write::create_informational_target_machine(tcx.sess);
         llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
         llvm::LLVMRustDisposeTargetMachine(tm);
@@ -160,7 +160,7 @@
             bug!(
                 "data-layout for builtin `{}` target, `{}`, \
                   differs from LLVM default, `{}`",
-                sess.target.target.llvm_target,
+                sess.target.llvm_target,
                 target_data_layout,
                 llvm_data_layout
             );
@@ -170,7 +170,7 @@
     let data_layout = SmallCStr::new(&target_data_layout);
     llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
 
-    let llvm_target = SmallCStr::new(&sess.target.target.llvm_target);
+    let llvm_target = SmallCStr::new(&sess.target.llvm_target);
     llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
 
     if sess.relocation_model() == RelocModel::Pic {
@@ -190,7 +190,7 @@
     }
 
     // Control Flow Guard is currently only supported by the MSVC linker on Windows.
-    if sess.target.target.options.is_like_msvc {
+    if sess.target.is_like_msvc {
         match sess.opts.cg.control_flow_guard {
             CFGuard::Disabled => {}
             CFGuard::NoChecks => {
@@ -265,7 +265,7 @@
         // linker will take care of everything. Fixing this problem will likely
         // require adding a few attributes to Rust itself (feature gated at the
         // start) and then strongly recommending static linkage on Windows!
-        let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_windows;
+        let use_dll_storage_attrs = tcx.sess.target.is_like_windows;
 
         let check_overflow = tcx.sess.overflow_checks();
 
@@ -324,8 +324,8 @@
     }
 
     #[inline]
-    pub fn coverage_context(&'a self) -> &'a coverageinfo::CrateCoverageContext<'tcx> {
-        self.coverage_cx.as_ref().unwrap()
+    pub fn coverage_context(&'a self) -> Option<&'a coverageinfo::CrateCoverageContext<'tcx>> {
+        self.coverage_cx.as_ref()
     }
 }
 
@@ -417,7 +417,8 @@
     }
 
     fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
-        attributes::apply_target_cpu_attr(self, llfn)
+        attributes::apply_target_cpu_attr(self, llfn);
+        attributes::apply_tune_cpu_attr(self, llfn);
     }
 
     fn create_used_variable(&self) {
@@ -838,7 +839,7 @@
             return eh_catch_typeinfo;
         }
         let tcx = self.tcx;
-        assert!(self.sess().target.target.options.is_like_emscripten);
+        assert!(self.sess().target.is_like_emscripten);
         let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
             Some(def_id) => self.get_static(def_id),
             _ => {
@@ -863,7 +864,7 @@
         // user defined names
         let mut name = String::with_capacity(prefix.len() + 6);
         name.push_str(prefix);
-        name.push_str(".");
+        name.push('.');
         base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
         name
     }
@@ -877,7 +878,7 @@
 
 impl HasTargetSpec for CodegenCx<'ll, 'tcx> {
     fn target_spec(&self) -> &Target {
-        &self.tcx.sess.target.target
+        &self.tcx.sess.target
     }
 }
 
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index ec6c177..41827a9 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -26,7 +26,10 @@
 /// undocumented details in Clang's implementation (that may or may not be important) were also
 /// replicated for Rust's Coverage Map.
 pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
-    let function_coverage_map = cx.coverage_context().take_function_coverage_map();
+    let function_coverage_map = match cx.coverage_context() {
+        Some(ctx) => ctx.take_function_coverage_map(),
+        None => return,
+    };
     if function_coverage_map.is_empty() {
         // This module has no functions with coverage instrumentation
         return;
@@ -126,6 +129,7 @@
                 let (filenames_index, _) = self.filenames.insert_full(c_filename);
                 virtual_file_mapping.push(filenames_index as u32);
             }
+            debug!("Adding counter {:?} to map for {:?}", counter, region);
             mapping_regions.push(CounterMappingRegion::code_region(
                 counter,
                 current_file_id,
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 2bd37bf..e21e038 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -12,7 +12,7 @@
 use rustc_data_structures::fx::FxHashMap;
 use rustc_llvm::RustString;
 use rustc_middle::mir::coverage::{
-    CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionIndex, Op,
+    CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op,
 };
 use rustc_middle::ty::Instance;
 
@@ -27,8 +27,8 @@
 
 /// A context object for maintaining all state needed by the coverageinfo module.
 pub struct CrateCoverageContext<'tcx> {
-    // Coverage region data for each instrumented function identified by DefId.
-    pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage>>,
+    // Coverage data for each instrumented function identified by DefId.
+    pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>>>,
 }
 
 impl<'tcx> CrateCoverageContext<'tcx> {
@@ -36,7 +36,7 @@
         Self { function_coverage_map: Default::default() }
     }
 
-    pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage> {
+    pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>> {
         self.function_coverage_map.replace(FxHashMap::default())
     }
 }
@@ -58,56 +58,90 @@
         unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
     }
 
-    fn add_counter_region(
+    fn set_function_source_hash(
         &mut self,
         instance: Instance<'tcx>,
         function_source_hash: u64,
-        id: CounterValueReference,
-        region: CodeRegion,
-    ) {
-        debug!(
-            "adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \
-             at {:?}",
-            instance, function_source_hash, id, region,
-        );
-        let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
-        coverage_regions
-            .entry(instance)
-            .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-            .add_counter(function_source_hash, id, region);
+    ) -> bool {
+        if let Some(coverage_context) = self.coverage_context() {
+            debug!(
+                "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+                instance, function_source_hash,
+            );
+            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+            coverage_map
+                .entry(instance)
+                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+                .set_function_source_hash(function_source_hash);
+            true
+        } else {
+            false
+        }
     }
 
-    fn add_counter_expression_region(
+    fn add_coverage_counter(
         &mut self,
         instance: Instance<'tcx>,
-        id: InjectedExpressionIndex,
+        id: CounterValueReference,
+        region: CodeRegion,
+    ) -> bool {
+        if let Some(coverage_context) = self.coverage_context() {
+            debug!(
+                "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
+                instance, id, region,
+            );
+            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+            coverage_map
+                .entry(instance)
+                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+                .add_counter(id, region);
+            true
+        } else {
+            false
+        }
+    }
+
+    fn add_coverage_counter_expression(
+        &mut self,
+        instance: Instance<'tcx>,
+        id: InjectedExpressionId,
         lhs: ExpressionOperandId,
         op: Op,
         rhs: ExpressionOperandId,
-        region: CodeRegion,
-    ) {
-        debug!(
-            "adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \
-             at {:?}",
-            instance, id, lhs, op, rhs, region,
-        );
-        let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
-        coverage_regions
-            .entry(instance)
-            .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-            .add_counter_expression(id, lhs, op, rhs, region);
+        region: Option<CodeRegion>,
+    ) -> bool {
+        if let Some(coverage_context) = self.coverage_context() {
+            debug!(
+                "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
+                region: {:?}",
+                instance, id, lhs, op, rhs, region,
+            );
+            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+            coverage_map
+                .entry(instance)
+                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+                .add_counter_expression(id, lhs, op, rhs, region);
+            true
+        } else {
+            false
+        }
     }
 
-    fn add_unreachable_region(&mut self, instance: Instance<'tcx>, region: CodeRegion) {
-        debug!(
-            "adding unreachable code to coverage_regions: instance={:?}, at {:?}",
-            instance, region,
-        );
-        let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
-        coverage_regions
-            .entry(instance)
-            .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-            .add_unreachable_region(region);
+    fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
+        if let Some(coverage_context) = self.coverage_context() {
+            debug!(
+                "adding unreachable code to coverage_map: instance={:?}, at {:?}",
+                instance, region,
+            );
+            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+            coverage_map
+                .entry(instance)
+                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+                .add_unreachable_region(region);
+            true
+        } else {
+            false
+        }
     }
 }
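
All four coverage-recording methods above now share the same shape: if a coverage context exists, record into the per-instance map and return true; otherwise return false so the caller knows the data was dropped. A reduced sketch of that pattern with stand-in types (not the real CrateCoverageContext or Builder):

    use std::cell::RefCell;
    use std::collections::HashMap;

    struct CoverageContext {
        // instance name -> recorded counter ids
        map: RefCell<HashMap<String, Vec<u32>>>,
    }

    struct Builder {
        coverage_cx: Option<CoverageContext>,
    }

    impl Builder {
        // Returns true only if a coverage context is available and the
        // counter was actually recorded.
        fn add_coverage_counter(&self, instance: &str, id: u32) -> bool {
            if let Some(cx) = &self.coverage_cx {
                cx.map.borrow_mut().entry(instance.to_string()).or_default().push(id);
                true
            } else {
                false
            }
        }
    }

    fn main() {
        let with_cx =
            Builder { coverage_cx: Some(CoverageContext { map: RefCell::new(HashMap::new()) }) };
        let without_cx = Builder { coverage_cx: None };
        assert!(with_cx.add_coverage_counter("demo::main", 1));
        assert!(!without_cx.add_coverage_counter("demo::main", 1));
    }
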
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index 7f47b61..6737872 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -3,21 +3,26 @@
 use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext};
 use rustc_codegen_ssa::traits::*;
 
+use crate::abi::FnAbi;
 use crate::common::CodegenCx;
 use crate::llvm;
-use crate::llvm::debuginfo::{DIScope, DISubprogram};
+use crate::llvm::debuginfo::{DILocation, DIScope};
 use rustc_middle::mir::{Body, SourceScope};
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_middle::ty::{self, Instance};
 use rustc_session::config::DebugInfo;
 
 use rustc_index::bit_set::BitSet;
 use rustc_index::vec::Idx;
 
 /// Produces DIScope DIEs for each MIR Scope which has variables defined in it.
+// FIXME(eddyb) almost all of this should be in `rustc_codegen_ssa::mir::debuginfo`.
 pub fn compute_mir_scopes(
-    cx: &CodegenCx<'ll, '_>,
-    mir: &Body<'_>,
-    fn_metadata: &'ll DISubprogram,
-    debug_context: &mut FunctionDebugContext<&'ll DIScope>,
+    cx: &CodegenCx<'ll, 'tcx>,
+    instance: Instance<'tcx>,
+    mir: &Body<'tcx>,
+    fn_dbg_scope: &'ll DIScope,
+    debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
 ) {
     // Find all the scopes with variables defined in them.
     let mut has_variables = BitSet::new_empty(mir.source_scopes.len());
@@ -37,58 +42,82 @@
     // Instantiate all scopes.
     for idx in 0..mir.source_scopes.len() {
         let scope = SourceScope::new(idx);
-        make_mir_scope(cx, &mir, fn_metadata, &has_variables, debug_context, scope);
+        make_mir_scope(cx, instance, &mir, fn_dbg_scope, &has_variables, debug_context, scope);
     }
 }
 
 fn make_mir_scope(
-    cx: &CodegenCx<'ll, '_>,
-    mir: &Body<'_>,
-    fn_metadata: &'ll DISubprogram,
+    cx: &CodegenCx<'ll, 'tcx>,
+    instance: Instance<'tcx>,
+    mir: &Body<'tcx>,
+    fn_dbg_scope: &'ll DIScope,
     has_variables: &BitSet<SourceScope>,
-    debug_context: &mut FunctionDebugContext<&'ll DISubprogram>,
+    debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
     scope: SourceScope,
 ) {
-    if debug_context.scopes[scope].is_valid() {
+    if debug_context.scopes[scope].dbg_scope.is_some() {
         return;
     }
 
     let scope_data = &mir.source_scopes[scope];
     let parent_scope = if let Some(parent) = scope_data.parent_scope {
-        make_mir_scope(cx, mir, fn_metadata, has_variables, debug_context, parent);
+        make_mir_scope(cx, instance, mir, fn_dbg_scope, has_variables, debug_context, parent);
         debug_context.scopes[parent]
     } else {
         // The root is the function itself.
         let loc = cx.lookup_debug_loc(mir.span.lo());
         debug_context.scopes[scope] = DebugScope {
-            scope_metadata: Some(fn_metadata),
+            dbg_scope: Some(fn_dbg_scope),
+            inlined_at: None,
             file_start_pos: loc.file.start_pos,
             file_end_pos: loc.file.end_pos,
         };
         return;
     };
 
-    if !has_variables.contains(scope) {
-        // Do not create a DIScope if there are no variables
-        // defined in this MIR Scope, to avoid debuginfo bloat.
+    if !has_variables.contains(scope) && scope_data.inlined.is_none() {
+        // Do not create a DIScope if there are no variables defined in this
+        // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
         debug_context.scopes[scope] = parent_scope;
         return;
     }
 
     let loc = cx.lookup_debug_loc(scope_data.span.lo());
-    let file_metadata = file_metadata(cx, &loc.file, debug_context.defining_crate);
+    let file_metadata = file_metadata(cx, &loc.file);
 
-    let scope_metadata = unsafe {
-        Some(llvm::LLVMRustDIBuilderCreateLexicalBlock(
-            DIB(cx),
-            parent_scope.scope_metadata.unwrap(),
-            file_metadata,
-            loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
-            loc.col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
-        ))
+    let dbg_scope = match scope_data.inlined {
+        Some((callee, _)) => {
+            // FIXME(eddyb) this would be `self.monomorphize(&callee)`
+            // if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
+            let callee = cx.tcx.subst_and_normalize_erasing_regions(
+                instance.substs,
+                ty::ParamEnv::reveal_all(),
+                &callee,
+            );
+            let callee_fn_abi = FnAbi::of_instance(cx, callee, &[]);
+            cx.dbg_scope_fn(callee, &callee_fn_abi, None)
+        }
+        None => unsafe {
+            llvm::LLVMRustDIBuilderCreateLexicalBlock(
+                DIB(cx),
+                parent_scope.dbg_scope.unwrap(),
+                file_metadata,
+                loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
+                loc.col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
+            )
+        },
     };
+
+    let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
+        // FIXME(eddyb) this doesn't account for the macro-related
+        // `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
+        let callsite_scope = parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
+        cx.dbg_loc(callsite_scope, parent_scope.inlined_at, callsite_span)
+    });
+
     debug_context.scopes[scope] = DebugScope {
-        scope_metadata,
+        dbg_scope: Some(dbg_scope),
+        inlined_at: inlined_at.or(parent_scope.inlined_at),
         file_start_pos: loc.file.start_pos,
         file_end_pos: loc.file.end_pos,
     };
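
The scope handling above can be summarized as follows: a scope with no variables and no inlining collapses into its parent, while an inlined scope gets its own debug scope and an inlined_at location that falls back to the parent's chain. A reduced sketch with stand-in types (not the real DIScope/DILocation):

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Scope(u32);

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Loc(u32);

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct DebugScope {
        dbg_scope: Option<Scope>,
        inlined_at: Option<Loc>,
    }

    // A child scope that needs no scope of its own simply reuses its parent;
    // otherwise it records its call site (if inlined) or inherits the
    // parent's inlined_at chain.
    fn child_debug_scope(
        parent: DebugScope,
        own_scope: Option<Scope>,
        call_site: Option<Loc>,
    ) -> DebugScope {
        match own_scope {
            None => parent,
            Some(scope) => DebugScope {
                dbg_scope: Some(scope),
                inlined_at: call_site.or(parent.inlined_at),
            },
        }
    }

    fn main() {
        let root = DebugScope { dbg_scope: Some(Scope(0)), inlined_at: None };
        // Plain block with no variables: collapses into the parent.
        assert_eq!(child_debug_scope(root, None, None), root);
        // Inlined call: gets its own scope and records the call site.
        let inlined = child_debug_scope(root, Some(Scope(1)), Some(Loc(42)));
        assert_eq!(inlined.inlined_at, Some(Loc(42)));
    }
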
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs b/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
index b3a8fa2..10dd590 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
@@ -28,7 +28,7 @@
 //! utilizing a cache. The way to get a shared metadata node when needed is
 //! thus to just call the corresponding function in this module:
 //!
-//!     let file_metadata = file_metadata(crate_context, path);
+//!     let file_metadata = file_metadata(cx, file);
 //!
 //! The function will take care of probing the cache for an existing node for
 //! that exact file path.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 29edd660..38f50a6 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -67,5 +67,5 @@
 
     !omit_gdb_pretty_printer_section
         && cx.sess().opts.debuginfo != DebugInfo::None
-        && cx.sess().target.target.options.emit_debug_gdb_scripts
+        && cx.sess().target.emit_debug_gdb_scripts
 }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 987149c..27b81eb 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -26,10 +26,9 @@
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_fs_util::path_to_c_string;
 use rustc_hir::def::CtorKind;
-use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::ich::NodeIdHashingMode;
-use rustc_middle::mir::interpret::truncate;
 use rustc_middle::mir::{self, Field, GeneratorLayout};
 use rustc_middle::ty::layout::{self, IntegerExt, PrimitiveExt, TyAndLayout};
 use rustc_middle::ty::subst::GenericArgKind;
@@ -760,16 +759,12 @@
     hex_string
 }
 
-pub fn file_metadata(
-    cx: &CodegenCx<'ll, '_>,
-    source_file: &SourceFile,
-    defining_crate: CrateNum,
-) -> &'ll DIFile {
-    debug!("file_metadata: file_name: {}, defining_crate: {}", source_file.name, defining_crate);
+pub fn file_metadata(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> &'ll DIFile {
+    debug!("file_metadata: file_name: {}", source_file.name);
 
     let hash = Some(&source_file.src_hash);
     let file_name = Some(source_file.name.to_string());
-    let directory = if defining_crate == LOCAL_CRATE {
+    let directory = if source_file.is_real_file() && !source_file.is_imported() {
         Some(cx.sess().working_dir.0.to_string_lossy().to_string())
     } else {
         // If the path comes from an upstream crate we assume it has been made
@@ -805,6 +800,7 @@
                     let kind = match hash.kind {
                         rustc_span::SourceFileHashAlgorithm::Md5 => llvm::ChecksumKind::MD5,
                         rustc_span::SourceFileHashAlgorithm::Sha1 => llvm::ChecksumKind::SHA1,
+                        rustc_span::SourceFileHashAlgorithm::Sha256 => llvm::ChecksumKind::SHA256,
                     };
                     (kind, hex_encode(hash.hash_bytes()))
                 }
@@ -874,7 +870,7 @@
 
     // When targeting MSVC, emit MSVC style type names for compatibility with
     // .natvis visualizers (and perhaps other existing native debuggers?)
-    let msvc_like_names = cx.tcx.sess.target.target.options.is_like_msvc;
+    let msvc_like_names = cx.tcx.sess.target.is_like_msvc;
 
     let (name, encoding) = match t.kind() {
         ty::Never => ("!", DW_ATE_unsigned),
@@ -985,7 +981,7 @@
     // if multiple object files with the same `DW_AT_name` are linked together.
     // As a workaround we generate unique names for each object file. Those do
     // not correspond to an actual source file but that should be harmless.
-    if tcx.sess.target.target.options.is_like_osx {
+    if tcx.sess.target.is_like_osx {
         name_in_debuginfo.push("@");
         name_in_debuginfo.push(codegen_unit_name);
     }
@@ -1401,7 +1397,7 @@
 /// on MSVC we have to use the fallback mode, because LLVM doesn't
 /// lower variant parts to PDB.
 fn use_enum_fallback(cx: &CodegenCx<'_, '_>) -> bool {
-    cx.sess().target.target.options.is_like_msvc
+    cx.sess().target.is_like_msvc
 }
 
 // FIXME(eddyb) maybe precompute this? Right now it's computed once
@@ -1696,7 +1692,7 @@
                                 let value = (i.as_u32() as u128)
                                     .wrapping_sub(niche_variants.start().as_u32() as u128)
                                     .wrapping_add(niche_start);
-                                let value = truncate(value, tag.value.size(cx));
+                                let value = tag.value.size(cx).truncate(value);
                                 // NOTE(eddyb) do *NOT* remove this assert, until
                                 // we pass the full 128-bit value to LLVM, otherwise
                                 // truncation will be silent and remain undetected.
@@ -1835,7 +1831,7 @@
                 if !span.is_dummy() {
                     let loc = cx.lookup_debug_loc(span.lo());
                     return Some(SourceInfo {
-                        file: file_metadata(cx, &loc.file, def_id.krate),
+                        file: file_metadata(cx, &loc.file),
                         line: loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
                     });
                 }
@@ -2474,7 +2470,7 @@
 
     let (file_metadata, line_number) = if !span.is_dummy() {
         let loc = cx.lookup_debug_loc(span.lo());
-        (file_metadata(cx, &loc.file, LOCAL_CRATE), loc.line)
+        (file_metadata(cx, &loc.file), loc.line)
     } else {
         (unknown_file_metadata(cx), None)
     };
@@ -2576,9 +2572,8 @@
 pub fn extend_scope_to_file(
     cx: &CodegenCx<'ll, '_>,
     scope_metadata: &'ll DIScope,
-    file: &rustc_span::SourceFile,
-    defining_crate: CrateNum,
+    file: &SourceFile,
 ) -> &'ll DILexicalBlock {
-    let file_metadata = file_metadata(cx, &file, defining_crate);
+    let file_metadata = file_metadata(cx, file);
     unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlockFile(DIB(cx), scope_metadata, file_metadata) }
 }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 7cdd366..5065ff0 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -3,7 +3,8 @@
 
 use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
 
-use self::metadata::{file_metadata, type_metadata, TypeMap, UNKNOWN_LINE_NUMBER};
+use self::metadata::{file_metadata, type_metadata, TypeMap};
+use self::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
 use self::namespace::mangled_name_of_instance;
 use self::type_names::compute_debuginfo_type_name;
 use self::utils::{create_DIArray, is_node_local_to_unit, DIB};
@@ -13,7 +14,8 @@
 use crate::common::CodegenCx;
 use crate::llvm;
 use crate::llvm::debuginfo::{
-    DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DISPFlags, DIScope, DIType, DIVariable,
+    DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType,
+    DIVariable,
 };
 use crate::value::Value;
 
@@ -21,7 +23,8 @@
 use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LOCAL_CRATE};
+use rustc_data_structures::sync::Lrc;
+use rustc_hir::def_id::{DefId, DefIdMap, LOCAL_CRATE};
 use rustc_index::vec::IndexVec;
 use rustc_middle::mir;
 use rustc_middle::ty::layout::HasTyCtxt;
@@ -29,7 +32,7 @@
 use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeFoldable};
 use rustc_session::config::{self, DebugInfo};
 use rustc_span::symbol::Symbol;
-use rustc_span::{self, BytePos, Span};
+use rustc_span::{self, BytePos, Pos, SourceFile, SourceFileAndLine, Span};
 use rustc_target::abi::{LayoutOf, Primitive, Size};
 
 use libc::c_uint;
@@ -41,7 +44,6 @@
 pub mod gdb;
 pub mod metadata;
 mod namespace;
-mod source_loc;
 mod utils;
 
 pub use self::create_scope_map::compute_mir_scopes;
@@ -120,14 +122,12 @@
         // for macOS to understand. For more info see #11352
         // This can be overridden using --llvm-opts -dwarf-version,N.
         // Android has the same issue (#22398)
-        if cx.sess().target.target.options.is_like_osx
-            || cx.sess().target.target.options.is_like_android
-        {
-            llvm::LLVMRustAddModuleFlag(cx.llmod, "Dwarf Version\0".as_ptr().cast(), 2)
+        if let Some(version) = cx.sess().target.dwarf_version {
+            llvm::LLVMRustAddModuleFlag(cx.llmod, "Dwarf Version\0".as_ptr().cast(), version)
         }
 
         // Indicate that we want CodeView debug information on MSVC
-        if cx.sess().target.target.options.is_like_msvc {
+        if cx.sess().target.is_like_msvc {
             llvm::LLVMRustAddModuleFlag(cx.llmod, "CodeView\0".as_ptr().cast(), 1)
         }
 
@@ -143,14 +143,11 @@
     fn dbg_var_addr(
         &mut self,
         dbg_var: &'ll DIVariable,
-        scope_metadata: &'ll DIScope,
+        dbg_loc: &'ll DILocation,
         variable_alloca: Self::Value,
         direct_offset: Size,
         indirect_offsets: &[Size],
-        span: Span,
     ) {
-        let cx = self.cx();
-
         // Convert the direct and indirect offsets to address ops.
         // FIXME(eddyb) use `const`s instead of getting the values via FFI,
         // the values should match the ones in the DWARF standard anyway.
@@ -170,14 +167,10 @@
             }
         }
 
-        // FIXME(eddyb) maybe this information could be extracted from `dbg_var`,
-        // to avoid having to pass it down in both places?
-        // NB: `var` doesn't seem to know about the column, so that's a limitation.
-        let dbg_loc = cx.create_debug_loc(scope_metadata, span);
         unsafe {
             // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
             llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
-                DIB(cx),
+                DIB(self.cx()),
                 variable_alloca,
                 dbg_var,
                 addr_ops.as_ptr(),
@@ -188,15 +181,13 @@
         }
     }
 
-    fn set_source_location(&mut self, scope: &'ll DIScope, span: Span) {
-        debug!("set_source_location: {}", self.sess().source_map().span_to_string(span));
-
-        let dbg_loc = self.cx().create_debug_loc(scope, span);
-
+    fn set_dbg_loc(&mut self, dbg_loc: &'ll DILocation) {
         unsafe {
-            llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc);
+            let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
+            llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
         }
     }
+
     fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
         gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
     }
@@ -225,30 +216,95 @@
     }
 }
 
+/// A source code location used to generate debug information.
+// FIXME(eddyb) rename this to better indicate it's a duplicate of
+// `rustc_span::Loc` rather than `DILocation`, perhaps by making
+// `lookup_char_pos` return the right information instead.
+pub struct DebugLoc {
+    /// Information about the original source file.
+    pub file: Lrc<SourceFile>,
+    /// The (1-based) line number.
+    pub line: Option<u32>,
+    /// The (1-based) column number.
+    pub col: Option<u32>,
+}
+
+impl CodegenCx<'ll, '_> {
+    /// Looks up debug source information about a `BytePos`.
+    // FIXME(eddyb) rename this to better indicate it's a duplicate of
+    // `lookup_char_pos` rather than `dbg_loc`, perhaps by making
+    // `lookup_char_pos` return the right information instead.
+    pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
+        let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
+            Ok(SourceFileAndLine { sf: file, line }) => {
+                let line_pos = file.line_begin_pos(pos);
+
+                // Use 1-based indexing.
+                let line = (line + 1) as u32;
+                let col = (pos - line_pos).to_u32() + 1;
+
+                (file, Some(line), Some(col))
+            }
+            Err(file) => (file, None, None),
+        };
+
+        // For MSVC, omit the column number.
+        // Otherwise, emit it. This mimics clang behaviour.
+        // See discussion in https://github.com/rust-lang/rust/issues/42921
+        if self.sess().target.is_like_msvc {
+            DebugLoc { file, line, col: None }
+        } else {
+            DebugLoc { file, line, col }
+        }
+    }
+}
+
 impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn create_function_debug_context(
         &self,
         instance: Instance<'tcx>,
         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         llfn: &'ll Value,
-        mir: &mir::Body<'_>,
-    ) -> Option<FunctionDebugContext<&'ll DIScope>> {
+        mir: &mir::Body<'tcx>,
+    ) -> Option<FunctionDebugContext<&'ll DIScope, &'ll DILocation>> {
         if self.sess().opts.debuginfo == DebugInfo::None {
             return None;
         }
 
-        let span = mir.span;
+        // Initialize fn debug context (including scopes).
+        // FIXME(eddyb) figure out a way to not need `Option` for `dbg_scope`.
+        let empty_scope = DebugScope {
+            dbg_scope: None,
+            inlined_at: None,
+            file_start_pos: BytePos(0),
+            file_end_pos: BytePos(0),
+        };
+        let mut fn_debug_context =
+            FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
 
-        // This can be the case for functions inlined from another crate
-        if span.is_dummy() {
-            // FIXME(simulacrum): Probably can't happen; remove.
-            return None;
-        }
+        // Fill in all the scopes, with the information from the MIR body.
+        compute_mir_scopes(
+            self,
+            instance,
+            mir,
+            self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
+            &mut fn_debug_context,
+        );
 
+        Some(fn_debug_context)
+    }
+
+    fn dbg_scope_fn(
+        &self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        maybe_definition_llfn: Option<&'ll Value>,
+    ) -> &'ll DIScope {
         let def_id = instance.def_id();
         let containing_scope = get_containing_scope(self, instance);
+        let span = self.tcx.def_span(def_id);
         let loc = self.lookup_debug_loc(span.lo());
-        let file_metadata = file_metadata(self, &loc.file, def_id.krate);
+        let file_metadata = file_metadata(self, &loc.file);
 
         let function_type_metadata = unsafe {
             let fn_signature = get_function_signature(self, fn_abi);
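
lookup_debug_loc above turns a byte position into 1-based line and column numbers and, for MSVC-like targets, drops the column. The arithmetic in isolation, with made-up inputs standing in for the real SourceMap lookup:

    // line_index is the 0-based line, line_start the byte offset where that
    // line begins; both are assumed to come from a source-map lookup.
    fn to_debug_line_col(
        line_index: usize,
        line_start: u32,
        pos: u32,
        is_like_msvc: bool,
    ) -> (u32, Option<u32>) {
        let line = (line_index + 1) as u32; // 1-based line
        let col = pos - line_start + 1; // 1-based column
        // MSVC/CodeView debuggers handle missing columns better, so omit them.
        if is_like_msvc { (line, None) } else { (line, Some(col)) }
    }

    fn main() {
        // Byte position 12 on a line starting at byte 10, third line of the file.
        assert_eq!(to_debug_line_col(2, 10, 12, false), (3, Some(3)));
        assert_eq!(to_debug_line_col(2, 10, 12, true), (3, None));
    }
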
@@ -293,8 +349,8 @@
             }
         }
 
-        let fn_metadata = unsafe {
-            llvm::LLVMRustDIBuilderCreateFunction(
+        unsafe {
+            return llvm::LLVMRustDIBuilderCreateFunction(
                 DIB(self),
                 containing_scope,
                 name.as_ptr().cast(),
@@ -307,28 +363,11 @@
                 scope_line.unwrap_or(UNKNOWN_LINE_NUMBER),
                 flags,
                 spflags,
-                llfn,
+                maybe_definition_llfn,
                 template_parameters,
                 None,
-            )
-        };
-
-        // Initialize fn debug context (including scopes).
-        // FIXME(eddyb) figure out a way to not need `Option` for `scope_metadata`.
-        let null_scope = DebugScope {
-            scope_metadata: None,
-            file_start_pos: BytePos(0),
-            file_end_pos: BytePos(0),
-        };
-        let mut fn_debug_context = FunctionDebugContext {
-            scopes: IndexVec::from_elem(null_scope, &mir.source_scopes),
-            defining_crate: def_id.krate,
-        };
-
-        // Fill in all the scopes, with the information from the MIR body.
-        compute_mir_scopes(self, mir, fn_metadata, &mut fn_debug_context);
-
-        return Some(fn_debug_context);
+            );
+        }
 
         fn get_function_signature<'ll, 'tcx>(
             cx: &CodegenCx<'ll, 'tcx>,
@@ -348,7 +387,7 @@
             });
 
             // Arguments types
-            if cx.sess().target.target.options.is_like_msvc {
+            if cx.sess().target.is_like_msvc {
                 // FIXME(#42800):
                 // There is a bug in MSDIA that leads to a crash when it encounters
                 // a fixed-size array of `u8` or something zero-sized in a
@@ -396,7 +435,7 @@
             name_to_append_suffix_to.push('<');
             for (i, actual_type) in substs.types().enumerate() {
                 if i != 0 {
-                    name_to_append_suffix_to.push_str(",");
+                    name_to_append_suffix_to.push(',');
                 }
 
                 let actual_type =
@@ -503,6 +542,25 @@
         }
     }
 
+    fn dbg_loc(
+        &self,
+        scope: &'ll DIScope,
+        inlined_at: Option<&'ll DILocation>,
+        span: Span,
+    ) -> &'ll DILocation {
+        let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
+
+        unsafe {
+            llvm::LLVMRustDIBuilderCreateDebugLocation(
+                utils::debug_context(self).llcontext,
+                line.unwrap_or(UNKNOWN_LINE_NUMBER),
+                col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
+                scope,
+                inlined_at,
+            )
+        }
+    }
+
     fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value) {
         metadata::create_vtable_metadata(self, ty, vtable)
     }
@@ -511,9 +569,8 @@
         &self,
         scope_metadata: &'ll DIScope,
         file: &rustc_span::SourceFile,
-        defining_crate: CrateNum,
     ) -> &'ll DILexicalBlock {
-        metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate)
+        metadata::extend_scope_to_file(&self, scope_metadata, file)
     }
 
     fn debuginfo_finalize(&self) {
@@ -524,7 +581,6 @@
     // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
     fn create_dbg_var(
         &self,
-        dbg_context: &FunctionDebugContext<&'ll DIScope>,
         variable_name: Symbol,
         variable_type: Ty<'tcx>,
         scope_metadata: &'ll DIScope,
@@ -532,7 +588,7 @@
         span: Span,
     ) -> &'ll DIVariable {
         let loc = self.lookup_debug_loc(span.lo());
-        let file_metadata = file_metadata(self, &loc.file, dbg_context.defining_crate);
+        let file_metadata = file_metadata(self, &loc.file);
 
         let type_metadata = type_metadata(self, variable_type, span);
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/source_loc.rs b/compiler/rustc_codegen_llvm/src/debuginfo/source_loc.rs
deleted file mode 100644
index 66ae9d7..0000000
--- a/compiler/rustc_codegen_llvm/src/debuginfo/source_loc.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-use super::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
-use super::utils::debug_context;
-
-use crate::common::CodegenCx;
-use crate::llvm::debuginfo::DIScope;
-use crate::llvm::{self, Value};
-use rustc_codegen_ssa::traits::*;
-
-use rustc_data_structures::sync::Lrc;
-use rustc_span::{BytePos, Pos, SourceFile, SourceFileAndLine, Span};
-
-/// A source code location used to generate debug information.
-pub struct DebugLoc {
-    /// Information about the original source file.
-    pub file: Lrc<SourceFile>,
-    /// The (1-based) line number.
-    pub line: Option<u32>,
-    /// The (1-based) column number.
-    pub col: Option<u32>,
-}
-
-impl CodegenCx<'ll, '_> {
-    /// Looks up debug source information about a `BytePos`.
-    pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
-        let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
-            Ok(SourceFileAndLine { sf: file, line }) => {
-                let line_pos = file.line_begin_pos(pos);
-
-                // Use 1-based indexing.
-                let line = (line + 1) as u32;
-                let col = (pos - line_pos).to_u32() + 1;
-
-                (file, Some(line), Some(col))
-            }
-            Err(file) => (file, None, None),
-        };
-
-        // For MSVC, omit the column number.
-        // Otherwise, emit it. This mimics clang behaviour.
-        // See discussion in https://github.com/rust-lang/rust/issues/42921
-        if self.sess().target.target.options.is_like_msvc {
-            DebugLoc { file, line, col: None }
-        } else {
-            DebugLoc { file, line, col }
-        }
-    }
-
-    pub fn create_debug_loc(&self, scope: &'ll DIScope, span: Span) -> &'ll Value {
-        let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
-
-        unsafe {
-            llvm::LLVMRustDIBuilderCreateDebugLocation(
-                debug_context(self).llcontext,
-                line.unwrap_or(UNKNOWN_LINE_NUMBER),
-                col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
-                scope,
-                None,
-            )
-        }
-    }
-}
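
The `lookup_debug_loc` helper deleted here lives on, essentially unchanged, in `debuginfo/mod.rs` (its tail is visible at the top of this section). A minimal standalone sketch of what it computes, assuming a precomputed table of line start offsets beginning at 0 (a stand-in for `SourceFile`'s line table):

    /// A 1-based source location; the column is omitted on MSVC-like targets.
    struct Loc {
        line: u32,
        col: Option<u32>,
    }

    fn lookup_debug_loc(line_starts: &[u32], pos: u32, is_like_msvc: bool) -> Loc {
        // Index of the last line whose start offset is <= pos.
        let line_idx = match line_starts.binary_search(&pos) {
            Ok(i) => i,
            Err(i) => i.saturating_sub(1),
        };
        // Debug info wants 1-based line and column numbers.
        let line = line_idx as u32 + 1;
        let col = pos - line_starts[line_idx] + 1;
        // For MSVC the column is dropped, mirroring clang (see issue 42921 above).
        Loc { line, col: if is_like_msvc { None } else { Some(col) } }
    }
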
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index a3d6882..0591e0a 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -42,7 +42,7 @@
     // be merged.
     llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global);
 
-    if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
+    if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.disable_redzone) {
         llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
     }
 
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 7f5b09e..d52b3be 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -334,8 +334,8 @@
         self.call(expect, &[cond, self.const_bool(expected)], None)
     }
 
-    fn sideeffect(&mut self) {
-        if self.tcx.sess.opts.debugging_opts.insert_sideeffect {
+    fn sideeffect(&mut self, unconditional: bool) {
+        if unconditional || self.tcx.sess.opts.debugging_opts.insert_sideeffect {
             let fnname = self.get_intrinsic(&("llvm.sideeffect"));
             self.call(fnname, &[], None);
         }
@@ -367,7 +367,7 @@
         bx.store(bx.const_i32(0), dest, ret_align);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, try_func, data, catch_func, dest);
-    } else if bx.sess().target.target.options.is_like_emscripten {
+    } else if bx.sess().target.is_like_emscripten {
         codegen_emcc_try(bx, try_func, data, catch_func, dest);
     } else {
         codegen_gnu_try(bx, try_func, data, catch_func, dest);
@@ -390,7 +390,7 @@
 ) {
     let llfn = get_rust_try_fn(bx, &mut |mut bx| {
         bx.set_personality_fn(bx.eh_personality());
-        bx.sideeffect();
+        bx.sideeffect(false);
 
         let mut normal = bx.build_sibling_block("normal");
         let mut catchswitch = bx.build_sibling_block("catchswitch");
@@ -553,7 +553,7 @@
         //      call %catch_func(%data, %ptr)
         //      ret 1
 
-        bx.sideeffect();
+        bx.sideeffect(false);
 
         let mut then = bx.build_sibling_block("then");
         let mut catch = bx.build_sibling_block("catch");
@@ -615,7 +615,7 @@
         //      call %catch_func(%data, %catch_data)
         //      ret 1
 
-        bx.sideeffect();
+        bx.sideeffect(false);
 
         let mut then = bx.build_sibling_block("then");
         let mut catch = bx.build_sibling_block("catch");
@@ -673,17 +673,9 @@
 fn gen_fn<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     name: &str,
-    inputs: Vec<Ty<'tcx>>,
-    output: Ty<'tcx>,
+    rust_fn_sig: ty::PolyFnSig<'tcx>,
     codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
 ) -> &'ll Value {
-    let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
-        inputs.into_iter(),
-        output,
-        false,
-        hir::Unsafety::Unsafe,
-        Abi::Rust,
-    ));
     let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
     let llfn = cx.declare_fn(name, &fn_abi);
     cx.set_frame_pointer_elimination(llfn);
@@ -710,22 +702,31 @@
     // Define the type up front for the signature of the rust_try function.
     let tcx = cx.tcx;
     let i8p = tcx.mk_mut_ptr(tcx.types.i8);
-    let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
+    // `unsafe fn(*mut i8) -> ()`
+    let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
         iter::once(i8p),
         tcx.mk_unit(),
         false,
         hir::Unsafety::Unsafe,
         Abi::Rust,
     )));
-    let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
+    // `unsafe fn(*mut i8, *mut i8) -> ()`
+    let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
         [i8p, i8p].iter().cloned(),
         tcx.mk_unit(),
         false,
         hir::Unsafety::Unsafe,
         Abi::Rust,
     )));
-    let output = tcx.types.i32;
-    let rust_try = gen_fn(cx, "__rust_try", vec![try_fn_ty, i8p, catch_fn_ty], output, codegen);
+    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
+    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
+        vec![try_fn_ty, i8p, catch_fn_ty].into_iter(),
+        tcx.types.i32,
+        false,
+        hir::Unsafety::Unsafe,
+        Abi::Rust,
+    ));
+    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
     cx.rust_try_fn.set(Some(rust_try));
     rust_try
 }
@@ -793,14 +794,18 @@
         require_simd!(arg_tys[1], "argument");
         let v_len = arg_tys[1].simd_size(tcx);
         require!(
-            m_len == v_len,
+            // Allow masks for vectors with fewer than 8 elements to be
+            // represented with a u8 or i8.
+            m_len == v_len || (m_len == 8 && v_len < 8),
             "mismatched lengths: mask length `{}` != other vector length `{}`",
             m_len,
             v_len
         );
         let i1 = bx.type_i1();
-        let i1xn = bx.type_vector(i1, m_len);
-        let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
+        let im = bx.type_ix(v_len);
+        let i1xn = bx.type_vector(i1, v_len);
+        let m_im = bx.trunc(args[0].immediate(), im);
+        let m_i1s = bx.bitcast(m_im, i1xn);
         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
     }
 
@@ -974,12 +979,14 @@
 
         // Integer vector <i{in_bitwidth} x in_len>:
         let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
-            ty::Int(i) => {
-                (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
-            }
-            ty::Uint(i) => {
-                (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
-            }
+            ty::Int(i) => (
+                args[0].immediate(),
+                i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+            ),
+            ty::Uint(i) => (
+                args[0].immediate(),
+                i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+            ),
             _ => return_error!(
                 "vector argument `{}`'s element type `{}`, expected integer element type",
                 in_ty,
@@ -1718,10 +1725,10 @@
 fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
     match ty.kind() {
         ty::Int(t) => {
-            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), true))
+            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), true))
         }
         ty::Uint(t) => {
-            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), false))
+            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), false))
         }
         _ => None,
     }
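
Two of the hunks above change behaviour rather than just field paths: `simd_select` now accepts an 8-bit mask for vectors with fewer than 8 lanes (the mask is truncated to `v_len` bits before the bitcast to `<v_len x i1>`), and `sideeffect` gained an `unconditional` flag. A scalar model of the mask handling, assuming the LSB-first lane mapping of an integer-to-`<N x i1>` bitcast:

    // select(<4 x i1> m, a, b) where m comes from an i8 mask truncated to 4 bits;
    // bit `lane` (least significant first) picks which source supplies that lane.
    fn simd_select_4(mask: u8, a: [i32; 4], b: [i32; 4]) -> [i32; 4] {
        let mask = mask & 0b1111; // the `trunc`: the high mask bits are ignored
        let mut out = [0i32; 4];
        for lane in 0..4 {
            out[lane] = if (mask >> lane) & 1 != 0 { a[lane] } else { b[lane] };
        }
        out
    }
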
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index f14493e..5974b59 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -23,18 +23,17 @@
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::ModuleCodegen;
 use rustc_codegen_ssa::{CodegenResults, CompiledModule};
+use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{ErrorReported, FatalError, Handler};
-use rustc_middle::dep_graph::{DepGraph, WorkProduct};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoaderDyn};
 use rustc_middle::ty::{self, TyCtxt};
-use rustc_serialize::json;
-use rustc_session::config::{self, OptLevel, OutputFilenames, PrintRequest};
+use rustc_session::config::{OptLevel, OutputFilenames, PrintRequest};
 use rustc_session::Session;
 use rustc_span::symbol::Symbol;
 
 use std::any::Any;
 use std::ffi::CStr;
-use std::fs;
 use std::sync::Arc;
 
 mod back {
@@ -95,8 +94,9 @@
         tcx: TyCtxt<'tcx>,
         mods: &mut ModuleLlvm,
         kind: AllocatorKind,
+        has_alloc_error_handler: bool,
     ) {
-        unsafe { allocator::codegen(tcx, mods, kind) }
+        unsafe { allocator::codegen(tcx, mods, kind, has_alloc_error_handler) }
     }
     fn compile_codegen_unit(
         &self,
@@ -115,6 +115,9 @@
     fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
         llvm_util::target_cpu(sess)
     }
+    fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str> {
+        llvm_util::tune_cpu(sess)
+    }
 }
 
 impl WriteBackendMethods for LlvmCodegenBackend {
@@ -248,11 +251,11 @@
     }
 
     fn provide(&self, providers: &mut ty::query::Providers) {
-        attributes::provide(providers);
+        attributes::provide_both(providers);
     }
 
     fn provide_extern(&self, providers: &mut ty::query::Providers) {
-        attributes::provide_extern(providers);
+        attributes::provide_both(providers);
     }
 
     fn codegen_crate<'tcx>(
@@ -273,47 +276,27 @@
         &self,
         ongoing_codegen: Box<dyn Any>,
         sess: &Session,
-        dep_graph: &DepGraph,
-    ) -> Result<Box<dyn Any>, ErrorReported> {
+    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
         let (codegen_results, work_products) = ongoing_codegen
             .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
             .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
             .join(sess);
-        if sess.opts.debugging_opts.incremental_info {
-            rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results);
-        }
 
-        sess.time("serialize_work_products", move || {
-            rustc_incremental::save_work_product_index(sess, &dep_graph, work_products)
+        sess.time("llvm_dump_timing_file", || {
+            if sess.opts.debugging_opts.llvm_time_trace {
+                llvm_util::time_trace_profiler_finish("llvm_timings.json");
+            }
         });
 
-        sess.compile_status()?;
-
-        Ok(Box::new(codegen_results))
+        Ok((codegen_results, work_products))
     }
 
     fn link(
         &self,
         sess: &Session,
-        codegen_results: Box<dyn Any>,
+        codegen_results: CodegenResults,
         outputs: &OutputFilenames,
     ) -> Result<(), ErrorReported> {
-        let codegen_results = codegen_results
-            .downcast::<CodegenResults>()
-            .expect("Expected CodegenResults, found Box<Any>");
-
-        if sess.opts.debugging_opts.no_link {
-            // FIXME: use a binary format to encode the `.rlink` file
-            let rlink_data = json::encode(&codegen_results).map_err(|err| {
-                sess.fatal(&format!("failed to encode rlink: {}", err));
-            })?;
-            let rlink_file = outputs.with_extension(config::RLINK_EXT);
-            fs::write(&rlink_file, rlink_data).map_err(|err| {
-                sess.fatal(&format!("failed to write file {}: {}", rlink_file.display(), err));
-            })?;
-            return Ok(());
-        }
-
         // Run the linker on any artifacts that resulted from the LLVM run.
         // This should produce either a finished executable or library.
         sess.time("link_crate", || {
@@ -330,16 +313,6 @@
             );
         });
 
-        // Now that we won't touch anything in the incremental compilation directory
-        // any more, we can finalize it (which involves renaming it)
-        rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash);
-
-        sess.time("llvm_dump_timing_file", || {
-            if sess.opts.debugging_opts.llvm_time_trace {
-                llvm_util::time_trace_profiler_finish("llvm_timings.json");
-            }
-        });
-
         Ok(())
     }
 }
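
The trait changes above drop the type-erased `Box<dyn Any>` hand-off between codegen joining and linking. A minimal sketch of the new shape, using stand-in types and a placeholder error type for the rustc ones:

    use std::any::Any;
    use std::collections::HashMap;

    struct CodegenResults;
    struct WorkProduct;
    type WorkProductId = u64;

    trait Backend {
        // `join_codegen` now returns the results and work products directly;
        // the work products go back to the caller instead of being serialized here.
        fn join_codegen(
            &self,
            ongoing: Box<dyn Any>,
        ) -> Result<(CodegenResults, HashMap<WorkProductId, WorkProduct>), ()>;

        // `link` takes `CodegenResults` by value, so the runtime downcast that
        // used to open this function is no longer needed.
        fn link(&self, codegen_results: CodegenResults) -> Result<(), ()>;
    }
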
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index af3f3e7..8b15c8b 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -5,8 +5,9 @@
 
 use super::debuginfo::{
     DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
-    DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DINameSpace, DISPFlags, DIScope,
-    DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable, DebugEmissionKind,
+    DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace,
+    DISPFlags, DIScope, DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable,
+    DebugEmissionKind,
 };
 
 use libc::{c_char, c_int, c_uint, size_t};
@@ -557,6 +558,7 @@
     None,
     MD5,
     SHA1,
+    SHA256,
 }
 
 extern "C" {
@@ -794,6 +796,7 @@
     pub struct DIBuilder<'a>(InvariantOpaque<'a>);
 
     pub type DIDescriptor = Metadata;
+    pub type DILocation = Metadata;
     pub type DIScope = DIDescriptor;
     pub type DIFile = DIScope;
     pub type DILexicalBlock = DIScope;
@@ -1854,7 +1857,7 @@
         ScopeLine: c_uint,
         Flags: DIFlags,
         SPFlags: DISPFlags,
-        Fn: &'a Value,
+        MaybeFn: Option<&'a Value>,
         TParam: &'a DIArray,
         Decl: Option<&'a DIDescriptor>,
     ) -> &'a DISubprogram;
@@ -2005,7 +2008,7 @@
         VarInfo: &'a DIVariable,
         AddrOps: *const i64,
         AddrOpsCount: c_uint,
-        DL: &'a Value,
+        DL: &'a DILocation,
         InsertAtEnd: &'a BasicBlock,
     ) -> &'a Value;
 
@@ -2093,8 +2096,8 @@
         Line: c_uint,
         Column: c_uint,
         Scope: &'a DIScope,
-        InlinedAt: Option<&'a Metadata>,
-    ) -> &'a Value;
+        InlinedAt: Option<&'a DILocation>,
+    ) -> &'a DILocation;
     pub fn LLVMRustDIBuilderCreateOpDeref() -> i64;
     pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> i64;
 
@@ -2362,4 +2365,10 @@
         bytecode_len: usize,
     ) -> bool;
     pub fn LLVMRustLinkerFree(linker: &'a mut Linker<'a>);
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustComputeLTOCacheKey(
+        key_out: &RustString,
+        mod_id: *const c_char,
+        data: &ThinLTOData,
+    );
 }
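
One detail in the FFI changes above: `Fn: &'a Value` becomes `MaybeFn: Option<&'a Value>`. For references, Rust guarantees the null-pointer optimization, so `Option<&T>` stays ABI-compatible with a nullable C pointer and "no definition yet" can be expressed by passing `None`. A tiny check of that guarantee:

    use std::mem::size_of;

    struct Value; // opaque stand-in for the LLVM handle

    fn main() {
        // No discriminant is added: None is encoded as the null pointer, so the
        // Option wrapper has the same size (and ABI) as the plain reference.
        assert_eq!(size_of::<Option<&Value>>(), size_of::<&Value>());
    }
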
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index ed9b991..53a404e 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -118,11 +118,6 @@
     }
 }
 
-pub fn set_thread_local(global: &'a Value, is_thread_local: bool) {
-    unsafe {
-        LLVMSetThreadLocal(global, is_thread_local as Bool);
-    }
-}
 pub fn set_thread_local_mode(global: &'a Value, mode: ThreadLocalMode) {
     unsafe {
         LLVMSetThreadLocalMode(global, mode);
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 900f2df..ab70f72 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -1,12 +1,12 @@
 use crate::back::write::create_informational_target_machine;
 use crate::llvm;
 use libc::c_int;
+use rustc_codegen_ssa::target_features::supported_target_features;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_feature::UnstableFeatures;
 use rustc_middle::bug;
 use rustc_session::config::PrintRequest;
 use rustc_session::Session;
-use rustc_span::symbol::sym;
 use rustc_span::symbol::Symbol;
 use rustc_target::spec::{MergeFunctions, PanicStrategy};
 use std::ffi::CString;
@@ -46,7 +46,7 @@
 }
 
 unsafe fn configure_llvm(sess: &Session) {
-    let n_args = sess.opts.cg.llvm_args.len() + sess.target.target.options.llvm_args.len();
+    let n_args = sess.opts.cg.llvm_args.len() + sess.target.llvm_args.len();
     let mut llvm_c_strs = Vec::with_capacity(n_args + 1);
     let mut llvm_args = Vec::with_capacity(n_args + 1);
 
@@ -57,7 +57,7 @@
     }
 
     let cg_opts = sess.opts.cg.llvm_args.iter();
-    let tg_opts = sess.target.target.options.llvm_args.iter();
+    let tg_opts = sess.target.llvm_args.iter();
     let sess_args = cg_opts.chain(tg_opts);
 
     let user_specified_args: FxHashSet<_> =
@@ -84,21 +84,14 @@
         if !sess.opts.debugging_opts.no_generate_arange_section {
             add("-generate-arange-section", false);
         }
-        match sess
-            .opts
-            .debugging_opts
-            .merge_functions
-            .unwrap_or(sess.target.target.options.merge_functions)
-        {
+        match sess.opts.debugging_opts.merge_functions.unwrap_or(sess.target.merge_functions) {
             MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
             MergeFunctions::Aliases => {
                 add("-mergefunc-use-aliases", false);
             }
         }
 
-        if sess.target.target.target_os == "emscripten"
-            && sess.panic_strategy() == PanicStrategy::Unwind
-        {
+        if sess.target.os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind {
             add("-enable-emscripten-cxx-exceptions", false);
         }
 
@@ -122,7 +115,7 @@
 
     llvm::LLVMInitializePasses();
 
-    ::rustc_llvm::initialize_available_targets();
+    rustc_llvm::initialize_available_targets();
 
     llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
 }
@@ -139,142 +132,8 @@
 // WARNING: the features after applying `to_llvm_feature` must be known
 // to LLVM or the feature detection code will walk past the end of the feature
 // array, leading to crashes.
-
-const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("aclass", Some(sym::arm_target_feature)),
-    ("mclass", Some(sym::arm_target_feature)),
-    ("rclass", Some(sym::arm_target_feature)),
-    ("dsp", Some(sym::arm_target_feature)),
-    ("neon", Some(sym::arm_target_feature)),
-    ("crc", Some(sym::arm_target_feature)),
-    ("crypto", Some(sym::arm_target_feature)),
-    ("v5te", Some(sym::arm_target_feature)),
-    ("v6", Some(sym::arm_target_feature)),
-    ("v6k", Some(sym::arm_target_feature)),
-    ("v6t2", Some(sym::arm_target_feature)),
-    ("v7", Some(sym::arm_target_feature)),
-    ("v8", Some(sym::arm_target_feature)),
-    ("vfp2", Some(sym::arm_target_feature)),
-    ("vfp3", Some(sym::arm_target_feature)),
-    ("vfp4", Some(sym::arm_target_feature)),
-    // This is needed for inline assembly, but shouldn't be stabilized as-is
-    // since it should be enabled per-function using #[instruction_set], not
-    // #[target_feature].
-    ("thumb-mode", Some(sym::arm_target_feature)),
-];
-
-const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("fp", Some(sym::aarch64_target_feature)),
-    ("neon", Some(sym::aarch64_target_feature)),
-    ("sve", Some(sym::aarch64_target_feature)),
-    ("crc", Some(sym::aarch64_target_feature)),
-    ("crypto", Some(sym::aarch64_target_feature)),
-    ("ras", Some(sym::aarch64_target_feature)),
-    ("lse", Some(sym::aarch64_target_feature)),
-    ("rdm", Some(sym::aarch64_target_feature)),
-    ("fp16", Some(sym::aarch64_target_feature)),
-    ("rcpc", Some(sym::aarch64_target_feature)),
-    ("dotprod", Some(sym::aarch64_target_feature)),
-    ("tme", Some(sym::aarch64_target_feature)),
-    ("v8.1a", Some(sym::aarch64_target_feature)),
-    ("v8.2a", Some(sym::aarch64_target_feature)),
-    ("v8.3a", Some(sym::aarch64_target_feature)),
-];
-
-const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("adx", Some(sym::adx_target_feature)),
-    ("aes", None),
-    ("avx", None),
-    ("avx2", None),
-    ("avx512bw", Some(sym::avx512_target_feature)),
-    ("avx512cd", Some(sym::avx512_target_feature)),
-    ("avx512dq", Some(sym::avx512_target_feature)),
-    ("avx512er", Some(sym::avx512_target_feature)),
-    ("avx512f", Some(sym::avx512_target_feature)),
-    ("avx512ifma", Some(sym::avx512_target_feature)),
-    ("avx512pf", Some(sym::avx512_target_feature)),
-    ("avx512vbmi", Some(sym::avx512_target_feature)),
-    ("avx512vl", Some(sym::avx512_target_feature)),
-    ("avx512vpopcntdq", Some(sym::avx512_target_feature)),
-    ("bmi1", None),
-    ("bmi2", None),
-    ("cmpxchg16b", Some(sym::cmpxchg16b_target_feature)),
-    ("f16c", Some(sym::f16c_target_feature)),
-    ("fma", None),
-    ("fxsr", None),
-    ("lzcnt", None),
-    ("movbe", Some(sym::movbe_target_feature)),
-    ("pclmulqdq", None),
-    ("popcnt", None),
-    ("rdrand", None),
-    ("rdseed", None),
-    ("rtm", Some(sym::rtm_target_feature)),
-    ("sha", None),
-    ("sse", None),
-    ("sse2", None),
-    ("sse3", None),
-    ("sse4.1", None),
-    ("sse4.2", None),
-    ("sse4a", Some(sym::sse4a_target_feature)),
-    ("ssse3", None),
-    ("tbm", Some(sym::tbm_target_feature)),
-    ("xsave", None),
-    ("xsavec", None),
-    ("xsaveopt", None),
-    ("xsaves", None),
-];
-
-const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("hvx", Some(sym::hexagon_target_feature)),
-    ("hvx-length128b", Some(sym::hexagon_target_feature)),
-];
-
-const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("altivec", Some(sym::powerpc_target_feature)),
-    ("power8-altivec", Some(sym::powerpc_target_feature)),
-    ("power9-altivec", Some(sym::powerpc_target_feature)),
-    ("power8-vector", Some(sym::powerpc_target_feature)),
-    ("power9-vector", Some(sym::powerpc_target_feature)),
-    ("vsx", Some(sym::powerpc_target_feature)),
-];
-
-const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
-    &[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];
-
-const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("m", Some(sym::riscv_target_feature)),
-    ("a", Some(sym::riscv_target_feature)),
-    ("c", Some(sym::riscv_target_feature)),
-    ("f", Some(sym::riscv_target_feature)),
-    ("d", Some(sym::riscv_target_feature)),
-    ("e", Some(sym::riscv_target_feature)),
-];
-
-const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("simd128", Some(sym::wasm_target_feature)),
-    ("atomics", Some(sym::wasm_target_feature)),
-    ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
-];
-
-/// When rustdoc is running, provide a list of all known features so that all their respective
-/// primitives may be documented.
-///
-/// IMPORTANT: If you're adding another feature list above, make sure to add it to this iterator!
-pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol>)> {
-    std::iter::empty()
-        .chain(ARM_ALLOWED_FEATURES.iter())
-        .chain(AARCH64_ALLOWED_FEATURES.iter())
-        .chain(X86_ALLOWED_FEATURES.iter())
-        .chain(HEXAGON_ALLOWED_FEATURES.iter())
-        .chain(POWERPC_ALLOWED_FEATURES.iter())
-        .chain(MIPS_ALLOWED_FEATURES.iter())
-        .chain(RISCV_ALLOWED_FEATURES.iter())
-        .chain(WASM_ALLOWED_FEATURES.iter())
-        .cloned()
-}
-
 pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str {
-    let arch = if sess.target.target.arch == "x86_64" { "x86" } else { &*sess.target.target.arch };
+    let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
     match (arch, s) {
         ("x86", "pclmulqdq") => "pclmul",
         ("x86", "rdrand") => "rdrnd",
@@ -306,20 +165,6 @@
         .collect()
 }
 
-pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Option<Symbol>)] {
-    match &*sess.target.target.arch {
-        "arm" => ARM_ALLOWED_FEATURES,
-        "aarch64" => AARCH64_ALLOWED_FEATURES,
-        "x86" | "x86_64" => X86_ALLOWED_FEATURES,
-        "hexagon" => HEXAGON_ALLOWED_FEATURES,
-        "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
-        "powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
-        "riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
-        "wasm32" => WASM_ALLOWED_FEATURES,
-        _ => &[],
-    }
-}
-
 pub fn print_version() {
     // Can be called without initializing LLVM
     unsafe {
@@ -350,11 +195,7 @@
     }
 }
 
-pub fn target_cpu(sess: &Session) -> &str {
-    let name = match sess.opts.cg.target_cpu {
-        Some(ref s) => &**s,
-        None => &*sess.target.target.options.cpu,
-    };
+fn handle_native(name: &str) -> &str {
     if name != "native" {
         return name;
     }
@@ -365,3 +206,19 @@
         str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
     }
 }
+
+pub fn target_cpu(sess: &Session) -> &str {
+    let name = match sess.opts.cg.target_cpu {
+        Some(ref s) => &**s,
+        None => &*sess.target.cpu,
+    };
+
+    handle_native(name)
+}
+
+pub fn tune_cpu(sess: &Session) -> Option<&str> {
+    match sess.opts.debugging_opts.tune_cpu {
+        Some(ref s) => Some(handle_native(&**s)),
+        None => None,
+    }
+}
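
A standalone sketch of the refactor above: `handle_native` is split out so both `-C target-cpu` and the new `-Z tune-cpu` resolve the sentinel value "native" the same way; `host_cpu` stands in for the `LLVMRustGetHostCPUName` call:

    fn handle_native<'a>(name: &'a str, host_cpu: &'a str) -> &'a str {
        // "native" means: use the CPU LLVM detects on the host.
        if name == "native" { host_cpu } else { name }
    }

    fn target_cpu<'a>(cg_target_cpu: Option<&'a str>, spec_cpu: &'a str, host_cpu: &'a str) -> &'a str {
        // -C target-cpu wins over the target spec's default CPU.
        handle_native(cg_target_cpu.unwrap_or(spec_cpu), host_cpu)
    }

    fn tune_cpu<'a>(z_tune_cpu: Option<&'a str>, host_cpu: &'a str) -> Option<&'a str> {
        // -Z tune-cpu has no target-spec fallback; absent means "do not tune".
        z_tune_cpu.map(|s| handle_native(s, host_cpu))
    }
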
diff --git a/compiler/rustc_codegen_llvm/src/metadata.rs b/compiler/rustc_codegen_llvm/src/metadata.rs
index 9036428..3912d6a 100644
--- a/compiler/rustc_codegen_llvm/src/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/metadata.rs
@@ -104,7 +104,7 @@
     // As a result, we choose a slightly shorter name! As to why
     // `.note.rustc` works on MinGW, that's another good question...
 
-    if target.options.is_like_osx { "__DATA,.rustc" } else { ".rustc" }
+    if target.is_like_osx { "__DATA,.rustc" } else { ".rustc" }
 }
 
 fn read_metadata_section_name(_target: &Target) -> &'static str {
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 22ed4dd..3fc56ee 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -52,7 +52,7 @@
     let next = bx.inbounds_gep(addr, &[full_direct_size]);
     bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
-    if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.target.target_endian == "big" {
+    if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.endian == "big" {
         let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
         let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
         (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
@@ -105,7 +105,7 @@
     let mut end = bx.build_sibling_block("va_arg.end");
     let zero = bx.const_i32(0);
     let offset_align = Align::from_bytes(4).unwrap();
-    assert!(&*bx.tcx().sess.target.target.target_endian == "little");
+    assert!(&*bx.tcx().sess.target.endian == "little");
 
     let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
     let (reg_off, reg_top_index, slot_size) = if gr_type {
@@ -171,28 +171,26 @@
 ) -> &'ll Value {
     // Determine the va_arg implementation to use. The LLVM va_arg instruction
     // is lacking in some instances, so we should only use it as a fallback.
-    let target = &bx.cx.tcx.sess.target.target;
-    let arch = &bx.cx.tcx.sess.target.target.arch;
-    match (&**arch, target.options.is_like_windows) {
+    let target = &bx.cx.tcx.sess.target;
+    let arch = &bx.cx.tcx.sess.target.arch;
+    match &**arch {
         // Windows x86
-        ("x86", true) => {
+        "x86" if target.is_like_windows => {
             emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
         }
         // Generic x86
-        ("x86", _) => {
-            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true)
-        }
+        "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
         // Windows AArch64
-        ("aarch64", true) => {
+        "aarch64" if target.is_like_windows => {
             emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
         }
-        // iOS AArch64
-        ("aarch64", _) if target.target_os == "ios" => {
+        // macOS / iOS AArch64
+        "aarch64" if target.is_like_osx => {
             emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
         }
-        ("aarch64", _) => emit_aapcs_va_arg(bx, addr, target_ty),
+        "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
         // Windows x86_64
-        ("x86_64", true) => {
+        "x86_64" if target.is_like_windows => {
             let target_ty_size = bx.cx.size_of(target_ty).bytes();
             let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
             emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
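
A sketch of the reshaped dispatch above (not part of the diff): the match is now on the architecture string alone, with `is_like_windows`/`is_like_osx` as guards, and the iOS-only AArch64 case is widened to all OSX-like targets. The alignments and the trailing boolean mirror the `emit_ptr_va_arg` arguments in the hunk; the names used here are illustrative:

    enum VaArgKind {
        Ptr { slot_align: u64, allow_higher_align: bool },
        Aapcs,
        LlvmFallback,
    }

    fn classify_va_arg(arch: &str, is_like_windows: bool, is_like_osx: bool) -> VaArgKind {
        match arch {
            "x86" if is_like_windows => VaArgKind::Ptr { slot_align: 4, allow_higher_align: false },
            "x86" => VaArgKind::Ptr { slot_align: 4, allow_higher_align: true },
            "aarch64" if is_like_windows => VaArgKind::Ptr { slot_align: 8, allow_higher_align: false },
            "aarch64" if is_like_osx => VaArgKind::Ptr { slot_align: 8, allow_higher_align: true },
            "aarch64" => VaArgKind::Aapcs,
            // Windows x86_64 and the remaining targets continue past this sketch.
            _ => VaArgKind::LlvmFallback,
        }
    }
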
diff --git a/compiler/rustc_codegen_ssa/src/back/archive.rs b/compiler/rustc_codegen_ssa/src/back/archive.rs
index f83b4b2..c477ac6 100644
--- a/compiler/rustc_codegen_ssa/src/back/archive.rs
+++ b/compiler/rustc_codegen_ssa/src/back/archive.rs
@@ -7,12 +7,8 @@
 pub fn find_library(name: Symbol, search_paths: &[PathBuf], sess: &Session) -> PathBuf {
     // On Windows, static libraries sometimes show up as libfoo.a and other
     // times show up as foo.lib
-    let oslibname = format!(
-        "{}{}{}",
-        sess.target.target.options.staticlib_prefix,
-        name,
-        sess.target.target.options.staticlib_suffix
-    );
+    let oslibname =
+        format!("{}{}{}", sess.target.staticlib_prefix, name, sess.target.staticlib_suffix);
     let unixlibname = format!("lib{}.a", name);
 
     for path in search_paths {
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index 010fd4e..5a627a0 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -15,7 +15,7 @@
 use rustc_span::symbol::Symbol;
 use rustc_target::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
 use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor};
-use rustc_target::spec::{PanicStrategy, RelocModel, RelroLevel};
+use rustc_target::spec::{PanicStrategy, RelocModel, RelroLevel, Target};
 
 use super::archive::ArchiveBuilder;
 use super::command::Command;
@@ -151,9 +151,7 @@
         Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker),
         _ => match flavor {
             LinkerFlavor::Lld(f) => Command::lld(linker, f),
-            LinkerFlavor::Msvc
-                if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() =>
-            {
+            LinkerFlavor::Msvc if sess.opts.cg.linker.is_none() && sess.target.linker.is_none() => {
                 Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker))
             }
             _ => Command::new(linker),
@@ -163,9 +161,9 @@
     // UWP apps have API restrictions enforced during Store submissions.
     // To comply with the Windows App Certification Kit,
     // MSVC needs to link with the Store versions of the runtime libraries (vcruntime, msvcrt, etc).
-    let t = &sess.target.target;
+    let t = &sess.target;
     if (flavor == LinkerFlavor::Msvc || flavor == LinkerFlavor::Lld(LldFlavor::Link))
-        && t.target_vendor == "uwp"
+        && t.vendor == "uwp"
     {
         if let Some(ref tool) = msvc_tool {
             let original_path = tool.path();
@@ -197,7 +195,7 @@
     // PATH for the child.
     let mut new_path = sess.host_filesearch(PathKind::All).get_tools_search_paths(self_contained);
     let mut msvc_changed_path = false;
-    if sess.target.target.options.is_like_msvc {
+    if sess.target.is_like_msvc {
         if let Some(ref tool) = msvc_tool {
             cmd.args(tool.args());
             for &(ref k, ref v) in tool.env() {
@@ -365,7 +363,7 @@
             // After adding all files to the archive, we need to update the
             // symbol table of the archive. This currently dies on macOS (see
             // #11162), and isn't necessary there anyway
-            if !sess.target.target.options.is_like_osx {
+            if !sess.target.is_like_osx {
                 ab.update_symbols();
             }
         }
@@ -476,10 +474,10 @@
 
     linker::disable_localization(&mut cmd);
 
-    for &(ref k, ref v) in &sess.target.target.options.link_env {
+    for &(ref k, ref v) in &sess.target.link_env {
         cmd.env(k, v);
     }
-    for k in &sess.target.target.options.link_env_remove {
+    for k in &sess.target.link_env_remove {
         cmd.env_remove(k);
     }
 
@@ -515,7 +513,7 @@
         // if the linker doesn't support -no-pie then it should not default to
         // linking executables as pie. Different versions of gcc seem to use
         // different quotes in the error message so don't check for them.
-        if sess.target.target.options.linker_is_gnu
+        if sess.target.linker_is_gnu
             && flavor != LinkerFlavor::Ld
             && (out.contains("unrecognized command line option")
                 || out.contains("unknown argument"))
@@ -535,7 +533,7 @@
 
         // Detect '-static-pie' used with an older version of gcc or clang not supporting it.
         // Fallback from '-static-pie' to '-static' in that case.
-        if sess.target.target.options.linker_is_gnu
+        if sess.target.linker_is_gnu
             && flavor != LinkerFlavor::Ld
             && (out.contains("unrecognized command line option")
                 || out.contains("unknown argument"))
@@ -548,7 +546,7 @@
             );
             // Mirror `add_(pre,post)_link_objects` to replace CRT objects.
             let self_contained = crt_objects_fallback(sess, crate_type);
-            let opts = &sess.target.target.options;
+            let opts = &sess.target;
             let pre_objects = if self_contained {
                 &opts.pre_link_objects_fallback
             } else {
@@ -670,7 +668,7 @@
                 // is not a Microsoft LNK error then suggest a way to fix or
                 // install the Visual Studio build tools.
                 if let Some(code) = prog.status.code() {
-                    if sess.target.target.options.is_like_msvc
+                    if sess.target.is_like_msvc
                         && flavor == LinkerFlavor::Msvc
                         // Respect the command line override
                         && sess.opts.cg.linker.is_none()
@@ -741,7 +739,7 @@
 
             linker_error.emit();
 
-            if sess.target.target.options.is_like_msvc && linker_not_found {
+            if sess.target.is_like_msvc && linker_not_found {
                 sess.note_without_error(
                     "the msvc targets depend on the msvc linker \
                      but `link.exe` was not found",
@@ -758,7 +756,7 @@
     // On macOS, debuggers need this utility to get run to do some munging of
     // the symbols. Note, though, that if the object files are being preserved
     // for their debug information there's no need for us to run dsymutil.
-    if sess.target.target.options.is_like_osx
+    if sess.target.is_like_osx
         && sess.opts.debuginfo != DebugInfo::None
         && !preserve_objects_for_their_debuginfo(sess)
     {
@@ -775,9 +773,7 @@
     // executables only.
     let needs_runtime = match crate_type {
         CrateType::Executable => true,
-        CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => {
-            sess.target.target.options.is_like_osx
-        }
+        CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => sess.target.is_like_osx,
         CrateType::Rlib | CrateType::Staticlib => false,
     };
 
@@ -846,7 +842,7 @@
     // If our target enables builtin function lowering in LLVM then the
     // crates providing these functions don't participate in LTO (e.g.
     // no_builtins or compiler builtins crates).
-    !sess.target.target.options.no_builtins
+    !sess.target.no_builtins
         && (info.compiler_builtins == Some(cnum) || info.is_no_builtins.contains(&cnum))
 }
 
@@ -906,10 +902,10 @@
                 } else if stem == "link" || stem == "lld-link" {
                     LinkerFlavor::Msvc
                 } else if stem == "lld" || stem == "rust-lld" {
-                    LinkerFlavor::Lld(sess.target.target.options.lld_flavor)
+                    LinkerFlavor::Lld(sess.target.lld_flavor)
                 } else {
                     // fall back to the value in the target spec
-                    sess.target.target.linker_flavor
+                    sess.target.linker_flavor
                 };
 
                 Some((linker, flavor))
@@ -926,8 +922,8 @@
 
     if let Some(ret) = infer_from(
         sess,
-        sess.target.target.options.linker.clone().map(PathBuf::from),
-        Some(sess.target.target.linker_flavor),
+        sess.target.linker.clone().map(PathBuf::from),
+        Some(sess.target.linker_flavor),
     ) {
         return ret;
     }
@@ -962,7 +958,7 @@
     // Basically as a result this just means that if we're on OSX and we're
     // *not* running dsymutil then the object files are the only source of truth
     // for debug information, so we must preserve them.
-    if sess.target.target.options.is_like_osx {
+    if sess.target.is_like_osx {
         return !sess.opts.debugging_opts.run_dsymutil;
     }
 
@@ -988,7 +984,7 @@
                 NativeLibKind::StaticNoBundle
                 | NativeLibKind::Dylib
                 | NativeLibKind::Unspecified => {
-                    if sess.target.target.options.is_like_msvc {
+                    if sess.target.is_like_msvc {
                         Some(format!("{}.lib", name))
                     } else {
                         Some(format!("-l{}", name))
@@ -1070,16 +1066,13 @@
     let mut args = String::new();
     for arg in cmd2.take_args() {
         args.push_str(
-            &Escape {
-                arg: arg.to_str().unwrap(),
-                is_like_msvc: sess.target.target.options.is_like_msvc,
-            }
-            .to_string(),
+            &Escape { arg: arg.to_str().unwrap(), is_like_msvc: sess.target.is_like_msvc }
+                .to_string(),
         );
         args.push('\n');
     }
     let file = tmpdir.join("linker-arguments");
-    let bytes = if sess.target.target.options.is_like_msvc {
+    let bytes = if sess.target.is_like_msvc {
         let mut out = Vec::with_capacity((1 + args.len()) * 2);
         // start the stream with a UTF-16 BOM
         for c in std::iter::once(0xFEFF).chain(args.encode_utf16()) {
@@ -1195,7 +1188,7 @@
     };
 
     // Adjust the output kind to target capabilities.
-    let opts = &sess.target.target.options;
+    let opts = &sess.target;
     let pic_exe_supported = opts.position_independent_executables;
     let static_pic_exe_supported = opts.static_position_independent_executables;
     let static_dylib_supported = opts.crt_static_allows_dylibs;
@@ -1236,14 +1229,14 @@
         return self_contained;
     }
 
-    match sess.target.target.options.crt_objects_fallback {
+    match sess.target.crt_objects_fallback {
         // FIXME: Find a better heuristic for "native musl toolchain is available",
         // based on host and linker path, for example.
         // (https://github.com/rust-lang/rust/pull/71769#issuecomment-626330237).
         Some(CrtObjectsFallback::Musl) => sess.crt_static(Some(crate_type)),
         Some(CrtObjectsFallback::Mingw) => {
-            sess.host == sess.target.target
-                && sess.target.target.target_vendor != "uwp"
+            sess.host == sess.target
+                && sess.target.vendor != "uwp"
                 && detect_self_contained_mingw(&sess)
         }
         // FIXME: Figure out cases in which WASM needs to link with a native toolchain.
@@ -1259,7 +1252,7 @@
     link_output_kind: LinkOutputKind,
     self_contained: bool,
 ) {
-    let opts = &sess.target.target.options;
+    let opts = &sess.target;
     let objects =
         if self_contained { &opts.pre_link_objects_fallback } else { &opts.pre_link_objects };
     for obj in objects.get(&link_output_kind).iter().copied().flatten() {
@@ -1274,7 +1267,7 @@
     link_output_kind: LinkOutputKind,
     self_contained: bool,
 ) {
-    let opts = &sess.target.target.options;
+    let opts = &sess.target;
     let objects =
         if self_contained { &opts.post_link_objects_fallback } else { &opts.post_link_objects };
     for obj in objects.get(&link_output_kind).iter().copied().flatten() {
@@ -1285,7 +1278,7 @@
 /// Add arbitrary "pre-link" args defined by the target spec or from command line.
 /// FIXME: Determine where exactly these args need to be inserted.
 fn add_pre_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
-    if let Some(args) = sess.target.target.options.pre_link_args.get(&flavor) {
+    if let Some(args) = sess.target.pre_link_args.get(&flavor) {
         cmd.args(args);
     }
     cmd.args(&sess.opts.debugging_opts.pre_link_args);
@@ -1293,13 +1286,13 @@
 
 /// Add a link script embedded in the target, if applicable.
 fn add_link_script(cmd: &mut dyn Linker, sess: &Session, tmpdir: &Path, crate_type: CrateType) {
-    match (crate_type, &sess.target.target.options.link_script) {
+    match (crate_type, &sess.target.link_script) {
         (CrateType::Cdylib | CrateType::Executable, Some(script)) => {
-            if !sess.target.target.options.linker_is_gnu {
+            if !sess.target.linker_is_gnu {
                 sess.fatal("can only use link script when linking with GNU-like linker");
             }
 
-            let file_name = ["rustc", &sess.target.target.llvm_target, "linkfile.ld"].join("-");
+            let file_name = ["rustc", &sess.target.llvm_target, "linkfile.ld"].join("-");
 
             let path = tmpdir.join(file_name);
             if let Err(e) = fs::write(&path, script) {
@@ -1338,15 +1331,15 @@
             *ty == crate_type && list.iter().any(|&linkage| linkage == Linkage::Dynamic)
         });
     if any_dynamic_crate {
-        if let Some(args) = sess.target.target.options.late_link_args_dynamic.get(&flavor) {
+        if let Some(args) = sess.target.late_link_args_dynamic.get(&flavor) {
             cmd.args(args);
         }
     } else {
-        if let Some(args) = sess.target.target.options.late_link_args_static.get(&flavor) {
+        if let Some(args) = sess.target.late_link_args_static.get(&flavor) {
             cmd.args(args);
         }
     }
-    if let Some(args) = sess.target.target.options.late_link_args.get(&flavor) {
+    if let Some(args) = sess.target.late_link_args.get(&flavor) {
         cmd.args(args);
     }
 }
@@ -1354,7 +1347,7 @@
 /// Add arbitrary "post-link" args defined by the target spec.
 /// FIXME: Determine where exactly these args need to be inserted.
 fn add_post_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
-    if let Some(args) = sess.target.target.options.post_link_args.get(&flavor) {
+    if let Some(args) = sess.target.post_link_args.get(&flavor) {
         cmd.args(args);
     }
 }
@@ -1456,7 +1449,7 @@
 /// Add options making relocation sections in the produced ELF files read-only
 /// and suppressing lazy binding.
 fn add_relro_args(cmd: &mut dyn Linker, sess: &Session) {
-    match sess.opts.debugging_opts.relro_level.unwrap_or(sess.target.target.options.relro_level) {
+    match sess.opts.debugging_opts.relro_level.unwrap_or(sess.target.relro_level) {
         RelroLevel::Full => cmd.full_relro(),
         RelroLevel::Partial => cmd.partial_relro(),
         RelroLevel::Off => cmd.no_relro(),
@@ -1487,9 +1480,9 @@
         let mut rpath_config = RPathConfig {
             used_crates: &codegen_results.crate_info.used_crates_dynamic,
             out_filename: out_filename.to_path_buf(),
-            has_rpath: sess.target.target.options.has_rpath,
-            is_like_osx: sess.target.target.options.is_like_osx,
-            linker_is_gnu: sess.target.target.options.linker_is_gnu,
+            has_rpath: sess.target.has_rpath,
+            is_like_osx: sess.target.is_like_osx,
+            linker_is_gnu: sess.target.linker_is_gnu,
             get_install_prefix_lib_path: &mut get_install_prefix_lib_path,
         };
         cmd.args(&rpath::get_rpath_flags(&mut rpath_config));
@@ -1517,7 +1510,7 @@
     let base_cmd = get_linker(sess, path, flavor, crt_objects_fallback);
     // FIXME: Move `/LIBPATH` addition for uwp targets from the linker construction
     // to the linker args construction.
-    assert!(base_cmd.get_args().is_empty() || sess.target.target.target_vendor == "uwp");
+    assert!(base_cmd.get_args().is_empty() || sess.target.vendor == "uwp");
     let cmd = &mut *codegen_results.linker_info.to_linker(base_cmd, &sess, flavor, target_cpu);
     let link_output_kind = link_output_kind(sess, crate_type);
 
@@ -1531,7 +1524,7 @@
     add_link_script(cmd, sess, tmpdir, crate_type);
 
     // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
-    if sess.target.target.options.is_like_fuchsia && crate_type == CrateType::Executable {
+    if sess.target.is_like_fuchsia && crate_type == CrateType::Executable {
         let prefix = if sess.opts.debugging_opts.sanitizer.contains(SanitizerSet::ADDRESS) {
             "asan/"
         } else {
@@ -1541,7 +1534,7 @@
     }
 
     // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
-    if sess.target.target.options.eh_frame_header {
+    if sess.target.eh_frame_header {
         cmd.add_eh_frame_header();
     }
 
@@ -1554,7 +1547,7 @@
     add_pre_link_objects(cmd, sess, link_output_kind, crt_objects_fallback);
 
     // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
-    if sess.target.target.options.is_like_emscripten {
+    if sess.target.is_like_emscripten {
         cmd.arg("-s");
         cmd.arg(if sess.panic_strategy() == PanicStrategy::Abort {
             "DISABLE_EXCEPTION_CATCHING=1"
@@ -1582,7 +1575,7 @@
     cmd.output_filename(out_filename);
 
     // OBJECT-FILES-NO, AUDIT-ORDER
-    if crate_type == CrateType::Executable && sess.target.target.options.is_like_windows {
+    if crate_type == CrateType::Executable && sess.target.is_like_windows {
         if let Some(ref s) = codegen_results.windows_subsystem {
             cmd.subsystem(s);
         }
@@ -1626,7 +1619,7 @@
     // OBJECT-FILES-NO, AUDIT-ORDER
     // We want to prevent the compiler from accidentally leaking in any system libraries,
     // so by default we tell linkers not to link to any default libraries.
-    if !sess.opts.cg.default_linker_libraries && sess.target.target.options.no_default_libraries {
+    if !sess.opts.cg.default_linker_libraries && sess.target.no_default_libraries {
         cmd.no_default_libraries();
     }
 
@@ -1845,12 +1838,8 @@
     }
 
     // Converts a library file-stem into a cc -l argument
-    fn unlib<'a>(config: &config::Config, stem: &'a str) -> &'a str {
-        if stem.starts_with("lib") && !config.target.options.is_like_windows {
-            &stem[3..]
-        } else {
-            stem
-        }
+    fn unlib<'a>(target: &Target, stem: &'a str) -> &'a str {
+        if stem.starts_with("lib") && !target.is_like_windows { &stem[3..] } else { stem }
     }
 
     // Adds the static "rlib" versions of all crates to the command line.
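
The rewritten `unlib` above takes the `Target` directly instead of going through `config::Config`. As a self-contained sketch (with the target reduced to a single flag), its behaviour is:

    /// Turn a library file stem into the name `cc -l` expects: strip a leading
    /// "lib" everywhere except on Windows-like targets, where stems are used as-is.
    fn unlib<'a>(is_like_windows: bool, stem: &'a str) -> &'a str {
        if stem.starts_with("lib") && !is_like_windows { &stem[3..] } else { stem }
    }

    // unlib(false, "libfoo") == "foo"; unlib(true, "libfoo") == "libfoo"
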
@@ -1945,7 +1934,7 @@
                 // though, so we let that object file slide.
                 let skip_because_lto = are_upstream_rust_objects_already_included(sess)
                     && is_rust_object
-                    && (sess.target.target.options.no_builtins
+                    && (sess.target.no_builtins
                         || !codegen_results.crate_info.is_no_builtins.contains(&cnum));
 
                 if skip_because_cfg_say_so || skip_because_lto {
@@ -2088,10 +2077,10 @@
 }
 
 fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
-    let arch = &sess.target.target.arch;
-    let os = &sess.target.target.target_os;
-    let llvm_target = &sess.target.target.llvm_target;
-    if sess.target.target.target_vendor != "apple"
+    let arch = &sess.target.arch;
+    let os = &sess.target.os;
+    let llvm_target = &sess.target.llvm_target;
+    if sess.target.vendor != "apple"
         || !matches!(os.as_str(), "ios" | "tvos")
         || flavor != LinkerFlavor::Gcc
     {
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index 0ddf8bd..3df956c 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -184,7 +184,7 @@
         // * On OSX they have their own linker, not binutils'
         // * For WebAssembly the only functional linker is LLD, which doesn't
         //   support hint flags
-        !self.sess.target.target.options.is_like_osx && self.sess.target.target.arch != "wasm32"
+        !self.sess.target.is_like_osx && self.sess.target.arch != "wasm32"
     }
 
     // Some platforms take hints about whether a library is static or dynamic.
@@ -221,10 +221,8 @@
         let opt_level = match self.sess.opts.optimize {
             config::OptLevel::No => "O0",
             config::OptLevel::Less => "O1",
-            config::OptLevel::Default => "O2",
+            config::OptLevel::Default | config::OptLevel::Size | config::OptLevel::SizeMin => "O2",
             config::OptLevel::Aggressive => "O3",
-            config::OptLevel::Size => "Os",
-            config::OptLevel::SizeMin => "Oz",
         };
 
         self.linker_arg(&format!("-plugin-opt={}", opt_level));
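
A sketch of the mapping after the hunk above (not part of the diff): the size-oriented levels are now folded into "O2" for the `-plugin-opt=` argument instead of being passed through as "Os"/"Oz":

    #[derive(Clone, Copy)]
    enum OptLevel { No, Less, Default, Aggressive, Size, SizeMin }

    fn plugin_opt_level(opt: OptLevel) -> &'static str {
        match opt {
            OptLevel::No => "O0",
            OptLevel::Less => "O1",
            // Size/SizeMin ride along with the default level when handed to the
            // LTO plugin, rather than mapping to "Os"/"Oz" as before.
            OptLevel::Default | OptLevel::Size | OptLevel::SizeMin => "O2",
            OptLevel::Aggressive => "O3",
        }
    }
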
@@ -234,7 +232,7 @@
 
     fn build_dylib(&mut self, out_filename: &Path) {
         // On mac we need to tell the linker to let this library be rpathed
-        if self.sess.target.target.options.is_like_osx {
+        if self.sess.target.is_like_osx {
             self.cmd.arg("-dynamiclib");
             self.linker_arg("-dylib");
 
@@ -250,7 +248,7 @@
             }
         } else {
             self.cmd.arg("-shared");
-            if self.sess.target.target.options.is_like_windows {
+            if self.sess.target.is_like_windows {
                 // The output filename already contains `dll_suffix` so
                 // the resulting import library will have a name in the
                 // form of libfoo.dll.a
@@ -258,9 +256,9 @@
                     out_filename.file_name().and_then(|file| file.to_str()).map(|file| {
                         format!(
                             "{}{}{}",
-                            self.sess.target.target.options.staticlib_prefix,
+                            self.sess.target.staticlib_prefix,
                             file,
-                            self.sess.target.target.options.staticlib_suffix
+                            self.sess.target.staticlib_suffix
                         )
                     });
                 if let Some(implib_name) = implib_name {
@@ -282,7 +280,7 @@
     fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
         match output_kind {
             LinkOutputKind::DynamicNoPicExe => {
-                if !self.is_ld && self.sess.target.target.options.linker_is_gnu {
+                if !self.is_ld && self.sess.target.linker_is_gnu {
                     self.cmd.arg("-no-pie");
                 }
             }
@@ -293,7 +291,7 @@
             LinkOutputKind::StaticNoPicExe => {
                 // `-static` works for both gcc wrapper and ld.
                 self.cmd.arg("-static");
-                if !self.is_ld && self.sess.target.target.options.linker_is_gnu {
+                if !self.is_ld && self.sess.target.linker_is_gnu {
                     self.cmd.arg("-no-pie");
                 }
             }
@@ -322,7 +320,7 @@
         // any `#[link]` attributes in the `libc` crate, see #72782 for details.
         // FIXME: Switch to using `#[link]` attributes in the `libc` crate
         // similarly to other targets.
-        if self.sess.target.target.target_os == "vxworks"
+        if self.sess.target.os == "vxworks"
             && matches!(
                 output_kind,
                 LinkOutputKind::StaticNoPicExe
@@ -387,8 +385,8 @@
     // functions, etc.
     fn link_whole_staticlib(&mut self, lib: Symbol, search_path: &[PathBuf]) {
         self.hint_static();
-        let target = &self.sess.target.target;
-        if !target.options.is_like_osx {
+        let target = &self.sess.target;
+        if !target.is_like_osx {
             self.linker_arg("--whole-archive").cmd.arg(format!("-l{}", lib));
             self.linker_arg("--no-whole-archive");
         } else {
@@ -402,7 +400,7 @@
 
     fn link_whole_rlib(&mut self, lib: &Path) {
         self.hint_static();
-        if self.sess.target.target.options.is_like_osx {
+        if self.sess.target.is_like_osx {
             self.linker_arg("-force_load");
             self.linker_arg(&lib);
         } else {
@@ -426,9 +424,9 @@
         // -dead_strip can't be part of the pre_link_args because it's also used
         // for partial linking when using multiple codegen units (-r).  So we
         // insert it here.
-        if self.sess.target.target.options.is_like_osx {
+        if self.sess.target.is_like_osx {
             self.linker_arg("-dead_strip");
-        } else if self.sess.target.target.options.is_like_solaris {
+        } else if self.sess.target.is_like_solaris {
             self.linker_arg("-zignore");
 
         // If we're building a dylib, we don't use --gc-sections because LLVM
@@ -442,7 +440,7 @@
     }
 
     fn optimize(&mut self) {
-        if !self.sess.target.target.options.linker_is_gnu {
+        if !self.sess.target.linker_is_gnu {
             return;
         }
 
@@ -456,7 +454,7 @@
     }
 
     fn pgo_gen(&mut self) {
-        if !self.sess.target.target.options.linker_is_gnu {
+        if !self.sess.target.linker_is_gnu {
             return;
         }
 
@@ -505,8 +503,7 @@
 
     fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType) {
         // Symbol visibility in object files typically takes care of this.
-        if crate_type == CrateType::Executable
-            && self.sess.target.target.options.override_export_symbols.is_none()
+        if crate_type == CrateType::Executable && self.sess.target.override_export_symbols.is_none()
         {
             return;
         }
@@ -515,7 +512,7 @@
         // The object files have far more public symbols than we actually want to export,
         // so we hide them all here.
 
-        if !self.sess.target.target.options.limit_rdylib_exports {
+        if !self.sess.target.limit_rdylib_exports {
             return;
         }
 
@@ -523,13 +520,13 @@
             return;
         }
 
-        let is_windows = self.sess.target.target.options.is_like_windows;
+        let is_windows = self.sess.target.is_like_windows;
         let mut arg = OsString::new();
         let path = tmpdir.join(if is_windows { "list.def" } else { "list" });
 
         debug!("EXPORTED SYMBOLS:");
 
-        if self.sess.target.target.options.is_like_osx {
+        if self.sess.target.is_like_osx {
             // Write a plain, newline-separated list of symbols
             let res: io::Result<()> = try {
                 let mut f = BufWriter::new(File::create(&path)?);
@@ -575,12 +572,12 @@
             }
         }
 
-        if self.sess.target.target.options.is_like_osx {
+        if self.sess.target.is_like_osx {
             if !self.is_ld {
                 arg.push("-Wl,")
             }
             arg.push("-exported_symbols_list,");
-        } else if self.sess.target.target.options.is_like_solaris {
+        } else if self.sess.target.is_like_solaris {
             if !self.is_ld {
                 arg.push("-Wl,")
             }
@@ -1205,7 +1202,7 @@
 }
 
 fn exported_symbols(tcx: TyCtxt<'_>, crate_type: CrateType) -> Vec<String> {
-    if let Some(ref exports) = tcx.sess.target.target.options.override_export_symbols {
+    if let Some(ref exports) = tcx.sess.target.override_export_symbols {
         return exports.clone();
     }
 
@@ -1295,7 +1292,7 @@
         // Provide the linker with fallback to internal `target-cpu`.
         self.cmd.arg("--fallback-arch").arg(match self.sess.opts.cg.target_cpu {
             Some(ref s) => s,
-            None => &self.sess.target.target.options.cpu,
+            None => &self.sess.target.cpu,
         });
     }
 
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index 51cc1ad..9a6f8cd 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -229,8 +229,7 @@
         // needs to be exported.
         // However, on platforms that don't allow for Rust dylibs, having
         // external linkage is enough for monomorphization to be linked to.
-        let need_visibility = tcx.sess.target.target.options.dynamic_linking
-            && !tcx.sess.target.target.options.only_cdylib;
+        let need_visibility = tcx.sess.target.dynamic_linking && !tcx.sess.target.only_cdylib;
 
         let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
 
@@ -391,7 +390,7 @@
         codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
 
     if is_extern && !std_internal {
-        let target = &tcx.sess.target.target.llvm_target;
+        let target = &tcx.sess.target.llvm_target;
         // WebAssembly cannot export data symbols, so reduce their export level
         if target.contains("emscripten") {
             if let Some(Node::Item(&hir::Item { kind: hir::ItemKind::Static(..), .. })) =
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 0edf0fc..b34bee3 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -13,7 +13,6 @@
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::profiling::TimingGuard;
 use rustc_data_structures::profiling::VerboseTimingGuard;
-use rustc_data_structures::svh::Svh;
 use rustc_data_structures::sync::Lrc;
 use rustc_errors::emitter::Emitter;
 use rustc_errors::{DiagnosticId, FatalError, Handler, Level};
@@ -140,7 +139,7 @@
 
         let emit_obj = if !should_emit_obj {
             EmitObj::None
-        } else if sess.target.target.options.obj_is_bitcode
+        } else if sess.target.obj_is_bitcode
             || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
         {
             // This case is selected if the target uses objects as bitcode, or
@@ -222,11 +221,11 @@
                 false
             ),
             emit_obj,
-            bc_cmdline: sess.target.target.options.bitcode_llvm_cmdline.clone(),
+            bc_cmdline: sess.target.bitcode_llvm_cmdline.clone(),
 
             verify_llvm_ir: sess.verify_llvm_ir(),
             no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
-            no_builtins: no_builtins || sess.target.target.options.no_builtins,
+            no_builtins: no_builtins || sess.target.no_builtins,
 
             // Exclude metadata and allocator modules from time_passes output,
             // since they throw off the "LLVM passes" measurement.
@@ -253,7 +252,7 @@
                 .opts
                 .debugging_opts
                 .merge_functions
-                .unwrap_or(sess.target.target.options.merge_functions)
+                .unwrap_or(sess.target.merge_functions)
             {
                 MergeFunctions::Disabled => false,
                 MergeFunctions::Trampolines | MergeFunctions::Aliases => {
@@ -308,7 +307,7 @@
     pub allocator_module_config: Arc<ModuleConfig>,
     pub tm_factory: TargetMachineFactory<B>,
     pub msvc_imps_needed: bool,
-    pub target_pointer_width: String,
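+    // Pointer width in bits (e.g. 64), now taken from `Target::pointer_width`
+    // instead of the old string-valued `target_pointer_width`.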
+    pub target_pointer_width: u32,
     pub target_arch: String,
     pub debuginfo: config::DebugInfo,
 
@@ -389,7 +388,7 @@
     let requested_for_rlib = sess.opts.cg.embed_bitcode
         && sess.crate_types().contains(&CrateType::Rlib)
         && sess.opts.output_types.contains_key(&OutputType::Exe);
-    let forced_by_target = sess.target.target.options.forces_embed_bitcode;
+    let forced_by_target = sess.target.forces_embed_bitcode;
     requested_for_rlib || forced_by_target
 }
 
@@ -414,7 +413,6 @@
     let sess = tcx.sess;
 
     let crate_name = tcx.crate_name(LOCAL_CRATE);
-    let crate_hash = tcx.crate_hash(LOCAL_CRATE);
     let no_builtins = tcx.sess.contains_name(&tcx.hir().krate().item.attrs, sym::no_builtins);
     let is_compiler_builtins =
         tcx.sess.contains_name(&tcx.hir().krate().item.attrs, sym::compiler_builtins);
@@ -463,7 +461,6 @@
     OngoingCodegen {
         backend,
         crate_name,
-        crate_hash,
         metadata,
         windows_subsystem,
         linker_info,
@@ -658,15 +655,6 @@
     // These are used in linking steps and will be cleaned up afterward.
 }
 
-pub fn dump_incremental_data(_codegen_results: &CodegenResults) {
-    // FIXME(mw): This does not work at the moment because the situation has
-    //            become more complicated due to incremental LTO. Now a CGU
-    //            can have more than two caching states.
-    // println!("[incremental] Re-using {} out of {} modules",
-    //           codegen_results.modules.iter().filter(|m| m.pre_existing).count(),
-    //           codegen_results.modules.len());
-}
-
 pub enum WorkItem<B: WriteBackendMethods> {
     /// Optimize a newly codegened, totally unoptimized module.
     Optimize(ModuleCodegen<B::Module>),
@@ -1034,8 +1022,8 @@
         tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, ol)),
         total_cgus,
         msvc_imps_needed: msvc_imps_needed(tcx),
-        target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(),
-        target_arch: tcx.sess.target.target.arch.clone(),
+        target_pointer_width: tcx.sess.target.pointer_width,
+        target_arch: tcx.sess.target.arch.clone(),
         debuginfo: tcx.sess.opts.debuginfo,
     };
 
@@ -1175,7 +1163,7 @@
     // necessary. There's already optimizations in place to avoid sending work
     // back to the coordinator if LTO isn't requested.
     return thread::spawn(move || {
-        let max_workers = ::num_cpus::get();
+        let max_workers = num_cpus::get();
         let mut worker_id_counter = 0;
         let mut free_worker_ids = Vec::new();
         let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
@@ -1531,8 +1519,6 @@
     }
 }
 
-pub const CODEGEN_WORKER_ID: usize = usize::MAX;
-
 /// `FatalError` is explicitly not `Send`.
 #[must_use]
 pub struct WorkerFatalError;
@@ -1720,7 +1706,6 @@
 pub struct OngoingCodegen<B: ExtraBackendMethods> {
     pub backend: B,
     pub crate_name: Symbol,
-    pub crate_hash: Svh,
     pub metadata: EncodedMetadata,
     pub windows_subsystem: Option<String>,
     pub linker_info: LinkerInfo,
@@ -1766,7 +1751,6 @@
         (
             CodegenResults {
                 crate_name: self.crate_name,
-                crate_hash: self.crate_hash,
                 metadata: self.metadata,
                 windows_subsystem: self.windows_subsystem,
                 linker_info: self.linker_info,
@@ -1881,11 +1865,11 @@
     // something is wrong with commandline arg validation.
     assert!(
         !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
-            && tcx.sess.target.target.options.is_like_windows
+            && tcx.sess.target.is_like_windows
             && tcx.sess.opts.cg.prefer_dynamic)
     );
 
-    tcx.sess.target.target.options.is_like_windows &&
+    tcx.sess.target.is_like_windows &&
         tcx.sess.crate_types().iter().any(|ct| *ct == CrateType::Rlib) &&
     // ThinLTO can't handle this workaround in all cases, so we don't
     // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index d82fc2c..5fe26db 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -46,7 +46,6 @@
 use rustc_session::config::{self, EntryFnType};
 use rustc_session::utils::NativeLibKind;
 use rustc_session::Session;
-use rustc_span::Span;
 use rustc_symbol_mangling::test as symbol_names_test;
 use rustc_target::abi::{Align, LayoutOf, VariantIdx};
 
@@ -327,7 +326,7 @@
 /// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
 /// 64-bit MinGW) instead of "full SEH".
 pub fn wants_msvc_seh(sess: &Session) -> bool {
-    sess.target.target.options.is_like_msvc
+    sess.target.is_like_msvc
 }
 
 pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
@@ -364,11 +363,7 @@
 pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     cx: &'a Bx::CodegenCx,
 ) -> Option<Bx::Function> {
-    let (main_def_id, span) = match cx.tcx().entry_fn(LOCAL_CRATE) {
-        Some((def_id, _)) => (def_id, cx.tcx().def_span(def_id)),
-        None => return None,
-    };
-
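+    // `entry_fn` returns `None` when the crate has no entry point; in that case
+    // there is no wrapper to create and we bail out early via `?`.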
+    let main_def_id = cx.tcx().entry_fn(LOCAL_CRATE).map(|(def_id, _)| def_id)?;
     let instance = Instance::mono(cx.tcx(), main_def_id.to_def_id());
 
     if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
@@ -381,19 +376,18 @@
 
     return cx.tcx().entry_fn(LOCAL_CRATE).map(|(_, et)| {
         let use_start_lang_item = EntryFnType::Start != et;
-        create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, use_start_lang_item)
+        create_entry_fn::<Bx>(cx, main_llfn, main_def_id, use_start_lang_item)
     });
 
     fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         cx: &'a Bx::CodegenCx,
-        sp: Span,
         rust_main: Bx::Value,
         rust_main_def_id: LocalDefId,
         use_start_lang_item: bool,
     ) -> Bx::Function {
         // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
         // depending on whether the target needs `argc` and `argv` to be passed in.
-        let llfty = if cx.sess().target.target.options.main_needs_argc_argv {
+        let llfty = if cx.sess().target.main_needs_argc_argv {
             cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
         } else {
             cx.type_func(&[], cx.type_int())
@@ -411,8 +405,9 @@
             Some(llfn) => llfn,
             None => {
                 // FIXME: We should be smart and show a better diagnostic here.
+                let span = cx.tcx().def_span(rust_main_def_id);
                 cx.sess()
-                    .struct_span_err(sp, "entry symbol `main` declared multiple times")
+                    .struct_span_err(span, "entry symbol `main` declared multiple times")
                     .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
                     .emit();
                 cx.sess().abort_if_errors();
@@ -464,7 +459,7 @@
     cx: &'a Bx::CodegenCx,
     bx: &mut Bx,
 ) -> (Bx::Value, Bx::Value) {
-    if cx.sess().target.target.options.main_needs_argc_argv {
+    if cx.sess().target.main_needs_argc_argv {
         // Params from native `main()` used as args for rust start function
         let param_argc = bx.get_param(0);
         let param_argv = bx.get_param(1);
@@ -479,8 +474,6 @@
     }
 }
 
-pub const CODEGEN_WORKER_ID: usize = usize::MAX;
-
 pub fn codegen_crate<B: ExtraBackendMethods>(
     backend: B,
     tcx: TyCtxt<'tcx>,
@@ -538,8 +531,9 @@
         let llmod_id =
             cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
         let mut modules = backend.new_metadata(tcx, &llmod_id);
-        tcx.sess
-            .time("write_allocator_module", || backend.codegen_allocator(tcx, &mut modules, kind));
+        tcx.sess.time("write_allocator_module", || {
+            backend.codegen_allocator(tcx, &mut modules, kind, tcx.lang_items().oom().is_some())
+        });
 
         Some(ModuleCodegen { name: llmod_id, module_llvm: modules, kind: ModuleKind::Allocator })
     } else {
@@ -694,7 +688,7 @@
         total_codegen_time.into_inner(),
     );
 
-    ::rustc_incremental::assert_module_sources::assert_module_sources(tcx);
+    rustc_incremental::assert_module_sources::assert_module_sources(tcx);
 
     symbol_names_test::report_symbol_names(tcx);
 
@@ -753,8 +747,8 @@
 }
 
 fn finalize_tcx(tcx: TyCtxt<'_>) {
-    tcx.sess.time("assert_dep_graph", || ::rustc_incremental::assert_dep_graph(tcx));
-    tcx.sess.time("serialize_dep_graph", || ::rustc_incremental::save_dep_graph(tcx));
+    tcx.sess.time("assert_dep_graph", || rustc_incremental::assert_dep_graph(tcx));
+    tcx.sess.time("serialize_dep_graph", || rustc_incremental::save_dep_graph(tcx));
 
     // We assume that no queries are run past here. If there are new queries
     // after this point, they'll show up as "<unknown>" in self-profiling data.
@@ -861,8 +855,6 @@
 
     providers.dllimport_foreign_items = |tcx, krate| {
         let module_map = tcx.foreign_modules(krate);
-        let module_map =
-            module_map.iter().map(|lib| (lib.def_id, lib)).collect::<FxHashMap<_, _>>();
 
         let dllimports = tcx
             .native_libraries(krate)
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
index a266d17..bcac2c9 100644
--- a/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
@@ -3,7 +3,7 @@
 /// Aligns with [llvm::coverage::Counter::CounterKind](https://github.com/rust-lang/llvm-project/blob/rustc/10.0-2020-05-05/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L91)
 #[derive(Copy, Clone, Debug)]
 #[repr(C)]
-enum CounterKind {
+pub enum CounterKind {
     Zero = 0,
     CounterValueReference = 1,
     Expression = 2,
@@ -23,8 +23,8 @@
 #[repr(C)]
 pub struct Counter {
     // Important: The layout (order and types of fields) must match its C++ counterpart.
-    kind: CounterKind,
-    id: u32,
+    pub kind: CounterKind,
+    pub id: u32,
 }
 
 impl Counter {
@@ -55,9 +55,9 @@
 #[derive(Copy, Clone, Debug)]
 #[repr(C)]
 pub struct CounterExpression {
-    kind: ExprKind,
-    lhs: Counter,
-    rhs: Counter,
+    pub kind: ExprKind,
+    pub lhs: Counter,
+    pub rhs: Counter,
 }
 
 impl CounterExpression {
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
index 814e43c..b0d7953 100644
--- a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
@@ -2,18 +2,18 @@
 
 use rustc_index::vec::IndexVec;
 use rustc_middle::mir::coverage::{
-    CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionIndex,
-    MappedExpressionIndex, Op,
+    CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId,
+    InjectedExpressionIndex, MappedExpressionIndex, Op,
 };
 use rustc_middle::ty::Instance;
 use rustc_middle::ty::TyCtxt;
 
 #[derive(Clone, Debug)]
-pub struct ExpressionRegion {
+pub struct Expression {
     lhs: ExpressionOperandId,
     op: Op,
     rhs: ExpressionOperandId,
-    region: CodeRegion,
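+    // An expression may have no `CodeRegion` of its own; it can still be
+    // referenced as an operand of other expressions.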
+    region: Option<CodeRegion>,
 }
 
 /// Collects all of the coverage regions associated with (a) injected counters, (b) counter
@@ -28,17 +28,23 @@
 /// only whitespace or comments). According to LLVM Code Coverage Mapping documentation, "A count
 /// for a gap area is only used as the line execution count if there are no other regions on a
 /// line."
-pub struct FunctionCoverage {
+pub struct FunctionCoverage<'tcx> {
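+    // Kept so diagnostics (e.g. the missing-source-hash assertion) can name the
+    // function being instrumented.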
+    instance: Instance<'tcx>,
     source_hash: u64,
     counters: IndexVec<CounterValueReference, Option<CodeRegion>>,
-    expressions: IndexVec<InjectedExpressionIndex, Option<ExpressionRegion>>,
+    expressions: IndexVec<InjectedExpressionIndex, Option<Expression>>,
     unreachable_regions: Vec<CodeRegion>,
 }
 
-impl FunctionCoverage {
-    pub fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+impl<'tcx> FunctionCoverage<'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
         let coverageinfo = tcx.coverageinfo(instance.def_id());
+        debug!(
+            "FunctionCoverage::new(instance={:?}) has coverageinfo={:?}",
+            instance, coverageinfo
+        );
         Self {
+            instance,
             source_hash: 0, // will be set with the first `add_counter()`
             counters: IndexVec::from_elem_n(None, coverageinfo.num_counters as usize),
             expressions: IndexVec::from_elem_n(None, coverageinfo.num_expressions as usize),
@@ -46,15 +52,18 @@
         }
     }
 
-    /// Adds a code region to be counted by an injected counter intrinsic.
-    /// The source_hash (computed during coverage instrumentation) should also be provided, and
-    /// should be the same for all counters in a given function.
-    pub fn add_counter(&mut self, source_hash: u64, id: CounterValueReference, region: CodeRegion) {
+    /// Sets the function source hash value. If called multiple times for the same function, all
+    /// calls should have the same hash value.
+    pub fn set_function_source_hash(&mut self, source_hash: u64) {
         if self.source_hash == 0 {
             self.source_hash = source_hash;
         } else {
             debug_assert_eq!(source_hash, self.source_hash);
         }
+    }
+
+    /// Adds a code region to be counted by an injected counter intrinsic.
+    pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) {
         self.counters[id].replace(region).expect_none("add_counter called with duplicate `id`");
     }
 
@@ -74,15 +83,19 @@
     /// counters and expressions have been added.
     pub fn add_counter_expression(
         &mut self,
-        expression_id: InjectedExpressionIndex,
+        expression_id: InjectedExpressionId,
         lhs: ExpressionOperandId,
         op: Op,
         rhs: ExpressionOperandId,
-        region: CodeRegion,
+        region: Option<CodeRegion>,
     ) {
+        debug!(
+            "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
+            expression_id, lhs, op, rhs, region
+        );
         let expression_index = self.expression_index(u32::from(expression_id));
         self.expressions[expression_index]
-            .replace(ExpressionRegion { lhs, op, rhs, region })
+            .replace(Expression { lhs, op, rhs, region })
             .expect_none("add_counter_expression called with duplicate `id_descending_from_max`");
     }
 
@@ -103,7 +116,11 @@
     pub fn get_expressions_and_counter_regions<'a>(
         &'a self,
     ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &'a CodeRegion)>) {
-        assert!(self.source_hash != 0);
+        assert!(
+            self.source_hash != 0,
+            "No counters provided the source_hash for function: {:?}",
+            self.instance
+        );
 
         let counter_regions = self.counter_regions();
         let (counter_expressions, expression_regions) = self.expressions_with_regions();
@@ -129,66 +146,105 @@
     ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &'a CodeRegion)>) {
         let mut counter_expressions = Vec::with_capacity(self.expressions.len());
         let mut expression_regions = Vec::with_capacity(self.expressions.len());
-        let mut new_indexes =
-            IndexVec::from_elem_n(MappedExpressionIndex::from(u32::MAX), self.expressions.len());
-        // Note, the initial value shouldn't matter since every index in use in `self.expressions`
-        // will be set, and after that, `new_indexes` will only be accessed using those same
-        // indexes.
+        let mut new_indexes = IndexVec::from_elem_n(None, self.expressions.len());
 
-        // Note that an `ExpressionRegion`s at any given index can include other expressions as
+        // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
+        // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
+        // and value. Operand ID value `0` maps to `CounterKind::Zero`; values in the known range
+        // of injected LLVM counters map to `CounterKind::CounterValueReference` (and the value
+        // matches the injected counter index); and any other value is converted into a
+        // `CounterKind::Expression` with the expression's `new_index`.
+        //
+        // Expressions will be returned from this function in a sequential vector (array) of
+        // `CounterExpression`, so the expression IDs must be mapped from their original,
+        // potentially sparse set of indexes, originally in reverse order from `u32::MAX`.
+        //
+        // An `Expression` as an operand will have already been encountered as an `Expression` with
+        // operands, so its new_index will already have been generated (as a 1-up index value).
+        // (If an `Expression` as an operand does not have a corresponding new_index, it was
+        // probably optimized out, after the expression was injected into the MIR, so it will
+        // get a `CounterKind::Zero` instead.)
+        //
+        // In other words, an `Expression` at any given index can include other expressions as
         // operands, but expression operands can only come from the subset of expressions having
-        // `expression_index`s lower than the referencing `ExpressionRegion`. Therefore, it is
+        // `expression_index`s lower than the referencing `Expression`. Therefore, it is
         // reasonable to look up the new index of an expression operand while the `new_indexes`
         // vector is only complete up to the current `ExpressionIndex`.
         let id_to_counter =
-            |new_indexes: &IndexVec<InjectedExpressionIndex, MappedExpressionIndex>,
+            |new_indexes: &IndexVec<InjectedExpressionIndex, Option<MappedExpressionIndex>>,
              id: ExpressionOperandId| {
-                if id.index() < self.counters.len() {
+                if id == ExpressionOperandId::ZERO {
+                    Some(Counter::zero())
+                } else if id.index() < self.counters.len() {
+                    // Note: Some codegen-injected Counters may only be referenced by `Expression`s,
+                    // and may not have their own `CodeRegion`s.
                     let index = CounterValueReference::from(id.index());
-                    self.counters
-                        .get(index)
-                        .unwrap() // pre-validated
-                        .as_ref()
-                        .map(|_| Counter::counter_value_reference(index))
+                    Some(Counter::counter_value_reference(index))
                 } else {
                     let index = self.expression_index(u32::from(id));
                     self.expressions
                         .get(index)
                         .expect("expression id is out of range")
                         .as_ref()
-                        .map(|_| Counter::expression(new_indexes[index]))
+                        // If an expression was optimized out, assume it would have produced a count
+                        // of zero. This ensures that expressions dependent on optimized-out
+                        // expressions are still valid.
+                        .map_or(Some(Counter::zero()), |_| {
+                            new_indexes[index].map(|new_index| Counter::expression(new_index))
+                        })
                 }
             };
 
-        for (original_index, expression_region) in
+        for (original_index, expression) in
             self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
                 // Option::map() will return None to filter out missing expressions. This may happen
                 // if, for example, a MIR-instrumented expression is removed during an optimization.
-                entry.as_ref().map(|region| (original_index, region))
+                entry.as_ref().map(|expression| (original_index, expression))
             })
         {
-            let region = &expression_region.region;
-            let ExpressionRegion { lhs, op, rhs, .. } = *expression_region;
+            let optional_region = &expression.region;
+            let Expression { lhs, op, rhs, .. } = *expression;
 
             if let Some(Some((lhs_counter, rhs_counter))) =
                 id_to_counter(&new_indexes, lhs).map(|lhs_counter| {
                     id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
                 })
             {
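+                // Sanity-check that both operand counter IDs are within the range
+                // of known counters/expressions.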
+                debug_assert!(
+                    (lhs_counter.id as usize)
+                        < usize::max(self.counters.len(), self.expressions.len())
+                );
+                debug_assert!(
+                    (rhs_counter.id as usize)
+                        < usize::max(self.counters.len(), self.expressions.len())
+                );
                 // Both operands exist. `Expression` operands exist in `self.expressions` and have
                 // been assigned a `new_index`.
                 let mapped_expression_index =
                     MappedExpressionIndex::from(counter_expressions.len());
-                counter_expressions.push(CounterExpression::new(
+                let expression = CounterExpression::new(
                     lhs_counter,
                     match op {
                         Op::Add => ExprKind::Add,
                         Op::Subtract => ExprKind::Subtract,
                     },
                     rhs_counter,
-                ));
-                new_indexes[original_index] = mapped_expression_index;
-                expression_regions.push((Counter::expression(mapped_expression_index), region));
+                );
+                debug!(
+                    "Adding expression {:?} = {:?}, region: {:?}",
+                    mapped_expression_index, expression, optional_region
+                );
+                counter_expressions.push(expression);
+                new_indexes[original_index] = Some(mapped_expression_index);
+                if let Some(region) = optional_region {
+                    expression_regions.push((Counter::expression(mapped_expression_index), region));
+                }
+            } else {
+                debug!(
+                    "Ignoring expression with one or more missing operands: \
+                    original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+                    original_index, lhs, op, rhs, optional_region,
+                )
             }
         }
         (counter_expressions, expression_regions.into_iter())
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
index 45ecb79..0b49a37 100644
--- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -33,7 +33,7 @@
 ) {
     // When targeting MSVC, emit C++ style type names for compatibility with
     // .natvis visualizers (and perhaps other existing native debuggers?)
-    let cpp_like_names = tcx.sess.target.target.options.is_like_msvc;
+    let cpp_like_names = tcx.sess.target.is_like_msvc;
 
     match *t.kind() {
         ty::Bool => output.push_str("bool"),
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index d4f3bf3..70b92b2 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -21,7 +21,6 @@
 extern crate rustc_middle;
 
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_data_structures::svh::Svh;
 use rustc_data_structures::sync::Lrc;
 use rustc_hir::def_id::CrateNum;
 use rustc_hir::LangItem;
@@ -42,6 +41,7 @@
 pub mod meth;
 pub mod mir;
 pub mod mono_item;
+pub mod target_features;
 pub mod traits;
 
 pub struct ModuleCodegen<M> {
@@ -133,7 +133,6 @@
     pub modules: Vec<CompiledModule>,
     pub allocator_module: Option<CompiledModule>,
     pub metadata_module: Option<CompiledModule>,
-    pub crate_hash: Svh,
     pub metadata: rustc_middle::middle::cstore::EncodedMetadata,
     pub windows_subsystem: Option<String>,
     pub linker_info: back::linker::LinkerInfo,
@@ -143,6 +142,7 @@
 pub fn provide(providers: &mut Providers) {
     crate::back::symbol_export::provide(providers);
     crate::base::provide_both(providers);
+    crate::target_features::provide(providers);
 }
 
 pub fn provide_extern(providers: &mut Providers) {
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 703a17b..da4637b 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -12,9 +12,9 @@
 use rustc_ast as ast;
 use rustc_hir::lang_items::LangItem;
 use rustc_index::vec::Idx;
-use rustc_middle::mir;
 use rustc_middle::mir::interpret::ConstValue;
 use rustc_middle::mir::AssertKind;
+use rustc_middle::mir::{self, SwitchTargets};
 use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
@@ -24,8 +24,6 @@
 use rustc_target::abi::{self, LayoutOf};
 use rustc_target::spec::abi::Abi;
 
-use std::borrow::Cow;
-
 /// Used by `FunctionCx::codegen_terminator` for emitting common patterns
 /// e.g., creating a basic block, calling a function, etc.
 struct TerminatorCodegenHelper<'tcx> {
@@ -165,7 +163,7 @@
                 target <= self.bb
                     && target.start_location().is_predecessor_of(self.bb.start_location(), mir)
             }) {
-                bx.sideeffect();
+                bx.sideeffect(false);
             }
         }
     }
@@ -198,42 +196,37 @@
         mut bx: Bx,
         discr: &mir::Operand<'tcx>,
         switch_ty: Ty<'tcx>,
-        values: &Cow<'tcx, [u128]>,
-        targets: &Vec<mir::BasicBlock>,
+        targets: &SwitchTargets,
     ) {
         let discr = self.codegen_operand(&mut bx, &discr);
         // `switch_ty` is redundant, sanity-check that.
         assert_eq!(discr.layout.ty, switch_ty);
-        if targets.len() == 2 {
-            // If there are two targets, emit br instead of switch
-            let lltrue = helper.llblock(self, targets[0]);
-            let llfalse = helper.llblock(self, targets[1]);
+        helper.maybe_sideeffect(self.mir, &mut bx, targets.all_targets());
+
+        let mut target_iter = targets.iter();
+        if target_iter.len() == 1 {
+            // If there are two targets (one conditional, one fallback), emit br instead of switch
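+            // (`targets.iter()` does not include the `otherwise` fallback, so a
+            // length of 1 means two blocks in total.)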
+            let (test_value, target) = target_iter.next().unwrap();
+            let lltrue = helper.llblock(self, target);
+            let llfalse = helper.llblock(self, targets.otherwise());
             if switch_ty == bx.tcx().types.bool {
-                helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
                 // Don't generate trivial icmps when switching on bool
-                if let [0] = values[..] {
-                    bx.cond_br(discr.immediate(), llfalse, lltrue);
-                } else {
-                    assert_eq!(&values[..], &[1]);
-                    bx.cond_br(discr.immediate(), lltrue, llfalse);
+                match test_value {
+                    0 => bx.cond_br(discr.immediate(), llfalse, lltrue),
+                    1 => bx.cond_br(discr.immediate(), lltrue, llfalse),
+                    _ => bug!(),
                 }
             } else {
                 let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
-                let llval = bx.const_uint_big(switch_llty, values[0]);
+                let llval = bx.const_uint_big(switch_llty, test_value);
                 let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
-                helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
                 bx.cond_br(cmp, lltrue, llfalse);
             }
         } else {
-            helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
-            let (otherwise, targets) = targets.split_last().unwrap();
             bx.switch(
                 discr.immediate(),
-                helper.llblock(self, *otherwise),
-                values
-                    .iter()
-                    .zip(targets)
-                    .map(|(&value, target)| (value, helper.llblock(self, *target))),
+                helper.llblock(self, targets.otherwise()),
+                target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
             );
         }
     }
@@ -412,7 +405,7 @@
         self.set_debug_loc(&mut bx, terminator.source_info);
 
         // Get the location information.
-        let location = self.get_caller_location(&mut bx, span).immediate();
+        let location = self.get_caller_location(&mut bx, terminator.source_info).immediate();
 
         // Put together the arguments to the panic entry point.
         let (lang_item, args) = match msg {
@@ -449,7 +442,7 @@
         bx: &mut Bx,
         intrinsic: Option<Symbol>,
         instance: Option<Instance<'tcx>>,
-        span: Span,
+        source_info: mir::SourceInfo,
         destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
         cleanup: Option<mir::BasicBlock>,
     ) -> bool {
@@ -491,11 +484,12 @@
                     }
                 });
                 let msg = bx.const_str(Symbol::intern(&msg_str));
-                let location = self.get_caller_location(bx, span).immediate();
+                let location = self.get_caller_location(bx, source_info).immediate();
 
                 // Obtain the panic entry point.
                 // FIXME: dedup this with `codegen_assert_terminator` above.
-                let def_id = common::langcall(bx.tcx(), Some(span), "", LangItem::Panic);
+                let def_id =
+                    common::langcall(bx.tcx(), Some(source_info.span), "", LangItem::Panic);
                 let instance = ty::Instance::mono(bx.tcx(), def_id);
                 let fn_abi = FnAbi::of_instance(bx, instance, &[]);
                 let llfn = bx.get_fn_addr(instance);
@@ -536,7 +530,9 @@
         cleanup: Option<mir::BasicBlock>,
         fn_span: Span,
     ) {
-        let span = terminator.source_info.span;
+        let source_info = terminator.source_info;
+        let span = source_info.span;
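+        // Keep the full `SourceInfo` (not just its span) so that caller-location
+        // lookup can walk scopes introduced by MIR inlining.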
+
         // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
         let callee = self.codegen_operand(&mut bx, func);
 
@@ -613,7 +609,7 @@
             &mut bx,
             intrinsic,
             instance,
-            span,
+            source_info,
             destination,
             cleanup,
         ) {
@@ -634,7 +630,8 @@
 
         if intrinsic == Some(sym::caller_location) {
             if let Some((_, target)) = destination.as_ref() {
-                let location = self.get_caller_location(&mut bx, fn_span);
+                let location = self
+                    .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
 
                 if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
                     location.val.store(&mut bx, tmp);
@@ -693,7 +690,7 @@
                 &fn_abi,
                 &args,
                 dest,
-                terminator.source_info.span,
+                span,
             );
 
             if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
@@ -800,7 +797,8 @@
                 args.len() + 1,
                 "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR",
             );
-            let location = self.get_caller_location(&mut bx, fn_span);
+            let location =
+                self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
             debug!(
                 "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
                 terminator, location, fn_span
@@ -879,7 +877,7 @@
                         let string = match ty.kind() {
                             ty::Uint(_) => value.to_string(),
                             ty::Int(int_ty) => {
-                                match int_ty.normalize(bx.tcx().sess.target.ptr_width) {
+                                match int_ty.normalize(bx.tcx().sess.target.pointer_width) {
                                     ast::IntTy::I8 => (value as i8).to_string(),
                                     ast::IntTy::I16 => (value as i16).to_string(),
                                     ast::IntTy::I32 => (value as i32).to_string(),
@@ -971,12 +969,28 @@
             }
 
             mir::TerminatorKind::Goto { target } => {
-                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+                if bb == target {
+                    // This is an unconditional branch back to this same basic
+                    // block. That means we have something like a `loop {}`
+                    // statement. Currently LLVM miscompiles this because it
+                    // assumes forward progress. We want to prevent this in all
+                    // cases, but that has a fairly high cost to compile times
+                    // currently. Instead, try to handle this specific case
+                    // which comes up commonly in practice (e.g., in embedded
+                    // code).
+                    //
+                    // The `true` here means we insert side effects regardless
+                    // of -Zinsert-sideeffect being passed on unconditional
+                    // branching to the same basic block.
+                    bx.sideeffect(true);
+                } else {
+                    helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+                }
                 helper.funclet_br(self, &mut bx, target);
             }
 
-            mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
-                self.codegen_switchint_terminator(helper, bx, discr, switch_ty, values, targets);
+            mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
+                self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets);
             }
 
             mir::TerminatorKind::Return => {
@@ -1170,17 +1184,49 @@
         }
     }
 
-    fn get_caller_location(&mut self, bx: &mut Bx, span: Span) -> OperandRef<'tcx, Bx::Value> {
-        self.caller_location.unwrap_or_else(|| {
+    fn get_caller_location(
+        &mut self,
+        bx: &mut Bx,
+        mut source_info: mir::SourceInfo,
+    ) -> OperandRef<'tcx, Bx::Value> {
+        let tcx = bx.tcx();
+
+        let mut span_to_caller_location = |span: Span| {
             let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
-            let caller = bx.tcx().sess.source_map().lookup_char_pos(topmost.lo());
-            let const_loc = bx.tcx().const_caller_location((
+            let caller = tcx.sess.source_map().lookup_char_pos(topmost.lo());
+            let const_loc = tcx.const_caller_location((
                 Symbol::intern(&caller.file.name.to_string()),
                 caller.line as u32,
                 caller.col_display as u32 + 1,
             ));
             OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
-        })
+        };
+
+        // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+        // If so, the starting `source_info.span` is in the innermost inlined
+        // function, and will be replaced with outer callsite spans as long
+        // as the inlined functions were `#[track_caller]`.
+        loop {
+            let scope_data = &self.mir.source_scopes[source_info.scope];
+
+            if let Some((callee, callsite_span)) = scope_data.inlined {
+                // Stop inside the most nested non-`#[track_caller]` function,
+                // before ever reaching its caller (which is irrelevant).
+                if !callee.def.requires_caller_location(tcx) {
+                    return span_to_caller_location(source_info.span);
+                }
+                source_info.span = callsite_span;
+            }
+
+            // Skip past all of the parents with `inlined: None`.
+            match scope_data.inlined_parent_scope {
+                Some(parent) => source_info.scope = parent,
+                None => break,
+            }
+        }
+
+        // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
+        self.caller_location.unwrap_or_else(|| span_to_caller_location(source_info.span))
     }
 
     fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
diff --git a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
index a2ad27b..a115d35 100644
--- a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
@@ -10,25 +10,37 @@
         let Coverage { kind, code_region } = coverage;
         match kind {
             CoverageKind::Counter { function_source_hash, id } => {
-                bx.add_counter_region(self.instance, function_source_hash, id, code_region);
+                if bx.set_function_source_hash(self.instance, function_source_hash) {
+                    // If `set_function_source_hash()` returned true, the coverage map is enabled,
+                    // so continue adding the counter.
+                    if let Some(code_region) = code_region {
+                        // Note: Some counters do not have code regions, but may still be referenced
+                        // from expressions. In that case, don't add the counter to the coverage map,
+                        // but do inject the counter intrinsic.
+                        bx.add_coverage_counter(self.instance, id, code_region);
+                    }
 
-                let coverageinfo = bx.tcx().coverageinfo(self.instance.def_id());
+                    let coverageinfo = bx.tcx().coverageinfo(self.instance.def_id());
 
-                let fn_name = bx.create_pgo_func_name_var(self.instance);
-                let hash = bx.const_u64(function_source_hash);
-                let num_counters = bx.const_u32(coverageinfo.num_counters);
-                let id = bx.const_u32(u32::from(id));
-                debug!(
-                    "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
-                    fn_name, hash, num_counters, id,
-                );
-                bx.instrprof_increment(fn_name, hash, num_counters, id);
+                    let fn_name = bx.create_pgo_func_name_var(self.instance);
+                    let hash = bx.const_u64(function_source_hash);
+                    let num_counters = bx.const_u32(coverageinfo.num_counters);
+                    let index = bx.const_u32(u32::from(id));
+                    debug!(
+                        "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+                        fn_name, hash, num_counters, index,
+                    );
+                    bx.instrprof_increment(fn_name, hash, num_counters, index);
+                }
             }
             CoverageKind::Expression { id, lhs, op, rhs } => {
-                bx.add_counter_expression_region(self.instance, id, lhs, op, rhs, code_region);
+                bx.add_coverage_counter_expression(self.instance, id, lhs, op, rhs, code_region);
             }
             CoverageKind::Unreachable => {
-                bx.add_unreachable_region(self.instance, code_region);
+                bx.add_coverage_unreachable(
+                    self.instance,
+                    code_region.expect("unreachable regions always have code regions"),
+                );
             }
         }
     }
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index d8a530d..4e0396a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -1,5 +1,4 @@
 use crate::traits::*;
-use rustc_hir::def_id::CrateNum;
 use rustc_index::vec::IndexVec;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir;
@@ -13,9 +12,8 @@
 use super::place::PlaceRef;
 use super::{FunctionCx, LocalRef};
 
-pub struct FunctionDebugContext<D> {
-    pub scopes: IndexVec<mir::SourceScope, DebugScope<D>>,
-    pub defining_crate: CrateNum,
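+/// `S` and `L` are the backend's `DIScope` and `DILocation` handle types.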
+pub struct FunctionDebugContext<S, L> {
+    pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>,
 }
 
 #[derive(Copy, Clone)]
@@ -38,76 +36,84 @@
 }
 
 #[derive(Clone, Copy, Debug)]
-pub struct DebugScope<D> {
-    pub scope_metadata: Option<D>,
+pub struct DebugScope<S, L> {
+    // FIXME(eddyb) this should never be `None`, after initialization.
+    pub dbg_scope: Option<S>,
+
+    /// Call site location, if this scope was inlined from another function.
+    pub inlined_at: Option<L>,
+
     // Start and end offsets of the file to which this DIScope belongs.
     // These are used to quickly determine whether some span refers to the same file.
     pub file_start_pos: BytePos,
     pub file_end_pos: BytePos,
 }
 
-impl<D> DebugScope<D> {
-    pub fn is_valid(&self) -> bool {
-        self.scope_metadata.is_some()
+impl<'tcx, S: Copy, L: Copy> DebugScope<S, L> {
+    /// DILocations inherit the source file name from the parent DIScope. Due to macro expansions
+    /// it can happen that the current span belongs to a different file than the DIScope
+    /// corresponding to the span's containing source scope. If so, we need to create a DIScope
+    /// "extension" into that file.
+    pub fn adjust_dbg_scope_for_span<Cx: CodegenMethods<'tcx, DIScope = S, DILocation = L>>(
+        &self,
+        cx: &Cx,
+        span: Span,
+    ) -> S {
+        // FIXME(eddyb) this should never be `None`.
+        let dbg_scope = self
+            .dbg_scope
+            .unwrap_or_else(|| bug!("`dbg_scope` is only `None` during initialization"));
+
+        let pos = span.lo();
+        if pos < self.file_start_pos || pos >= self.file_end_pos {
+            let sm = cx.sess().source_map();
+            cx.extend_scope_to_file(dbg_scope, &sm.lookup_char_pos(pos).file)
+        } else {
+            dbg_scope
+        }
     }
 }
 
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn set_debug_loc(&self, bx: &mut Bx, source_info: mir::SourceInfo) {
-        let (scope, span) = self.debug_loc(source_info);
-        if let Some(scope) = scope {
-            bx.set_source_location(scope, span);
+        bx.set_span(source_info.span);
+        if let Some(dbg_loc) = self.dbg_loc(source_info) {
+            bx.set_dbg_loc(dbg_loc);
         }
     }
 
-    pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option<Bx::DIScope>, Span) {
+    fn dbg_loc(&self, source_info: mir::SourceInfo) -> Option<Bx::DILocation> {
+        let (dbg_scope, inlined_at, span) = self.adjusted_span_and_dbg_scope(source_info)?;
+        Some(self.cx.dbg_loc(dbg_scope, inlined_at, span))
+    }
+
+    fn adjusted_span_and_dbg_scope(
+        &self,
+        source_info: mir::SourceInfo,
+    ) -> Option<(Bx::DIScope, Option<Bx::DILocation>, Span)> {
+        let span = self.adjust_span_for_debugging(source_info.span);
+        let scope = &self.debug_context.as_ref()?.scopes[source_info.scope];
+        Some((scope.adjust_dbg_scope_for_span(self.cx, span), scope.inlined_at, span))
+    }
+
+    /// In order to have good line-stepping behavior in the debugger, we overwrite the debug
+    /// locations of macro expansions with that of the outermost expansion site
+    /// (unless the crate is being compiled with `-Z debug-macros`).
+    fn adjust_span_for_debugging(&self, mut span: Span) -> Span {
         // Bail out if debug info emission is not enabled.
-        match self.debug_context {
-            None => return (None, source_info.span),
-            Some(_) => {}
+        if self.debug_context.is_none() {
+            return span;
         }
 
-        // In order to have a good line stepping behavior in debugger, we overwrite debug
-        // locations of macro expansions with that of the outermost expansion site
-        // (unless the crate is being compiled with `-Z debug-macros`).
-        if !source_info.span.from_expansion() || self.cx.sess().opts.debugging_opts.debug_macros {
-            let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo());
-            (scope, source_info.span)
-        } else {
+        if span.from_expansion() && !self.cx.sess().opts.debugging_opts.debug_macros {
             // Walk up the macro expansion chain until we reach a non-expanded span.
             // We also stop at the function body level because no line stepping can occur
             // at the level above that.
-            let span = rustc_span::hygiene::walk_chain(source_info.span, self.mir.span.ctxt());
-            let scope = self.scope_metadata_for_loc(source_info.scope, span.lo());
             // Use span of the outermost expansion site, while keeping the original lexical scope.
-            (scope, span)
+            span = rustc_span::hygiene::walk_chain(span, self.mir.span.ctxt());
         }
-    }
 
-    // DILocations inherit source file name from the parent DIScope.  Due to macro expansions
-    // it may so happen that the current span belongs to a different file than the DIScope
-    // corresponding to span's containing source scope.  If so, we need to create a DIScope
-    // "extension" into that file.
-    fn scope_metadata_for_loc(
-        &self,
-        scope_id: mir::SourceScope,
-        pos: BytePos,
-    ) -> Option<Bx::DIScope> {
-        let debug_context = self.debug_context.as_ref()?;
-        let scope_metadata = debug_context.scopes[scope_id].scope_metadata;
-        if pos < debug_context.scopes[scope_id].file_start_pos
-            || pos >= debug_context.scopes[scope_id].file_end_pos
-        {
-            let sm = self.cx.sess().source_map();
-            let defining_crate = debug_context.defining_crate;
-            Some(self.cx.extend_scope_to_file(
-                scope_metadata.unwrap(),
-                &sm.lookup_char_pos(pos).file,
-                defining_crate,
-            ))
-        } else {
-            scope_metadata
-        }
+        span
     }
 
     /// Apply debuginfo and/or name, after creating the `alloca` for a local,
@@ -148,24 +154,20 @@
             } else {
                 let name = kw::Invalid;
                 let decl = &self.mir.local_decls[local];
-                let (scope, span) = if full_debug_info {
-                    self.debug_loc(decl.source_info)
-                } else {
-                    (None, decl.source_info.span)
-                };
-                let dbg_var = scope.map(|scope| {
-                    // FIXME(eddyb) is this `+ 1` needed at all?
-                    let kind = VariableKind::ArgumentVariable(arg_index + 1);
+                let dbg_var = if full_debug_info {
+                    self.adjusted_span_and_dbg_scope(decl.source_info).map(
+                        |(dbg_scope, _, span)| {
+                            // FIXME(eddyb) is this `+ 1` needed at all?
+                            let kind = VariableKind::ArgumentVariable(arg_index + 1);
 
-                    self.cx.create_dbg_var(
-                        self.debug_context.as_ref().unwrap(),
-                        name,
-                        self.monomorphize(&decl.ty),
-                        scope,
-                        kind,
-                        span,
+                            let arg_ty = self.monomorphize(&decl.ty);
+
+                            self.cx.create_dbg_var(name, arg_ty, dbg_scope, kind, span)
+                        },
                     )
-                });
+                } else {
+                    None
+                };
 
                 Some(PerLocalVarDebugInfo {
                     name,
@@ -246,6 +248,15 @@
         let vars = vars.iter().copied().chain(fallback_var);
 
         for var in vars {
+            let dbg_var = match var.dbg_var {
+                Some(dbg_var) => dbg_var,
+                None => continue,
+            };
+            let dbg_loc = match self.dbg_loc(var.source_info) {
+                Some(dbg_loc) => dbg_loc,
+                None => continue,
+            };
+
             let mut layout = base.layout;
             let mut direct_offset = Size::ZERO;
             // FIXME(eddyb) use smallvec here.
@@ -282,19 +293,7 @@
                 }
             }
 
-            let (scope, span) = self.debug_loc(var.source_info);
-            if let Some(scope) = scope {
-                if let Some(dbg_var) = var.dbg_var {
-                    bx.dbg_var_addr(
-                        dbg_var,
-                        scope,
-                        base.llval,
-                        direct_offset,
-                        &indirect_offsets,
-                        span,
-                    );
-                }
-            }
+            bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, &indirect_offsets);
         }
     }
 
@@ -318,12 +317,12 @@
 
         let mut per_local = IndexVec::from_elem(vec![], &self.mir.local_decls);
         for var in &self.mir.var_debug_info {
-            let (scope, span) = if full_debug_info {
-                self.debug_loc(var.source_info)
+            let dbg_scope_and_span = if full_debug_info {
+                self.adjusted_span_and_dbg_scope(var.source_info)
             } else {
-                (None, var.source_info.span)
+                None
             };
-            let dbg_var = scope.map(|scope| {
+            let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
                 let place = var.place;
                 let var_ty = self.monomorphized_place_ty(place.as_ref());
                 let var_kind = if self.mir.local_kind(place.local) == mir::LocalKind::Arg
@@ -339,14 +338,7 @@
                 } else {
                     VariableKind::LocalVariable
                 };
-                self.cx.create_dbg_var(
-                    self.debug_context.as_ref().unwrap(),
-                    var.name,
-                    var_ty,
-                    scope,
-                    var_kind,
-                    span,
-                )
+                self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
             });
 
             per_local[var.place.local].push(PerLocalVarDebugInfo {
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 14f1ed5..2bf1ee4 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -580,8 +580,12 @@
 // stuffs.
 fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
     match ty.kind() {
-        ty::Int(t) => Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.ptr_width)), true)),
-        ty::Uint(t) => Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.ptr_width)), false)),
+        ty::Int(t) => {
+            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
+        }
+        ty::Uint(t) => {
+            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
+        }
         _ => None,
     }
 }
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 64d456f..01fd168 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -26,7 +26,7 @@
 
     mir: &'tcx mir::Body<'tcx>,
 
-    debug_context: Option<FunctionDebugContext<Bx::DIScope>>,
+    debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,
 
     llfn: Bx::Function,
 
@@ -92,15 +92,11 @@
         T: Copy + TypeFoldable<'tcx>,
     {
         debug!("monomorphize: self.instance={:?}", self.instance);
-        if let Some(substs) = self.instance.substs_for_mir_body() {
-            self.cx.tcx().subst_and_normalize_erasing_regions(
-                substs,
-                ty::ParamEnv::reveal_all(),
-                &value,
-            )
-        } else {
-            self.cx.tcx().normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
-        }
+        self.instance.subst_mir_and_normalize_erasing_regions(
+            self.cx.tcx(),
+            ty::ParamEnv::reveal_all(),
+            value,
+        )
     }
 }
 
@@ -153,7 +149,7 @@
         bx.set_personality_fn(cx.eh_personality());
     }
 
-    bx.sideeffect();
+    bx.sideeffect(false);
 
     let cleanup_kinds = analyze::cleanup_kinds(&mir);
     // Allocate a `Block` for every basic block, except
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 91609b2..e1cc026 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -346,8 +346,8 @@
                 ..
             } => {
                 if variant_index != dataful_variant {
-                    if bx.cx().sess().target.target.arch == "arm"
-                        || bx.cx().sess().target.target.arch == "aarch64"
+                    if bx.cx().sess().target.arch == "arm"
+                        || bx.cx().sess().target.arch == "aarch64"
                     {
                         // FIXME(#34427): as workaround for LLVM bug on ARM,
                         // use memset of 0 before assigning niche value.
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 7ce110d..40ae0a1 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -502,6 +502,7 @@
             }
 
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
+                let ty = self.monomorphize(&ty);
                 assert!(bx.cx().type_is_sized(ty));
                 let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
                 let tcx = self.cx.tcx();
diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs
new file mode 100644
index 0000000..000ddf4
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/target_features.rs
@@ -0,0 +1,166 @@
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::ty::query::Providers;
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::symbol::Symbol;
+
+const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+    ("aclass", Some(sym::arm_target_feature)),
+    ("mclass", Some(sym::arm_target_feature)),
+    ("rclass", Some(sym::arm_target_feature)),
+    ("dsp", Some(sym::arm_target_feature)),
+    ("neon", Some(sym::arm_target_feature)),
+    ("crc", Some(sym::arm_target_feature)),
+    ("crypto", Some(sym::arm_target_feature)),
+    ("v5te", Some(sym::arm_target_feature)),
+    ("v6", Some(sym::arm_target_feature)),
+    ("v6k", Some(sym::arm_target_feature)),
+    ("v6t2", Some(sym::arm_target_feature)),
+    ("v7", Some(sym::arm_target_feature)),
+    ("v8", Some(sym::arm_target_feature)),
+    ("vfp2", Some(sym::arm_target_feature)),
+    ("vfp3", Some(sym::arm_target_feature)),
+    ("vfp4", Some(sym::arm_target_feature)),
+    // This is needed for inline assembly, but shouldn't be stabilized as-is
+    // since it should be enabled per-function using #[instruction_set], not
+    // #[target_feature].
+    ("thumb-mode", Some(sym::arm_target_feature)),
+];
+
+const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+    ("fp", Some(sym::aarch64_target_feature)),
+    ("neon", Some(sym::aarch64_target_feature)),
+    ("sve", Some(sym::aarch64_target_feature)),
+    ("crc", Some(sym::aarch64_target_feature)),
+    ("crypto", Some(sym::aarch64_target_feature)),
+    ("ras", Some(sym::aarch64_target_feature)),
+    ("lse", Some(sym::aarch64_target_feature)),
+    ("rdm", Some(sym::aarch64_target_feature)),
+    ("fp16", Some(sym::aarch64_target_feature)),
+    ("rcpc", Some(sym::aarch64_target_feature)),
+    ("dotprod", Some(sym::aarch64_target_feature)),
+    ("tme", Some(sym::aarch64_target_feature)),
+    ("v8.1a", Some(sym::aarch64_target_feature)),
+    ("v8.2a", Some(sym::aarch64_target_feature)),
+    ("v8.3a", Some(sym::aarch64_target_feature)),
+];
+
+const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+    ("adx", Some(sym::adx_target_feature)),
+    ("aes", None),
+    ("avx", None),
+    ("avx2", None),
+    ("avx512bw", Some(sym::avx512_target_feature)),
+    ("avx512cd", Some(sym::avx512_target_feature)),
+    ("avx512dq", Some(sym::avx512_target_feature)),
+    ("avx512er", Some(sym::avx512_target_feature)),
+    ("avx512f", Some(sym::avx512_target_feature)),
+    ("avx512ifma", Some(sym::avx512_target_feature)),
+    ("avx512pf", Some(sym::avx512_target_feature)),
+    ("avx512vbmi", Some(sym::avx512_target_feature)),
+    ("avx512vl", Some(sym::avx512_target_feature)),
+    ("avx512vpopcntdq", Some(sym::avx512_target_feature)),
+    ("bmi1", None),
+    ("bmi2", None),
+    ("cmpxchg16b", Some(sym::cmpxchg16b_target_feature)),
+    ("ermsb", Some(sym::ermsb_target_feature)),
+    ("f16c", Some(sym::f16c_target_feature)),
+    ("fma", None),
+    ("fxsr", None),
+    ("lzcnt", None),
+    ("movbe", Some(sym::movbe_target_feature)),
+    ("pclmulqdq", None),
+    ("popcnt", None),
+    ("rdrand", None),
+    ("rdseed", None),
+    ("rtm", Some(sym::rtm_target_feature)),
+    ("sha", None),
+    ("sse", None),
+    ("sse2", None),
+    ("sse3", None),
+    ("sse4.1", None),
+    ("sse4.2", None),
+    ("sse4a", Some(sym::sse4a_target_feature)),
+    ("ssse3", None),
+    ("tbm", Some(sym::tbm_target_feature)),
+    ("xsave", None),
+    ("xsavec", None),
+    ("xsaveopt", None),
+    ("xsaves", None),
+];
+
+const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+    ("hvx", Some(sym::hexagon_target_feature)),
+    ("hvx-length128b", Some(sym::hexagon_target_feature)),
+];
+
+const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+    ("altivec", Some(sym::powerpc_target_feature)),
+    ("power8-altivec", Some(sym::powerpc_target_feature)),
+    ("power9-altivec", Some(sym::powerpc_target_feature)),
+    ("power8-vector", Some(sym::powerpc_target_feature)),
+    ("power9-vector", Some(sym::powerpc_target_feature)),
+    ("vsx", Some(sym::powerpc_target_feature)),
+];
+
+const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
+    &[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];
+
+const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+    ("m", Some(sym::riscv_target_feature)),
+    ("a", Some(sym::riscv_target_feature)),
+    ("c", Some(sym::riscv_target_feature)),
+    ("f", Some(sym::riscv_target_feature)),
+    ("d", Some(sym::riscv_target_feature)),
+    ("e", Some(sym::riscv_target_feature)),
+];
+
+const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+    ("simd128", Some(sym::wasm_target_feature)),
+    ("atomics", Some(sym::wasm_target_feature)),
+    ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
+];
+
+/// When rustdoc is running, provide a list of all known features so that all their respective
+/// primitives may be documented.
+///
+/// IMPORTANT: If you're adding another feature list above, make sure to add it to this iterator!
+pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol>)> {
+    std::iter::empty()
+        .chain(ARM_ALLOWED_FEATURES.iter())
+        .chain(AARCH64_ALLOWED_FEATURES.iter())
+        .chain(X86_ALLOWED_FEATURES.iter())
+        .chain(HEXAGON_ALLOWED_FEATURES.iter())
+        .chain(POWERPC_ALLOWED_FEATURES.iter())
+        .chain(MIPS_ALLOWED_FEATURES.iter())
+        .chain(RISCV_ALLOWED_FEATURES.iter())
+        .chain(WASM_ALLOWED_FEATURES.iter())
+        .cloned()
+}
+
+pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Option<Symbol>)] {
+    match &*sess.target.arch {
+        "arm" => ARM_ALLOWED_FEATURES,
+        "aarch64" => AARCH64_ALLOWED_FEATURES,
+        "x86" | "x86_64" => X86_ALLOWED_FEATURES,
+        "hexagon" => HEXAGON_ALLOWED_FEATURES,
+        "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
+        "powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
+        "riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
+        "wasm32" => WASM_ALLOWED_FEATURES,
+        _ => &[],
+    }
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+    providers.supported_target_features = |tcx, cnum| {
+        assert_eq!(cnum, LOCAL_CRATE);
+        if tcx.sess.opts.actually_rustdoc {
+            // rustdoc needs to be able to document functions that use all the features, so
+            // whitelist them all
+            all_known_features().map(|(a, b)| (a.to_string(), b)).collect()
+        } else {
+            supported_target_features(tcx.sess).iter().map(|&(a, b)| (a.to_string(), b)).collect()
+        }
+    };
+}
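
The new target_features.rs above keys the allowed-feature tables off `sess.target.arch` and, for rustdoc, chains every table together so all features can be documented. A minimal standalone sketch of that dispatch pattern follows; it swaps the rustc `Session`/`Symbol` plumbing for plain strings, and the table contents are abbreviated for illustration:

```rust
// Sketch of the per-arch feature-table dispatch; entries are illustrative, not the full lists.
const ARM_FEATURES: &[(&str, Option<&str>)] =
    &[("neon", Some("arm_target_feature")), ("crc", Some("arm_target_feature"))];
const X86_FEATURES: &[(&str, Option<&str>)] =
    &[("sse2", None), ("avx2", None), ("avx512f", Some("avx512_target_feature"))];

fn supported_features(arch: &str) -> &'static [(&'static str, Option<&'static str>)] {
    match arch {
        "arm" => ARM_FEATURES,
        "x86" | "x86_64" => X86_FEATURES,
        _ => &[],
    }
}

fn all_known_features() -> impl Iterator<Item = (&'static str, Option<&'static str>)> {
    // Mirrors the "chain every table" approach so nothing is missed when documenting.
    ARM_FEATURES.iter().chain(X86_FEATURES.iter()).copied()
}

fn main() {
    assert!(supported_features("x86_64").iter().any(|&(f, _)| f == "avx2"));
    assert_eq!(supported_features("sparc").len(), 0);
    println!("{} known features", all_known_features().count());
}
```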
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index 90520f7..b9c555c 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -1,10 +1,11 @@
 use super::write::WriteBackendMethods;
 use super::CodegenObject;
-use crate::ModuleCodegen;
+use crate::{CodegenResults, ModuleCodegen};
 
 use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::ErrorReported;
-use rustc_middle::dep_graph::DepGraph;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoaderDyn};
 use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
 use rustc_middle::ty::query::Providers;
@@ -33,6 +34,7 @@
     // FIXME(eddyb) find a common convention for all of the debuginfo-related
     // names (choose between `Dbg`, `Debug`, `DebugInfo`, `DI` etc.).
     type DIScope: Copy;
+    type DILocation: Copy;
     type DIVariable: Copy;
 }
 
@@ -80,8 +82,7 @@
         &self,
         ongoing_codegen: Box<dyn Any>,
         sess: &Session,
-        dep_graph: &DepGraph,
-    ) -> Result<Box<dyn Any>, ErrorReported>;
+    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported>;
 
     /// This is called on the returned `Box<dyn Any>` from `join_codegen`
     ///
@@ -91,7 +92,7 @@
     fn link(
         &self,
         sess: &Session,
-        codegen_results: Box<dyn Any>,
+        codegen_results: CodegenResults,
         outputs: &OutputFilenames,
     ) -> Result<(), ErrorReported>;
 }
@@ -109,6 +110,7 @@
         tcx: TyCtxt<'tcx>,
         mods: &mut Self::Module,
         kind: AllocatorKind,
+        has_alloc_error_handler: bool,
     );
     /// This generates the codegen unit and returns it along with
     /// a `u64` giving an estimate of the unit's processing cost.
@@ -123,4 +125,5 @@
         opt_level: config::OptLevel,
     ) -> Arc<dyn Fn() -> Result<Self::TargetMachine, String> + Send + Sync>;
     fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
+    fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str>;
 }
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index b35b0f2..d5bd278 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -15,6 +15,7 @@
 
 use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
 use rustc_middle::ty::Ty;
+use rustc_span::Span;
 use rustc_target::abi::{Abi, Align, Scalar, Size};
 use rustc_target::spec::HasTargetSpec;
 
@@ -44,6 +45,7 @@
     fn build_sibling_block(&self, name: &str) -> Self;
     fn cx(&self) -> &Self::CodegenCx;
     fn llbb(&self) -> Self::BasicBlock;
+    fn set_span(&mut self, span: Span);
 
     fn position_at_end(&mut self, llbb: Self::BasicBlock);
     fn ret_void(&mut self);
diff --git a/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
index b74e4e4..95bddfb 100644
--- a/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
@@ -9,23 +9,37 @@
 pub trait CoverageInfoBuilderMethods<'tcx>: BackendTypes {
     fn create_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value;
 
-    fn add_counter_region(
+    /// Returns true if the function source hash was added to the coverage map (even if it had
+    /// already been added, for this instance). Returns false *only* if `-Z instrument-coverage` is
+    /// not enabled (a coverage map is not being generated).
+    fn set_function_source_hash(
         &mut self,
         instance: Instance<'tcx>,
         function_source_hash: u64,
-        id: CounterValueReference,
-        region: CodeRegion,
-    );
+    ) -> bool;
 
-    fn add_counter_expression_region(
+    /// Returns true if the counter was added to the coverage map; false if `-Z instrument-coverage`
+    /// is not enabled (a coverage map is not being generated).
+    fn add_coverage_counter(
         &mut self,
         instance: Instance<'tcx>,
-        id: InjectedExpressionIndex,
+        index: CounterValueReference,
+        region: CodeRegion,
+    ) -> bool;
+
+    /// Returns true if the expression was added to the coverage map; false if
+    /// `-Z instrument-coverage` is not enabled (a coverage map is not being generated).
+    fn add_coverage_counter_expression(
+        &mut self,
+        instance: Instance<'tcx>,
+        id: InjectedExpressionId,
         lhs: ExpressionOperandId,
         op: Op,
         rhs: ExpressionOperandId,
-        region: CodeRegion,
-    );
+        region: Option<CodeRegion>,
+    ) -> bool;
 
-    fn add_unreachable_region(&mut self, instance: Instance<'tcx>, region: CodeRegion);
+    /// Returns true if the region was added to the coverage map; false if `-Z instrument-coverage`
+    /// is not enabled (a coverage map is not being generated).
+    fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool;
 }
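
Each of the renamed coverage methods now returns `bool` so callers can tell whether anything was actually recorded; per the doc comments, `false` is returned only when `-Z instrument-coverage` is off and no coverage map is being built. A hedged, standalone sketch of that gating shape (the struct and field names here are invented, not the real backend types):

```rust
// Illustrative only: a recorder that drops coverage data when instrument-coverage is off,
// returning whether it recorded anything, like the bool-returning trait methods above.
struct CoverageRecorder {
    instrument_coverage: bool,
    counters: Vec<(u32, String)>,
}

impl CoverageRecorder {
    fn add_coverage_counter(&mut self, index: u32, region: &str) -> bool {
        if !self.instrument_coverage {
            // No coverage map is being generated.
            return false;
        }
        self.counters.push((index, region.to_string()));
        true
    }
}

fn main() {
    let mut on = CoverageRecorder { instrument_coverage: true, counters: vec![] };
    let mut off = CoverageRecorder { instrument_coverage: false, counters: vec![] };
    assert!(on.add_coverage_counter(1, "lib.rs:3:1-5:2"));
    assert!(!off.add_coverage_counter(1, "lib.rs:3:1-5:2"));
}
```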
diff --git a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
index 1ee0f48..3e66d71 100644
--- a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
@@ -1,6 +1,5 @@
 use super::BackendTypes;
 use crate::mir::debuginfo::{FunctionDebugContext, VariableKind};
-use rustc_hir::def_id::CrateNum;
 use rustc_middle::mir;
 use rustc_middle::ty::{Instance, Ty};
 use rustc_span::{SourceFile, Span, Symbol};
@@ -19,14 +18,29 @@
         instance: Instance<'tcx>,
         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         llfn: Self::Function,
-        mir: &mir::Body<'_>,
-    ) -> Option<FunctionDebugContext<Self::DIScope>>;
+        mir: &mir::Body<'tcx>,
+    ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>>;
+
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+    fn dbg_scope_fn(
+        &self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        maybe_definition_llfn: Option<Self::Function>,
+    ) -> Self::DIScope;
+
+    fn dbg_loc(
+        &self,
+        scope: Self::DIScope,
+        inlined_at: Option<Self::DILocation>,
+        span: Span,
+    ) -> Self::DILocation;
 
     fn extend_scope_to_file(
         &self,
         scope_metadata: Self::DIScope,
         file: &SourceFile,
-        defining_crate: CrateNum,
     ) -> Self::DIScope;
     fn debuginfo_finalize(&self);
 
@@ -34,7 +48,6 @@
     // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
     fn create_dbg_var(
         &self,
-        dbg_context: &FunctionDebugContext<Self::DIScope>,
         variable_name: Symbol,
         variable_type: Ty<'tcx>,
         scope_metadata: Self::DIScope,
@@ -49,14 +62,13 @@
     fn dbg_var_addr(
         &mut self,
         dbg_var: Self::DIVariable,
-        scope_metadata: Self::DIScope,
+        dbg_loc: Self::DILocation,
         variable_alloca: Self::Value,
         direct_offset: Size,
         // NB: each offset implies a deref (i.e. they're steps in a pointer chain).
         indirect_offsets: &[Size],
-        span: Span,
     );
-    fn set_source_location(&mut self, scope: Self::DIScope, span: Span);
+    fn set_dbg_loc(&mut self, dbg_loc: Self::DILocation);
     fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
     fn set_var_name(&mut self, value: Self::Value, name: &str);
 }
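
After this change a caller builds a `DILocation` once from a scope and span via `dbg_loc`, then passes that single handle to `dbg_var_addr` and `set_dbg_loc` instead of a separate scope plus span. A toy sketch of that calling order, with placeholder types standing in for the real `DIScope`/`DILocation` backend types:

```rust
// Placeholder types standing in for Bx::DIScope / Bx::DILocation.
#[derive(Clone, Copy, Debug)]
struct DIScope(u32);
#[derive(Clone, Copy, Debug)]
struct DILocation { scope: DIScope, line: u32 }

struct ToyBuilder;

impl ToyBuilder {
    // Build a location once from a scope and a span-ish position...
    fn dbg_loc(&self, scope: DIScope, line: u32) -> DILocation {
        DILocation { scope, line }
    }
    // ...then reuse the same handle wherever a (scope, span) pair used to be passed.
    fn set_dbg_loc(&mut self, dbg_loc: DILocation) {
        println!("current location: {:?}", dbg_loc);
    }
    fn dbg_var_addr(&mut self, dbg_loc: DILocation, var_name: &str) {
        println!("var {} at {:?}", var_name, dbg_loc);
    }
}

fn main() {
    let mut bx = ToyBuilder;
    let loc = bx.dbg_loc(DIScope(1), 42);
    bx.set_dbg_loc(loc);
    bx.dbg_var_addr(loc, "x");
}
```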
diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
index ccd294d..ac3c99f 100644
--- a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -20,7 +20,9 @@
     fn abort(&mut self);
     fn assume(&mut self, val: Self::Value);
     fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
-    fn sideeffect(&mut self);
+    /// Normally, sideeffect is only emitted if -Zinsert-sideeffect is passed;
+    /// in some cases, though, we want to emit it regardless.
+    fn sideeffect(&mut self, unconditional: bool);
     /// Trait method used to inject `va_start` on the "spoofed" `VaListImpl` in
     /// Rust defined C-variadic functions.
     fn va_start(&mut self, val: Self::Value) -> Self::Value;
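
The new `unconditional` flag lets a call site force the emission even when `-Zinsert-sideeffect` was not passed. A small stand-in for that gating logic (the function and counter below are purely illustrative, not the real builder method):

```rust
// Hedged sketch: emit when explicitly forced, or when the debug flag asks for it.
fn maybe_emit_sideeffect(unconditional: bool, insert_sideeffect_flag: bool, emitted: &mut u32) {
    if unconditional || insert_sideeffect_flag {
        *emitted += 1; // stand-in for actually emitting the intrinsic call
    }
}

fn main() {
    let mut emitted = 0;
    maybe_emit_sideeffect(false, false, &mut emitted); // neither requested: skipped
    maybe_emit_sideeffect(false, true, &mut emitted);  // flag enabled: emitted
    maybe_emit_sideeffect(true, false, &mut emitted);  // forced regardless of the flag
    assert_eq!(emitted, 2);
}
```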
diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs
index 698ef60..8ada6c1 100644
--- a/compiler/rustc_codegen_ssa/src/traits/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs
@@ -85,7 +85,7 @@
 }
 
 pub trait HasCodegen<'tcx>:
-    Backend<'tcx> + ::std::ops::Deref<Target = <Self as HasCodegen<'tcx>>::CodegenCx>
+    Backend<'tcx> + std::ops::Deref<Target = <Self as HasCodegen<'tcx>>::CodegenCx>
 {
     type CodegenCx: CodegenMethods<'tcx>
         + BackendTypes<
@@ -95,6 +95,7 @@
             Type = Self::Type,
             Funclet = Self::Funclet,
             DIScope = Self::DIScope,
+            DILocation = Self::DILocation,
             DIVariable = Self::DIVariable,
         >;
 }
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
index cec07b9..634a20b 100644
--- a/compiler/rustc_codegen_ssa/src/traits/type_.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -51,11 +51,11 @@
     }
 
     fn type_int(&self) -> Self::Type {
-        match &self.sess().target.target.target_c_int_width[..] {
+        match &self.sess().target.c_int_width[..] {
             "16" => self.type_i16(),
             "32" => self.type_i32(),
             "64" => self.type_i64(),
-            width => bug!("Unsupported target_c_int_width: {}", width),
+            width => bug!("Unsupported c_int_width: {}", width),
         }
     }
 
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index caaf7c0..23e689fc 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -25,7 +25,7 @@
 smallvec = { version = "1.0", features = ["union", "may_dangle"] }
 rustc_index = { path = "../rustc_index", package = "rustc_index" }
 bitflags = "1.2.1"
-measureme = "0.7.1"
+measureme = "9.0.0"
 libc = "0.2"
 stacker = "0.1.12"
 tempfile = "3.0.5"
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
index aba0bbb..ec2f959 100644
--- a/compiler/rustc_data_structures/src/fingerprint.rs
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -71,8 +71,8 @@
     }
 }
 
-impl ::std::fmt::Display for Fingerprint {
-    fn fmt(&self, formatter: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+impl std::fmt::Display for Fingerprint {
+    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         write!(formatter, "{:x}-{:x}", self.0, self.1)
     }
 }
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index 438a0d0..1cfbce2 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -9,6 +9,7 @@
 use super::ControlFlowGraph;
 use rustc_index::vec::{Idx, IndexVec};
 use std::borrow::BorrowMut;
+use std::cmp::Ordering;
 
 #[cfg(test)]
 mod tests;
@@ -108,6 +109,14 @@
         // FIXME -- could be optimized by using post-order-rank
         self.dominators(node).any(|n| n == dom)
     }
+
+    /// Provide deterministic ordering of nodes such that, if any two nodes have a dominator
+    /// relationship, the dominator will always precede the dominated. (The relative ordering
+    /// of two unrelated nodes will also be consistent, but otherwise the order has no
+    /// meaning.) This method cannot be used to determine if either node dominates the other.
+    pub fn rank_partial_cmp(&self, lhs: Node, rhs: Node) -> Option<Ordering> {
+        self.post_order_rank[lhs].partial_cmp(&self.post_order_rank[rhs])
+    }
 }
 
 pub struct Iter<'dom, Node: Idx> {
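
`rank_partial_cmp` exposes the post-order rank only as an ordering: sorting nodes with it places a dominator before anything it dominates, while saying nothing about whether two particular nodes are related. A small standalone illustration of using such a comparator to order nodes (the rank table here is invented, chosen to be consistent with that guarantee):

```rust
use std::cmp::Ordering;

// Invented ranks standing in for `post_order_rank`; chosen so each dominator
// compares before the nodes it dominates, matching the doc comment above.
fn rank_partial_cmp(rank: &[usize], lhs: usize, rhs: usize) -> Option<Ordering> {
    rank[lhs].partial_cmp(&rank[rhs])
}

fn main() {
    // Node 0 dominates 1 and 2; node 1 dominates 3.
    let rank = [0usize, 1, 2, 3];
    let mut nodes = vec![3, 2, 0, 1];
    nodes.sort_by(|&a, &b| rank_partial_cmp(&rank, a, b).unwrap());
    assert_eq!(nodes, vec![0, 1, 2, 3]); // dominators precede the nodes they dominate
}
```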
diff --git a/compiler/rustc_data_structures/src/graph/iterate/mod.rs b/compiler/rustc_data_structures/src/graph/iterate/mod.rs
index 5f42d46..1634c58 100644
--- a/compiler/rustc_data_structures/src/graph/iterate/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/iterate/mod.rs
@@ -33,16 +33,31 @@
     result: &mut Vec<G::Node>,
     visited: &mut IndexVec<G::Node, bool>,
 ) {
+    struct PostOrderFrame<Node, Iter> {
+        node: Node,
+        iter: Iter,
+    }
+
     if visited[node] {
         return;
     }
-    visited[node] = true;
 
-    for successor in graph.successors(node) {
-        post_order_walk(graph, successor, result, visited);
+    let mut stack = vec![PostOrderFrame { node, iter: graph.successors(node) }];
+
+    'recurse: while let Some(frame) = stack.last_mut() {
+        let node = frame.node;
+        visited[node] = true;
+
+        while let Some(successor) = frame.iter.next() {
+            if !visited[successor] {
+                stack.push(PostOrderFrame { node: successor, iter: graph.successors(successor) });
+                continue 'recurse;
+            }
+        }
+
+        let _ = stack.pop();
+        result.push(node);
     }
-
-    result.push(node);
 }
 
 pub fn reverse_post_order<G: DirectedGraph + WithSuccessors + WithNumNodes>(
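
The hunk above replaces the recursive post-order walk with an explicit stack of `(node, successor-iterator)` frames so that deep graphs cannot overflow the call stack. A self-contained version of the same pattern over a plain adjacency list (the `Vec<Vec<usize>>` graph representation is illustrative, not the rustc graph traits):

```rust
// Iterative DFS post-order using an explicit stack of (node, successor iterator)
// frames, mirroring the PostOrderFrame approach in the diff.
fn post_order(graph: &[Vec<usize>], start: usize) -> Vec<usize> {
    let mut visited = vec![false; graph.len()];
    let mut result = Vec::new();
    let mut stack = vec![(start, graph[start].iter())];
    visited[start] = true;
    'recurse: while let Some(frame) = stack.last_mut() {
        let node = frame.0;
        while let Some(&succ) = frame.1.next() {
            if !visited[succ] {
                visited[succ] = true;
                stack.push((succ, graph[succ].iter()));
                continue 'recurse; // descend into the successor first
            }
        }
        stack.pop();
        result.push(node); // a node is emitted only after all of its successors
    }
    result
}

fn main() {
    // 0 -> 1 -> 2, 0 -> 2
    let graph = vec![vec![1, 2], vec![2], vec![]];
    assert_eq!(post_order(&graph, 0), vec![2, 1, 0]);
}
```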
diff --git a/compiler/rustc_data_structures/src/graph/scc/mod.rs b/compiler/rustc_data_structures/src/graph/scc/mod.rs
index 2db8e46..486a9ba 100644
--- a/compiler/rustc_data_structures/src/graph/scc/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/scc/mod.rs
@@ -307,10 +307,7 @@
     fn walk_unvisited_node(&mut self, depth: usize, node: G::Node) -> WalkReturn<S> {
         debug!("walk_unvisited_node(depth = {:?}, node = {:?})", depth, node);
 
-        debug_assert!(match self.node_states[node] {
-            NodeState::NotVisited => true,
-            _ => false,
-        });
+        debug_assert!(matches!(self.node_states[node], NodeState::NotVisited));
 
         // Push `node` onto the stack.
         self.node_states[node] = NodeState::BeingVisited { depth };
diff --git a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
index 0644671..4ed8887 100644
--- a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
@@ -29,7 +29,7 @@
 
         // Create the *edge starts* array. We are iterating over
         // the (sorted) edge pairs. We maintain the invariant that the
-        // length of the `node_starts` arary is enough to store the
+        // length of the `node_starts` array is enough to store the
         // current source node -- so when we see that the source node
         // for an edge is greater than the current length, we grow the
         // edge-starts array by just enough.
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 90b0f25..7669b78 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -28,6 +28,7 @@
 #![feature(const_panic)]
 #![feature(min_const_generics)]
 #![feature(once_cell)]
+#![feature(maybe_uninit_uninit_array)]
 #![allow(rustc::default_hash_types)]
 
 #[macro_use]
@@ -100,8 +101,7 @@
 pub mod work_queue;
 pub use atomic_ref::AtomicRef;
 pub mod frozen;
-pub mod mini_map;
-pub mod mini_set;
+pub mod sso;
 pub mod tagged_ptr;
 pub mod temp_dir;
 pub mod unhash;
diff --git a/compiler/rustc_data_structures/src/mini_map.rs b/compiler/rustc_data_structures/src/mini_map.rs
deleted file mode 100644
index cd3e949..0000000
--- a/compiler/rustc_data_structures/src/mini_map.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-use crate::fx::FxHashMap;
-use arrayvec::ArrayVec;
-
-use std::hash::Hash;
-
-/// Small-storage-optimized implementation of a map
-/// made specifically for caching results.
-///
-/// Stores elements in a small array up to a certain length
-/// and switches to `HashMap` when that length is exceeded.
-pub enum MiniMap<K, V> {
-    Array(ArrayVec<[(K, V); 8]>),
-    Map(FxHashMap<K, V>),
-}
-
-impl<K: Eq + Hash, V> MiniMap<K, V> {
-    /// Creates an empty `MiniMap`.
-    pub fn new() -> Self {
-        MiniMap::Array(ArrayVec::new())
-    }
-
-    /// Inserts or updates value in the map.
-    pub fn insert(&mut self, key: K, value: V) {
-        match self {
-            MiniMap::Array(array) => {
-                for pair in array.iter_mut() {
-                    if pair.0 == key {
-                        pair.1 = value;
-                        return;
-                    }
-                }
-                if let Err(error) = array.try_push((key, value)) {
-                    let mut map: FxHashMap<K, V> = array.drain(..).collect();
-                    let (key, value) = error.element();
-                    map.insert(key, value);
-                    *self = MiniMap::Map(map);
-                }
-            }
-            MiniMap::Map(map) => {
-                map.insert(key, value);
-            }
-        }
-    }
-
-    /// Return value by key if any.
-    pub fn get(&self, key: &K) -> Option<&V> {
-        match self {
-            MiniMap::Array(array) => {
-                for pair in array {
-                    if pair.0 == *key {
-                        return Some(&pair.1);
-                    }
-                }
-                return None;
-            }
-            MiniMap::Map(map) => {
-                return map.get(key);
-            }
-        }
-    }
-}
diff --git a/compiler/rustc_data_structures/src/mini_set.rs b/compiler/rustc_data_structures/src/mini_set.rs
deleted file mode 100644
index 9d45af7..0000000
--- a/compiler/rustc_data_structures/src/mini_set.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use crate::fx::FxHashSet;
-use arrayvec::ArrayVec;
-use std::hash::Hash;
-/// Small-storage-optimized implementation of a set.
-///
-/// Stores elements in a small array up to a certain length
-/// and switches to `HashSet` when that length is exceeded.
-pub enum MiniSet<T> {
-    Array(ArrayVec<[T; 8]>),
-    Set(FxHashSet<T>),
-}
-
-impl<T: Eq + Hash> MiniSet<T> {
-    /// Creates an empty `MiniSet`.
-    pub fn new() -> Self {
-        MiniSet::Array(ArrayVec::new())
-    }
-
-    /// Adds a value to the set.
-    ///
-    /// If the set did not have this value present, true is returned.
-    ///
-    /// If the set did have this value present, false is returned.
-    pub fn insert(&mut self, elem: T) -> bool {
-        match self {
-            MiniSet::Array(array) => {
-                if array.iter().any(|e| *e == elem) {
-                    false
-                } else {
-                    if let Err(error) = array.try_push(elem) {
-                        let mut set: FxHashSet<T> = array.drain(..).collect();
-                        set.insert(error.element());
-                        *self = MiniSet::Set(set);
-                    }
-                    true
-                }
-            }
-            MiniSet::Set(set) => set.insert(elem),
-        }
-    }
-}
diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
index 7cf5202..a5b2df1 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/mod.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
@@ -129,7 +129,7 @@
 struct ObligationTreeId(usize);
 
 type ObligationTreeIdGenerator =
-    ::std::iter::Map<::std::ops::RangeFrom<usize>, fn(usize) -> ObligationTreeId>;
+    std::iter::Map<std::ops::RangeFrom<usize>, fn(usize) -> ObligationTreeId>;
 
 pub struct ObligationForest<O: ForestObligation> {
     /// The list of obligations. In between calls to `process_obligations`,
@@ -149,8 +149,8 @@
     /// comments in `process_obligation` for details.
     active_cache: FxHashMap<O::CacheKey, usize>,
 
-    /// A vector reused in compress(), to avoid allocating new vectors.
-    node_rewrites: Vec<usize>,
+    /// A vector reused in compress() and find_cycles_from_node(), to avoid allocating new vectors.
+    reused_node_vec: Vec<usize>,
 
     obligation_tree_id_generator: ObligationTreeIdGenerator,
 
@@ -251,12 +251,22 @@
     Error,
 }
 
+/// This trait allows us to have two different Outcome types:
+///  - the normal one that does as little as possible
+///  - one for tests that does some additional work and checking
+pub trait OutcomeTrait {
+    type Error;
+    type Obligation;
+
+    fn new() -> Self;
+    fn mark_not_stalled(&mut self);
+    fn is_stalled(&self) -> bool;
+    fn record_completed(&mut self, outcome: &Self::Obligation);
+    fn record_error(&mut self, error: Self::Error);
+}
+
 #[derive(Debug)]
 pub struct Outcome<O, E> {
-    /// Obligations that were completely evaluated, including all
-    /// (transitive) subobligations. Only computed if requested.
-    pub completed: Option<Vec<O>>,
-
     /// Backtrace of obligations that were found to be in error.
     pub errors: Vec<Error<O, E>>,
 
@@ -269,12 +279,29 @@
     pub stalled: bool,
 }
 
-/// Should `process_obligations` compute the `Outcome::completed` field of its
-/// result?
-#[derive(PartialEq)]
-pub enum DoCompleted {
-    No,
-    Yes,
+impl<O, E> OutcomeTrait for Outcome<O, E> {
+    type Error = Error<O, E>;
+    type Obligation = O;
+
+    fn new() -> Self {
+        Self { stalled: true, errors: vec![] }
+    }
+
+    fn mark_not_stalled(&mut self) {
+        self.stalled = false;
+    }
+
+    fn is_stalled(&self) -> bool {
+        self.stalled
+    }
+
+    fn record_completed(&mut self, _outcome: &Self::Obligation) {
+        // do nothing
+    }
+
+    fn record_error(&mut self, error: Self::Error) {
+        self.errors.push(error)
+    }
 }
 
 #[derive(Debug, PartialEq, Eq)]
@@ -289,7 +316,7 @@
             nodes: vec![],
             done_cache: Default::default(),
             active_cache: Default::default(),
-            node_rewrites: vec![],
+            reused_node_vec: vec![],
             obligation_tree_id_generator: (0..).map(ObligationTreeId),
             error_cache: Default::default(),
         }
@@ -363,8 +390,7 @@
             .map(|(index, _node)| Error { error: error.clone(), backtrace: self.error_at(index) })
             .collect();
 
-        let successful_obligations = self.compress(DoCompleted::Yes);
-        assert!(successful_obligations.unwrap().is_empty());
+        self.compress(|_| assert!(false));
         errors
     }
 
@@ -392,16 +418,12 @@
     /// be called in a loop until `outcome.stalled` is false.
     ///
     /// This _cannot_ be unrolled (presently, at least).
-    pub fn process_obligations<P>(
-        &mut self,
-        processor: &mut P,
-        do_completed: DoCompleted,
-    ) -> Outcome<O, P::Error>
+    pub fn process_obligations<P, OUT>(&mut self, processor: &mut P) -> OUT
     where
         P: ObligationProcessor<Obligation = O>,
+        OUT: OutcomeTrait<Obligation = O, Error = Error<O, P::Error>>,
     {
-        let mut errors = vec![];
-        let mut stalled = true;
+        let mut outcome = OUT::new();
 
         // Note that the loop body can append new nodes, and those new nodes
         // will then be processed by subsequent iterations of the loop.
@@ -429,7 +451,7 @@
                 }
                 ProcessResult::Changed(children) => {
                     // We are not (yet) stalled.
-                    stalled = false;
+                    outcome.mark_not_stalled();
                     node.state.set(NodeState::Success);
 
                     for child in children {
@@ -442,28 +464,22 @@
                     }
                 }
                 ProcessResult::Error(err) => {
-                    stalled = false;
-                    errors.push(Error { error: err, backtrace: self.error_at(index) });
+                    outcome.mark_not_stalled();
+                    outcome.record_error(Error { error: err, backtrace: self.error_at(index) });
                 }
             }
             index += 1;
         }
 
-        if stalled {
-            // There's no need to perform marking, cycle processing and compression when nothing
-            // changed.
-            return Outcome {
-                completed: if do_completed == DoCompleted::Yes { Some(vec![]) } else { None },
-                errors,
-                stalled,
-            };
+        // There's no need to perform marking, cycle processing and compression when nothing
+        // changed.
+        if !outcome.is_stalled() {
+            self.mark_successes();
+            self.process_cycles(processor);
+            self.compress(|obl| outcome.record_completed(obl));
         }
 
-        self.mark_successes();
-        self.process_cycles(processor);
-        let completed = self.compress(do_completed);
-
-        Outcome { completed, errors, stalled }
+        outcome
     }
 
     /// Returns a vector of obligations for `p` and all of its
@@ -526,7 +542,6 @@
             let node = &self.nodes[index];
             let state = node.state.get();
             if state == NodeState::Success {
-                node.state.set(NodeState::Waiting);
                 // This call site is cold.
                 self.uninlined_mark_dependents_as_waiting(node);
             } else {
@@ -538,17 +553,18 @@
     // This never-inlined function is for the cold call site.
     #[inline(never)]
     fn uninlined_mark_dependents_as_waiting(&self, node: &Node<O>) {
+        // Mark the node as Waiting in the cold, uninlined code instead of in the hot, inlined code.
+        node.state.set(NodeState::Waiting);
         self.inlined_mark_dependents_as_waiting(node)
     }
 
     /// Report cycles between all `Success` nodes, and convert all `Success`
     /// nodes to `Done`. This must be called after `mark_successes`.
-    fn process_cycles<P>(&self, processor: &mut P)
+    fn process_cycles<P>(&mut self, processor: &mut P)
     where
         P: ObligationProcessor<Obligation = O>,
     {
-        let mut stack = vec![];
-
+        let mut stack = std::mem::take(&mut self.reused_node_vec);
         for (index, node) in self.nodes.iter().enumerate() {
             // For some benchmarks this state test is extremely hot. It's a win
             // to handle the no-op cases immediately to avoid the cost of the
@@ -559,6 +575,7 @@
         }
 
         debug_assert!(stack.is_empty());
+        self.reused_node_vec = stack;
     }
 
     fn find_cycles_from_node<P>(&self, stack: &mut Vec<usize>, processor: &mut P, index: usize)
@@ -591,13 +608,12 @@
     /// indices and hence invalidates any outstanding indices. `process_cycles`
     /// must be run beforehand to remove any cycles on `Success` nodes.
     #[inline(never)]
-    fn compress(&mut self, do_completed: DoCompleted) -> Option<Vec<O>> {
+    fn compress(&mut self, mut outcome_cb: impl FnMut(&O)) {
         let orig_nodes_len = self.nodes.len();
-        let mut node_rewrites: Vec<_> = std::mem::take(&mut self.node_rewrites);
+        let mut node_rewrites: Vec<_> = std::mem::take(&mut self.reused_node_vec);
         debug_assert!(node_rewrites.is_empty());
         node_rewrites.extend(0..orig_nodes_len);
         let mut dead_nodes = 0;
-        let mut removed_done_obligations: Vec<O> = vec![];
 
         // Move removable nodes to the end, preserving the order of the
         // remaining nodes.
@@ -627,10 +643,8 @@
                     } else {
                         self.done_cache.insert(node.obligation.as_cache_key().clone());
                     }
-                    if do_completed == DoCompleted::Yes {
-                        // Extract the success stories.
-                        removed_done_obligations.push(node.obligation.clone());
-                    }
+                    // Extract the success stories.
+                    outcome_cb(&node.obligation);
                     node_rewrites[index] = orig_nodes_len;
                     dead_nodes += 1;
                 }
@@ -654,9 +668,7 @@
         }
 
         node_rewrites.truncate(0);
-        self.node_rewrites = node_rewrites;
-
-        if do_completed == DoCompleted::Yes { Some(removed_done_obligations) } else { None }
+        self.reused_node_vec = node_rewrites;
     }
 
     fn apply_rewrites(&mut self, node_rewrites: &[usize]) {
diff --git a/compiler/rustc_data_structures/src/obligation_forest/tests.rs b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
index 0165246..371c62c 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/tests.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
@@ -17,6 +17,40 @@
     marker: PhantomData<(O, E)>,
 }
 
+struct TestOutcome<O, E> {
+    pub completed: Vec<O>,
+    pub errors: Vec<Error<O, E>>,
+    pub stalled: bool,
+}
+
+impl<O, E> OutcomeTrait for TestOutcome<O, E>
+where
+    O: Clone,
+{
+    type Error = Error<O, E>;
+    type Obligation = O;
+
+    fn new() -> Self {
+        Self { errors: vec![], stalled: false, completed: vec![] }
+    }
+
+    fn mark_not_stalled(&mut self) {
+        self.stalled = false;
+    }
+
+    fn is_stalled(&self) -> bool {
+        self.stalled
+    }
+
+    fn record_completed(&mut self, outcome: &Self::Obligation) {
+        self.completed.push(outcome.clone())
+    }
+
+    fn record_error(&mut self, error: Self::Error) {
+        self.errors.push(error)
+    }
+}
+
 #[allow(non_snake_case)]
 fn C<OF, BF, O>(of: OF, bf: BF) -> ClosureObligationProcessor<OF, BF, O, &'static str>
 where
@@ -65,20 +99,17 @@
     //      A |-> A.1
     //        |-> A.2
     //        |-> A.3
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
-                "B" => ProcessResult::Error("B is for broken"),
-                "C" => ProcessResult::Changed(vec![]),
-                "A.1" | "A.2" | "A.3" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap(), vec!["C"]);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
+            "B" => ProcessResult::Error("B is for broken"),
+            "C" => ProcessResult::Changed(vec![]),
+            "A.1" | "A.2" | "A.3" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok, vec!["C"]);
     assert_eq!(err, vec![Error { error: "B is for broken", backtrace: vec!["B"] }]);
 
     // second round: two delays, one success, creating an uneven set of subtasks:
@@ -88,60 +119,51 @@
     //      D |-> D.1
     //        |-> D.2
     forest.register_obligation("D");
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A.1" => ProcessResult::Unchanged,
-                "A.2" => ProcessResult::Unchanged,
-                "A.3" => ProcessResult::Changed(vec!["A.3.i"]),
-                "D" => ProcessResult::Changed(vec!["D.1", "D.2"]),
-                "A.3.i" | "D.1" | "D.2" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap(), Vec::<&'static str>::new());
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A.1" => ProcessResult::Unchanged,
+            "A.2" => ProcessResult::Unchanged,
+            "A.3" => ProcessResult::Changed(vec!["A.3.i"]),
+            "D" => ProcessResult::Changed(vec!["D.1", "D.2"]),
+            "A.3.i" | "D.1" | "D.2" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok, Vec::<&'static str>::new());
     assert_eq!(err, Vec::new());
 
     // third round: ok in A.1 but trigger an error in A.2. Check that it
     // propagates to A, but not D.1 or D.2.
     //      D |-> D.1 |-> D.1.i
     //        |-> D.2 |-> D.2.i
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A.1" => ProcessResult::Changed(vec![]),
-                "A.2" => ProcessResult::Error("A is for apple"),
-                "A.3.i" => ProcessResult::Changed(vec![]),
-                "D.1" => ProcessResult::Changed(vec!["D.1.i"]),
-                "D.2" => ProcessResult::Changed(vec!["D.2.i"]),
-                "D.1.i" | "D.2.i" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    let mut ok = ok.unwrap();
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A.1" => ProcessResult::Changed(vec![]),
+            "A.2" => ProcessResult::Error("A is for apple"),
+            "A.3.i" => ProcessResult::Changed(vec![]),
+            "D.1" => ProcessResult::Changed(vec!["D.1.i"]),
+            "D.2" => ProcessResult::Changed(vec!["D.2.i"]),
+            "D.1.i" | "D.2.i" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    let mut ok = ok;
     ok.sort();
     assert_eq!(ok, vec!["A.1", "A.3", "A.3.i"]);
     assert_eq!(err, vec![Error { error: "A is for apple", backtrace: vec!["A.2", "A"] }]);
 
     // fourth round: error in D.1.i
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "D.1.i" => ProcessResult::Error("D is for dumb"),
-                "D.2.i" => ProcessResult::Changed(vec![]),
-                _ => panic!("unexpected obligation {:?}", obligation),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    let mut ok = ok.unwrap();
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "D.1.i" => ProcessResult::Error("D is for dumb"),
+            "D.2.i" => ProcessResult::Changed(vec![]),
+            _ => panic!("unexpected obligation {:?}", obligation),
+        },
+        |_| {},
+    ));
+    let mut ok = ok;
     ok.sort();
     assert_eq!(ok, vec!["D.2", "D.2.i"]);
     assert_eq!(err, vec![Error { error: "D is for dumb", backtrace: vec!["D.1.i", "D.1", "D"] }]);
@@ -160,72 +182,60 @@
     let mut forest = ObligationForest::new();
     forest.register_obligation("A");
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
-                "A.1" => ProcessResult::Changed(vec![]),
-                "A.2" => ProcessResult::Changed(vec!["A.2.i", "A.2.ii"]),
-                "A.3" => ProcessResult::Changed(vec![]),
-                "A.2.i" | "A.2.ii" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    let mut ok = ok.unwrap();
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
+            "A.1" => ProcessResult::Changed(vec![]),
+            "A.2" => ProcessResult::Changed(vec!["A.2.i", "A.2.ii"]),
+            "A.3" => ProcessResult::Changed(vec![]),
+            "A.2.i" | "A.2.ii" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    let mut ok = ok;
     ok.sort();
     assert_eq!(ok, vec!["A.1", "A.3"]);
     assert!(err.is_empty());
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A.2.i" => ProcessResult::Unchanged,
-                "A.2.ii" => ProcessResult::Changed(vec![]),
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap(), vec!["A.2.ii"]);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A.2.i" => ProcessResult::Unchanged,
+            "A.2.ii" => ProcessResult::Changed(vec![]),
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok, vec!["A.2.ii"]);
     assert!(err.is_empty());
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A.2.i" => ProcessResult::Changed(vec!["A.2.i.a"]),
-                "A.2.i.a" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert!(ok.unwrap().is_empty());
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A.2.i" => ProcessResult::Changed(vec!["A.2.i.a"]),
+            "A.2.i.a" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert!(ok.is_empty());
     assert!(err.is_empty());
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A.2.i.a" => ProcessResult::Changed(vec![]),
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    let mut ok = ok.unwrap();
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A.2.i.a" => ProcessResult::Changed(vec![]),
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    let mut ok = ok;
     ok.sort();
     assert_eq!(ok, vec!["A", "A.2", "A.2.i", "A.2.i.a"]);
     assert!(err.is_empty());
 
-    let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations(&mut C(|_| unreachable!(), |_| {}), DoCompleted::Yes);
+    let TestOutcome { completed: ok, errors: err, .. } =
+        forest.process_obligations(&mut C(|_| unreachable!(), |_| {}));
 
-    assert!(ok.unwrap().is_empty());
+    assert!(ok.is_empty());
     assert!(err.is_empty());
 }
 
@@ -235,18 +245,15 @@
     // yields correct errors (and does not panic, in particular).
     let mut forest = ObligationForest::new();
     forest.register_obligation("A");
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
-                "A.1" | "A.2" | "A.3" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
+            "A.1" | "A.2" | "A.3" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err.len(), 0);
     let errors = forest.to_errors(());
     assert_eq!(errors[0].backtrace, vec!["A.1", "A"]);
@@ -260,51 +267,42 @@
     // check that diamond dependencies are handled correctly
     let mut forest = ObligationForest::new();
     forest.register_obligation("A");
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A" => ProcessResult::Changed(vec!["A.1", "A.2"]),
-                "A.1" | "A.2" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A" => ProcessResult::Changed(vec!["A.1", "A.2"]),
+            "A.1" | "A.2" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err.len(), 0);
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A.1" => ProcessResult::Changed(vec!["D"]),
-                "A.2" => ProcessResult::Changed(vec!["D"]),
-                "D" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A.1" => ProcessResult::Changed(vec!["D"]),
+            "A.2" => ProcessResult::Changed(vec!["D"]),
+            "D" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err.len(), 0);
 
     let mut d_count = 0;
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "D" => {
-                    d_count += 1;
-                    ProcessResult::Changed(vec![])
-                }
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "D" => {
+                d_count += 1;
+                ProcessResult::Changed(vec![])
+            }
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
     assert_eq!(d_count, 1);
-    let mut ok = ok.unwrap();
+    let mut ok = ok;
     ok.sort();
     assert_eq!(ok, vec!["A", "A.1", "A.2", "D"]);
     assert_eq!(err.len(), 0);
@@ -313,51 +311,42 @@
     assert_eq!(errors.len(), 0);
 
     forest.register_obligation("A'");
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A'" => ProcessResult::Changed(vec!["A'.1", "A'.2"]),
-                "A'.1" | "A'.2" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A'" => ProcessResult::Changed(vec!["A'.1", "A'.2"]),
+            "A'.1" | "A'.2" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err.len(), 0);
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A'.1" => ProcessResult::Changed(vec!["D'", "A'"]),
-                "A'.2" => ProcessResult::Changed(vec!["D'"]),
-                "D'" | "A'" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A'.1" => ProcessResult::Changed(vec!["D'", "A'"]),
+            "A'.2" => ProcessResult::Changed(vec!["D'"]),
+            "D'" | "A'" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err.len(), 0);
 
     let mut d_count = 0;
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "D'" => {
-                    d_count += 1;
-                    ProcessResult::Error("operation failed")
-                }
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "D'" => {
+                d_count += 1;
+                ProcessResult::Error("operation failed")
+            }
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
     assert_eq!(d_count, 1);
-    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(ok.len(), 0);
     assert_eq!(
         err,
         vec![super::Error { error: "operation failed", backtrace: vec!["D'", "A'.1", "A'"] }]
@@ -375,35 +364,27 @@
     forest.register_obligation("B: Sized");
     forest.register_obligation("C: Sized");
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A: Sized" | "B: Sized" | "C: Sized" => ProcessResult::Changed(vec![]),
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    let mut ok = ok.unwrap();
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A: Sized" | "B: Sized" | "C: Sized" => ProcessResult::Changed(vec![]),
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    let mut ok = ok;
     ok.sort();
     assert_eq!(ok, vec!["A: Sized", "B: Sized", "C: Sized"]);
     assert_eq!(err.len(), 0);
 
     forest.register_obligation("(A,B,C): Sized");
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "(A,B,C): Sized" => {
-                    ProcessResult::Changed(vec!["A: Sized", "B: Sized", "C: Sized"])
-                }
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap(), vec!["(A,B,C): Sized"]);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "(A,B,C): Sized" => ProcessResult::Changed(vec!["A: Sized", "B: Sized", "C: Sized"]),
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok, vec!["(A,B,C): Sized"]);
     assert_eq!(err.len(), 0);
 }
 
@@ -416,64 +397,52 @@
     forest.register_obligation("C1");
     forest.register_obligation("C2");
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A" => ProcessResult::Changed(vec!["D", "E"]),
-                "B" => ProcessResult::Unchanged,
-                "C1" => ProcessResult::Changed(vec![]),
-                "C2" => ProcessResult::Changed(vec![]),
-                "D" | "E" => ProcessResult::Unchanged,
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    let mut ok = ok.unwrap();
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A" => ProcessResult::Changed(vec!["D", "E"]),
+            "B" => ProcessResult::Unchanged,
+            "C1" => ProcessResult::Changed(vec![]),
+            "C2" => ProcessResult::Changed(vec![]),
+            "D" | "E" => ProcessResult::Unchanged,
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    let mut ok = ok;
     ok.sort();
     assert_eq!(ok, vec!["C1", "C2"]);
     assert_eq!(err.len(), 0);
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "D" | "E" => ProcessResult::Unchanged,
-                "B" => ProcessResult::Changed(vec!["D"]),
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "D" | "E" => ProcessResult::Unchanged,
+            "B" => ProcessResult::Changed(vec!["D"]),
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err.len(), 0);
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "D" => ProcessResult::Unchanged,
-                "E" => ProcessResult::Error("E is for error"),
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "D" => ProcessResult::Unchanged,
+            "E" => ProcessResult::Error("E is for error"),
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err, vec![super::Error { error: "E is for error", backtrace: vec!["E", "A"] }]);
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "D" => ProcessResult::Error("D is dead"),
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "D" => ProcessResult::Error("D is dead"),
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err, vec![super::Error { error: "D is dead", backtrace: vec!["D"] }]);
 
     let errors = forest.to_errors(());
@@ -487,35 +456,29 @@
     forest.register_obligation("A");
     forest.register_obligation("B");
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A" => ProcessResult::Error("An error"),
-                "B" => ProcessResult::Changed(vec!["A"]),
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A" => ProcessResult::Error("An error"),
+            "B" => ProcessResult::Changed(vec!["A"]),
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err, vec![super::Error { error: "An error", backtrace: vec!["A"] }]);
 
     let mut forest = ObligationForest::new();
     forest.register_obligation("B");
     forest.register_obligation("A");
 
-    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
-        &mut C(
-            |obligation| match *obligation {
-                "A" => ProcessResult::Error("An error"),
-                "B" => ProcessResult::Changed(vec!["A"]),
-                _ => unreachable!(),
-            },
-            |_| {},
-        ),
-        DoCompleted::Yes,
-    );
-    assert_eq!(ok.unwrap().len(), 0);
+    let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+        |obligation| match *obligation {
+            "A" => ProcessResult::Error("An error"),
+            "B" => ProcessResult::Changed(vec!["A"]),
+            _ => unreachable!(),
+        },
+        |_| {},
+    ));
+    assert_eq!(ok.len(), 0);
     assert_eq!(err, vec![super::Error { error: "An error", backtrace: vec!["A"] }]);
 }
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index 363879c..e598d7a 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -94,34 +94,9 @@
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 
-use measureme::{EventId, EventIdBuilder, SerializableString, StringId};
+use measureme::{EventId, EventIdBuilder, Profiler, SerializableString, StringId};
 use parking_lot::RwLock;
 
-cfg_if! {
-    if #[cfg(any(windows, target_os = "wasi"))] {
-        /// FileSerializationSink is faster on Windows
-        type SerializationSink = measureme::FileSerializationSink;
-    } else if #[cfg(target_arch = "wasm32")] {
-        type SerializationSink = measureme::ByteVecSink;
-    } else {
-        /// MmapSerializatioSink is faster on macOS and Linux
-        type SerializationSink = measureme::MmapSerializationSink;
-    }
-}
-
-type Profiler = measureme::Profiler<SerializationSink>;
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
-pub enum ProfileCategory {
-    Parsing,
-    Expansion,
-    TypeChecking,
-    BorrowChecking,
-    Codegen,
-    Linking,
-    Other,
-}
-
 bitflags::bitflags! {
     struct EventFilter: u32 {
         const GENERIC_ACTIVITIES = 1 << 0;
@@ -400,7 +375,7 @@
         output_directory: &Path,
         crate_name: Option<&str>,
         event_filters: &Option<Vec<String>>,
-    ) -> Result<SelfProfiler, Box<dyn Error>> {
+    ) -> Result<SelfProfiler, Box<dyn Error + Send + Sync>> {
         fs::create_dir_all(output_directory)?;
 
         let crate_name = crate_name.unwrap_or("unknown-crate");
@@ -511,13 +486,13 @@
         self.event_filter_mask.contains(EventFilter::QUERY_KEYS)
     }
 
-    pub fn event_id_builder(&self) -> EventIdBuilder<'_, SerializationSink> {
+    pub fn event_id_builder(&self) -> EventIdBuilder<'_> {
         EventIdBuilder::new(&self.profiler)
     }
 }
 
 #[must_use]
-pub struct TimingGuard<'a>(Option<measureme::TimingGuard<'a, SerializationSink>>);
+pub struct TimingGuard<'a>(Option<measureme::TimingGuard<'a>>);
 
 impl<'a> TimingGuard<'a> {
     #[inline]
diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs
index 2c4eff6..53062b9 100644
--- a/compiler/rustc_data_structures/src/sip128.rs
+++ b/compiler/rustc_data_structures/src/sip128.rs
@@ -1,21 +1,53 @@
 //! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.
 
-use std::cmp;
 use std::hash::Hasher;
-use std::mem;
+use std::mem::{self, MaybeUninit};
 use std::ptr;
 
 #[cfg(test)]
 mod tests;
 
+// The SipHash algorithm operates on 8-byte chunks.
+const ELEM_SIZE: usize = mem::size_of::<u64>();
+
+// Size of the buffer in number of elements, not including the spill.
+//
+// The selection of this size was guided by rustc-perf benchmark comparisons of
+// different buffer sizes. It should be periodically reevaluated as the compiler
+// implementation and input characteristics change.
+//
+// Using the same-sized buffer for everything we hash is a performance versus
+// complexity tradeoff. The ideal buffer size, and whether buffering should even
+// be used, depends on what is being hashed. It may be worth it to size the
+// buffer appropriately (perhaps by making SipHasher128 generic over the buffer
+// size) or disable buffering depending on what is being hashed. But at this
+// time, we use the same buffer size for everything.
+const BUFFER_CAPACITY: usize = 8;
+
+// Size of the buffer in bytes, not including the spill.
+const BUFFER_SIZE: usize = BUFFER_CAPACITY * ELEM_SIZE;
+
+// Size of the buffer in number of elements, including the spill.
+const BUFFER_WITH_SPILL_CAPACITY: usize = BUFFER_CAPACITY + 1;
+
+// Size of the buffer in bytes, including the spill.
+const BUFFER_WITH_SPILL_SIZE: usize = BUFFER_WITH_SPILL_CAPACITY * ELEM_SIZE;
+
+// Index of the spill element in the buffer.
+const BUFFER_SPILL_INDEX: usize = BUFFER_WITH_SPILL_CAPACITY - 1;
+
 #[derive(Debug, Clone)]
+#[repr(C)]
 pub struct SipHasher128 {
-    k0: u64,
-    k1: u64,
-    length: usize, // how many bytes we've processed
-    state: State,  // hash State
-    tail: u64,     // unprocessed bytes le
-    ntail: usize,  // how many bytes in tail are valid
+    // The access pattern during hashing consists of accesses to `nbuf` and
+    // `buf` until the buffer is full, followed by accesses to `state` and
+    // `processed`, and then repetition of that pattern until hashing is done.
+    // This is the basis for the ordering of fields below. However, in practice
+    // the cache miss-rate for data access is extremely low regardless of order.
+    nbuf: usize, // how many bytes in buf are valid
+    buf: [MaybeUninit<u64>; BUFFER_WITH_SPILL_CAPACITY], // unprocessed bytes le
+    state: State, // hash State
+    processed: usize, // how many bytes we've processed
 }
 
 #[derive(Debug, Clone, Copy)]
@@ -51,178 +83,328 @@
     }};
 }
 
-/// Loads an integer of the desired type from a byte stream, in LE order. Uses
-/// `copy_nonoverlapping` to let the compiler generate the most efficient way
-/// to load it from a possibly unaligned address.
-///
-/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
-macro_rules! load_int_le {
-    ($buf:expr, $i:expr, $int_ty:ident) => {{
-        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
-        let mut data = 0 as $int_ty;
-        ptr::copy_nonoverlapping(
-            $buf.get_unchecked($i),
-            &mut data as *mut _ as *mut u8,
-            mem::size_of::<$int_ty>(),
-        );
-        data.to_le()
-    }};
-}
-
-/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
-/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
-/// sizes and avoid calling `memcpy`, which is good for speed.
-///
-/// Unsafe because: unchecked indexing at start..start+len
+// Copies up to 8 bytes from source to destination. This performs better than
+// `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real
+// workloads since all of the copies have fixed sizes and avoid calling memcpy.
+//
+// This is specifically designed for copies of up to 8 bytes, because that's the
+// maximum of number bytes needed to fill an 8-byte-sized element on which
+// SipHash operates. Note that for variable-sized copies which are known to be
+// less than 8 bytes, this function will perform more work than necessary unless
+// the compiler is able to optimize the extra work away.
 #[inline]
-unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
-    debug_assert!(len < 8);
-    let mut i = 0; // current byte index (from LSB) in the output u64
-    let mut out = 0;
-    if i + 3 < len {
-        out = load_int_le!(buf, start + i, u32) as u64;
+unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
+    debug_assert!(count <= 8);
+
+    if count == 8 {
+        ptr::copy_nonoverlapping(src, dst, 8);
+        return;
+    }
+
+    let mut i = 0;
+    if i + 3 < count {
+        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
         i += 4;
     }
-    if i + 1 < len {
-        out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8);
+
+    if i + 1 < count {
+        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
         i += 2
     }
-    if i < len {
-        out |= (*buf.get_unchecked(start + i) as u64) << (i * 8);
+
+    if i < count {
+        *dst.add(i) = *src.add(i);
         i += 1;
     }
-    debug_assert_eq!(i, len);
-    out
+
+    debug_assert_eq!(i, count);
 }
 
+// # Implementation
+//
+// This implementation uses buffering to reduce the hashing cost for inputs
+// consisting of many small integers. Buffering simplifies the integration of
+// integer input--the integer write function typically just appends to the
+// buffer with a statically sized write, updates metadata, and returns.
+//
+// Buffering also prevents alternating between writes that do and do not trigger
+// the hashing process. Only when the entire buffer is full do we transition
+// into hashing. This allows us to keep the hash state in registers for longer,
+// instead of loading and storing it before and after processing each element.
+//
+// When a write fills the buffer, a buffer processing function is invoked to
+// hash all of the buffered input. The buffer processing functions are marked
+// `#[inline(never)]` so that they aren't inlined into the append functions,
+// which ensures the more frequently called append functions remain inlineable
+// and don't include register pushing/popping that would only be made necessary
+// by inclusion of the complex buffer processing path which uses those
+// registers.
+//
+// The buffer includes a "spill"--an extra element at the end--which simplifies
+// the integer write buffer processing path. The value that fills the buffer can
+// be written with a statically sized write that may spill over into the spill.
+// After the buffer is processed, the part of the value that spilled over can be
+// written from the spill to the beginning of the buffer with another statically
+// sized write. This write may copy more bytes than actually spilled over, but
+// we maintain the metadata such that any extra copied bytes will be ignored by
+// subsequent processing. Due to the static sizes, this scheme performs better
+// than copying the exact number of bytes needed into the end and beginning of
+// the buffer.
+//
+// The buffer is uninitialized, which improves performance, but may preclude
+// efficient implementation of alternative approaches. The improvement is not so
+// large that an alternative approach should be disregarded because it cannot be
+// efficiently implemented with an uninitialized buffer. On the other hand, an
+// uninitialized buffer may become more important should a larger one be used.
+//
+// # Platform Dependence
+//
+// The SipHash algorithm operates on byte sequences. It parses the input stream
+// as 8-byte little-endian integers. Therefore, given the same byte sequence, it
+// produces the same result on big- and little-endian hardware.
+//
+// However, the Hasher trait has methods which operate on multi-byte integers.
+// How they are converted into byte sequences can be endian-dependent (by using
+// native byte order) or independent (by consistently using either LE or BE byte
+// order). It can also be `isize` and `usize` size dependent (by using the
+// native size), or independent (by converting to a common size), supposing the
+// values can be represented in 32 bits.
+//
+// In order to make `SipHasher128` consistent with `SipHasher` in libstd, we
+// choose to do the integer to byte sequence conversion in the platform-
+// dependent way. Clients can achieve platform-independent hashing by widening
+// `isize` and `usize` integers to 64 bits on 32-bit systems and byte-swapping
+// integers on big-endian systems before passing them to the writing functions.
+// This causes the input byte sequence to look identical on big- and little-
+// endian systems (supposing `isize` and `usize` values can be represented in 32
+// bits), which ensures platform-independent results.
 impl SipHasher128 {
     #[inline]
     pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
-        let mut state = SipHasher128 {
-            k0: key0,
-            k1: key1,
-            length: 0,
-            state: State { v0: 0, v1: 0, v2: 0, v3: 0 },
-            tail: 0,
-            ntail: 0,
+        let mut hasher = SipHasher128 {
+            nbuf: 0,
+            buf: MaybeUninit::uninit_array(),
+            state: State {
+                v0: key0 ^ 0x736f6d6570736575,
+                // The XOR with 0xee is only done on 128-bit algorithm version.
+                v1: key1 ^ (0x646f72616e646f6d ^ 0xee),
+                v2: key0 ^ 0x6c7967656e657261,
+                v3: key1 ^ 0x7465646279746573,
+            },
+            processed: 0,
         };
-        state.reset();
-        state
-    }
 
-    #[inline]
-    fn reset(&mut self) {
-        self.length = 0;
-        self.state.v0 = self.k0 ^ 0x736f6d6570736575;
-        self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
-        self.state.v2 = self.k0 ^ 0x6c7967656e657261;
-        self.state.v3 = self.k1 ^ 0x7465646279746573;
-        self.ntail = 0;
+        unsafe {
+            // Initialize spill because we read from it in `short_write_process_buffer`.
+            *hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed();
+        }
 
-        // This is only done in the 128 bit version:
-        self.state.v1 ^= 0xee;
+        hasher
     }
 
     // A specialized write function for values with size <= 8.
-    //
-    // The input must be zero-extended to 64-bits by the caller. This extension
-    // isn't hashed, but the implementation requires it for correctness.
-    //
-    // This function, given the same integer size and value, has the same effect
-    // on both little- and big-endian hardware. It operates on values without
-    // depending on their sequence in memory, so is independent of endianness.
-    //
-    // However, we want SipHasher128 to be platform-dependent, in order to be
-    // consistent with the platform-dependent SipHasher in libstd. In other
-    // words, we want:
-    //
-    // - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])`
-    // - big-endian:    `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])`
-    //
-    // Therefore, in order to produce endian-dependent results, SipHasher128's
-    // `write_xxx` Hasher trait methods byte-swap `x` prior to zero-extending.
-    //
-    // If clients of SipHasher128 itself want platform-independent results, they
-    // *also* must byte-swap integer inputs before invoking the `write_xxx`
-    // methods on big-endian hardware (that is, two byte-swaps must occur--one
-    // in the client, and one in SipHasher128). Additionally, they must extend
-    // `usize` and `isize` types to 64 bits on 32-bit systems.
     #[inline]
-    fn short_write<T>(&mut self, _x: T, x: u64) {
+    fn short_write<T>(&mut self, x: T) {
         let size = mem::size_of::<T>();
-        self.length += size;
+        let nbuf = self.nbuf;
+        debug_assert!(size <= 8);
+        debug_assert!(nbuf < BUFFER_SIZE);
+        debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE);
 
-        // The original number must be zero-extended, not sign-extended.
-        debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true });
+        if nbuf + size < BUFFER_SIZE {
+            unsafe {
+                // The memcpy call is optimized away because the size is known.
+                let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+                ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size);
+            }
 
-        // The number of bytes needed to fill `self.tail`.
-        let needed = 8 - self.ntail;
+            self.nbuf = nbuf + size;
 
-        // SipHash parses the input stream as 8-byte little-endian integers.
-        // Inputs are put into `self.tail` until 8 bytes of data have been
-        // collected, and then that word is processed.
-        //
-        // For example, imagine that `self.tail` is 0x0000_00EE_DDCC_BBAA,
-        // `self.ntail` is 5 (because 5 bytes have been put into `self.tail`),
-        // and `needed` is therefore 3.
-        //
-        // - Scenario 1, `self.write_u8(0xFF)`: we have already zero-extended
-        //   the input to 0x0000_0000_0000_00FF. We now left-shift it five
-        //   bytes, giving 0x0000_FF00_0000_0000. We then bitwise-OR that value
-        //   into `self.tail`, resulting in 0x0000_FFEE_DDCC_BBAA.
-        //   (Zero-extension of the original input is critical in this scenario
-        //   because we don't want the high two bytes of `self.tail` to be
-        //   touched by the bitwise-OR.) `self.tail` is not yet full, so we
-        //   return early, after updating `self.ntail` to 6.
-        //
-        // - Scenario 2, `self.write_u32(0xIIHH_GGFF)`: we have already
-        //   zero-extended the input to 0x0000_0000_IIHH_GGFF. We now
-        //   left-shift it five bytes, giving 0xHHGG_FF00_0000_0000. We then
-        //   bitwise-OR that value into `self.tail`, resulting in
-        //   0xHHGG_FFEE_DDCC_BBAA. `self.tail` is now full, and we can use it
-        //   to update `self.state`. (As mentioned above, this assumes a
-        //   little-endian machine; on a big-endian machine we would have
-        //   byte-swapped 0xIIHH_GGFF in the caller, giving 0xFFGG_HHII, and we
-        //   would then end up bitwise-ORing 0xGGHH_II00_0000_0000 into
-        //   `self.tail`).
-        //
-        self.tail |= x << (8 * self.ntail);
-        if size < needed {
-            self.ntail += size;
             return;
         }
 
-        // `self.tail` is full, process it.
-        self.state.v3 ^= self.tail;
-        Sip24Rounds::c_rounds(&mut self.state);
-        self.state.v0 ^= self.tail;
+        unsafe { self.short_write_process_buffer(x) }
+    }
 
-        // Continuing scenario 2: we have one byte left over from the input. We
-        // set `self.ntail` to 1 and `self.tail` to `0x0000_0000_IIHH_GGFF >>
-        // 8*3`, which is 0x0000_0000_0000_00II. (Or on a big-endian machine
-        // the prior byte-swapping would leave us with 0x0000_0000_0000_00FF.)
-        //
-        // The `if` is needed to avoid shifting by 64 bits, which Rust
-        // complains about.
-        self.ntail = size - needed;
-        self.tail = if needed < 8 { x >> (8 * needed) } else { 0 };
+    // A specialized write function for values with size <= 8 that should only
+    // be called when the write would cause the buffer to fill.
+    //
+    // SAFETY: the write of `x` into `self.buf` starting at byte offset
+    // `self.nbuf` must cause `self.buf` to become fully initialized (and not
+    // overflow) if it wasn't already.
+    #[inline(never)]
+    unsafe fn short_write_process_buffer<T>(&mut self, x: T) {
+        let size = mem::size_of::<T>();
+        let nbuf = self.nbuf;
+        debug_assert!(size <= 8);
+        debug_assert!(nbuf < BUFFER_SIZE);
+        debug_assert!(nbuf + size >= BUFFER_SIZE);
+        debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE);
+
+        // Copy first part of input into end of buffer, possibly into spill
+        // element. The memcpy call is optimized away because the size is known.
+        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+        ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size);
+
+        // Process buffer.
+        for i in 0..BUFFER_CAPACITY {
+            let elem = self.buf.get_unchecked(i).assume_init().to_le();
+            self.state.v3 ^= elem;
+            Sip24Rounds::c_rounds(&mut self.state);
+            self.state.v0 ^= elem;
+        }
+
+        // Copy remaining input into start of buffer by copying size - 1
+        // bytes from the spill (at most size - 1 bytes could have overflowed
+        // into the spill). The memcpy call is optimized away because the size
+        // is known. And the whole copy is optimized away for size == 1.
+        let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
+        ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, size - 1);
+
+        // This function should only be called when the write fills the buffer.
+        // Therefore, when size == 1, the new `self.nbuf` must be zero. The size
+        // is statically known, so the branch is optimized away.
+        self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE };
+        self.processed += BUFFER_SIZE;
+    }
+
+    // A write function for byte slices.
+    #[inline]
+    fn slice_write(&mut self, msg: &[u8]) {
+        let length = msg.len();
+        let nbuf = self.nbuf;
+        debug_assert!(nbuf < BUFFER_SIZE);
+
+        if nbuf + length < BUFFER_SIZE {
+            unsafe {
+                let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+
+                if length <= 8 {
+                    copy_nonoverlapping_small(msg.as_ptr(), dst, length);
+                } else {
+                    // This memcpy is *not* optimized away.
+                    ptr::copy_nonoverlapping(msg.as_ptr(), dst, length);
+                }
+            }
+
+            self.nbuf = nbuf + length;
+
+            return;
+        }
+
+        unsafe { self.slice_write_process_buffer(msg) }
+    }
+
+    // A write function for byte slices that should only be called when the
+    // write would cause the buffer to fill.
+    //
+    // SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`,
+    // and `msg` must contain enough bytes to initialize the rest of the element
+    // containing the byte offset `self.nbuf`.
+    #[inline(never)]
+    unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
+        let length = msg.len();
+        let nbuf = self.nbuf;
+        debug_assert!(nbuf < BUFFER_SIZE);
+        debug_assert!(nbuf + length >= BUFFER_SIZE);
+
+        // Always copy first part of input into current element of buffer.
+        // This function should only be called when the write fills the buffer,
+        // so we know that there is enough input to fill the current element.
+        let valid_in_elem = nbuf % ELEM_SIZE;
+        let needed_in_elem = ELEM_SIZE - valid_in_elem;
+
+        let src = msg.as_ptr();
+        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+        copy_nonoverlapping_small(src, dst, needed_in_elem);
+
+        // Process buffer.
+
+        // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
+        // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
+        // We know that is true, because the last step ensured we have a full
+        // element in the buffer.
+        let last = nbuf / ELEM_SIZE + 1;
+
+        for i in 0..last {
+            let elem = self.buf.get_unchecked(i).assume_init().to_le();
+            self.state.v3 ^= elem;
+            Sip24Rounds::c_rounds(&mut self.state);
+            self.state.v0 ^= elem;
+        }
+
+        // Process the remaining element-sized chunks of input.
+        let mut processed = needed_in_elem;
+        let input_left = length - processed;
+        let elems_left = input_left / ELEM_SIZE;
+        let extra_bytes_left = input_left % ELEM_SIZE;
+
+        for _ in 0..elems_left {
+            let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
+            self.state.v3 ^= elem;
+            Sip24Rounds::c_rounds(&mut self.state);
+            self.state.v0 ^= elem;
+            processed += ELEM_SIZE;
+        }
+
+        // Copy remaining input into start of buffer.
+        let src = msg.as_ptr().add(processed);
+        let dst = self.buf.as_mut_ptr() as *mut u8;
+        copy_nonoverlapping_small(src, dst, extra_bytes_left);
+
+        self.nbuf = extra_bytes_left;
+        self.processed += nbuf + processed;
     }
 
     #[inline]
     pub fn finish128(mut self) -> (u64, u64) {
-        let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
+        debug_assert!(self.nbuf < BUFFER_SIZE);
 
-        self.state.v3 ^= b;
-        Sip24Rounds::c_rounds(&mut self.state);
-        self.state.v0 ^= b;
+        // Process full elements in buffer.
+        let last = self.nbuf / ELEM_SIZE;
 
-        self.state.v2 ^= 0xee;
-        Sip24Rounds::d_rounds(&mut self.state);
-        let _0 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;
+        // Since we're consuming self, avoid updating members for a potential
+        // performance gain.
+        let mut state = self.state;
 
-        self.state.v1 ^= 0xdd;
-        Sip24Rounds::d_rounds(&mut self.state);
-        let _1 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;
+        for i in 0..last {
+            let elem = unsafe { self.buf.get_unchecked(i).assume_init().to_le() };
+            state.v3 ^= elem;
+            Sip24Rounds::c_rounds(&mut state);
+            state.v0 ^= elem;
+        }
+
+        // Get remaining partial element.
+        let elem = if self.nbuf % ELEM_SIZE != 0 {
+            unsafe {
+                // Ensure element is initialized by writing zero bytes. At most
+                // `ELEM_SIZE - 1` are required given the above check. It's safe
+                // to write this many because we have the spill and we maintain
+                // `self.nbuf` such that this write will start before the spill.
+                let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf);
+                ptr::write_bytes(dst, 0, ELEM_SIZE - 1);
+                self.buf.get_unchecked(last).assume_init().to_le()
+            }
+        } else {
+            0
+        };
+
+        // Finalize the hash.
+        let length = self.processed + self.nbuf;
+        let b: u64 = ((length as u64 & 0xff) << 56) | elem;
+
+        state.v3 ^= b;
+        Sip24Rounds::c_rounds(&mut state);
+        state.v0 ^= b;
+
+        state.v2 ^= 0xee;
+        Sip24Rounds::d_rounds(&mut state);
+        let _0 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
+
+        state.v1 ^= 0xdd;
+        Sip24Rounds::d_rounds(&mut state);
+        let _1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
+
         (_0, _1)
     }
 }
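
As an aside on the "Platform Dependence" comment in the hunk above, here is a minimal client-side sketch (the helper names are illustrative, not part of this file): widening `usize` to 64 bits and converting values to little-endian before calling the write methods makes the byte sequence fed to SipHasher128, and therefore the 128-bit result, identical across 32-/64-bit and big-/little-endian targets.

    use std::hash::Hasher;

    // Illustrative helper: `u64::to_le` is a no-op on little-endian hardware and a
    // byte swap on big-endian hardware, so the bytes written are the same everywhere.
    fn write_usize_stable(h: &mut SipHasher128, x: usize) {
        h.write_u64((x as u64).to_le());
    }

    fn stable_fingerprint(bytes: &[u8]) -> (u64, u64) {
        let mut h = SipHasher128::new_with_keys(0, 0);
        h.write(bytes); // byte slices already hash identically on every platform
        write_usize_stable(&mut h, bytes.len());
        h.finish128()
    }
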
@@ -230,92 +412,57 @@
 impl Hasher for SipHasher128 {
     #[inline]
     fn write_u8(&mut self, i: u8) {
-        self.short_write(i, i as u64);
+        self.short_write(i);
     }
 
     #[inline]
     fn write_u16(&mut self, i: u16) {
-        self.short_write(i, i.to_le() as u64);
+        self.short_write(i);
     }
 
     #[inline]
     fn write_u32(&mut self, i: u32) {
-        self.short_write(i, i.to_le() as u64);
+        self.short_write(i);
     }
 
     #[inline]
     fn write_u64(&mut self, i: u64) {
-        self.short_write(i, i.to_le() as u64);
+        self.short_write(i);
     }
 
     #[inline]
     fn write_usize(&mut self, i: usize) {
-        self.short_write(i, i.to_le() as u64);
+        self.short_write(i);
     }
 
     #[inline]
     fn write_i8(&mut self, i: i8) {
-        self.short_write(i, i as u8 as u64);
+        self.short_write(i as u8);
     }
 
     #[inline]
     fn write_i16(&mut self, i: i16) {
-        self.short_write(i, (i as u16).to_le() as u64);
+        self.short_write(i as u16);
     }
 
     #[inline]
     fn write_i32(&mut self, i: i32) {
-        self.short_write(i, (i as u32).to_le() as u64);
+        self.short_write(i as u32);
     }
 
     #[inline]
     fn write_i64(&mut self, i: i64) {
-        self.short_write(i, (i as u64).to_le() as u64);
+        self.short_write(i as u64);
     }
 
     #[inline]
     fn write_isize(&mut self, i: isize) {
-        self.short_write(i, (i as usize).to_le() as u64);
+        self.short_write(i as usize);
     }
 
     #[inline]
     fn write(&mut self, msg: &[u8]) {
-        let length = msg.len();
-        self.length += length;
-
-        let mut needed = 0;
-
-        if self.ntail != 0 {
-            needed = 8 - self.ntail;
-            self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
-            if length < needed {
-                self.ntail += length;
-                return;
-            } else {
-                self.state.v3 ^= self.tail;
-                Sip24Rounds::c_rounds(&mut self.state);
-                self.state.v0 ^= self.tail;
-                self.ntail = 0;
-            }
-        }
-
-        // Buffered tail is now flushed, process new input.
-        let len = length - needed;
-        let left = len & 0x7;
-
-        let mut i = needed;
-        while i < len - left {
-            let mi = unsafe { load_int_le!(msg, i, u64) };
-
-            self.state.v3 ^= mi;
-            Sip24Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= mi;
-
-            i += 8;
-        }
-
-        self.tail = unsafe { u8to64_le(msg, i, left) };
-        self.ntail = left;
+        self.slice_write(msg);
     }
 
     fn finish(&self) -> u64 {
diff --git a/compiler/rustc_data_structures/src/sip128/tests.rs b/compiler/rustc_data_structures/src/sip128/tests.rs
index 2e2274a..5fe967c 100644
--- a/compiler/rustc_data_structures/src/sip128/tests.rs
+++ b/compiler/rustc_data_structures/src/sip128/tests.rs
@@ -450,3 +450,48 @@
 
     assert_eq!(h1_hash, h2_hash);
 }
+
+macro_rules! test_fill_buffer {
+    ($type:ty, $write_method:ident) => {{
+        // Test filling and overfilling the buffer from all possible offsets
+        // for a given integer type and its corresponding write method.
+        const SIZE: usize = std::mem::size_of::<$type>();
+        let input = [42; BUFFER_SIZE];
+        let x = 0x01234567_89ABCDEF_76543210_FEDCBA98_u128 as $type;
+        let x_bytes = &x.to_ne_bytes();
+
+        for i in 1..=SIZE {
+            let s = &input[..BUFFER_SIZE - i];
+
+            let mut h1 = SipHasher128::new_with_keys(7, 13);
+            h1.write(s);
+            h1.$write_method(x);
+
+            let mut h2 = SipHasher128::new_with_keys(7, 13);
+            h2.write(s);
+            h2.write(x_bytes);
+
+            let h1_hash = h1.finish128();
+            let h2_hash = h2.finish128();
+
+            assert_eq!(h1_hash, h2_hash);
+        }
+    }};
+}
+
+#[test]
+fn test_fill_buffer() {
+    test_fill_buffer!(u8, write_u8);
+    test_fill_buffer!(u16, write_u16);
+    test_fill_buffer!(u32, write_u32);
+    test_fill_buffer!(u64, write_u64);
+    test_fill_buffer!(u128, write_u128);
+    test_fill_buffer!(usize, write_usize);
+
+    test_fill_buffer!(i8, write_i8);
+    test_fill_buffer!(i16, write_i16);
+    test_fill_buffer!(i32, write_i32);
+    test_fill_buffer!(i64, write_i64);
+    test_fill_buffer!(i128, write_i128);
+    test_fill_buffer!(isize, write_isize);
+}
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index 4807380..9a28f8f 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -93,7 +93,7 @@
 
     /// Iterate over elements, sorted by key
     #[inline]
-    pub fn iter(&self) -> ::std::slice::Iter<'_, (K, V)> {
+    pub fn iter(&self) -> std::slice::Iter<'_, (K, V)> {
         self.data.iter()
     }
 
@@ -134,7 +134,7 @@
         R: RangeBounds<K>,
     {
         let (start, end) = self.range_slice_indices(range);
-        self.data.splice(start..end, ::std::iter::empty());
+        self.data.splice(start..end, std::iter::empty());
     }
 
     /// Mutate all keys with the given function `f`. This mutation must not
@@ -241,7 +241,7 @@
 
 impl<K: Ord, V> IntoIterator for SortedMap<K, V> {
     type Item = (K, V);
-    type IntoIter = ::std::vec::IntoIter<(K, V)>;
+    type IntoIter = std::vec::IntoIter<(K, V)>;
 
     fn into_iter(self) -> Self::IntoIter {
         self.data.into_iter()
diff --git a/compiler/rustc_data_structures/src/sso/either_iter.rs b/compiler/rustc_data_structures/src/sso/either_iter.rs
new file mode 100644
index 0000000..af8ffcf
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sso/either_iter.rs
@@ -0,0 +1,75 @@
+use std::fmt;
+use std::iter::ExactSizeIterator;
+use std::iter::FusedIterator;
+use std::iter::Iterator;
+
+/// Iterator which may contain an instance of
+/// one of two specific implementations.
+///
+/// Note: For most methods, providing a custom
+///       implementation may marginally
+///       improve performance by doing the
+///       Left/Right match once instead of
+///       on every step.
+#[derive(Clone)]
+pub enum EitherIter<L, R> {
+    Left(L),
+    Right(R),
+}
+
+impl<L, R> Iterator for EitherIter<L, R>
+where
+    L: Iterator,
+    R: Iterator<Item = L::Item>,
+{
+    type Item = L::Item;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self {
+            EitherIter::Left(l) => l.next(),
+            EitherIter::Right(r) => r.next(),
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        match self {
+            EitherIter::Left(l) => l.size_hint(),
+            EitherIter::Right(r) => r.size_hint(),
+        }
+    }
+}
+
+impl<L, R> ExactSizeIterator for EitherIter<L, R>
+where
+    L: ExactSizeIterator,
+    R: ExactSizeIterator,
+    EitherIter<L, R>: Iterator,
+{
+    fn len(&self) -> usize {
+        match self {
+            EitherIter::Left(l) => l.len(),
+            EitherIter::Right(r) => r.len(),
+        }
+    }
+}
+
+impl<L, R> FusedIterator for EitherIter<L, R>
+where
+    L: FusedIterator,
+    R: FusedIterator,
+    EitherIter<L, R>: Iterator,
+{
+}
+
+impl<L, R> fmt::Debug for EitherIter<L, R>
+where
+    L: fmt::Debug,
+    R: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            EitherIter::Left(l) => l.fmt(f),
+            EitherIter::Right(r) => r.fmt(f),
+        }
+    }
+}
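
A minimal usage sketch of EitherIter (the function below is illustrative, not part of this file): it lets one function return either of two different iterator types behind a single concrete type, which is how SsoHashMap uses it for its keys/values/drain iterators in the next file.

    fn evens_or_all(v: &[u32], only_even: bool) -> impl Iterator<Item = &u32> {
        if only_even {
            EitherIter::Left(v.iter().filter(|x| **x % 2 == 0))
        } else {
            EitherIter::Right(v.iter())
        }
    }
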
diff --git a/compiler/rustc_data_structures/src/sso/map.rs b/compiler/rustc_data_structures/src/sso/map.rs
new file mode 100644
index 0000000..fe8ae7a
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sso/map.rs
@@ -0,0 +1,560 @@
+use super::either_iter::EitherIter;
+use crate::fx::FxHashMap;
+use arrayvec::ArrayVec;
+use std::fmt;
+use std::hash::Hash;
+use std::iter::FromIterator;
+use std::ops::Index;
+
+// For pointer-sized arguments, arrays
+// are faster than a set/map for up to
+// 64 arguments.
+//
+// On the other hand, such a big array
+// hurts cache performance and makes
+// passing sso structures around very
+// expensive.
+//
+// The biggest performance benefit is
+// gained for reasonably small arrays
+// that stay small in the vast majority
+// of cases.
+//
+// '8' is chosen as a sane default, to be
+// reevaluated later.
+//
+// Note: As of now the ArrayVec design
+//       prevents us from making it
+//       user-customizable.
+const SSO_ARRAY_SIZE: usize = 8;
+
+/// Small-storage-optimized implementation of a map.
+///
+/// Stores elements in a small array up to a certain length
+/// and switches to `HashMap` when that length is exceeded.
+//
+// FIXME: Implements subset of HashMap API.
+//
+// Missing HashMap API:
+//   all hasher-related
+//   try_reserve (unstable)
+//   shrink_to (unstable)
+//   drain_filter (unstable)
+//   into_keys/into_values (unstable)
+//   all raw_entry-related
+//   PartialEq/Eq (requires sorting the array)
+//   Entry::or_insert_with_key (unstable)
+//   Vacant/Occupied entries and related
+//
+// FIXME: In HashMap most methods accepting key reference
+// accept reference to generic `Q` where `K: Borrow<Q>`.
+//
+// However, using this approach in `HashMap::get` apparently
+// breaks inlining and noticeably reduces performance.
+//
+// Performance *should* be the same given that borrow is
+// a NOP in most cases, but in practice that's not the case.
+//
+// Further investigation is required.
+//
+// Affected methods:
+//   SsoHashMap::get
+//   SsoHashMap::get_mut
+//   SsoHashMap::get_entry
+//   SsoHashMap::get_key_value
+//   SsoHashMap::contains_key
+//   SsoHashMap::remove
+//   SsoHashMap::remove_entry
+//   Index::index
+//   SsoHashSet::take
+//   SsoHashSet::get
+//   SsoHashSet::remove
+//   SsoHashSet::contains
+
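
As background for the FIXME above, a sketch of the two lookup shapes being compared; the free function is illustrative only, and the generic form is std's `HashMap` convention rather than anything in this file.

    use std::borrow::Borrow;
    use std::collections::HashMap;
    use std::hash::Hash;

    // std convention: a HashMap<String, V> can be queried with a plain &str.
    fn get_generic<'a, K, V, Q>(map: &'a HashMap<K, V>, key: &Q) -> Option<&'a V>
    where
        K: Borrow<Q> + Eq + Hash,
        Q: Eq + Hash + ?Sized,
    {
        map.get(key)
    }

    // SsoHashMap convention below: `fn get(&self, key: &K) -> Option<&V>`,
    // which reportedly inlines better in practice.
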
+#[derive(Clone)]
+pub enum SsoHashMap<K, V> {
+    Array(ArrayVec<[(K, V); SSO_ARRAY_SIZE]>),
+    Map(FxHashMap<K, V>),
+}
+
+impl<K, V> SsoHashMap<K, V> {
+    /// Creates an empty `SsoHashMap`.
+    #[inline]
+    pub fn new() -> Self {
+        SsoHashMap::Array(ArrayVec::new())
+    }
+
+    /// Creates an empty `SsoHashMap` with the specified capacity.
+    pub fn with_capacity(cap: usize) -> Self {
+        if cap <= SSO_ARRAY_SIZE {
+            Self::new()
+        } else {
+            SsoHashMap::Map(FxHashMap::with_capacity_and_hasher(cap, Default::default()))
+        }
+    }
+
+    /// Clears the map, removing all key-value pairs. Keeps the allocated memory
+    /// for reuse.
+    pub fn clear(&mut self) {
+        match self {
+            SsoHashMap::Array(array) => array.clear(),
+            SsoHashMap::Map(map) => map.clear(),
+        }
+    }
+
+    /// Returns the number of elements the map can hold without reallocating.
+    pub fn capacity(&self) -> usize {
+        match self {
+            SsoHashMap::Array(_) => SSO_ARRAY_SIZE,
+            SsoHashMap::Map(map) => map.capacity(),
+        }
+    }
+
+    /// Returns the number of elements in the map.
+    pub fn len(&self) -> usize {
+        match self {
+            SsoHashMap::Array(array) => array.len(),
+            SsoHashMap::Map(map) => map.len(),
+        }
+    }
+
+    /// Returns `true` if the map contains no elements.
+    pub fn is_empty(&self) -> bool {
+        match self {
+            SsoHashMap::Array(array) => array.is_empty(),
+            SsoHashMap::Map(map) => map.is_empty(),
+        }
+    }
+
+    /// An iterator visiting all key-value pairs in arbitrary order.
+    /// The iterator element type is `(&'a K, &'a V)`.
+    #[inline]
+    pub fn iter(&self) -> <&Self as IntoIterator>::IntoIter {
+        self.into_iter()
+    }
+
+    /// An iterator visiting all key-value pairs in arbitrary order,
+    /// with mutable references to the values.
+    /// The iterator element type is `(&'a K, &'a mut V)`.
+    #[inline]
+    pub fn iter_mut(&mut self) -> impl Iterator<Item = (&'_ K, &'_ mut V)> {
+        self.into_iter()
+    }
+
+    /// An iterator visiting all keys in arbitrary order.
+    /// The iterator element type is `&'a K`.
+    pub fn keys(&self) -> impl Iterator<Item = &'_ K> {
+        match self {
+            SsoHashMap::Array(array) => EitherIter::Left(array.iter().map(|(k, _v)| k)),
+            SsoHashMap::Map(map) => EitherIter::Right(map.keys()),
+        }
+    }
+
+    /// An iterator visiting all values in arbitrary order.
+    /// The iterator element type is `&'a V`.
+    pub fn values(&self) -> impl Iterator<Item = &'_ V> {
+        match self {
+            SsoHashMap::Array(array) => EitherIter::Left(array.iter().map(|(_k, v)| v)),
+            SsoHashMap::Map(map) => EitherIter::Right(map.values()),
+        }
+    }
+
+    /// An iterator visiting all values mutably in arbitrary order.
+    /// The iterator element type is `&'a mut V`.
+    pub fn values_mut(&mut self) -> impl Iterator<Item = &'_ mut V> {
+        match self {
+            SsoHashMap::Array(array) => EitherIter::Left(array.iter_mut().map(|(_k, v)| v)),
+            SsoHashMap::Map(map) => EitherIter::Right(map.values_mut()),
+        }
+    }
+
+    /// Clears the map, returning all key-value pairs as an iterator. Keeps the
+    /// allocated memory for reuse.
+    pub fn drain(&mut self) -> impl Iterator<Item = (K, V)> + '_ {
+        match self {
+            SsoHashMap::Array(array) => EitherIter::Left(array.drain(..)),
+            SsoHashMap::Map(map) => EitherIter::Right(map.drain()),
+        }
+    }
+}
+
+impl<K: Eq + Hash, V> SsoHashMap<K, V> {
+    /// Changes underlying storage from array to hashmap
+    /// if array is full.
+    fn migrate_if_full(&mut self) {
+        if let SsoHashMap::Array(array) = self {
+            if array.is_full() {
+                *self = SsoHashMap::Map(array.drain(..).collect());
+            }
+        }
+    }
+
+    /// Reserves capacity for at least `additional` more elements to be inserted
+    /// in the `SsoHashMap`. The collection may reserve more space to avoid
+    /// frequent reallocations.
+    pub fn reserve(&mut self, additional: usize) {
+        match self {
+            SsoHashMap::Array(array) => {
+                if SSO_ARRAY_SIZE < (array.len() + additional) {
+                    let mut map: FxHashMap<K, V> = array.drain(..).collect();
+                    map.reserve(additional);
+                    *self = SsoHashMap::Map(map);
+                }
+            }
+            SsoHashMap::Map(map) => map.reserve(additional),
+        }
+    }
+
+    /// Shrinks the capacity of the map as much as possible. It will drop
+    /// down as much as possible while maintaining the internal rules
+    /// and possibly leaving some space in accordance with the resize policy.
+    pub fn shrink_to_fit(&mut self) {
+        if let SsoHashMap::Map(map) = self {
+            if map.len() <= SSO_ARRAY_SIZE {
+                *self = SsoHashMap::Array(map.drain().collect());
+            } else {
+                map.shrink_to_fit();
+            }
+        }
+    }
+
+    /// Retains only the elements specified by the predicate.
+    pub fn retain<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&K, &mut V) -> bool,
+    {
+        match self {
+            SsoHashMap::Array(array) => array.retain(|(k, v)| f(k, v)),
+            SsoHashMap::Map(map) => map.retain(f),
+        }
+    }
+
+    /// Inserts a key-value pair into the map.
+    ///
+    /// If the map did not have this key present, [`None`] is returned.
+    ///
+    /// If the map did have this key present, the value is updated, and the old
+    /// value is returned. The key is not updated, though; this matters for
+    /// types that can be `==` without being identical. See the [module-level
+    /// documentation] for more.
+    pub fn insert(&mut self, key: K, value: V) -> Option<V> {
+        match self {
+            SsoHashMap::Array(array) => {
+                for (k, v) in array.iter_mut() {
+                    if *k == key {
+                        let old_value = std::mem::replace(v, value);
+                        return Some(old_value);
+                    }
+                }
+                if let Err(error) = array.try_push((key, value)) {
+                    let mut map: FxHashMap<K, V> = array.drain(..).collect();
+                    let (key, value) = error.element();
+                    map.insert(key, value);
+                    *self = SsoHashMap::Map(map);
+                }
+                None
+            }
+            SsoHashMap::Map(map) => map.insert(key, value),
+        }
+    }
+
+    /// Removes a key from the map, returning the value at the key if the key
+    /// was previously in the map.
+    pub fn remove(&mut self, key: &K) -> Option<V> {
+        match self {
+            SsoHashMap::Array(array) => {
+                if let Some(index) = array.iter().position(|(k, _v)| k == key) {
+                    Some(array.swap_remove(index).1)
+                } else {
+                    None
+                }
+            }
+            SsoHashMap::Map(map) => map.remove(key),
+        }
+    }
+
+    /// Removes a key from the map, returning the stored key and value if the
+    /// key was previously in the map.
+    pub fn remove_entry(&mut self, key: &K) -> Option<(K, V)> {
+        match self {
+            SsoHashMap::Array(array) => {
+                if let Some(index) = array.iter().position(|(k, _v)| k == key) {
+                    Some(array.swap_remove(index))
+                } else {
+                    None
+                }
+            }
+            SsoHashMap::Map(map) => map.remove_entry(key),
+        }
+    }
+
+    /// Returns a reference to the value corresponding to the key.
+    pub fn get(&self, key: &K) -> Option<&V> {
+        match self {
+            SsoHashMap::Array(array) => {
+                for (k, v) in array {
+                    if k == key {
+                        return Some(v);
+                    }
+                }
+                None
+            }
+            SsoHashMap::Map(map) => map.get(key),
+        }
+    }
+
+    /// Returns a mutable reference to the value corresponding to the key.
+    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
+        match self {
+            SsoHashMap::Array(array) => {
+                for (k, v) in array {
+                    if k == key {
+                        return Some(v);
+                    }
+                }
+                None
+            }
+            SsoHashMap::Map(map) => map.get_mut(key),
+        }
+    }
+
+    /// Returns the key-value pair corresponding to the supplied key.
+    pub fn get_key_value(&self, key: &K) -> Option<(&K, &V)> {
+        match self {
+            SsoHashMap::Array(array) => {
+                for (k, v) in array {
+                    if k == key {
+                        return Some((k, v));
+                    }
+                }
+                None
+            }
+            SsoHashMap::Map(map) => map.get_key_value(key),
+        }
+    }
+
+    /// Returns `true` if the map contains a value for the specified key.
+    pub fn contains_key(&self, key: &K) -> bool {
+        match self {
+            SsoHashMap::Array(array) => array.iter().any(|(k, _v)| k == key),
+            SsoHashMap::Map(map) => map.contains_key(key),
+        }
+    }
+
+    /// Gets the given key's corresponding entry in the map for in-place manipulation.
+    #[inline]
+    pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+        Entry { ssomap: self, key }
+    }
+}
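
A minimal usage sketch of the array-to-map migration implemented by `insert` above (the literal 8 restates SSO_ARRAY_SIZE; matching on the public variants is done only to make the switch visible):

    fn sso_migration_demo() {
        let mut m: SsoHashMap<u32, &str> = SsoHashMap::new();

        // The first eight inserts fit in the inline ArrayVec.
        for i in 0..8u32 {
            m.insert(i, "inline");
        }
        assert!(matches!(m, SsoHashMap::Array(_)));

        // The ninth insert fails ArrayVec::try_push and migrates to an FxHashMap.
        m.insert(8, "spilled");
        assert!(matches!(m, SsoHashMap::Map(_)));
        assert_eq!(m.len(), 9);
        assert_eq!(m.get(&0), Some(&"inline"));
    }
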
+
+impl<K, V> Default for SsoHashMap<K, V> {
+    #[inline]
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<K: Eq + Hash, V> FromIterator<(K, V)> for SsoHashMap<K, V> {
+    fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I) -> SsoHashMap<K, V> {
+        let mut map: SsoHashMap<K, V> = Default::default();
+        map.extend(iter);
+        map
+    }
+}
+
+impl<K: Eq + Hash, V> Extend<(K, V)> for SsoHashMap<K, V> {
+    fn extend<I>(&mut self, iter: I)
+    where
+        I: IntoIterator<Item = (K, V)>,
+    {
+        for (key, value) in iter.into_iter() {
+            self.insert(key, value);
+        }
+    }
+
+    #[inline]
+    fn extend_one(&mut self, (k, v): (K, V)) {
+        self.insert(k, v);
+    }
+
+    fn extend_reserve(&mut self, additional: usize) {
+        match self {
+            SsoHashMap::Array(array) => {
+                if SSO_ARRAY_SIZE < (array.len() + additional) {
+                    let mut map: FxHashMap<K, V> = array.drain(..).collect();
+                    map.extend_reserve(additional);
+                    *self = SsoHashMap::Map(map);
+                }
+            }
+            SsoHashMap::Map(map) => map.extend_reserve(additional),
+        }
+    }
+}
+
+impl<'a, K, V> Extend<(&'a K, &'a V)> for SsoHashMap<K, V>
+where
+    K: Eq + Hash + Copy,
+    V: Copy,
+{
+    fn extend<T: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: T) {
+        self.extend(iter.into_iter().map(|(k, v)| (*k, *v)))
+    }
+
+    #[inline]
+    fn extend_one(&mut self, (&k, &v): (&'a K, &'a V)) {
+        self.insert(k, v);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        Extend::<(K, V)>::extend_reserve(self, additional)
+    }
+}
+
+impl<K, V> IntoIterator for SsoHashMap<K, V> {
+    type IntoIter = EitherIter<
+        <ArrayVec<[(K, V); 8]> as IntoIterator>::IntoIter,
+        <FxHashMap<K, V> as IntoIterator>::IntoIter,
+    >;
+    type Item = <Self::IntoIter as Iterator>::Item;
+
+    fn into_iter(self) -> Self::IntoIter {
+        match self {
+            SsoHashMap::Array(array) => EitherIter::Left(array.into_iter()),
+            SsoHashMap::Map(map) => EitherIter::Right(map.into_iter()),
+        }
+    }
+}
+
+/// Adapts the Item of an array reference iterator to the Item of a hashmap reference iterator.
+#[inline(always)]
+fn adapt_array_ref_it<K, V>(pair: &'a (K, V)) -> (&'a K, &'a V) {
+    let (a, b) = pair;
+    (a, b)
+}
+
+/// Adapts the `Item` of an array mutable-reference iterator to the `Item` of a hashmap mutable-reference iterator.
+#[inline(always)]
+fn adapt_array_mut_it<K, V>(pair: &'a mut (K, V)) -> (&'a K, &'a mut V) {
+    let (a, b) = pair;
+    (a, b)
+}
+
+impl<'a, K, V> IntoIterator for &'a SsoHashMap<K, V> {
+    type IntoIter = EitherIter<
+        std::iter::Map<
+            <&'a ArrayVec<[(K, V); 8]> as IntoIterator>::IntoIter,
+            fn(&'a (K, V)) -> (&'a K, &'a V),
+        >,
+        <&'a FxHashMap<K, V> as IntoIterator>::IntoIter,
+    >;
+    type Item = <Self::IntoIter as Iterator>::Item;
+
+    fn into_iter(self) -> Self::IntoIter {
+        match self {
+            SsoHashMap::Array(array) => EitherIter::Left(array.into_iter().map(adapt_array_ref_it)),
+            SsoHashMap::Map(map) => EitherIter::Right(map.iter()),
+        }
+    }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut SsoHashMap<K, V> {
+    type IntoIter = EitherIter<
+        std::iter::Map<
+            <&'a mut ArrayVec<[(K, V); 8]> as IntoIterator>::IntoIter,
+            fn(&'a mut (K, V)) -> (&'a K, &'a mut V),
+        >,
+        <&'a mut FxHashMap<K, V> as IntoIterator>::IntoIter,
+    >;
+    type Item = <Self::IntoIter as Iterator>::Item;
+
+    fn into_iter(self) -> Self::IntoIter {
+        match self {
+            SsoHashMap::Array(array) => EitherIter::Left(array.into_iter().map(adapt_array_mut_it)),
+            SsoHashMap::Map(map) => EitherIter::Right(map.iter_mut()),
+        }
+    }
+}
+
+impl<K, V> fmt::Debug for SsoHashMap<K, V>
+where
+    K: fmt::Debug,
+    V: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_map().entries(self.iter()).finish()
+    }
+}
+
+impl<'a, K, V> Index<&'a K> for SsoHashMap<K, V>
+where
+    K: Eq + Hash,
+{
+    type Output = V;
+
+    #[inline]
+    fn index(&self, key: &K) -> &V {
+        self.get(key).expect("no entry found for key")
+    }
+}
+
+/// A view into a single entry in a map.
+pub struct Entry<'a, K, V> {
+    ssomap: &'a mut SsoHashMap<K, V>,
+    key: K,
+}
+
+impl<'a, K: Eq + Hash, V> Entry<'a, K, V> {
+    /// Provides in-place mutable access to an occupied entry before any
+    /// potential inserts into the map.
+    pub fn and_modify<F>(self, f: F) -> Self
+    where
+        F: FnOnce(&mut V),
+    {
+        if let Some(value) = self.ssomap.get_mut(&self.key) {
+            f(value);
+        }
+        self
+    }
+
+    /// Ensures a value is in the entry by inserting the default if empty, and returns
+    /// a mutable reference to the value in the entry.
+    #[inline]
+    pub fn or_insert(self, value: V) -> &'a mut V {
+        self.or_insert_with(|| value)
+    }
+
+    /// Ensures a value is in the entry by inserting the result of the default function if empty,
+    /// and returns a mutable reference to the value in the entry.
+    pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
+        self.ssomap.migrate_if_full();
+        match self.ssomap {
+            SsoHashMap::Array(array) => {
+                let key_ref = &self.key;
+                let found_index = array.iter().position(|(k, _v)| k == key_ref);
+                let index = if let Some(index) = found_index {
+                    index
+                } else {
+                    let index = array.len();
+                    array.try_push((self.key, default())).unwrap();
+                    index
+                };
+                &mut array[index].1
+            }
+            SsoHashMap::Map(map) => map.entry(self.key).or_insert_with(default),
+        }
+    }
+
+    /// Returns a reference to this entry's key.
+    #[inline]
+    pub fn key(&self) -> &K {
+        &self.key
+    }
+}
+
+impl<'a, K: Eq + Hash, V: Default> Entry<'a, K, V> {
+    /// Ensures a value is in the entry by inserting the default value if empty,
+    /// and returns a mutable reference to the value in the entry.
+    #[inline]
+    pub fn or_default(self) -> &'a mut V {
+        self.or_insert_with(Default::default)
+    }
+}
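A minimal usage sketch of the `SsoHashMap` entry API added above (illustrative only, not part of the patch; it assumes `rustc_data_structures` is available as a dependency and that the key type is `Eq + Hash`):

```rust
use rustc_data_structures::sso::SsoHashMap;

fn main() {
    // Counting words exercises `entry`/`or_insert` on both representations:
    // the map starts as an inline array and only spills to an FxHashMap
    // once it outgrows SSO_ARRAY_SIZE entries.
    let mut counts: SsoHashMap<&str, usize> = SsoHashMap::new();
    for &word in ["small", "storage", "optimized", "small"].iter() {
        *counts.entry(word).or_insert(0) += 1;
    }
    assert_eq!(counts.get(&"small"), Some(&2));
    assert_eq!(counts.len(), 3);
}
```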
diff --git a/compiler/rustc_data_structures/src/sso/mod.rs b/compiler/rustc_data_structures/src/sso/mod.rs
new file mode 100644
index 0000000..dd21bc8
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sso/mod.rs
@@ -0,0 +1,6 @@
+mod either_iter;
+mod map;
+mod set;
+
+pub use map::SsoHashMap;
+pub use set::SsoHashSet;
diff --git a/compiler/rustc_data_structures/src/sso/set.rs b/compiler/rustc_data_structures/src/sso/set.rs
new file mode 100644
index 0000000..23cff02
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sso/set.rs
@@ -0,0 +1,237 @@
+use std::fmt;
+use std::hash::Hash;
+use std::iter::FromIterator;
+
+use super::map::SsoHashMap;
+
+/// Small-storage-optimized implementation of a set.
+///
+/// Stores elements in a small array up to a certain length
+/// and switches to `HashSet` when that length is exceeded.
+//
+// FIXME: Implements a subset of the `HashSet` API.
+//
+// Missing HashSet API:
+//   all hasher-related
+//   try_reserve (unstable)
+//   shrink_to (unstable)
+//   drain_filter (unstable)
+//   replace
+//   get_or_insert/get_or_insert_owned/get_or_insert_with (unstable)
+//   difference/symmetric_difference/intersection/union
+//   is_disjoint/is_subset/is_superset
+//   PartialEq/Eq (requires SsoHashMap implementation)
+//   BitOr/BitAnd/BitXor/Sub
+#[derive(Clone)]
+pub struct SsoHashSet<T> {
+    map: SsoHashMap<T, ()>,
+}
+
+/// Adapter function used to convert the
+/// result of `SsoHashMap` functions into the
+/// result `SsoHashSet` should return.
+#[inline(always)]
+fn entry_to_key<K, V>((k, _v): (K, V)) -> K {
+    k
+}
+
+impl<T> SsoHashSet<T> {
+    /// Creates an empty `SsoHashSet`.
+    #[inline]
+    pub fn new() -> Self {
+        Self { map: SsoHashMap::new() }
+    }
+
+    /// Creates an empty `SsoHashSet` with the specified capacity.
+    #[inline]
+    pub fn with_capacity(cap: usize) -> Self {
+        Self { map: SsoHashMap::with_capacity(cap) }
+    }
+
+    /// Clears the set, removing all values.
+    #[inline]
+    pub fn clear(&mut self) {
+        self.map.clear()
+    }
+
+    /// Returns the number of elements the set can hold without reallocating.
+    #[inline]
+    pub fn capacity(&self) -> usize {
+        self.map.capacity()
+    }
+
+    /// Returns the number of elements in the set.
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.map.len()
+    }
+
+    /// Returns `true` if the set contains no elements.
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.map.is_empty()
+    }
+
+    /// An iterator visiting all elements in arbitrary order.
+    /// The iterator element type is `&'a T`.
+    #[inline]
+    pub fn iter(&'a self) -> impl Iterator<Item = &'a T> {
+        self.into_iter()
+    }
+
+    /// Clears the set, returning all elements in an iterator.
+    #[inline]
+    pub fn drain(&mut self) -> impl Iterator<Item = T> + '_ {
+        self.map.drain().map(entry_to_key)
+    }
+}
+
+impl<T: Eq + Hash> SsoHashSet<T> {
+    /// Reserves capacity for at least `additional` more elements to be inserted
+    /// in the `SsoHashSet`. The collection may reserve more space to avoid
+    /// frequent reallocations.
+    #[inline]
+    pub fn reserve(&mut self, additional: usize) {
+        self.map.reserve(additional)
+    }
+
+    /// Shrinks the capacity of the set as much as possible. It will drop
+    /// down as much as possible while maintaining the internal rules
+    /// and possibly leaving some space in accordance with the resize policy.
+    #[inline]
+    pub fn shrink_to_fit(&mut self) {
+        self.map.shrink_to_fit()
+    }
+
+    /// Retains only the elements specified by the predicate.
+    #[inline]
+    pub fn retain<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&T) -> bool,
+    {
+        self.map.retain(|k, _v| f(k))
+    }
+
+    /// Removes and returns the value in the set, if any, that is equal to the given one.
+    #[inline]
+    pub fn take(&mut self, value: &T) -> Option<T> {
+        self.map.remove_entry(value).map(entry_to_key)
+    }
+
+    /// Returns a reference to the value in the set, if any, that is equal to the given value.
+    #[inline]
+    pub fn get(&self, value: &T) -> Option<&T> {
+        self.map.get_key_value(value).map(entry_to_key)
+    }
+
+    /// Adds a value to the set.
+    ///
+    /// If the set did not have this value present, `true` is returned.
+    ///
+    /// If the set did have this value present, `false` is returned.
+    #[inline]
+    pub fn insert(&mut self, elem: T) -> bool {
+        self.map.insert(elem, ()).is_none()
+    }
+
+    /// Removes a value from the set. Returns whether the value was
+    /// present in the set.
+    #[inline]
+    pub fn remove(&mut self, value: &T) -> bool {
+        self.map.remove(value).is_some()
+    }
+
+    /// Returns `true` if the set contains a value.
+    #[inline]
+    pub fn contains(&self, value: &T) -> bool {
+        self.map.contains_key(value)
+    }
+}
+
+impl<T: Eq + Hash> FromIterator<T> for SsoHashSet<T> {
+    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> SsoHashSet<T> {
+        let mut set: SsoHashSet<T> = Default::default();
+        set.extend(iter);
+        set
+    }
+}
+
+impl<T> Default for SsoHashSet<T> {
+    #[inline]
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T: Eq + Hash> Extend<T> for SsoHashSet<T> {
+    fn extend<I>(&mut self, iter: I)
+    where
+        I: IntoIterator<Item = T>,
+    {
+        for val in iter.into_iter() {
+            self.insert(val);
+        }
+    }
+
+    #[inline]
+    fn extend_one(&mut self, item: T) {
+        self.insert(item);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        self.map.extend_reserve(additional)
+    }
+}
+
+impl<'a, T> Extend<&'a T> for SsoHashSet<T>
+where
+    T: 'a + Eq + Hash + Copy,
+{
+    #[inline]
+    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+        self.extend(iter.into_iter().cloned());
+    }
+
+    #[inline]
+    fn extend_one(&mut self, &item: &'a T) {
+        self.insert(item);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        Extend::<T>::extend_reserve(self, additional)
+    }
+}
+
+impl<T> IntoIterator for SsoHashSet<T> {
+    type IntoIter = std::iter::Map<<SsoHashMap<T, ()> as IntoIterator>::IntoIter, fn((T, ())) -> T>;
+    type Item = <Self::IntoIter as Iterator>::Item;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        self.map.into_iter().map(entry_to_key)
+    }
+}
+
+impl<'a, T> IntoIterator for &'a SsoHashSet<T> {
+    type IntoIter = std::iter::Map<
+        <&'a SsoHashMap<T, ()> as IntoIterator>::IntoIter,
+        fn((&'a T, &'a ())) -> &'a T,
+    >;
+    type Item = <Self::IntoIter as Iterator>::Item;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        self.map.iter().map(entry_to_key)
+    }
+}
+
+impl<T> fmt::Debug for SsoHashSet<T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_set().entries(self.iter()).finish()
+    }
+}
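Likewise, a short sketch of the `SsoHashSet` wrapper (illustrative only, not part of the patch): it stores `()` values in an `SsoHashMap`, so it inherits the same inline-array-then-spill behaviour.

```rust
use rustc_data_structures::sso::SsoHashSet;

fn main() {
    let mut seen: SsoHashSet<u32> = SsoHashSet::new();
    assert!(seen.insert(1));  // newly inserted
    assert!(!seen.insert(1)); // already present, not inserted again
    seen.extend([2, 3, 4].iter()); // Extend<&T> impl for Copy element types
    assert!(seen.contains(&3));
    assert_eq!(seen.len(), 4);
    assert_eq!(seen.iter().count(), 4);
}
```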
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
index 68875b3..579eb1c 100644
--- a/compiler/rustc_data_structures/src/stable_hasher.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -20,7 +20,7 @@
 }
 
 impl ::std::fmt::Debug for StableHasher {
-    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         write!(f, "{:?}", self.state)
     }
 }
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index d22f3ad..26706cd 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -512,7 +512,7 @@
     }
 }
 
-#[derive(Debug)]
+#[derive(Debug, Default)]
 pub struct RwLock<T>(InnerRwLock<T>);
 
 impl<T> RwLock<T> {
diff --git a/compiler/rustc_data_structures/src/tagged_ptr.rs b/compiler/rustc_data_structures/src/tagged_ptr.rs
index e3839d1..cd1e12c 100644
--- a/compiler/rustc_data_structures/src/tagged_ptr.rs
+++ b/compiler/rustc_data_structures/src/tagged_ptr.rs
@@ -24,7 +24,7 @@
 pub use copy::CopyTaggedPtr;
 pub use drop::TaggedPtr;
 
-/// This describes the pointer type encaspulated by TaggedPtr.
+/// This describes the pointer type encapsulated by TaggedPtr.
 ///
 /// # Safety
 ///
diff --git a/compiler/rustc_data_structures/src/transitive_relation.rs b/compiler/rustc_data_structures/src/transitive_relation.rs
index fe60a99..2e1512b 100644
--- a/compiler/rustc_data_structures/src/transitive_relation.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation.rs
@@ -18,7 +18,7 @@
     edges: Vec<Edge>,
 
     // This is a cached transitive closure derived from the edges.
-    // Currently, we build it lazilly and just throw out any existing
+    // Currently, we build it lazily and just throw out any existing
     // copy whenever a new edge is added. (The Lock is to permit
     // the lazy computation.) This is kind of silly, except for the
     // fact its size is tied to `self.elements.len()`, so I wanted to
@@ -255,7 +255,7 @@
             // argument is that, after step 2, we know that no element
             // can reach its successors (in the vector, not the graph).
             // After step 3, we know that no element can reach any of
-            // its predecesssors (because of step 2) nor successors
+            // its predecessors (because of step 2) nor successors
             // (because we just called `pare_down`)
             //
             // This same algorithm is used in `parents` below.
diff --git a/compiler/rustc_data_structures/src/work_queue.rs b/compiler/rustc_data_structures/src/work_queue.rs
index 0c848eb..cc562bc 100644
--- a/compiler/rustc_data_structures/src/work_queue.rs
+++ b/compiler/rustc_data_structures/src/work_queue.rs
@@ -14,12 +14,6 @@
 }
 
 impl<T: Idx> WorkQueue<T> {
-    /// Creates a new work queue with all the elements from (0..len).
-    #[inline]
-    pub fn with_all(len: usize) -> Self {
-        WorkQueue { deque: (0..len).map(T::new).collect(), set: BitSet::new_filled(len) }
-    }
-
     /// Creates a new work queue that starts empty, where elements range from (0..len).
     #[inline]
     pub fn with_none(len: usize) -> Self {
diff --git a/compiler/rustc_driver/Cargo.toml b/compiler/rustc_driver/Cargo.toml
index adfce10..0adc006 100644
--- a/compiler/rustc_driver/Cargo.toml
+++ b/compiler/rustc_driver/Cargo.toml
@@ -10,7 +10,8 @@
 [dependencies]
 libc = "0.2"
 tracing = { version = "0.1.18" }
-tracing-subscriber = { version = "0.2.10", default-features = false, features = ["fmt", "env-filter", "smallvec", "parking_lot", "ansi"] }
+tracing-subscriber = { version = "0.2.13", default-features = false, features = ["fmt", "env-filter", "smallvec", "parking_lot", "ansi"] }
+tracing-tree = "0.1.6"
 rustc_middle = { path = "../rustc_middle" }
 rustc_ast_pretty = { path = "../rustc_ast_pretty" }
 rustc_target = { path = "../rustc_target" }
diff --git a/compiler/rustc_driver/src/lib.rs b/compiler/rustc_driver/src/lib.rs
index a3391ca..a192c2e 100644
--- a/compiler/rustc_driver/src/lib.rs
+++ b/compiler/rustc_driver/src/lib.rs
@@ -22,7 +22,7 @@
 use rustc_errors::{ErrorReported, PResult};
 use rustc_feature::{find_gated_cfg, UnstableFeatures};
 use rustc_hir::def_id::LOCAL_CRATE;
-use rustc_interface::util::{collect_crate_types, get_builtin_codegen_backend};
+use rustc_interface::util::{self, collect_crate_types, get_builtin_codegen_backend};
 use rustc_interface::{interface, Queries};
 use rustc_lint::LintStore;
 use rustc_metadata::locator;
@@ -198,8 +198,7 @@
             ),
         }
     }
-    let diagnostic_output =
-        emitter.map(|emitter| DiagnosticOutput::Raw(emitter)).unwrap_or(DiagnosticOutput::Default);
+    let diagnostic_output = emitter.map_or(DiagnosticOutput::Default, DiagnosticOutput::Raw);
     let matches = match handle_options(&args) {
         Some(matches) => matches,
         None => return Ok(()),
@@ -643,7 +642,7 @@
             let codegen_results: CodegenResults = json::decode(&rlink_data).unwrap_or_else(|err| {
                 sess.fatal(&format!("failed to decode rlink: {}", err));
             });
-            compiler.codegen_backend().link(&sess, Box::new(codegen_results), &outputs)
+            compiler.codegen_backend().link(&sess, codegen_results, &outputs)
         } else {
             sess.fatal("rlink must be a file")
         }
@@ -671,7 +670,7 @@
                 Input::File(ref ifile) => {
                     let path = &(*ifile);
                     let mut v = Vec::new();
-                    locator::list_file_metadata(&sess.target.target, path, metadata_loader, &mut v)
+                    locator::list_file_metadata(&sess.target, path, metadata_loader, &mut v)
                         .unwrap();
                     println!("{}", String::from_utf8(v).unwrap());
                 }
@@ -715,8 +714,9 @@
         for req in &sess.opts.prints {
             match *req {
                 TargetList => {
-                    let mut targets = rustc_target::spec::get_targets().collect::<Vec<String>>();
-                    targets.sort();
+                    let mut targets =
+                        rustc_target::spec::TARGETS.iter().copied().collect::<Vec<_>>();
+                    targets.sort_unstable();
                     println!("{}", targets.join("\n"));
                 }
                 Sysroot => println!("{}", sess.sysroot.display()),
@@ -724,7 +724,7 @@
                     "{}",
                     sess.target_tlib_path.as_ref().unwrap_or(&sess.host_tlib_path).dir.display()
                 ),
-                TargetSpec => println!("{}", sess.target.target.to_json().pretty()),
+                TargetSpec => println!("{}", sess.target.to_json().pretty()),
                 FileNames | CrateName => {
                     let input = input.unwrap_or_else(|| {
                         early_error(ErrorOutputType::default(), "no input file provided")
@@ -793,37 +793,24 @@
     }
 }
 
-/// Returns a version string such as "0.12.0-dev".
-fn release_str() -> Option<&'static str> {
-    option_env!("CFG_RELEASE")
-}
-
-/// Returns the full SHA1 hash of HEAD of the Git repo from which rustc was built.
-fn commit_hash_str() -> Option<&'static str> {
-    option_env!("CFG_VER_HASH")
-}
-
-/// Returns the "commit date" of HEAD of the Git repo from which rustc was built as a static string.
-fn commit_date_str() -> Option<&'static str> {
-    option_env!("CFG_VER_DATE")
-}
-
 /// Prints version information
 pub fn version(binary: &str, matches: &getopts::Matches) {
     let verbose = matches.opt_present("verbose");
 
-    println!("{} {}", binary, option_env!("CFG_VERSION").unwrap_or("unknown version"));
+    println!("{} {}", binary, util::version_str().unwrap_or("unknown version"));
 
     if verbose {
         fn unw(x: Option<&str>) -> &str {
             x.unwrap_or("unknown")
         }
         println!("binary: {}", binary);
-        println!("commit-hash: {}", unw(commit_hash_str()));
-        println!("commit-date: {}", unw(commit_date_str()));
+        println!("commit-hash: {}", unw(util::commit_hash_str()));
+        println!("commit-date: {}", unw(util::commit_date_str()));
         println!("host: {}", config::host_triple());
-        println!("release: {}", unw(release_str()));
-        get_builtin_codegen_backend("llvm")().print_version();
+        println!("release: {}", unw(util::release_str()));
+        if cfg!(llvm) {
+            get_builtin_codegen_backend("llvm")().print_version();
+        }
     }
 }
 
@@ -1109,7 +1096,9 @@
     }
 
     if cg_flags.iter().any(|x| *x == "passes=list") {
-        get_builtin_codegen_backend("llvm")().print_passes();
+        if cfg!(llvm) {
+            get_builtin_codegen_backend("llvm")().print_passes();
+        }
         return None;
     }
 
@@ -1237,7 +1226,7 @@
         format!("we would appreciate a bug report: {}", bug_report_url).into(),
         format!(
             "rustc {} running on {}",
-            option_env!("CFG_VERSION").unwrap_or("unknown_version"),
+            util::version_str().unwrap_or("unknown_version"),
             config::host_triple()
         )
         .into(),
@@ -1258,9 +1247,9 @@
     // If backtraces are enabled, also print the query stack
     let backtrace = env::var_os("RUST_BACKTRACE").map(|x| &x != "0").unwrap_or(false);
 
-    if backtrace {
-        TyCtxt::try_print_query_stack(&handler);
-    }
+    let num_frames = if backtrace { None } else { Some(2) };
+
+    TyCtxt::try_print_query_stack(&handler, num_frames);
 
     #[cfg(windows)]
     unsafe {
@@ -1294,11 +1283,21 @@
         Ok(s) if s.is_empty() => return,
         Ok(_) => {}
     }
-    let builder = tracing_subscriber::FmtSubscriber::builder();
+    let filter = tracing_subscriber::EnvFilter::from_env(env);
+    let layer = tracing_tree::HierarchicalLayer::default()
+        .with_indent_lines(true)
+        .with_ansi(true)
+        .with_targets(true)
+        .with_wraparound(10)
+        .with_verbose_exit(true)
+        .with_verbose_entry(true)
+        .with_indent_amount(2);
+    #[cfg(parallel_compiler)]
+    let layer = layer.with_thread_ids(true).with_thread_names(true);
 
-    let builder = builder.with_env_filter(tracing_subscriber::EnvFilter::from_env(env));
-
-    builder.init()
+    use tracing_subscriber::layer::SubscriberExt;
+    let subscriber = tracing_subscriber::Registry::default().with(filter).with(layer);
+    tracing::subscriber::set_global_default(subscriber).unwrap();
 }
 
 pub fn main() -> ! {
diff --git a/compiler/rustc_error_codes/src/error_codes.rs b/compiler/rustc_error_codes/src/error_codes.rs
index 81f65ac..0a88759 100644
--- a/compiler/rustc_error_codes/src/error_codes.rs
+++ b/compiler/rustc_error_codes/src/error_codes.rs
@@ -459,6 +459,9 @@
 E0774: include_str!("./error_codes/E0774.md"),
 E0775: include_str!("./error_codes/E0775.md"),
 E0776: include_str!("./error_codes/E0776.md"),
+E0777: include_str!("./error_codes/E0777.md"),
+E0778: include_str!("./error_codes/E0778.md"),
+E0779: include_str!("./error_codes/E0779.md"),
 ;
 //  E0006, // merged with E0005
 //  E0008, // cannot bind by-move into a pattern guard
diff --git a/compiler/rustc_error_codes/src/error_codes/E0007.md b/compiler/rustc_error_codes/src/error_codes/E0007.md
index 2be7870..2c22b86 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0007.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0007.md
@@ -1,3 +1,5 @@
+#### Note: this error code is no longer emitted by the compiler.
+
 This error indicates that the bindings in a match arm would require a value to
 be moved into more than one location, thus violating unique ownership. Code
 like the following is invalid as it requires the entire `Option<String>` to be
@@ -6,11 +8,13 @@
 
 Erroneous code example:
 
-```compile_fail,E0007
+```compile_fail,E0382
+#![feature(bindings_after_at)]
+
 let x = Some("s".to_string());
 
 match x {
-    op_string @ Some(s) => {}, // error: cannot bind by-move with sub-bindings
+    op_string @ Some(s) => {}, // error: use of moved value
     None => {},
 }
 ```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0284.md b/compiler/rustc_error_codes/src/error_codes/E0284.md
index a1ffa2b..82598ae 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0284.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0284.md
@@ -5,37 +5,29 @@
 For example:
 
 ```compile_fail,E0284
-fn foo() -> Result<bool, ()> {
-    let results = [Ok(true), Ok(false), Err(())].iter().cloned();
-    let v: Vec<bool> = results.collect()?;
-    // Do things with v...
-    Ok(true)
+fn main() {
+    let n: u32 = 1;
+    let mut d: u64 = 2;
+    d = d + n.into();
 }
 ```
 
-Here we have an iterator `results` over `Result<bool, ()>`.
-Hence, `results.collect()` can return any type implementing
-`FromIterator<Result<bool, ()>>`. On the other hand, the
-`?` operator can accept any type implementing `Try`.
+Here we have an addition of `d` and `n.into()`. Hence, `n.into()` can return
+any type `T` where `u64: Add<T>`. On the other hand, the `into` method can
+return any type where `u32: Into<T>`.
 
-The author of this code probably wants `collect()` to return a
-`Result<Vec<bool>, ()>`, but the compiler can't be sure
-that there isn't another type `T` implementing both `Try` and
-`FromIterator<Result<bool, ()>>` in scope such that
-`T::Ok == Vec<bool>`. Hence, this code is ambiguous and an error
-is returned.
+The author of this code probably wants `into()` to return a `u64`, but the
+compiler can't be sure that there isn't another type `T` where both
+`u32: Into<T>` and `u64: Add<T>`.
 
 To resolve this error, use a concrete type for the intermediate expression:
 
 ```
-fn foo() -> Result<bool, ()> {
-    let results = [Ok(true), Ok(false), Err(())].iter().cloned();
-    let v = {
-        let temp: Result<Vec<bool>, ()> = results.collect();
-        temp?
-    };
-    // Do things with v...
-    Ok(true)
+fn main() {
+    let n: u32 = 1;
+    let mut d: u64 = 2;
+    let m: u64 = n.into();
+    d = d + m;
 }
 ```
 
diff --git a/compiler/rustc_error_codes/src/error_codes/E0308.md b/compiler/rustc_error_codes/src/error_codes/E0308.md
index e2c40f0..decee63 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0308.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0308.md
@@ -1,18 +1,26 @@
 Expected type did not match the received type.
 
-Erroneous code example:
+Erroneous code examples:
 
 ```compile_fail,E0308
-let x: i32 = "I am not a number!";
-//     ~~~   ~~~~~~~~~~~~~~~~~~~~
-//      |             |
-//      |    initializing expression;
-//      |    compiler infers type `&str`
-//      |
-//    type `i32` assigned to variable `x`
+fn plus_one(x: i32) -> i32 {
+    x + 1
+}
+
+plus_one("Not a number");
+//       ^^^^^^^^^^^^^^ expected `i32`, found `&str`
+
+if "Not a bool" {
+// ^^^^^^^^^^^^ expected `bool`, found `&str`
+}
+
+let x: f32 = "Not a float";
+//     ---   ^^^^^^^^^^^^^ expected `f32`, found `&str`
+//     |
+//     expected due to this
 ```
 
-This error occurs when the compiler is unable to infer the concrete type of a
-variable. It can occur in several cases, the most common being a mismatch
-between two types: the type the author explicitly assigned, and the type the
-compiler inferred.
+This error occurs when an expression was used in a place where the compiler
+expected an expression of a different type. It can occur in several cases, the
+most common being when calling a function and passing an argument which has a
+different type than the matching type in the function declaration.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0424.md b/compiler/rustc_error_codes/src/error_codes/E0424.md
index a9f6f57..a58c16b 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0424.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0424.md
@@ -21,7 +21,7 @@
 The `self` keyword can only be used inside methods, which are associated
 functions (functions defined inside of a `trait` or `impl` block) that have a
 `self` receiver as its first parameter, like `self`, `&self`, `&mut self` or
-`self: &mut Pin<Self>` (this last one is an example of an ["abitrary `self`
+`self: &mut Pin<Self>` (this last one is an example of an ["arbitrary `self`
 type"](https://github.com/rust-lang/rust/issues/44874)).
 
 Check if the associated function's parameter list should have contained a `self`
diff --git a/compiler/rustc_error_codes/src/error_codes/E0660.md b/compiler/rustc_error_codes/src/error_codes/E0660.md
index fccd1b9..26d35f2 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0660.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0660.md
@@ -9,4 +9,4 @@
 Considering that this would be a long explanation, we instead recommend you
 take a look at the [`llvm_asm`] chapter of the Unstable book:
 
-[llvm_asm]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
+[`llvm_asm`]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0661.md b/compiler/rustc_error_codes/src/error_codes/E0661.md
index f1debee..0b8ba7f 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0661.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0661.md
@@ -10,4 +10,4 @@
 Considering that this would be a long explanation, we instead recommend you
 take a look at the [`llvm_asm`] chapter of the Unstable book:
 
-[llvm_asm]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
+[`llvm_asm`]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0662.md b/compiler/rustc_error_codes/src/error_codes/E0662.md
index d4765f0..8c1bab8 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0662.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0662.md
@@ -13,4 +13,4 @@
 Considering that this would be a long explanation, we instead recommend you
 take a look at the [`llvm_asm`] chapter of the Unstable book:
 
-[llvm_asm]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
+[`llvm_asm`]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0663.md b/compiler/rustc_error_codes/src/error_codes/E0663.md
index d5a85b2..53ffd33 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0663.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0663.md
@@ -13,4 +13,4 @@
 Considering that this would be a long explanation, we instead recommend you
 take a look at the [`llvm_asm`] chapter of the Unstable book:
 
-[llvm_asm]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
+[`llvm_asm`]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0664.md b/compiler/rustc_error_codes/src/error_codes/E0664.md
index ce9c949..f8e72cd 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0664.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0664.md
@@ -13,4 +13,4 @@
 Considering that this would be a long explanation, we instead recommend you
 take a look at the [`llvm_asm`] chapter of the Unstable book:
 
-[llvm_asm]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
+[`llvm_asm`]: https://doc.rust-lang.org/stable/unstable-book/library-features/llvm-asm.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0723.md b/compiler/rustc_error_codes/src/error_codes/E0723.md
index 95d47ab..bc22442 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0723.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0723.md
@@ -3,12 +3,8 @@
 Erroneous code example:
 
 ```compile_fail,E0723
-trait T {}
-
-impl T for () {}
-
-const fn foo() -> impl T { // error: `impl Trait` in const fn is unstable
-    ()
+const fn foo<T: Copy>(_: T) { // error!
+   // ...
 }
 ```
 
@@ -18,11 +14,7 @@
 ```
 #![feature(const_fn)]
 
-trait T {}
-
-impl T for () {}
-
-const fn foo() -> impl T {
-    ()
+const fn foo<T: Copy>(_: T) { // ok!
+   // ...
 }
 ```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0777.md b/compiler/rustc_error_codes/src/error_codes/E0777.md
new file mode 100644
index 0000000..8c5c6e2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0777.md
@@ -0,0 +1,19 @@
+A literal value was used inside `#[derive]`.
+
+Erroneous code example:
+
+```compile_fail,E0777
+#[derive("Clone")] // error!
+struct Foo;
+```
+
+Only paths to traits are allowed as arguments inside `#[derive]`. You can find
+more information about the `#[derive]` attribute in the [Rust Book].
+
+
+```
+#[derive(Clone)] // ok!
+struct Foo;
+```
+
+[Rust Book]: https://doc.rust-lang.org/book/appendix-03-derivable-traits.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0778.md b/compiler/rustc_error_codes/src/error_codes/E0778.md
new file mode 100644
index 0000000..467362d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0778.md
@@ -0,0 +1,35 @@
+The `instruction_set` attribute was malformed.
+
+Erroneous code example:
+
+```compile_fail,E0778
+#![feature(isa_attribute)]
+
+#[instruction_set()] // error: expected one argument
+pub fn something() {}
+fn main() {}
+```
+
+The parenthesized `instruction_set` attribute requires the parameter to be
+specified:
+
+```
+#![feature(isa_attribute)]
+
+#[cfg_attr(target_arch="arm", instruction_set(arm::a32))]
+fn something() {}
+```
+
+or:
+
+```
+#![feature(isa_attribute)]
+
+#[cfg_attr(target_arch="arm", instruction_set(arm::t32))]
+fn something() {}
+```
+
+For more information see the [`instruction_set` attribute][isa-attribute]
+section of the Reference.
+
+[isa-attribute]: https://doc.rust-lang.org/reference/attributes/codegen.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0779.md b/compiler/rustc_error_codes/src/error_codes/E0779.md
new file mode 100644
index 0000000..146e20c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0779.md
@@ -0,0 +1,32 @@
+An unknown argument was given to the `instruction_set` attribute.
+
+Erroneous code example:
+
+```compile_fail,E0779
+#![feature(isa_attribute)]
+
+#[instruction_set(intel::x64)] // error: invalid argument
+pub fn something() {}
+fn main() {}
+```
+
+The `instruction_set` attribute only supports two arguments currently:
+
+ * arm::a32
+ * arm::t32
+
+All other arguments given to the `instruction_set` attribute will produce this
+error. Example:
+
+```
+#![feature(isa_attribute)]
+
+#[cfg_attr(target_arch="arm", instruction_set(arm::a32))] // ok!
+pub fn something() {}
+fn main() {}
+```
+
+For more information see the [`instruction_set` attribute][isa-attribute]
+section of the Reference.
+
+[isa-attribute]: https://doc.rust-lang.org/reference/attributes/codegen.html
diff --git a/compiler/rustc_error_codes/src/lib.rs b/compiler/rustc_error_codes/src/lib.rs
index 4353a29..e4a7025 100644
--- a/compiler/rustc_error_codes/src/lib.rs
+++ b/compiler/rustc_error_codes/src/lib.rs
@@ -1,3 +1,4 @@
+#![deny(invalid_codeblock_attributes)]
 //! This library is used to gather all error codes into one place,
 //! the goal being to make their maintenance easier.
 
diff --git a/compiler/rustc_errors/Cargo.toml b/compiler/rustc_errors/Cargo.toml
index e4dbb8d..5d8ff60 100644
--- a/compiler/rustc_errors/Cargo.toml
+++ b/compiler/rustc_errors/Cargo.toml
@@ -13,6 +13,7 @@
 rustc_span = { path = "../rustc_span" }
 rustc_macros = { path = "../rustc_macros" }
 rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_lint_defs = { path = "../rustc_lint_defs" }
 unicode-width = "0.1.4"
 atty = "0.2"
 termcolor = "1.0"
diff --git a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
index 265ba59..6f365c0 100644
--- a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
+++ b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
@@ -72,6 +72,7 @@
         Level::Help => AnnotationType::Help,
         // FIXME(#59346): Not sure how to map these two levels
         Level::Cancelled | Level::FailureNote => AnnotationType::Error,
+        Level::Allow => panic!("Should not call with Allow"),
     }
 }
 
@@ -143,7 +144,8 @@
                 title: Some(Annotation {
                     label: Some(&message),
                     id: code.as_ref().map(|c| match c {
-                        DiagnosticId::Error(val) | DiagnosticId::Lint(val) => val.as_str(),
+                        DiagnosticId::Error(val)
+                        | DiagnosticId::Lint { name: val, has_future_breakage: _ } => val.as_str(),
                     }),
                     annotation_type: annotation_type_for_level(*level),
                 }),
diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs
index 870f7b8..decbf03 100644
--- a/compiler/rustc_errors/src/diagnostic.rs
+++ b/compiler/rustc_errors/src/diagnostic.rs
@@ -1,10 +1,10 @@
 use crate::snippet::Style;
-use crate::Applicability;
 use crate::CodeSuggestion;
 use crate::Level;
 use crate::Substitution;
 use crate::SubstitutionPart;
 use crate::SuggestionStyle;
+use rustc_lint_defs::Applicability;
 use rustc_span::{MultiSpan, Span, DUMMY_SP};
 use std::fmt;
 
@@ -27,7 +27,7 @@
 #[derive(Clone, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
 pub enum DiagnosticId {
     Error(String),
-    Lint(String),
+    Lint { name: String, has_future_breakage: bool },
 }
 
 /// For example a note attached to an error.
@@ -107,7 +107,14 @@
         match self.level {
             Level::Bug | Level::Fatal | Level::Error | Level::FailureNote => true,
 
-            Level::Warning | Level::Note | Level::Help | Level::Cancelled => false,
+            Level::Warning | Level::Note | Level::Help | Level::Cancelled | Level::Allow => false,
+        }
+    }
+
+    pub fn has_future_breakage(&self) -> bool {
+        match self.code {
+            Some(DiagnosticId::Lint { has_future_breakage, .. }) => has_future_breakage,
+            _ => false,
         }
     }
 
@@ -121,11 +128,6 @@
         self.level == Level::Cancelled
     }
 
-    /// Set the sorting span.
-    pub fn set_sort_span(&mut self, sp: Span) {
-        self.sort_span = sp;
-    }
-
     /// Adds a span/label to be included in the resulting snippet.
     ///
     /// This is pushed onto the [`MultiSpan`] that was created when the diagnostic
@@ -535,14 +537,6 @@
         &self.message
     }
 
-    /// Used by a lint. Copies over all details *but* the "main
-    /// message".
-    pub fn copy_details_not_message(&mut self, from: &Diagnostic) {
-        self.span = from.span.clone();
-        self.code = from.code.clone();
-        self.children.extend(from.children.iter().cloned())
-    }
-
     /// Convenience function for internal use, clients should use one of the
     /// public methods above.
     pub fn sub(
diff --git a/compiler/rustc_errors/src/diagnostic_builder.rs b/compiler/rustc_errors/src/diagnostic_builder.rs
index d1ff6f7..56acdf6 100644
--- a/compiler/rustc_errors/src/diagnostic_builder.rs
+++ b/compiler/rustc_errors/src/diagnostic_builder.rs
@@ -1,5 +1,6 @@
-use crate::{Applicability, Handler, Level, StashKey};
 use crate::{Diagnostic, DiagnosticId, DiagnosticStyledString};
+use crate::{Handler, Level, StashKey};
+use rustc_lint_defs::Applicability;
 
 use rustc_span::{MultiSpan, Span};
 use std::fmt::{self, Debug};
diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs
index 98cbf98..302713a 100644
--- a/compiler/rustc_errors/src/emitter.rs
+++ b/compiler/rustc_errors/src/emitter.rs
@@ -9,14 +9,15 @@
 
 use Destination::*;
 
+use rustc_lint_defs::FutureBreakage;
 use rustc_span::source_map::SourceMap;
 use rustc_span::{MultiSpan, SourceFile, Span};
 
 use crate::snippet::{Annotation, AnnotationType, Line, MultilineAnnotation, Style, StyledString};
 use crate::styled_buffer::StyledBuffer;
-use crate::{
-    pluralize, CodeSuggestion, Diagnostic, DiagnosticId, Level, SubDiagnostic, SuggestionStyle,
-};
+use crate::{CodeSuggestion, Diagnostic, DiagnosticId, Level, SubDiagnostic, SuggestionStyle};
+
+use rustc_lint_defs::pluralize;
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sync::Lrc;
@@ -192,6 +193,8 @@
     /// other formats can, and will, simply ignore it.
     fn emit_artifact_notification(&mut self, _path: &Path, _artifact_type: &str) {}
 
+    fn emit_future_breakage_report(&mut self, _diags: Vec<(FutureBreakage, Diagnostic)>) {}
+
     /// Checks if should show explanations about "rustc --explain"
     fn should_show_explain(&self) -> bool {
         true
@@ -296,7 +299,7 @@
 
                     // Skip past non-macro entries, just in case there
                     // are some which do actually involve macros.
-                    ExpnKind::Desugaring(..) | ExpnKind::AstPass(..) => None,
+                    ExpnKind::Inlined | ExpnKind::Desugaring(..) | ExpnKind::AstPass(..) => None,
 
                     ExpnKind::Macro(macro_kind, _) => Some(macro_kind),
                 }
@@ -356,7 +359,10 @@
                     continue;
                 }
 
-                if always_backtrace {
+                if matches!(trace.kind, ExpnKind::Inlined) {
+                    new_labels
+                        .push((trace.call_site, "in the inlined copy of this code".to_string()));
+                } else if always_backtrace {
                     new_labels.push((
                         trace.def_site,
                         format!(
@@ -510,12 +516,10 @@
     fn emit_diagnostic(&mut self, _: &Diagnostic) {}
 }
 
-/// Maximum number of lines we will print for each error; arbitrary.
-pub const MAX_HIGHLIGHT_LINES: usize = 6;
 /// Maximum number of lines we will print for a multiline suggestion; arbitrary.
 ///
 /// This should be replaced with a more involved mechanism to output multiline suggestions that
-/// more closely mimmics the regular diagnostic output, where irrelevant code lines are elided.
+/// more closely mimics the regular diagnostic output, where irrelevant code lines are elided.
 pub const MAX_SUGGESTION_HIGHLIGHT_LINES: usize = 6;
 /// Maximum number of suggestions to be shown
 ///
@@ -889,7 +893,7 @@
                                                      // or the next are vertical line placeholders.
                         || (annotation.takes_space() // If either this or the next annotation is
                             && next.has_label())     // multiline start/end, move it to a new line
-                        || (annotation.has_label()   // so as not to overlap the orizontal lines.
+                        || (annotation.has_label()   // so as not to overlap the horizontal lines.
                             && next.takes_space())
                         || (annotation.takes_space() && next.takes_space())
                         || (overlaps(next, annotation, l)
diff --git a/compiler/rustc_errors/src/json.rs b/compiler/rustc_errors/src/json.rs
index 750d36d..d57beb1 100644
--- a/compiler/rustc_errors/src/json.rs
+++ b/compiler/rustc_errors/src/json.rs
@@ -13,8 +13,9 @@
 
 use crate::emitter::{Emitter, HumanReadableErrorType};
 use crate::registry::Registry;
-use crate::{Applicability, DiagnosticId};
+use crate::DiagnosticId;
 use crate::{CodeSuggestion, SubDiagnostic};
+use rustc_lint_defs::{Applicability, FutureBreakage};
 
 use rustc_data_structures::sync::Lrc;
 use rustc_span::hygiene::ExpnData;
@@ -131,15 +132,37 @@
         }
     }
 
+    fn emit_future_breakage_report(&mut self, diags: Vec<(FutureBreakage, crate::Diagnostic)>) {
+        let data: Vec<FutureBreakageItem> = diags
+            .into_iter()
+            .map(|(breakage, mut diag)| {
+                if diag.level == crate::Level::Allow {
+                    diag.level = crate::Level::Warning;
+                }
+                FutureBreakageItem {
+                    future_breakage_date: breakage.date,
+                    diagnostic: Diagnostic::from_errors_diagnostic(&diag, self),
+                }
+            })
+            .collect();
+        let report = FutureIncompatReport { future_incompat_report: data };
+        let result = if self.pretty {
+            writeln!(&mut self.dst, "{}", as_pretty_json(&report))
+        } else {
+            writeln!(&mut self.dst, "{}", as_json(&report))
+        }
+        .and_then(|_| self.dst.flush());
+        if let Err(e) = result {
+            panic!("failed to print future breakage report: {:?}", e);
+        }
+    }
+
     fn source_map(&self) -> Option<&Lrc<SourceMap>> {
         Some(&self.sm)
     }
 
     fn should_show_explain(&self) -> bool {
-        match self.json_rendered {
-            HumanReadableErrorType::Short(_) => false,
-            _ => true,
-        }
+        !matches!(self.json_rendered, HumanReadableErrorType::Short(_))
     }
 }
 
@@ -226,6 +249,17 @@
     emit: &'a str,
 }
 
+#[derive(Encodable)]
+struct FutureBreakageItem {
+    future_breakage_date: Option<&'static str>,
+    diagnostic: Diagnostic,
+}
+
+#[derive(Encodable)]
+struct FutureIncompatReport {
+    future_incompat_report: Vec<FutureBreakageItem>,
+}
+
 impl Diagnostic {
     fn from_errors_diagnostic(diag: &crate::Diagnostic, je: &JsonEmitter) -> Diagnostic {
         let sugg = diag.suggestions.iter().map(|sugg| Diagnostic {
@@ -435,7 +469,7 @@
         s.map(|s| {
             let s = match s {
                 DiagnosticId::Error(s) => s,
-                DiagnosticId::Lint(s) => s,
+                DiagnosticId::Lint { name, has_future_breakage: _ } => name,
             };
             let je_result =
                 je.registry.as_ref().map(|registry| registry.try_find_description(&s)).unwrap();
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index 2e8a4ef..593e0d9 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -21,6 +21,8 @@
 use rustc_data_structures::stable_hasher::StableHasher;
 use rustc_data_structures::sync::{self, Lock, Lrc};
 use rustc_data_structures::AtomicRef;
+use rustc_lint_defs::FutureBreakage;
+pub use rustc_lint_defs::{pluralize, Applicability};
 use rustc_span::source_map::SourceMap;
 use rustc_span::{Loc, MultiSpan, Span};
 
@@ -49,30 +51,6 @@
 #[cfg(target_arch = "x86_64")]
 rustc_data_structures::static_assert_size!(PResult<'_, bool>, 16);
 
-/// Indicates the confidence in the correctness of a suggestion.
-///
-/// All suggestions are marked with an `Applicability`. Tools use the applicability of a suggestion
-/// to determine whether it should be automatically applied or if the user should be consulted
-/// before applying the suggestion.
-#[derive(Copy, Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
-pub enum Applicability {
-    /// The suggestion is definitely what the user intended. This suggestion should be
-    /// automatically applied.
-    MachineApplicable,
-
-    /// The suggestion may be what the user intended, but it is uncertain. The suggestion should
-    /// result in valid Rust code if it is applied.
-    MaybeIncorrect,
-
-    /// The suggestion contains placeholders like `(...)` or `{ /* fields */ }`. The suggestion
-    /// cannot be applied automatically because it will not result in valid Rust code. The user
-    /// will need to fill in the placeholders.
-    HasPlaceholders,
-
-    /// The applicability of the suggestion is unknown.
-    Unspecified,
-}
-
 #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, Encodable, Decodable)]
 pub enum SuggestionStyle {
     /// Hide the suggested code when displaying this suggestion inline.
@@ -91,10 +69,7 @@
 
 impl SuggestionStyle {
     fn hide_inline(&self) -> bool {
-        match *self {
-            SuggestionStyle::ShowCode => false,
-            _ => true,
-        }
+        !matches!(*self, SuggestionStyle::ShowCode)
     }
 }
 
@@ -324,6 +299,8 @@
 
     /// The warning count, used for a recap upon finishing
     deduplicated_warn_count: usize,
+
+    future_breakage_diagnostics: Vec<Diagnostic>,
 }
 
 /// A key denoting where from a diagnostic was stashed.
@@ -437,6 +414,7 @@
                 emitted_diagnostic_codes: Default::default(),
                 emitted_diagnostics: Default::default(),
                 stashed_diagnostics: Default::default(),
+                future_breakage_diagnostics: Vec::new(),
             }),
         }
     }
@@ -506,6 +484,17 @@
         result
     }
 
+    /// Construct a builder at the `Allow` level at the given `span` and with the `msg`.
+    pub fn struct_span_allow(
+        &self,
+        span: impl Into<MultiSpan>,
+        msg: &str,
+    ) -> DiagnosticBuilder<'_> {
+        let mut result = self.struct_allow(msg);
+        result.set_span(span);
+        result
+    }
+
     /// Construct a builder at the `Warning` level at the given `span` and with the `msg`.
     /// Also include a code.
     pub fn struct_span_warn_with_code(
@@ -528,6 +517,11 @@
         result
     }
 
+    /// Construct a builder at the `Allow` level with the `msg`.
+    pub fn struct_allow(&self, msg: &str) -> DiagnosticBuilder<'_> {
+        DiagnosticBuilder::new(self, Level::Allow, msg)
+    }
+
     /// Construct a builder at the `Error` level at the given `span` and with the `msg`.
     pub fn struct_span_err(&self, span: impl Into<MultiSpan>, msg: &str) -> DiagnosticBuilder<'_> {
         let mut result = self.struct_err(msg);
@@ -696,6 +690,10 @@
         self.inner.borrow_mut().print_error_count(registry)
     }
 
+    pub fn take_future_breakage_diagnostics(&self) -> Vec<Diagnostic> {
+        std::mem::take(&mut self.inner.borrow_mut().future_breakage_diagnostics)
+    }
+
     pub fn abort_if_errors(&self) {
         self.inner.borrow_mut().abort_if_errors()
     }
@@ -726,6 +724,10 @@
         self.inner.borrow_mut().emit_artifact_notification(path, artifact_type)
     }
 
+    pub fn emit_future_breakage_report(&self, diags: Vec<(FutureBreakage, Diagnostic)>) {
+        self.inner.borrow_mut().emitter.emit_future_breakage_report(diags)
+    }
+
     pub fn delay_as_bug(&self, diagnostic: Diagnostic) {
         self.inner.borrow_mut().delay_as_bug(diagnostic)
     }
@@ -751,12 +753,23 @@
             return;
         }
 
+        if diagnostic.has_future_breakage() {
+            self.future_breakage_diagnostics.push(diagnostic.clone());
+        }
+
         if diagnostic.level == Warning && !self.flags.can_emit_warnings {
+            if diagnostic.has_future_breakage() {
+                (*TRACK_DIAGNOSTICS)(diagnostic);
+            }
             return;
         }
 
         (*TRACK_DIAGNOSTICS)(diagnostic);
 
+        if diagnostic.level == Allow {
+            return;
+        }
+
         if let Some(ref code) = diagnostic.code {
             self.emitted_diagnostic_codes.insert(code.clone());
         }
@@ -995,6 +1008,7 @@
     Help,
     Cancelled,
     FailureNote,
+    Allow,
 }
 
 impl fmt::Display for Level {
@@ -1020,7 +1034,7 @@
                 spec.set_fg(Some(Color::Cyan)).set_intense(true);
             }
             FailureNote => {}
-            Cancelled => unreachable!(),
+            Allow | Cancelled => unreachable!(),
         }
         spec
     }
@@ -1034,22 +1048,55 @@
             Help => "help",
             FailureNote => "failure-note",
             Cancelled => panic!("Shouldn't call on cancelled error"),
+            Allow => panic!("Shouldn't call on allowed error"),
         }
     }
 
     pub fn is_failure_note(&self) -> bool {
-        match *self {
-            FailureNote => true,
-            _ => false,
-        }
+        matches!(*self, FailureNote)
     }
 }
 
-#[macro_export]
-macro_rules! pluralize {
-    ($x:expr) => {
-        if $x != 1 { "s" } else { "" }
+pub fn add_elided_lifetime_in_path_suggestion(
+    source_map: &SourceMap,
+    db: &mut DiagnosticBuilder<'_>,
+    n: usize,
+    path_span: Span,
+    incl_angl_brckt: bool,
+    insertion_span: Span,
+    anon_lts: String,
+) {
+    let (replace_span, suggestion) = if incl_angl_brckt {
+        (insertion_span, anon_lts)
+    } else {
+        // When possible, prefer a suggestion that replaces the whole
+        // `Path<T>` expression with `Path<'_, T>`, rather than inserting `'_, `
+        // at a point (which makes for an ugly/confusing label)
+        if let Ok(snippet) = source_map.span_to_snippet(path_span) {
+            // But our spans can get out of whack due to macros; if the place we think
+            // we want to insert `'_` isn't even within the path expression's span, we
+            // should bail out of making any suggestion rather than panicking on a
+            // subtract-with-overflow or string-slice-out-of-bounds (!)
+            // FIXME: can we do better?
+            if insertion_span.lo().0 < path_span.lo().0 {
+                return;
+            }
+            let insertion_index = (insertion_span.lo().0 - path_span.lo().0) as usize;
+            if insertion_index > snippet.len() {
+                return;
+            }
+            let (before, after) = snippet.split_at(insertion_index);
+            (path_span, format!("{}{}{}", before, anon_lts, after))
+        } else {
+            (insertion_span, anon_lts)
+        }
     };
+    db.span_suggestion(
+        replace_span,
+        &format!("indicate the anonymous lifetime{}", pluralize!(n)),
+        suggestion,
+        Applicability::MachineApplicable,
+    );
 }
 
 // Useful type to use with `Result<>` indicate that an error has already
diff --git a/compiler/rustc_errors/src/snippet.rs b/compiler/rustc_errors/src/snippet.rs
index fae5b94..dbb2523 100644
--- a/compiler/rustc_errors/src/snippet.rs
+++ b/compiler/rustc_errors/src/snippet.rs
@@ -158,10 +158,7 @@
 
     pub fn takes_space(&self) -> bool {
         // Multiline annotations always have to keep vertical space.
-        match self.annotation_type {
-            AnnotationType::MultilineStart(_) | AnnotationType::MultilineEnd(_) => true,
-            _ => false,
-        }
+        matches!(self.annotation_type, AnnotationType::MultilineStart(_) | AnnotationType::MultilineEnd(_))
     }
 }
 
diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs
index f7651ca..b435def8 100644
--- a/compiler/rustc_expand/src/base.rs
+++ b/compiler/rustc_expand/src/base.rs
@@ -148,17 +148,6 @@
         }
     }
 
-    pub fn map_item_or<F, G>(self, mut f: F, mut or: G) -> Annotatable
-    where
-        F: FnMut(P<ast::Item>) -> P<ast::Item>,
-        G: FnMut(Annotatable) -> Annotatable,
-    {
-        match self {
-            Annotatable::Item(i) => Annotatable::Item(f(i)),
-            _ => or(self),
-        }
-    }
-
     pub fn expect_trait_item(self) -> P<ast::AssocItem> {
         match self {
             Annotatable::TraitItem(i) => i,
@@ -804,7 +793,7 @@
             allow_internal_unsafe: sess.contains_name(attrs, sym::allow_internal_unsafe),
             local_inner_macros,
             stability,
-            deprecation: attr::find_deprecation(&sess, attrs, span),
+            deprecation: attr::find_deprecation(&sess, attrs).map(|(d, _)| d),
             helper_attrs,
             edition,
             is_builtin,
@@ -1052,9 +1041,6 @@
             .chain(components.iter().map(|&s| Ident::with_dummy_span(s)))
             .collect()
     }
-    pub fn name_of(&self, st: &str) -> Symbol {
-        Symbol::intern(st)
-    }
 
     pub fn check_unused_macros(&mut self) {
         self.resolver.check_unused_macros();
diff --git a/compiler/rustc_expand/src/build.rs b/compiler/rustc_expand/src/build.rs
index a5a7ee6..30f0fc6 100644
--- a/compiler/rustc_expand/src/build.rs
+++ b/compiler/rustc_expand/src/build.rs
@@ -139,24 +139,6 @@
         ast::Lifetime { id: ast::DUMMY_NODE_ID, ident: ident.with_span_pos(span) }
     }
 
-    pub fn lifetime_def(
-        &self,
-        span: Span,
-        ident: Ident,
-        attrs: Vec<ast::Attribute>,
-        bounds: ast::GenericBounds,
-    ) -> ast::GenericParam {
-        let lifetime = self.lifetime(span, ident);
-        ast::GenericParam {
-            ident: lifetime.ident,
-            id: lifetime.id,
-            attrs: attrs.into(),
-            bounds,
-            kind: ast::GenericParamKind::Lifetime,
-            is_placeholder: false,
-        }
-    }
-
     pub fn stmt_expr(&self, expr: P<ast::Expr>) -> ast::Stmt {
         ast::Stmt {
             id: ast::DUMMY_NODE_ID,
@@ -316,7 +298,7 @@
         path: ast::Path,
         fields: Vec<ast::Field>,
     ) -> P<ast::Expr> {
-        self.expr(span, ast::ExprKind::Struct(path, fields, None))
+        self.expr(span, ast::ExprKind::Struct(path, fields, ast::StructRest::None))
     }
     pub fn expr_struct_ident(
         &self,
@@ -465,24 +447,6 @@
         self.pat_tuple_struct(span, path, vec![pat])
     }
 
-    pub fn pat_none(&self, span: Span) -> P<ast::Pat> {
-        let some = self.std_path(&[sym::option, sym::Option, sym::None]);
-        let path = self.path_global(span, some);
-        self.pat_path(span, path)
-    }
-
-    pub fn pat_ok(&self, span: Span, pat: P<ast::Pat>) -> P<ast::Pat> {
-        let some = self.std_path(&[sym::result, sym::Result, sym::Ok]);
-        let path = self.path_global(span, some);
-        self.pat_tuple_struct(span, path, vec![pat])
-    }
-
-    pub fn pat_err(&self, span: Span, pat: P<ast::Pat>) -> P<ast::Pat> {
-        let some = self.std_path(&[sym::result, sym::Result, sym::Err]);
-        let path = self.path_global(span, some);
-        self.pat_tuple_struct(span, path, vec![pat])
-    }
-
     pub fn arm(&self, span: Span, pat: P<ast::Pat>, expr: P<ast::Expr>) -> ast::Arm {
         ast::Arm {
             attrs: vec![],
@@ -514,26 +478,6 @@
         self.expr(span, ast::ExprKind::If(cond, self.block_expr(then), els))
     }
 
-    pub fn lambda_fn_decl(
-        &self,
-        span: Span,
-        fn_decl: P<ast::FnDecl>,
-        body: P<ast::Expr>,
-        fn_decl_span: Span,
-    ) -> P<ast::Expr> {
-        self.expr(
-            span,
-            ast::ExprKind::Closure(
-                ast::CaptureBy::Ref,
-                ast::Async::No,
-                ast::Movability::Movable,
-                fn_decl,
-                body,
-                fn_decl_span,
-            ),
-        )
-    }
-
     pub fn lambda(&self, span: Span, ids: Vec<Ident>, body: P<ast::Expr>) -> P<ast::Expr> {
         let fn_decl = self.fn_decl(
             ids.iter().map(|id| self.param(span, *id, self.ty(span, ast::TyKind::Infer))).collect(),
@@ -610,47 +554,6 @@
         })
     }
 
-    pub fn variant(&self, span: Span, ident: Ident, tys: Vec<P<ast::Ty>>) -> ast::Variant {
-        let vis_span = span.shrink_to_lo();
-        let fields: Vec<_> = tys
-            .into_iter()
-            .map(|ty| ast::StructField {
-                span: ty.span,
-                ty,
-                ident: None,
-                vis: ast::Visibility {
-                    span: vis_span,
-                    kind: ast::VisibilityKind::Inherited,
-                    tokens: None,
-                },
-                attrs: Vec::new(),
-                id: ast::DUMMY_NODE_ID,
-                is_placeholder: false,
-            })
-            .collect();
-
-        let vdata = if fields.is_empty() {
-            ast::VariantData::Unit(ast::DUMMY_NODE_ID)
-        } else {
-            ast::VariantData::Tuple(fields, ast::DUMMY_NODE_ID)
-        };
-
-        ast::Variant {
-            attrs: Vec::new(),
-            data: vdata,
-            disr_expr: None,
-            id: ast::DUMMY_NODE_ID,
-            ident,
-            vis: ast::Visibility {
-                span: vis_span,
-                kind: ast::VisibilityKind::Inherited,
-                tokens: None,
-            },
-            span,
-            is_placeholder: false,
-        }
-    }
-
     pub fn item_static(
         &self,
         span: Span,
diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs
index dd087ab..cccbdf7 100644
--- a/compiler/rustc_expand/src/config.rs
+++ b/compiler/rustc_expand/src/config.rs
@@ -3,6 +3,8 @@
 use rustc_ast::attr::HasAttrs;
 use rustc_ast::mut_visit::*;
 use rustc_ast::ptr::P;
+use rustc_ast::token::{DelimToken, Token, TokenKind};
+use rustc_ast::tokenstream::{DelimSpan, LazyTokenStream, Spacing, TokenStream, TokenTree};
 use rustc_ast::{self as ast, AttrItem, Attribute, MetaItem};
 use rustc_attr as attr;
 use rustc_data_structures::fx::FxHashMap;
@@ -289,8 +291,36 @@
         expanded_attrs
             .into_iter()
             .flat_map(|(item, span)| {
-                let attr = attr::mk_attr_from_item(attr.style, item, span);
-                self.process_cfg_attr(attr)
+                let orig_tokens = attr.tokens();
+
+                // We are taking an attribute of the form `#[cfg_attr(pred, attr)]`
+                // and producing an attribute of the form `#[attr]`. We
+                // have captured tokens for `attr` itself, but we need to
+                // synthesize tokens for the wrapper `#` and `[]`, which
+                // we do below.
+
+                // Use the `#` in `#[cfg_attr(pred, attr)]` as the `#` token
+                // for `attr` when we expand it to `#[attr]`
+                let pound_token = orig_tokens.trees().next().unwrap();
+                if !matches!(pound_token, TokenTree::Token(Token { kind: TokenKind::Pound, .. })) {
+                    panic!("Bad tokens for attribute {:?}", attr);
+                }
+                // We don't really have a good span to use for the synthesized `[]`
+                // in `#[attr]`, so just use the span of the `#` token.
+                let bracket_group = TokenTree::Delimited(
+                    DelimSpan::from_single(pound_token.span()),
+                    DelimToken::Bracket,
+                    item.tokens
+                        .as_ref()
+                        .unwrap_or_else(|| panic!("Missing tokens for {:?}", item))
+                        .create_token_stream(),
+                );
+                let tokens = Some(LazyTokenStream::new(TokenStream::new(vec![
+                    (pound_token, Spacing::Alone),
+                    (bracket_group, Spacing::Alone),
+                ])));
+
+                self.process_cfg_attr(attr::mk_attr_from_item(item, tokens, attr.style, span))
             })
             .collect()
     }
@@ -515,11 +545,6 @@
         noop_flat_map_assoc_item(configure!(self, item), self)
     }
 
-    fn visit_mac(&mut self, _mac: &mut ast::MacCall) {
-        // Don't configure interpolated AST (cf. issue #34171).
-        // Interpolated AST will get configured once the surrounding tokens are parsed.
-    }
-
     fn visit_pat(&mut self, pat: &mut P<ast::Pat>) {
         self.configure_pat(pat);
         noop_visit_pat(pat, self)
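
The block added to `config.rs` above rebuilds the outer `#[...]` wrapper around the captured attribute tokens when `#[cfg_attr(pred, attr)]` is rewritten to `#[attr]`. A toy model of that wrapping step, with simplified stand-ins for the compiler's token-tree and delimiter types:

#[derive(Clone, Debug, PartialEq)]
enum Tok {
    Pound,
    Ident(&'static str),
    // Stand-in for a bracket-delimited group, i.e. the `[...]` of an attribute.
    Bracketed(Vec<Tok>),
}

// Reuse the `#` token from the original `#[cfg_attr(..)]` and wrap the captured
// attribute body in a synthesized `[...]` group, mirroring the token stream
// built in the hunk above.
fn synthesize_attr_tokens(pound: Tok, attr_body: Vec<Tok>) -> Vec<Tok> {
    assert!(matches!(pound, Tok::Pound), "bad tokens for attribute");
    vec![pound, Tok::Bracketed(attr_body)]
}

fn main() {
    let tokens = synthesize_attr_tokens(Tok::Pound, vec![Tok::Ident("derive")]);
    assert_eq!(tokens, vec![Tok::Pound, Tok::Bracketed(vec![Tok::Ident("derive")])]);
}
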
diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs
index e5cfb86..1b31bd6 100644
--- a/compiler/rustc_expand/src/expand.rs
+++ b/compiler/rustc_expand/src/expand.rs
@@ -18,9 +18,9 @@
 use rustc_attr::{self as attr, is_builtin_attr, HasAttrs};
 use rustc_data_structures::map_in_place::MapInPlace;
 use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_errors::{Applicability, PResult};
+use rustc_errors::{struct_span_err, Applicability, PResult};
 use rustc_feature::Features;
-use rustc_parse::parser::Parser;
+use rustc_parse::parser::{AttemptLocalParseRecovery, Parser};
 use rustc_parse::validate_attr;
 use rustc_session::lint::builtin::UNUSED_DOC_COMMENTS;
 use rustc_session::lint::BuiltinLintDiagnostics;
@@ -542,7 +542,7 @@
     fn error_derive_forbidden_on_non_adt(&self, derives: &[Path], item: &Annotatable) {
         let attr = self.cx.sess.find_by_name(item.attrs(), sym::derive);
         let span = attr.map_or(item.span(), |attr| attr.span);
-        let mut err = rustc_errors::struct_span_err!(
+        let mut err = struct_span_err!(
             self.cx.sess,
             span,
             E0774,
@@ -850,8 +850,6 @@
 
                 visit::walk_item(self, item);
             }
-
-            fn visit_mac(&mut self, _: &'ast ast::MacCall) {}
         }
 
         if !self.cx.ecfg.proc_macro_hygiene() {
@@ -921,7 +919,7 @@
             let mut stmts = SmallVec::new();
             // Won't make progress on a `}`.
             while this.token != token::Eof && this.token != token::CloseDelim(token::Brace) {
-                if let Some(stmt) = this.parse_full_stmt()? {
+                if let Some(stmt) = this.parse_full_stmt(AttemptLocalParseRecovery::Yes)? {
                     stmts.push(stmt);
                 }
             }
@@ -1357,7 +1355,8 @@
         // we'll expand attributes on expressions separately
         if !stmt.is_expr() {
             let (attr, derives, after_derive) = if stmt.is_item() {
-                self.classify_item(&mut stmt)
+                // FIXME: Handle custom attributes on statements (#15701)
+                (None, vec![], false)
             } else {
                 // ignore derives on non-item statements so it falls through
                 // to the unused-attributes lint
@@ -1435,9 +1434,9 @@
                 item.attrs = attrs;
                 self.check_attributes(&item.attrs);
                 item.and_then(|item| match item.kind {
-                    ItemKind::MacCall(mac) => self
-                        .collect(AstFragmentKind::Items, InvocationKind::Bang { mac, span })
-                        .make_items(),
+                    ItemKind::MacCall(mac) => {
+                        self.collect_bang(mac, span, AstFragmentKind::Items).make_items()
+                    }
                     _ => unreachable!(),
                 })
             }
@@ -1777,11 +1776,10 @@
 
             let meta = attr::mk_list_item(Ident::with_dummy_span(sym::doc), items);
             *at = ast::Attribute {
-                kind: ast::AttrKind::Normal(AttrItem {
-                    path: meta.path,
-                    args: meta.kind.mac_args(meta.span),
-                    tokens: None,
-                }),
+                kind: ast::AttrKind::Normal(
+                    AttrItem { path: meta.path, args: meta.kind.mac_args(meta.span), tokens: None },
+                    None,
+                ),
                 span: at.span,
                 id: at.id,
                 style: at.style,
diff --git a/compiler/rustc_expand/src/mbe.rs b/compiler/rustc_expand/src/mbe.rs
index da69b32..cbc4d14 100644
--- a/compiler/rustc_expand/src/mbe.rs
+++ b/compiler/rustc_expand/src/mbe.rs
@@ -102,10 +102,7 @@
 
     /// Returns `true` if the given token tree is delimited.
     fn is_delimited(&self) -> bool {
-        match *self {
-            TokenTree::Delimited(..) => true,
-            _ => false,
-        }
+        matches!(*self, TokenTree::Delimited(..))
     }
 
     /// Returns `true` if the given token tree is a token of the given kind.
diff --git a/compiler/rustc_expand/src/mbe/macro_check.rs b/compiler/rustc_expand/src/mbe/macro_check.rs
index 6b419da..91add4f 100644
--- a/compiler/rustc_expand/src/mbe/macro_check.rs
+++ b/compiler/rustc_expand/src/mbe/macro_check.rs
@@ -134,10 +134,7 @@
 impl<'a, T> Stack<'a, T> {
     /// Returns whether a stack is empty.
     fn is_empty(&self) -> bool {
-        match *self {
-            Stack::Empty => true,
-            _ => false,
-        }
+        matches!(*self, Stack::Empty)
     }
 
     /// Returns a new stack with an element of top.
diff --git a/compiler/rustc_expand/src/mbe/macro_rules.rs b/compiler/rustc_expand/src/mbe/macro_rules.rs
index 48a622d..d928fb6 100644
--- a/compiler/rustc_expand/src/mbe/macro_rules.rs
+++ b/compiler/rustc_expand/src/mbe/macro_rules.rs
@@ -288,7 +288,8 @@
                 // Replace all the tokens for the corresponding positions in the macro, to maintain
                 // proper positions in error reporting, while maintaining the macro_backtrace.
                 if rhs_spans.len() == tts.len() {
-                    tts = tts.map_enumerated(|i, mut tt| {
+                    tts = tts.map_enumerated(|i, tt| {
+                        let mut tt = tt.clone();
                         let mut sp = rhs_spans[i];
                         sp = sp.with_ctxt(tt.span().ctxt());
                         tt.set_span(sp);
@@ -1035,17 +1036,16 @@
 /// a fragment specifier (indeed, these fragments can be followed by
 /// ANYTHING without fear of future compatibility hazards).
 fn frag_can_be_followed_by_any(kind: NonterminalKind) -> bool {
-    match kind {
+    matches!(
+        kind,
         NonterminalKind::Item           // always terminated by `}` or `;`
         | NonterminalKind::Block        // exactly one token tree
         | NonterminalKind::Ident        // exactly one token tree
         | NonterminalKind::Literal      // exactly one token tree
         | NonterminalKind::Meta         // exactly one token tree
         | NonterminalKind::Lifetime     // exactly one token tree
-        | NonterminalKind::TT => true,  // exactly one token tree
-
-        _ => false,
-    }
+        | NonterminalKind::TT // exactly one token tree
+    )
 }
 
 enum IsInFollow {
diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs
index 0e5c5fe..dde65d9 100644
--- a/compiler/rustc_expand/src/mbe/transcribe.rs
+++ b/compiler/rustc_expand/src/mbe/transcribe.rs
@@ -5,7 +5,6 @@
 use rustc_ast::mut_visit::{self, MutVisitor};
 use rustc_ast::token::{self, NtTT, Token};
 use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree, TreeAndSpacing};
-use rustc_ast::MacCall;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sync::Lrc;
 use rustc_errors::{pluralize, PResult};
@@ -20,12 +19,12 @@
 struct Marker(ExpnId, Transparency);
 
 impl MutVisitor for Marker {
-    fn visit_span(&mut self, span: &mut Span) {
-        *span = span.apply_mark(self.0, self.1)
+    fn token_visiting_enabled(&self) -> bool {
+        true
     }
 
-    fn visit_mac(&mut self, mac: &mut MacCall) {
-        mut_visit::noop_visit_mac(mac, self)
+    fn visit_span(&mut self, span: &mut Span) {
+        *span = span.apply_mark(self.0, self.1)
     }
 }
 
@@ -232,17 +231,19 @@
                 // the meta-var.
                 let ident = MacroRulesNormalizedIdent::new(orignal_ident);
                 if let Some(cur_matched) = lookup_cur_matched(ident, interp, &repeats) {
-                    if let MatchedNonterminal(ref nt) = cur_matched {
-                        // FIXME #2887: why do we apply a mark when matching a token tree meta-var
-                        // (e.g. `$x:tt`), but not when we are matching any other type of token
-                        // tree?
-                        if let NtTT(ref tt) = **nt {
-                            result.push(tt.clone().into());
+                    if let MatchedNonterminal(nt) = cur_matched {
+                        let token = if let NtTT(tt) = &**nt {
+                            // `tt`s are emitted into the output stream directly as "raw tokens",
+                            // without wrapping them into groups.
+                            tt.clone()
                         } else {
+                            // Other variables are emitted into the output stream as groups with
+                            // `Delimiter::None` to maintain parsing priorities.
+                            // `Interpolated` is currently used for such groups in the rustc parser.
                             marker.visit_span(&mut sp);
-                            let token = TokenTree::token(token::Interpolated(nt.clone()), sp);
-                            result.push(token.into());
-                        }
+                            TokenTree::token(token::Interpolated(nt.clone()), sp)
+                        };
+                        result.push(token.into());
                     } else {
                         // We were unable to descend far enough. This is an error.
                         return Err(cx.struct_span_err(
@@ -275,7 +276,7 @@
             // preserve syntax context.
             mbe::TokenTree::Token(token) => {
                 let mut tt = TokenTree::Token(token);
-                marker.visit_tt(&mut tt);
+                mut_visit::visit_tt(&mut tt, &mut marker);
                 result.push(tt.into());
             }
 
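The comments added in `transcribe.rs` explain that a matched `$x:tt` fragment is pushed into the output stream as a raw token tree, while every other fragment kind is wrapped in an invisible group so the parser keeps its original precedence. A rough, self-contained sketch of that branch, with toy types standing in for the compiler's nonterminals and token trees:

#[derive(Clone)]
enum Matched {
    // A `$x:tt` capture: already a single token tree.
    Tt(String),
    // Any other fragment kind (`expr`, `pat`, ...), kept as an opaque unit.
    Other(String),
}

#[derive(Debug)]
enum OutTree {
    Raw(String),
    // Stand-in for a `Delimiter::None` group that preserves parsing priorities.
    InvisibleGroup(String),
}

fn splice(matched: &Matched, result: &mut Vec<OutTree>) {
    let tree = match matched {
        Matched::Tt(tt) => OutTree::Raw(tt.clone()),
        Matched::Other(nt) => OutTree::InvisibleGroup(nt.clone()),
    };
    result.push(tree);
}
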
diff --git a/compiler/rustc_expand/src/mut_visit/tests.rs b/compiler/rustc_expand/src/mut_visit/tests.rs
index 38ff594..be0300b 100644
--- a/compiler/rustc_expand/src/mut_visit/tests.rs
+++ b/compiler/rustc_expand/src/mut_visit/tests.rs
@@ -1,7 +1,7 @@
 use crate::tests::{matches_codepattern, string_to_crate};
 
 use rustc_ast as ast;
-use rustc_ast::mut_visit::{self, MutVisitor};
+use rustc_ast::mut_visit::MutVisitor;
 use rustc_ast_pretty::pprust;
 use rustc_span::symbol::Ident;
 use rustc_span::with_default_session_globals;
@@ -15,12 +15,12 @@
 struct ToZzIdentMutVisitor;
 
 impl MutVisitor for ToZzIdentMutVisitor {
+    fn token_visiting_enabled(&self) -> bool {
+        true
+    }
     fn visit_ident(&mut self, ident: &mut Ident) {
         *ident = Ident::from_str("zz");
     }
-    fn visit_mac(&mut self, mac: &mut ast::MacCall) {
-        mut_visit::noop_visit_mac(mac, self)
-    }
 }
 
 // Maybe add to `expand.rs`.
diff --git a/compiler/rustc_expand/src/placeholders.rs b/compiler/rustc_expand/src/placeholders.rs
index 4c9271a..f0e5826 100644
--- a/compiler/rustc_expand/src/placeholders.rs
+++ b/compiler/rustc_expand/src/placeholders.rs
@@ -310,8 +310,43 @@
         };
 
         if style == ast::MacStmtStyle::Semicolon {
+            // Implement the proposal described in
+            // https://github.com/rust-lang/rust/issues/61733#issuecomment-509626449
+            //
+            // The macro invocation expands to the list of statements. If the
+            // list of statements is empty, then 'parse' the trailing semicolon
+            // on the original invocation as an empty statement. That is:
+            //
+            // `empty();` is parsed as a single `StmtKind::Empty`
+            //
+            // If the list of statements is non-empty, see if the final
+            // statement already has a trailing semicolon.
+            //
+            // If it doesn't have a semicolon, then 'parse' the trailing
+            // semicolon from the invocation as part of the final statement,
+            // using `stmt.add_trailing_semicolon()`
+            //
+            // If it does have a semicolon, then 'parse' the trailing semicolon
+            // from the invocation as a new StmtKind::Empty
+
+            // FIXME: We will need to preserve the original semicolon token and
+            // span as part of #15701
+            let empty_stmt = ast::Stmt {
+                id: ast::DUMMY_NODE_ID,
+                kind: ast::StmtKind::Empty,
+                span: DUMMY_SP,
+                tokens: None,
+            };
+
             if let Some(stmt) = stmts.pop() {
-                stmts.push(stmt.add_trailing_semicolon());
+                if stmt.has_trailing_semicolon() {
+                    stmts.push(stmt);
+                    stmts.push(empty_stmt);
+                } else {
+                    stmts.push(stmt.add_trailing_semicolon());
+                }
+            } else {
+                stmts.push(empty_stmt);
             }
         }
 
@@ -345,13 +380,9 @@
 
     fn visit_mod(&mut self, module: &mut ast::Mod) {
         noop_visit_mod(module, self);
-        module.items.retain(|item| match item.kind {
-            ast::ItemKind::MacCall(_) if !self.cx.ecfg.keep_macs => false, // remove macro definitions
-            _ => true,
-        });
-    }
-
-    fn visit_mac(&mut self, _mac: &mut ast::MacCall) {
-        // Do nothing.
+        // remove macro definitions
+        module.items.retain(
+            |item| !matches!(item.kind, ast::ItemKind::MacCall(_) if !self.cx.ecfg.keep_macs),
+        );
     }
 }
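
The long comment block added to `placeholders.rs` describes where the trailing `;` of a `mac!();` statement ends up after expansion. A self-contained sketch of that decision, with a simplified statement type standing in for `ast::Stmt` (the real code also tracks spans, node ids, and captured tokens):

#[derive(Debug, PartialEq)]
enum Stmt {
    Expr(&'static str), // expression statement without a trailing `;`
    Semi(&'static str), // statement that already ends in `;`
    Empty,              // a lone `;`
}

// Decide where the `;` that followed the macro invocation itself ends up:
// attached to the last expanded statement, or as a separate empty statement.
fn attach_trailing_semicolon(mut stmts: Vec<Stmt>) -> Vec<Stmt> {
    match stmts.pop() {
        // Last statement has no semicolon yet: the invocation's `;` joins it.
        Some(Stmt::Expr(e)) => stmts.push(Stmt::Semi(e)),
        // Last statement already ends in `;`: the invocation's `;` becomes
        // a new empty statement.
        Some(last) => {
            stmts.push(last);
            stmts.push(Stmt::Empty);
        }
        // The expansion was empty: the `;` is parsed as a single empty statement.
        None => stmts.push(Stmt::Empty),
    }
    stmts
}

fn main() {
    // `empty!();` with an empty expansion becomes a single empty statement.
    assert_eq!(attach_trailing_semicolon(vec![]), vec![Stmt::Empty]);
}
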
diff --git a/compiler/rustc_expand/src/proc_macro.rs b/compiler/rustc_expand/src/proc_macro.rs
index 94b3fcf..4c95f19 100644
--- a/compiler/rustc_expand/src/proc_macro.rs
+++ b/compiler/rustc_expand/src/proc_macro.rs
@@ -5,7 +5,8 @@
 use rustc_ast::tokenstream::{TokenStream, TokenTree};
 use rustc_ast::{self as ast, *};
 use rustc_data_structures::sync::Lrc;
-use rustc_errors::{Applicability, ErrorReported};
+use rustc_errors::{struct_span_err, Applicability, ErrorReported};
+use rustc_lexer::is_ident;
 use rustc_parse::nt_to_tokenstream;
 use rustc_span::symbol::sym;
 use rustc_span::{Span, DUMMY_SP};
@@ -182,9 +183,22 @@
             .filter_map(|nmi| match nmi {
                 NestedMetaItem::Literal(lit) => {
                     error_reported_filter_map = true;
-                    cx.struct_span_err(lit.span, "expected path to a trait, found literal")
-                        .help("for example, write `#[derive(Debug)]` for `Debug`")
-                        .emit();
+                    let mut err = struct_span_err!(
+                        cx.sess,
+                        lit.span,
+                        E0777,
+                        "expected path to a trait, found literal",
+                    );
+                    let token = lit.token.to_string();
+                    if token.starts_with('"')
+                        && token.len() > 2
+                        && is_ident(&token[1..token.len() - 1])
+                    {
+                        err.help(&format!("try using `#[derive({})]`", &token[1..token.len() - 1]));
+                    } else {
+                        err.help("for example, write `#[derive(Debug)]` for `Debug`");
+                    }
+                    err.emit();
                     None
                 }
                 NestedMetaItem::MetaItem(mi) => Some(mi),
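
The `proc_macro.rs` change upgrades the "expected path to a trait, found literal" error to E0777 and, when the literal is just a quoted identifier, suggests dropping the quotes. A rough approximation of that check, with a hypothetical helper and a simplified identifier test in place of `rustc_lexer::is_ident`:

// Hypothetical helper mirroring the suggestion logic above; `token` is the
// literal as it was written, quotes included (e.g. `"Debug"`).
fn derive_help(token: &str) -> String {
    // Simplified stand-in for `rustc_lexer::is_ident`.
    let is_ident = |s: &str| {
        let mut chars = s.chars();
        matches!(chars.next(), Some(c) if c.is_alphabetic() || c == '_')
            && chars.all(|c| c.is_alphanumeric() || c == '_')
    };
    if token.starts_with('"') && token.len() > 2 && is_ident(&token[1..token.len() - 1]) {
        format!("try using `#[derive({})]`", &token[1..token.len() - 1])
    } else {
        "for example, write `#[derive(Debug)]` for `Debug`".to_string()
    }
}

For an input of `"Debug"` (quotes included) this yields the `try using \`#[derive(Debug)]\`` help text shown in the hunk; anything else falls back to the generic example.
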
diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs
index e2492ef..4401ec0 100644
--- a/compiler/rustc_feature/src/accepted.rs
+++ b/compiler/rustc_feature/src/accepted.rs
@@ -270,6 +270,9 @@
     (accepted, track_caller, "1.46.0", Some(47809), None),
     /// Allows `#[doc(alias = "...")]`.
     (accepted, doc_alias, "1.48.0", Some(50146), None),
+    /// Allows patterns with concurrent by-move and by-ref bindings.
+    /// For example, you can write `Foo(a, ref b)` where `a` is by-move and `b` is by-ref.
+    (accepted, move_ref_pattern, "1.48.0", Some(68354), None),
 
     // -------------------------------------------------------------------------
     // feature-group-end: accepted features
diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs
index 060efd2..0df67b6 100644
--- a/compiler/rustc_feature/src/active.rs
+++ b/compiler/rustc_feature/src/active.rs
@@ -210,6 +210,11 @@
     /// it is not on path for eventual stabilization).
     (active, no_niche, "1.42.0", None, None),
 
+    /// Allows using `#[rustc_allow_const_fn_unstable]`.
+    /// This is an attribute on `const fn` for the same
+    /// purpose as `#[allow_internal_unstable]`.
+    (active, rustc_allow_const_fn_unstable, "1.49.0", Some(69399), None),
+
     // no-tracking-issue-end
 
     // -------------------------------------------------------------------------
@@ -238,6 +243,7 @@
     (active, rtm_target_feature, "1.35.0", Some(44839), None),
     (active, f16c_target_feature, "1.36.0", Some(44839), None),
     (active, riscv_target_feature, "1.45.0", Some(44839), None),
+    (active, ermsb_target_feature, "1.49.0", Some(44839), None),
 
     // -------------------------------------------------------------------------
     // feature-group-end: actual feature gates (target features)
@@ -526,10 +532,6 @@
     /// For example, you can write `x @ Some(y)`.
     (active, bindings_after_at, "1.41.0", Some(65490), None),
 
-    /// Allows patterns with concurrent by-move and by-ref bindings.
-    /// For example, you can write `Foo(a, ref b)` where `a` is by-move and `b` is by-ref.
-    (active, move_ref_pattern, "1.42.0", Some(68354), None),
-
     /// Allows `impl const Trait for T` syntax.
     (active, const_trait_impl, "1.42.0", Some(67792), None),
 
@@ -581,7 +583,7 @@
     /// Allows `if let` guard in match arms.
     (active, if_let_guard, "1.47.0", Some(51114), None),
 
-    /// Allows non trivial generic constants which have to be manually propageted upwards.
+    /// Allows non-trivial generic constants which have to be manually propagated upwards.
     (active, const_evaluatable_checked, "1.48.0", Some(76560), None),
 
     /// Allows basic arithmetic on floating point types in a `const fn`.
@@ -593,6 +595,27 @@
     /// Allows to use the `#[cmse_nonsecure_entry]` attribute.
     (active, cmse_nonsecure_entry, "1.48.0", Some(75835), None),
 
+    /// Allows rustc to inject a default alloc_error_handler
+    (active, default_alloc_error_handler, "1.48.0", Some(66741), None),
+
+    /// Allows argument and return position `impl Trait` in a `const fn`.
+    (active, const_impl_trait, "1.48.0", Some(77463), None),
+
+    /// Allows `#[instruction_set(_)]` attribute
+    (active, isa_attribute, "1.48.0", Some(74727), None),
+
+    /// Allow anonymous constants from an inline `const` block
+    (active, inline_const, "1.49.0", Some(76001), None),
+
+    /// Allows unsized fn parameters.
+    (active, unsized_fn_params, "1.49.0", Some(48055), None),
+
+    /// Allows the use of destructuring assignments.
+    (active, destructuring_assignment, "1.49.0", Some(71126), None),
+
+    /// Enables `#[cfg(panic = "...")]` config key.
+    (active, cfg_panic, "1.49.0", Some(77443), None),
+
     // -------------------------------------------------------------------------
     // feature-group-end: actual feature gates
     // -------------------------------------------------------------------------
@@ -613,6 +636,9 @@
     sym::const_trait_bound_opt_out,
     sym::lazy_normalization_consts,
     sym::specialization,
+    sym::inline_const,
+    sym::repr128,
+    sym::unsized_locals,
 ];
 
 /// Some features are not allowed to be used together at the same time, if
diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs
index b7e113e..5c5cf60 100644
--- a/compiler/rustc_feature/src/builtin_attrs.rs
+++ b/compiler/rustc_feature/src/builtin_attrs.rs
@@ -33,6 +33,7 @@
     ),
     (sym::sanitize, sym::cfg_sanitize, cfg_fn!(cfg_sanitize)),
     (sym::version, sym::cfg_version, cfg_fn!(cfg_version)),
+    (sym::panic, sym::cfg_panic, cfg_fn!(cfg_panic)),
 ];
 
 /// Find a gated cfg determined by the `pred`icate which is given the cfg's name.
@@ -83,10 +84,7 @@
 
 impl AttributeGate {
     fn is_deprecated(&self) -> bool {
-        match *self {
-            Self::Gated(Stability::Deprecated(_, _), ..) => true,
-            _ => false,
-        }
+        matches!(*self, Self::Gated(Stability::Deprecated(_, _), ..))
     }
 }
 
@@ -336,6 +334,8 @@
         optimize, AssumedUsed, template!(List: "size|speed"), optimize_attribute,
         experimental!(optimize),
     ),
+    // RFC 2867
+    gated!(instruction_set, AssumedUsed, template!(List: "set"), isa_attribute, experimental!(instruction_set)),
 
     gated!(ffi_returns_twice, AssumedUsed, template!(Word), experimental!(ffi_returns_twice)),
     gated!(ffi_pure, AssumedUsed, template!(Word), experimental!(ffi_pure)),
@@ -378,6 +378,10 @@
         "allow_internal_unstable side-steps feature gating and stability checks",
     ),
     gated!(
+        rustc_allow_const_fn_unstable, AssumedUsed, template!(Word, List: "feat1, feat2, ..."),
+        "rustc_allow_const_fn_unstable side-steps feature gating and stability checks"
+    ),
+    gated!(
         allow_internal_unsafe, Normal, template!(Word),
         "allow_internal_unsafe side-steps the unsafe_code lint",
     ),
@@ -596,7 +600,7 @@
     BUILTIN_ATTRIBUTE_MAP.get(&name).is_some()
 }
 
-pub static BUILTIN_ATTRIBUTE_MAP: SyncLazy<FxHashMap<Symbol, &'static BuiltinAttribute>> =
+pub static BUILTIN_ATTRIBUTE_MAP: SyncLazy<FxHashMap<Symbol, &BuiltinAttribute>> =
     SyncLazy::new(|| {
         let mut map = FxHashMap::default();
         for attr in BUILTIN_ATTRIBUTES.iter() {
diff --git a/compiler/rustc_fs_util/src/lib.rs b/compiler/rustc_fs_util/src/lib.rs
index 289b9f3..7742961 100644
--- a/compiler/rustc_fs_util/src/lib.rs
+++ b/compiler/rustc_fs_util/src/lib.rs
@@ -75,33 +75,6 @@
     }
 }
 
-#[derive(Debug)]
-pub enum RenameOrCopyRemove {
-    Rename,
-    CopyRemove,
-}
-
-/// Rename `p` into `q`, preferring to use `rename` if possible.
-/// If `rename` fails (rename may fail for reasons such as crossing
-/// filesystem), fallback to copy & remove
-pub fn rename_or_copy_remove<P: AsRef<Path>, Q: AsRef<Path>>(
-    p: P,
-    q: Q,
-) -> io::Result<RenameOrCopyRemove> {
-    let p = p.as_ref();
-    let q = q.as_ref();
-    match fs::rename(p, q) {
-        Ok(()) => Ok(RenameOrCopyRemove::Rename),
-        Err(_) => match fs::copy(p, q) {
-            Ok(_) => {
-                fs::remove_file(p)?;
-                Ok(RenameOrCopyRemove::CopyRemove)
-            }
-            Err(e) => Err(e),
-        },
-    }
-}
-
 #[cfg(unix)]
 pub fn path_to_c_string(p: &Path) -> CString {
     use std::ffi::OsStr;
diff --git a/compiler/rustc_graphviz/src/lib.rs b/compiler/rustc_graphviz/src/lib.rs
index 76e33be..9653ff0 100644
--- a/compiler/rustc_graphviz/src/lib.rs
+++ b/compiler/rustc_graphviz/src/lib.rs
@@ -643,6 +643,7 @@
     }
     if options.contains(&RenderOption::DarkTheme) {
         graph_attrs.push(r#"bgcolor="black""#);
+        graph_attrs.push(r#"fontcolor="white""#);
         content_attrs.push(r#"color="white""#);
         content_attrs.push(r#"fontcolor="white""#);
     }
@@ -653,13 +654,13 @@
         writeln!(w, r#"    edge[{}];"#, content_attrs_str)?;
     }
 
+    let mut text = Vec::new();
     for n in g.nodes().iter() {
         write!(w, "    ")?;
         let id = g.node_id(n);
 
         let escaped = &g.node_label(n).to_dot_string();
 
-        let mut text = Vec::new();
         write!(text, "{}", id.as_slice()).unwrap();
 
         if !options.contains(&RenderOption::NoNodeLabels) {
@@ -677,6 +678,8 @@
 
         writeln!(text, ";").unwrap();
         w.write_all(&text[..])?;
+
+        text.clear();
     }
 
     for e in g.edges().iter() {
@@ -687,7 +690,6 @@
         let source_id = g.node_id(&source);
         let target_id = g.node_id(&target);
 
-        let mut text = Vec::new();
         write!(text, "{} -> {}", source_id.as_slice(), target_id.as_slice()).unwrap();
 
         if !options.contains(&RenderOption::NoEdgeLabels) {
@@ -701,6 +703,8 @@
 
         writeln!(text, ";").unwrap();
         w.write_all(&text[..])?;
+
+        text.clear();
     }
 
     writeln!(w, "}}")
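
The graphviz rendering change above hoists the per-node scratch buffer out of both loops and clears it at the end of each iteration, so one allocation is reused instead of recreated for every node and edge. The same pattern in isolation:

use std::io::{self, Write};

fn render_names(names: &[&str], mut out: impl Write) -> io::Result<()> {
    // One buffer shared across iterations; `clear` keeps its capacity.
    let mut text = Vec::new();
    for name in names {
        writeln!(text, "    {};", name).unwrap();
        out.write_all(&text)?;
        text.clear();
    }
    Ok(())
}
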
diff --git a/compiler/rustc_hir/src/def.rs b/compiler/rustc_hir/src/def.rs
index 96fde48..298cfcc 100644
--- a/compiler/rustc_hir/src/def.rs
+++ b/compiler/rustc_hir/src/def.rs
@@ -206,8 +206,10 @@
     /// ```rust
     /// impl Foo { fn test() -> [u8; std::mem::size_of::<Self>()] {} }
     /// ```
+    /// We do however allow `Self` in repeat expressions even if it is generic to not break code
+    /// which already works on stable while causing the `const_evaluatable_unchecked` future compat lint.
     ///
-    /// FIXME(lazy_normalization_consts): Remove this bodge once this feature is stable.
+    /// FIXME(lazy_normalization_consts): Remove this bodge once that feature is stable.
     SelfTy(Option<DefId> /* trait */, Option<(DefId, bool)> /* impl */),
     ToolMod, // e.g., `rustfmt` in `#[rustfmt::skip]`
 
@@ -341,9 +343,7 @@
 
     /// Returns an iterator over the items which are `Some`.
     pub fn present_items(self) -> impl Iterator<Item = T> {
-        use std::iter::once;
-
-        once(self.type_ns).chain(once(self.value_ns)).chain(once(self.macro_ns)).filter_map(|it| it)
+        IntoIter::new([self.type_ns, self.value_ns, self.macro_ns]).filter_map(|it| it)
     }
 }
 
@@ -484,4 +484,9 @@
     pub fn matches_ns(&self, ns: Namespace) -> bool {
         self.ns().map_or(true, |actual_ns| actual_ns == ns)
     }
+
+    /// Returns whether such a resolved path can occur in a tuple struct/variant pattern
+    pub fn expected_in_tuple_struct_pat(&self) -> bool {
+        matches!(self, Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) | Res::SelfCtor(..))
+    }
 }
diff --git a/compiler/rustc_hir/src/definitions.rs b/compiler/rustc_hir/src/definitions.rs
index afefde0..d5ade86 100644
--- a/compiler/rustc_hir/src/definitions.rs
+++ b/compiler/rustc_hir/src/definitions.rs
@@ -118,7 +118,7 @@
 
         let DisambiguatedDefPathData { ref data, disambiguator } = self.disambiguated_data;
 
-        ::std::mem::discriminant(data).hash(&mut hasher);
+        std::mem::discriminant(data).hash(&mut hasher);
         if let Some(name) = data.get_opt_name() {
             // Get a stable hash by considering the symbol chars rather than
             // the symbol index.
@@ -188,10 +188,6 @@
 }
 
 impl DefPath {
-    pub fn is_local(&self) -> bool {
-        self.krate == LOCAL_CRATE
-    }
-
     pub fn make<FN>(krate: CrateNum, start_index: DefIndex, mut get_key: FN) -> DefPath
     where
         FN: FnMut(DefIndex) -> DefKey,
@@ -413,7 +409,7 @@
     }
 
     pub fn expansion_that_defined(&self, id: LocalDefId) -> ExpnId {
-        self.expansions_that_defined.get(&id).copied().unwrap_or(ExpnId::root())
+        self.expansions_that_defined.get(&id).copied().unwrap_or_else(ExpnId::root)
     }
 
     pub fn parent_module_of_macro_def(&self, expn_id: ExpnId) -> DefId {
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index 636f67a..3c28b48 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -3,7 +3,6 @@
 crate use crate::hir_id::HirId;
 use crate::{itemlikevisit, LangItem};
 
-use rustc_ast::node_id::NodeMap;
 use rustc_ast::util::parser::ExprPrecedence;
 use rustc_ast::{self as ast, CrateSugar, LlvmAsmDialect};
 use rustc_ast::{AttrVec, Attribute, FloatTy, IntTy, Label, LitKind, StrStyle, UintTy};
@@ -273,10 +272,7 @@
     }
 
     pub fn is_const(&self) -> bool {
-        match self {
-            GenericArg::Const(_) => true,
-            _ => false,
-        }
+        matches!(self, GenericArg::Const(_))
     }
 
     pub fn descr(&self) -> &'static str {
@@ -306,10 +302,6 @@
         Self { args: &[], bindings: &[], parenthesized: false }
     }
 
-    pub fn is_empty(&self) -> bool {
-        self.args.is_empty() && self.bindings.is_empty() && !self.parenthesized
-    }
-
     pub fn inputs(&self) -> &[Ty<'_>] {
         if self.parenthesized {
             for arg in self.args {
@@ -467,23 +459,6 @@
         }
     }
 
-    pub fn own_counts(&self) -> GenericParamCount {
-        // We could cache this as a property of `GenericParamCount`, but
-        // the aim is to refactor this away entirely eventually and the
-        // presence of this method will be a constant reminder.
-        let mut own_counts: GenericParamCount = Default::default();
-
-        for param in self.params {
-            match param.kind {
-                GenericParamKind::Lifetime { .. } => own_counts.lifetimes += 1,
-                GenericParamKind::Type { .. } => own_counts.types += 1,
-                GenericParamKind::Const { .. } => own_counts.consts += 1,
-            };
-        }
-
-        own_counts
-    }
-
     pub fn get_named(&self, name: Symbol) -> Option<&GenericParam<'_>> {
         for param in self.params {
             if name == param.name.ident().name {
@@ -508,6 +483,8 @@
 #[derive(HashStable_Generic)]
 pub enum SyntheticTyParamKind {
     ImplTrait,
+    // Created by the `#[rustc_synthetic]` attribute.
+    FromAttr,
 }
 
 /// A where-clause in a definition.
@@ -755,6 +732,9 @@
     pub hir_id: HirId,
     pub kind: PatKind<'hir>,
     pub span: Span,
+    // Whether to use default binding modes.
+    // At present, this is false only for destructuring assignment.
+    pub default_binding_modes: bool,
 }
 
 impl Pat<'_> {
@@ -1000,17 +980,11 @@
     }
 
     pub fn is_lazy(self) -> bool {
-        match self {
-            BinOpKind::And | BinOpKind::Or => true,
-            _ => false,
-        }
+        matches!(self, BinOpKind::And | BinOpKind::Or)
     }
 
     pub fn is_shift(self) -> bool {
-        match self {
-            BinOpKind::Shl | BinOpKind::Shr => true,
-            _ => false,
-        }
+        matches!(self, BinOpKind::Shl | BinOpKind::Shr)
     }
 
     pub fn is_comparison(self) -> bool {
@@ -1090,10 +1064,7 @@
 
     /// Returns `true` if the unary operator takes its argument by value.
     pub fn is_by_value(self) -> bool {
-        match self {
-            Self::UnNeg | Self::UnNot => true,
-            _ => false,
-        }
+        matches!(self, Self::UnNeg | Self::UnNot)
     }
 }
 
@@ -1121,11 +1092,11 @@
     Semi(&'hir Expr<'hir>),
 }
 
-impl StmtKind<'hir> {
-    pub fn attrs(&self) -> &'hir [Attribute] {
+impl<'hir> StmtKind<'hir> {
+    pub fn attrs(&self, get_item: impl FnOnce(ItemId) -> &'hir Item<'hir>) -> &'hir [Attribute] {
         match *self {
             StmtKind::Local(ref l) => &l.attrs,
-            StmtKind::Item(_) => &[],
+            StmtKind::Item(ref item_id) => &get_item(*item_id).attrs,
             StmtKind::Expr(ref e) | StmtKind::Semi(ref e) => &e.attrs,
         }
     }
@@ -1383,6 +1354,7 @@
     pub fn precedence(&self) -> ExprPrecedence {
         match self.kind {
             ExprKind::Box(_) => ExprPrecedence::Box,
+            ExprKind::ConstBlock(_) => ExprPrecedence::ConstBlock,
             ExprKind::Array(_) => ExprPrecedence::Array,
             ExprKind::Call(..) => ExprPrecedence::Call,
             ExprKind::MethodCall(..) => ExprPrecedence::MethodCall,
@@ -1428,10 +1400,9 @@
     /// on the given expression should be considered a place expression.
     pub fn is_place_expr(&self, mut allow_projections_from: impl FnMut(&Self) -> bool) -> bool {
         match self.kind {
-            ExprKind::Path(QPath::Resolved(_, ref path)) => match path.res {
-                Res::Local(..) | Res::Def(DefKind::Static, _) | Res::Err => true,
-                _ => false,
-            },
+            ExprKind::Path(QPath::Resolved(_, ref path)) => {
+                matches!(path.res, Res::Local(..) | Res::Def(DefKind::Static, _) | Res::Err)
+            }
 
             // Type ascription inherits its place expression kind from its
             // operand. See:
@@ -1468,6 +1439,7 @@
             | ExprKind::LlvmInlineAsm(..)
             | ExprKind::AssignOp(..)
             | ExprKind::Lit(_)
+            | ExprKind::ConstBlock(..)
             | ExprKind::Unary(..)
             | ExprKind::Box(..)
             | ExprKind::AddrOf(..)
@@ -1523,6 +1495,8 @@
 pub enum ExprKind<'hir> {
     /// A `box x` expression.
     Box(&'hir Expr<'hir>),
+    /// Allow anonymous constants from an inline `const` block
+    ConstBlock(AnonConst),
     /// An array (e.g., `[a, b, c, d]`).
     Array(&'hir [Expr<'hir>]),
     /// A function call.
@@ -1709,6 +1683,9 @@
     AsyncFn,
     /// A desugared `<expr>.await`.
     AwaitDesugar,
+    /// A desugared `expr = expr`, where the LHS is a tuple, struct or array.
+    /// The span is that of the `=` sign.
+    AssignDesugar(Span),
 }
 
 /// Hints at the original code for a `match _ { .. }`.
@@ -2220,10 +2197,7 @@
 impl ImplicitSelfKind {
     /// Does this represent an implicit self?
     pub fn has_implicit_self(&self) -> bool {
-        match *self {
-            ImplicitSelfKind::None => false,
-            _ => true,
-        }
+        !matches!(*self, ImplicitSelfKind::None)
     }
 }
 
@@ -2253,10 +2227,7 @@
     }
 
     pub fn is_default(&self) -> bool {
-        match *self {
-            Defaultness::Default { .. } => true,
-            _ => false,
-        }
+        matches!(*self, Defaultness::Default { .. })
     }
 }
 
@@ -2387,10 +2358,7 @@
 
 impl VisibilityKind<'_> {
     pub fn is_pub(&self) -> bool {
-        match *self {
-            VisibilityKind::Public => true,
-            _ => false,
-        }
+        matches!(*self, VisibilityKind::Public)
     }
 
     pub fn is_pub_restricted(&self) -> bool {
@@ -2399,15 +2367,6 @@
             VisibilityKind::Crate(..) | VisibilityKind::Restricted { .. } => true,
         }
     }
-
-    pub fn descr(&self) -> &'static str {
-        match *self {
-            VisibilityKind::Public => "public",
-            VisibilityKind::Inherited => "private",
-            VisibilityKind::Crate(..) => "crate-visible",
-            VisibilityKind::Restricted { .. } => "restricted",
-        }
-    }
 }
 
 #[derive(Debug, HashStable_Generic)]
@@ -2527,10 +2486,7 @@
 
 impl FnHeader {
     pub fn is_const(&self) -> bool {
-        match &self.constness {
-            Constness::Const => true,
-            _ => false,
-        }
+        matches!(&self.constness, Constness::Const)
     }
 }
 
@@ -2679,8 +2635,6 @@
     pub span: Span,
 }
 
-pub type CaptureModeMap = NodeMap<CaptureBy>;
-
 // The TraitCandidate's import_ids is empty if the trait is defined in the same module, and
 // has length > 0 if the trait is found through an chain of imports, starting with the
 // import/use statement in the scope where the trait is used.
@@ -2729,6 +2683,9 @@
             Node::TraitItem(TraitItem { ident, .. })
             | Node::ImplItem(ImplItem { ident, .. })
             | Node::ForeignItem(ForeignItem { ident, .. })
+            | Node::Field(StructField { ident, .. })
+            | Node::Variant(Variant { ident, .. })
+            | Node::MacroDef(MacroDef { ident, .. })
             | Node::Item(Item { ident, .. }) => Some(*ident),
             _ => None,
         }
diff --git a/compiler/rustc_hir/src/hir_id.rs b/compiler/rustc_hir/src/hir_id.rs
index fea850c..cc8ac4c 100644
--- a/compiler/rustc_hir/src/hir_id.rs
+++ b/compiler/rustc_hir/src/hir_id.rs
@@ -45,5 +45,3 @@
     owner: LocalDefId { local_def_index: CRATE_DEF_INDEX },
     local_id: ItemLocalId::from_u32(0),
 };
-
-pub const DUMMY_ITEM_LOCAL_ID: ItemLocalId = ItemLocalId::MAX;
diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs
index 820d664..35615af 100644
--- a/compiler/rustc_hir/src/intravisit.rs
+++ b/compiler/rustc_hir/src/intravisit.rs
@@ -1068,6 +1068,7 @@
         ExprKind::Array(subexpressions) => {
             walk_list!(visitor, visit_expr, subexpressions);
         }
+        ExprKind::ConstBlock(ref anon_const) => visitor.visit_anon_const(anon_const),
         ExprKind::Repeat(ref element, ref count) => {
             visitor.visit_expr(element);
             visitor.visit_anon_const(count)
diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs
index 5e4c03b..3e4eb9e 100644
--- a/compiler/rustc_hir/src/lang_items.rs
+++ b/compiler/rustc_hir/src/lang_items.rs
@@ -263,6 +263,7 @@
     // is required to define it somewhere. Additionally, there are restrictions on crates that use
     // a weak lang item, but do not have it defined.
     Panic,                   sym::panic,               panic_fn,                   Target::Fn;
+    PanicStr,                sym::panic_str,           panic_str,                  Target::Fn;
     PanicBoundsCheck,        sym::panic_bounds_check,  panic_bounds_check_fn,      Target::Fn;
     PanicInfo,               sym::panic_info,          panic_info,                 Target::Struct;
     PanicLocation,           sym::panic_location,      panic_location,             Target::Struct;
diff --git a/compiler/rustc_hir/src/pat_util.rs b/compiler/rustc_hir/src/pat_util.rs
index 2f1b5da..9e0a6aa 100644
--- a/compiler/rustc_hir/src/pat_util.rs
+++ b/compiler/rustc_hir/src/pat_util.rs
@@ -58,25 +58,6 @@
 }
 
 impl hir::Pat<'_> {
-    pub fn is_refutable(&self) -> bool {
-        match self.kind {
-            PatKind::Lit(_)
-            | PatKind::Range(..)
-            | PatKind::Path(hir::QPath::Resolved(Some(..), _) | hir::QPath::TypeRelative(..)) => {
-                true
-            }
-
-            PatKind::Path(hir::QPath::Resolved(_, ref path))
-            | PatKind::TupleStruct(hir::QPath::Resolved(_, ref path), ..)
-            | PatKind::Struct(hir::QPath::Resolved(_, ref path), ..) => match path.res {
-                Res::Def(DefKind::Variant, _) => true,
-                _ => false,
-            },
-            PatKind::Slice(..) => true,
-            _ => false,
-        }
-    }
-
     /// Call `f` on every "binding" in a pattern, e.g., on `a` in
     /// `match foo() { Some(a) => (), None => () }`
     pub fn each_binding(&self, mut f: impl FnMut(hir::BindingAnnotation, HirId, Span, Ident)) {
@@ -111,19 +92,7 @@
     /// Checks if the pattern contains any patterns that bind something to
     /// an ident, e.g., `foo`, or `Foo(foo)` or `foo @ Bar(..)`.
     pub fn contains_bindings(&self) -> bool {
-        self.satisfies(|p| match p.kind {
-            PatKind::Binding(..) => true,
-            _ => false,
-        })
-    }
-
-    /// Checks if the pattern contains any patterns that bind something to
-    /// an ident or wildcard, e.g., `foo`, or `Foo(_)`, `foo @ Bar(..)`,
-    pub fn contains_bindings_or_wild(&self) -> bool {
-        self.satisfies(|p| match p.kind {
-            PatKind::Binding(..) | PatKind::Wild => true,
-            _ => false,
-        })
+        self.satisfies(|p| matches!(p.kind, PatKind::Binding(..)))
     }
 
     /// Checks if the pattern satisfies the given predicate on some sub-pattern.
diff --git a/compiler/rustc_hir/src/target.rs b/compiler/rustc_hir/src/target.rs
index 1efc8bc..fd6a312 100644
--- a/compiler/rustc_hir/src/target.rs
+++ b/compiler/rustc_hir/src/target.rs
@@ -9,13 +9,13 @@
 
 use std::fmt::{self, Display};
 
-#[derive(Copy, Clone, PartialEq)]
+#[derive(Copy, Clone, PartialEq, Debug)]
 pub enum MethodKind {
     Trait { body: bool },
     Inherent,
 }
 
-#[derive(Copy, Clone, PartialEq)]
+#[derive(Copy, Clone, PartialEq, Debug)]
 pub enum Target {
     ExternCrate,
     Use,
diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs
index f6e4b1f..f7018ae 100644
--- a/compiler/rustc_hir_pretty/src/lib.rs
+++ b/compiler/rustc_hir_pretty/src/lib.rs
@@ -44,9 +44,6 @@
     fn nested(&self, _state: &mut State<'_>, _nested: Nested) {}
     fn pre(&self, _state: &mut State<'_>, _node: AnnNode<'_>) {}
     fn post(&self, _state: &mut State<'_>, _node: AnnNode<'_>) {}
-    fn try_fetch_item(&self, _: hir::HirId) -> Option<&hir::Item<'_>> {
-        None
-    }
 }
 
 pub struct NoAnn;
@@ -54,9 +51,6 @@
 pub const NO_ANN: &dyn PpAnn = &NoAnn;
 
 impl PpAnn for hir::Crate<'_> {
-    fn try_fetch_item(&self, item: hir::HirId) -> Option<&hir::Item<'_>> {
-        Some(self.item(item))
-    }
     fn nested(&self, state: &mut State<'_>, nested: Nested) {
         match nested {
             Nested::Item(id) => state.print_item(self.item(id.id)),
@@ -141,6 +135,9 @@
 }
 
 impl<'a> PrintState<'a> for State<'a> {
+    fn insert_extra_parens(&self) -> bool {
+        true
+    }
     fn comments(&mut self) -> &mut Option<Comments<'a>> {
         &mut self.comments
     }
@@ -302,13 +299,11 @@
     pub fn break_offset_if_not_bol(&mut self, n: usize, off: isize) {
         if !self.s.is_beginning_of_line() {
             self.s.break_offset(n, off)
-        } else {
-            if off != 0 && self.s.last_token().is_hardbreak_tok() {
-                // We do something pretty sketchy here: tuck the nonzero
-                // offset-adjustment we were going to deposit along with the
-                // break into the previous hardbreak.
-                self.s.replace_last_token(pp::Printer::hardbreak_tok_offset(off));
-            }
+        } else if off != 0 && self.s.last_token().is_hardbreak_tok() {
+            // We do something pretty sketchy here: tuck the nonzero
+            // offset-adjustment we were going to deposit along with the
+            // break into the previous hardbreak.
+            self.s.replace_last_token(pp::Printer::hardbreak_tok_offset(off));
         }
     }
 
@@ -1138,6 +1133,13 @@
         self.end()
     }
 
+    fn print_expr_anon_const(&mut self, anon_const: &hir::AnonConst) {
+        self.ibox(INDENT_UNIT);
+        self.s.word_space("const");
+        self.print_anon_const(anon_const);
+        self.end()
+    }
+
     fn print_expr_repeat(&mut self, element: &hir::Expr<'_>, count: &hir::AnonConst) {
         self.ibox(INDENT_UNIT);
         self.s.word("[");
@@ -1290,6 +1292,9 @@
             hir::ExprKind::Array(ref exprs) => {
                 self.print_expr_vec(exprs);
             }
+            hir::ExprKind::ConstBlock(ref anon_const) => {
+                self.print_expr_anon_const(anon_const);
+            }
             hir::ExprKind::Repeat(ref element, ref count) => {
                 self.print_expr_repeat(&element, count);
             }
@@ -1914,10 +1919,7 @@
                 self.pclose();
             }
             PatKind::Box(ref inner) => {
-                let is_range_inner = match inner.kind {
-                    PatKind::Range(..) => true,
-                    _ => false,
-                };
+                let is_range_inner = matches!(inner.kind, PatKind::Range(..));
                 self.s.word("box ");
                 if is_range_inner {
                     self.popen();
@@ -1928,10 +1930,7 @@
                 }
             }
             PatKind::Ref(ref inner, mutbl) => {
-                let is_range_inner = match inner.kind {
-                    PatKind::Range(..) => true,
-                    _ => false,
-                };
+                let is_range_inner = matches!(inner.kind, PatKind::Range(..));
                 self.s.word("&");
                 self.s.word(mutbl.prefix_str());
                 if is_range_inner {
@@ -2428,10 +2427,7 @@
 //
 // Duplicated from `parse::classify`, but adapted for the HIR.
 fn expr_requires_semi_to_be_stmt(e: &hir::Expr<'_>) -> bool {
-    match e.kind {
-        hir::ExprKind::Match(..) | hir::ExprKind::Block(..) | hir::ExprKind::Loop(..) => false,
-        _ => true,
-    }
+    !matches!(e.kind, hir::ExprKind::Match(..) | hir::ExprKind::Block(..) | hir::ExprKind::Loop(..))
 }
 
 /// This statement requires a semicolon after it.
diff --git a/compiler/rustc_incremental/src/assert_module_sources.rs b/compiler/rustc_incremental/src/assert_module_sources.rs
index 80448c0..17d8ac9 100644
--- a/compiler/rustc_incremental/src/assert_module_sources.rs
+++ b/compiler/rustc_incremental/src/assert_module_sources.rs
@@ -111,10 +111,12 @@
             (&user_path[..], None)
         };
 
-        let mut cgu_path_components = user_path.split('-').collect::<Vec<_>>();
+        let mut iter = user_path.split('-');
 
         // Remove the crate name
-        assert_eq!(cgu_path_components.remove(0), crate_name);
+        assert_eq!(iter.next().unwrap(), crate_name);
+
+        let cgu_path_components = iter.collect::<Vec<_>>();
 
         let cgu_name_builder = &mut CodegenUnitNameBuilder::new(self.tcx);
         let cgu_name =
diff --git a/compiler/rustc_incremental/src/persist/dirty_clean.rs b/compiler/rustc_incremental/src/persist/dirty_clean.rs
index f0a1088..d55813f4 100644
--- a/compiler/rustc_incremental/src/persist/dirty_clean.rs
+++ b/compiler/rustc_incremental/src/persist/dirty_clean.rs
@@ -160,7 +160,7 @@
 
         let mut all_attrs = FindAllAttrs {
             tcx,
-            attr_names: vec![sym::rustc_dirty, sym::rustc_clean],
+            attr_names: &[sym::rustc_dirty, sym::rustc_clean],
             found_attrs: vec![],
         };
         intravisit::walk_crate(&mut all_attrs, krate);
@@ -299,7 +299,7 @@
 
                     // Represents a Trait Declaration
                     // FIXME(michaelwoerister): trait declaration is buggy because sometimes some of
-                    // the depnodes don't exist (because they legitametely didn't need to be
+                    // the depnodes don't exist (because they legitimately didn't need to be
                     // calculated)
                     //
                     // michaelwoerister and vitiral came up with a possible solution,
@@ -512,17 +512,17 @@
 }
 
 // A visitor that collects all #[rustc_dirty]/#[rustc_clean] attributes from
-// the HIR. It is used to verfiy that we really ran checks for all annotated
+// the HIR. It is used to verify that we really ran checks for all annotated
 // nodes.
-pub struct FindAllAttrs<'tcx> {
+pub struct FindAllAttrs<'a, 'tcx> {
     tcx: TyCtxt<'tcx>,
-    attr_names: Vec<Symbol>,
+    attr_names: &'a [Symbol],
     found_attrs: Vec<&'tcx Attribute>,
 }
 
-impl FindAllAttrs<'tcx> {
+impl FindAllAttrs<'_, 'tcx> {
     fn is_active_attr(&mut self, attr: &Attribute) -> bool {
-        for attr_name in &self.attr_names {
+        for attr_name in self.attr_names {
             if self.tcx.sess.check_name(attr, *attr_name) && check_config(self.tcx, attr) {
                 return true;
             }
@@ -543,7 +543,7 @@
     }
 }
 
-impl intravisit::Visitor<'tcx> for FindAllAttrs<'tcx> {
+impl intravisit::Visitor<'tcx> for FindAllAttrs<'_, 'tcx> {
     type Map = Map<'tcx>;
 
     fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
diff --git a/compiler/rustc_incremental/src/persist/fs.rs b/compiler/rustc_incremental/src/persist/fs.rs
index 4926f72..9fdf0a5 100644
--- a/compiler/rustc_incremental/src/persist/fs.rs
+++ b/compiler/rustc_incremental/src/persist/fs.rs
@@ -765,7 +765,6 @@
 
     // Now garbage collect the valid session directories.
     let mut deletion_candidates = vec![];
-    let mut definitely_delete = vec![];
 
     for (lock_file_name, directory_name) in &lock_file_to_session_dir {
         debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
@@ -842,8 +841,11 @@
                             successfully acquired lock"
                     );
 
-                    // Note that we are holding on to the lock
-                    definitely_delete.push((crate_directory.join(directory_name), Some(lock)));
+                    delete_old(sess, &crate_directory.join(directory_name));
+
+                    // Let's make it explicit that the file lock is released at this point,
+                    // or rather, that we held on to it until here
+                    mem::drop(lock);
                 }
                 Err(_) => {
                     debug!(
@@ -880,28 +882,23 @@
         mem::drop(lock);
     }
 
-    for (path, lock) in definitely_delete {
-        debug!("garbage_collect_session_directories() - deleting `{}`", path.display());
-
-        if let Err(err) = safe_remove_dir_all(&path) {
-            sess.warn(&format!(
-                "Failed to garbage collect incremental \
-                                compilation session directory `{}`: {}",
-                path.display(),
-                err
-            ));
-        } else {
-            delete_session_dir_lock_file(sess, &lock_file_path(&path));
-        }
-
-        // Let's make it explicit that the file lock is released at this point,
-        // or rather, that we held on to it until here
-        mem::drop(lock);
-    }
-
     Ok(())
 }
 
+fn delete_old(sess: &Session, path: &Path) {
+    debug!("garbage_collect_session_directories() - deleting `{}`", path.display());
+
+    if let Err(err) = safe_remove_dir_all(&path) {
+        sess.warn(&format!(
+            "Failed to garbage collect incremental compilation session directory `{}`: {}",
+            path.display(),
+            err
+        ));
+    } else {
+        delete_session_dir_lock_file(sess, &lock_file_path(&path));
+    }
+}
+
 fn all_except_most_recent(
     deletion_candidates: Vec<(SystemTime, PathBuf, Option<flock::Lock>)>,
 ) -> FxHashMap<PathBuf, Option<flock::Lock>> {
diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs
index c43d4ad..45cef47 100644
--- a/compiler/rustc_incremental/src/persist/save.rs
+++ b/compiler/rustc_incremental/src/persist/save.rs
@@ -153,7 +153,8 @@
         let total_node_count = serialized_graph.nodes.len();
         let total_edge_count = serialized_graph.edge_list_data.len();
 
-        let mut counts: FxHashMap<_, Stat> = FxHashMap::default();
+        let mut counts: FxHashMap<_, Stat> =
+            FxHashMap::with_capacity_and_hasher(total_node_count, Default::default());
 
         for (i, &node) in serialized_graph.nodes.iter_enumerated() {
             let stat = counts.entry(node.kind).or_insert(Stat {
@@ -170,14 +171,6 @@
         let mut counts: Vec<_> = counts.values().cloned().collect();
         counts.sort_by_key(|s| -(s.node_counter as i64));
 
-        let percentage_of_all_nodes: Vec<f64> = counts
-            .iter()
-            .map(|s| (100.0 * (s.node_counter as f64)) / (total_node_count as f64))
-            .collect();
-
-        let average_edges_per_kind: Vec<f64> =
-            counts.iter().map(|s| (s.edge_counter as f64) / (s.node_counter as f64)).collect();
-
         println!("[incremental]");
         println!("[incremental] DepGraph Statistics");
 
@@ -207,13 +200,13 @@
                   |------------------|"
         );
 
-        for (i, stat) in counts.iter().enumerate() {
+        for stat in counts.iter() {
             println!(
                 "[incremental]  {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
                 format!("{:?}", stat.kind),
-                percentage_of_all_nodes[i],
+                (100.0 * (stat.node_counter as f64)) / (total_node_count as f64), // percentage of all nodes
                 stat.node_counter,
-                average_edges_per_kind[i]
+                (stat.edge_counter as f64) / (stat.node_counter as f64), // average edges per kind
             );
         }
 
diff --git a/compiler/rustc_infer/src/infer/combine.rs b/compiler/rustc_infer/src/infer/combine.rs
index a540fac..6a1715e 100644
--- a/compiler/rustc_infer/src/infer/combine.rs
+++ b/compiler/rustc_infer/src/infer/combine.rs
@@ -35,7 +35,7 @@
 use crate::traits::{Obligation, PredicateObligations};
 
 use rustc_ast as ast;
-use rustc_data_structures::mini_map::MiniMap;
+use rustc_data_structures::sso::SsoHashMap;
 use rustc_hir::def_id::DefId;
 use rustc_middle::traits::ObligationCause;
 use rustc_middle::ty::error::TypeError;
@@ -429,7 +429,7 @@
             needs_wf: false,
             root_ty: ty,
             param_env: self.param_env,
-            cache: MiniMap::new(),
+            cache: SsoHashMap::new(),
         };
 
         let ty = match generalize.relate(ty, ty) {
@@ -490,7 +490,7 @@
 
     param_env: ty::ParamEnv<'tcx>,
 
-    cache: MiniMap<Ty<'tcx>, RelateResult<'tcx, Ty<'tcx>>>,
+    cache: SsoHashMap<Ty<'tcx>, RelateResult<'tcx, Ty<'tcx>>>,
 }
 
 /// Result from a generalization operation. This includes
diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
index 795c5a6..524efd0 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/mod.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
@@ -50,10 +50,10 @@
 use super::{InferCtxt, RegionVariableOrigin, SubregionOrigin, TypeTrace, ValuePairs};
 
 use crate::infer;
-use crate::infer::OriginalQueryValues;
 use crate::traits::error_reporting::report_object_safety_error;
 use crate::traits::{
     IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode,
+    StatementAsExpression,
 };
 
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
@@ -64,7 +64,6 @@
 use rustc_hir::lang_items::LangItem;
 use rustc_hir::{Item, ItemKind, Node};
 use rustc_middle::ty::error::TypeError;
-use rustc_middle::ty::ParamEnvAnd;
 use rustc_middle::ty::{
     self,
     subst::{Subst, SubstsRef},
@@ -72,6 +71,7 @@
 };
 use rustc_span::{BytePos, DesugaringKind, Pos, Span};
 use rustc_target::spec::abi;
+use std::ops::ControlFlow;
 use std::{cmp, fmt};
 
 mod note;
@@ -619,6 +619,7 @@
                 scrut_hir_id,
                 opt_suggest_box_span,
                 arm_span,
+                scrut_span,
                 ..
             }) => match source {
                 hir::MatchSource::IfLetDesugar { .. } => {
@@ -664,25 +665,59 @@
                         Some(ty::error::ExpectedFound { expected, .. }) => expected,
                         _ => last_ty,
                     });
-                    let msg = "`match` arms have incompatible types";
-                    err.span_label(cause.span, msg);
+                    let source_map = self.tcx.sess.source_map();
+                    let mut any_multiline_arm = source_map.is_multiline(arm_span);
                     if prior_arms.len() <= 4 {
                         for sp in prior_arms {
+                            any_multiline_arm |= source_map.is_multiline(*sp);
                             err.span_label(*sp, format!("this is found to be of type `{}`", t));
                         }
                     } else if let Some(sp) = prior_arms.last() {
+                        any_multiline_arm |= source_map.is_multiline(*sp);
                         err.span_label(
                             *sp,
                             format!("this and all prior arms are found to be of type `{}`", t),
                         );
                     }
-                    if let Some(sp) = semi_span {
-                        err.span_suggestion_short(
-                            sp,
-                            "consider removing this semicolon",
-                            String::new(),
-                            Applicability::MachineApplicable,
-                        );
+                    let outer_error_span = if any_multiline_arm {
+                        // Cover just `match` and the scrutinee expression, not
+                        // the entire match body, to reduce diagram noise.
+                        cause.span.shrink_to_lo().to(scrut_span)
+                    } else {
+                        cause.span
+                    };
+                    let msg = "`match` arms have incompatible types";
+                    err.span_label(outer_error_span, msg);
+                    if let Some((sp, boxed)) = semi_span {
+                        if let (StatementAsExpression::NeedsBoxing, [.., prior_arm]) =
+                            (boxed, &prior_arms[..])
+                        {
+                            err.multipart_suggestion(
+                                "consider removing this semicolon and boxing the expressions",
+                                vec![
+                                    (prior_arm.shrink_to_lo(), "Box::new(".to_string()),
+                                    (prior_arm.shrink_to_hi(), ")".to_string()),
+                                    (arm_span.shrink_to_lo(), "Box::new(".to_string()),
+                                    (arm_span.shrink_to_hi(), ")".to_string()),
+                                    (sp, String::new()),
+                                ],
+                                Applicability::HasPlaceholders,
+                            );
+                        } else if matches!(boxed, StatementAsExpression::NeedsBoxing) {
+                            err.span_suggestion_short(
+                                sp,
+                                "consider removing this semicolon and boxing the expressions",
+                                String::new(),
+                                Applicability::MachineApplicable,
+                            );
+                        } else {
+                            err.span_suggestion_short(
+                                sp,
+                                "consider removing this semicolon",
+                                String::new(),
+                                Applicability::MachineApplicable,
+                            );
+                        }
                     }
                     if let Some(ret_sp) = opt_suggest_box_span {
                         // Get return type span and point to it.
@@ -705,13 +740,27 @@
                 if let Some(sp) = outer {
                     err.span_label(sp, "`if` and `else` have incompatible types");
                 }
-                if let Some(sp) = semicolon {
-                    err.span_suggestion_short(
-                        sp,
-                        "consider removing this semicolon",
-                        String::new(),
-                        Applicability::MachineApplicable,
-                    );
+                if let Some((sp, boxed)) = semicolon {
+                    if matches!(boxed, StatementAsExpression::NeedsBoxing) {
+                        err.multipart_suggestion(
+                            "consider removing this semicolon and boxing the expression",
+                            vec![
+                                (then.shrink_to_lo(), "Box::new(".to_string()),
+                                (then.shrink_to_hi(), ")".to_string()),
+                                (else_sp.shrink_to_lo(), "Box::new(".to_string()),
+                                (else_sp.shrink_to_hi(), ")".to_string()),
+                                (sp, String::new()),
+                            ],
+                            Applicability::MachineApplicable,
+                        );
+                    } else {
+                        err.span_suggestion_short(
+                            sp,
+                            "consider removing this semicolon",
+                            String::new(),
+                            Applicability::MachineApplicable,
+                        );
+                    }
                 }
                 if let Some(ret_sp) = opt_suggest_box_span {
                     self.suggest_boxing_for_return_impl_trait(
@@ -1449,7 +1498,7 @@
         }
 
         impl<'tcx> ty::fold::TypeVisitor<'tcx> for OpaqueTypesVisitor<'tcx> {
-            fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+            fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
                 if let Some((kind, def_id)) = TyCategory::from_ty(t) {
                     let span = self.tcx.def_span(def_id);
                     // Avoid cluttering the output when the "found" and error span overlap:
@@ -1590,6 +1639,16 @@
             Mismatch::Variable(exp_found) => Some(exp_found),
             Mismatch::Fixed(_) => None,
         };
+        let exp_found = match terr {
+            // `terr` has more accurate type information than `exp_found` in match expressions.
+            ty::error::TypeError::Sorts(terr)
+                if exp_found.map_or(false, |ef| terr.found == ef.found) =>
+            {
+                Some(*terr)
+            }
+            _ => exp_found,
+        };
+        debug!("exp_found {:?} terr {:?}", exp_found, terr);
         if let Some(exp_found) = exp_found {
             self.suggest_as_ref_where_appropriate(span, &exp_found, diag);
             self.suggest_await_on_expect_found(cause, span, &exp_found, diag);
@@ -1611,19 +1670,8 @@
         self.note_error_origin(diag, cause, exp_found);
     }
 
-    fn suggest_await_on_expect_found(
-        &self,
-        cause: &ObligationCause<'tcx>,
-        exp_span: Span,
-        exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
-        diag: &mut DiagnosticBuilder<'tcx>,
-    ) {
-        debug!(
-            "suggest_await_on_expect_found: exp_span={:?}, expected_ty={:?}, found_ty={:?}",
-            exp_span, exp_found.expected, exp_found.found
-        );
-
-        if let ty::Opaque(def_id, _) = *exp_found.expected.kind() {
+    pub fn get_impl_future_output_ty(&self, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
+        if let ty::Opaque(def_id, substs) = ty.kind() {
             let future_trait = self.tcx.require_lang_item(LangItem::Future, None);
             // Future::Output
             let item_def_id = self
@@ -1634,37 +1682,121 @@
                 .unwrap()
                 .def_id;
 
-            let projection_ty = self.tcx.projection_ty_from_predicates((def_id, item_def_id));
-            if let Some(projection_ty) = projection_ty {
-                let projection_query = self.canonicalize_query(
-                    &ParamEnvAnd { param_env: self.tcx.param_env(def_id), value: projection_ty },
-                    &mut OriginalQueryValues::default(),
-                );
-                if let Ok(resp) = self.tcx.normalize_projection_ty(projection_query) {
-                    let normalized_ty = resp.value.value.normalized_ty;
-                    debug!("suggest_await_on_expect_found: normalized={:?}", normalized_ty);
-                    if ty::TyS::same_type(normalized_ty, exp_found.found) {
-                        let span = if let ObligationCauseCode::Pattern {
-                            span,
-                            origin_expr: _,
-                            root_ty: _,
-                        } = cause.code
-                        {
-                            // scrutinee's span
-                            span.unwrap_or(exp_span)
-                        } else {
-                            exp_span
-                        };
-                        diag.span_suggestion_verbose(
-                            span.shrink_to_hi(),
-                            "consider awaiting on the future",
-                            ".await".to_string(),
-                            Applicability::MaybeIncorrect,
-                        );
+            let bounds = self.tcx.explicit_item_bounds(*def_id);
+
+            for (predicate, _) in bounds {
+                let predicate = predicate.subst(self.tcx, substs);
+                if let ty::PredicateAtom::Projection(projection_predicate) =
+                    predicate.skip_binders()
+                {
+                    if projection_predicate.projection_ty.item_def_id == item_def_id {
+                        // We don't account for multiple `Future::Output = Ty` constraints.
+                        return Some(projection_predicate.ty);
                     }
                 }
             }
         }
+        None
+    }
+
+    /// A possible error is to forget to add `.await` when using futures:
+    ///
+    /// ```
+    /// async fn make_u32() -> u32 {
+    ///     22
+    /// }
+    ///
+    /// fn take_u32(x: u32) {}
+    ///
+    /// async fn foo() {
+    ///     let x = make_u32();
+    ///     take_u32(x);
+    /// }
+    /// ```
+    ///
+    /// This routine checks if the found type `T` implements `Future<Output=U>` where `U` is the
+    /// expected type. If this is the case, and we are inside of an async body, it suggests adding
+    /// `.await` to the tail of the expression.
+    fn suggest_await_on_expect_found(
+        &self,
+        cause: &ObligationCause<'tcx>,
+        exp_span: Span,
+        exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
+        diag: &mut DiagnosticBuilder<'tcx>,
+    ) {
+        debug!(
+            "suggest_await_on_expect_found: exp_span={:?}, expected_ty={:?}, found_ty={:?}",
+            exp_span, exp_found.expected, exp_found.found,
+        );
+
+        if let ObligationCauseCode::CompareImplMethodObligation { .. } = &cause.code {
+            return;
+        }
+
+        match (
+            self.get_impl_future_output_ty(exp_found.expected),
+            self.get_impl_future_output_ty(exp_found.found),
+        ) {
+            (Some(exp), Some(found)) if ty::TyS::same_type(exp, found) => match &cause.code {
+                ObligationCauseCode::IfExpression(box IfExpressionCause { then, .. }) => {
+                    diag.multipart_suggestion(
+                        "consider `await`ing on both `Future`s",
+                        vec![
+                            (then.shrink_to_hi(), ".await".to_string()),
+                            (exp_span.shrink_to_hi(), ".await".to_string()),
+                        ],
+                        Applicability::MaybeIncorrect,
+                    );
+                }
+                ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
+                    prior_arms,
+                    ..
+                }) => {
+                    if let [.., arm_span] = &prior_arms[..] {
+                        diag.multipart_suggestion(
+                            "consider `await`ing on both `Future`s",
+                            vec![
+                                (arm_span.shrink_to_hi(), ".await".to_string()),
+                                (exp_span.shrink_to_hi(), ".await".to_string()),
+                            ],
+                            Applicability::MaybeIncorrect,
+                        );
+                    } else {
+                        diag.help("consider `await`ing on both `Future`s");
+                    }
+                }
+                _ => {
+                    diag.help("consider `await`ing on both `Future`s");
+                }
+            },
+            (_, Some(ty)) if ty::TyS::same_type(exp_found.expected, ty) => {
+                let span = match cause.code {
+                    // scrutinee's span
+                    ObligationCauseCode::Pattern { span: Some(span), .. } => span,
+                    _ => exp_span,
+                };
+                diag.span_suggestion_verbose(
+                    span.shrink_to_hi(),
+                    "consider `await`ing on the `Future`",
+                    ".await".to_string(),
+                    Applicability::MaybeIncorrect,
+                );
+            }
+            (Some(ty), _) if ty::TyS::same_type(ty, exp_found.found) => {
+                let span = match cause.code {
+                    // scrutinee's span
+                    ObligationCauseCode::Pattern { span: Some(span), .. } => span,
+                    _ => exp_span,
+                };
+                diag.span_suggestion_verbose(
+                    span.shrink_to_hi(),
+                    "consider `await`ing on the `Future`",
+                    ".await".to_string(),
+                    Applicability::MaybeIncorrect,
+                );
+            }
+            _ => {}
+        }
     }
 
     /// When encountering a case where `.as_ref()` on a `Result` or `Option` would be appropriate,
diff --git a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
index 2f3089f..8689895 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
@@ -91,17 +91,6 @@
         if let (None, Some(ty)) =
             (self.found_local_pattern, self.node_ty_contains_target(local.hir_id))
         {
-            // FIXME: There's a trade-off here - we can either check that our target span
-            // is contained in `local.span` or not. If we choose to check containment
-            // we can avoid some spurious suggestions (see #72690), but we lose
-            // the ability to report on things like:
-            //
-            // ```
-            // let x = vec![];
-            // ```
-            //
-            // because the target span will be in the macro expansion of `vec![]`.
-            // At present we choose not to check containment.
             self.found_local_pattern = Some(&*local.pat);
             self.found_node_ty = Some(ty);
         }
@@ -113,10 +102,8 @@
             if let (None, Some(ty)) =
                 (self.found_arg_pattern, self.node_ty_contains_target(param.hir_id))
             {
-                if self.target_span.contains(param.pat.span) {
-                    self.found_arg_pattern = Some(&*param.pat);
-                    self.found_node_ty = Some(ty);
-                }
+                self.found_arg_pattern = Some(&*param.pat);
+                self.found_node_ty = Some(ty);
             }
         }
         intravisit::walk_body(self, body);
@@ -752,7 +739,6 @@
                 "cannot infer {} {} {} `{}`{}",
                 kind_str, preposition, descr, type_name, parent_desc
             )
-            .into()
         }
     }
 }
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
index 7ab18e5..5978605 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
@@ -102,43 +102,89 @@
             None => String::new(),
         };
 
-        let (span_1, span_2, main_label, span_label) = match (sup_is_ret_type, sub_is_ret_type) {
-            (None, None) => {
-                let (main_label_1, span_label_1) = if ty_sup.hir_id == ty_sub.hir_id {
-                    (
-                        "this type is declared with multiple lifetimes...".to_owned(),
-                        "...but data with one lifetime flows into the other here".to_owned(),
-                    )
-                } else {
-                    (
-                        "these two types are declared with different lifetimes...".to_owned(),
-                        format!("...but data{} flows{} here", span_label_var1, span_label_var2),
-                    )
-                };
-                (ty_sup.span, ty_sub.span, main_label_1, span_label_1)
-            }
+        let (span_1, span_2, main_label, span_label, future_return_type) =
+            match (sup_is_ret_type, sub_is_ret_type) {
+                (None, None) => {
+                    let (main_label_1, span_label_1) = if ty_sup.hir_id == ty_sub.hir_id {
+                        (
+                            "this type is declared with multiple lifetimes...".to_owned(),
+                            "...but data with one lifetime flows into the other here".to_owned(),
+                        )
+                    } else {
+                        (
+                            "these two types are declared with different lifetimes...".to_owned(),
+                            format!("...but data{} flows{} here", span_label_var1, span_label_var2),
+                        )
+                    };
+                    (ty_sup.span, ty_sub.span, main_label_1, span_label_1, None)
+                }
 
-            (Some(ret_span), _) => (
-                ty_sub.span,
-                ret_span,
-                "this parameter and the return type are declared with different lifetimes..."
-                    .to_owned(),
-                format!("...but data{} is returned here", span_label_var1),
-            ),
-            (_, Some(ret_span)) => (
-                ty_sup.span,
-                ret_span,
-                "this parameter and the return type are declared with different lifetimes..."
-                    .to_owned(),
-                format!("...but data{} is returned here", span_label_var1),
-            ),
-        };
+                (Some(ret_span), _) => {
+                    let sup_future = self.future_return_type(scope_def_id_sup);
+                    let (return_type, action) = if let Some(_) = sup_future {
+                        ("returned future", "held across an await point")
+                    } else {
+                        ("return type", "returned")
+                    };
 
-        struct_span_err!(self.tcx().sess, span, E0623, "lifetime mismatch")
-            .span_label(span_1, main_label)
-            .span_label(span_2, String::new())
-            .span_label(span, span_label)
-            .emit();
+                    (
+                        ty_sub.span,
+                        ret_span,
+                        format!(
+                            "this parameter and the {} are declared with different lifetimes...",
+                            return_type
+                        ),
+                        format!("...but data{} is {} here", span_label_var1, action),
+                        sup_future,
+                    )
+                }
+                (_, Some(ret_span)) => {
+                    let sub_future = self.future_return_type(scope_def_id_sub);
+                    let (return_type, action) = if let Some(_) = sub_future {
+                        ("returned future", "held across an await point")
+                    } else {
+                        ("return type", "returned")
+                    };
+
+                    (
+                        ty_sup.span,
+                        ret_span,
+                        format!(
+                            "this parameter and the {} are declared with different lifetimes...",
+                            return_type
+                        ),
+                        format!("...but data{} is {} here", span_label_var1, action),
+                        sub_future,
+                    )
+                }
+            };
+
+        let mut e = struct_span_err!(self.tcx().sess, span, E0623, "lifetime mismatch");
+
+        e.span_label(span_1, main_label);
+        e.span_label(span_2, String::new());
+        e.span_label(span, span_label);
+
+        if let Some(t) = future_return_type {
+            let snip = self
+                .tcx()
+                .sess
+                .source_map()
+                .span_to_snippet(t.span)
+                .ok()
+                .and_then(|s| match (&t.kind, s.as_str()) {
+                    (rustc_hir::TyKind::Tup(&[]), "") => Some("()".to_string()),
+                    (_, "") => None,
+                    _ => Some(s),
+                })
+                .unwrap_or("{unnamed_type}".to_string());
+
+            e.span_label(
+                t.span,
+                &format!("this `async fn` implicitly returns an `impl Future<Output = {}>`", snip),
+            );
+        }
+        e.emit();
         Some(ErrorReported)
     }
 }
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
index e9d5eba..df3dbfc 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
@@ -15,6 +15,8 @@
 use rustc_span::symbol::Ident;
 use rustc_span::{MultiSpan, Span};
 
+use std::ops::ControlFlow;
+
 impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
     /// Print the error message for lifetime errors when the return type is a static `impl Trait`,
     /// `dyn Trait` or if a method call on a trait object introduces a static requirement.
@@ -472,13 +474,13 @@
 struct TraitObjectVisitor(Vec<DefId>);
 
 impl TypeVisitor<'_> for TraitObjectVisitor {
-    fn visit_ty(&mut self, t: Ty<'_>) -> bool {
+    fn visit_ty(&mut self, t: Ty<'_>) -> ControlFlow<()> {
         match t.kind() {
             ty::Dynamic(preds, RegionKind::ReStatic) => {
                 if let Some(def_id) = preds.principal_def_id() {
                     self.0.push(def_id);
                 }
-                false
+                ControlFlow::CONTINUE
             }
             _ => t.super_visit_with(self),
         }
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
index c055fed..ca93b27 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
@@ -85,6 +85,60 @@
         })
     }
 
+    pub(super) fn future_return_type(
+        &self,
+        local_def_id: LocalDefId,
+    ) -> Option<&rustc_hir::Ty<'_>> {
+        if let Some(hir::IsAsync::Async) = self.asyncness(local_def_id) {
+            if let rustc_middle::ty::Opaque(def_id, _) =
+                self.tcx().type_of(local_def_id).fn_sig(self.tcx()).output().skip_binder().kind()
+            {
+                match self.tcx().hir().get_if_local(*def_id) {
+                    Some(hir::Node::Item(hir::Item {
+                        kind:
+                            hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+                                bounds,
+                                origin: hir::OpaqueTyOrigin::AsyncFn,
+                                ..
+                            }),
+                        ..
+                    })) => {
+                        for b in bounds.iter() {
+                            if let hir::GenericBound::LangItemTrait(
+                                hir::LangItem::Future,
+                                _span,
+                                _hir_id,
+                                generic_args,
+                            ) = b
+                            {
+                                for type_binding in generic_args.bindings.iter() {
+                                    if type_binding.ident.name == rustc_span::sym::Output {
+                                        if let hir::TypeBindingKind::Equality { ty } =
+                                            type_binding.kind
+                                        {
+                                            return Some(ty);
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                    _ => {}
+                }
+            }
+        }
+        None
+    }
+
+    pub(super) fn asyncness(&self, local_def_id: LocalDefId) -> Option<hir::IsAsync> {
+        // similar to the asyncness fn in rustc_ty::ty
+        let hir_id = self.tcx().hir().local_def_id_to_hir_id(local_def_id);
+        let node = self.tcx().hir().get(hir_id);
+        let fn_like = rustc_middle::hir::map::blocks::FnLikeNode::from_node(node)?;
+
+        Some(fn_like.asyncness())
+    }
+
     // Here, we check for the case where the anonymous region
     // is in the return type.
     // FIXME(#42703) - Need to handle certain cases here.
diff --git a/compiler/rustc_infer/src/infer/free_regions.rs b/compiler/rustc_infer/src/infer/free_regions.rs
index ffe5fb1..32f7323 100644
--- a/compiler/rustc_infer/src/infer/free_regions.rs
+++ b/compiler/rustc_infer/src/infer/free_regions.rs
@@ -157,7 +157,7 @@
 
 impl<'a, 'tcx> Lift<'tcx> for FreeRegionMap<'a> {
     type Lifted = FreeRegionMap<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<FreeRegionMap<'tcx>> {
-        self.relation.maybe_map(|&fr| tcx.lift(&fr)).map(|relation| FreeRegionMap { relation })
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<FreeRegionMap<'tcx>> {
+        self.relation.maybe_map(|&fr| tcx.lift(fr)).map(|relation| FreeRegionMap { relation })
     }
 }
diff --git a/compiler/rustc_infer/src/infer/higher_ranked/mod.rs b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs
index ea19dff..e3365e8 100644
--- a/compiler/rustc_infer/src/infer/higher_ranked/mod.rs
+++ b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs
@@ -2,7 +2,7 @@
 //! the end of the file for details.
 
 use super::combine::CombineFields;
-use super::{HigherRankedType, InferCtxt, PlaceholderMap};
+use super::{HigherRankedType, InferCtxt};
 
 use crate::infer::CombinedSnapshot;
 use rustc_middle::ty::relate::{Relate, RelateResult, TypeRelation};
@@ -33,7 +33,7 @@
         self.infcx.commit_if_ok(|_| {
             // First, we instantiate each bound region in the supertype with a
             // fresh placeholder region.
-            let (b_prime, _) = self.infcx.replace_bound_vars_with_placeholders(&b);
+            let b_prime = self.infcx.replace_bound_vars_with_placeholders(&b);
 
             // Next, we instantiate each bound region in the subtype
             // with a fresh region variable. These region variables --
@@ -66,10 +66,7 @@
     /// the [rustc dev guide].
     ///
     /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/hrtb.html
-    pub fn replace_bound_vars_with_placeholders<T>(
-        &self,
-        binder: &ty::Binder<T>,
-    ) -> (T, PlaceholderMap<'tcx>)
+    pub fn replace_bound_vars_with_placeholders<T>(&self, binder: &ty::Binder<T>) -> T
     where
         T: TypeFoldable<'tcx>,
     {
@@ -122,7 +119,7 @@
             next_universe, binder, result, map,
         );
 
-        (result, map)
+        result
     }
 
     /// See `infer::region_constraints::RegionConstraintCollector::leak_check`.
diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs
index 07a55c7..acded53 100644
--- a/compiler/rustc_infer/src/infer/mod.rs
+++ b/compiler/rustc_infer/src/infer/mod.rs
@@ -113,13 +113,6 @@
 }
 
 impl RegionckMode {
-    pub fn suppressed(self) -> bool {
-        match self {
-            Self::Solve => false,
-            Self::Erase { suppress_errors } => suppress_errors,
-        }
-    }
-
     /// Indicates that the MIR borrowck will repeat these region
     /// checks, so we should ignore errors if NLL is (unconditionally)
     /// enabled.
@@ -351,11 +344,6 @@
     universe: Cell<ty::UniverseIndex>,
 }
 
-/// A map returned by `replace_bound_vars_with_placeholders()`
-/// indicating the placeholder region that each late-bound region was
-/// replaced with.
-pub type PlaceholderMap<'tcx> = BTreeMap<ty::BoundRegion, ty::Region<'tcx>>;
-
 /// See the `error_reporting` module for more details.
 #[derive(Clone, Debug, PartialEq, Eq, TypeFoldable)]
 pub enum ValuePairs<'tcx> {
@@ -425,15 +413,6 @@
 #[cfg(target_arch = "x86_64")]
 static_assert_size!(SubregionOrigin<'_>, 32);
 
-/// Places that type/region parameters can appear.
-#[derive(Clone, Copy, Debug)]
-pub enum ParameterOrigin {
-    Path,               // foo::bar
-    MethodCall,         // foo.bar() <-- parameters on impl providing bar()
-    OverloadedOperator, // a + b when overloaded
-    OverloadedDeref,    // *a when overloaded
-}
-
 /// Times when we replace late-bound regions with variables:
 #[derive(Clone, Copy, Debug)]
 pub enum LateBoundRegionConversionTime {
@@ -513,21 +492,6 @@
     },
 }
 
-impl NLLRegionVariableOrigin {
-    pub fn is_universal(self) -> bool {
-        match self {
-            NLLRegionVariableOrigin::FreeRegion => true,
-            NLLRegionVariableOrigin::Placeholder(..) => true,
-            NLLRegionVariableOrigin::Existential { .. } => false,
-            NLLRegionVariableOrigin::RootEmptyRegion => false,
-        }
-    }
-
-    pub fn is_existential(self) -> bool {
-        !self.is_universal()
-    }
-}
-
 // FIXME(eddyb) investigate overlap between this and `TyOrConstInferVar`.
 #[derive(Copy, Clone, Debug)]
 pub enum FixupError<'tcx> {
@@ -714,8 +678,6 @@
 
     pub fn unsolved_variables(&self) -> Vec<Ty<'tcx>> {
         let mut inner = self.inner.borrow_mut();
-        // FIXME(const_generics): should there be an equivalent function for const variables?
-
         let mut vars: Vec<Ty<'_>> = inner
             .type_variables()
             .unsolved_variables()
@@ -992,7 +954,7 @@
         }
 
         Some(self.commit_if_ok(|_snapshot| {
-            let (ty::SubtypePredicate { a_is_expected, a, b }, _) =
+            let ty::SubtypePredicate { a_is_expected, a, b } =
                 self.replace_bound_vars_with_placeholders(&predicate);
 
             let ok = self.at(cause, param_env).sub_exp(a_is_expected, a, b)?;
@@ -1007,7 +969,7 @@
         predicate: ty::PolyRegionOutlivesPredicate<'tcx>,
     ) -> UnitResult<'tcx> {
         self.commit_if_ok(|_snapshot| {
-            let (ty::OutlivesPredicate(r_a, r_b), _) =
+            let ty::OutlivesPredicate(r_a, r_b) =
                 self.replace_bound_vars_with_placeholders(&predicate);
             let origin = SubregionOrigin::from_obligation_cause(cause, || {
                 RelateRegionParamBound(cause.span)
diff --git a/compiler/rustc_infer/src/infer/nll_relate/mod.rs b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
index 839891f..9b2ffc7 100644
--- a/compiler/rustc_infer/src/infer/nll_relate/mod.rs
+++ b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
@@ -28,9 +28,9 @@
 use rustc_middle::ty::error::TypeError;
 use rustc_middle::ty::fold::{TypeFoldable, TypeVisitor};
 use rustc_middle::ty::relate::{self, Relate, RelateResult, TypeRelation};
-use rustc_middle::ty::subst::GenericArg;
 use rustc_middle::ty::{self, InferConst, Ty, TyCtxt};
 use std::fmt::Debug;
+use std::ops::ControlFlow;
 
 #[derive(PartialEq)]
 pub enum NormalizationStrategy {
@@ -119,12 +119,6 @@
     fn forbid_inference_vars() -> bool;
 }
 
-#[derive(Clone, Debug)]
-struct ScopesAndKind<'tcx> {
-    scopes: Vec<BoundRegionScope<'tcx>>,
-    kind: GenericArg<'tcx>,
-}
-
 #[derive(Clone, Debug, Default)]
 struct BoundRegionScope<'tcx> {
     map: FxHashMap<ty::BoundRegion, ty::Region<'tcx>>,
@@ -341,7 +335,7 @@
         // been fully instantiated and hence the set of scopes we have
         // doesn't matter -- just to be sure, put an empty vector
         // in there.
-        let old_a_scopes = ::std::mem::take(pair.vid_scopes(self));
+        let old_a_scopes = std::mem::take(pair.vid_scopes(self));
 
         // Relate the generalized kind to the original one.
         let result = pair.relate_generalized_ty(self, generalized_ty);
@@ -643,7 +637,7 @@
         if let (Some(a), Some(b)) = (a.no_bound_vars(), b.no_bound_vars()) {
             // Fast path for the common case.
             self.relate(a, b)?;
-            return Ok(ty::Binder::bind(a));
+            return Ok(ty::Binder::dummy(a));
         }
 
         if self.ambient_covariance() {
@@ -680,7 +674,7 @@
             //   itself occurs. Note that `'b` and `'c` must both
             //   include P. At the point, the call works because of
             //   subtyping (i.e., `&'b u32 <: &{P} u32`).
-            let variance = ::std::mem::replace(&mut self.ambient_variance, ty::Variance::Covariant);
+            let variance = std::mem::replace(&mut self.ambient_variance, ty::Variance::Covariant);
 
             self.relate(a.skip_binder(), b.skip_binder())?;
 
@@ -709,7 +703,7 @@
             // Reset ambient variance to contravariance. See the
             // covariant case above for an explanation.
             let variance =
-                ::std::mem::replace(&mut self.ambient_variance, ty::Variance::Contravariant);
+                std::mem::replace(&mut self.ambient_variance, ty::Variance::Contravariant);
 
             self.relate(a.skip_binder(), b.skip_binder())?;
 
@@ -747,15 +741,15 @@
 }
 
 impl<'me, 'tcx> TypeVisitor<'tcx> for ScopeInstantiator<'me, 'tcx> {
-    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> bool {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ControlFlow<()> {
         self.target_index.shift_in(1);
         t.super_visit_with(self);
         self.target_index.shift_out(1);
 
-        false
+        ControlFlow::CONTINUE
     }
 
-    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
         let ScopeInstantiator { bound_region_scope, next_region, .. } = self;
 
         match r {
@@ -766,7 +760,7 @@
             _ => {}
         }
 
-        false
+        ControlFlow::CONTINUE
     }
 }
 
diff --git a/compiler/rustc_infer/src/infer/outlives/obligations.rs b/compiler/rustc_infer/src/infer/outlives/obligations.rs
index 2851da8..eb1a780 100644
--- a/compiler/rustc_infer/src/infer/outlives/obligations.rs
+++ b/compiler/rustc_infer/src/infer/outlives/obligations.rs
@@ -110,7 +110,7 @@
 
     /// Trait queries just want to pass back type obligations "as is"
     pub fn take_registered_region_obligations(&self) -> Vec<(hir::HirId, RegionObligation<'tcx>)> {
-        ::std::mem::take(&mut self.inner.borrow_mut().region_obligations)
+        std::mem::take(&mut self.inner.borrow_mut().region_obligations)
     }
 
     /// Process the region obligations that must be proven (during
diff --git a/compiler/rustc_infer/src/infer/outlives/verify.rs b/compiler/rustc_infer/src/infer/outlives/verify.rs
index 21b0836..2b827f4 100644
--- a/compiler/rustc_infer/src/infer/outlives/verify.rs
+++ b/compiler/rustc_infer/src/infer/outlives/verify.rs
@@ -1,7 +1,7 @@
 use crate::infer::outlives::env::RegionBoundPairs;
 use crate::infer::{GenericKind, VerifyBound};
 use rustc_data_structures::captures::Captures;
-use rustc_data_structures::mini_set::MiniSet;
+use rustc_data_structures::sso::SsoHashSet;
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst};
 use rustc_middle::ty::{self, Ty, TyCtxt};
@@ -32,7 +32,7 @@
     /// Returns a "verify bound" that encodes what we know about
     /// `generic` and the regions it outlives.
     pub fn generic_bound(&self, generic: GenericKind<'tcx>) -> VerifyBound<'tcx> {
-        let mut visited = MiniSet::new();
+        let mut visited = SsoHashSet::new();
         match generic {
             GenericKind::Param(param_ty) => self.param_bound(param_ty),
             GenericKind::Projection(projection_ty) => {
@@ -44,7 +44,7 @@
     fn type_bound(
         &self,
         ty: Ty<'tcx>,
-        visited: &mut MiniSet<GenericArg<'tcx>>,
+        visited: &mut SsoHashSet<GenericArg<'tcx>>,
     ) -> VerifyBound<'tcx> {
         match *ty.kind() {
             ty::Param(p) => self.param_bound(p),
@@ -148,7 +148,7 @@
     pub fn projection_bound(
         &self,
         projection_ty: ty::ProjectionTy<'tcx>,
-        visited: &mut MiniSet<GenericArg<'tcx>>,
+        visited: &mut SsoHashSet<GenericArg<'tcx>>,
     ) -> VerifyBound<'tcx> {
         debug!("projection_bound(projection_ty={:?})", projection_ty);
 
@@ -186,7 +186,7 @@
     fn recursive_bound(
         &self,
         parent: GenericArg<'tcx>,
-        visited: &mut MiniSet<GenericArg<'tcx>>,
+        visited: &mut SsoHashSet<GenericArg<'tcx>>,
     ) -> VerifyBound<'tcx> {
         let mut bounds = parent
             .walk_shallow(visited)
@@ -328,8 +328,8 @@
         assoc_item_def_id: DefId,
     ) -> impl Iterator<Item = ty::Region<'tcx>> {
         let tcx = self.tcx;
-        let predicates = tcx.projection_predicates(assoc_item_def_id);
-        predicates
+        let bounds = tcx.item_bounds(assoc_item_def_id);
+        bounds
             .into_iter()
             .filter_map(|p| p.to_opt_type_outlives())
             .filter_map(|p| p.no_bound_vars())
diff --git a/compiler/rustc_infer/src/infer/resolve.rs b/compiler/rustc_infer/src/infer/resolve.rs
index 337772d..fe4ba5a 100644
--- a/compiler/rustc_infer/src/infer/resolve.rs
+++ b/compiler/rustc_infer/src/infer/resolve.rs
@@ -3,6 +3,8 @@
 use rustc_middle::ty::fold::{TypeFolder, TypeVisitor};
 use rustc_middle::ty::{self, Const, InferConst, Ty, TyCtxt, TypeFoldable};
 
+use std::ops::ControlFlow;
+
 ///////////////////////////////////////////////////////////////////////////
 // OPPORTUNISTIC VAR RESOLVER
 
@@ -121,7 +123,7 @@
 }
 
 impl<'a, 'tcx> TypeVisitor<'tcx> for UnresolvedTypeFinder<'a, 'tcx> {
-    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
         let t = self.infcx.shallow_resolve(t);
         if t.has_infer_types() {
             if let ty::Infer(infer_ty) = *t.kind() {
@@ -143,7 +145,7 @@
                     None
                 };
                 self.first_unresolved = Some((t, ty_var_span));
-                true // Halt visiting.
+                ControlFlow::BREAK
             } else {
                 // Otherwise, visit its contents.
                 t.super_visit_with(self)
@@ -151,7 +153,7 @@
         } else {
             // All type variables in inference types must already be resolved,
             // - no need to visit the contents, continue visiting.
-            false
+            ControlFlow::CONTINUE
         }
     }
 }
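Several hunks in this patch migrate `TypeVisitor` callbacks from returning `bool` (`true` meant "halt visiting", `false` meant "keep going") to `std::ops::ControlFlow<()>`, whose `Break`/`Continue` variants name the two outcomes explicitly; the `CONTINUE`/`BREAK` shorthands used above are covered by the `control_flow_enum` feature added to lib.rs below. A minimal self-contained visitor showing the same convention on an invented expression tree, not the compiler's `TypeVisitor`, and using the plain `Break(())`/`Continue(())` constructors:

    use std::ops::ControlFlow;

    enum Expr {
        Leaf(i64),
        Add(Box<Expr>, Box<Expr>),
    }

    struct NegativeLeafFinder {
        found: Option<i64>,
    }

    impl NegativeLeafFinder {
        fn visit_expr(&mut self, e: &Expr) -> ControlFlow<()> {
            match e {
                Expr::Leaf(n) if *n < 0 => {
                    self.found = Some(*n);
                    // Was `true` ("halt visiting") in the bool-returning version.
                    ControlFlow::Break(())
                }
                // Was `false` ("keep going").
                Expr::Leaf(_) => ControlFlow::Continue(()),
                Expr::Add(a, b) => {
                    // Propagate an early exit from the left subtree before
                    // descending into the right one.
                    if let ControlFlow::Break(()) = self.visit_expr(a) {
                        return ControlFlow::Break(());
                    }
                    self.visit_expr(b)
                }
            }
        }
    }

    fn main() {
        let e = Expr::Add(Box::new(Expr::Leaf(3)), Box::new(Expr::Leaf(-7)));
        let mut finder = NegativeLeafFinder { found: None };
        let _ = finder.visit_expr(&e);
        assert_eq!(finder.found, Some(-7));
    }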
diff --git a/compiler/rustc_infer/src/lib.rs b/compiler/rustc_infer/src/lib.rs
index ea9a466..3690a88 100644
--- a/compiler/rustc_infer/src/lib.rs
+++ b/compiler/rustc_infer/src/lib.rs
@@ -22,6 +22,7 @@
 #![feature(never_type)]
 #![feature(or_patterns)]
 #![feature(in_band_lifetimes)]
+#![feature(control_flow_enum)]
 #![recursion_limit = "512"] // For rustdoc
 
 #[macro_use]
diff --git a/compiler/rustc_infer/src/traits/error_reporting/mod.rs b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
index f873358..835f75e 100644
--- a/compiler/rustc_infer/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
@@ -2,12 +2,12 @@
 
 use crate::infer::InferCtxt;
 use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder};
+use rustc_errors::{struct_span_err, DiagnosticBuilder};
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::TyCtxt;
 use rustc_span::symbol::Symbol;
-use rustc_span::Span;
+use rustc_span::{MultiSpan, Span};
 use std::fmt;
 
 impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
@@ -54,10 +54,11 @@
         "the trait `{}` cannot be made into an object",
         trait_str
     );
-    err.span_label(span, format!("the trait `{}` cannot be made into an object", trait_str));
+    err.span_label(span, format!("`{}` cannot be made into an object", trait_str));
 
     let mut reported_violations = FxHashSet::default();
-    let mut had_span_label = false;
+    let mut multi_span = vec![];
+    let mut messages = vec![];
     for violation in violations {
         if let ObjectSafetyViolation::SizedSelf(sp) = &violation {
             if !sp.is_empty() {
@@ -71,31 +72,37 @@
             let msg = if trait_span.is_none() || spans.is_empty() {
                 format!("the trait cannot be made into an object because {}", violation.error_msg())
             } else {
-                had_span_label = true;
                 format!("...because {}", violation.error_msg())
             };
             if spans.is_empty() {
                 err.note(&msg);
             } else {
                 for span in spans {
-                    err.span_label(span, &msg);
+                    multi_span.push(span);
+                    messages.push(msg.clone());
                 }
             }
-            match (trait_span, violation.solution()) {
-                (Some(_), Some((note, None))) => {
-                    err.help(&note);
-                }
-                (Some(_), Some((note, Some((sugg, span))))) => {
-                    err.span_suggestion(span, &note, sugg, Applicability::MachineApplicable);
-                }
+            if trait_span.is_some() {
                 // Only provide the help if it's a local trait, otherwise it's not actionable.
-                _ => {}
+                violation.solution(&mut err);
             }
         }
     }
-    if let (Some(trait_span), true) = (trait_span, had_span_label) {
-        err.span_label(trait_span, "this trait cannot be made into an object...");
+    let has_multi_span = !multi_span.is_empty();
+    let mut note_span = MultiSpan::from_spans(multi_span.clone());
+    if let (Some(trait_span), true) = (trait_span, has_multi_span) {
+        note_span
+            .push_span_label(trait_span, "this trait cannot be made into an object...".to_string());
     }
+    for (span, msg) in multi_span.into_iter().zip(messages.into_iter()) {
+        note_span.push_span_label(span, msg);
+    }
+    err.span_note(
+        note_span,
+        "for a trait to be \"object safe\" it needs to allow building a vtable to allow the call \
+         to be resolvable dynamically; for more information visit \
+         <https://doc.rust-lang.org/reference/items/traits.html#object-safety>",
+    );
 
     if tcx.sess.trait_methods_not_found.borrow().contains(&span) {
         // Avoid emitting error caused by non-existing method (#58734)
diff --git a/compiler/rustc_infer/src/traits/mod.rs b/compiler/rustc_infer/src/traits/mod.rs
index a3c4920..aaf5e95 100644
--- a/compiler/rustc_infer/src/traits/mod.rs
+++ b/compiler/rustc_infer/src/traits/mod.rs
@@ -59,9 +59,7 @@
 #[cfg(target_arch = "x86_64")]
 static_assert_size!(PredicateObligation<'_>, 32);
 
-pub type Obligations<'tcx, O> = Vec<Obligation<'tcx, O>>;
 pub type PredicateObligations<'tcx> = Vec<PredicateObligation<'tcx>>;
-pub type TraitObligations<'tcx> = Vec<TraitObligation<'tcx>>;
 
 pub type Selection<'tcx> = ImplSource<'tcx, PredicateObligation<'tcx>>;
 
diff --git a/compiler/rustc_infer/src/traits/project.rs b/compiler/rustc_infer/src/traits/project.rs
index 65284bc..33bddf1 100644
--- a/compiler/rustc_infer/src/traits/project.rs
+++ b/compiler/rustc_infer/src/traits/project.rs
@@ -90,6 +90,7 @@
 pub enum ProjectionCacheEntry<'tcx> {
     InProgress,
     Ambiguous,
+    Recur,
     Error,
     NormalizedTy(NormalizedTy<'tcx>),
 }
@@ -143,7 +144,12 @@
             "ProjectionCacheEntry::insert_ty: adding cache entry: key={:?}, value={:?}",
             key, value
         );
-        let fresh_key = self.map().insert(key, ProjectionCacheEntry::NormalizedTy(value));
+        let mut map = self.map();
+        if let Some(ProjectionCacheEntry::Recur) = map.get(&key) {
+            debug!("Not overwriting Recur");
+            return;
+        }
+        let fresh_key = map.insert(key, ProjectionCacheEntry::NormalizedTy(value));
         assert!(!fresh_key, "never started projecting `{:?}`", key);
     }
 
@@ -197,6 +203,14 @@
         assert!(!fresh, "never started projecting `{:?}`", key);
     }
 
+    /// Indicates that while trying to normalize `key`, `key` was required to
+    /// be normalized again. Selection or evaluation should eventually report
+    /// an error here.
+    pub fn recur(&mut self, key: ProjectionCacheKey<'tcx>) {
+        let fresh = self.map().insert(key, ProjectionCacheEntry::Recur);
+        assert!(!fresh, "never started projecting `{:?}`", key);
+    }
+
     /// Indicates that trying to normalize `key` resulted in
     /// error.
     pub fn error(&mut self, key: ProjectionCacheKey<'tcx>) {
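The new `Recur` entry records that a projection key was requested again while it was still being normalized, and the `insert_ty` change above refuses to overwrite that marker so the cycle remains visible for selection or evaluation to report. A generic sketch of that sentinel pattern over a plain `HashMap`, with placeholder key/value types rather than the compiler's projection-cache types:

    use std::collections::HashMap;

    #[derive(Debug, PartialEq)]
    enum CacheEntry {
        InProgress,
        Recur,
        Done(String),
    }

    struct Cache {
        map: HashMap<&'static str, CacheEntry>,
    }

    impl Cache {
        /// Returns true if the caller should go ahead and normalize `key`.
        fn try_start(&mut self, key: &'static str) -> bool {
            match self.map.get(key) {
                // `key` is already being normalized further up the stack:
                // record the cycle instead of starting over.
                Some(CacheEntry::InProgress) => {
                    self.map.insert(key, CacheEntry::Recur);
                    false
                }
                Some(_) => false,
                None => {
                    self.map.insert(key, CacheEntry::InProgress);
                    true
                }
            }
        }

        fn complete(&mut self, key: &'static str, value: String) {
            // Mirrors the `insert_ty` guard above: a finished result must not
            // overwrite a recorded cycle, which should surface as an error later.
            if let Some(CacheEntry::Recur) = self.map.get(key) {
                return;
            }
            self.map.insert(key, CacheEntry::Done(value));
        }
    }

    fn main() {
        let mut cache = Cache { map: HashMap::new() };
        assert!(cache.try_start("T::Assoc"));
        assert!(!cache.try_start("T::Assoc")); // re-entered: marked `Recur`
        cache.complete("T::Assoc", "u32".to_string());
        assert_eq!(cache.map.get("T::Assoc"), Some(&CacheEntry::Recur));
    }

Once a cycle has been observed, a later "successful" result must not mask it; that is the point of keeping the `Recur` entry in place.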
diff --git a/compiler/rustc_infer/src/traits/structural_impls.rs b/compiler/rustc_infer/src/traits/structural_impls.rs
index c48e58c..1a1c263 100644
--- a/compiler/rustc_infer/src/traits/structural_impls.rs
+++ b/compiler/rustc_infer/src/traits/structural_impls.rs
@@ -4,6 +4,7 @@
 use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
 
 use std::fmt;
+use std::ops::ControlFlow;
 
 // Structural impls for the structs in `traits`.
 
@@ -68,7 +69,7 @@
         }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         self.predicate.visit_with(visitor)
     }
 }
diff --git a/compiler/rustc_infer/src/traits/util.rs b/compiler/rustc_infer/src/traits/util.rs
index 9c0d934..f6ef984 100644
--- a/compiler/rustc_infer/src/traits/util.rs
+++ b/compiler/rustc_infer/src/traits/util.rs
@@ -4,7 +4,6 @@
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::ty::outlives::Component;
 use rustc_middle::ty::{self, ToPredicate, TyCtxt, WithConstness};
-use rustc_span::Span;
 
 pub fn anonymize_predicate<'tcx>(
     tcx: TyCtxt<'tcx>,
@@ -94,7 +93,11 @@
     tcx: TyCtxt<'tcx>,
     predicates: impl Iterator<Item = ty::Predicate<'tcx>>,
 ) -> Elaborator<'tcx> {
-    let obligations = predicates.map(|predicate| predicate_obligation(predicate, None)).collect();
+    let obligations = predicates
+        .map(|predicate| {
+            predicate_obligation(predicate, ty::ParamEnv::empty(), ObligationCause::dummy())
+        })
+        .collect();
     elaborate_obligations(tcx, obligations)
 }
 
@@ -109,15 +112,10 @@
 
 fn predicate_obligation<'tcx>(
     predicate: ty::Predicate<'tcx>,
-    span: Option<Span>,
+    param_env: ty::ParamEnv<'tcx>,
+    cause: ObligationCause<'tcx>,
 ) -> PredicateObligation<'tcx> {
-    let cause = if let Some(span) = span {
-        ObligationCause::dummy_with_span(span)
-    } else {
-        ObligationCause::dummy()
-    };
-
-    Obligation { cause, param_env: ty::ParamEnv::empty(), recursion_depth: 0, predicate }
+    Obligation { cause, param_env, recursion_depth: 0, predicate }
 }
 
 impl Elaborator<'tcx> {
@@ -128,15 +126,17 @@
     fn elaborate(&mut self, obligation: &PredicateObligation<'tcx>) {
         let tcx = self.visited.tcx;
 
-        match obligation.predicate.skip_binders() {
+        let bound_predicate = obligation.predicate.bound_atom();
+        match bound_predicate.skip_binder() {
             ty::PredicateAtom::Trait(data, _) => {
                 // Get predicates declared on the trait.
                 let predicates = tcx.super_predicates_of(data.def_id());
 
-                let obligations = predicates.predicates.iter().map(|&(pred, span)| {
+                let obligations = predicates.predicates.iter().map(|&(pred, _)| {
                     predicate_obligation(
-                        pred.subst_supertrait(tcx, &ty::Binder::bind(data.trait_ref)),
-                        Some(span),
+                        pred.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref)),
+                        obligation.param_env,
+                        obligation.cause.clone(),
                     )
                 });
                 debug!("super_predicates: data={:?}", data);
@@ -233,7 +233,13 @@
                         })
                         .map(|predicate_kind| predicate_kind.to_predicate(tcx))
                         .filter(|&predicate| visited.insert(predicate))
-                        .map(|predicate| predicate_obligation(predicate, None)),
+                        .map(|predicate| {
+                            predicate_obligation(
+                                predicate,
+                                obligation.param_env,
+                                obligation.cause.clone(),
+                            )
+                        }),
                 );
             }
             ty::PredicateAtom::TypeWellFormedFromEnv(..) => {
diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs
index 77d80bb..fc227b9 100644
--- a/compiler/rustc_interface/src/passes.rs
+++ b/compiler/rustc_interface/src/passes.rs
@@ -205,7 +205,10 @@
         }
     });
 
-    Ok((krate, Lrc::new(lint_store)))
+    let lint_store = Lrc::new(lint_store);
+    sess.init_lint_store(lint_store.clone());
+
+    Ok((krate, lint_store))
 }
 
 fn pre_expansion_lint(sess: &Session, lint_store: &LintStore, krate: &ast::Crate) {
@@ -568,6 +571,10 @@
             .map(|fmap| escape_dep_filename(&fmap.unmapped_path.as_ref().unwrap_or(&fmap.name)))
             .collect();
 
+        if let Some(ref backend) = sess.opts.debugging_opts.codegen_backend {
+            files.push(backend.to_string());
+        }
+
         if sess.binary_dep_depinfo() {
             boxed_resolver.borrow().borrow_mut().access(|resolver| {
                 for cnum in resolver.cstore().crates_untracked() {
diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs
index 8b82217..1de7350 100644
--- a/compiler/rustc_interface/src/queries.rs
+++ b/compiler/rustc_interface/src/queries.rs
@@ -3,6 +3,7 @@
 
 use rustc_ast as ast;
 use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_data_structures::svh::Svh;
 use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal};
 use rustc_errors::ErrorReported;
 use rustc_hir::def_id::LOCAL_CRATE;
@@ -13,7 +14,8 @@
 use rustc_middle::dep_graph::DepGraph;
 use rustc_middle::ty::steal::Steal;
 use rustc_middle::ty::{GlobalCtxt, ResolverOutputs, TyCtxt};
-use rustc_session::config::{OutputFilenames, OutputType};
+use rustc_serialize::json;
+use rustc_session::config::{self, OutputFilenames, OutputType};
 use rustc_session::{output::find_crate_name, Session};
 use rustc_span::symbol::sym;
 use std::any::Any;
@@ -331,6 +333,7 @@
     pub fn linker(&'tcx self) -> Result<Linker> {
         let dep_graph = self.dep_graph()?;
         let prepare_outputs = self.prepare_outputs()?;
+        let crate_hash = self.global_ctxt()?.peek_mut().enter(|tcx| tcx.crate_hash(LOCAL_CRATE));
         let ongoing_codegen = self.ongoing_codegen()?;
 
         let sess = self.session().clone();
@@ -340,6 +343,7 @@
             sess,
             dep_graph: dep_graph.peek().clone(),
             prepare_outputs: prepare_outputs.take(),
+            crate_hash,
             ongoing_codegen: ongoing_codegen.take(),
             codegen_backend,
         })
@@ -350,18 +354,31 @@
     sess: Lrc<Session>,
     dep_graph: DepGraph,
     prepare_outputs: OutputFilenames,
+    crate_hash: Svh,
     ongoing_codegen: Box<dyn Any>,
     codegen_backend: Lrc<Box<dyn CodegenBackend>>,
 }
 
 impl Linker {
     pub fn link(self) -> Result<()> {
-        let codegen_results =
-            self.codegen_backend.join_codegen(self.ongoing_codegen, &self.sess, &self.dep_graph)?;
-        let prof = self.sess.prof.clone();
+        let (codegen_results, work_products) =
+            self.codegen_backend.join_codegen(self.ongoing_codegen, &self.sess)?;
+
+        self.sess.compile_status()?;
+
+        let sess = &self.sess;
         let dep_graph = self.dep_graph;
+        sess.time("serialize_work_products", || {
+            rustc_incremental::save_work_product_index(&sess, &dep_graph, work_products)
+        });
+
+        let prof = self.sess.prof.clone();
         prof.generic_activity("drop_dep_graph").run(move || drop(dep_graph));
 
+        // Now that we won't touch anything in the incremental compilation directory
+        // any more, we can finalize it (which involves renaming it)
+        rustc_incremental::finalize_session_directory(&self.sess, self.crate_hash);
+
         if !self
             .sess
             .opts
@@ -371,6 +388,19 @@
         {
             return Ok(());
         }
+
+        if sess.opts.debugging_opts.no_link {
+            // FIXME: use a binary format to encode the `.rlink` file
+            let rlink_data = json::encode(&codegen_results).map_err(|err| {
+                sess.fatal(&format!("failed to encode rlink: {}", err));
+            })?;
+            let rlink_file = self.prepare_outputs.with_extension(config::RLINK_EXT);
+            std::fs::write(&rlink_file, rlink_data).map_err(|err| {
+                sess.fatal(&format!("failed to write file {}: {}", rlink_file.display(), err));
+            })?;
+            return Ok(());
+        }
+
         self.codegen_backend.link(&self.sess, codegen_results, &self.prepare_outputs)
     }
 }
diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs
index 07ce9d0..1fc2d28 100644
--- a/compiler/rustc_interface/src/tests.rs
+++ b/compiler/rustc_interface/src/tests.rs
@@ -477,6 +477,7 @@
     untracked!(dump_mir_dir, String::from("abc"));
     untracked!(dump_mir_exclude_pass_number, true);
     untracked!(dump_mir_graphviz, true);
+    untracked!(emit_future_incompat_report, true);
     untracked!(emit_stack_sizes, true);
     untracked!(hir_stats, true);
     untracked!(identify_regions, true);
@@ -550,8 +551,11 @@
     tracked!(force_overflow_checks, Some(true));
     tracked!(force_unstable_if_unmarked, true);
     tracked!(fuel, Some(("abc".to_string(), 99)));
+    tracked!(function_sections, Some(false));
     tracked!(human_readable_cgu_names, true);
     tracked!(inline_in_all_cgus, Some(true));
+    tracked!(inline_mir_threshold, 123);
+    tracked!(inline_mir_hint_threshold, 123);
     tracked!(insert_sideeffect, true);
     tracked!(instrument_coverage, true);
     tracked!(instrument_mcount, true);
@@ -572,6 +576,7 @@
     tracked!(print_fuel, Some("abc".to_string()));
     tracked!(profile, true);
     tracked!(profile_emit, Some(PathBuf::from("abc")));
+    tracked!(relax_elf_relocations, Some(true));
     tracked!(relro_level, Some(RelroLevel::Full));
     tracked!(report_delayed_bugs, true);
     tracked!(run_dsymutil, false);
@@ -585,6 +590,7 @@
     tracked!(symbol_mangling_version, SymbolManglingVersion::V0);
     tracked!(teach, true);
     tracked!(thinlto, Some(true));
+    tracked!(tune_cpu, Some(String::from("abc")));
     tracked!(tls_model, Some(TlsModel::GeneralDynamic));
     tracked!(treat_err_as_bug, Some(1));
     tracked!(unleash_the_miri_inside_of_you, true);
diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs
index 7ace707..d9ec6d5 100644
--- a/compiler/rustc_interface/src/util.rs
+++ b/compiler/rustc_interface/src/util.rs
@@ -24,11 +24,13 @@
 use rustc_span::symbol::{sym, Symbol};
 use smallvec::SmallVec;
 use std::env;
+use std::env::consts::{DLL_PREFIX, DLL_SUFFIX};
 use std::io::{self, Write};
 use std::lazy::SyncOnceCell;
 use std::mem;
 use std::ops::DerefMut;
 use std::path::{Path, PathBuf};
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Mutex, Once};
 #[cfg(not(parallel_compiler))]
 use std::{panic, thread};
@@ -113,6 +115,11 @@
         Ok(())
     }
 }
+impl io::LocalOutput for Sink {
+    fn clone_box(&self) -> Box<dyn io::LocalOutput> {
+        Box::new(Self(self.0.clone()))
+    }
+}
 
 /// Like a `thread::Builder::spawn` followed by a `join()`, but avoids the need
 /// for `'static` bounds.
@@ -187,7 +194,7 @@
         config = config.stack_size(size);
     }
 
-    let with_pool = move |pool: &rayon::ThreadPool| pool.install(move || f());
+    let with_pool = move |pool: &rayon::ThreadPool| pool.install(f);
 
     rustc_span::with_session_globals(edition, || {
         rustc_span::SESSION_GLOBALS.with(|session_globals| {
@@ -238,7 +245,19 @@
     static mut LOAD: fn() -> Box<dyn CodegenBackend> = || unreachable!();
 
     INIT.call_once(|| {
-        let codegen_name = sopts.debugging_opts.codegen_backend.as_deref().unwrap_or("llvm");
+        #[cfg(feature = "llvm")]
+        const DEFAULT_CODEGEN_BACKEND: &str = "llvm";
+
+        #[cfg(not(feature = "llvm"))]
+        const DEFAULT_CODEGEN_BACKEND: &str = "cranelift";
+
+        let codegen_name = sopts
+            .debugging_opts
+            .codegen_backend
+            .as_ref()
+            .map(|name| &name[..])
+            .unwrap_or(DEFAULT_CODEGEN_BACKEND);
+
         let backend = match codegen_name {
             filename if filename.contains('.') => load_backend_from_dylib(filename.as_ref()),
             codegen_name => get_builtin_codegen_backend(codegen_name),
@@ -367,15 +386,101 @@
 }
 
 pub fn get_builtin_codegen_backend(backend_name: &str) -> fn() -> Box<dyn CodegenBackend> {
-    #[cfg(feature = "llvm")]
-    {
-        if backend_name == "llvm" {
-            return rustc_codegen_llvm::LlvmCodegenBackend::new;
+    match backend_name {
+        #[cfg(feature = "llvm")]
+        "llvm" => rustc_codegen_llvm::LlvmCodegenBackend::new,
+        _ => get_codegen_sysroot(backend_name),
+    }
+}
+
+pub fn get_codegen_sysroot(backend_name: &str) -> fn() -> Box<dyn CodegenBackend> {
+    // For now we only allow this function to be called once as it'll dlopen a
+    // few things, which seems to work best if we only do that once. In
+    // general this assertion never trips due to the once guard in `get_codegen_backend`,
+    // but there are a few manual calls to this function in this file that we
+    // protect against.
+    static LOADED: AtomicBool = AtomicBool::new(false);
+    assert!(
+        !LOADED.fetch_or(true, Ordering::SeqCst),
+        "cannot load the default codegen backend twice"
+    );
+
+    let target = session::config::host_triple();
+    let sysroot_candidates = sysroot_candidates();
+
+    let sysroot = sysroot_candidates
+        .iter()
+        .map(|sysroot| {
+            let libdir = filesearch::relative_target_lib_path(&sysroot, &target);
+            sysroot.join(libdir).with_file_name("codegen-backends")
+        })
+        .find(|f| {
+            info!("codegen backend candidate: {}", f.display());
+            f.exists()
+        });
+    let sysroot = sysroot.unwrap_or_else(|| {
+        let candidates = sysroot_candidates
+            .iter()
+            .map(|p| p.display().to_string())
+            .collect::<Vec<_>>()
+            .join("\n* ");
+        let err = format!(
+            "failed to find a `codegen-backends` folder \
+                           in the sysroot candidates:\n* {}",
+            candidates
+        );
+        early_error(ErrorOutputType::default(), &err);
+    });
+    info!("probing {} for a codegen backend", sysroot.display());
+
+    let d = sysroot.read_dir().unwrap_or_else(|e| {
+        let err = format!(
+            "failed to load default codegen backend, couldn't \
+                           read `{}`: {}",
+            sysroot.display(),
+            e
+        );
+        early_error(ErrorOutputType::default(), &err);
+    });
+
+    let mut file: Option<PathBuf> = None;
+
+    let expected_name =
+        format!("rustc_codegen_{}-{}", backend_name, release_str().expect("CFG_RELEASE"));
+    for entry in d.filter_map(|e| e.ok()) {
+        let path = entry.path();
+        let filename = match path.file_name().and_then(|s| s.to_str()) {
+            Some(s) => s,
+            None => continue,
+        };
+        if !(filename.starts_with(DLL_PREFIX) && filename.ends_with(DLL_SUFFIX)) {
+            continue;
         }
+        let name = &filename[DLL_PREFIX.len()..filename.len() - DLL_SUFFIX.len()];
+        if name != expected_name {
+            continue;
+        }
+        if let Some(ref prev) = file {
+            let err = format!(
+                "duplicate codegen backends found\n\
+                               first:  {}\n\
+                               second: {}\n\
+            ",
+                prev.display(),
+                path.display()
+            );
+            early_error(ErrorOutputType::default(), &err);
+        }
+        file = Some(path.clone());
     }
 
-    let err = format!("unsupported builtin codegen backend `{}`", backend_name);
-    early_error(ErrorOutputType::default(), &err);
+    match file {
+        Some(ref s) => load_backend_from_dylib(s),
+        None => {
+            let err = format!("unsupported builtin codegen backend `{}`", backend_name);
+            early_error(ErrorOutputType::default(), &err);
+        }
+    }
 }
 
 pub(crate) fn compute_crate_disambiguator(session: &Session) -> CrateDisambiguator {
@@ -775,10 +880,24 @@
             })
         }
     }
+}
 
-    // in general the pretty printer processes unexpanded code, so
-    // we override the default `visit_mac` method which panics.
-    fn visit_mac(&mut self, mac: &mut ast::MacCall) {
-        noop_visit_mac(mac, self)
-    }
+/// Returns a version string such as "rustc 1.46.0 (04488afe3 2020-08-24)"
+pub fn version_str() -> Option<&'static str> {
+    option_env!("CFG_VERSION")
+}
+
+/// Returns a version string such as "0.12.0-dev".
+pub fn release_str() -> Option<&'static str> {
+    option_env!("CFG_RELEASE")
+}
+
+/// Returns the full SHA1 hash of HEAD of the Git repo from which rustc was built.
+pub fn commit_hash_str() -> Option<&'static str> {
+    option_env!("CFG_VER_HASH")
+}
+
+/// Returns the "commit date" of HEAD of the Git repo from which rustc was built as a static string.
+pub fn commit_date_str() -> Option<&'static str> {
+    option_env!("CFG_VER_DATE")
 }
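
For reference, the file name that `get_codegen_sysroot` above searches for inside the sysroot's `codegen-backends` directory is the platform DLL prefix/suffix wrapped around `rustc_codegen_<backend>-<release>`. A small sketch of that expectation (the release string below is only an illustrative value):

```rust
use std::env::consts::{DLL_PREFIX, DLL_SUFFIX};

// Mirrors the `expected_name` plus DLL_PREFIX/DLL_SUFFIX checks above.
fn expected_backend_filename(backend_name: &str, release: &str) -> String {
    format!("{}rustc_codegen_{}-{}{}", DLL_PREFIX, backend_name, release, DLL_SUFFIX)
}

fn main() {
    // On Linux this prints "librustc_codegen_cranelift-1.49.0.so".
    println!("{}", expected_backend_filename("cranelift", "1.49.0"));
}
```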
diff --git a/compiler/rustc_lexer/src/lib.rs b/compiler/rustc_lexer/src/lib.rs
index d784a86..6539419 100644
--- a/compiler/rustc_lexer/src/lib.rs
+++ b/compiler/rustc_lexer/src/lib.rs
@@ -48,6 +48,7 @@
 }
 
 /// Enum representing common lexeme types.
+// perf note: Changing all `usize` to `u32` doesn't change performance. See #77629
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
 pub enum TokenKind {
     // Multi-char tokens:
@@ -160,6 +161,7 @@
 /// - `r##~"abcde"##`: `InvalidStarter`
 /// - `r###"abcde"##`: `NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11)`
 /// - Too many `#`s (>65535): `TooManyDelimiters`
+// perf note: It doesn't matter that this makes `Token` 36 bytes bigger. See #77629
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
 pub enum RawStrError {
     /// Non `#` characters exist between `r` and `"` eg. `r#~"..`
@@ -236,9 +238,10 @@
     // Note that this set is stable (ie, it doesn't change with different
     // Unicode versions), so it's ok to just hard-code the values.
 
-    match c {
+    matches!(
+        c,
         // Usual ASCII suspects
-        | '\u{0009}' // \t
+        '\u{0009}'   // \t
         | '\u{000A}' // \n
         | '\u{000B}' // vertical tab
         | '\u{000C}' // form feed
@@ -255,9 +258,7 @@
         // Dedicated whitespace characters from Unicode
         | '\u{2028}' // LINE SEPARATOR
         | '\u{2029}' // PARAGRAPH SEPARATOR
-        => true,
-        _ => false,
-    }
+    )
 }
 
 /// True if `c` is valid as a first character of an identifier.
@@ -689,7 +690,12 @@
         let mut max_hashes = 0;
 
         // Count opening '#' symbols.
-        let n_start_hashes = self.eat_while(|c| c == '#');
+        let mut eaten = 0;
+        while self.first() == '#' {
+            eaten += 1;
+            self.bump();
+        }
+        let n_start_hashes = eaten;
 
         // Check that string is started.
         match self.bump() {
@@ -724,16 +730,11 @@
             // Note that this will not consume extra trailing `#` characters:
             // `r###"abcde"####` is lexed as a `RawStr { n_hashes: 3 }`
             // followed by a `#` token.
-            let mut hashes_left = n_start_hashes;
-            let is_closing_hash = |c| {
-                if c == '#' && hashes_left != 0 {
-                    hashes_left -= 1;
-                    true
-                } else {
-                    false
-                }
-            };
-            let n_end_hashes = self.eat_while(is_closing_hash);
+            let mut n_end_hashes = 0;
+            while self.first() == '#' && n_end_hashes < n_start_hashes {
+                n_end_hashes += 1;
+                self.bump();
+            }
 
             if n_end_hashes == n_start_hashes {
                 return (n_start_hashes, None);
@@ -807,17 +808,9 @@
     }
 
     /// Eats symbols while predicate returns true or until the end of file is reached.
-    /// Returns amount of eaten symbols.
-    fn eat_while<F>(&mut self, mut predicate: F) -> usize
-    where
-        F: FnMut(char) -> bool,
-    {
-        let mut eaten: usize = 0;
+    fn eat_while(&mut self, mut predicate: impl FnMut(char) -> bool) {
         while predicate(self.first()) && !self.is_eof() {
-            eaten += 1;
             self.bump();
         }
-
-        eaten
     }
 }
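
As the comments in `raw_double_quoted_string` note, closing `#`s are consumed only up to the number of opening `#`s. A standalone sketch (not the lexer itself) of that rule:

```rust
// Given the text that follows the closing quote, count how many `#`s would be
// consumed as part of the raw string and how many are left for the next token.
fn closing_hashes(after_quote: &str, n_start_hashes: usize) -> (usize, usize) {
    let available = after_quote.chars().take_while(|&c| c == '#').count();
    let consumed = available.min(n_start_hashes);
    (consumed, available - consumed)
}

fn main() {
    // For `r###"abcde"####`: three opening hashes, four hashes after the quote.
    let (consumed, leftover) = closing_hashes("####", 3);
    assert_eq!((consumed, leftover), (3, 1));
    println!("raw string closes after {} hashes; {} trailing `#` becomes its own token", consumed, leftover);
}
```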
diff --git a/compiler/rustc_lint/src/array_into_iter.rs b/compiler/rustc_lint/src/array_into_iter.rs
index e6be082..0b5bd39 100644
--- a/compiler/rustc_lint/src/array_into_iter.rs
+++ b/compiler/rustc_lint/src/array_into_iter.rs
@@ -3,7 +3,7 @@
 use rustc_hir as hir;
 use rustc_middle::ty;
 use rustc_middle::ty::adjustment::{Adjust, Adjustment};
-use rustc_session::lint::FutureIncompatibleInfo;
+use rustc_session::lint::FutureBreakage;
 use rustc_span::symbol::sym;
 
 declare_lint! {
@@ -38,6 +38,9 @@
     @future_incompatible = FutureIncompatibleInfo {
         reference: "issue #66145 <https://github.com/rust-lang/rust/issues/66145>",
         edition: None,
+        future_breakage: Some(FutureBreakage {
+            date: None
+        })
     };
 }
 
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index abd899e..c65cf65 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -44,7 +44,6 @@
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::subst::{GenericArgKind, Subst};
 use rustc_middle::ty::{self, layout::LayoutError, Ty, TyCtxt};
-use rustc_session::lint::FutureIncompatibleInfo;
 use rustc_session::Session;
 use rustc_span::edition::Edition;
 use rustc_span::source_map::Spanned;
@@ -968,7 +967,7 @@
     while let Some(attr) = attrs.next() {
         if attr.is_doc_comment() {
             sugared_span =
-                Some(sugared_span.map_or_else(|| attr.span, |span| span.with_hi(attr.span.hi())));
+                Some(sugared_span.map_or(attr.span, |span| span.with_hi(attr.span.hi())));
         }
 
         if attrs.peek().map(|next_attr| next_attr.is_doc_comment()).unwrap_or_default() {
@@ -994,7 +993,8 @@
     fn check_stmt(&mut self, cx: &EarlyContext<'_>, stmt: &ast::Stmt) {
         let kind = match stmt.kind {
             ast::StmtKind::Local(..) => "statements",
-            ast::StmtKind::Item(..) => "inner items",
+            // Disabled pending discussion in #78306
+            ast::StmtKind::Item(..) => return,
             // expressions will be reported by `check_expr`.
             ast::StmtKind::Empty
             | ast::StmtKind::Semi(_)
@@ -1368,10 +1368,9 @@
             hir::QPath::TypeRelative(ref ty, _) => {
                 // If this is a type variable, we found a `T::Assoc`.
                 match ty.kind {
-                    hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => match path.res {
-                        Res::Def(DefKind::TyParam, _) => true,
-                        _ => false,
-                    },
+                    hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => {
+                        matches!(path.res, Res::Def(DefKind::TyParam, _))
+                    }
                     _ => false,
                 }
             }
@@ -2288,12 +2287,20 @@
                             n, n,
                         ));
                     }
+                    if HAS_MIN_FEATURES.contains(&name) {
+                        builder.help(&format!(
+                            "consider using `min_{}` instead, which is more stable and complete",
+                            name,
+                        ));
+                    }
                     builder.emit();
                 })
             });
     }
 }
 
+const HAS_MIN_FEATURES: &[Symbol] = &[sym::const_generics, sym::specialization];
+
 declare_lint! {
     /// The `invalid_value` lint detects creating a value that is not valid,
     /// such as a NULL reference.
@@ -2372,10 +2379,9 @@
                         return Some(InitKind::Zeroed);
                     } else if cx.tcx.is_diagnostic_item(sym::mem_uninitialized, def_id) {
                         return Some(InitKind::Uninit);
-                    } else if cx.tcx.is_diagnostic_item(sym::transmute, def_id) {
-                        if is_zero(&args[0]) {
-                            return Some(InitKind::Zeroed);
-                        }
+                    } else if cx.tcx.is_diagnostic_item(sym::transmute, def_id) && is_zero(&args[0])
+                    {
+                        return Some(InitKind::Zeroed);
                     }
                 }
             } else if let hir::ExprKind::MethodCall(_, _, ref args, _) = expr.kind {
@@ -2871,7 +2877,7 @@
     fn check_foreign_item(&mut self, cx: &LateContext<'tcx>, this_fi: &hir::ForeignItem<'_>) {
         trace!("ClashingExternDeclarations: check_foreign_item: {:?}", this_fi);
         if let ForeignItemKind::Fn(..) = this_fi.kind {
-            let tcx = *&cx.tcx;
+            let tcx = cx.tcx;
             if let Some(existing_hid) = self.insert(tcx, this_fi) {
                 let existing_decl_ty = tcx.type_of(tcx.hir().local_def_id(existing_hid));
                 let this_decl_ty = tcx.type_of(tcx.hir().local_def_id(this_fi.hir_id));
diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs
index 7a3035e..4cfeb0d 100644
--- a/compiler/rustc_lint/src/context.rs
+++ b/compiler/rustc_lint/src/context.rs
@@ -22,7 +22,7 @@
 use rustc_ast::util::lev_distance::find_best_match_for_name;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sync;
-use rustc_errors::{struct_span_err, Applicability};
+use rustc_errors::{add_elided_lifetime_in_path_suggestion, struct_span_err, Applicability};
 use rustc_hir as hir;
 use rustc_hir::def::Res;
 use rustc_hir::def_id::{CrateNum, DefId};
@@ -33,9 +33,10 @@
 use rustc_middle::ty::layout::{LayoutError, TyAndLayout};
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, print::Printer, subst::GenericArg, Ty, TyCtxt};
-use rustc_session::lint::{add_elided_lifetime_in_path_suggestion, BuiltinLintDiagnostics};
+use rustc_session::lint::BuiltinLintDiagnostics;
 use rustc_session::lint::{FutureIncompatibleInfo, Level, Lint, LintBuffer, LintId};
 use rustc_session::Session;
+use rustc_session::SessionLintStore;
 use rustc_span::{symbol::Symbol, MultiSpan, Span, DUMMY_SP};
 use rustc_target::abi::LayoutOf;
 
@@ -69,6 +70,20 @@
     lint_groups: FxHashMap<&'static str, LintGroup>,
 }
 
+impl SessionLintStore for LintStore {
+    fn name_to_lint(&self, lint_name: &str) -> LintId {
+        let lints = self
+            .find_lints(lint_name)
+            .unwrap_or_else(|_| panic!("Failed to find lint with name `{}`", lint_name));
+
+        if let &[lint] = lints.as_slice() {
+            return lint;
+        } else {
+            panic!("Found mutliple lints with name `{}`: {:?}", lint_name, lints);
+        }
+    }
+}
+
 /// The target of the `by_name` map, which accounts for renaming/deprecation.
 enum TargetLint {
     /// A direct lint target
@@ -543,7 +558,7 @@
                     anon_lts,
                 ) => {
                     add_elided_lifetime_in_path_suggestion(
-                        sess,
+                        sess.source_map(),
                         &mut db,
                         n,
                         path_span,
@@ -711,10 +726,6 @@
         }
     }
 
-    pub fn current_lint_root(&self) -> hir::HirId {
-        self.last_node_with_lint_attrs
-    }
-
     /// Check if a `DefId`'s path matches the given absolute type path usage.
     ///
     /// Anonymous scopes such as `extern` imports are matched with `kw::Invalid`;
diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs
index 998676d..08c147e 100644
--- a/compiler/rustc_lint/src/early.rs
+++ b/compiler/rustc_lint/src/early.rs
@@ -18,6 +18,7 @@
 use crate::passes::{EarlyLintPass, EarlyLintPassObject};
 use rustc_ast as ast;
 use rustc_ast::visit as ast_visit;
+use rustc_attr::HasAttrs;
 use rustc_session::lint::{BufferedEarlyLint, LintBuffer, LintPass};
 use rustc_session::Session;
 use rustc_span::symbol::Ident;
@@ -119,8 +120,22 @@
     }
 
     fn visit_stmt(&mut self, s: &'a ast::Stmt) {
-        run_early_pass!(self, check_stmt, s);
-        self.check_id(s.id);
+        // Add the statement's lint attributes to our
+        // current state when checking the statement itself.
+        // This allows us to handle attributes like
+        // `#[allow(unused_doc_comments)]`, which apply to
+        // sibling attributes on the same target
+        //
+        // Note that statements get their attributes from
+        // the AST struct that they wrap (e.g. an item)
+        self.with_lint_attrs(s.id, s.attrs(), |cx| {
+            run_early_pass!(cx, check_stmt, s);
+            cx.check_id(s.id);
+        });
+        // The visitor for the AST struct wrapped
+        // by the statement (e.g. `Item`) will call
+        // `with_lint_attrs`, so do this walk
+        // outside of the above `with_lint_attrs` call
         ast_visit::walk_stmt(self, s);
     }
 
@@ -195,6 +210,11 @@
         run_early_pass!(self, check_expr_post, e);
     }
 
+    fn visit_generic_arg(&mut self, arg: &'a ast::GenericArg) {
+        run_early_pass!(self, check_generic_arg, arg);
+        ast_visit::walk_generic_arg(self, arg);
+    }
+
     fn visit_generic_param(&mut self, param: &'a ast::GenericParam) {
         run_early_pass!(self, check_generic_param, param);
         ast_visit::walk_generic_param(self, param);
@@ -250,15 +270,9 @@
         self.check_id(id);
     }
 
-    fn visit_mac(&mut self, mac: &'a ast::MacCall) {
-        // FIXME(#54110): So, this setup isn't really right. I think
-        // that (a) the librustc_ast visitor ought to be doing this as
-        // part of `walk_mac`, and (b) we should be calling
-        // `visit_path`, *but* that would require a `NodeId`, and I
-        // want to get #53686 fixed quickly. -nmatsakis
-        ast_visit::walk_path(self, &mac.path);
-
+    fn visit_mac_call(&mut self, mac: &'a ast::MacCall) {
         run_early_pass!(self, check_mac, mac);
+        ast_visit::walk_mac(self, mac);
     }
 }
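
The behaviour described in `visit_stmt` above can be seen with a statement-level `allow`; a small, standalone illustration in ordinary user code (not compiler internals):

```rust
fn main() {
    // The `allow` is an attribute on the `let` statement, so with the change
    // above it is in scope while the statement itself is linted; the sibling
    // doc comment therefore no longer triggers `unused_doc_comments`.
    #[allow(unused_doc_comments)]
    /// A doc comment on a statement would normally be reported as unused.
    let _x = 1;
}
```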
 
diff --git a/compiler/rustc_lint/src/late.rs b/compiler/rustc_lint/src/late.rs
index a6c04fb..015e109 100644
--- a/compiler/rustc_lint/src/late.rs
+++ b/compiler/rustc_lint/src/late.rs
@@ -174,12 +174,13 @@
     }
 
     fn visit_stmt(&mut self, s: &'tcx hir::Stmt<'tcx>) {
-        // statement attributes are actually just attributes on one of
-        // - item
-        // - local
-        // - expression
-        // so we keep track of lint levels there
-        lint_callback!(self, check_stmt, s);
+        let get_item = |id: hir::ItemId| self.context.tcx.hir().item(id.id);
+        let attrs = &s.kind.attrs(get_item);
+        // See `EarlyContextAndPass::visit_stmt` for an explanation
+        // of why we call `walk_stmt` outside of `with_lint_attrs`
+        self.with_lint_attrs(s.hir_id, attrs, |cx| {
+            lint_callback!(cx, check_stmt, s);
+        });
         hir_visit::walk_stmt(self, s);
     }
 
diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs
index 48254dc..db48700 100644
--- a/compiler/rustc_lint/src/levels.rs
+++ b/compiler/rustc_lint/src/levels.rs
@@ -73,6 +73,7 @@
 
         for &(ref lint_name, level) in &sess.opts.lint_opts {
             store.check_lint_name_cmdline(sess, &lint_name, level);
+            let orig_level = level;
 
             // If the cap is less than this specified level, e.g., if we've got
             // `--cap-lints allow` but we've also got `-D foo` then we ignore
@@ -87,7 +88,7 @@
             };
             for id in ids {
                 self.check_gated_lint(id, DUMMY_SP);
-                let src = LintSource::CommandLine(lint_flag_val);
+                let src = LintSource::CommandLine(lint_flag_val, orig_level);
                 specs.insert(id, (level, src));
             }
         }
@@ -383,7 +384,7 @@
             let forbidden_lint_name = match forbid_src {
                 LintSource::Default => id.to_string(),
                 LintSource::Node(name, _, _) => name.to_string(),
-                LintSource::CommandLine(name) => name.to_string(),
+                LintSource::CommandLine(name, _) => name.to_string(),
             };
             let (lint_attr_name, lint_attr_span) = match *src {
                 LintSource::Node(name, span, _) => (name, span),
@@ -407,7 +408,7 @@
                         diag_builder.note(&rationale.as_str());
                     }
                 }
-                LintSource::CommandLine(_) => {
+                LintSource::CommandLine(_, _) => {
                     diag_builder.note("`forbid` lint level was set on command line");
                 }
             }
@@ -523,6 +524,13 @@
         })
     }
 
+    fn visit_stmt(&mut self, e: &'tcx hir::Stmt<'tcx>) {
+        // We will call `with_lint_attrs` when we walk
+        // the `StmtKind`. The outer statement itself doesn't
+        // define the lint levels.
+        intravisit::walk_stmt(self, e);
+    }
+
     fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
         self.with_lint_attrs(e.hir_id, &e.attrs, |builder| {
             intravisit::walk_expr(builder, e);
diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs
index 33caedf..24bfdad 100644
--- a/compiler/rustc_lint/src/lib.rs
+++ b/compiler/rustc_lint/src/lib.rs
@@ -35,6 +35,9 @@
 #![feature(never_type)]
 #![feature(nll)]
 #![feature(or_patterns)]
+#![feature(half_open_range_patterns)]
+#![feature(exclusive_range_pattern)]
+#![feature(control_flow_enum)]
 #![recursion_limit = "256"]
 
 #[macro_use]
@@ -49,10 +52,12 @@
 mod internal;
 mod late;
 mod levels;
+mod methods;
 mod non_ascii_idents;
 mod nonstandard_style;
 mod passes;
 mod redundant_semicolon;
+mod traits;
 mod types;
 mod unused;
 
@@ -63,8 +68,8 @@
 use rustc_middle::ty::TyCtxt;
 use rustc_session::lint::builtin::{
     BARE_TRAIT_OBJECTS, BROKEN_INTRA_DOC_LINKS, ELIDED_LIFETIMES_IN_PATHS,
-    EXPLICIT_OUTLIVES_REQUIREMENTS, INVALID_CODEBLOCK_ATTRIBUTES, MISSING_DOC_CODE_EXAMPLES,
-    PRIVATE_DOC_TESTS,
+    EXPLICIT_OUTLIVES_REQUIREMENTS, INVALID_CODEBLOCK_ATTRIBUTES, INVALID_HTML_TAGS,
+    MISSING_DOC_CODE_EXAMPLES, NON_AUTOLINKS, PRIVATE_DOC_TESTS,
 };
 use rustc_span::symbol::{Ident, Symbol};
 use rustc_span::Span;
@@ -72,9 +77,11 @@
 use array_into_iter::ArrayIntoIter;
 use builtin::*;
 use internal::*;
+use methods::*;
 use non_ascii_idents::*;
 use nonstandard_style::*;
 use redundant_semicolon::*;
+use traits::*;
 use types::*;
 use unused::*;
 
@@ -157,6 +164,8 @@
                 MissingDebugImplementations: MissingDebugImplementations::default(),
                 ArrayIntoIter: ArrayIntoIter,
                 ClashingExternDeclarations: ClashingExternDeclarations::new(),
+                DropTraitConstraints: DropTraitConstraints,
+                TemporaryCStringAsPtr: TemporaryCStringAsPtr,
             ]
         );
     };
@@ -304,11 +313,13 @@
 
     add_lint_group!(
         "rustdoc",
+        NON_AUTOLINKS,
         BROKEN_INTRA_DOC_LINKS,
         PRIVATE_INTRA_DOC_LINKS,
         INVALID_CODEBLOCK_ATTRIBUTES,
         MISSING_DOC_CODE_EXAMPLES,
-        PRIVATE_DOC_TESTS
+        PRIVATE_DOC_TESTS,
+        INVALID_HTML_TAGS
     );
 
     // Register renamed and removed lints.
diff --git a/compiler/rustc_lint/src/methods.rs b/compiler/rustc_lint/src/methods.rs
new file mode 100644
index 0000000..8732845
--- /dev/null
+++ b/compiler/rustc_lint/src/methods.rs
@@ -0,0 +1,106 @@
+use crate::LateContext;
+use crate::LateLintPass;
+use crate::LintContext;
+use rustc_hir::{Expr, ExprKind, PathSegment};
+use rustc_middle::ty;
+use rustc_span::{symbol::sym, ExpnKind, Span};
+
+declare_lint! {
+    /// The `temporary_cstring_as_ptr` lint detects getting the inner pointer of
+    /// a temporary `CString`.
+    ///
+    /// ### Example
+    ///
+    /// ```rust
+    /// # #![allow(unused)]
+    /// # use std::ffi::CString;
+    /// let c_str = CString::new("foo").unwrap().as_ptr();
+    /// ```
+    ///
+    /// {{produces}}
+    ///
+    /// ### Explanation
+    ///
+    /// The inner pointer of a `CString` lives only as long as the `CString` it
+    /// points to. Getting the inner pointer of a *temporary* `CString` allows the `CString`
+    /// to be dropped at the end of the statement, as it is not being referenced as far as the type system
+    /// is concerned. This means outside of the statement the pointer will point to freed memory, which
+    /// causes undefined behavior if the pointer is later dereferenced.
+    pub TEMPORARY_CSTRING_AS_PTR,
+    Warn,
+    "detects getting the inner pointer of a temporary `CString`"
+}
+
+declare_lint_pass!(TemporaryCStringAsPtr => [TEMPORARY_CSTRING_AS_PTR]);
+
+fn in_macro(span: Span) -> bool {
+    if span.from_expansion() {
+        !matches!(span.ctxt().outer_expn_data().kind, ExpnKind::Desugaring(..))
+    } else {
+        false
+    }
+}
+
+fn first_method_call<'tcx>(
+    expr: &'tcx Expr<'tcx>,
+) -> Option<(&'tcx PathSegment<'tcx>, &'tcx [Expr<'tcx>])> {
+    if let ExprKind::MethodCall(path, _, args, _) = &expr.kind {
+        if args.iter().any(|e| e.span.from_expansion()) { None } else { Some((path, *args)) }
+    } else {
+        None
+    }
+}
+
+impl<'tcx> LateLintPass<'tcx> for TemporaryCStringAsPtr {
+    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
+        if in_macro(expr.span) {
+            return;
+        }
+
+        match first_method_call(expr) {
+            Some((path, args)) if path.ident.name == sym::as_ptr => {
+                let unwrap_arg = &args[0];
+                let as_ptr_span = path.ident.span;
+                match first_method_call(unwrap_arg) {
+                    Some((path, args))
+                        if path.ident.name == sym::unwrap || path.ident.name == sym::expect =>
+                    {
+                        let source_arg = &args[0];
+                        lint_cstring_as_ptr(cx, as_ptr_span, source_arg, unwrap_arg);
+                    }
+                    _ => return,
+                }
+            }
+            _ => return,
+        }
+    }
+}
+
+fn lint_cstring_as_ptr(
+    cx: &LateContext<'_>,
+    as_ptr_span: Span,
+    source: &rustc_hir::Expr<'_>,
+    unwrap: &rustc_hir::Expr<'_>,
+) {
+    let source_type = cx.typeck_results().expr_ty(source);
+    if let ty::Adt(def, substs) = source_type.kind() {
+        if cx.tcx.is_diagnostic_item(sym::result_type, def.did) {
+            if let ty::Adt(adt, _) = substs.type_at(0).kind() {
+                if cx.tcx.is_diagnostic_item(sym::cstring_type, adt.did) {
+                    cx.struct_span_lint(TEMPORARY_CSTRING_AS_PTR, as_ptr_span, |diag| {
+                        let mut diag = diag
+                            .build("getting the inner pointer of a temporary `CString`");
+                        diag.span_label(as_ptr_span, "this pointer will be invalid");
+                        diag.span_label(
+                            unwrap.span,
+                            "this `CString` is deallocated at the end of the statement, bind it to a variable to extend its lifetime",
+                        );
+                        diag.note("pointers do not have a lifetime; when calling `as_ptr` the `CString` will be deallocated at the end of the statement because nothing is referencing it as far as the type system is concerned");
+                        diag.help("for more information, see https://doc.rust-lang.org/reference/destructors.html");
+                        diag.emit();
+                    });
+                }
+            }
+        }
+    }
+}
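
The new `temporary_cstring_as_ptr` lint points users toward binding the `CString` first; a minimal sketch of the linted pattern and the suggested fix:

```rust
use std::ffi::CString;

fn main() {
    // Linted: the temporary `CString` is dropped at the end of this statement,
    // so `dangling` would point at freed memory.
    // let dangling = CString::new("foo").unwrap().as_ptr();

    // Suggested fix: bind the `CString` so it outlives the pointer.
    let c_string = CString::new("foo").unwrap();
    let ptr = c_string.as_ptr();
    println!("pointer is valid while `c_string` is live: {:p}", ptr);
}
```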
diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs
index b3125f5..f117ce1 100644
--- a/compiler/rustc_lint/src/nonstandard_style.rs
+++ b/compiler/rustc_lint/src/nonstandard_style.rs
@@ -320,7 +320,7 @@
                                             .with_hi(lit.span.hi() - BytePos(right as u32)),
                                     )
                                 })
-                                .unwrap_or_else(|| lit.span);
+                                .unwrap_or(lit.span);
 
                             Some(Ident::new(name, sp))
                         } else {
diff --git a/compiler/rustc_lint/src/passes.rs b/compiler/rustc_lint/src/passes.rs
index 159286c..828f283 100644
--- a/compiler/rustc_lint/src/passes.rs
+++ b/compiler/rustc_lint/src/passes.rs
@@ -33,6 +33,7 @@
             fn check_expr(a: &$hir hir::Expr<$hir>);
             fn check_expr_post(a: &$hir hir::Expr<$hir>);
             fn check_ty(a: &$hir hir::Ty<$hir>);
+            fn check_generic_arg(a: &$hir hir::GenericArg<$hir>);
             fn check_generic_param(a: &$hir hir::GenericParam<$hir>);
             fn check_generics(a: &$hir hir::Generics<$hir>);
             fn check_where_predicate(a: &$hir hir::WherePredicate<$hir>);
@@ -176,6 +177,7 @@
             fn check_expr(a: &ast::Expr);
             fn check_expr_post(a: &ast::Expr);
             fn check_ty(a: &ast::Ty);
+            fn check_generic_arg(a: &ast::GenericArg);
             fn check_generic_param(a: &ast::GenericParam);
             fn check_generics(a: &ast::Generics);
             fn check_where_predicate(a: &ast::WherePredicate);
diff --git a/compiler/rustc_lint/src/redundant_semicolon.rs b/compiler/rustc_lint/src/redundant_semicolon.rs
index a31deb8..84cc7b6 100644
--- a/compiler/rustc_lint/src/redundant_semicolon.rs
+++ b/compiler/rustc_lint/src/redundant_semicolon.rs
@@ -42,6 +42,11 @@
 
 fn maybe_lint_redundant_semis(cx: &EarlyContext<'_>, seq: &mut Option<(Span, bool)>) {
     if let Some((span, multiple)) = seq.take() {
+        // FIXME: Find a better way of ignoring the trailing
+        // semicolon from macro expansion
+        if span == rustc_span::DUMMY_SP {
+            return;
+        }
         cx.struct_span_lint(REDUNDANT_SEMICOLONS, span, |lint| {
             let (msg, rem) = if multiple {
                 ("unnecessary trailing semicolons", "remove these semicolons")
diff --git a/compiler/rustc_lint/src/traits.rs b/compiler/rustc_lint/src/traits.rs
new file mode 100644
index 0000000..d4f7903
--- /dev/null
+++ b/compiler/rustc_lint/src/traits.rs
@@ -0,0 +1,79 @@
+use crate::LateContext;
+use crate::LateLintPass;
+use crate::LintContext;
+use rustc_hir as hir;
+use rustc_span::symbol::sym;
+
+declare_lint! {
+    /// The `drop_bounds` lint checks for generics with `std::ops::Drop` as
+    /// bounds.
+    ///
+    /// ### Example
+    ///
+    /// ```rust
+    /// fn foo<T: Drop>() {}
+    /// ```
+    ///
+    /// {{produces}}
+    ///
+    /// ### Explanation
+    ///
+    /// `Drop` bounds do not really accomplish anything. A type may have
+    /// compiler-generated drop glue without implementing the `Drop` trait
+    /// itself. The `Drop` trait also only has one method, `Drop::drop`, and
+    /// that function is by fiat not callable in user code. So there is really
+    /// no use case for using `Drop` in trait bounds.
+    ///
+    /// The most likely use case of a drop bound is to distinguish between
+    /// types that have destructors and types that don't. Combined with
+    /// specialization, a naive coder would write an implementation that
+    /// assumed a type could be trivially dropped, then write a specialization
+    /// for `T: Drop` that actually calls the destructor. Except that doing so
+    /// is not correct; String, for example, doesn't actually implement Drop,
+    /// but because String contains a Vec, assuming it can be trivially dropped
+    /// will leak memory.
+    pub DROP_BOUNDS,
+    Warn,
+    "bounds of the form `T: Drop` are useless"
+}
+
+declare_lint_pass!(
+    /// Lint for bounds of the form `T: Drop`, which usually
+    /// indicate an attempt to emulate `std::mem::needs_drop`.
+    DropTraitConstraints => [DROP_BOUNDS]
+);
+
+impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints {
+    fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
+        use rustc_middle::ty::PredicateAtom::*;
+
+        let def_id = cx.tcx.hir().local_def_id(item.hir_id);
+        let predicates = cx.tcx.explicit_predicates_of(def_id);
+        for &(predicate, span) in predicates.predicates {
+            let trait_predicate = match predicate.skip_binders() {
+                Trait(trait_predicate, _constness) => trait_predicate,
+                _ => continue,
+            };
+            let def_id = trait_predicate.trait_ref.def_id;
+            if cx.tcx.lang_items().drop_trait() == Some(def_id) {
+                // Explicitly allow `impl Drop`, a drop-guards-as-Voldemort-type pattern.
+                if trait_predicate.trait_ref.self_ty().is_impl_trait() {
+                    continue;
+                }
+                cx.struct_span_lint(DROP_BOUNDS, span, |lint| {
+                    let needs_drop = match cx.tcx.get_diagnostic_item(sym::needs_drop) {
+                        Some(needs_drop) => needs_drop,
+                        None => return,
+                    };
+                    let msg = format!(
+                        "bounds on `{}` are useless, consider instead \
+                         using `{}` to detect if a type has a destructor",
+                        predicate,
+                        cx.tcx.def_path_str(needs_drop)
+                    );
+                    lint.build(&msg).emit()
+                });
+            }
+        }
+    }
+}
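
The `drop_bounds` lint recommends `std::mem::needs_drop` over a `T: Drop` bound; a minimal sketch of that alternative:

```rust
use std::mem;

// Instead of bounding a type parameter with `Drop`, ask the compiler whether
// dropping `T` actually runs any destructor code.
fn report_drop_glue<T>(name: &str) {
    if mem::needs_drop::<T>() {
        println!("{} has drop glue", name);
    } else {
        println!("{} can be dropped trivially", name);
    }
}

fn main() {
    report_drop_glue::<String>("String"); // has drop glue, even though it does not implement `Drop` itself
    report_drop_glue::<u32>("u32");       // no drop glue
}
```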
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 9925444..467a3a4 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -6,7 +6,6 @@
 use rustc_hir as hir;
 use rustc_hir::{is_range_literal, ExprKind, Node};
 use rustc_index::vec::Idx;
-use rustc_middle::mir::interpret::{sign_extend, truncate};
 use rustc_middle::ty::layout::{IntegerExt, SizeSkeleton};
 use rustc_middle::ty::subst::SubstsRef;
 use rustc_middle::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
@@ -18,6 +17,7 @@
 use rustc_target::spec::abi::Abi as SpecAbi;
 
 use std::cmp;
+use std::ops::ControlFlow;
 use tracing::debug;
 
 declare_lint! {
@@ -145,9 +145,9 @@
                     // We need to preserve the literal's suffix,
                     // as it may determine typing information.
                     let suffix = match lit.node {
-                        LitKind::Int(_, LitIntType::Signed(s)) => s.name_str().to_string(),
-                        LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str().to_string(),
-                        LitKind::Int(_, LitIntType::Unsuffixed) => "".to_string(),
+                        LitKind::Int(_, LitIntType::Signed(s)) => s.name_str(),
+                        LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str(),
+                        LitKind::Int(_, LitIntType::Unsuffixed) => "",
                         _ => bug!(),
                     };
                     let suggestion = format!("{}..={}{}", start, lit_val - 1, suffix);
@@ -170,24 +170,25 @@
 // warnings are consistent between 32- and 64-bit platforms.
 fn int_ty_range(int_ty: ast::IntTy) -> (i128, i128) {
     match int_ty {
-        ast::IntTy::Isize => (i64::MIN as i128, i64::MAX as i128),
-        ast::IntTy::I8 => (i8::MIN as i64 as i128, i8::MAX as i128),
-        ast::IntTy::I16 => (i16::MIN as i64 as i128, i16::MAX as i128),
-        ast::IntTy::I32 => (i32::MIN as i64 as i128, i32::MAX as i128),
-        ast::IntTy::I64 => (i64::MIN as i128, i64::MAX as i128),
-        ast::IntTy::I128 => (i128::MIN as i128, i128::MAX),
+        ast::IntTy::Isize => (i64::MIN.into(), i64::MAX.into()),
+        ast::IntTy::I8 => (i8::MIN.into(), i8::MAX.into()),
+        ast::IntTy::I16 => (i16::MIN.into(), i16::MAX.into()),
+        ast::IntTy::I32 => (i32::MIN.into(), i32::MAX.into()),
+        ast::IntTy::I64 => (i64::MIN.into(), i64::MAX.into()),
+        ast::IntTy::I128 => (i128::MIN, i128::MAX),
     }
 }
 
 fn uint_ty_range(uint_ty: ast::UintTy) -> (u128, u128) {
-    match uint_ty {
-        ast::UintTy::Usize => (u64::MIN as u128, u64::MAX as u128),
-        ast::UintTy::U8 => (u8::MIN as u128, u8::MAX as u128),
-        ast::UintTy::U16 => (u16::MIN as u128, u16::MAX as u128),
-        ast::UintTy::U32 => (u32::MIN as u128, u32::MAX as u128),
-        ast::UintTy::U64 => (u64::MIN as u128, u64::MAX as u128),
-        ast::UintTy::U128 => (u128::MIN, u128::MAX),
-    }
+    let max = match uint_ty {
+        ast::UintTy::Usize => u64::MAX.into(),
+        ast::UintTy::U8 => u8::MAX.into(),
+        ast::UintTy::U16 => u16::MAX.into(),
+        ast::UintTy::U32 => u32::MAX.into(),
+        ast::UintTy::U64 => u64::MAX.into(),
+        ast::UintTy::U128 => u128::MAX,
+    };
+    (0, max)
 }
 
 fn get_bin_hex_repr(cx: &LateContext<'_>, lit: &hir::Lit) -> Option<String> {
@@ -216,11 +217,11 @@
     cx.struct_span_lint(OVERFLOWING_LITERALS, expr.span, |lint| {
         let (t, actually) = match ty {
             attr::IntType::SignedInt(t) => {
-                let actually = sign_extend(val, size) as i128;
+                let actually = size.sign_extend(val) as i128;
                 (t.name_str(), actually.to_string())
             }
             attr::IntType::UnsignedInt(t) => {
-                let actually = truncate(val, size);
+                let actually = size.truncate(val);
                 (t.name_str(), actually.to_string())
             }
         };
@@ -304,7 +305,7 @@
     t: ast::IntTy,
     v: u128,
 ) {
-    let int_type = t.normalize(cx.sess().target.ptr_width);
+    let int_type = t.normalize(cx.sess().target.pointer_width);
     let (min, max) = int_ty_range(int_type);
     let max = max as u128;
     let negative = type_limits.negated_expr_id == Some(e.hir_id);
@@ -352,7 +353,7 @@
     lit: &hir::Lit,
     t: ast::UintTy,
 ) {
-    let uint_type = t.normalize(cx.sess().target.ptr_width);
+    let uint_type = t.normalize(cx.sess().target.pointer_width);
     let (min, max) = uint_ty_range(uint_type);
     let lit_val: u128 = match lit.node {
         // _v is u8, within range by definition
@@ -438,7 +439,7 @@
                 cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
                     lint.build(&format!("literal out of range for `{}`", t.name_str()))
                         .note(&format!(
-                            "the literal `{}` does not fit into the type `{}` and will be converted to `std::{}::INFINITY`",
+                            "the literal `{}` does not fit into the type `{}` and will be converted to `{}::INFINITY`",
                             cx.sess()
                                 .source_map()
                                 .span_to_snippet(lit.span)
@@ -543,15 +544,15 @@
         }
 
         fn is_comparison(binop: hir::BinOp) -> bool {
-            match binop.node {
+            matches!(
+                binop.node,
                 hir::BinOpKind::Eq
-                | hir::BinOpKind::Lt
-                | hir::BinOpKind::Le
-                | hir::BinOpKind::Ne
-                | hir::BinOpKind::Ge
-                | hir::BinOpKind::Gt => true,
-                _ => false,
-            }
+                    | hir::BinOpKind::Lt
+                    | hir::BinOpKind::Le
+                    | hir::BinOpKind::Ne
+                    | hir::BinOpKind::Ge
+                    | hir::BinOpKind::Gt
+            )
         }
     }
 }
@@ -1134,11 +1135,11 @@
         };
 
         impl<'a, 'tcx> ty::fold::TypeVisitor<'tcx> for ProhibitOpaqueTypes<'a, 'tcx> {
-            fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+            fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
                 match ty.kind() {
                     ty::Opaque(..) => {
                         self.ty = Some(ty);
-                        true
+                        ControlFlow::BREAK
                     }
                     // Consider opaque types within projections FFI-safe if they do not normalize
                     // to more opaque types.
@@ -1147,7 +1148,11 @@
 
                         // If `ty` is a opaque type directly then `super_visit_with` won't invoke
                         // this function again.
-                        if ty.has_opaque_types() { self.visit_ty(ty) } else { false }
+                        if ty.has_opaque_types() {
+                            self.visit_ty(ty)
+                        } else {
+                            ControlFlow::CONTINUE
+                        }
                     }
                     _ => ty.super_visit_with(self),
                 }
@@ -1232,15 +1237,10 @@
     }
 
     fn is_internal_abi(&self, abi: SpecAbi) -> bool {
-        if let SpecAbi::Rust
-        | SpecAbi::RustCall
-        | SpecAbi::RustIntrinsic
-        | SpecAbi::PlatformIntrinsic = abi
-        {
-            true
-        } else {
-            false
-        }
+        matches!(
+            abi,
+            SpecAbi::Rust | SpecAbi::RustCall | SpecAbi::RustIntrinsic | SpecAbi::PlatformIntrinsic
+        )
     }
 }
 
diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs
index 1e8c300..4bbc180 100644
--- a/compiler/rustc_lint/src/unused.rs
+++ b/compiler/rustc_lint/src/unused.rs
@@ -200,7 +200,7 @@
                 ty::Adt(def, _) => check_must_use_def(cx, def.did, span, descr_pre, descr_post),
                 ty::Opaque(def, _) => {
                     let mut has_emitted = false;
-                    for (predicate, _) in cx.tcx.predicates_of(def).predicates {
+                    for &(predicate, _) in cx.tcx.explicit_item_bounds(def) {
                         // We only look at the `DefId`, so it is safe to skip the binder here.
                         if let ty::PredicateAtom::Trait(ref poly_trait_predicate, _) =
                             predicate.skip_binders()
@@ -250,13 +250,13 @@
                     has_emitted
                 }
                 ty::Array(ty, len) => match len.try_eval_usize(cx.tcx, cx.param_env) {
+                    // If the array is empty we don't lint, to avoid false positives
+                    Some(0) | None => false,
                     // If the array is definitely non-empty, we can do `#[must_use]` checking.
-                    Some(n) if n != 0 => {
+                    Some(n) => {
                         let descr_pre = &format!("{}array{} of ", descr_pre, plural_suffix,);
                         check_must_use_ty(cx, ty, expr, span, descr_pre, descr_post, n as usize + 1)
                     }
-                    // Otherwise, we don't lint, to avoid false positives.
-                    _ => false,
                 },
                 ty::Closure(..) => {
                     cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
@@ -751,13 +751,17 @@
                 if !Self::is_expr_delims_necessary(inner, followed_by_block)
                     && value.attrs.is_empty()
                     && !value.span.from_expansion()
+                    && (ctx != UnusedDelimsCtx::LetScrutineeExpr
+                        || !matches!(inner.kind, ast::ExprKind::Binary(
+                                rustc_span::source_map::Spanned { node, .. },
+                                _,
+                                _,
+                            ) if node.lazy()))
                 {
                     self.emit_unused_delims_expr(cx, value, ctx, left_pos, right_pos)
                 }
             }
             ast::ExprKind::Let(_, ref expr) => {
-                // FIXME(#60336): Properly handle `let true = (false && true)`
-                // actually needing the parenthesis.
                 self.check_unused_delims_expr(
                     cx,
                     expr,
@@ -839,10 +843,6 @@
         }
     }
 
-    fn check_anon_const(&mut self, cx: &EarlyContext<'_>, c: &ast::AnonConst) {
-        self.check_unused_delims_expr(cx, &c.value, UnusedDelimsCtx::AnonConst, false, None, None);
-    }
-
     fn check_stmt(&mut self, cx: &EarlyContext<'_>, s: &ast::Stmt) {
         if let StmtKind::Local(ref local) = s.kind {
             self.check_unused_parens_pat(cx, &local.pat, false, false);
@@ -965,13 +965,6 @@
                         if !Self::is_expr_delims_necessary(expr, followed_by_block)
                             && (ctx != UnusedDelimsCtx::AnonConst
                                 || matches!(expr.kind, ast::ExprKind::Lit(_)))
-                            // array length expressions are checked during `check_anon_const` and `check_ty`,
-                            // once as `ArrayLenExpr` and once as `AnonConst`.
-                            //
-                            // As we do not want to lint this twice, we do not emit an error for
-                            // `ArrayLenExpr` if `AnonConst` would do the same.
-                            && (ctx != UnusedDelimsCtx::ArrayLenExpr
-                                || !matches!(expr.kind, ast::ExprKind::Lit(_)))
                             && !cx.sess().source_map().is_multiline(value.span)
                             && value.attrs.is_empty()
                             && !value.span.from_expansion()
@@ -999,21 +992,54 @@
 }
 
 impl EarlyLintPass for UnusedBraces {
-    fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
-        <Self as UnusedDelimLint>::check_expr(self, cx, e)
-    }
-
-    fn check_anon_const(&mut self, cx: &EarlyContext<'_>, c: &ast::AnonConst) {
-        self.check_unused_delims_expr(cx, &c.value, UnusedDelimsCtx::AnonConst, false, None, None);
-    }
-
     fn check_stmt(&mut self, cx: &EarlyContext<'_>, s: &ast::Stmt) {
         <Self as UnusedDelimLint>::check_stmt(self, cx, s)
     }
 
+    fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
+        <Self as UnusedDelimLint>::check_expr(self, cx, e);
+
+        if let ExprKind::Repeat(_, ref anon_const) = e.kind {
+            self.check_unused_delims_expr(
+                cx,
+                &anon_const.value,
+                UnusedDelimsCtx::AnonConst,
+                false,
+                None,
+                None,
+            );
+        }
+    }
+
+    fn check_generic_arg(&mut self, cx: &EarlyContext<'_>, arg: &ast::GenericArg) {
+        if let ast::GenericArg::Const(ct) = arg {
+            self.check_unused_delims_expr(
+                cx,
+                &ct.value,
+                UnusedDelimsCtx::AnonConst,
+                false,
+                None,
+                None,
+            );
+        }
+    }
+
+    fn check_variant(&mut self, cx: &EarlyContext<'_>, v: &ast::Variant) {
+        if let Some(anon_const) = &v.disr_expr {
+            self.check_unused_delims_expr(
+                cx,
+                &anon_const.value,
+                UnusedDelimsCtx::AnonConst,
+                false,
+                None,
+                None,
+            );
+        }
+    }
+
     fn check_ty(&mut self, cx: &EarlyContext<'_>, ty: &ast::Ty) {
-        if let &ast::TyKind::Paren(ref r) = &ty.kind {
-            if let ast::TyKind::Array(_, ref len) = r.kind {
+        match ty.kind {
+            ast::TyKind::Array(_, ref len) => {
                 self.check_unused_delims_expr(
                     cx,
                     &len.value,
@@ -1023,6 +1049,19 @@
                     None,
                 );
             }
+
+            ast::TyKind::Typeof(ref anon_const) => {
+                self.check_unused_delims_expr(
+                    cx,
+                    &anon_const.value,
+                    UnusedDelimsCtx::AnonConst,
+                    false,
+                    None,
+                    None,
+                );
+            }
+
+            _ => {}
         }
     }
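
The `unused_parens` carve-out above keeps parentheses around lazy boolean operators in `let` scrutinees (the case from issue #60336); a small illustration:

```rust
fn main() {
    // The parentheses are not reported as unused: they keep the `&&` inside
    // the scrutinee expression rather than letting it read as part of the
    // surrounding `if` condition.
    if let true = (false && true) {
        unreachable!();
    } else {
        println!("scrutinee evaluated to false");
    }
}
```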
 
diff --git a/compiler/rustc_lint_defs/Cargo.toml b/compiler/rustc_lint_defs/Cargo.toml
new file mode 100644
index 0000000..7f90808
--- /dev/null
+++ b/compiler/rustc_lint_defs/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "rustc_lint_defs"
+version = "0.0.0"
+edition = "2018"
+
+[dependencies]
+log = { package = "tracing", version = "0.1" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_span = { path = "../rustc_span" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_macros = { path = "../rustc_macros" }
diff --git a/compiler/rustc_session/src/lint/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs
similarity index 95%
rename from compiler/rustc_session/src/lint/builtin.rs
rename to compiler/rustc_lint_defs/src/builtin.rs
index 6133f4d..ad53f23 100644
--- a/compiler/rustc_session/src/lint/builtin.rs
+++ b/compiler/rustc_lint_defs/src/builtin.rs
@@ -4,7 +4,6 @@
 //! compiler code, rather than using their own custom pass. Those
 //! lints are all available in `rustc_lint::builtin`.
 
-use crate::lint::FutureIncompatibleInfo;
 use crate::{declare_lint, declare_lint_pass, declare_tool_lint};
 use rustc_span::edition::Edition;
 use rustc_span::symbol::sym;
@@ -518,7 +517,7 @@
     /// ### Example
     ///
     /// ```rust
-    /// #![macro_export]
+    /// #![ignore]
     /// ```
     ///
     /// {{produces}}
@@ -1230,9 +1229,9 @@
 }
 
 declare_lint! {
-    /// The `missing_fragment_specifier` lint is issued when an unused pattern
-    /// in a `macro_rules!` macro definition has a meta-variable (e.g. `$e`)
-    /// that is not followed by a fragment specifier (e.g. `:expr`).
+    /// The `missing_fragment_specifier` lint is issued when an unused pattern in a
+    /// `macro_rules!` macro definition has a meta-variable (e.g. `$e`) that is not
+    /// followed by a fragment specifier (e.g. `:expr`).
     ///
     /// This warning can always be fixed by removing the unused pattern in the
     /// `macro_rules!` macro definition.
@@ -1241,17 +1240,29 @@
     ///
     /// ```rust,compile_fail
     /// macro_rules! foo {
-    ///    ($e) => {}
+    ///    () => {};
+    ///    ($name) => { };
+    /// }
+    ///
+    /// fn main() {
+    ///    foo!();
     /// }
     /// ```
     ///
     /// {{produces}}
     ///
-    ///
     /// ### Explanation
     ///
-    /// The meta-variable (`$e` above) lacks a fragment specifier, which is a
-    /// malformed input. It can be fixed by adding a fragment specifier.
+    /// To fix this, remove the unused pattern from the `macro_rules!` macro definition:
+    ///
+    /// ```rust
+    /// macro_rules! foo {
+    ///     () => {};
+    /// }
+    /// fn main() {
+    ///     foo!();
+    /// }
+    /// ```
     pub MISSING_FRAGMENT_SPECIFIER,
     Deny,
     "detects missing fragment specifiers in unused `macro_rules!` patterns",
@@ -1914,6 +1925,27 @@
 }
 
 declare_lint! {
+    /// The `invalid_html_tags` lint detects invalid HTML tags. This is a
+    /// `rustdoc` only lint, see the documentation in the [rustdoc book].
+    ///
+    /// [rustdoc book]: ../../../rustdoc/lints.html#invalid_html_tags
+    pub INVALID_HTML_TAGS,
+    Allow,
+    "detects invalid HTML tags in doc comments"
+}
+
+declare_lint! {
+    /// The `non_autolinks` lint detects when a URL could be written using
+    /// only angle brackets. This is a `rustdoc` only lint, see the
+    /// documentation in the [rustdoc book].
+    ///
+    /// [rustdoc book]: ../../../rustdoc/lints.html#non_autolinks
+    pub NON_AUTOLINKS,
+    Warn,
+    "detects URLs that could be written using only angle brackets"
+}
+
+declare_lint! {
     /// The `where_clauses_object_safety` lint detects for [object safety] of
     /// [where clauses].
     ///
@@ -2669,6 +2701,91 @@
     };
 }
 
+declare_lint! {
+    /// The `function_item_references` lint detects function references that are
+    /// formatted with [`fmt::Pointer`] or transmuted.
+    ///
+    /// [`fmt::Pointer`]: https://doc.rust-lang.org/std/fmt/trait.Pointer.html
+    ///
+    /// ### Example
+    ///
+    /// ```rust
+    /// fn foo() { }
+    ///
+    /// fn main() {
+    ///     println!("{:p}", &foo);
+    /// }
+    /// ```
+    ///
+    /// {{produces}}
+    ///
+    /// ### Explanation
+    ///
+    /// Taking a reference to a function may be mistaken as a way to obtain a
+    /// pointer to that function. This can give unexpected results when
+    /// formatting the reference as a pointer or transmuting it. This lint is
+    /// issued when function references are formatted as pointers, passed as
+    /// arguments bound by [`fmt::Pointer`] or transmuted.
+    pub FUNCTION_ITEM_REFERENCES,
+    Warn,
+    "suggest casting to a function pointer when attempting to take references to function items",
+}
+
+declare_lint! {
+    /// The `uninhabited_static` lint detects uninhabited statics.
+    ///
+    /// ### Example
+    ///
+    /// ```rust
+    /// enum Void {}
+    /// extern {
+    ///     static EXTERN: Void;
+    /// }
+    /// ```
+    ///
+    /// {{produces}}
+    ///
+    /// ### Explanation
+    ///
+    /// Statics with an uninhabited type can never be initialized, so they are impossible to define.
+    /// However, this can be side-stepped with an `extern static`, leading to problems later in the
+    /// compiler, which assumes that there are no initialized uninhabited places (such as locals or
+    /// statics). This was accidentally allowed, but is being phased out.
+    pub UNINHABITED_STATIC,
+    Warn,
+    "uninhabited static",
+    @future_incompatible = FutureIncompatibleInfo {
+        reference: "issue #74840 <https://github.com/rust-lang/rust/issues/74840>",
+        edition: None,
+    };
+}
+
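A minimal sketch of one way to avoid the flagged pattern, using the usual opaque-type FFI idiom (illustrative only, not from this patch): keep the extern symbol inhabited and hand out raw pointers instead of values of an uninhabited type.

```rust
// Illustrative only: an inhabited, opaque stand-in for the foreign symbol.
#[repr(C)]
struct Opaque {
    _private: [u8; 0],
}

extern "C" {
    static EXTERN: Opaque;
}

fn extern_ptr() -> *const Opaque {
    // Taking the address never materializes an uninhabited value.
    unsafe { &EXTERN as *const Opaque }
}
```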
+declare_lint! {
+    /// The `useless_deprecated` lint detects deprecation attributes with no effect.
+    ///
+    /// ### Example
+    ///
+    /// ```rust,compile_fail
+    /// struct X;
+    ///
+    /// #[deprecated = "message"]
+    /// impl Default for X {
+    ///     fn default() -> Self {
+    ///         X
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// {{produces}}
+    ///
+    /// ### Explanation
+    ///
+    /// Deprecation attributes have no effect on trait implementations.
+    pub USELESS_DEPRECATED,
+    Deny,
+    "detects deprecation attributes with no effect",
+}
+
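A hedged sketch of the usual fix (illustrative, not from this patch): attach the deprecation to an item that supports it, such as the type or a method, rather than to the trait implementation.

```rust
struct X;

impl X {
    // Deprecation attributes do apply to functions and methods.
    #[deprecated = "message"]
    pub fn new() -> Self {
        X
    }
}
```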
 declare_tool_lint! {
     pub rustc::INEFFECTIVE_UNSTABLE_TRAIT_IMPL,
     Deny,
@@ -2711,6 +2828,7 @@
         CONST_ITEM_MUTATION,
         SAFE_PACKED_BORROWS,
         PATTERNS_IN_FNS_WITHOUT_BODY,
+        MISSING_FRAGMENT_SPECIFIER,
         LATE_BOUND_LIFETIME_ARGUMENTS,
         ORDER_DEPENDENT_TRAIT_OBJECTS,
         COHERENCE_LEAK_CHECK,
@@ -2728,10 +2846,13 @@
         UNSTABLE_NAME_COLLISIONS,
         IRREFUTABLE_LET_PATTERNS,
         BROKEN_INTRA_DOC_LINKS,
+        PRIVATE_INTRA_DOC_LINKS,
         INVALID_CODEBLOCK_ATTRIBUTES,
         MISSING_CRATE_LEVEL_DOCS,
         MISSING_DOC_CODE_EXAMPLES,
+        INVALID_HTML_TAGS,
         PRIVATE_DOC_TESTS,
+        NON_AUTOLINKS,
         WHERE_CLAUSES_OBJECT_SAFETY,
         PROC_MACRO_DERIVE_RESOLUTION_FALLBACK,
         MACRO_USE_EXTERN_CRATE,
@@ -2753,7 +2874,9 @@
         CENUM_IMPL_DROP_CAST,
         CONST_EVALUATABLE_UNCHECKED,
         INEFFECTIVE_UNSTABLE_TRAIT_IMPL,
-        MISSING_FRAGMENT_SPECIFIER,
+        UNINHABITED_STATIC,
+        FUNCTION_ITEM_REFERENCES,
+        USELESS_DEPRECATED,
     ]
 }
 
diff --git a/compiler/rustc_session/src/lint.rs b/compiler/rustc_lint_defs/src/lib.rs
similarity index 81%
rename from compiler/rustc_session/src/lint.rs
rename to compiler/rustc_lint_defs/src/lib.rs
index 62e021d..af99264 100644
--- a/compiler/rustc_session/src/lint.rs
+++ b/compiler/rustc_lint_defs/src/lib.rs
@@ -1,12 +1,45 @@
+#[macro_use]
+extern crate rustc_macros;
+
 pub use self::Level::*;
 use rustc_ast::node_id::{NodeId, NodeMap};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
-use rustc_errors::{pluralize, Applicability, DiagnosticBuilder};
 use rustc_span::edition::Edition;
 use rustc_span::{sym, symbol::Ident, MultiSpan, Span, Symbol};
 
 pub mod builtin;
 
+#[macro_export]
+macro_rules! pluralize {
+    ($x:expr) => {
+        if $x != 1 { "s" } else { "" }
+    };
+}
+
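For illustration, a standalone usage sketch of the macro above (the count and message are made up):

```rust
macro_rules! pluralize {
    ($x:expr) => {
        if $x != 1 { "s" } else { "" }
    };
}

fn main() {
    let n = 3;
    // Prints: indicate the anonymous lifetimes
    println!("indicate the anonymous lifetime{}", pluralize!(n));
    // Prints: indicate the anonymous lifetime
    println!("indicate the anonymous lifetime{}", pluralize!(1));
}
```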
+/// Indicates the confidence in the correctness of a suggestion.
+///
+/// All suggestions are marked with an `Applicability`. Tools use the applicability of a suggestion
+/// to determine whether it should be automatically applied or if the user should be consulted
+/// before applying the suggestion.
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
+pub enum Applicability {
+    /// The suggestion is definitely what the user intended. This suggestion should be
+    /// automatically applied.
+    MachineApplicable,
+
+    /// The suggestion may be what the user intended, but it is uncertain. The suggestion should
+    /// result in valid Rust code if it is applied.
+    MaybeIncorrect,
+
+    /// The suggestion contains placeholders like `(...)` or `{ /* fields */ }`. The suggestion
+    /// cannot be applied automatically because it will not result in valid Rust code. The user
+    /// will need to fill in the placeholders.
+    HasPlaceholders,
+
+    /// The applicability of the suggestion is unknown.
+    Unspecified,
+}
+
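As a hedged illustration (not compiler or `cargo fix` code), a tool consuming suggestions might gate automatic edits on the variant; the enum below is a local stand-in for the one defined above.

```rust
#[allow(dead_code)]
#[derive(Copy, Clone, PartialEq)]
enum Applicability {
    MachineApplicable,
    MaybeIncorrect,
    HasPlaceholders,
    Unspecified,
}

// Only machine-applicable suggestions are safe to apply without asking.
fn should_auto_apply(applicability: Applicability) -> bool {
    matches!(applicability, Applicability::MachineApplicable)
}

fn main() {
    assert!(should_auto_apply(Applicability::MachineApplicable));
    assert!(!should_auto_apply(Applicability::MaybeIncorrect));
}
```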
 /// Setting for how to handle a lint.
 #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
 pub enum Level {
@@ -66,13 +99,13 @@
     /// The name is written with underscores, e.g., "unused_imports".
     /// On the command line, underscores become dashes.
     ///
-    /// See https://rustc-dev-guide.rust-lang.org/diagnostics.html#lint-naming
+    /// See <https://rustc-dev-guide.rust-lang.org/diagnostics.html#lint-naming>
     /// for naming guidelines.
     pub name: &'static str,
 
     /// Default level for the lint.
     ///
-    /// See https://rustc-dev-guide.rust-lang.org/diagnostics.html#diagnostic-levels
+    /// See <https://rustc-dev-guide.rust-lang.org/diagnostics.html#diagnostic-levels>
     /// for guidelines on choosing a default level.
     pub default_level: Level,
 
@@ -106,6 +139,21 @@
     /// If this is an edition fixing lint, the edition in which
     /// this lint becomes obsolete
     pub edition: Option<Edition>,
+    /// Information about a future breakage, which will
+    /// be emitted in JSON messages to be displayed by Cargo
+    /// for upstream deps
+    pub future_breakage: Option<FutureBreakage>,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct FutureBreakage {
+    pub date: Option<&'static str>,
+}
+
+impl FutureIncompatibleInfo {
+    pub const fn default_fields_for_macro() -> Self {
+        FutureIncompatibleInfo { reference: "", edition: None, future_breakage: None }
+    }
 }
 
 impl Lint {
@@ -282,8 +330,8 @@
 
 /// Declares a static item of type `&'static Lint`.
 ///
-/// See https://rustc-dev-guide.rust-lang.org/diagnostics.html for documentation
-/// and guidelines on writing lints.
+/// See <https://rustc-dev-guide.rust-lang.org/diagnostics.html> for
+/// documentation and guidelines on writing lints.
 ///
 /// The macro call should start with a doc comment explaining the lint
 /// which will be embedded in the rustc user documentation book. It should
@@ -331,31 +379,34 @@
         );
     );
     ($(#[$attr:meta])* $vis: vis $NAME: ident, $Level: ident, $desc: expr,
-     $(@future_incompatible = $fi:expr;)?
      $(@feature_gate = $gate:expr;)?
+     $(@future_incompatible = FutureIncompatibleInfo { $($field:ident : $val:expr),* $(,)*  }; )?
      $($v:ident),*) => (
         $(#[$attr])*
-        $vis static $NAME: &$crate::lint::Lint = &$crate::lint::Lint {
+        $vis static $NAME: &$crate::Lint = &$crate::Lint {
             name: stringify!($NAME),
-            default_level: $crate::lint::$Level,
+            default_level: $crate::$Level,
             desc: $desc,
             edition_lint_opts: None,
             is_plugin: false,
             $($v: true,)*
-            $(future_incompatible: Some($fi),)*
             $(feature_gate: Some($gate),)*
-            ..$crate::lint::Lint::default_fields_for_macro()
+            $(future_incompatible: Some($crate::FutureIncompatibleInfo {
+                $($field: $val,)*
+                ..$crate::FutureIncompatibleInfo::default_fields_for_macro()
+            }),)*
+            ..$crate::Lint::default_fields_for_macro()
         };
     );
     ($(#[$attr:meta])* $vis: vis $NAME: ident, $Level: ident, $desc: expr,
      $lint_edition: expr => $edition_level: ident
     ) => (
         $(#[$attr])*
-        $vis static $NAME: &$crate::lint::Lint = &$crate::lint::Lint {
+        $vis static $NAME: &$crate::Lint = &$crate::Lint {
             name: stringify!($NAME),
-            default_level: $crate::lint::$Level,
+            default_level: $crate::$Level,
             desc: $desc,
-            edition_lint_opts: Some(($lint_edition, $crate::lint::Level::$edition_level)),
+            edition_lint_opts: Some(($lint_edition, $crate::Level::$edition_level)),
             report_in_external_macro: false,
             is_plugin: false,
         };
@@ -380,9 +431,9 @@
         $external:expr
     ) => (
         $(#[$attr])*
-        $vis static $NAME: &$crate::lint::Lint = &$crate::lint::Lint {
+        $vis static $NAME: &$crate::Lint = &$crate::Lint {
             name: &concat!(stringify!($tool), "::", stringify!($NAME)),
-            default_level: $crate::lint::$Level,
+            default_level: $crate::$Level,
             desc: $desc,
             edition_lint_opts: None,
             report_in_external_macro: $external,
@@ -413,11 +464,11 @@
 #[macro_export]
 macro_rules! impl_lint_pass {
     ($ty:ty => [$($lint:expr),* $(,)?]) => {
-        impl $crate::lint::LintPass for $ty {
+        impl $crate::LintPass for $ty {
             fn name(&self) -> &'static str { stringify!($ty) }
         }
         impl $ty {
-            pub fn get_lints() -> $crate::lint::LintArray { $crate::lint_array!($($lint),*) }
+            pub fn get_lints() -> $crate::LintArray { $crate::lint_array!($($lint),*) }
         }
     };
 }
@@ -431,45 +482,3 @@
         $crate::impl_lint_pass!($name => [$($lint),*]);
     };
 }
-
-pub fn add_elided_lifetime_in_path_suggestion(
-    sess: &crate::Session,
-    db: &mut DiagnosticBuilder<'_>,
-    n: usize,
-    path_span: Span,
-    incl_angl_brckt: bool,
-    insertion_span: Span,
-    anon_lts: String,
-) {
-    let (replace_span, suggestion) = if incl_angl_brckt {
-        (insertion_span, anon_lts)
-    } else {
-        // When possible, prefer a suggestion that replaces the whole
-        // `Path<T>` expression with `Path<'_, T>`, rather than inserting `'_, `
-        // at a point (which makes for an ugly/confusing label)
-        if let Ok(snippet) = sess.source_map().span_to_snippet(path_span) {
-            // But our spans can get out of whack due to macros; if the place we think
-            // we want to insert `'_` isn't even within the path expression's span, we
-            // should bail out of making any suggestion rather than panicking on a
-            // subtract-with-overflow or string-slice-out-out-bounds (!)
-            // FIXME: can we do better?
-            if insertion_span.lo().0 < path_span.lo().0 {
-                return;
-            }
-            let insertion_index = (insertion_span.lo().0 - path_span.lo().0) as usize;
-            if insertion_index > snippet.len() {
-                return;
-            }
-            let (before, after) = snippet.split_at(insertion_index);
-            (path_span, format!("{}{}{}", before, anon_lts, after))
-        } else {
-            (insertion_span, anon_lts)
-        }
-    };
-    db.span_suggestion(
-        replace_span,
-        &format!("indicate the anonymous lifetime{}", pluralize!(n)),
-        suggestion,
-        Applicability::MachineApplicable,
-    );
-}
diff --git a/compiler/rustc_llvm/build.rs b/compiler/rustc_llvm/build.rs
index 7f1e5cf..54b22ca 100644
--- a/compiler/rustc_llvm/build.rs
+++ b/compiler/rustc_llvm/build.rs
@@ -70,7 +70,7 @@
     let host = env::var("HOST").expect("HOST was not set");
     let is_crossed = target != host;
 
-    let mut optional_components = vec![
+    let optional_components = &[
         "x86",
         "arm",
         "aarch64",
@@ -85,6 +85,7 @@
         "sparc",
         "nvptx",
         "hexagon",
+        "riscv",
     ];
 
     let mut version_cmd = Command::new(&llvm_config);
@@ -94,13 +95,9 @@
     let (major, _minor) = if let (Some(major), Some(minor)) = (parts.next(), parts.next()) {
         (major, minor)
     } else {
-        (6, 0)
+        (8, 0)
     };
 
-    if major > 6 {
-        optional_components.push("riscv");
-    }
-
     let required_components = &[
         "ipo",
         "bitreader",
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index 7b1c3f9..71ca4f2 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -1212,6 +1212,7 @@
   StringMap<FunctionImporter::ImportMapTy> ImportLists;
   StringMap<FunctionImporter::ExportSetTy> ExportLists;
   StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
+  StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
 
   LLVMRustThinLTOData() : Index(/* HaveGVs = */ false) {}
 };
@@ -1308,7 +1309,6 @@
   //
   // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp` with some of this
   // being lifted from `lib/LTO/LTO.cpp` as well
-  StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
   DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
   for (auto &I : Ret->Index) {
     if (I.second.SummaryList.size() > 1)
@@ -1323,7 +1323,7 @@
   auto recordNewLinkage = [&](StringRef ModuleIdentifier,
                               GlobalValue::GUID GUID,
                               GlobalValue::LinkageTypes NewLinkage) {
-    ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
+    Ret->ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
   };
 #if LLVM_VERSION_GE(9, 0)
   thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage,
@@ -1491,7 +1491,7 @@
 // Calls `module_name_callback` for each module import done by ThinLTO.
 // The callback is provided with regular null-terminated C strings.
 extern "C" void
-LLVMRustGetThinLTOModuleImports(const LLVMRustThinLTOData *data,
+LLVMRustGetThinLTOModules(const LLVMRustThinLTOData *data,
                                 LLVMRustModuleNameCallback module_name_callback,
                                 void* callback_payload) {
   for (const auto& importing_module : data->ImportLists) {
@@ -1653,3 +1653,36 @@
   MD->clearOperands();
   MD->addOperand(Unit);
 }
+
+// Computes the LTO cache key for the provided 'ModId' in the given 'Data',
+// storing the result in 'KeyOut'.
+// Currently, this cache key is a SHA-1 hash of anything that could affect
+// the result of optimizing this module (e.g. module imports, exports, liveness
+// of access globals, etc).
+// The precise details are determined by LLVM in `computeLTOCacheKey`, which is
+// used during the normal linker-plugin incremental thin-LTO process.
+extern "C" void
+LLVMRustComputeLTOCacheKey(RustStringRef KeyOut, const char *ModId, LLVMRustThinLTOData *Data) {
+  SmallString<40> Key;
+  llvm::lto::Config conf;
+  const auto &ImportList = Data->ImportLists.lookup(ModId);
+  const auto &ExportList = Data->ExportLists.lookup(ModId);
+  const auto &ResolvedODR = Data->ResolvedODR.lookup(ModId);
+  const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(ModId);
+  std::set<GlobalValue::GUID> CfiFunctionDefs;
+  std::set<GlobalValue::GUID> CfiFunctionDecls;
+
+  // Based on the 'InProcessThinBackend' constructor in LLVM
+  for (auto &Name : Data->Index.cfiFunctionDefs())
+    CfiFunctionDefs.insert(
+        GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
+  for (auto &Name : Data->Index.cfiFunctionDecls())
+    CfiFunctionDecls.insert(
+        GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
+
+  llvm::computeLTOCacheKey(Key, conf, Data->Index, ModId,
+      ImportList, ExportList, ResolvedODR, DefinedGlobals, CfiFunctionDefs, CfiFunctionDecls
+  );
+
+  LLVMRustStringWriteImpl(KeyOut, Key.c_str(), Key.size());
+}
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index 9f8ea7f..938eb19 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -648,6 +648,7 @@
   None,
   MD5,
   SHA1,
+  SHA256,
 };
 
 static Optional<DIFile::ChecksumKind> fromRust(LLVMRustChecksumKind Kind) {
@@ -658,6 +659,10 @@
     return DIFile::ChecksumKind::CSK_MD5;
   case LLVMRustChecksumKind::SHA1:
     return DIFile::ChecksumKind::CSK_SHA1;
+#if (LLVM_VERSION_MAJOR >= 11)
+  case LLVMRustChecksumKind::SHA256:
+    return DIFile::ChecksumKind::CSK_SHA256;
+#endif
   default:
     report_fatal_error("bad ChecksumKind.");
   }
@@ -733,7 +738,7 @@
     const char *LinkageName, size_t LinkageNameLen,
     LLVMMetadataRef File, unsigned LineNo,
     LLVMMetadataRef Ty, unsigned ScopeLine, LLVMRustDIFlags Flags,
-    LLVMRustDISPFlags SPFlags, LLVMValueRef Fn, LLVMMetadataRef TParam,
+    LLVMRustDISPFlags SPFlags, LLVMValueRef MaybeFn, LLVMMetadataRef TParam,
     LLVMMetadataRef Decl) {
   DITemplateParameterArray TParams =
       DITemplateParameterArray(unwrap<MDTuple>(TParam));
@@ -750,7 +755,8 @@
       unwrapDI<DIFile>(File), LineNo,
       unwrapDI<DISubroutineType>(Ty), ScopeLine, llvmFlags,
       llvmSPFlags, TParams, unwrapDIPtr<DISubprogram>(Decl));
-  unwrap<Function>(Fn)->setSubprogram(Sub);
+  if (MaybeFn)
+    unwrap<Function>(MaybeFn)->setSubprogram(Sub);
   return wrap(Sub);
 }
 
@@ -765,7 +771,7 @@
     LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Scope) {
   return wrap(Builder->createTypedef(
     unwrap<DIType>(Type), StringRef(Name, NameLen), unwrap<DIFile>(File),
-    LineNo, unwrap<DIScope>(Scope)));
+    LineNo, unwrapDIPtr<DIScope>(Scope)));
 }
 
 extern "C" LLVMMetadataRef LLVMRustDIBuilderCreatePointerType(
@@ -930,12 +936,12 @@
 
 extern "C" LLVMValueRef LLVMRustDIBuilderInsertDeclareAtEnd(
     LLVMRustDIBuilderRef Builder, LLVMValueRef V, LLVMMetadataRef VarInfo,
-    int64_t *AddrOps, unsigned AddrOpsCount, LLVMValueRef DL,
+    int64_t *AddrOps, unsigned AddrOpsCount, LLVMMetadataRef DL,
     LLVMBasicBlockRef InsertAtEnd) {
   return wrap(Builder->insertDeclare(
       unwrap(V), unwrap<DILocalVariable>(VarInfo),
       Builder->createExpression(llvm::ArrayRef<int64_t>(AddrOps, AddrOpsCount)),
-      DebugLoc(cast<MDNode>(unwrap<MetadataAsValue>(DL)->getMetadata())),
+      DebugLoc(cast<MDNode>(unwrap(DL))),
       unwrap(InsertAtEnd)));
 }
 
@@ -1002,7 +1008,7 @@
                          DINodeArray(unwrap<MDTuple>(Params)));
 }
 
-extern "C" LLVMValueRef
+extern "C" LLVMMetadataRef
 LLVMRustDIBuilderCreateDebugLocation(LLVMContextRef ContextRef, unsigned Line,
                                      unsigned Column, LLVMMetadataRef Scope,
                                      LLVMMetadataRef InlinedAt) {
@@ -1011,7 +1017,7 @@
   DebugLoc debug_loc = DebugLoc::get(Line, Column, unwrapDIPtr<MDNode>(Scope),
                                      unwrapDIPtr<MDNode>(InlinedAt));
 
-  return wrap(MetadataAsValue::get(Context, debug_loc.getAsMDNode()));
+  return wrap(debug_loc.getAsMDNode());
 }
 
 extern "C" int64_t LLVMRustDIBuilderCreateOpDeref() {
diff --git a/compiler/rustc_macros/src/lift.rs b/compiler/rustc_macros/src/lift.rs
index 4bf4ce0..ad7ac74 100644
--- a/compiler/rustc_macros/src/lift.rs
+++ b/compiler/rustc_macros/src/lift.rs
@@ -3,6 +3,7 @@
 
 pub fn lift_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
     s.add_bounds(synstructure::AddBounds::Generics);
+    s.bind_with(|_| synstructure::BindStyle::Move);
 
     let tcx: syn::Lifetime = parse_quote!('tcx);
     let newtcx: syn::GenericParam = parse_quote!('__lifted);
@@ -43,8 +44,8 @@
         quote! {
             type Lifted = #lifted;
 
-            fn lift_to_tcx(&self, __tcx: ::rustc_middle::ty::TyCtxt<'__lifted>) -> Option<#lifted> {
-                Some(match *self { #body })
+            fn lift_to_tcx(self, __tcx: ::rustc_middle::ty::TyCtxt<'__lifted>) -> Option<#lifted> {
+                Some(match self { #body })
             }
         },
     )
diff --git a/compiler/rustc_macros/src/query.rs b/compiler/rustc_macros/src/query.rs
index e7c0546..fd85919 100644
--- a/compiler/rustc_macros/src/query.rs
+++ b/compiler/rustc_macros/src/query.rs
@@ -190,7 +190,11 @@
 }
 
 /// A named group containing queries.
+///
+/// The name is no longer used, but the capability remains interesting for future
+/// development of the query system.
 struct Group {
+    #[allow(unused)]
     name: Ident,
     queries: List<Query>,
 }
@@ -417,12 +421,9 @@
     let mut query_stream = quote! {};
     let mut query_description_stream = quote! {};
     let mut dep_node_def_stream = quote! {};
-    let mut dep_node_force_stream = quote! {};
-    let mut try_load_from_on_disk_cache_stream = quote! {};
     let mut cached_queries = quote! {};
 
     for group in groups.0 {
-        let mut group_stream = quote! {};
         for mut query in group.queries.0 {
             let modifiers = process_modifiers(&mut query);
             let name = &query.name;
@@ -437,22 +438,6 @@
                 cached_queries.extend(quote! {
                     #name,
                 });
-
-                try_load_from_on_disk_cache_stream.extend(quote! {
-                    ::rustc_middle::dep_graph::DepKind::#name => {
-                        if <#arg as DepNodeParams<TyCtxt<'_>>>::can_reconstruct_query_key() {
-                            debug_assert!($tcx.dep_graph
-                                            .node_color($dep_node)
-                                            .map(|c| c.is_green())
-                                            .unwrap_or(false));
-
-                            let key = <#arg as DepNodeParams<TyCtxt<'_>>>::recover($tcx, $dep_node).unwrap();
-                            if queries::#name::cache_on_disk($tcx, &key, None) {
-                                let _ = $tcx.#name(key);
-                            }
-                        }
-                    }
-                });
             }
 
             let mut attributes = Vec::new();
@@ -485,9 +470,9 @@
             let attribute_stream = quote! {#(#attributes),*};
             let doc_comments = query.doc_comments.iter();
             // Add the query to the group
-            group_stream.extend(quote! {
+            query_stream.extend(quote! {
                 #(#doc_comments)*
-                [#attribute_stream] fn #name: #name(#arg) #result,
+                [#attribute_stream] fn #name(#arg) #result,
             });
 
             // Create a dep node for the query
@@ -495,37 +480,10 @@
                 [#attribute_stream] #name(#arg),
             });
 
-            // Add a match arm to force the query given the dep node
-            dep_node_force_stream.extend(quote! {
-                ::rustc_middle::dep_graph::DepKind::#name => {
-                    if <#arg as DepNodeParams<TyCtxt<'_>>>::can_reconstruct_query_key() {
-                        if let Some(key) = <#arg as DepNodeParams<TyCtxt<'_>>>::recover($tcx, $dep_node) {
-                            force_query::<crate::ty::query::queries::#name<'_>, _>(
-                                $tcx,
-                                key,
-                                DUMMY_SP,
-                                *$dep_node
-                            );
-                            return true;
-                        }
-                    }
-                }
-            });
-
             add_query_description_impl(&query, modifiers, &mut query_description_stream);
         }
-        let name = &group.name;
-        query_stream.extend(quote! {
-            #name { #group_stream },
-        });
     }
 
-    dep_node_force_stream.extend(quote! {
-        ::rustc_middle::dep_graph::DepKind::Null => {
-            bug!("Cannot force dep node: {:?}", $dep_node)
-        }
-    });
-
     TokenStream::from(quote! {
         macro_rules! rustc_query_append {
             ([$($macro:tt)*][$($other:tt)*]) => {
@@ -546,15 +504,6 @@
                 );
             }
         }
-        macro_rules! rustc_dep_node_force {
-            ([$dep_node:expr, $tcx:expr] $($other:tt)*) => {
-                match $dep_node.kind {
-                    $($other)*
-
-                    #dep_node_force_stream
-                }
-            }
-        }
         macro_rules! rustc_cached_queries {
             ($($macro:tt)*) => {
                 $($macro)*(#cached_queries);
@@ -562,14 +511,5 @@
         }
 
         #query_description_stream
-
-        macro_rules! rustc_dep_node_try_load_from_on_disk_cache {
-            ($dep_node:expr, $tcx:expr) => {
-                match $dep_node.kind {
-                    #try_load_from_on_disk_cache_stream
-                    _ => (),
-                }
-            }
-        }
     })
 }
diff --git a/compiler/rustc_macros/src/type_foldable.rs b/compiler/rustc_macros/src/type_foldable.rs
index 6931e65..8fa6e6a 100644
--- a/compiler/rustc_macros/src/type_foldable.rs
+++ b/compiler/rustc_macros/src/type_foldable.rs
@@ -15,8 +15,11 @@
             }
         })
     });
-    let body_visit = s.fold(false, |acc, bind| {
-        quote! { #acc || ::rustc_middle::ty::fold::TypeFoldable::visit_with(#bind, __folder) }
+
+    let body_visit = s.each(|bind| {
+        quote! {
+            ::rustc_middle::ty::fold::TypeFoldable::visit_with(#bind, __folder)?;
+        }
     });
 
     s.bound_impl(
@@ -32,8 +35,9 @@
             fn super_visit_with<__F: ::rustc_middle::ty::fold::TypeVisitor<'tcx>>(
                 &self,
                 __folder: &mut __F
-            ) -> bool {
+            ) -> ::std::ops::ControlFlow<()> {
                 match *self { #body_visit }
+                ::std::ops::ControlFlow::CONTINUE
             }
         },
     )
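To make the new shape concrete, here is a standalone sketch (not the derive's actual output) of the `ControlFlow`-plus-`?` pattern that replaces the old `bool` accumulation: a `Break` from any field ends the visit immediately.

```rust
use std::ops::ControlFlow;

fn visit_field(x: i32) -> ControlFlow<()> {
    if x < 0 { ControlFlow::Break(()) } else { ControlFlow::Continue(()) }
}

fn super_visit_with(fields: &[i32]) -> ControlFlow<()> {
    for &field in fields {
        // `?` propagates a Break immediately, mirroring the generated
        // per-field `visit_with(...)?;` statements.
        visit_field(field)?;
    }
    ControlFlow::Continue(())
}

fn main() {
    assert_eq!(super_visit_with(&[1, 2, 3]), ControlFlow::Continue(()));
    assert!(matches!(super_visit_with(&[1, -2, 3]), ControlFlow::Break(())));
}
```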
diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs
index 7562da6..33cbf0f 100644
--- a/compiler/rustc_metadata/src/creader.rs
+++ b/compiler/rustc_metadata/src/creader.rs
@@ -752,10 +752,7 @@
         // At this point we've determined that we need an allocator. Let's see
         // if our compilation session actually needs an allocator based on what
         // we're emitting.
-        let all_rlib = self.sess.crate_types().iter().all(|ct| match *ct {
-            CrateType::Rlib => true,
-            _ => false,
-        });
+        let all_rlib = self.sess.crate_types().iter().all(|ct| matches!(*ct, CrateType::Rlib));
         if all_rlib {
             return;
         }
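The `matches!` rewrites in this file (and several below) follow the same pattern; a tiny standalone illustration with a made-up enum:

```rust
enum CrateType {
    Rlib,
    Dylib,
}

fn main() {
    let crate_types = [CrateType::Rlib, CrateType::Dylib];
    // Equivalent to `match *ct { CrateType::Rlib => true, _ => false }`.
    let all_rlib = crate_types.iter().all(|ct| matches!(*ct, CrateType::Rlib));
    assert!(!all_rlib);
}
```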
diff --git a/compiler/rustc_metadata/src/dependency_format.rs b/compiler/rustc_metadata/src/dependency_format.rs
index f7454da..c3afc9f 100644
--- a/compiler/rustc_metadata/src/dependency_format.rs
+++ b/compiler/rustc_metadata/src/dependency_format.rs
@@ -127,7 +127,7 @@
         if ty == CrateType::Staticlib
             || (ty == CrateType::Executable
                 && sess.crt_static(Some(ty))
-                && !sess.target.target.options.crt_static_allows_dylibs)
+                && !sess.target.crt_static_allows_dylibs)
         {
             for &cnum in tcx.crates().iter() {
                 if tcx.dep_kind(cnum).macros_only() {
diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs
index 0869ec2..c4c025d 100644
--- a/compiler/rustc_metadata/src/locator.rs
+++ b/compiler/rustc_metadata/src/locator.rs
@@ -325,7 +325,7 @@
             hash,
             host_hash,
             extra_filename,
-            target: if is_host { &sess.host } else { &sess.target.target },
+            target: if is_host { &sess.host } else { &sess.target },
             triple: if is_host {
                 TargetTriple::from_triple(config::host_triple())
             } else {
@@ -373,11 +373,10 @@
         seen_paths: &mut FxHashSet<PathBuf>,
     ) -> Result<Option<Library>, CrateError> {
         // want: crate_name.dir_part() + prefix + crate_name.file_part + "-"
-        let dylib_prefix =
-            format!("{}{}{}", self.target.options.dll_prefix, self.crate_name, extra_prefix);
+        let dylib_prefix = format!("{}{}{}", self.target.dll_prefix, self.crate_name, extra_prefix);
         let rlib_prefix = format!("lib{}{}", self.crate_name, extra_prefix);
         let staticlib_prefix =
-            format!("{}{}{}", self.target.options.staticlib_prefix, self.crate_name, extra_prefix);
+            format!("{}{}{}", self.target.staticlib_prefix, self.crate_name, extra_prefix);
 
         let mut candidates: FxHashMap<_, (FxHashMap<_, _>, FxHashMap<_, _>, FxHashMap<_, _>)> =
             Default::default();
@@ -405,17 +404,14 @@
                 (&file[(rlib_prefix.len())..(file.len() - ".rlib".len())], CrateFlavor::Rlib)
             } else if file.starts_with(&rlib_prefix) && file.ends_with(".rmeta") {
                 (&file[(rlib_prefix.len())..(file.len() - ".rmeta".len())], CrateFlavor::Rmeta)
-            } else if file.starts_with(&dylib_prefix)
-                && file.ends_with(&self.target.options.dll_suffix)
-            {
+            } else if file.starts_with(&dylib_prefix) && file.ends_with(&self.target.dll_suffix) {
                 (
-                    &file
-                        [(dylib_prefix.len())..(file.len() - self.target.options.dll_suffix.len())],
+                    &file[(dylib_prefix.len())..(file.len() - self.target.dll_suffix.len())],
                     CrateFlavor::Dylib,
                 )
             } else {
                 if file.starts_with(&staticlib_prefix)
-                    && file.ends_with(&self.target.options.staticlib_suffix)
+                    && file.ends_with(&self.target.staticlib_suffix)
                 {
                     staticlibs
                         .push(CrateMismatch { path: spf.path.clone(), got: "static".to_string() });
@@ -633,11 +629,9 @@
             }
         }
 
-        if self.exact_paths.is_empty() {
-            if self.crate_name != root.name() {
-                info!("Rejecting via crate name");
-                return None;
-            }
+        if self.exact_paths.is_empty() && self.crate_name != root.name() {
+            info!("Rejecting via crate name");
+            return None;
         }
 
         if root.triple() != &self.triple {
@@ -681,8 +675,8 @@
             };
 
             if file.starts_with("lib") && (file.ends_with(".rlib") || file.ends_with(".rmeta"))
-                || file.starts_with(&self.target.options.dll_prefix)
-                    && file.ends_with(&self.target.options.dll_suffix)
+                || file.starts_with(&self.target.dll_prefix)
+                    && file.ends_with(&self.target.dll_suffix)
             {
                 // Make sure there's at most one rlib and at most one dylib.
                 // Note to take care and match against the non-canonicalized name:
@@ -714,8 +708,8 @@
             crate_name: self.crate_name,
             root: self.root.cloned(),
             triple: self.triple,
-            dll_prefix: self.target.options.dll_prefix.clone(),
-            dll_suffix: self.target.options.dll_suffix.clone(),
+            dll_prefix: self.target.dll_prefix.clone(),
+            dll_suffix: self.target.dll_suffix.clone(),
             rejected_via_hash: self.rejected_via_hash,
             rejected_via_triple: self.rejected_via_triple,
             rejected_via_kind: self.rejected_via_kind,
diff --git a/compiler/rustc_metadata/src/native_libs.rs b/compiler/rustc_metadata/src/native_libs.rs
index e76c2cb..2f7c2c2 100644
--- a/compiler/rustc_metadata/src/native_libs.rs
+++ b/compiler/rustc_metadata/src/native_libs.rs
@@ -149,7 +149,7 @@
             }
             return;
         }
-        let is_osx = self.tcx.sess.target.target.options.is_like_osx;
+        let is_osx = self.tcx.sess.target.is_like_osx;
         if lib.kind == NativeLibKind::Framework && !is_osx {
             let msg = "native frameworks are only available on macOS targets";
             match span {
diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs
index c31e941..746c3b6 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder.rs
@@ -313,27 +313,6 @@
         Ok(ty)
     }
 
-    fn cached_predicate_for_shorthand<F>(
-        &mut self,
-        shorthand: usize,
-        or_insert_with: F,
-    ) -> Result<ty::Predicate<'tcx>, Self::Error>
-    where
-        F: FnOnce(&mut Self) -> Result<ty::Predicate<'tcx>, Self::Error>,
-    {
-        let tcx = self.tcx();
-
-        let key = ty::CReaderCacheKey { cnum: self.cdata().cnum, pos: shorthand };
-
-        if let Some(&pred) = tcx.pred_rcache.borrow().get(&key) {
-            return Ok(pred);
-        }
-
-        let pred = or_insert_with(self)?;
-        tcx.pred_rcache.borrow_mut().insert(key, pred);
-        Ok(pred)
-    }
-
     fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
     where
         F: FnOnce(&mut Self) -> R,
@@ -877,7 +856,7 @@
                 .tables
                 .children
                 .get(self, index)
-                .unwrap_or(Lazy::empty())
+                .unwrap_or_else(Lazy::empty)
                 .decode(self)
                 .map(|index| ty::FieldDef {
                     did: self.local_def_id(index),
@@ -909,7 +888,7 @@
                 .tables
                 .children
                 .get(self, item_id)
-                .unwrap_or(Lazy::empty())
+                .unwrap_or_else(Lazy::empty)
                 .decode(self)
                 .map(|index| self.get_variant(&self.kind(index), index, did, tcx.sess))
                 .collect()
@@ -937,7 +916,7 @@
             .tables
             .inferred_outlives
             .get(self, item_id)
-            .map(|predicates| predicates.decode((self, tcx)))
+            .map(|predicates| tcx.arena.alloc_from_iter(predicates.decode((self, tcx))))
             .unwrap_or_default()
     }
 
@@ -949,6 +928,19 @@
         self.root.tables.super_predicates.get(self, item_id).unwrap().decode((self, tcx))
     }
 
+    fn get_explicit_item_bounds(
+        &self,
+        item_id: DefIndex,
+        tcx: TyCtxt<'tcx>,
+    ) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+        self.root
+            .tables
+            .explicit_item_bounds
+            .get(self, item_id)
+            .map(|bounds| tcx.arena.alloc_from_iter(bounds.decode((self, tcx))))
+            .unwrap_or_default()
+    }
+
     fn get_generics(&self, item_id: DefIndex, sess: &Session) -> ty::Generics {
         self.root.tables.generics.get(self, item_id).unwrap().decode((self, sess))
     }
@@ -1011,6 +1003,10 @@
         self.root.tables.impl_trait_ref.get(self, id).map(|tr| tr.decode((self, tcx)))
     }
 
+    fn get_expn_that_defined(&self, id: DefIndex, sess: &Session) -> ExpnId {
+        self.root.tables.expn_that_defined.get(self, id).unwrap().decode((self, sess))
+    }
+
     /// Iterates over all the stability attributes in the given crate.
     fn get_lib_features(&self, tcx: TyCtxt<'tcx>) -> &'tcx [(Symbol, Option<Symbol>)] {
         // FIXME: For a proc macro crate, not sure whether we should return the "host"
@@ -1079,7 +1075,7 @@
 
         // Iterate over all children.
         let macros_only = self.dep_kind.lock().macros_only();
-        let children = self.root.tables.children.get(self, id).unwrap_or(Lazy::empty());
+        let children = self.root.tables.children.get(self, id).unwrap_or_else(Lazy::empty);
         for child_index in children.decode((self, sess)) {
             if macros_only {
                 continue;
@@ -1102,7 +1098,7 @@
                             .tables
                             .children
                             .get(self, child_index)
-                            .unwrap_or(Lazy::empty());
+                            .unwrap_or_else(Lazy::empty);
                         for child_index in child_children.decode((self, sess)) {
                             let kind = self.def_kind(child_index);
                             callback(Export {
@@ -1288,7 +1284,7 @@
     }
 
     fn get_item_variances(&self, id: DefIndex) -> Vec<ty::Variance> {
-        self.root.tables.variances.get(self, id).unwrap_or(Lazy::empty()).decode(self).collect()
+        self.root.tables.variances.get(self, id).unwrap_or_else(Lazy::empty).decode(self).collect()
     }
 
     fn get_ctor_kind(&self, node_id: DefIndex) -> CtorKind {
@@ -1327,7 +1323,7 @@
             .tables
             .attributes
             .get(self, item_id)
-            .unwrap_or(Lazy::empty())
+            .unwrap_or_else(Lazy::empty)
             .decode((self, sess))
             .collect::<Vec<_>>()
     }
@@ -1337,7 +1333,7 @@
             .tables
             .children
             .get(self, id)
-            .unwrap_or(Lazy::empty())
+            .unwrap_or_else(Lazy::empty)
             .decode(self)
             .map(|index| respan(self.get_span(index, sess), self.item_ident(index, sess).name))
             .collect()
@@ -1353,7 +1349,7 @@
                 .tables
                 .inherent_impls
                 .get(self, id)
-                .unwrap_or(Lazy::empty())
+                .unwrap_or_else(Lazy::empty)
                 .decode(self)
                 .map(|index| self.local_def_id(index)),
         )
@@ -1418,12 +1414,14 @@
         }
     }
 
-    fn get_foreign_modules(&self, tcx: TyCtxt<'tcx>) -> &'tcx [ForeignModule] {
+    fn get_foreign_modules(&self, tcx: TyCtxt<'tcx>) -> Lrc<FxHashMap<DefId, ForeignModule>> {
         if self.root.is_proc_macro_crate() {
             // Proc macro crates do not have any *target* foreign modules.
-            &[]
+            Lrc::new(FxHashMap::default())
         } else {
-            tcx.arena.alloc_from_iter(self.root.foreign_modules.decode((self, tcx.sess)))
+            let modules: FxHashMap<DefId, ForeignModule> =
+                self.root.foreign_modules.decode((self, tcx.sess)).map(|m| (m.def_id, m)).collect();
+            Lrc::new(modules)
         }
     }
 
diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
index 4102cf8..ddd85ab 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
@@ -6,11 +6,14 @@
 
 use rustc_ast as ast;
 use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_data_structures::stable_map::FxHashMap;
 use rustc_data_structures::svh::Svh;
 use rustc_hir as hir;
+use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, CRATE_DEF_INDEX, LOCAL_CRATE};
 use rustc_hir::definitions::{DefKey, DefPath, DefPathHash};
 use rustc_middle::hir::exports::Export;
+use rustc_middle::middle::cstore::ForeignModule;
 use rustc_middle::middle::cstore::{CrateSource, CrateStore, EncodedMetadata};
 use rustc_middle::middle::exported_symbols::ExportedSymbol;
 use rustc_middle::middle::stability::DeprecationEntry;
@@ -89,11 +92,12 @@
     explicit_predicates_of => { cdata.get_explicit_predicates(def_id.index, tcx) }
     inferred_outlives_of => { cdata.get_inferred_outlives(def_id.index, tcx) }
     super_predicates_of => { cdata.get_super_predicates(def_id.index, tcx) }
+    explicit_item_bounds => { cdata.get_explicit_item_bounds(def_id.index, tcx) }
     trait_def => { cdata.get_trait_def(def_id.index, tcx.sess) }
     adt_def => { cdata.get_adt_def(def_id.index, tcx) }
     adt_destructor => {
         let _ = cdata;
-        tcx.calculate_dtor(def_id, &mut |_,_| Ok(()))
+        tcx.calculate_dtor(def_id, |_,_| Ok(()))
     }
     variances_of => { tcx.arena.alloc_from_iter(cdata.get_item_variances(def_id.index)) }
     associated_item_def_ids => {
@@ -218,10 +222,7 @@
     missing_lang_items => { cdata.get_missing_lang_items(tcx) }
 
     missing_extern_crate_item => {
-        let r = match *cdata.extern_crate.borrow() {
-            Some(extern_crate) if !extern_crate.is_direct() => true,
-            _ => false,
-        };
+        let r = matches!(*cdata.extern_crate.borrow(), Some(extern_crate) if !extern_crate.is_direct());
         r
     }
 
@@ -238,6 +239,7 @@
     }
 
     crate_extern_paths => { cdata.source().paths().cloned().collect() }
+    expn_that_defined => { cdata.get_expn_that_defined(def_id.index, tcx.sess) }
 }
 
 pub fn provide(providers: &mut Providers) {
@@ -251,9 +253,11 @@
             }
             _ => false,
         },
-        is_statically_included_foreign_item: |tcx, id| match tcx.native_library_kind(id) {
-            Some(NativeLibKind::StaticBundle | NativeLibKind::StaticNoBundle) => true,
-            _ => false,
+        is_statically_included_foreign_item: |tcx, id| {
+            matches!(
+                tcx.native_library_kind(id),
+                Some(NativeLibKind::StaticBundle | NativeLibKind::StaticNoBundle)
+            )
         },
         native_library_kind: |tcx, id| {
             tcx.native_libraries(id.krate)
@@ -264,9 +268,8 @@
                         Some(id) => id,
                         None => return false,
                     };
-                    tcx.foreign_modules(id.krate)
-                        .iter()
-                        .find(|m| m.def_id == fm_id)
+                    let map = tcx.foreign_modules(id.krate);
+                    map.get(&fm_id)
                         .expect("failed to find foreign module")
                         .foreign_items
                         .contains(&id)
@@ -279,7 +282,9 @@
         },
         foreign_modules: |tcx, cnum| {
             assert_eq!(cnum, LOCAL_CRATE);
-            &tcx.arena.alloc(foreign_modules::collect(tcx))[..]
+            let modules: FxHashMap<DefId, ForeignModule> =
+                foreign_modules::collect(tcx).into_iter().map(|m| (m.def_id, m)).collect();
+            Lrc::new(modules)
         },
         link_args: |tcx, cnum| {
             assert_eq!(cnum, LOCAL_CRATE);
@@ -485,6 +490,10 @@
         self.get_crate_data(def.krate).def_key(def.index)
     }
 
+    fn def_kind(&self, def: DefId) -> DefKind {
+        self.get_crate_data(def.krate).def_kind(def.index)
+    }
+
     fn def_path(&self, def: DefId) -> DefPath {
         self.get_crate_data(def.krate).def_path(def.index)
     }
diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs
index f58a792..a7cf107 100644
--- a/compiler/rustc_metadata/src/rmeta/encoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/encoder.rs
@@ -28,7 +28,6 @@
 use rustc_serialize::{opaque, Encodable, Encoder};
 use rustc_session::config::CrateType;
 use rustc_span::hygiene::{ExpnDataEncodeMode, HygieneEncodeContext};
-use rustc_span::source_map::Spanned;
 use rustc_span::symbol::{sym, Ident, Symbol};
 use rustc_span::{self, ExternalSource, FileName, SourceFile, Span, SyntaxContext};
 use rustc_target::abi::VariantIdx;
@@ -436,8 +435,7 @@
 
     fn encode_info_for_items(&mut self) {
         let krate = self.tcx.hir().krate();
-        let vis = Spanned { span: rustc_span::DUMMY_SP, node: hir::VisibilityKind::Public };
-        self.encode_info_for_mod(hir::CRATE_HIR_ID, &krate.item.module, &krate.item.attrs, &vis);
+        self.encode_info_for_mod(hir::CRATE_HIR_ID, &krate.item.module, &krate.item.attrs);
 
         // Proc-macro crates only export proc-macro items, which are looked
         // up using `proc_macro_data`
@@ -739,14 +737,11 @@
             is_non_exhaustive: variant.is_field_list_non_exhaustive(),
         };
 
-        let enum_id = tcx.hir().local_def_id_to_hir_id(def.did.expect_local());
-        let enum_vis = &tcx.hir().expect_item(enum_id).vis;
-
         record!(self.tables.kind[def_id] <- EntryKind::Variant(self.lazy(data)));
-        record!(self.tables.visibility[def_id] <-
-            ty::Visibility::from_hir(enum_vis, enum_id, self.tcx));
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- self.tcx.def_span(def_id));
         record!(self.tables.attributes[def_id] <- &self.tcx.get_attrs(def_id)[..]);
+        record!(self.tables.expn_that_defined[def_id] <- self.tcx.expansion_that_defined(def_id));
         record!(self.tables.children[def_id] <- variant.fields.iter().map(|f| {
             assert!(f.did.is_local());
             f.did.index
@@ -784,17 +779,8 @@
             is_non_exhaustive: variant.is_field_list_non_exhaustive(),
         };
 
-        // Variant constructors have the same visibility as the parent enums, unless marked as
-        // non-exhaustive, in which case they are lowered to `pub(crate)`.
-        let enum_id = tcx.hir().local_def_id_to_hir_id(def.did.expect_local());
-        let enum_vis = &tcx.hir().expect_item(enum_id).vis;
-        let mut ctor_vis = ty::Visibility::from_hir(enum_vis, enum_id, tcx);
-        if variant.is_field_list_non_exhaustive() && ctor_vis == ty::Visibility::Public {
-            ctor_vis = ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX));
-        }
-
         record!(self.tables.kind[def_id] <- EntryKind::Variant(self.lazy(data)));
-        record!(self.tables.visibility[def_id] <- ctor_vis);
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- self.tcx.def_span(def_id));
         self.encode_stability(def_id);
         self.encode_deprecation(def_id);
@@ -810,13 +796,7 @@
         self.encode_promoted_mir(def_id.expect_local());
     }
 
-    fn encode_info_for_mod(
-        &mut self,
-        id: hir::HirId,
-        md: &hir::Mod<'_>,
-        attrs: &[ast::Attribute],
-        vis: &hir::Visibility<'_>,
-    ) {
+    fn encode_info_for_mod(&mut self, id: hir::HirId, md: &hir::Mod<'_>, attrs: &[ast::Attribute]) {
         let tcx = self.tcx;
         let local_def_id = tcx.hir().local_def_id(id);
         let def_id = local_def_id.to_def_id();
@@ -849,7 +829,7 @@
         };
 
         record!(self.tables.kind[def_id] <- EntryKind::Mod(self.lazy(data)));
-        record!(self.tables.visibility[def_id] <- ty::Visibility::from_hir(vis, id, self.tcx));
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- self.tcx.def_span(def_id));
         record!(self.tables.attributes[def_id] <- attrs);
         if self.is_proc_macro {
@@ -880,9 +860,10 @@
         let variant_data = tcx.hir().expect_variant_data(variant_id);
 
         record!(self.tables.kind[def_id] <- EntryKind::Field);
-        record!(self.tables.visibility[def_id] <- field.vis);
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- self.tcx.def_span(def_id));
         record!(self.tables.attributes[def_id] <- variant_data.fields()[field_index].attrs);
+        record!(self.tables.expn_that_defined[def_id] <- self.tcx.expansion_that_defined(def_id));
         self.encode_ident_span(def_id, field.ident);
         self.encode_stability(def_id);
         self.encode_deprecation(def_id);
@@ -904,26 +885,10 @@
             is_non_exhaustive: variant.is_field_list_non_exhaustive(),
         };
 
-        let struct_id = tcx.hir().local_def_id_to_hir_id(adt_def.did.expect_local());
-        let struct_vis = &tcx.hir().expect_item(struct_id).vis;
-        let mut ctor_vis = ty::Visibility::from_hir(struct_vis, struct_id, tcx);
-        for field in &variant.fields {
-            if ctor_vis.is_at_least(field.vis, tcx) {
-                ctor_vis = field.vis;
-            }
-        }
-
-        // If the structure is marked as non_exhaustive then lower the visibility
-        // to within the crate.
-        if adt_def.non_enum_variant().is_field_list_non_exhaustive()
-            && ctor_vis == ty::Visibility::Public
-        {
-            ctor_vis = ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX));
-        }
-
         record!(self.tables.kind[def_id] <- EntryKind::Struct(self.lazy(data), adt_def.repr));
-        record!(self.tables.visibility[def_id] <- ctor_vis);
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- self.tcx.def_span(def_id));
+        record!(self.tables.expn_that_defined[def_id] <- self.tcx.expansion_that_defined(def_id));
         self.encode_stability(def_id);
         self.encode_deprecation(def_id);
         self.encode_item_type(def_id);
@@ -962,6 +927,14 @@
         record!(self.tables.super_predicates[def_id] <- self.tcx.super_predicates_of(def_id));
     }
 
+    fn encode_explicit_item_bounds(&mut self, def_id: DefId) {
+        debug!("EncodeContext::encode_explicit_item_bounds({:?})", def_id);
+        let bounds = self.tcx.explicit_item_bounds(def_id);
+        if !bounds.is_empty() {
+            record!(self.tables.explicit_item_bounds[def_id] <- bounds);
+        }
+    }
+
     fn encode_info_for_trait_item(&mut self, def_id: DefId) {
         debug!("EncodeContext::encode_info_for_trait_item({:?})", def_id);
         let tcx = self.tcx;
@@ -1014,9 +987,12 @@
                     has_self: trait_item.fn_has_self_parameter,
                 }))
             }
-            ty::AssocKind::Type => EntryKind::AssocType(container),
+            ty::AssocKind::Type => {
+                self.encode_explicit_item_bounds(def_id);
+                EntryKind::AssocType(container)
+            }
         });
-        record!(self.tables.visibility[def_id] <- trait_item.vis);
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- ast_item.span);
         record!(self.tables.attributes[def_id] <- ast_item.attrs);
         self.encode_ident_span(def_id, ast_item.ident);
@@ -1098,7 +1074,7 @@
             }
             ty::AssocKind::Type => EntryKind::AssocType(container)
         });
-        record!(self.tables.visibility[def_id] <- impl_item.vis);
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- ast_item.span);
         record!(self.tables.attributes[def_id] <- ast_item.attrs);
         self.encode_ident_span(def_id, impl_item.ident);
@@ -1247,12 +1223,15 @@
                 EntryKind::Fn(self.lazy(data))
             }
             hir::ItemKind::Mod(ref m) => {
-                return self.encode_info_for_mod(item.hir_id, m, &item.attrs, &item.vis);
+                return self.encode_info_for_mod(item.hir_id, m, &item.attrs);
             }
             hir::ItemKind::ForeignMod(_) => EntryKind::ForeignMod,
             hir::ItemKind::GlobalAsm(..) => EntryKind::GlobalAsm,
             hir::ItemKind::TyAlias(..) => EntryKind::Type,
-            hir::ItemKind::OpaqueTy(..) => EntryKind::OpaqueTy,
+            hir::ItemKind::OpaqueTy(..) => {
+                self.encode_explicit_item_bounds(def_id);
+                EntryKind::OpaqueTy
+            }
             hir::ItemKind::Enum(..) => EntryKind::Enum(self.tcx.adt_def(def_id).repr),
             hir::ItemKind::Struct(ref struct_def, _) => {
                 let adt_def = self.tcx.adt_def(def_id);
@@ -1335,10 +1314,10 @@
             hir::ItemKind::ExternCrate(_) |
             hir::ItemKind::Use(..) => bug!("cannot encode info for item {:?}", item),
         });
-        record!(self.tables.visibility[def_id] <-
-            ty::Visibility::from_hir(&item.vis, item.hir_id, tcx));
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- self.tcx.def_span(def_id));
         record!(self.tables.attributes[def_id] <- item.attrs);
+        record!(self.tables.expn_that_defined[def_id] <- self.tcx.expansion_that_defined(def_id));
         // FIXME(eddyb) there should be a nicer way to do this.
         match item.kind {
             hir::ItemKind::ForeignMod(ref fm) => record!(self.tables.children[def_id] <-
@@ -1452,7 +1431,7 @@
     fn encode_info_for_macro_def(&mut self, macro_def: &hir::MacroDef<'_>) {
         let def_id = self.tcx.hir().local_def_id(macro_def.hir_id).to_def_id();
         record!(self.tables.kind[def_id] <- EntryKind::MacroDef(self.lazy(macro_def.ast.clone())));
-        record!(self.tables.visibility[def_id] <- ty::Visibility::Public);
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- macro_def.span);
         record!(self.tables.attributes[def_id] <- macro_def.attrs);
         self.encode_ident_span(def_id, macro_def.ident);
@@ -1462,7 +1441,6 @@
 
     fn encode_info_for_generic_param(&mut self, def_id: DefId, kind: EntryKind, encode_type: bool) {
         record!(self.tables.kind[def_id] <- kind);
-        record!(self.tables.visibility[def_id] <- ty::Visibility::Public);
         record!(self.tables.span[def_id] <- self.tcx.def_span(def_id));
         if encode_type {
             self.encode_item_type(def_id);
@@ -1487,7 +1465,6 @@
 
             _ => bug!("closure that is neither generator nor closure"),
         });
-        record!(self.tables.visibility[def_id.to_def_id()] <- ty::Visibility::Public);
         record!(self.tables.span[def_id.to_def_id()] <- self.tcx.def_span(def_id));
         record!(self.tables.attributes[def_id.to_def_id()] <- &self.tcx.get_attrs(def_id.to_def_id())[..]);
         self.encode_item_type(def_id.to_def_id());
@@ -1507,7 +1484,6 @@
         let qualifs = self.tcx.mir_const_qualif(def_id);
 
         record!(self.tables.kind[def_id.to_def_id()] <- EntryKind::AnonConst(qualifs, const_data));
-        record!(self.tables.visibility[def_id.to_def_id()] <- ty::Visibility::Public);
         record!(self.tables.span[def_id.to_def_id()] <- self.tcx.def_span(def_id));
         self.encode_item_type(def_id.to_def_id());
         self.encode_generics(def_id.to_def_id());
@@ -1526,7 +1502,7 @@
     fn encode_foreign_modules(&mut self) -> Lazy<[ForeignModule]> {
         empty_proc_macro!(self);
         let foreign_modules = self.tcx.foreign_modules(LOCAL_CRATE);
-        self.lazy(foreign_modules.iter().cloned())
+        self.lazy(foreign_modules.iter().map(|(_, m)| m).cloned())
     }
 
     fn encode_hygiene(&mut self) -> (SyntaxContextTable, ExpnDataTable) {
@@ -1744,8 +1720,7 @@
             hir::ForeignItemKind::Static(_, hir::Mutability::Not) => EntryKind::ForeignImmStatic,
             hir::ForeignItemKind::Type => EntryKind::ForeignType,
         });
-        record!(self.tables.visibility[def_id] <-
-            ty::Visibility::from_hir(&nitem.vis, nitem.hir_id, self.tcx));
+        record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
         record!(self.tables.span[def_id] <- nitem.span);
         record!(self.tables.attributes[def_id] <- nitem.attrs);
         self.encode_ident_span(def_id, nitem.ident);
@@ -2067,6 +2042,10 @@
     encoder.emit_raw_bytes(&[0, 0, 0, 0]);
 
     let source_map_files = tcx.sess.source_map().files();
+    let source_file_cache = (source_map_files[0].clone(), 0);
+    let required_source_files = Some(GrowableBitSet::with_capacity(source_map_files.len()));
+    drop(source_map_files);
+
     let hygiene_ctxt = HygieneEncodeContext::default();
 
     let mut ecx = EncodeContext {
@@ -2077,13 +2056,12 @@
         lazy_state: LazyState::NoNode,
         type_shorthands: Default::default(),
         predicate_shorthands: Default::default(),
-        source_file_cache: (source_map_files[0].clone(), 0),
+        source_file_cache,
         interpret_allocs: Default::default(),
-        required_source_files: Some(GrowableBitSet::with_capacity(source_map_files.len())),
+        required_source_files,
         is_proc_macro: tcx.sess.crate_types().contains(&CrateType::ProcMacro),
         hygiene_ctxt: &hygiene_ctxt,
     };
-    drop(source_map_files);
 
     // Encode the rustc version string in a predictable location.
     rustc_version().encode(&mut ecx).unwrap();
diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs
index 1a12703..2bd2019 100644
--- a/compiler/rustc_metadata/src/rmeta/mod.rs
+++ b/compiler/rustc_metadata/src/rmeta/mod.rs
@@ -294,13 +294,12 @@
     variances: Table<DefIndex, Lazy<[ty::Variance]>>,
     generics: Table<DefIndex, Lazy<ty::Generics>>,
     explicit_predicates: Table<DefIndex, Lazy!(ty::GenericPredicates<'tcx>)>,
-    // FIXME(eddyb) this would ideally be `Lazy<[...]>` but `ty::Predicate`
-    // doesn't handle shorthands in its own (de)serialization impls,
-    // as it's an `enum` for which we want to derive (de)serialization,
-    // so the `ty::codec` APIs handle the whole `&'tcx [...]` at once.
-    // Also, as an optimization, a missing entry indicates an empty `&[]`.
-    inferred_outlives: Table<DefIndex, Lazy!(&'tcx [(ty::Predicate<'tcx>, Span)])>,
+    expn_that_defined: Table<DefIndex, Lazy<ExpnId>>,
+    // As an optimization, a missing entry indicates an empty `&[]`.
+    inferred_outlives: Table<DefIndex, Lazy!([(ty::Predicate<'tcx>, Span)])>,
     super_predicates: Table<DefIndex, Lazy!(ty::GenericPredicates<'tcx>)>,
+    // As an optimization, a missing entry indicates an empty `&[]`.
+    explicit_item_bounds: Table<DefIndex, Lazy!([(ty::Predicate<'tcx>, Span)])>,
     mir: Table<DefIndex, Lazy!(mir::Body<'tcx>)>,
     promoted_mir: Table<DefIndex, Lazy!(IndexVec<mir::Promoted, mir::Body<'tcx>>)>,
     mir_abstract_consts: Table<DefIndex, Lazy!(&'tcx [mir::abstract_const::Node<'tcx>])>,
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
index e8ace36..3250f18 100644
--- a/compiler/rustc_middle/Cargo.toml
+++ b/compiler/rustc_middle/Cargo.toml
@@ -26,7 +26,7 @@
 rustc_serialize = { path = "../rustc_serialize" }
 rustc_ast = { path = "../rustc_ast" }
 rustc_span = { path = "../rustc_span" }
-chalk-ir = "0.29.0"
+chalk-ir = "0.36.0"
 smallvec = { version = "1.0", features = ["union", "may_dangle"] }
-measureme = "0.7.1"
+measureme = "9.0.0"
 rustc_session = { path = "../rustc_session" }
diff --git a/compiler/rustc_middle/src/hir/map/collector.rs b/compiler/rustc_middle/src/hir/map/collector.rs
index d6869ab..516c9b6 100644
--- a/compiler/rustc_middle/src/hir/map/collector.rs
+++ b/compiler/rustc_middle/src/hir/map/collector.rs
@@ -360,8 +360,26 @@
     }
 
     fn visit_generic_param(&mut self, param: &'hir GenericParam<'hir>) {
-        self.insert(param.span, param.hir_id, Node::GenericParam(param));
-        intravisit::walk_generic_param(self, param);
+        if let hir::GenericParamKind::Type {
+            synthetic: Some(hir::SyntheticTyParamKind::ImplTrait),
+            ..
+        } = param.kind
+        {
+            debug_assert_eq!(
+                param.hir_id.owner,
+                self.definitions.opt_hir_id_to_local_def_id(param.hir_id).unwrap()
+            );
+            self.with_dep_node_owner(param.hir_id.owner, param, |this, hash| {
+                this.insert_with_hash(param.span, param.hir_id, Node::GenericParam(param), hash);
+
+                this.with_parent(param.hir_id, |this| {
+                    intravisit::walk_generic_param(this, param);
+                });
+            });
+        } else {
+            self.insert(param.span, param.hir_id, Node::GenericParam(param));
+            intravisit::walk_generic_param(self, param);
+        }
     }
 
     fn visit_trait_item(&mut self, ti: &'hir TraitItem<'hir>) {
diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs
index ceb873a..d86e898 100644
--- a/compiler/rustc_middle/src/hir/map/mod.rs
+++ b/compiler/rustc_middle/src/hir/map/mod.rs
@@ -478,7 +478,7 @@
     }
 
     pub fn get_if_local(&self, id: DefId) -> Option<Node<'hir>> {
-        id.as_local().map(|id| self.get(self.local_def_id_to_hir_id(id)))
+        id.as_local().and_then(|id| self.find(self.local_def_id_to_hir_id(id)))
     }
 
     pub fn get_generics(&self, id: DefId) -> Option<&'hir Generics<'hir>> {
@@ -535,15 +535,15 @@
             Some(Node::Binding(_)) => (),
             _ => return false,
         }
-        match self.find(self.get_parent_node(id)) {
+        matches!(
+            self.find(self.get_parent_node(id)),
             Some(
                 Node::Item(_)
                 | Node::TraitItem(_)
                 | Node::ImplItem(_)
                 | Node::Expr(Expr { kind: ExprKind::Closure(..), .. }),
-            ) => true,
-            _ => false,
-        }
+            )
+        )
     }
 
     /// Whether the expression pointed at by `hir_id` belongs to a `const` evaluation context.
@@ -554,10 +554,10 @@
 
     /// Whether `hir_id` corresponds to a `mod` or a crate.
     pub fn is_hir_id_module(&self, hir_id: HirId) -> bool {
-        match self.get_entry(hir_id).node {
-            Node::Item(Item { kind: ItemKind::Mod(_), .. }) | Node::Crate(..) => true,
-            _ => false,
-        }
+        matches!(
+            self.get_entry(hir_id).node,
+            Node::Item(Item { kind: ItemKind::Mod(_), .. }) | Node::Crate(..)
+        )
     }
 
     /// Retrieves the `HirId` for `id`'s enclosing method, unless there's a
@@ -816,7 +816,7 @@
             Some(Node::Variant(ref v)) => Some(&v.attrs[..]),
             Some(Node::Field(ref f)) => Some(&f.attrs[..]),
             Some(Node::Expr(ref e)) => Some(&*e.attrs),
-            Some(Node::Stmt(ref s)) => Some(s.kind.attrs()),
+            Some(Node::Stmt(ref s)) => Some(s.kind.attrs(|id| self.item(id.id))),
             Some(Node::Arm(ref a)) => Some(&*a.attrs),
             Some(Node::GenericParam(param)) => Some(&param.attrs[..]),
             // Unit/tuple structs/variants take the attributes straight from
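
Several hunks in this file replace `match ... { pat => true, _ => false }` with the `matches!` macro. A small illustration of the same rewrite on a toy enum (the names here are made up for the example, not rustc's):

```rust
enum Node {
    Item,
    Expr { is_closure: bool },
    Other,
}

// Before: an explicit match returning bool.
fn is_interesting_verbose(n: &Node) -> bool {
    match n {
        Node::Item | Node::Expr { is_closure: true } => true,
        _ => false,
    }
}

// After: `matches!` expresses the same check more compactly.
fn is_interesting(n: &Node) -> bool {
    matches!(n, Node::Item | Node::Expr { is_closure: true })
}

fn main() {
    assert!(is_interesting(&Node::Item));
    assert!(is_interesting_verbose(&Node::Expr { is_closure: true }));
    assert!(!is_interesting(&Node::Other));
}
```
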
diff --git a/compiler/rustc_middle/src/hir/place.rs b/compiler/rustc_middle/src/hir/place.rs
index bcb56fa..5da4be4 100644
--- a/compiler/rustc_middle/src/hir/place.rs
+++ b/compiler/rustc_middle/src/hir/place.rs
@@ -103,7 +103,7 @@
 
     /// Returns the type of this `Place` after all projections have been applied.
     pub fn ty(&self) -> Ty<'tcx> {
-        self.projections.last().map_or_else(|| self.base_ty, |proj| proj.ty)
+        self.projections.last().map_or(self.base_ty, |proj| proj.ty)
     }
 
     /// Returns the type of this `Place` immediately before `projection_index`th projection
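
The `place.rs` change swaps `map_or_else(|| self.base_ty, ...)` for `map_or(self.base_ty, ...)`; the two are equivalent when the default is a value that is already cheap to pass, and `map_or_else` only pays off when computing the default is expensive. A tiny sketch of the simplified form:

```rust
fn last_projection_ty(base_ty: u32, projections: &[u32]) -> u32 {
    // `map_or` takes the default eagerly; that is fine here because the base type
    // is already available as a cheap Copy value.
    projections.last().map_or(base_ty, |proj| *proj)
}

fn main() {
    assert_eq!(last_projection_ty(7, &[]), 7);
    assert_eq!(last_projection_ty(7, &[1, 2, 3]), 3);
}
```
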
diff --git a/compiler/rustc_middle/src/ich/impls_hir.rs b/compiler/rustc_middle/src/ich/impls_hir.rs
index c2d177b..d6c6cef 100644
--- a/compiler/rustc_middle/src/ich/impls_hir.rs
+++ b/compiler/rustc_middle/src/ich/impls_hir.rs
@@ -221,6 +221,12 @@
     }
 }
 
+impl<'hir> HashStable<StableHashingContext<'hir>> for attr::InstructionSetAttr {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'hir>, hasher: &mut StableHasher) {
+        mem::discriminant(self).hash_stable(hcx, hasher);
+    }
+}
+
 impl<'hir> HashStable<StableHashingContext<'hir>> for attr::OptimizeAttr {
     fn hash_stable(&self, hcx: &mut StableHashingContext<'hir>, hasher: &mut StableHasher) {
         mem::discriminant(self).hash_stable(hcx, hasher);
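
The new `InstructionSetAttr` impl mirrors the existing `OptimizeAttr` one: only the enum discriminant is hashed, which is the whole value when the variants carry no payload. A self-contained sketch of the idea using the standard library hasher instead of rustc's `StableHasher` plumbing (the enum here is illustrative only):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::mem;

enum InstructionSet {
    ArmA32,
    ArmT32,
}

// Hash only which variant this is; with field-less variants that captures everything.
fn stable_hash(attr: &InstructionSet) -> u64 {
    let mut hasher = DefaultHasher::new();
    mem::discriminant(attr).hash(&mut hasher);
    hasher.finish()
}

fn main() {
    assert_eq!(stable_hash(&InstructionSet::ArmA32), stable_hash(&InstructionSet::ArmA32));
    // Distinct discriminants hash to distinct values in practice.
    assert_ne!(stable_hash(&InstructionSet::ArmA32), stable_hash(&InstructionSet::ArmT32));
}
```
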
diff --git a/compiler/rustc_middle/src/ich/impls_syntax.rs b/compiler/rustc_middle/src/ich/impls_syntax.rs
index e3d4655..bfbe157 100644
--- a/compiler/rustc_middle/src/ich/impls_syntax.rs
+++ b/compiler/rustc_middle/src/ich/impls_syntax.rs
@@ -5,7 +5,7 @@
 
 use rustc_ast as ast;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_span::SourceFile;
+use rustc_span::{BytePos, NormalizedPos, SourceFile};
 
 use smallvec::SmallVec;
 
@@ -41,10 +41,11 @@
         debug_assert!(!attr.is_doc_comment());
 
         let ast::Attribute { kind, id: _, style, span } = attr;
-        if let ast::AttrKind::Normal(item) = kind {
+        if let ast::AttrKind::Normal(item, tokens) = kind {
             item.hash_stable(self, hasher);
             style.hash_stable(self, hasher);
             span.hash_stable(self, hasher);
+            tokens.as_ref().expect_none("Tokens should have been removed during lowering!");
         } else {
             unreachable!();
         }
@@ -102,22 +103,19 @@
     }
 }
 
-fn stable_byte_pos(pos: ::rustc_span::BytePos, source_file_start: ::rustc_span::BytePos) -> u32 {
+fn stable_byte_pos(pos: BytePos, source_file_start: BytePos) -> u32 {
     pos.0 - source_file_start.0
 }
 
-fn stable_multibyte_char(
-    mbc: ::rustc_span::MultiByteChar,
-    source_file_start: ::rustc_span::BytePos,
-) -> (u32, u32) {
-    let ::rustc_span::MultiByteChar { pos, bytes } = mbc;
+fn stable_multibyte_char(mbc: rustc_span::MultiByteChar, source_file_start: BytePos) -> (u32, u32) {
+    let rustc_span::MultiByteChar { pos, bytes } = mbc;
 
     (pos.0 - source_file_start.0, bytes as u32)
 }
 
 fn stable_non_narrow_char(
-    swc: ::rustc_span::NonNarrowChar,
-    source_file_start: ::rustc_span::BytePos,
+    swc: rustc_span::NonNarrowChar,
+    source_file_start: BytePos,
 ) -> (u32, u32) {
     let pos = swc.pos();
     let width = swc.width();
@@ -125,11 +123,8 @@
     (pos.0 - source_file_start.0, width as u32)
 }
 
-fn stable_normalized_pos(
-    np: ::rustc_span::NormalizedPos,
-    source_file_start: ::rustc_span::BytePos,
-) -> (u32, u32) {
-    let ::rustc_span::NormalizedPos { pos, diff } = np;
+fn stable_normalized_pos(np: NormalizedPos, source_file_start: BytePos) -> (u32, u32) {
+    let NormalizedPos { pos, diff } = np;
 
     (pos.0 - source_file_start.0, diff)
 }
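
All of the helpers above follow the same recipe: turn an absolute `BytePos` into a file-relative `u32` by subtracting the file's start offset, so the stable hash does not change when unrelated files shift the absolute offsets in the source map. A minimal sketch with a hypothetical newtype:

```rust
#[derive(Clone, Copy)]
struct BytePos(u32);

// Positions are made relative to the start of their own file, which is what keeps
// the hash stable across reorderings of the source map.
fn stable_byte_pos(pos: BytePos, source_file_start: BytePos) -> u32 {
    pos.0 - source_file_start.0
}

fn main() {
    let file_start = BytePos(1_000);
    assert_eq!(stable_byte_pos(BytePos(1_042), file_start), 42);
}
```
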
diff --git a/compiler/rustc_middle/src/infer/unify_key.rs b/compiler/rustc_middle/src/infer/unify_key.rs
index 4d884dd..16e9aaf 100644
--- a/compiler/rustc_middle/src/infer/unify_key.rs
+++ b/compiler/rustc_middle/src/infer/unify_key.rs
@@ -175,19 +175,15 @@
 impl<'tcx> UnifyValue for ConstVarValue<'tcx> {
     type Error = (&'tcx ty::Const<'tcx>, &'tcx ty::Const<'tcx>);
 
-    fn unify_values(value1: &Self, value2: &Self) -> Result<Self, Self::Error> {
-        let (val, span) = match (value1.val, value2.val) {
+    fn unify_values(&value1: &Self, &value2: &Self) -> Result<Self, Self::Error> {
+        Ok(match (value1.val, value2.val) {
             (ConstVariableValue::Known { .. }, ConstVariableValue::Known { .. }) => {
                 bug!("equating two const variables, both of which have known values")
             }
 
             // If one side is known, prefer that one.
-            (ConstVariableValue::Known { .. }, ConstVariableValue::Unknown { .. }) => {
-                (value1.val, value1.origin.span)
-            }
-            (ConstVariableValue::Unknown { .. }, ConstVariableValue::Known { .. }) => {
-                (value2.val, value2.origin.span)
-            }
+            (ConstVariableValue::Known { .. }, ConstVariableValue::Unknown { .. }) => value1,
+            (ConstVariableValue::Unknown { .. }, ConstVariableValue::Known { .. }) => value2,
 
             // If both sides are *unknown*, it hardly matters, does it?
             (
@@ -200,16 +196,11 @@
                 // universe is the minimum of the two universes, because that is
                 // the one which contains the fewest names in scope.
                 let universe = cmp::min(universe1, universe2);
-                (ConstVariableValue::Unknown { universe }, value1.origin.span)
+                ConstVarValue {
+                    val: ConstVariableValue::Unknown { universe },
+                    origin: value1.origin,
+                }
             }
-        };
-
-        Ok(ConstVarValue {
-            origin: ConstVariableOrigin {
-                kind: ConstVariableOriginKind::ConstInference,
-                span: span,
-            },
-            val,
         })
     }
 }
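
The rewritten `unify_values` keeps the whole `ConstVarValue` from whichever side is known, and otherwise keeps the smaller universe. A standalone sketch of that "prefer the known side" rule, with deliberately simplified hypothetical types (no spans, no interner, and an error instead of the `bug!` assertion):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Value {
    Known(i32),
    Unknown { universe: u32 },
}

fn unify(a: Value, b: Value) -> Result<Value, (Value, Value)> {
    use Value::*;
    Ok(match (a, b) {
        (Known(x), Known(y)) if x == y => a,
        (Known(_), Known(_)) => return Err((a, b)),
        // If one side is known, prefer that one.
        (Known(_), Unknown { .. }) => a,
        (Unknown { .. }, Known(_)) => b,
        // Otherwise keep the smaller universe, which has the fewest names in scope.
        (Unknown { universe: u1 }, Unknown { universe: u2 }) => Unknown { universe: u1.min(u2) },
    })
}

fn main() {
    let known = Value::Known(3);
    let unknown = Value::Unknown { universe: 2 };
    assert_eq!(unify(known, unknown), Ok(known));
    assert_eq!(unify(Value::Unknown { universe: 5 }, unknown), Ok(Value::Unknown { universe: 2 }));
}
```
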
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
index fa885ce..4a1d545 100644
--- a/compiler/rustc_middle/src/lib.rs
+++ b/compiler/rustc_middle/src/lib.rs
@@ -24,6 +24,7 @@
 
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![feature(array_windows)]
+#![feature(assoc_char_funcs)]
 #![feature(backtrace)]
 #![feature(bool_to_option)]
 #![feature(box_patterns)]
@@ -47,6 +48,9 @@
 #![feature(associated_type_bounds)]
 #![feature(rustc_attrs)]
 #![feature(int_error_matching)]
+#![feature(half_open_range_patterns)]
+#![feature(exclusive_range_pattern)]
+#![feature(control_flow_enum)]
 #![recursion_limit = "512"]
 
 #[macro_use]
diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs
index 25e5379..36ecd5b 100644
--- a/compiler/rustc_middle/src/lint.rs
+++ b/compiler/rustc_middle/src/lint.rs
@@ -22,7 +22,9 @@
     Node(Symbol, Span, Option<Symbol> /* RFC 2383 reason */),
 
     /// Lint level was set by a command-line flag.
-    CommandLine(Symbol),
+    /// The provided `Level` is the level specified on the command line;
+    /// the actual level may be lower due to `--cap-lints`.
+    CommandLine(Symbol, Level),
 }
 
 pub type LevelSource = (Level, LintSource);
@@ -207,9 +209,24 @@
         span: Option<MultiSpan>,
         decorate: Box<dyn for<'b> FnOnce(LintDiagnosticBuilder<'b>) + 'd>,
     ) {
+        // Check for future incompatibility lints and issue a stronger warning.
+        let lint_id = LintId::of(lint);
+        let future_incompatible = lint.future_incompatible;
+
+        let has_future_breakage =
+            future_incompatible.map_or(false, |incompat| incompat.future_breakage.is_some());
+
         let mut err = match (level, span) {
-            (Level::Allow, _) => {
-                return;
+            (Level::Allow, span) => {
+                if has_future_breakage {
+                    if let Some(span) = span {
+                        sess.struct_span_allow(span, "")
+                    } else {
+                        sess.struct_allow("")
+                    }
+                } else {
+                    return;
+                }
             }
             (Level::Warn, Some(span)) => sess.struct_span_warn(span, ""),
             (Level::Warn, None) => sess.struct_warn(""),
@@ -217,10 +234,6 @@
             (Level::Deny | Level::Forbid, None) => sess.struct_err(""),
         };
 
-        // Check for future incompatibility lints and issue a stronger warning.
-        let lint_id = LintId::of(lint);
-        let future_incompatible = lint.future_incompatible;
-
         // If this code originates in a foreign macro, aka something that this crate
         // did not itself author, then it's likely that there's nothing this crate
         // can do about it. We probably want to skip the lint entirely.
@@ -250,12 +263,12 @@
                     &format!("`#[{}({})]` on by default", level.as_str(), name),
                 );
             }
-            LintSource::CommandLine(lint_flag_val) => {
-                let flag = match level {
+            LintSource::CommandLine(lint_flag_val, orig_level) => {
+                let flag = match orig_level {
                     Level::Warn => "-W",
                     Level::Deny => "-D",
                     Level::Forbid => "-F",
-                    Level::Allow => panic!(),
+                    Level::Allow => "-A",
                 };
                 let hyphen_case_lint_name = name.replace("_", "-");
                 if lint_flag_val.as_str() == name {
@@ -303,7 +316,7 @@
             }
         }
 
-        err.code(DiagnosticId::Lint(name));
+        err.code(DiagnosticId::Lint { name, has_future_breakage });
 
         if let Some(future_incompatible) = future_incompatible {
             const STANDARD_MESSAGE: &str = "this was previously accepted by the compiler but is being phased out; \
@@ -340,7 +353,9 @@
 pub fn in_external_macro(sess: &Session, span: Span) -> bool {
     let expn_data = span.ctxt().outer_expn_data();
     match expn_data.kind {
-        ExpnKind::Root | ExpnKind::Desugaring(DesugaringKind::ForLoop(_)) => false,
+        ExpnKind::Inlined | ExpnKind::Root | ExpnKind::Desugaring(DesugaringKind::ForLoop(_)) => {
+            false
+        }
         ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => true, // well, it's "external"
         ExpnKind::Macro(MacroKind::Bang, _) => {
             // Dummy span for the `def_site` means it's an external macro.
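
`LintSource::CommandLine` now carries the level that was actually given on the command line, so the diagnostic can name `-A` instead of panicking when an allowed-by-default lint with future breakage is still reported. A hedged sketch of the flag mapping with an illustrative stand-in enum:

```rust
#[derive(Clone, Copy)]
enum Level {
    Allow,
    Warn,
    Deny,
    Forbid,
}

// Recover the command-line flag from the level that was recorded when the lint was set.
fn flag_for(level: Level) -> &'static str {
    match level {
        Level::Warn => "-W",
        Level::Deny => "-D",
        Level::Forbid => "-F",
        // Reachable now that `Allow` lints with future breakage still emit a diagnostic.
        Level::Allow => "-A",
    }
}

fn main() {
    assert_eq!(flag_for(Level::Allow), "-A");
    assert_eq!(flag_for(Level::Deny), "-D");
}
```
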
diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs
index a5482b7..9210863 100644
--- a/compiler/rustc_middle/src/macros.rs
+++ b/compiler/rustc_middle/src/macros.rs
@@ -29,8 +29,8 @@
         $(
             impl<$tcx> $crate::ty::Lift<$tcx> for $ty {
                 type Lifted = Self;
-                fn lift_to_tcx(&self, _: $crate::ty::TyCtxt<$tcx>) -> Option<Self> {
-                    Some(Clone::clone(self))
+                fn lift_to_tcx(self, _: $crate::ty::TyCtxt<$tcx>) -> Option<Self> {
+                    Some(self)
                 }
             }
         )+
@@ -62,9 +62,9 @@
                 fn super_visit_with<F: $crate::ty::fold::TypeVisitor<$tcx>>(
                     &self,
                     _: &mut F)
-                    -> bool
+                    -> ::std::ops::ControlFlow<()>
                 {
-                    false
+                    ::std::ops::ControlFlow::CONTINUE
                 }
             }
         )+
@@ -105,7 +105,7 @@
             fn super_visit_with<V: $crate::ty::fold::TypeVisitor<$tcx>>(
                 &self,
                 visitor: &mut V,
-            ) -> bool {
+            ) -> ::std::ops::ControlFlow<()> {
                 EnumTypeFoldableImpl!(@VisitVariants(self, visitor) input($($variants)*) output())
             }
         }
@@ -179,9 +179,10 @@
                 input($($input)*)
                 output(
                     $variant ( $($variant_arg),* ) => {
-                        false $(|| $crate::ty::fold::TypeFoldable::visit_with(
+                        $($crate::ty::fold::TypeFoldable::visit_with(
                             $variant_arg, $visitor
-                        ))*
+                        )?;)*
+                        ::std::ops::ControlFlow::CONTINUE
                     }
                     $($output)*
                 )
@@ -196,9 +197,10 @@
                 input($($input)*)
                 output(
                     $variant { $($variant_arg),* } => {
-                        false $(|| $crate::ty::fold::TypeFoldable::visit_with(
+                        $($crate::ty::fold::TypeFoldable::visit_with(
                             $variant_arg, $visitor
-                        ))*
+                        )?;)*
+                        ::std::ops::ControlFlow::CONTINUE
                     }
                     $($output)*
                 )
@@ -212,7 +214,7 @@
             @VisitVariants($this, $visitor)
                 input($($input)*)
                 output(
-                    $variant => { false }
+                    $variant => { ::std::ops::ControlFlow::CONTINUE }
                     $($output)*
                 )
         )
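
The macro changes above switch the visitor plumbing from returning `bool` to `std::ops::ControlFlow<()>`, so the generated arms can short-circuit with `?` instead of chaining `||`. A small self-contained illustration of that idiom on a recent toolchain; rustc's `ControlFlow::CONTINUE` constant is behind the `control_flow_enum` feature, so this sketch spells out `Continue(())`:

```rust
use std::ops::ControlFlow;

// Hypothetical mini-visitor: stop at the first negative number.
fn visit(v: &i32) -> ControlFlow<()> {
    if *v < 0 { ControlFlow::Break(()) } else { ControlFlow::Continue(()) }
}

fn visit_all(values: &[i32]) -> ControlFlow<()> {
    for v in values {
        // `?` propagates a `Break` immediately, mirroring the `visit_with(..)?;` expansion.
        visit(v)?;
    }
    ControlFlow::Continue(())
}

fn main() {
    assert_eq!(visit_all(&[1, 2, 3]), ControlFlow::Continue(()));
    assert_eq!(visit_all(&[1, -2, 3]), ControlFlow::Break(()));
}
```
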
diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
index d71cdc4..a4363bb 100644
--- a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
+++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
@@ -1,5 +1,5 @@
 use crate::mir::mono::Linkage;
-use rustc_attr::{InlineAttr, OptimizeAttr};
+use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
 use rustc_session::config::SanitizerSet;
 use rustc_span::symbol::Symbol;
 
@@ -34,6 +34,10 @@
     /// The `#[no_sanitize(...)]` attribute. Indicates sanitizers for which
     /// instrumentation should be disabled inside the annotated function.
     pub no_sanitize: SanitizerSet,
+    /// The `#[instruction_set(set)]` attribute. Indicates whether the code should be
+    /// generated for a specific instruction set. Only usable on architectures which allow
+    /// switching between multiple instruction sets.
+    pub instruction_set: Option<InstructionSetAttr>,
 }
 
 bitflags! {
@@ -98,6 +102,7 @@
             linkage: None,
             link_section: None,
             no_sanitize: SanitizerSet::empty(),
+            instruction_set: None,
         }
     }
 
diff --git a/compiler/rustc_middle/src/middle/cstore.rs b/compiler/rustc_middle/src/middle/cstore.rs
index f3d7c85..ae9e4d3 100644
--- a/compiler/rustc_middle/src/middle/cstore.rs
+++ b/compiler/rustc_middle/src/middle/cstore.rs
@@ -8,6 +8,7 @@
 use rustc_ast::expand::allocator::AllocatorKind;
 use rustc_data_structures::svh::Svh;
 use rustc_data_structures::sync::{self, MetadataRef};
+use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
 use rustc_hir::definitions::{DefKey, DefPath, DefPathHash};
 use rustc_macros::HashStable;
@@ -185,6 +186,7 @@
 
     // resolve
     fn def_key(&self, def: DefId) -> DefKey;
+    fn def_kind(&self, def: DefId) -> DefKind;
     fn def_path(&self, def: DefId) -> DefPath;
     fn def_path_hash(&self, def: DefId) -> DefPathHash;
     fn all_def_path_hashes_and_def_ids(&self, cnum: CrateNum) -> Vec<(DefPathHash, DefId)>;
diff --git a/compiler/rustc_middle/src/middle/limits.rs b/compiler/rustc_middle/src/middle/limits.rs
index def9e5e..4134276 100644
--- a/compiler/rustc_middle/src/middle/limits.rs
+++ b/compiler/rustc_middle/src/middle/limits.rs
@@ -48,10 +48,12 @@
                         .unwrap_or(attr.span);
 
                     let error_str = match e.kind() {
-                        IntErrorKind::Overflow => "`limit` is too large",
+                        IntErrorKind::PosOverflow => "`limit` is too large",
                         IntErrorKind::Empty => "`limit` must be a non-negative integer",
                         IntErrorKind::InvalidDigit => "not a valid integer",
-                        IntErrorKind::Underflow => bug!("`limit` should never underflow"),
+                        IntErrorKind::NegOverflow => {
+                            bug!("`limit` should never negatively overflow")
+                        }
                         IntErrorKind::Zero => bug!("zero is a valid `limit`"),
                         kind => bug!("unimplemented IntErrorKind variant: {:?}", kind),
                     };
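
The `limits.rs` hunk follows the std rename of `IntErrorKind::Overflow`/`Underflow` to `PosOverflow`/`NegOverflow`. A sketch of the same classification using the now-stable API (Rust 1.55+; at the time of this import it still required the `int_error_matching` feature), with hypothetical messages matching the ones above:

```rust
use std::num::IntErrorKind;

fn describe_limit_error(input: &str) -> &'static str {
    match input.parse::<usize>() {
        Ok(_) => "ok",
        Err(e) => match e.kind() {
            IntErrorKind::PosOverflow => "`limit` is too large",
            IntErrorKind::Empty => "`limit` must be a non-negative integer",
            IntErrorKind::InvalidDigit => "not a valid integer",
            // `IntErrorKind` is non-exhaustive, so a catch-all arm is required.
            _ => "unexpected parse error",
        },
    }
}

fn main() {
    assert_eq!(describe_limit_error("99999999999999999999999999"), "`limit` is too large");
    assert_eq!(describe_limit_error("abc"), "not a valid integer");
    assert_eq!(describe_limit_error(""), "`limit` must be a non-negative integer");
}
```
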
diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs
index 4756e83..254b57a 100644
--- a/compiler/rustc_middle/src/middle/privacy.rs
+++ b/compiler/rustc_middle/src/middle/privacy.rs
@@ -3,7 +3,6 @@
 //! which are available for use externally when compiled as a library.
 
 use rustc_data_structures::fx::FxHashMap;
-use rustc_hir::def_id::DefIdSet;
 use rustc_hir::HirId;
 use rustc_macros::HashStable;
 use std::fmt;
@@ -59,7 +58,3 @@
         fmt::Debug::fmt(&self.map, f)
     }
 }
-
-/// A set containing all exported definitions from external crates.
-/// The set does not contain any entries from local crates.
-pub type ExternalExports = DefIdSet;
diff --git a/compiler/rustc_middle/src/middle/region.rs b/compiler/rustc_middle/src/middle/region.rs
index 4c6ac82..d060549 100644
--- a/compiler/rustc_middle/src/middle/region.rs
+++ b/compiler/rustc_middle/src/middle/region.rs
@@ -283,25 +283,29 @@
     /// To see that this method works, consider:
     ///
     /// Let `D` be our binding/temporary and `U` be our other HIR node, with
-    /// `HIR-postorder(U) < HIR-postorder(D)` (in our example, U would be
-    /// the yield and D would be one of the calls). Let's show that
-    /// `D` is storage-dead at `U`.
+    /// `HIR-postorder(U) < HIR-postorder(D)`. Suppose, as in our example,
+    /// U is the yield and D is one of the calls.
+    /// Let's show that `D` is storage-dead at `U`.
     ///
     /// Remember that storage-live/storage-dead refers to the state of
     /// the *storage*, and does not consider moves/drop flags.
     ///
     /// Then:
-    ///     1. From the ordering guarantee of HIR visitors (see
-    ///     `rustc_hir::intravisit`), `D` does not dominate `U`.
-    ///     2. Therefore, `D` is *potentially* storage-dead at `U` (because
-    ///     we might visit `U` without ever getting to `D`).
-    ///     3. However, we guarantee that at each HIR point, each
-    ///     binding/temporary is always either always storage-live
-    ///     or always storage-dead. This is what is being guaranteed
-    ///     by `terminating_scopes` including all blocks where the
-    ///     count of executions is not guaranteed.
-    ///     4. By `2.` and `3.`, `D` is *statically* storage-dead at `U`,
-    ///     QED.
+    ///
+    ///   1. From the ordering guarantee of HIR visitors (see
+    ///   `rustc_hir::intravisit`), `D` does not dominate `U`.
+    ///
+    ///   2. Therefore, `D` is *potentially* storage-dead at `U` (because
+    ///   we might visit `U` without ever getting to `D`).
+    ///
+    ///   3. However, we guarantee that at each HIR point, each
+    ///   binding/temporary is always either always storage-live
+    ///   or always storage-dead. This is what is being guaranteed
+    ///   by `terminating_scopes` including all blocks where the
+    ///   count of executions is not guaranteed.
+    ///
+    ///   4. By `2.` and `3.`, `D` is *statically* storage-dead at `U`,
+    ///   QED.
     ///
     /// This property ought to not rely on (3) in an essential way -- it
     /// is probably still correct even if we have "unrestricted" terminating
diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs
index 7e2415f..978f089 100644
--- a/compiler/rustc_middle/src/middle/stability.rs
+++ b/compiler/rustc_middle/src/middle/stability.rs
@@ -256,24 +256,12 @@
 }
 
 // See issue #38412.
-fn skip_stability_check_due_to_privacy(tcx: TyCtxt<'_>, mut def_id: DefId) -> bool {
-    // Check if `def_id` is a trait method.
-    match tcx.def_kind(def_id) {
-        DefKind::AssocFn | DefKind::AssocTy | DefKind::AssocConst => {
-            if let ty::TraitContainer(trait_def_id) = tcx.associated_item(def_id).container {
-                // Trait methods do not declare visibility (even
-                // for visibility info in cstore). Use containing
-                // trait instead, so methods of `pub` traits are
-                // themselves considered `pub`.
-                def_id = trait_def_id;
-            }
-        }
-        _ => {}
+fn skip_stability_check_due_to_privacy(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    if tcx.def_kind(def_id) == DefKind::TyParam {
+        // Type parameters have no visibility, so they are considered public for this check.
+        return false;
     }
-
-    let visibility = tcx.visibility(def_id);
-
-    match visibility {
+    match tcx.visibility(def_id) {
         // Must check stability for `pub` items.
         ty::Visibility::Public => false,
 
@@ -411,7 +399,7 @@
         def_id: DefId,
         id: Option<HirId>,
         span: Span,
-        unmarked: impl FnOnce(Span, DefId) -> (),
+        unmarked: impl FnOnce(Span, DefId),
     ) {
         let soft_handler = |lint, span, msg: &_| {
             self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, |lint| {
diff --git a/compiler/rustc_middle/src/mir/coverage.rs b/compiler/rustc_middle/src/mir/coverage.rs
new file mode 100644
index 0000000..6b46d7c
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/coverage.rs
@@ -0,0 +1,180 @@
+//! Metadata from source code coverage analysis and instrumentation.
+
+use rustc_macros::HashStable;
+use rustc_span::Symbol;
+
+use std::cmp::Ord;
+use std::fmt::{self, Debug, Formatter};
+
+rustc_index::newtype_index! {
+    /// An ExpressionOperandId value is assigned directly from either a
+    /// CounterValueReference.as_u32() (which ascend from 1) or an ExpressionOperandId.as_u32()
+    /// (which _*descend*_ from u32::MAX). Id value `0` (zero) represents a virtual counter with a
+    /// constant value of `0`.
+    pub struct ExpressionOperandId {
+        derive [HashStable]
+        DEBUG_FORMAT = "ExpressionOperandId({})",
+        MAX = 0xFFFF_FFFF,
+    }
+}
+
+impl ExpressionOperandId {
+    /// An expression operand for a "zero counter", as described in the following references:
+    ///
+    /// * <https://github.com/rust-lang/llvm-project/blob/llvmorg-8.0.0/llvm/docs/CoverageMappingFormat.rst#counter>
+    /// * <https://github.com/rust-lang/llvm-project/blob/llvmorg-8.0.0/llvm/docs/CoverageMappingFormat.rst#tag>
+    /// * <https://github.com/rust-lang/llvm-project/blob/llvmorg-8.0.0/llvm/docs/CoverageMappingFormat.rst#counter-expressions>
+    ///
+    /// This operand can be used to count two or more separate code regions with a single counter,
+    /// if they run sequentially with no branches, by injecting the `Counter` in a `BasicBlock` for
+    /// one of the code regions, and inserting `CounterExpression`s ("add ZERO to the counter") in
+    /// the coverage map for the other code regions.
+    pub const ZERO: Self = Self::from_u32(0);
+}
+
+rustc_index::newtype_index! {
+    pub struct CounterValueReference {
+        derive [HashStable]
+        DEBUG_FORMAT = "CounterValueReference({})",
+        MAX = 0xFFFF_FFFF,
+    }
+}
+
+impl CounterValueReference {
+    // Counters start at 1 to reserve 0 for ExpressionOperandId::ZERO.
+    pub const START: Self = Self::from_u32(1);
+}
+
+rustc_index::newtype_index! {
+    /// InjectedExpressionId.as_u32() converts to ExpressionOperandId.as_u32()
+    ///
+    /// Values descend from u32::MAX.
+    pub struct InjectedExpressionId {
+        derive [HashStable]
+        DEBUG_FORMAT = "InjectedExpressionId({})",
+        MAX = 0xFFFF_FFFF,
+    }
+}
+
+rustc_index::newtype_index! {
+    /// InjectedExpressionIndex.as_u32() translates to u32::MAX - ExpressionOperandId.as_u32()
+    ///
+    /// Values ascend from 0.
+    pub struct InjectedExpressionIndex {
+        derive [HashStable]
+        DEBUG_FORMAT = "InjectedExpressionIndex({})",
+        MAX = 0xFFFF_FFFF,
+    }
+}
+
+rustc_index::newtype_index! {
+    /// MappedExpressionIndex values ascend from zero, and are recalculated indexes based on their
+    /// array position in the LLVM coverage map "Expressions" array, which is assembled during the
+    /// "mapgen" process. They cannot be computed algorithmically, from the other `newtype_index`s.
+    pub struct MappedExpressionIndex {
+        derive [HashStable]
+        DEBUG_FORMAT = "MappedExpressionIndex({})",
+        MAX = 0xFFFF_FFFF,
+    }
+}
+
+impl From<CounterValueReference> for ExpressionOperandId {
+    #[inline]
+    fn from(v: CounterValueReference) -> ExpressionOperandId {
+        ExpressionOperandId::from(v.as_u32())
+    }
+}
+
+impl From<InjectedExpressionId> for ExpressionOperandId {
+    #[inline]
+    fn from(v: InjectedExpressionId) -> ExpressionOperandId {
+        ExpressionOperandId::from(v.as_u32())
+    }
+}
+
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub enum CoverageKind {
+    Counter {
+        function_source_hash: u64,
+        id: CounterValueReference,
+    },
+    Expression {
+        id: InjectedExpressionId,
+        lhs: ExpressionOperandId,
+        op: Op,
+        rhs: ExpressionOperandId,
+    },
+    Unreachable,
+}
+
+impl CoverageKind {
+    pub fn as_operand_id(&self) -> ExpressionOperandId {
+        use CoverageKind::*;
+        match *self {
+            Counter { id, .. } => ExpressionOperandId::from(id),
+            Expression { id, .. } => ExpressionOperandId::from(id),
+            Unreachable => bug!("Unreachable coverage cannot be part of an expression"),
+        }
+    }
+
+    pub fn is_counter(&self) -> bool {
+        match self {
+            Self::Counter { .. } => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_expression(&self) -> bool {
+        match self {
+            Self::Expression { .. } => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_unreachable(&self) -> bool {
+        *self == Self::Unreachable
+    }
+}
+
+impl Debug for CoverageKind {
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        use CoverageKind::*;
+        match self {
+            Counter { id, .. } => write!(fmt, "Counter({:?})", id.index()),
+            Expression { id, lhs, op, rhs } => write!(
+                fmt,
+                "Expression({:?}) = {} {} {}",
+                id.index(),
+                lhs.index(),
+                if *op == Op::Add { "+" } else { "-" },
+                rhs.index(),
+            ),
+            Unreachable => write!(fmt, "Unreachable"),
+        }
+    }
+}
+
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, PartialEq, Eq, PartialOrd, Ord)]
+pub struct CodeRegion {
+    pub file_name: Symbol,
+    pub start_line: u32,
+    pub start_col: u32,
+    pub end_line: u32,
+    pub end_col: u32,
+}
+
+impl Debug for CodeRegion {
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        write!(
+            fmt,
+            "{}:{}:{} - {}:{}",
+            self.file_name, self.start_line, self.start_col, self.end_line, self.end_col
+        )
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub enum Op {
+    Subtract,
+    Add,
+}
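
The new `coverage.rs` keeps counters and injected expressions in disjoint operand-id ranges: counters are numbered upward from 1, expressions downward from `u32::MAX`, and id 0 is the constant-zero counter. A hypothetical, self-contained sketch of that numbering scheme (plain `u32` newtype instead of rustc's `newtype_index!` types):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct OperandId(u32);

// Operand id 0 is reserved for the virtual "always zero" counter.
const ZERO: OperandId = OperandId(0);

fn counter_operand(nth_counter: u32) -> OperandId {
    // Counters start at 1 so that 0 stays reserved for the zero counter.
    OperandId(nth_counter + 1)
}

fn expression_operand(nth_expression: u32) -> OperandId {
    // Expressions are numbered downward from u32::MAX, so the two ranges never collide.
    OperandId(u32::MAX - nth_expression)
}

fn main() {
    assert_eq!(counter_operand(0), OperandId(1));
    assert_eq!(expression_operand(0), OperandId(u32::MAX));
    assert_ne!(counter_operand(0), ZERO);
}
```
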
diff --git a/compiler/rustc_middle/src/mir/coverage/mod.rs b/compiler/rustc_middle/src/mir/coverage/mod.rs
deleted file mode 100644
index ce311c2..0000000
--- a/compiler/rustc_middle/src/mir/coverage/mod.rs
+++ /dev/null
@@ -1,105 +0,0 @@
-//! Metadata from source code coverage analysis and instrumentation.
-
-use rustc_macros::HashStable;
-use rustc_span::Symbol;
-
-use std::cmp::Ord;
-use std::fmt::{self, Debug, Formatter};
-
-rustc_index::newtype_index! {
-    pub struct ExpressionOperandId {
-        derive [HashStable]
-        DEBUG_FORMAT = "ExpressionOperandId({})",
-        MAX = 0xFFFF_FFFF,
-    }
-}
-
-rustc_index::newtype_index! {
-    pub struct CounterValueReference {
-        derive [HashStable]
-        DEBUG_FORMAT = "CounterValueReference({})",
-        MAX = 0xFFFF_FFFF,
-    }
-}
-
-rustc_index::newtype_index! {
-    pub struct InjectedExpressionIndex {
-        derive [HashStable]
-        DEBUG_FORMAT = "InjectedExpressionIndex({})",
-        MAX = 0xFFFF_FFFF,
-    }
-}
-
-rustc_index::newtype_index! {
-    pub struct MappedExpressionIndex {
-        derive [HashStable]
-        DEBUG_FORMAT = "MappedExpressionIndex({})",
-        MAX = 0xFFFF_FFFF,
-    }
-}
-
-impl From<CounterValueReference> for ExpressionOperandId {
-    #[inline]
-    fn from(v: CounterValueReference) -> ExpressionOperandId {
-        ExpressionOperandId::from(v.as_u32())
-    }
-}
-
-impl From<InjectedExpressionIndex> for ExpressionOperandId {
-    #[inline]
-    fn from(v: InjectedExpressionIndex) -> ExpressionOperandId {
-        ExpressionOperandId::from(v.as_u32())
-    }
-}
-
-#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
-pub enum CoverageKind {
-    Counter {
-        function_source_hash: u64,
-        id: CounterValueReference,
-    },
-    Expression {
-        id: InjectedExpressionIndex,
-        lhs: ExpressionOperandId,
-        op: Op,
-        rhs: ExpressionOperandId,
-    },
-    Unreachable,
-}
-
-impl CoverageKind {
-    pub fn as_operand_id(&self) -> ExpressionOperandId {
-        match *self {
-            CoverageKind::Counter { id, .. } => ExpressionOperandId::from(id),
-            CoverageKind::Expression { id, .. } => ExpressionOperandId::from(id),
-            CoverageKind::Unreachable => {
-                bug!("Unreachable coverage cannot be part of an expression")
-            }
-        }
-    }
-}
-
-#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, PartialEq, Eq, PartialOrd, Ord)]
-pub struct CodeRegion {
-    pub file_name: Symbol,
-    pub start_line: u32,
-    pub start_col: u32,
-    pub end_line: u32,
-    pub end_col: u32,
-}
-
-impl Debug for CodeRegion {
-    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
-        write!(
-            fmt,
-            "{}:{}:{} - {}:{}",
-            self.file_name, self.start_line, self.start_col, self.end_line, self.end_col
-        )
-    }
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
-pub enum Op {
-    Subtract,
-    Add,
-}
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index ee1ea81..5ebe38b 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -40,7 +40,7 @@
     pub extra: Extra,
 }
 
-pub trait AllocationExtra<Tag>: ::std::fmt::Debug + Clone {
+pub trait AllocationExtra<Tag>: std::fmt::Debug + Clone {
     // There is no constructor in here because the constructor's type depends
     // on `MemoryKind`, and making things sufficiently generic leads to painful
     // inference failure.
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index d41e568..e35ff6b 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -81,6 +81,12 @@
     }
 }
 
+impl From<ErrorReported> for InterpErrorInfo<'_> {
+    fn from(err: ErrorReported) -> Self {
+        InterpError::InvalidProgram(InvalidProgramInfo::AlreadyReported(err)).into()
+    }
+}
+
 impl<'tcx> From<InterpError<'tcx>> for InterpErrorInfo<'tcx> {
     fn from(kind: InterpError<'tcx>) -> Self {
         let capture_backtrace = tls::with_opt(|tcx| {
@@ -115,8 +121,8 @@
     /// Cannot compute this constant because it depends on another one
     /// which already produced an error.
     ReferencedConstant,
-    /// Abort in case type errors are reached.
-    TypeckError(ErrorReported),
+    /// Abort in case errors are already reported.
+    AlreadyReported(ErrorReported),
     /// An error occurred during layout computation.
     Layout(layout::LayoutError<'tcx>),
     /// An invalid transmute happened.
@@ -129,7 +135,7 @@
         match self {
             TooGeneric => write!(f, "encountered overly generic constant"),
             ReferencedConstant => write!(f, "referenced constant has errors"),
-            TypeckError(ErrorReported) => {
+            AlreadyReported(ErrorReported) => {
                 write!(f, "encountered constants with type errors, stopping evaluation")
             }
             Layout(ref err) => write!(f, "{}", err),
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index 2036362..bcf8579 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -110,7 +110,7 @@
 use rustc_macros::HashStable;
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_serialize::{Decodable, Encodable};
-use rustc_target::abi::{Endian, Size};
+use rustc_target::abi::Endian;
 
 use crate::mir;
 use crate::ty::codec::{TyDecoder, TyEncoder};
@@ -486,10 +486,10 @@
         // `main as fn() == main as fn()` is false, while `let x = main as fn(); x == x` is true.
         // However, formatting code relies on function identity (see #58320), so we only do
         // this for generic functions.  Lifetime parameters are ignored.
-        let is_generic = instance.substs.into_iter().any(|kind| match kind.unpack() {
-            GenericArgKind::Lifetime(_) => false,
-            _ => true,
-        });
+        let is_generic = instance
+            .substs
+            .into_iter()
+            .any(|kind| !matches!(kind.unpack(), GenericArgKind::Lifetime(_)));
         if is_generic {
             // Get a fresh ID.
             let mut alloc_map = self.alloc_map.lock();
@@ -590,39 +590,6 @@
     uint
 }
 
-////////////////////////////////////////////////////////////////////////////////
-// Methods to facilitate working with signed integers stored in a u128
-////////////////////////////////////////////////////////////////////////////////
-
-/// Truncates `value` to `size` bits and then sign-extend it to 128 bits
-/// (i.e., if it is negative, fill with 1's on the left).
-#[inline]
-pub fn sign_extend(value: u128, size: Size) -> u128 {
-    let size = size.bits();
-    if size == 0 {
-        // Truncated until nothing is left.
-        return 0;
-    }
-    // Sign-extend it.
-    let shift = 128 - size;
-    // Shift the unsigned value to the left, then shift back to the right as signed
-    // (essentially fills with FF on the left).
-    (((value << shift) as i128) >> shift) as u128
-}
-
-/// Truncates `value` to `size` bits.
-#[inline]
-pub fn truncate(value: u128, size: Size) -> u128 {
-    let size = size.bits();
-    if size == 0 {
-        // Truncated until nothing is left.
-        return 0;
-    }
-    let shift = 128 - size;
-    // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
-    (value << shift) >> shift
-}
-
 /// Computes the unsigned absolute value without wrapping or panicking.
 #[inline]
 pub fn uabs(value: i64) -> u64 {
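
The free functions `sign_extend` and `truncate` removed above move onto `Size` methods (used later in `value.rs` as `sz.sign_extend(..)`); the bit manipulation itself is unchanged. For reference, a standalone version of the same logic over a bit width:

```rust
/// Truncate `value` to `size` bits (shift left to drop high bits, logical shift right back).
fn truncate(value: u128, size: u32) -> u128 {
    if size == 0 {
        return 0;
    }
    let shift = 128 - size;
    (value << shift) >> shift
}

/// Truncate to `size` bits, then sign-extend back to 128 bits
/// (the arithmetic shift right fills the high bits with the sign bit).
fn sign_extend(value: u128, size: u32) -> u128 {
    if size == 0 {
        return 0;
    }
    let shift = 128 - size;
    (((value << shift) as i128) >> shift) as u128
}

fn main() {
    // 0xFF as an 8-bit value is -1; sign-extending reproduces all-ones.
    assert_eq!(sign_extend(0xFF, 8), u128::MAX);
    // Truncating 0x1FF to 8 bits keeps only the low byte.
    assert_eq!(truncate(0x1FF, 8), 0xFF);
}
```
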
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 206f01c..5e97862 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -8,9 +8,9 @@
 use rustc_macros::HashStable;
 use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
 
-use crate::ty::{ParamEnv, Ty, TyCtxt};
+use crate::ty::{ParamEnv, ScalarInt, Ty, TyCtxt};
 
-use super::{sign_extend, truncate, AllocId, Allocation, InterpResult, Pointer, PointerArithmetic};
+use super::{AllocId, Allocation, InterpResult, Pointer, PointerArithmetic};
 
 /// Represents the result of const evaluation via the `eval_to_allocation` query.
 #[derive(Clone, HashStable, TyEncodable, TyDecodable)]
@@ -56,15 +56,6 @@
         }
     }
 
-    pub fn try_to_str_slice(&self) -> Option<&'tcx str> {
-        if let ConstValue::Slice { data, start, end } = *self {
-            ::std::str::from_utf8(data.inspect_with_uninit_and_ptr_outside_interpreter(start..end))
-                .ok()
-        } else {
-            None
-        }
-    }
-
     pub fn try_to_bits(&self, size: Size) -> Option<u128> {
         self.try_to_scalar()?.to_bits(size).ok()
     }
@@ -112,12 +103,7 @@
 #[derive(HashStable)]
 pub enum Scalar<Tag = ()> {
     /// The raw bytes of a simple value.
-    Raw {
-        /// The first `size` bytes of `data` are the value.
-        /// Do not try to read less or more bytes than that. The remaining bytes must be 0.
-        data: u128,
-        size: u8,
-    },
+    Int(ScalarInt),
 
     /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
     /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
@@ -134,16 +120,7 @@
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Scalar::Ptr(ptr) => write!(f, "{:?}", ptr),
-            &Scalar::Raw { data, size } => {
-                Scalar::check_data(data, size);
-                if size == 0 {
-                    write!(f, "<ZST>")
-                } else {
-                    // Format as hex number wide enough to fit any value of the given `size`.
-                    // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
-                    write!(f, "0x{:>0width$x}", data, width = (size * 2) as usize)
-                }
-            }
+            Scalar::Int(int) => write!(f, "{:?}", int),
         }
     }
 }
@@ -152,7 +129,7 @@
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Scalar::Ptr(ptr) => write!(f, "pointer to {}", ptr),
-            Scalar::Raw { .. } => fmt::Debug::fmt(self, f),
+            Scalar::Int { .. } => fmt::Debug::fmt(self, f),
         }
     }
 }
@@ -172,21 +149,6 @@
 }
 
 impl Scalar<()> {
-    /// Make sure the `data` fits in `size`.
-    /// This is guaranteed by all constructors here, but since the enum variants are public,
-    /// it could still be violated (even though no code outside this file should
-    /// construct `Scalar`s).
-    #[inline(always)]
-    fn check_data(data: u128, size: u8) {
-        debug_assert_eq!(
-            truncate(data, Size::from_bytes(u64::from(size))),
-            data,
-            "Scalar value {:#x} exceeds size of {} bytes",
-            data,
-            size
-        );
-    }
-
     /// Tag this scalar with `new_tag` if it is a pointer, leave it unchanged otherwise.
     ///
     /// Used by `MemPlace::replace_tag`.
@@ -194,12 +156,14 @@
     pub fn with_tag<Tag>(self, new_tag: Tag) -> Scalar<Tag> {
         match self {
             Scalar::Ptr(ptr) => Scalar::Ptr(ptr.with_tag(new_tag)),
-            Scalar::Raw { data, size } => Scalar::Raw { data, size },
+            Scalar::Int(int) => Scalar::Int(int),
         }
     }
 }
 
 impl<'tcx, Tag> Scalar<Tag> {
+    pub const ZST: Self = Scalar::Int(ScalarInt::ZST);
+
     /// Erase the tag from the scalar, if any.
     ///
     /// Used by error reporting code to avoid having the error type depend on `Tag`.
@@ -207,18 +171,13 @@
     pub fn erase_tag(self) -> Scalar {
         match self {
             Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_tag()),
-            Scalar::Raw { data, size } => Scalar::Raw { data, size },
+            Scalar::Int(int) => Scalar::Int(int),
         }
     }
 
     #[inline]
     pub fn null_ptr(cx: &impl HasDataLayout) -> Self {
-        Scalar::Raw { data: 0, size: cx.data_layout().pointer_size.bytes() as u8 }
-    }
-
-    #[inline]
-    pub fn zst() -> Self {
-        Scalar::Raw { data: 0, size: 0 }
+        Scalar::Int(ScalarInt::null(cx.data_layout().pointer_size))
     }
 
     #[inline(always)]
@@ -229,10 +188,7 @@
         f_ptr: impl FnOnce(Pointer<Tag>) -> InterpResult<'tcx, Pointer<Tag>>,
     ) -> InterpResult<'tcx, Self> {
         match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(u64::from(size), dl.pointer_size.bytes());
-                Ok(Scalar::Raw { data: u128::from(f_int(u64::try_from(data).unwrap())?), size })
-            }
+            Scalar::Int(int) => Ok(Scalar::Int(int.ptr_sized_op(dl, f_int)?)),
             Scalar::Ptr(ptr) => Ok(Scalar::Ptr(f_ptr(ptr)?)),
         }
     }
@@ -273,24 +229,17 @@
 
     #[inline]
     pub fn from_bool(b: bool) -> Self {
-        // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: b as u128, size: 1 }
+        Scalar::Int(b.into())
     }
 
     #[inline]
     pub fn from_char(c: char) -> Self {
-        // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: c as u128, size: 4 }
+        Scalar::Int(c.into())
     }
 
     #[inline]
     pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
-        let i = i.into();
-        if truncate(i, size) == i {
-            Some(Scalar::Raw { data: i, size: size.bytes() as u8 })
-        } else {
-            None
-        }
+        ScalarInt::try_from_uint(i, size).map(Scalar::Int)
     }
 
     #[inline]
@@ -302,26 +251,22 @@
 
     #[inline]
     pub fn from_u8(i: u8) -> Self {
-        // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i.into(), size: 1 }
+        Scalar::Int(i.into())
     }
 
     #[inline]
     pub fn from_u16(i: u16) -> Self {
-        // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i.into(), size: 2 }
+        Scalar::Int(i.into())
     }
 
     #[inline]
     pub fn from_u32(i: u32) -> Self {
-        // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i.into(), size: 4 }
+        Scalar::Int(i.into())
     }
 
     #[inline]
     pub fn from_u64(i: u64) -> Self {
-        // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i.into(), size: 8 }
+        Scalar::Int(i.into())
     }
 
     #[inline]
@@ -331,14 +276,7 @@
 
     #[inline]
     pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
-        let i = i.into();
-        // `into` performed sign extension, we have to truncate
-        let truncated = truncate(i as u128, size);
-        if sign_extend(truncated, size) as i128 == i {
-            Some(Scalar::Raw { data: truncated, size: size.bytes() as u8 })
-        } else {
-            None
-        }
+        ScalarInt::try_from_int(i, size).map(Scalar::Int)
     }
 
     #[inline]
@@ -375,14 +313,12 @@
 
     #[inline]
     pub fn from_f32(f: Single) -> Self {
-        // We trust apfloat to give us properly truncated data.
-        Scalar::Raw { data: f.to_bits(), size: 4 }
+        Scalar::Int(f.into())
     }
 
     #[inline]
     pub fn from_f64(f: Double) -> Self {
-        // We trust apfloat to give us properly truncated data.
-        Scalar::Raw { data: f.to_bits(), size: 8 }
+        Scalar::Int(f.into())
     }
 
     /// This is very rarely the method you want!  You should dispatch on the type
@@ -397,11 +333,7 @@
     ) -> Result<u128, Pointer<Tag>> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
         match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(target_size.bytes(), u64::from(size));
-                Scalar::check_data(data, size);
-                Ok(data)
-            }
+            Scalar::Int(int) => Ok(int.assert_bits(target_size)),
             Scalar::Ptr(ptr) => {
                 assert_eq!(target_size, cx.data_layout().pointer_size);
                 Err(ptr)
@@ -415,16 +347,13 @@
     fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
         match self {
-            Scalar::Raw { data, size } => {
-                if target_size.bytes() != u64::from(size) {
-                    throw_ub!(ScalarSizeMismatch {
-                        target_size: target_size.bytes(),
-                        data_size: u64::from(size),
-                    });
-                }
-                Scalar::check_data(data, size);
-                Ok(data)
-            }
+            Scalar::Int(int) => int.to_bits(target_size).map_err(|size| {
+                err_ub!(ScalarSizeMismatch {
+                    target_size: target_size.bytes(),
+                    data_size: size.bytes(),
+                })
+                .into()
+            }),
             Scalar::Ptr(_) => throw_unsup!(ReadPointerAsBytes),
         }
     }
@@ -435,29 +364,31 @@
     }
 
     #[inline]
+    pub fn assert_int(self) -> ScalarInt {
+        match self {
+            Scalar::Ptr(_) => bug!("expected an int but got an abstract pointer"),
+            Scalar::Int(int) => int,
+        }
+    }
+
+    #[inline]
     pub fn assert_ptr(self) -> Pointer<Tag> {
         match self {
             Scalar::Ptr(p) => p,
-            Scalar::Raw { .. } => bug!("expected a Pointer but got Raw bits"),
+            Scalar::Int { .. } => bug!("expected a Pointer but got Raw bits"),
         }
     }
 
     /// Do not call this method!  Dispatch based on the type instead.
     #[inline]
     pub fn is_bits(self) -> bool {
-        match self {
-            Scalar::Raw { .. } => true,
-            _ => false,
-        }
+        matches!(self, Scalar::Int { .. })
     }
 
     /// Do not call this method!  Dispatch based on the type instead.
     #[inline]
     pub fn is_ptr(self) -> bool {
-        match self {
-            Scalar::Ptr(_) => true,
-            _ => false,
-        }
+        matches!(self, Scalar::Ptr(_))
     }
 
     pub fn to_bool(self) -> InterpResult<'tcx, bool> {
@@ -471,7 +402,7 @@
 
     pub fn to_char(self) -> InterpResult<'tcx, char> {
         let val = self.to_u32()?;
-        match ::std::char::from_u32(val) {
+        match std::char::from_u32(val) {
             Some(c) => Ok(c),
             None => throw_ub!(InvalidChar(val)),
         }
@@ -517,7 +448,7 @@
     fn to_signed_with_bit_width(self, bits: u64) -> InterpResult<'static, i128> {
         let sz = Size::from_bits(bits);
         let b = self.to_bits(sz)?;
-        Ok(sign_extend(b, sz) as i128)
+        Ok(sz.sign_extend(b) as i128)
     }
 
     /// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
@@ -548,7 +479,7 @@
     pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> {
         let sz = cx.data_layout().pointer_size;
         let b = self.to_bits(sz)?;
-        let b = sign_extend(b, sz) as i128;
+        let b = sz.sign_extend(b) as i128;
         Ok(i64::try_from(b).unwrap())
     }
 
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index fee24f0..5fe7b0f 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -3,19 +3,18 @@
 //! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html
 
 use crate::mir::coverage::{CodeRegion, CoverageKind};
-use crate::mir::interpret::{Allocation, ConstValue, GlobalAlloc, Scalar};
+use crate::mir::interpret::{Allocation, GlobalAlloc, Scalar};
 use crate::mir::visit::MirVisitable;
 use crate::ty::adjustment::PointerCast;
 use crate::ty::codec::{TyDecoder, TyEncoder};
 use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
 use crate::ty::print::{FmtPrinter, Printer};
 use crate::ty::subst::{Subst, SubstsRef};
-use crate::ty::{
-    self, AdtDef, CanonicalUserTypeAnnotations, List, Region, Ty, TyCtxt, UserTypeAnnotationIndex,
-};
+use crate::ty::{self, List, Ty, TyCtxt};
+use crate::ty::{AdtDef, InstanceDef, Region, UserTypeAnnotationIndex};
 use rustc_hir as hir;
 use rustc_hir::def::{CtorKind, Namespace};
-use rustc_hir::def_id::DefId;
+use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX};
 use rustc_hir::{self, GeneratorKind};
 use rustc_target::abi::VariantIdx;
 
@@ -29,11 +28,10 @@
 use rustc_serialize::{Decodable, Encodable};
 use rustc_span::symbol::Symbol;
 use rustc_span::{Span, DUMMY_SP};
-use rustc_target::abi;
 use rustc_target::asm::InlineAsmRegOrRegClass;
 use std::borrow::Cow;
 use std::fmt::{self, Debug, Display, Formatter, Write};
-use std::ops::{Index, IndexMut};
+use std::ops::{ControlFlow, Index, IndexMut};
 use std::slice;
 use std::{iter, mem, option};
 
@@ -112,10 +110,42 @@
     }
 }
 
+/// Where a specific `mir::Body` comes from.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(HashStable, TyEncodable, TyDecodable, TypeFoldable)]
+pub struct MirSource<'tcx> {
+    pub instance: InstanceDef<'tcx>,
+
+    /// If `Some`, this is a promoted rvalue within the parent function.
+    pub promoted: Option<Promoted>,
+}
+
+impl<'tcx> MirSource<'tcx> {
+    pub fn item(def_id: DefId) -> Self {
+        MirSource {
+            instance: InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+            promoted: None,
+        }
+    }
+
+    pub fn from_instance(instance: InstanceDef<'tcx>) -> Self {
+        MirSource { instance, promoted: None }
+    }
+
+    pub fn with_opt_param(self) -> ty::WithOptConstParam<DefId> {
+        self.instance.with_opt_param()
+    }
+
+    #[inline]
+    pub fn def_id(&self) -> DefId {
+        self.instance.def_id()
+    }
+}
+
 /// The lowered representation of a single function.
 #[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable)]
 pub struct Body<'tcx> {
-    /// A list of basic blocks. References to basic block use a newtyped index type `BasicBlock`
+    /// A list of basic blocks. References to basic block use a newtyped index type [`BasicBlock`]
     /// that indexes into this vector.
     basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
 
@@ -126,9 +156,11 @@
     /// us to see the difference and forego optimization on the inlined promoted items.
     pub phase: MirPhase,
 
+    pub source: MirSource<'tcx>,
+
     /// A list of source scopes; these are referenced by statements
     /// and used for debuginfo. Indexed by a `SourceScope`.
-    pub source_scopes: IndexVec<SourceScope, SourceScopeData>,
+    pub source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
 
     /// The yield type of the function, if it is a generator.
     pub yield_ty: Option<Ty<'tcx>>,
@@ -151,7 +183,7 @@
     pub local_decls: LocalDecls<'tcx>,
 
     /// User type annotations.
-    pub user_type_annotations: CanonicalUserTypeAnnotations<'tcx>,
+    pub user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
 
     /// The number of arguments this function takes.
     ///
@@ -177,16 +209,6 @@
     /// We hold in this field all the constants we are not able to evaluate yet.
     pub required_consts: Vec<Constant<'tcx>>,
 
-    /// The user may be writing e.g. `&[(SOME_CELL, 42)][i].1` and this would get promoted, because
-    /// we'd statically know that no thing with interior mutability will ever be available to the
-    /// user without some serious unsafe code.  Now this means that our promoted is actually
-    /// `&[(SOME_CELL, 42)]` and the MIR using it will do the `&promoted[i].1` projection because
-    /// the index may be a runtime value. Such a promoted value is illegal because it has reachable
-    /// interior mutability. This flag just makes this situation very obvious where the previous
-    /// implementation without the flag hid this situation silently.
-    /// FIXME(oli-obk): rewrite the promoted during promotion to eliminate the cell components.
-    pub ignore_interior_mut_in_const_validation: bool,
-
     /// Does this body use generic parameters. This is used for the `ConstEvaluatable` check.
     ///
     /// Note that this does not actually mean that this body is not computable right now.
@@ -209,10 +231,11 @@
 
 impl<'tcx> Body<'tcx> {
     pub fn new(
+        source: MirSource<'tcx>,
         basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
-        source_scopes: IndexVec<SourceScope, SourceScopeData>,
+        source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
         local_decls: LocalDecls<'tcx>,
-        user_type_annotations: CanonicalUserTypeAnnotations<'tcx>,
+        user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
         arg_count: usize,
         var_debug_info: Vec<VarDebugInfo<'tcx>>,
         span: Span,
@@ -228,6 +251,7 @@
 
         let mut body = Body {
             phase: MirPhase::Build,
+            source,
             basic_blocks,
             source_scopes,
             yield_ty: None,
@@ -241,7 +265,6 @@
             var_debug_info,
             span,
             required_consts: Vec::new(),
-            ignore_interior_mut_in_const_validation: false,
             is_polymorphic: false,
             predecessor_cache: PredecessorCache::new(),
         };
@@ -257,6 +280,7 @@
     pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
         let mut body = Body {
             phase: MirPhase::Build,
+            source: MirSource::item(DefId::local(CRATE_DEF_INDEX)),
             basic_blocks,
             source_scopes: IndexVec::new(),
             yield_ty: None,
@@ -270,7 +294,6 @@
             required_consts: Vec::new(),
             generator_kind: None,
             var_debug_info: Vec::new(),
-            ignore_interior_mut_in_const_validation: false,
             is_polymorphic: false,
             predecessor_cache: PredecessorCache::new(),
         };
@@ -424,17 +447,6 @@
         }
     }
 
-    /// Checks if `sub` is a sub scope of `sup`
-    pub fn is_sub_scope(&self, mut sub: SourceScope, sup: SourceScope) -> bool {
-        while sub != sup {
-            match self.source_scopes[sub].parent_scope {
-                None => return false,
-                Some(p) => sub = p,
-            }
-        }
-        true
-    }
-
     /// Returns the return type; it always returns the first element from the `local_decls` array.
     #[inline]
     pub fn return_ty(&self) -> Ty<'tcx> {
@@ -739,7 +751,7 @@
     impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for super::BindingForm<'tcx> {
         fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
             use super::BindingForm::*;
-            ::std::mem::discriminant(self).hash_stable(hcx, hasher);
+            std::mem::discriminant(self).hash_stable(hcx, hasher);
 
             match self {
                 Var(binding) => binding.hash_stable(hcx, hasher),
@@ -777,7 +789,7 @@
 /// argument, or the return place.
 #[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
 pub struct LocalDecl<'tcx> {
-    /// Whether this is a mutable minding (i.e., `let x` or `let mut x`).
+    /// Whether this is a mutable binding (i.e., `let x` or `let mut x`).
     ///
     /// Temporaries and the return place are always mutable.
     pub mutability: Mutability,
@@ -796,9 +808,6 @@
     /// flag drop flags to avoid triggering this check as they are introduced
     /// after typeck.
     ///
-    /// Unsafety checking will also ignore dereferences of these locals,
-    /// so they can be used for raw pointers only used in a desugaring.
-    ///
     /// This should be sound because the drop flags are fully algebraic, and
     /// therefore don't affect the OIBIT or outlives properties of the
     /// generator.
@@ -935,71 +944,63 @@
     /// - `let x = ...`,
     /// - or `match ... { C(x) => ... }`
     pub fn can_be_made_mutable(&self) -> bool {
-        match self.local_info {
-            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(VarBindingForm {
-                binding_mode: ty::BindingMode::BindByValue(_),
-                opt_ty_info: _,
-                opt_match_place: _,
-                pat_span: _,
-            })))) => true,
-
-            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::ImplicitSelf(
-                ImplicitSelfKind::Imm,
-            )))) => true,
-
-            _ => false,
-        }
+        matches!(
+            self.local_info,
+            Some(box LocalInfo::User(ClearCrossCrate::Set(
+                BindingForm::Var(VarBindingForm {
+                    binding_mode: ty::BindingMode::BindByValue(_),
+                    opt_ty_info: _,
+                    opt_match_place: _,
+                    pat_span: _,
+                })
+                | BindingForm::ImplicitSelf(ImplicitSelfKind::Imm),
+            )))
+        )
     }
 
     /// Returns `true` if local is definitely not a `ref ident` or
     /// `ref mut ident` binding. (Such bindings cannot be made into
     /// mutable bindings, but the inverse does not necessarily hold).
     pub fn is_nonref_binding(&self) -> bool {
-        match self.local_info {
-            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(VarBindingForm {
-                binding_mode: ty::BindingMode::BindByValue(_),
-                opt_ty_info: _,
-                opt_match_place: _,
-                pat_span: _,
-            })))) => true,
-
-            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::ImplicitSelf(_)))) => true,
-
-            _ => false,
-        }
+        matches!(
+            self.local_info,
+            Some(box LocalInfo::User(ClearCrossCrate::Set(
+                BindingForm::Var(VarBindingForm {
+                    binding_mode: ty::BindingMode::BindByValue(_),
+                    opt_ty_info: _,
+                    opt_match_place: _,
+                    pat_span: _,
+                })
+                | BindingForm::ImplicitSelf(_),
+            )))
+        )
     }
 
     /// Returns `true` if this variable is a named variable or function
     /// parameter declared by the user.
     #[inline]
     pub fn is_user_variable(&self) -> bool {
-        match self.local_info {
-            Some(box LocalInfo::User(_)) => true,
-            _ => false,
-        }
+        matches!(self.local_info, Some(box LocalInfo::User(_)))
     }
 
     /// Returns `true` if this is a reference to a variable bound in a `match`
     /// expression that is used to access said variable for the guard of the
     /// match arm.
     pub fn is_ref_for_guard(&self) -> bool {
-        match self.local_info {
-            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::RefForGuard))) => true,
-            _ => false,
-        }
+        matches!(
+            self.local_info,
+            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::RefForGuard)))
+        )
     }
 
     /// Returns `true` if this is a reference to a static item that is used to
-    /// access that static
+    /// access that static.
     pub fn is_ref_to_static(&self) -> bool {
-        match self.local_info {
-            Some(box LocalInfo::StaticRef { .. }) => true,
-            _ => false,
-        }
+        matches!(self.local_info, Some(box LocalInfo::StaticRef { .. }))
     }
 
-    /// Returns `Some` if this is a reference to a static item that is used to
-    /// access that static
+    /// Returns `true` if this is a reference to a thread-local static item that is used to
+    /// access that static.
     pub fn is_ref_to_thread_local(&self) -> bool {
         match self.local_info {
             Some(box LocalInfo::StaticRef { is_thread_local, .. }) => is_thread_local,
@@ -1089,6 +1090,9 @@
     /// are edges that go from a multi-successor node to a multi-predecessor node. This pass is
     /// needed because some analyses require that there are no critical edges in the CFG.
     ///
+    /// Note that this type is just an index into [`Body.basic_blocks`](Body::basic_blocks);
+    /// the actual data that a basic block holds is in [`BasicBlockData`].
+    ///
     /// Read more about basic blocks in the [rustc-dev-guide][guide-mir].
     ///
     /// [CFG]: https://rustc-dev-guide.rust-lang.org/appendix/background.html#cfg
@@ -1581,21 +1585,10 @@
                 write!(fmt, "AscribeUserType({:?}, {:?}, {:?})", place, variance, c_ty)
             }
             Coverage(box ref coverage) => {
-                let rgn = &coverage.code_region;
-                match coverage.kind {
-                    CoverageKind::Counter { id, .. } => {
-                        write!(fmt, "Coverage::Counter({:?}) for {:?}", id.index(), rgn)
-                    }
-                    CoverageKind::Expression { id, lhs, op, rhs } => write!(
-                        fmt,
-                        "Coverage::Expression({:?}) = {} {} {} for {:?}",
-                        id.index(),
-                        lhs.index(),
-                        if op == coverage::Op::Add { "+" } else { "-" },
-                        rhs.index(),
-                        rgn
-                    ),
-                    CoverageKind::Unreachable => write!(fmt, "Coverage::Unreachable for {:?}", rgn),
+                if let Some(rgn) = &coverage.code_region {
+                    write!(fmt, "Coverage::{:?} for {:?}", coverage.kind, rgn)
+                } else {
+                    write!(fmt, "Coverage::{:?}", coverage.kind)
                 }
             }
             Nop => write!(fmt, "nop"),
@@ -1606,7 +1599,7 @@
 #[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
 pub struct Coverage {
     pub kind: CoverageKind,
-    pub code_region: CodeRegion,
+    pub code_region: Option<CodeRegion>,
 }
 
 ///////////////////////////////////////////////////////////////////////////
@@ -1848,11 +1841,21 @@
     }
 }
 
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
-pub struct SourceScopeData {
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub struct SourceScopeData<'tcx> {
     pub span: Span,
     pub parent_scope: Option<SourceScope>,
 
+    /// Whether this scope is the root of a scope tree of another body,
+    /// inlined into this body by the MIR inliner.
+    /// `ty::Instance` is the callee, and the `Span` is the call site.
+    pub inlined: Option<(ty::Instance<'tcx>, Span)>,
+
+    /// Nearest (transitive) parent scope (if any) which is inlined.
+    /// This is an optimization over walking up `parent_scope`
+    /// until a scope with `inlined: Some(...)` is found.
+    pub inlined_parent_scope: Option<SourceScope>,
+
     /// Crate-local information for this source scope, that can't (and
     /// needn't) be tracked across crates.
     pub local_data: ClearCrossCrate<SourceScopeLocalData>,
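
The two added fields record where a scope came from after MIR inlining: `inlined` marks the root scope of an inlined callee (callee instance plus call-site span), and `inlined_parent_scope` caches the nearest inlined ancestor so consumers need not walk `parent_scope` each time. A minimal sketch of how such a chain might be walked to recover the inline "call stack" of a scope, assuming plain `Vec`, `usize`, and `String` stand-ins for `IndexVec`, `SourceScope`, `ty::Instance`, and `Span` (none of this is rustc API):

```rust
// Stand-ins for the rustc types; only the shape of the walk matters here.
#[allow(dead_code)]
struct ScopeData {
    parent_scope: Option<usize>,
    // (callee name, call-site description) if this scope is the root of an inlined body.
    inlined: Option<(String, String)>,
    // Nearest transitively-enclosing scope that has `inlined: Some(..)`.
    inlined_parent_scope: Option<usize>,
}

// Walk the cached `inlined_parent_scope` links, collecting callee names from the
// innermost inlined frame outwards.
fn inline_stack(scopes: &[ScopeData], mut scope: usize) -> Vec<String> {
    let mut stack = Vec::new();
    loop {
        if let Some((callee, _site)) = &scopes[scope].inlined {
            stack.push(callee.clone());
        }
        match scopes[scope].inlined_parent_scope {
            Some(parent) => scope = parent,
            None => break,
        }
    }
    stack
}

fn main() {
    // Scope 0: the outermost body; scope 1: `inner()` inlined into it;
    // scope 2: a child of scope 1 that is not itself an inlining root.
    let scopes = vec![
        ScopeData { parent_scope: None, inlined: None, inlined_parent_scope: None },
        ScopeData {
            parent_scope: Some(0),
            inlined: Some(("inner".into(), "outer.rs:10".into())),
            inlined_parent_scope: None,
        },
        ScopeData { parent_scope: Some(1), inlined: None, inlined_parent_scope: Some(1) },
    ];
    assert_eq!(inline_stack(&scopes, 2), vec!["inner".to_string()]);
}
```
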
@@ -1937,10 +1940,10 @@
                 .layout_of(param_env_and_ty)
                 .unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
                 .size;
-            let scalar_size = abi::Size::from_bytes(match val {
-                Scalar::Raw { size, .. } => size,
+            let scalar_size = match val {
+                Scalar::Int(int) => int.size(),
                 _ => panic!("Invalid scalar type {:?}", val),
-            });
+            };
             scalar_size == type_size
         });
         Operand::Constant(box Constant {
@@ -1950,45 +1953,6 @@
         })
     }
 
-    /// Convenience helper to make a `Scalar` from the given `Operand`, assuming that `Operand`
-    /// wraps a constant literal value. Panics if this is not the case.
-    pub fn scalar_from_const(operand: &Operand<'tcx>) -> Scalar {
-        match operand {
-            Operand::Constant(constant) => match constant.literal.val.try_to_scalar() {
-                Some(scalar) => scalar,
-                _ => panic!("{:?}: Scalar value expected", constant.literal.val),
-            },
-            _ => panic!("{:?}: Constant expected", operand),
-        }
-    }
-
-    /// Convenience helper to make a literal-like constant from a given `&str` slice.
-    /// Since this is used to synthesize MIR, assumes `user_ty` is None.
-    pub fn const_from_str(tcx: TyCtxt<'tcx>, val: &str, span: Span) -> Operand<'tcx> {
-        let tcx = tcx;
-        let allocation = Allocation::from_byte_aligned_bytes(val.as_bytes());
-        let allocation = tcx.intern_const_alloc(allocation);
-        let const_val = ConstValue::Slice { data: allocation, start: 0, end: val.len() };
-        let ty = tcx.mk_imm_ref(tcx.lifetimes.re_erased, tcx.types.str_);
-        Operand::Constant(box Constant {
-            span,
-            user_ty: None,
-            literal: ty::Const::from_value(tcx, const_val, ty),
-        })
-    }
-
-    /// Convenience helper to make a `ConstValue` from the given `Operand`, assuming that `Operand`
-    /// wraps a constant value (such as a `&str` slice). Panics if this is not the case.
-    pub fn value_from_const(operand: &Operand<'tcx>) -> ConstValue<'tcx> {
-        match operand {
-            Operand::Constant(constant) => match constant.literal.val.try_to_value() {
-                Some(const_value) => const_value,
-                _ => panic!("{:?}: ConstValue expected", constant.literal.val),
-            },
-            _ => panic!("{:?}: Constant expected", operand),
-        }
-    }
-
     pub fn to_copy(&self) -> Self {
         match *self {
             Operand::Copy(_) | Operand::Constant(_) => self.clone(),
@@ -2128,10 +2092,7 @@
 impl BinOp {
     pub fn is_checkable(self) -> bool {
         use self::BinOp::*;
-        match self {
-            Add | Sub | Mul | Shl | Shr => true,
-            _ => false,
-        }
+        matches!(self, Add | Sub | Mul | Shl | Shr)
     }
 }
 
@@ -2235,7 +2196,7 @@
 
                         let name = ty::tls::with(|tcx| {
                             let mut name = String::new();
-                            let substs = tcx.lift(&substs).expect("could not lift for printing");
+                            let substs = tcx.lift(substs).expect("could not lift for printing");
                             FmtPrinter::new(tcx, &mut name, Namespace::ValueNS)
                                 .print_def_path(variant_def.def_id, substs)?;
                             Ok(name)
@@ -2258,7 +2219,7 @@
                         if let Some(def_id) = def_id.as_local() {
                             let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
                             let name = if tcx.sess.opts.debugging_opts.span_free_formats {
-                                let substs = tcx.lift(&substs).unwrap();
+                                let substs = tcx.lift(substs).unwrap();
                                 format!(
                                     "[closure@{}]",
                                     tcx.def_path_str_with_substs(def_id.to_def_id(), substs),
@@ -2388,10 +2349,6 @@
         self.contents.is_empty()
     }
 
-    pub fn from_projections(projs: impl Iterator<Item = (UserTypeProjection, Span)>) -> Self {
-        UserTypeProjections { contents: projs.collect() }
-    }
-
     pub fn projections_and_spans(
         &self,
     ) -> impl Iterator<Item = &(UserTypeProjection, Span)> + ExactSizeIterator {
@@ -2520,7 +2477,7 @@
         UserTypeProjection { base, projs }
     }
 
-    fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> bool {
+    fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> ControlFlow<()> {
         self.base.visit_with(visitor)
         // Note: there's nothing in `self.proj` to visit.
     }
@@ -2556,7 +2513,7 @@
 ) -> fmt::Result {
     use crate::ty::print::PrettyPrinter;
     ty::tls::with(|tcx| {
-        let literal = tcx.lift(&c).unwrap();
+        let literal = tcx.lift(c).unwrap();
         let mut cx = FmtPrinter::new(tcx, fmt, Namespace::ValueNS);
         cx.print_alloc_ids = true;
         cx.pretty_print_const(literal, print_types)?;
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
index 79e2c5a..1e70f76 100644
--- a/compiler/rustc_middle/src/mir/mono.rs
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -228,7 +228,7 @@
 
 /// Specifies the linkage type for a `MonoItem`.
 ///
-/// See https://llvm.org/docs/LangRef.html#linkage-types for more details about these variants.
+/// See <https://llvm.org/docs/LangRef.html#linkage-types> for more details about these variants.
 #[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
 pub enum Linkage {
     External,
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
index 0878e93..6022194 100644
--- a/compiler/rustc_middle/src/mir/query.rs
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -1,9 +1,10 @@
 //! Values computed by queries that use MIR.
 
-use crate::mir::{Body, Promoted};
+use crate::mir::{abstract_const, Body, Promoted};
 use crate::ty::{self, Ty, TyCtxt};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sync::Lrc;
+use rustc_errors::ErrorReported;
 use rustc_hir as hir;
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_index::bit_set::BitMatrix;
@@ -407,18 +408,12 @@
     pub num_expressions: u32,
 }
 
+/// Shims which make dealing with `WithOptConstParam` easier.
+///
+/// For more information on why this is needed, consider looking
+/// at the docs for `WithOptConstParam` itself.
 impl<'tcx> TyCtxt<'tcx> {
-    pub fn mir_borrowck_opt_const_arg(
-        self,
-        def: ty::WithOptConstParam<LocalDefId>,
-    ) -> &'tcx BorrowCheckResult<'tcx> {
-        if let Some(param_did) = def.const_param_did {
-            self.mir_borrowck_const_arg((def.did, param_did))
-        } else {
-            self.mir_borrowck(def.did)
-        }
-    }
-
+    #[inline]
     pub fn mir_const_qualif_opt_const_arg(
         self,
         def: ty::WithOptConstParam<LocalDefId>,
@@ -430,7 +425,8 @@
         }
     }
 
-    pub fn promoted_mir_of_opt_const_arg(
+    #[inline]
+    pub fn promoted_mir_opt_const_arg(
         self,
         def: ty::WithOptConstParam<DefId>,
     ) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
@@ -440,4 +436,28 @@
             self.promoted_mir(def.did)
         }
     }
+
+    #[inline]
+    pub fn optimized_mir_opt_const_arg(
+        self,
+        def: ty::WithOptConstParam<DefId>,
+    ) -> &'tcx Body<'tcx> {
+        if let Some((did, param_did)) = def.as_const_arg() {
+            self.optimized_mir_of_const_arg((did, param_did))
+        } else {
+            self.optimized_mir(def.did)
+        }
+    }
+
+    #[inline]
+    pub fn mir_abstract_const_opt_const_arg(
+        self,
+        def: ty::WithOptConstParam<DefId>,
+    ) -> Result<Option<&'tcx [abstract_const::Node<'tcx>]>, ErrorReported> {
+        if let Some((did, param_did)) = def.as_const_arg() {
+            self.mir_abstract_const_of_const_arg((did, param_did))
+        } else {
+            self.mir_abstract_const(def.did)
+        }
+    }
 }
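
All of these shims share one shape: if the `WithOptConstParam` carries a const-param `DefId`, call the `*_const_arg` / `*_of_const_arg` query with the `(did, param_did)` pair, otherwise call the plain query with `def.did`. A schematic sketch of that dispatch, using a hypothetical mirror of `WithOptConstParam` and two closures standing in for the query pair (not the rustc types or query system):

```rust
// Hypothetical mirror of `ty::WithOptConstParam<Id>`: an item id plus, when the
// item is the anonymous body of a const argument, the id of the const parameter
// it instantiates. Not the rustc type, just its shape.
struct WithOptConstParam<Id> {
    did: Id,
    const_param_did: Option<Id>,
}

impl<Id: Copy> WithOptConstParam<Id> {
    fn as_const_arg(&self) -> Option<(Id, Id)> {
        self.const_param_did.map(|param| (self.did, param))
    }
}

// The shim pattern used by the helpers above: pick the "(did, param_did)" query
// when a const parameter is present, the plain query otherwise, so callers
// never repeat this dispatch.
fn query_opt_const_arg<Id: Copy, R>(
    def: WithOptConstParam<Id>,
    of_const_arg: impl Fn((Id, Id)) -> R,
    plain: impl Fn(Id) -> R,
) -> R {
    if let Some((did, param_did)) = def.as_const_arg() {
        of_const_arg((did, param_did))
    } else {
        plain(def.did)
    }
}

fn main() {
    // Stand-in "queries" that just report which path was taken.
    let as_pair = |(d, p): (u32, u32)| format!("of_const_arg({}, {})", d, p);
    let as_plain = |d: u32| format!("plain({})", d);

    let plain = WithOptConstParam { did: 7u32, const_param_did: None };
    let const_arg = WithOptConstParam { did: 7u32, const_param_did: Some(3) };
    assert_eq!(query_opt_const_arg(plain, &as_pair, &as_plain), "plain(7)");
    assert_eq!(query_opt_const_arg(const_arg, &as_pair, &as_plain), "of_const_arg(7, 3)");
}
```
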
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
index b9e4f6f..f0bfdae 100644
--- a/compiler/rustc_middle/src/mir/tcx.rs
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -152,10 +152,14 @@
                 tcx.mk_ty(ty::Array(operand.ty(local_decls, tcx), count))
             }
             Rvalue::ThreadLocalRef(did) => {
+                let static_ty = tcx.type_of(did);
                 if tcx.is_mutable_static(did) {
-                    tcx.mk_mut_ptr(tcx.type_of(did))
+                    tcx.mk_mut_ptr(static_ty)
+                } else if tcx.is_foreign_item(did) {
+                    tcx.mk_imm_ptr(static_ty)
                 } else {
-                    tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.type_of(did))
+                    // FIXME: These things don't *really* have 'static lifetime.
+                    tcx.mk_imm_ref(tcx.lifetimes.re_static, static_ty)
                 }
             }
             Rvalue::Ref(reg, bk, ref place) => {
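
The hunk above types `Rvalue::ThreadLocalRef` as `*mut T` for mutable statics, `*const T` for foreign (extern) statics, and `&'static T` otherwise, with a FIXME noting that `'static` is not really accurate. A toy sketch of that three-way decision; `StaticInfo` and the string output are stand-ins for the `is_mutable_static` / `is_foreign_item` queries and `Ty<'tcx>`, not rustc API:

```rust
// Stand-in for the per-static facts the compiler queries; not a rustc type.
struct StaticInfo {
    mutable: bool,
    foreign: bool,
}

// Mirrors the decision made for `Rvalue::ThreadLocalRef`, but producing a
// human-readable type string instead of a `Ty<'tcx>`.
fn thread_local_ref_ty(info: &StaticInfo, pointee: &str) -> String {
    if info.mutable {
        format!("*mut {}", pointee)
    } else if info.foreign {
        format!("*const {}", pointee)
    } else {
        // FIXME in the original: the referent does not really live for 'static.
        format!("&'static {}", pointee)
    }
}

fn main() {
    let mutable = StaticInfo { mutable: true, foreign: false };
    let foreign = StaticInfo { mutable: false, foreign: true };
    let ordinary = StaticInfo { mutable: false, foreign: false };
    assert_eq!(thread_local_ref_ty(&mutable, "u8"), "*mut u8");
    assert_eq!(thread_local_ref_ty(&foreign, "u8"), "*const u8");
    assert_eq!(thread_local_ref_ty(&ordinary, "u8"), "&'static u8");
}
```
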
diff --git a/compiler/rustc_middle/src/mir/terminator/mod.rs b/compiler/rustc_middle/src/mir/terminator.rs
similarity index 84%
rename from compiler/rustc_middle/src/mir/terminator/mod.rs
rename to compiler/rustc_middle/src/mir/terminator.rs
index 8909f02..709ffc3 100644
--- a/compiler/rustc_middle/src/mir/terminator/mod.rs
+++ b/compiler/rustc_middle/src/mir/terminator.rs
@@ -1,6 +1,7 @@
 use crate::mir::interpret::Scalar;
 use crate::ty::{self, Ty, TyCtxt};
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use smallvec::{smallvec, SmallVec};
 
 use super::{
     AssertMessage, BasicBlock, InlineAsmOperand, Operand, Place, SourceInfo, Successors,
@@ -16,6 +17,87 @@
 
 pub use super::query::*;
 
+#[derive(Debug, Clone, TyEncodable, TyDecodable, HashStable, PartialEq)]
+pub struct SwitchTargets {
+    /// Possible values. The locations to branch to in each case
+    /// are found in the corresponding indices from the `targets` vector.
+    values: SmallVec<[u128; 1]>,
+
+    /// Possible branch sites. The last element of this vector is used
+    /// for the otherwise branch, so targets.len() == values.len() + 1
+    /// should hold.
+    //
+    // This invariant is quite non-obvious and also could be improved.
+    // One way to improve on this invariant would be to have something like this instead:
+    //
+    // branches: Vec<(ConstInt, BasicBlock)>,
+    // otherwise: Option<BasicBlock> // exhaustive if None
+    //
+    // However, we’ve decided to keep this as-is until we find a case where
+    // some other approach seems to be strictly better than this one.
+    targets: SmallVec<[BasicBlock; 2]>,
+}
+
+impl SwitchTargets {
+    /// Creates switch targets from an iterator of values and target blocks.
+    ///
+    /// The iterator may be empty, in which case the `SwitchInt` instruction is equivalent to
+    /// `goto otherwise;`.
+    pub fn new(targets: impl Iterator<Item = (u128, BasicBlock)>, otherwise: BasicBlock) -> Self {
+        let (values, mut targets): (SmallVec<_>, SmallVec<_>) = targets.unzip();
+        targets.push(otherwise);
+        Self { values, targets }
+    }
+
+    /// Builds a switch targets definition that jumps to `then` if the tested value equals `value`,
+    /// and to `else_` if not.
+    pub fn static_if(value: u128, then: BasicBlock, else_: BasicBlock) -> Self {
+        Self { values: smallvec![value], targets: smallvec![then, else_] }
+    }
+
+    /// Returns the fallback target that is jumped to when none of the values match the operand.
+    pub fn otherwise(&self) -> BasicBlock {
+        *self.targets.last().unwrap()
+    }
+
+    /// Returns an iterator over the switch targets.
+    ///
+    /// The iterator will yield tuples containing the value and corresponding target to jump to, not
+    /// including the `otherwise` fallback target.
+    ///
+    /// Note that this may yield 0 elements. Only the `otherwise` branch is mandatory.
+    pub fn iter(&self) -> SwitchTargetsIter<'_> {
+        SwitchTargetsIter { inner: self.values.iter().zip(self.targets.iter()) }
+    }
+
+    /// Returns a slice with all possible jump targets (including the fallback target).
+    pub fn all_targets(&self) -> &[BasicBlock] {
+        &self.targets
+    }
+
+    pub fn all_targets_mut(&mut self) -> &mut [BasicBlock] {
+        &mut self.targets
+    }
+}
+
+pub struct SwitchTargetsIter<'a> {
+    inner: iter::Zip<slice::Iter<'a, u128>, slice::Iter<'a, BasicBlock>>,
+}
+
+impl<'a> Iterator for SwitchTargetsIter<'a> {
+    type Item = (u128, BasicBlock);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.inner.next().map(|(val, bb)| (*val, *bb))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl<'a> ExactSizeIterator for SwitchTargetsIter<'a> {}
+
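
A minimal, self-contained sketch of the `SwitchTargets` shape documented above, assuming plain `usize` block ids in place of `BasicBlock` and `Vec` in place of `SmallVec`; it only illustrates the `targets.len() == values.len() + 1` invariant and the `new` / `static_if` / `otherwise` / `iter` API, and is not the rustc_middle type:

```rust
// Illustration only: `usize` stands in for `BasicBlock`, `Vec` for `SmallVec`.
struct SwitchTargets {
    values: Vec<u128>,
    // Invariant: targets.len() == values.len() + 1; the last entry is `otherwise`.
    targets: Vec<usize>,
}

impl SwitchTargets {
    fn new(it: impl Iterator<Item = (u128, usize)>, otherwise: usize) -> Self {
        let (values, mut targets): (Vec<_>, Vec<_>) = it.unzip();
        targets.push(otherwise);
        Self { values, targets }
    }

    fn static_if(value: u128, then: usize, else_: usize) -> Self {
        Self { values: vec![value], targets: vec![then, else_] }
    }

    fn otherwise(&self) -> usize {
        *self.targets.last().unwrap()
    }

    // Yields (value, target) pairs, excluding the `otherwise` target.
    fn iter(&self) -> impl Iterator<Item = (u128, usize)> + '_ {
        self.values.iter().zip(&self.targets).map(|(v, t)| (*v, *t))
    }
}

fn main() {
    // Jump to bb1 if the discriminant equals 0, otherwise to bb2
    // (this is how `if cond` is lowered: a switch on the value 0, i.e. `false`).
    let if_targets = SwitchTargets::static_if(0, 1, 2);
    assert_eq!(if_targets.otherwise(), 2);

    // A general switch: 1 -> bb3, 7 -> bb4, everything else -> bb5.
    let st = SwitchTargets::new(vec![(1, 3), (7, 4)].into_iter(), 5);
    assert_eq!(st.iter().collect::<Vec<_>>(), vec![(1, 3), (7, 4)]);
    assert_eq!(st.otherwise(), 5);
}
```
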
 #[derive(Clone, TyEncodable, TyDecodable, HashStable, PartialEq)]
 pub enum TerminatorKind<'tcx> {
     /// Block should have one successor in the graph; we jump there.
@@ -32,23 +114,7 @@
         /// FIXME: remove this redundant information. Currently, it is relied on by pretty-printing.
         switch_ty: Ty<'tcx>,
 
-        /// Possible values. The locations to branch to in each case
-        /// are found in the corresponding indices from the `targets` vector.
-        values: Cow<'tcx, [u128]>,
-
-        /// Possible branch sites. The last element of this vector is used
-        /// for the otherwise branch, so targets.len() == values.len() + 1
-        /// should hold.
-        //
-        // This invariant is quite non-obvious and also could be improved.
-        // One way to make this invariant is to have something like this instead:
-        //
-        // branches: Vec<(ConstInt, BasicBlock)>,
-        // otherwise: Option<BasicBlock> // exhaustive if None
-        //
-        // However we’ve decided to keep this as-is until we figure a case
-        // where some other approach seems to be strictly better than other.
-        targets: Vec<BasicBlock>,
+        targets: SwitchTargets,
     },
 
     /// Indicates that the landing pad is finished and unwinding should
@@ -227,12 +293,10 @@
         t: BasicBlock,
         f: BasicBlock,
     ) -> TerminatorKind<'tcx> {
-        static BOOL_SWITCH_FALSE: &[u128] = &[0];
         TerminatorKind::SwitchInt {
             discr: cond,
             switch_ty: tcx.types.bool,
-            values: From::from(BOOL_SWITCH_FALSE),
-            targets: vec![f, t],
+            targets: SwitchTargets::static_if(0, f, t),
         }
     }
 
@@ -263,7 +327,7 @@
             | FalseUnwind { real_target: ref t, unwind: Some(ref u) } => {
                 Some(t).into_iter().chain(slice::from_ref(u))
             }
-            SwitchInt { ref targets, .. } => None.into_iter().chain(&targets[..]),
+            SwitchInt { ref targets, .. } => None.into_iter().chain(&targets.targets[..]),
             FalseEdge { ref real_target, ref imaginary_target } => {
                 Some(real_target).into_iter().chain(slice::from_ref(imaginary_target))
             }
@@ -297,7 +361,7 @@
             | FalseUnwind { real_target: ref mut t, unwind: Some(ref mut u) } => {
                 Some(t).into_iter().chain(slice::from_mut(u))
             }
-            SwitchInt { ref mut targets, .. } => None.into_iter().chain(&mut targets[..]),
+            SwitchInt { ref mut targets, .. } => None.into_iter().chain(&mut targets.targets[..]),
             FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                 Some(real_target).into_iter().chain(slice::from_mut(imaginary_target))
             }
@@ -469,11 +533,12 @@
         match *self {
             Return | Resume | Abort | Unreachable | GeneratorDrop => vec![],
             Goto { .. } => vec!["".into()],
-            SwitchInt { ref values, switch_ty, .. } => ty::tls::with(|tcx| {
+            SwitchInt { ref targets, switch_ty, .. } => ty::tls::with(|tcx| {
                 let param_env = ty::ParamEnv::empty();
-                let switch_ty = tcx.lift(&switch_ty).unwrap();
+                let switch_ty = tcx.lift(switch_ty).unwrap();
                 let size = tcx.layout_of(param_env.and(switch_ty)).unwrap().size;
-                values
+                targets
+                    .values
                     .iter()
                     .map(|&u| {
                         ty::Const::from_scalar(tcx, Scalar::from_uint(u, size), switch_ty)
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
index ad2eae0..0801188 100644
--- a/compiler/rustc_middle/src/mir/type_foldable.rs
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -10,7 +10,6 @@
     FakeReadCause,
     RetagKind,
     SourceScope,
-    SourceScopeData,
     SourceScopeLocalData,
     UserTypeAnnotationIndex,
 }
@@ -21,10 +20,9 @@
 
         let kind = match self.kind {
             Goto { target } => Goto { target },
-            SwitchInt { ref discr, switch_ty, ref values, ref targets } => SwitchInt {
+            SwitchInt { ref discr, switch_ty, ref targets } => SwitchInt {
                 discr: discr.fold_with(folder),
                 switch_ty: switch_ty.fold_with(folder),
-                values: values.clone(),
                 targets: targets.clone(),
             },
             Drop { ref place, target, unwind } => {
@@ -89,41 +87,43 @@
         Terminator { source_info: self.source_info, kind }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         use crate::mir::TerminatorKind::*;
 
         match self.kind {
             SwitchInt { ref discr, switch_ty, .. } => {
-                discr.visit_with(visitor) || switch_ty.visit_with(visitor)
+                discr.visit_with(visitor)?;
+                switch_ty.visit_with(visitor)
             }
             Drop { ref place, .. } => place.visit_with(visitor),
             DropAndReplace { ref place, ref value, .. } => {
-                place.visit_with(visitor) || value.visit_with(visitor)
+                place.visit_with(visitor)?;
+                value.visit_with(visitor)
             }
             Yield { ref value, .. } => value.visit_with(visitor),
             Call { ref func, ref args, ref destination, .. } => {
-                let dest = if let Some((ref loc, _)) = *destination {
-                    loc.visit_with(visitor)
-                } else {
-                    false
+                if let Some((ref loc, _)) = *destination {
+                    loc.visit_with(visitor)?;
                 };
-                dest || func.visit_with(visitor) || args.visit_with(visitor)
+                func.visit_with(visitor)?;
+                args.visit_with(visitor)
             }
             Assert { ref cond, ref msg, .. } => {
-                if cond.visit_with(visitor) {
-                    use AssertKind::*;
-                    match msg {
-                        BoundsCheck { ref len, ref index } => {
-                            len.visit_with(visitor) || index.visit_with(visitor)
-                        }
-                        Overflow(_, l, r) => l.visit_with(visitor) || r.visit_with(visitor),
-                        OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
-                            op.visit_with(visitor)
-                        }
-                        ResumedAfterReturn(_) | ResumedAfterPanic(_) => false,
+                cond.visit_with(visitor)?;
+                use AssertKind::*;
+                match msg {
+                    BoundsCheck { ref len, ref index } => {
+                        len.visit_with(visitor)?;
+                        index.visit_with(visitor)
                     }
-                } else {
-                    false
+                    Overflow(_, l, r) => {
+                        l.visit_with(visitor)?;
+                        r.visit_with(visitor)
+                    }
+                    OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
+                        op.visit_with(visitor)
+                    }
+                    ResumedAfterReturn(_) | ResumedAfterPanic(_) => ControlFlow::CONTINUE,
                 }
             }
             InlineAsm { ref operands, .. } => operands.visit_with(visitor),
@@ -134,7 +134,7 @@
             | GeneratorDrop
             | Unreachable
             | FalseEdge { .. }
-            | FalseUnwind { .. } => false,
+            | FalseUnwind { .. } => ControlFlow::CONTINUE,
         }
     }
 }
@@ -144,8 +144,8 @@
         *self
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
-        false
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<()> {
+        ControlFlow::CONTINUE
     }
 }
 
@@ -154,8 +154,9 @@
         Place { local: self.local.fold_with(folder), projection: self.projection.fold_with(folder) }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.local.visit_with(visitor) || self.projection.visit_with(visitor)
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.local.visit_with(visitor)?;
+        self.projection.visit_with(visitor)
     }
 }
 
@@ -165,8 +166,8 @@
         folder.tcx().intern_place_elems(&v)
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.iter().try_for_each(|t| t.visit_with(visitor))
     }
 }
 
@@ -215,32 +216,47 @@
         }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         use crate::mir::Rvalue::*;
         match *self {
             Use(ref op) => op.visit_with(visitor),
             Repeat(ref op, _) => op.visit_with(visitor),
             ThreadLocalRef(did) => did.visit_with(visitor),
-            Ref(region, _, ref place) => region.visit_with(visitor) || place.visit_with(visitor),
+            Ref(region, _, ref place) => {
+                region.visit_with(visitor)?;
+                place.visit_with(visitor)
+            }
             AddressOf(_, ref place) => place.visit_with(visitor),
             Len(ref place) => place.visit_with(visitor),
-            Cast(_, ref op, ty) => op.visit_with(visitor) || ty.visit_with(visitor),
+            Cast(_, ref op, ty) => {
+                op.visit_with(visitor)?;
+                ty.visit_with(visitor)
+            }
             BinaryOp(_, ref rhs, ref lhs) | CheckedBinaryOp(_, ref rhs, ref lhs) => {
-                rhs.visit_with(visitor) || lhs.visit_with(visitor)
+                rhs.visit_with(visitor)?;
+                lhs.visit_with(visitor)
             }
             UnaryOp(_, ref val) => val.visit_with(visitor),
             Discriminant(ref place) => place.visit_with(visitor),
             NullaryOp(_, ty) => ty.visit_with(visitor),
             Aggregate(ref kind, ref fields) => {
-                (match **kind {
-                    AggregateKind::Array(ty) => ty.visit_with(visitor),
-                    AggregateKind::Tuple => false,
-                    AggregateKind::Adt(_, _, substs, user_ty, _) => {
-                        substs.visit_with(visitor) || user_ty.visit_with(visitor)
+                match **kind {
+                    AggregateKind::Array(ty) => {
+                        ty.visit_with(visitor)?;
                     }
-                    AggregateKind::Closure(_, substs) => substs.visit_with(visitor),
-                    AggregateKind::Generator(_, substs, _) => substs.visit_with(visitor),
-                }) || fields.visit_with(visitor)
+                    AggregateKind::Tuple => {}
+                    AggregateKind::Adt(_, _, substs, user_ty, _) => {
+                        substs.visit_with(visitor)?;
+                        user_ty.visit_with(visitor)?;
+                    }
+                    AggregateKind::Closure(_, substs) => {
+                        substs.visit_with(visitor)?;
+                    }
+                    AggregateKind::Generator(_, substs, _) => {
+                        substs.visit_with(visitor)?;
+                    }
+                }
+                fields.visit_with(visitor)
             }
         }
     }
@@ -255,7 +271,7 @@
         }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         match *self {
             Operand::Copy(ref place) | Operand::Move(ref place) => place.visit_with(visitor),
             Operand::Constant(ref c) => c.visit_with(visitor),
@@ -279,13 +295,13 @@
         }
     }
 
-    fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> bool {
+    fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> ControlFlow<()> {
         use crate::mir::ProjectionElem::*;
 
         match self {
             Field(_, ty) => ty.visit_with(visitor),
             Index(v) => v.visit_with(visitor),
-            _ => false,
+            _ => ControlFlow::CONTINUE,
         }
     }
 }
@@ -294,8 +310,8 @@
     fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self {
         *self
     }
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
-        false
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<()> {
+        ControlFlow::CONTINUE
     }
 }
 
@@ -303,8 +319,8 @@
     fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self {
         *self
     }
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
-        false
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<()> {
+        ControlFlow::CONTINUE
     }
 }
 
@@ -312,8 +328,8 @@
     fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self {
         self.clone()
     }
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
-        false
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<()> {
+        ControlFlow::CONTINUE
     }
 }
 
@@ -325,7 +341,7 @@
             literal: self.literal.fold_with(folder),
         }
     }
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         self.literal.visit_with(visitor)
     }
 }
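
The `bool`-returning `super_visit_with` impls above are rewritten to return `std::ops::ControlFlow<()>`, so `a || b` chains become `a?; b` and "found it" becomes `ControlFlow::Break(())`. A standalone sketch of the same pattern on a toy tree type (nothing rustc-specific; `ControlFlow::Continue(())` is spelled out because the `CONTINUE` constant used in the compiler sources is not available on stable Rust):

```rust
use std::ops::ControlFlow;

enum Expr {
    Leaf(i32),
    Add(Box<Expr>, Box<Expr>),
}

// The old style: every visit returns `bool` ("found something"),
// and callers chain the results with `||`.
fn any_negative_bool(e: &Expr) -> bool {
    match e {
        Expr::Leaf(n) => *n < 0,
        Expr::Add(l, r) => any_negative_bool(l) || any_negative_bool(r),
    }
}

// The new style: return ControlFlow and use `?` to short-circuit,
// mirroring the rewritten `super_visit_with` impls.
fn any_negative_cf(e: &Expr) -> ControlFlow<()> {
    match e {
        Expr::Leaf(n) if *n < 0 => ControlFlow::Break(()),
        Expr::Leaf(_) => ControlFlow::Continue(()),
        Expr::Add(l, r) => {
            any_negative_cf(l)?; // stops here if the left subtree already broke
            any_negative_cf(r)
        }
    }
}

fn main() {
    let e = Expr::Add(Box::new(Expr::Leaf(1)), Box::new(Expr::Leaf(-2)));
    assert!(any_negative_bool(&e));
    assert!(any_negative_cf(&e).is_break());
}
```
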
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index a008bd5..d8d639a 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -85,7 +85,7 @@
             }
 
             fn visit_source_scope_data(&mut self,
-                                           scope_data: & $($mutability)? SourceScopeData) {
+                                           scope_data: & $($mutability)? SourceScopeData<'tcx>) {
                 self.super_source_scope_data(scope_data);
             }
 
@@ -317,10 +317,15 @@
                 }
             }
 
-            fn super_source_scope_data(&mut self, scope_data: & $($mutability)? SourceScopeData) {
+            fn super_source_scope_data(
+                &mut self,
+                scope_data: & $($mutability)? SourceScopeData<'tcx>,
+            ) {
                 let SourceScopeData {
                     span,
                     parent_scope,
+                    inlined,
+                    inlined_parent_scope,
                     local_data: _,
                 } = scope_data;
 
@@ -328,6 +333,34 @@
                 if let Some(parent_scope) = parent_scope {
                     self.visit_source_scope(parent_scope);
                 }
+                if let Some((callee, callsite_span)) = inlined {
+                    let location = START_BLOCK.start_location();
+
+                    self.visit_span(callsite_span);
+
+                    let ty::Instance { def: callee_def, substs: callee_substs } = callee;
+                    match callee_def {
+                        ty::InstanceDef::Item(_def_id) => {}
+
+                        ty::InstanceDef::Intrinsic(_def_id) |
+                        ty::InstanceDef::VtableShim(_def_id) |
+                        ty::InstanceDef::ReifyShim(_def_id) |
+                        ty::InstanceDef::Virtual(_def_id, _) |
+                        ty::InstanceDef::ClosureOnceShim { call_once: _def_id } |
+                        ty::InstanceDef::DropGlue(_def_id, None) => {}
+
+                        ty::InstanceDef::FnPtrShim(_def_id, ty) |
+                        ty::InstanceDef::DropGlue(_def_id, Some(ty)) |
+                        ty::InstanceDef::CloneShim(_def_id, ty) => {
+                            // FIXME(eddyb) use a better `TyContext` here.
+                            self.visit_ty(ty, TyContext::Location(location));
+                        }
+                    }
+                    self.visit_substs(callee_substs, location);
+                }
+                if let Some(inlined_parent_scope) = inlined_parent_scope {
+                    self.visit_source_scope(inlined_parent_scope);
+                }
             }
 
             fn super_statement(&mut self,
@@ -453,7 +486,6 @@
                     TerminatorKind::SwitchInt {
                         discr,
                         switch_ty,
-                        values: _,
                         targets: _
                     } => {
                         self.visit_operand(discr, location);
@@ -752,7 +784,7 @@
             }
 
             fn super_coverage(&mut self,
-                              _kind: & $($mutability)? Coverage,
+                              _coverage: & $($mutability)? Coverage,
                               _location: Location) {
             }
 
@@ -1164,82 +1196,53 @@
 impl PlaceContext {
     /// Returns `true` if this place context represents a drop.
     pub fn is_drop(&self) -> bool {
-        match *self {
-            PlaceContext::MutatingUse(MutatingUseContext::Drop) => true,
-            _ => false,
-        }
+        matches!(self, PlaceContext::MutatingUse(MutatingUseContext::Drop))
     }
 
     /// Returns `true` if this place context represents a borrow.
     pub fn is_borrow(&self) -> bool {
-        match *self {
+        matches!(
+            self,
             PlaceContext::NonMutatingUse(
                 NonMutatingUseContext::SharedBorrow
-                | NonMutatingUseContext::ShallowBorrow
-                | NonMutatingUseContext::UniqueBorrow,
-            )
-            | PlaceContext::MutatingUse(MutatingUseContext::Borrow) => true,
-            _ => false,
-        }
+                    | NonMutatingUseContext::ShallowBorrow
+                    | NonMutatingUseContext::UniqueBorrow
+            ) | PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+        )
     }
 
     /// Returns `true` if this place context represents a storage live or storage dead marker.
     pub fn is_storage_marker(&self) -> bool {
-        match *self {
-            PlaceContext::NonUse(NonUseContext::StorageLive | NonUseContext::StorageDead) => true,
-            _ => false,
-        }
-    }
-
-    /// Returns `true` if this place context represents a storage live marker.
-    pub fn is_storage_live_marker(&self) -> bool {
-        match *self {
-            PlaceContext::NonUse(NonUseContext::StorageLive) => true,
-            _ => false,
-        }
-    }
-
-    /// Returns `true` if this place context represents a storage dead marker.
-    pub fn is_storage_dead_marker(&self) -> bool {
-        match *self {
-            PlaceContext::NonUse(NonUseContext::StorageDead) => true,
-            _ => false,
-        }
+        matches!(
+            self,
+            PlaceContext::NonUse(NonUseContext::StorageLive | NonUseContext::StorageDead)
+        )
     }
 
     /// Returns `true` if this place context represents a use that potentially changes the value.
     pub fn is_mutating_use(&self) -> bool {
-        match *self {
-            PlaceContext::MutatingUse(..) => true,
-            _ => false,
-        }
+        matches!(self, PlaceContext::MutatingUse(..))
     }
 
     /// Returns `true` if this place context represents a use that does not change the value.
     pub fn is_nonmutating_use(&self) -> bool {
-        match *self {
-            PlaceContext::NonMutatingUse(..) => true,
-            _ => false,
-        }
+        matches!(self, PlaceContext::NonMutatingUse(..))
     }
 
     /// Returns `true` if this place context represents a use.
     pub fn is_use(&self) -> bool {
-        match *self {
-            PlaceContext::NonUse(..) => false,
-            _ => true,
-        }
+        !matches!(self, PlaceContext::NonUse(..))
     }
 
     /// Returns `true` if this place context represents an assignment statement.
     pub fn is_place_assignment(&self) -> bool {
-        match *self {
+        matches!(
+            self,
             PlaceContext::MutatingUse(
                 MutatingUseContext::Store
-                | MutatingUseContext::Call
-                | MutatingUseContext::AsmOutput,
-            ) => true,
-            _ => false,
-        }
+                    | MutatingUseContext::Call
+                    | MutatingUseContext::AsmOutput,
+            )
+        )
     }
 }
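
Most of the predicate methods in this file and in `mod.rs` are mechanical rewrites of `match x { Pat => true, _ => false }` into the `matches!` macro, folding arms together with `|` patterns. A tiny illustration of the equivalence on a made-up enum (`PlaceUse` is not a rustc type):

```rust
enum PlaceUse {
    Read,
    Write,
    Borrow { mutable: bool },
}

// Before: an explicit match that only classifies.
fn is_write_old(u: &PlaceUse) -> bool {
    match u {
        PlaceUse::Write | PlaceUse::Borrow { mutable: true } => true,
        _ => false,
    }
}

// After: the same predicate with `matches!`, including an or-pattern.
fn is_write_new(u: &PlaceUse) -> bool {
    matches!(u, PlaceUse::Write | PlaceUse::Borrow { mutable: true })
}

fn main() {
    for u in [PlaceUse::Read, PlaceUse::Write, PlaceUse::Borrow { mutable: true }] {
        assert_eq!(is_write_old(&u), is_write_new(&u));
    }
}
```
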
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index d5b99ea..72360e2 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -92,7 +92,7 @@
         /// Computes the `DefId` of the corresponding const parameter in case the `key` is a
         /// const argument and returns `None` otherwise.
         ///
-        /// ```rust
+        /// ```ignore (incomplete)
         /// let a = foo::<7>();
         /// //            ^ Calling `opt_const_param_of` for this argument,
         ///
@@ -156,21 +156,48 @@
             cache_on_disk_if { key.is_local() }
         }
 
-        /// Returns the list of predicates that can be used for
-        /// `SelectionCandidate::ProjectionCandidate` and
+        /// Returns the list of bounds that can be used for
+        /// `SelectionCandidate::ProjectionCandidate(_)` and
         /// `ProjectionTyCandidate::TraitDef`.
-        /// Specifically this is the bounds (equivalent to) those
-        /// written on the trait's type definition, or those
-        /// after the `impl` keyword
+        /// Specifically, these are the bounds written on the trait's type
+        /// definition, or those after the `impl` keyword.
         ///
+        /// ```ignore (incomplete)
         /// type X: Bound + 'lt
-        ///         ^^^^^^^^^^^
+        /// //      ^^^^^^^^^^^
         /// impl Debug + Display
-        ///      ^^^^^^^^^^^^^^^
+        /// //   ^^^^^^^^^^^^^^^
+        /// ```
         ///
         /// `key` is the `DefId` of the associated type or opaque type.
-        query projection_predicates(key: DefId) -> &'tcx ty::List<ty::Predicate<'tcx>> {
-            desc { |tcx| "finding projection predicates for `{}`", tcx.def_path_str(key) }
+        ///
+        /// Bounds from the parent (e.g. with nested impl trait) are not included.
+        query explicit_item_bounds(key: DefId) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+            desc { |tcx| "finding item bounds for `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Elaborated version of the predicates from `explicit_item_bounds`.
+        ///
+        /// For example:
+        ///
+        /// ```
+        /// trait MyTrait {
+        ///     type MyAType: Eq + ?Sized;
+        /// }
+        /// ```
+        ///
+        /// `explicit_item_bounds` returns `[<Self as MyTrait>::MyAType: Eq]`,
+        /// and `item_bounds` returns
+        /// ```text
+        /// [
+        ///     <Self as Trait>::MyAType: Eq,
+        ///     <Self as Trait>::MyAType: PartialEq<<Self as Trait>::MyAType>
+        /// ]
+        /// ```
+        ///
+        /// Bounds from the parent (e.g. with nested impl trait) are not included.
+        query item_bounds(key: DefId) -> &'tcx ty::List<ty::Predicate<'tcx>> {
+            desc { |tcx| "elaborating item bounds for `{}`", tcx.def_path_str(key) }
         }
 
         query projection_ty_from_predicates(key: (DefId, DefId)) -> Option<ty::ProjectionTy<'tcx>> {
@@ -191,6 +218,11 @@
             eval_always
             desc { |tcx| "parent module of `{}`", tcx.def_path_str(key.to_def_id()) }
         }
+
+        /// Internal helper query. Use `tcx.expansion_that_defined` instead.
+        query expn_that_defined(key: DefId) -> rustc_span::ExpnId {
+            desc { |tcx| "expansion that defined `{}`", tcx.def_path_str(key) }
+        }
     }
 
     Codegen {
@@ -365,6 +397,24 @@
             desc { |tcx| "computing predicates of `{}`", tcx.def_path_str(key) }
         }
 
+        /// Returns everything that looks like a predicate written explicitly
+        /// by the user on a trait item.
+        ///
+        /// Traits are unusual, because predicates on associated types are
+        /// converted into bounds on that type for backwards compatibility:
+        ///
+        /// trait X where Self::U: Copy { type U; }
+        ///
+        /// becomes
+        ///
+        /// trait X { type U: Copy; }
+        ///
+        /// `explicit_predicates_of` and `explicit_item_bounds` will then take
+        /// the appropriate subsets of the predicates here.
+        query trait_explicit_predicates_and_bounds(key: LocalDefId) -> ty::GenericPredicates<'tcx> {
+            desc { |tcx| "computing explicit predicates of trait `{}`", tcx.def_path_str(key.to_def_id()) }
+        }
+
         /// Returns the predicates written explicitly by the user.
         query explicit_predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
             desc { |tcx| "computing explicit predicates of `{}`", tcx.def_path_str(key) }
@@ -1118,7 +1168,7 @@
     }
 
     Other {
-        query foreign_modules(_: CrateNum) -> &'tcx [ForeignModule] {
+        query foreign_modules(_: CrateNum) -> Lrc<FxHashMap<DefId, ForeignModule>> {
             desc { "looking up the foreign modules of a linked crate" }
         }
 
@@ -1217,6 +1267,7 @@
 
     TypeChecking {
         query visibility(def_id: DefId) -> ty::Visibility {
+            eval_always
             desc { |tcx| "computing visibility of `{}`", tcx.def_path_str(def_id) }
         }
     }
diff --git a/compiler/rustc_middle/src/traits/chalk.rs b/compiler/rustc_middle/src/traits/chalk.rs
index d8507d08..f864ad8 100644
--- a/compiler/rustc_middle/src/traits/chalk.rs
+++ b/compiler/rustc_middle/src/traits/chalk.rs
@@ -102,48 +102,6 @@
         Some(write())
     }
 
-    fn debug_application_ty(
-        application_ty: &chalk_ir::ApplicationTy<Self>,
-        fmt: &mut fmt::Formatter<'_>,
-    ) -> Option<fmt::Result> {
-        match application_ty.name {
-            chalk_ir::TypeName::Ref(mutbl) => {
-                let data = application_ty.substitution.interned();
-                match (&**data[0].interned(), &**data[1].interned()) {
-                    (
-                        chalk_ir::GenericArgData::Lifetime(lifetime),
-                        chalk_ir::GenericArgData::Ty(ty),
-                    ) => Some(match mutbl {
-                        chalk_ir::Mutability::Not => write!(fmt, "(&{:?} {:?})", lifetime, ty),
-                        chalk_ir::Mutability::Mut => write!(fmt, "(&{:?} mut {:?})", lifetime, ty),
-                    }),
-                    _ => unreachable!(),
-                }
-            }
-            chalk_ir::TypeName::Array => {
-                let data = application_ty.substitution.interned();
-                match (&**data[0].interned(), &**data[1].interned()) {
-                    (chalk_ir::GenericArgData::Ty(ty), chalk_ir::GenericArgData::Const(len)) => {
-                        Some(write!(fmt, "[{:?}; {:?}]", ty, len))
-                    }
-                    _ => unreachable!(),
-                }
-            }
-            chalk_ir::TypeName::Slice => {
-                let data = application_ty.substitution.interned();
-                let ty = match &**data[0].interned() {
-                    chalk_ir::GenericArgData::Ty(t) => t,
-                    _ => unreachable!(),
-                };
-                Some(write!(fmt, "[{:?}]", ty))
-            }
-            _ => {
-                let chalk_ir::ApplicationTy { name, substitution } = application_ty;
-                Some(write!(fmt, "{:?}{:?}", name, chalk_ir::debug::Angle(substitution.interned())))
-            }
-        }
-    }
-
     fn debug_substitution(
         substitution: &chalk_ir::Substitution<Self>,
         fmt: &mut fmt::Formatter<'_>,
@@ -174,6 +132,32 @@
         Some(write!(fmt, "{:?}", clauses.interned()))
     }
 
+    fn debug_ty(ty: &chalk_ir::Ty<Self>, fmt: &mut fmt::Formatter<'_>) -> Option<fmt::Result> {
+        match &ty.interned().kind {
+            chalk_ir::TyKind::Ref(chalk_ir::Mutability::Not, lifetime, ty) => {
+                Some(write!(fmt, "(&{:?} {:?})", lifetime, ty))
+            }
+            chalk_ir::TyKind::Ref(chalk_ir::Mutability::Mut, lifetime, ty) => {
+                Some(write!(fmt, "(&{:?} mut {:?})", lifetime, ty))
+            }
+            chalk_ir::TyKind::Array(ty, len) => Some(write!(fmt, "[{:?}; {:?}]", ty, len)),
+            chalk_ir::TyKind::Slice(ty) => Some(write!(fmt, "[{:?}]", ty)),
+            chalk_ir::TyKind::Tuple(len, substs) => Some((|| {
+                write!(fmt, "(")?;
+                for (idx, substitution) in substs.interned().iter().enumerate() {
+                    if idx == *len && *len != 1 {
+                        // Don't add a trailing comma if the tuple has more than one element
+                        write!(fmt, "{:?}", substitution)?;
+                    } else {
+                        write!(fmt, "{:?},", substitution)?;
+                    }
+                }
+                write!(fmt, ")")
+            })()),
+            _ => None,
+        }
+    }
+
     fn debug_alias(
         alias_ty: &chalk_ir::AliasTy<Self>,
         fmt: &mut fmt::Formatter<'_>,
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
index 1dd6d59..4deb722 100644
--- a/compiler/rustc_middle/src/traits/mod.rs
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -13,6 +13,7 @@
 use crate::ty::subst::SubstsRef;
 use crate::ty::{self, AdtKind, Ty, TyCtxt};
 
+use rustc_errors::{Applicability, DiagnosticBuilder};
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
 use rustc_span::symbol::Symbol;
@@ -339,10 +340,24 @@
 #[cfg(target_arch = "x86_64")]
 static_assert_size!(ObligationCauseCode<'_>, 32);
 
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum StatementAsExpression {
+    CorrectType,
+    NeedsBoxing,
+}
+
+impl<'tcx> ty::Lift<'tcx> for StatementAsExpression {
+    type Lifted = StatementAsExpression;
+    fn lift_to_tcx(self, _tcx: TyCtxt<'tcx>) -> Option<StatementAsExpression> {
+        Some(self)
+    }
+}
+
 #[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
 pub struct MatchExpressionArmCause<'tcx> {
     pub arm_span: Span,
-    pub semi_span: Option<Span>,
+    pub scrut_span: Span,
+    pub semi_span: Option<(Span, StatementAsExpression)>,
     pub source: hir::MatchSource,
     pub prior_arms: Vec<Span>,
     pub last_ty: Ty<'tcx>,
@@ -355,7 +370,7 @@
     pub then: Span,
     pub else_sp: Span,
     pub outer: Option<Span>,
-    pub semicolon: Option<Span>,
+    pub semicolon: Option<(Span, StatementAsExpression)>,
     pub opt_suggest_box_span: Option<Span>,
 }
 
@@ -646,13 +661,13 @@
             ObjectSafetyViolation::SizedSelf(_) => "it requires `Self: Sized`".into(),
             ObjectSafetyViolation::SupertraitSelf(ref spans) => {
                 if spans.iter().any(|sp| *sp != DUMMY_SP) {
-                    "it uses `Self` as a type parameter in this".into()
+                    "it uses `Self` as a type parameter".into()
                 } else {
                     "it cannot use `Self` as a type parameter in a supertrait or `where`-clause"
                         .into()
                 }
             }
-            ObjectSafetyViolation::Method(name, MethodViolationCode::StaticMethod(_), _) => {
+            ObjectSafetyViolation::Method(name, MethodViolationCode::StaticMethod(_, _, _), _) => {
                 format!("associated function `{}` has no `self` parameter", name).into()
             }
             ObjectSafetyViolation::Method(
@@ -686,32 +701,65 @@
         }
     }
 
-    pub fn solution(&self) -> Option<(String, Option<(String, Span)>)> {
-        Some(match *self {
-            ObjectSafetyViolation::SizedSelf(_) | ObjectSafetyViolation::SupertraitSelf(_) => {
-                return None;
+    pub fn solution(&self, err: &mut DiagnosticBuilder<'_>) {
+        match *self {
+            ObjectSafetyViolation::SizedSelf(_) | ObjectSafetyViolation::SupertraitSelf(_) => {}
+            ObjectSafetyViolation::Method(
+                name,
+                MethodViolationCode::StaticMethod(sugg, self_span, has_args),
+                _,
+            ) => {
+                err.span_suggestion(
+                    self_span,
+                    &format!(
+                        "consider turning `{}` into a method by giving it a `&self` argument",
+                        name
+                    ),
+                    format!("&self{}", if has_args { ", " } else { "" }),
+                    Applicability::MaybeIncorrect,
+                );
+                match sugg {
+                    Some((sugg, span)) => {
+                        err.span_suggestion(
+                            span,
+                            &format!(
+                                "alternatively, consider constraining `{}` so it does not apply to \
+                                 trait objects",
+                                name
+                            ),
+                            sugg.to_string(),
+                            Applicability::MaybeIncorrect,
+                        );
+                    }
+                    None => {
+                        err.help(&format!(
+                            "consider turning `{}` into a method by giving it a `&self` \
+                             argument or constraining it so it does not apply to trait objects",
+                            name
+                        ));
+                    }
+                }
             }
-            ObjectSafetyViolation::Method(name, MethodViolationCode::StaticMethod(sugg), _) => (
-                format!(
-                    "consider turning `{}` into a method by giving it a `&self` argument or \
-                     constraining it so it does not apply to trait objects",
-                    name
-                ),
-                sugg.map(|(sugg, sp)| (sugg.to_string(), sp)),
-            ),
             ObjectSafetyViolation::Method(
                 name,
                 MethodViolationCode::UndispatchableReceiver,
                 span,
-            ) => (
-                format!("consider changing method `{}`'s `self` parameter to be `&self`", name),
-                Some(("&Self".to_string(), span)),
-            ),
+            ) => {
+                err.span_suggestion(
+                    span,
+                    &format!(
+                        "consider changing method `{}`'s `self` parameter to be `&self`",
+                        name
+                    ),
+                    "&Self".to_string(),
+                    Applicability::MachineApplicable,
+                );
+            }
             ObjectSafetyViolation::AssocConst(name, _)
             | ObjectSafetyViolation::Method(name, ..) => {
-                (format!("consider moving `{}` to another trait", name), None)
+                err.help(&format!("consider moving `{}` to another trait", name));
             }
-        })
+        }
     }
 
     pub fn spans(&self) -> SmallVec<[Span; 1]> {
@@ -735,7 +783,7 @@
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)]
 pub enum MethodViolationCode {
     /// e.g., `fn foo()`
-    StaticMethod(Option<(&'static str, Span)>),
+    StaticMethod(Option<(&'static str, Span)>, Span, bool /* has args */),
 
     /// e.g., `fn foo(&self, x: Self)`
     ReferencesSelfInput(usize),
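
For the `StaticMethod` variant, `solution` now emits its suggestions directly on the `DiagnosticBuilder` instead of returning strings, and the variant carries the span where a receiver could be inserted plus whether the function already has arguments. A minimal standalone sketch of just the snippet-building step (not the real `span_suggestion` call):

    // Hedged sketch of the receiver snippet used by the StaticMethod suggestion:
    // if the associated function already has arguments, the inserted `&self`
    // needs a trailing ", " so the resulting parameter list stays well-formed.
    fn receiver_snippet(has_args: bool) -> String {
        format!("&self{}", if has_args { ", " } else { "" })
    }

    fn main() {
        assert_eq!(receiver_snippet(false), "&self");   // fn foo() -> fn foo(&self)
        assert_eq!(receiver_snippet(true), "&self, ");  // fn foo(x: u8) -> fn foo(&self, x: u8)
    }
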
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
index 6ad514c..c570ad3 100644
--- a/compiler/rustc_middle/src/traits/select.rs
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -105,9 +105,10 @@
     ImplCandidate(DefId),
     AutoImplCandidate(DefId),
 
-    /// This is a trait matching with a projected type as `Self`, and
-    /// we found an applicable bound in the trait definition.
-    ProjectionCandidate,
+    /// This is a trait matching with a projected type as `Self`, and we found
+    /// an applicable bound in the trait definition. The `usize` is an index
+    /// into the list returned by `tcx.item_bounds`.
+    ProjectionCandidate(usize),
 
     /// Implementation of a `Fn`-family trait by one of the anonymous types
     /// generated for a `||` expression.
@@ -126,7 +127,10 @@
 
     TraitAliasCandidate(DefId),
 
-    ObjectCandidate,
+    /// Matching `dyn Trait` with a supertrait of `Trait`. The index is the
+    /// position in the iterator returned by
+    /// `rustc_infer::traits::util::supertraits`.
+    ObjectCandidate(usize),
 
     BuiltinObjectCandidate,
 
diff --git a/compiler/rustc_middle/src/traits/specialization_graph.rs b/compiler/rustc_middle/src/traits/specialization_graph.rs
index 969404c..ec6010e 100644
--- a/compiler/rustc_middle/src/traits/specialization_graph.rs
+++ b/compiler/rustc_middle/src/traits/specialization_graph.rs
@@ -79,10 +79,7 @@
 
 impl<'tcx> Node {
     pub fn is_from_trait(&self) -> bool {
-        match *self {
-            Node::Trait(..) => true,
-            _ => false,
-        }
+        matches!(self, Node::Trait(..))
     }
 
     /// Iterate over the items defined directly by the given (impl or trait) node.
diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs
index 46ef5ff..89d0e13 100644
--- a/compiler/rustc_middle/src/ty/adjustment.rs
+++ b/compiler/rustc_middle/src/ty/adjustment.rs
@@ -85,10 +85,7 @@
 
 impl Adjustment<'tcx> {
     pub fn is_region_borrow(&self) -> bool {
-        match self.kind {
-            Adjust::Borrow(AutoBorrow::Ref(..)) => true,
-            _ => false,
-        }
+        matches!(self.kind, Adjust::Borrow(AutoBorrow::Ref(..)))
     }
 }
 
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
index 8ea34f9..aaf6a85 100644
--- a/compiler/rustc_middle/src/ty/codec.rs
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -182,14 +182,6 @@
     where
         F: FnOnce(&mut Self) -> Result<Ty<'tcx>, Self::Error>;
 
-    fn cached_predicate_for_shorthand<F>(
-        &mut self,
-        shorthand: usize,
-        or_insert_with: F,
-    ) -> Result<ty::Predicate<'tcx>, Self::Error>
-    where
-        F: FnOnce(&mut Self) -> Result<ty::Predicate<'tcx>, Self::Error>;
-
     fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
     where
         F: FnOnce(&mut Self) -> R;
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
index 64faacc..0af884a 100644
--- a/compiler/rustc_middle/src/ty/consts.rs
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -132,7 +132,7 @@
     #[inline]
     /// Creates an interned zst constant.
     pub fn zero_sized(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> &'tcx Self {
-        Self::from_scalar(tcx, Scalar::zst(), ty)
+        Self::from_scalar(tcx, Scalar::ZST, ty)
     }
 
     #[inline]
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
index ced0429..63e95f2 100644
--- a/compiler/rustc_middle/src/ty/consts/int.rs
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -1,31 +1,32 @@
-use crate::mir::interpret::truncate;
-use rustc_target::abi::Size;
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::Float;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_target::abi::{Size, TargetDataLayout};
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
 
 #[derive(Copy, Clone)]
 /// A type for representing any integer. Only used for printing.
-// FIXME: Use this for the integer-tree representation needed for type level ints and
-// const generics?
 pub struct ConstInt {
-    /// Number of bytes of the integer. Only 1, 2, 4, 8, 16 are legal values.
-    size: u8,
+    /// The "untyped" variant of `ConstInt`.
+    int: ScalarInt,
     /// Whether the value is of a signed integer type.
     signed: bool,
     /// Whether the value is a `usize` or `isize` type.
     is_ptr_sized_integral: bool,
-    /// Raw memory of the integer. All bytes beyond the `size` are unused and must be zero.
-    raw: u128,
 }
 
 impl ConstInt {
-    pub fn new(raw: u128, size: Size, signed: bool, is_ptr_sized_integral: bool) -> Self {
-        assert!(raw <= truncate(u128::MAX, size));
-        Self { raw, size: size.bytes() as u8, signed, is_ptr_sized_integral }
+    pub fn new(int: ScalarInt, signed: bool, is_ptr_sized_integral: bool) -> Self {
+        Self { int, signed, is_ptr_sized_integral }
     }
 }
 
 impl std::fmt::Debug for ConstInt {
     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let Self { size, signed, raw, is_ptr_sized_integral } = *self;
+        let Self { int, signed, is_ptr_sized_integral } = *self;
+        let size = int.size().bytes();
+        let raw = int.data;
         if signed {
             let bit_size = size * 8;
             let min = 1u128 << (bit_size - 1);
@@ -73,7 +74,7 @@
                 Ok(())
             }
         } else {
-            let max = truncate(u128::MAX, Size::from_bytes(size));
+            let max = Size::from_bytes(size).truncate(u128::MAX);
             if raw == max {
                 match (size, is_ptr_sized_integral) {
                     (_, true) => write!(fmt, "usize::MAX"),
@@ -109,3 +110,257 @@
         }
     }
 }
+
+/// The raw bytes of a simple value.
+///
+/// This is a packed struct in order to allow this type to be optimally embedded in enums
+/// (like Scalar).
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[repr(packed)]
+pub struct ScalarInt {
+    /// The first `size` bytes of `data` are the value.
+    /// Do not try to read less or more bytes than that. The remaining bytes must be 0.
+    data: u128,
+    size: u8,
+}
+
+// Cannot derive these, as the derives take references to the fields, and we
+// can't take references to fields of packed structs.
+impl<CTX> crate::ty::HashStable<CTX> for ScalarInt {
+    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut crate::ty::StableHasher) {
+        // Using a block `{self.data}` here to force a copy instead of using `self.data`
+        // directly, because `hash_stable` takes `&self` and would thus borrow `self.data`.
+        // Since `Self` is a packed struct, that would create a possibly unaligned reference,
+        // which is UB.
+        { self.data }.hash_stable(hcx, hasher);
+        self.size.hash_stable(hcx, hasher);
+    }
+}
+
+impl<S: Encoder> Encodable<S> for ScalarInt {
+    fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+        s.emit_u128(self.data)?;
+        s.emit_u8(self.size)
+    }
+}
+
+impl<D: Decoder> Decodable<D> for ScalarInt {
+    fn decode(d: &mut D) -> Result<ScalarInt, D::Error> {
+        Ok(ScalarInt { data: d.read_u128()?, size: d.read_u8()? })
+    }
+}
+
+impl ScalarInt {
+    pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: 1 };
+
+    pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: 1 };
+
+    pub const ZST: ScalarInt = ScalarInt { data: 0_u128, size: 0 };
+
+    #[inline]
+    pub fn size(self) -> Size {
+        Size::from_bytes(self.size)
+    }
+
+    /// Make sure the `data` fits in `size`.
+    /// This is guaranteed by all constructors here, but this check has saved us from
+    /// bugs many times in the past, so keeping it around is definitely worth it.
+    #[inline(always)]
+    fn check_data(self) {
+        // Using a block `{self.data}` here to force a copy instead of using `self.data`
+        // directly, because `debug_assert_eq` takes references to its arguments and formatting
+        // arguments and would thus borrow `self.data`. Since `Self`
+        // is a packed struct, that would create a possibly unaligned reference, which
+        // is UB.
+        debug_assert_eq!(
+            self.size().truncate(self.data),
+            { self.data },
+            "Scalar value {:#x} exceeds size of {} bytes",
+            { self.data },
+            self.size
+        );
+    }
+
+    #[inline]
+    pub fn null(size: Size) -> Self {
+        Self { data: 0, size: size.bytes() as u8 }
+    }
+
+    #[inline]
+    pub fn is_null(self) -> bool {
+        self.data == 0
+    }
+
+    pub(crate) fn ptr_sized_op<E>(
+        self,
+        dl: &TargetDataLayout,
+        f_int: impl FnOnce(u64) -> Result<u64, E>,
+    ) -> Result<Self, E> {
+        assert_eq!(u64::from(self.size), dl.pointer_size.bytes());
+        Ok(Self::try_from_uint(f_int(u64::try_from(self.data).unwrap())?, self.size()).unwrap())
+    }
+
+    #[inline]
+    pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
+        let data = i.into();
+        if size.truncate(data) == data {
+            Some(Self { data, size: size.bytes() as u8 })
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
+        let i = i.into();
+        // `into` performed sign extension, we have to truncate
+        let truncated = size.truncate(i as u128);
+        if size.sign_extend(truncated) as i128 == i {
+            Some(Self { data: truncated, size: size.bytes() as u8 })
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub fn assert_bits(self, target_size: Size) -> u128 {
+        self.to_bits(target_size).unwrap_or_else(|size| {
+            bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes())
+        })
+    }
+
+    #[inline]
+    pub fn to_bits(self, target_size: Size) -> Result<u128, Size> {
+        assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+        if target_size.bytes() == u64::from(self.size) {
+            self.check_data();
+            Ok(self.data)
+        } else {
+            Err(self.size())
+        }
+    }
+}
+
+macro_rules! from {
+    ($($ty:ty),*) => {
+        $(
+            impl From<$ty> for ScalarInt {
+                #[inline]
+                fn from(u: $ty) -> Self {
+                    Self {
+                        data: u128::from(u),
+                        size: std::mem::size_of::<$ty>() as u8,
+                    }
+                }
+            }
+        )*
+    }
+}
+
+macro_rules! try_from {
+    ($($ty:ty),*) => {
+        $(
+            impl TryFrom<ScalarInt> for $ty {
+                type Error = Size;
+                #[inline]
+                fn try_from(int: ScalarInt) -> Result<Self, Size> {
+                    // The `unwrap` cannot fail because to_bits (if it succeeds)
+                    // is guaranteed to return a value that fits into the size.
+                    int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
+                       .map(|u| u.try_into().unwrap())
+                }
+            }
+        )*
+    }
+}
+
+from!(u8, u16, u32, u64, u128, bool);
+try_from!(u8, u16, u32, u64, u128);
+
+impl From<char> for ScalarInt {
+    #[inline]
+    fn from(c: char) -> Self {
+        Self { data: c as u128, size: std::mem::size_of::<char>() as u8 }
+    }
+}
+
+impl TryFrom<ScalarInt> for char {
+    type Error = Size;
+    #[inline]
+    fn try_from(int: ScalarInt) -> Result<Self, Size> {
+        int.to_bits(Size::from_bytes(std::mem::size_of::<char>()))
+            .map(|u| char::from_u32(u.try_into().unwrap()).unwrap())
+    }
+}
+
+impl From<Single> for ScalarInt {
+    #[inline]
+    fn from(f: Single) -> Self {
+        // We trust apfloat to give us properly truncated data.
+        Self { data: f.to_bits(), size: 4 }
+    }
+}
+
+impl TryFrom<ScalarInt> for Single {
+    type Error = Size;
+    #[inline]
+    fn try_from(int: ScalarInt) -> Result<Self, Size> {
+        int.to_bits(Size::from_bytes(4)).map(Self::from_bits)
+    }
+}
+
+impl From<Double> for ScalarInt {
+    #[inline]
+    fn from(f: Double) -> Self {
+        // We trust apfloat to give us properly truncated data.
+        Self { data: f.to_bits(), size: 8 }
+    }
+}
+
+impl TryFrom<ScalarInt> for Double {
+    type Error = Size;
+    #[inline]
+    fn try_from(int: ScalarInt) -> Result<Self, Size> {
+        int.to_bits(Size::from_bytes(8)).map(Self::from_bits)
+    }
+}
+
+impl fmt::Debug for ScalarInt {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.size == 0 {
+            self.check_data();
+            write!(f, "<ZST>")
+        } else {
+            // Dispatch to LowerHex below.
+            write!(f, "0x{:x}", self)
+        }
+    }
+}
+
+impl fmt::LowerHex for ScalarInt {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.check_data();
+        // Format as hex number wide enough to fit any value of the given `size`.
+        // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+        // Using a block `{self.data}` here to force a copy instead of using `self.data`
+        // directly, because `write!` takes references to its formatting arguments and
+        // would thus borrow `self.data`. Since `Self`
+        // is a packed struct, that would create a possibly unaligned reference, which
+        // is UB.
+        write!(f, "{:01$x}", { self.data }, self.size as usize * 2)
+    }
+}
+
+impl fmt::UpperHex for ScalarInt {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.check_data();
+        // Format as hex number wide enough to fit any value of the given `size`.
+        // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+        // Using a block `{self.data}` here to force a copy instead of using `self.data`
+        // directly, because `write!` takes references to its formatting arguments and
+        // would thus borrow `self.data`. Since `Self`
+        // is a packed struct, that would create a possibly unaligned reference, which
+        // is UB.
+        write!(f, "{:01$X}", { self.data }, self.size as usize * 2)
+    }
+}
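
The `ScalarInt` constructors enforce that `data` always fits in `size` bytes: `try_from_uint` truncates and compares, while `try_from_int` truncates and then sign-extends back before comparing. A standalone sketch of that round-trip check, assuming nothing from rustc (the `truncate` and `sign_extend` helpers below mirror `Size::truncate` and `Size::sign_extend` but are not the real API):

    // Standalone sketch of the ScalarInt::try_from_int round-trip check.
    // `size_bytes` plays the role of rustc's `Size`; these helpers mirror
    // `Size::truncate` and `Size::sign_extend` but are not the real API.

    fn truncate(value: u128, size_bytes: u64) -> u128 {
        let bits = size_bytes * 8;
        if bits == 128 { value } else { value & ((1u128 << bits) - 1) }
    }

    fn sign_extend(value: u128, size_bytes: u64) -> i128 {
        let bits = (size_bytes * 8) as u32;
        let shift = 128 - bits;
        ((value << shift) as i128) >> shift
    }

    fn try_from_int(i: i128, size_bytes: u64) -> Option<u128> {
        // `i128` is already sign-extended, so truncate first ...
        let truncated = truncate(i as u128, size_bytes);
        // ... and only accept the value if sign-extending it gives `i` back.
        if sign_extend(truncated, size_bytes) == i { Some(truncated) } else { None }
    }

    fn main() {
        assert_eq!(try_from_int(-1, 1), Some(0xff)); // fits in i8
        assert_eq!(try_from_int(128, 1), None);      // does not fit in i8
        assert_eq!(try_from_int(128, 2), Some(128)); // fits in i16
    }
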
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 22c3fd3..7263d06 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -22,7 +22,7 @@
     ExistentialPredicate, FloatVar, FloatVid, GenericParamDefKind, InferConst, InferTy, IntVar,
     IntVid, List, ParamConst, ParamTy, PolyFnSig, Predicate, PredicateInner, PredicateKind,
     ProjectionTy, Region, RegionKind, ReprOptions, TraitObjectVisitor, Ty, TyKind, TyS, TyVar,
-    TyVid, TypeAndMut,
+    TyVid, TypeAndMut, Visibility,
 };
 use rustc_ast as ast;
 use rustc_ast::expand::allocator::AllocatorKind;
@@ -134,7 +134,7 @@
     fn intern_predicate(&self, kind: PredicateKind<'tcx>) -> &'tcx PredicateInner<'tcx> {
         self.predicate
             .intern(kind, |kind| {
-                let flags = super::flags::FlagComputation::for_predicate(&kind);
+                let flags = super::flags::FlagComputation::for_predicate(kind);
 
                 let predicate_struct = PredicateInner {
                     kind,
@@ -368,7 +368,7 @@
     /// leads to a `vec![&&Option<i32>, &Option<i32>]`. Empty vectors are not stored.
     ///
     /// See:
-    /// https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions
+    /// <https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions>
     pat_adjustments: ItemLocalMap<Vec<Ty<'tcx>>>,
 
     /// Borrows
@@ -418,6 +418,12 @@
     /// Stores the type, expression, span and optional scope span of all types
     /// that are live across the yield of this generator (if a generator).
     pub generator_interior_types: Vec<GeneratorInteriorTypeCause<'tcx>>,
+
+    /// We sometimes treat byte string literals (which are of type `&[u8; N]`)
+    /// as `&[u8]`, depending on the pattern in which they are used.
+    /// This hashset records all instances where we behave
+    /// like this to allow `const_to_pat` to reliably handle this situation.
+    pub treat_byte_string_as_slice: ItemLocalSet,
 }
 
 impl<'tcx> TypeckResults<'tcx> {
@@ -443,6 +449,7 @@
             concrete_opaque_types: Default::default(),
             closure_captures: Default::default(),
             generator_interior_types: Default::default(),
+            treat_byte_string_as_slice: Default::default(),
         }
     }
 
@@ -534,10 +541,6 @@
         self.node_type(pat.hir_id)
     }
 
-    pub fn pat_ty_opt(&self, pat: &hir::Pat<'_>) -> Option<Ty<'tcx>> {
-        self.node_type_opt(pat.hir_id)
-    }
-
     // Returns the type of an expression as a monotype.
     //
     // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression.  That is, in
@@ -588,10 +591,7 @@
             return false;
         }
 
-        match self.type_dependent_defs().get(expr.hir_id) {
-            Some(Ok((DefKind::AssocFn, _))) => true,
-            _ => false,
-        }
+        matches!(self.type_dependent_defs().get(expr.hir_id), Some(Ok((DefKind::AssocFn, _))))
     }
 
     pub fn extract_binding_mode(&self, s: &Session, id: HirId, sp: Span) -> Option<BindingMode> {
@@ -684,6 +684,7 @@
             ref concrete_opaque_types,
             ref closure_captures,
             ref generator_interior_types,
+            ref treat_byte_string_as_slice,
         } = *self;
 
         hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
@@ -717,6 +718,7 @@
             concrete_opaque_types.hash_stable(hcx, hasher);
             closure_captures.hash_stable(hcx, hasher);
             generator_interior_types.hash_stable(hcx, hasher);
+            treat_byte_string_as_slice.hash_stable(hcx, hasher);
         })
     }
 }
@@ -851,7 +853,7 @@
 
         CommonConsts {
             unit: mk_const(ty::Const {
-                val: ty::ConstKind::Value(ConstValue::Scalar(Scalar::zst())),
+                val: ty::ConstKind::Value(ConstValue::Scalar(Scalar::ZST)),
                 ty: types.unit,
             }),
         }
@@ -918,6 +920,9 @@
     /// Common consts, pre-interned for your convenience.
     pub consts: CommonConsts<'tcx>,
 
+    /// Visibilities produced by resolver.
+    pub visibilities: FxHashMap<LocalDefId, Visibility>,
+
     /// Resolutions of `extern crate` items produced by resolver.
     extern_crate_map: FxHashMap<LocalDefId, CrateNum>,
 
@@ -1064,7 +1069,7 @@
         )
     }
 
-    pub fn lift<T: ?Sized + Lift<'tcx>>(self, value: &T) -> Option<T::Lifted> {
+    pub fn lift<T: Lift<'tcx>>(self, value: T) -> Option<T::Lifted> {
         value.lift_to_tcx(self)
     }
 
@@ -1086,7 +1091,7 @@
         crate_name: &str,
         output_filenames: &OutputFilenames,
     ) -> GlobalCtxt<'tcx> {
-        let data_layout = TargetDataLayout::parse(&s.target.target).unwrap_or_else(|err| {
+        let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| {
             s.fatal(&err);
         });
         let interners = CtxtInterners::new(arena);
@@ -1131,6 +1136,7 @@
             types: common_types,
             lifetimes: common_lifetimes,
             consts: common_consts,
+            visibilities: resolutions.visibilities,
             extern_crate_map: resolutions.extern_crate_map,
             trait_map,
             export_map: resolutions.export_map,
@@ -1529,7 +1535,7 @@
     /// Determines whether identifiers in the assembly have strict naming rules.
     /// Currently, only NVPTX* targets need it.
     pub fn has_strict_asm_symbol_naming(self) -> bool {
-        self.sess.target.target.arch.contains("nvptx")
+        self.sess.target.arch.contains("nvptx")
     }
 
     /// Returns `&'static core::panic::Location<'static>`.
@@ -1572,16 +1578,16 @@
 /// e.g., `()` or `u8`, was interned in a different context.
 pub trait Lift<'tcx>: fmt::Debug {
     type Lifted: fmt::Debug + 'tcx;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted>;
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted>;
 }
 
 macro_rules! nop_lift {
     ($set:ident; $ty:ty => $lifted:ty) => {
         impl<'a, 'tcx> Lift<'tcx> for $ty {
             type Lifted = $lifted;
-            fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-                if tcx.interners.$set.contains_pointer_to(&Interned(*self)) {
-                    Some(unsafe { mem::transmute(*self) })
+            fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+                if tcx.interners.$set.contains_pointer_to(&Interned(self)) {
+                    Some(unsafe { mem::transmute(self) })
                 } else {
                     None
                 }
@@ -1594,12 +1600,12 @@
     ($set:ident; $ty:ty => $lifted:ty) => {
         impl<'a, 'tcx> Lift<'tcx> for &'a List<$ty> {
             type Lifted = &'tcx List<$lifted>;
-            fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+            fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
                 if self.is_empty() {
                     return Some(List::empty());
                 }
-                if tcx.interners.$set.contains_pointer_to(&Interned(*self)) {
-                    Some(unsafe { mem::transmute(*self) })
+                if tcx.interners.$set.contains_pointer_to(&Interned(self)) {
+                    Some(unsafe { mem::transmute(self) })
                 } else {
                     None
                 }
@@ -2039,13 +2045,13 @@
 
 macro_rules! slice_interners {
     ($($field:ident: $method:ident($ty:ty)),+ $(,)?) => (
-        $(impl<'tcx> TyCtxt<'tcx> {
-            pub fn $method(self, v: &[$ty]) -> &'tcx List<$ty> {
+        impl<'tcx> TyCtxt<'tcx> {
+            $(pub fn $method(self, v: &[$ty]) -> &'tcx List<$ty> {
                 self.interners.$field.intern_ref(v, || {
                     Interned(List::from_arena(&*self.arena, v))
                 }).0
-            }
-        })+
+            })+
+        }
     );
 }
 
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
index 7153197..65703d0 100644
--- a/compiler/rustc_middle/src/ty/diagnostics.rs
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -11,21 +11,16 @@
 impl<'tcx> TyS<'tcx> {
     /// Similar to `TyS::is_primitive`, but also considers inferred numeric values to be primitive.
     pub fn is_primitive_ty(&self) -> bool {
-        match self.kind() {
-            Bool
-            | Char
-            | Str
-            | Int(_)
-            | Uint(_)
-            | Float(_)
+        matches!(
+            self.kind(),
+            Bool | Char | Str | Int(_) | Uint(_) | Float(_)
             | Infer(
                 InferTy::IntVar(_)
                 | InferTy::FloatVar(_)
                 | InferTy::FreshIntTy(_)
-                | InferTy::FreshFloatTy(_),
-            ) => true,
-            _ => false,
-        }
+                | InferTy::FreshFloatTy(_)
+            )
+        )
     }
 
     /// Whether the type is succinctly representable as a type instead of just referred to with a
@@ -64,11 +59,16 @@
 
     /// Whether the type can be safely suggested during error recovery.
     pub fn is_suggestable(&self) -> bool {
-        match self.kind() {
-            Opaque(..) | FnDef(..) | FnPtr(..) | Dynamic(..) | Closure(..) | Infer(..)
-            | Projection(..) => false,
-            _ => true,
-        }
+        !matches!(
+            self.kind(),
+            Opaque(..)
+                | FnDef(..)
+                | FnPtr(..)
+                | Dynamic(..)
+                | Closure(..)
+                | Infer(..)
+                | Projection(..)
+        )
     }
 }
 
diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs
index 82d698b..5ec0ec0 100644
--- a/compiler/rustc_middle/src/ty/error.rs
+++ b/compiler/rustc_middle/src/ty/error.rs
@@ -229,7 +229,7 @@
             ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did)).into(),
             ty::Foreign(def_id) => format!("extern type `{}`", tcx.def_path_str(def_id)).into(),
             ty::Array(t, n) => {
-                let n = tcx.lift(&n).unwrap();
+                let n = tcx.lift(n).unwrap();
                 match n.try_eval_usize(tcx, ty::ParamEnv::empty()) {
                     _ if t.is_simple_ty() => format!("array `{}`", self).into(),
                     Some(n) => format!("array of {} element{}", n, pluralize!(n)).into(),
@@ -334,26 +334,15 @@
         debug!("note_and_explain_type_err err={:?} cause={:?}", err, cause);
         match err {
             Sorts(values) => {
-                let expected_str = values.expected.sort_string(self);
-                let found_str = values.found.sort_string(self);
-                if expected_str == found_str && expected_str == "closure" {
-                    db.note("no two closures, even if identical, have the same type");
-                    db.help("consider boxing your closure and/or using it as a trait object");
-                }
-                if expected_str == found_str && expected_str == "opaque type" {
-                    // Issue #63167
-                    db.note("distinct uses of `impl Trait` result in different opaque types");
-                    let e_str = values.expected.to_string();
-                    let f_str = values.found.to_string();
-                    if e_str == f_str && &e_str == "impl std::future::Future" {
-                        // FIXME: use non-string based check.
-                        db.help(
-                            "if both `Future`s have the same `Output` type, consider \
-                                 `.await`ing on both of them",
-                        );
-                    }
-                }
                 match (values.expected.kind(), values.found.kind()) {
+                    (ty::Closure(..), ty::Closure(..)) => {
+                        db.note("no two closures, even if identical, have the same type");
+                        db.help("consider boxing your closure and/or using it as a trait object");
+                    }
+                    (ty::Opaque(..), ty::Opaque(..)) => {
+                        // Issue #63167
+                        db.note("distinct uses of `impl Trait` result in different opaque types");
+                    }
                     (ty::Float(_), ty::Infer(ty::IntVar(_))) => {
                         if let Ok(
                             // Issue #53280
@@ -382,12 +371,12 @@
                         }
                         db.note(
                             "a type parameter was expected, but a different one was found; \
-                                 you might be missing a type parameter or trait bound",
+                             you might be missing a type parameter or trait bound",
                         );
                         db.note(
                             "for more information, visit \
-                                 https://doc.rust-lang.org/book/ch10-02-traits.html\
-                                 #traits-as-parameters",
+                             https://doc.rust-lang.org/book/ch10-02-traits.html\
+                             #traits-as-parameters",
                         );
                     }
                     (ty::Projection(_), ty::Projection(_)) => {
@@ -471,8 +460,8 @@
                         }
                         db.note(
                             "for more information, visit \
-                                 https://doc.rust-lang.org/book/ch10-02-traits.html\
-                                 #traits-as-parameters",
+                             https://doc.rust-lang.org/book/ch10-02-traits.html\
+                             #traits-as-parameters",
                         );
                     }
                     (ty::Param(p), ty::Closure(..) | ty::Generator(..)) => {
diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs
index c9a4022..8b97a87 100644
--- a/compiler/rustc_middle/src/ty/flags.rs
+++ b/compiler/rustc_middle/src/ty/flags.rs
@@ -22,7 +22,7 @@
         result
     }
 
-    pub fn for_predicate(kind: &ty::PredicateKind<'_>) -> FlagComputation {
+    pub fn for_predicate(kind: ty::PredicateKind<'_>) -> FlagComputation {
         let mut result = FlagComputation::new();
         result.add_predicate_kind(kind);
         result
@@ -53,7 +53,14 @@
 
     /// Adds the flags/depth from a set of types that appear within the current type, but within a
     /// region binder.
-    fn add_bound_computation(&mut self, computation: FlagComputation) {
+    fn bound_computation<T, F>(&mut self, value: ty::Binder<T>, f: F)
+    where
+        F: FnOnce(&mut Self, T),
+    {
+        let mut computation = FlagComputation::new();
+
+        f(&mut computation, value.skip_binder());
+
         self.add_flags(computation.flags);
 
         // The types that contributed to `computation` occurred within
@@ -101,9 +108,7 @@
             }
 
             &ty::GeneratorWitness(ts) => {
-                let mut computation = FlagComputation::new();
-                computation.add_tys(ts.skip_binder());
-                self.add_bound_computation(computation);
+                self.bound_computation(ts, |flags, ts| flags.add_tys(ts));
             }
 
             &ty::Closure(_, substs) => {
@@ -154,20 +159,21 @@
                 self.add_substs(substs);
             }
 
-            &ty::Dynamic(ref obj, r) => {
-                let mut computation = FlagComputation::new();
-                for predicate in obj.skip_binder().iter() {
-                    match predicate {
-                        ty::ExistentialPredicate::Trait(tr) => computation.add_substs(tr.substs),
-                        ty::ExistentialPredicate::Projection(p) => {
-                            let mut proj_computation = FlagComputation::new();
-                            proj_computation.add_existential_projection(&p);
-                            self.add_bound_computation(proj_computation);
+            &ty::Dynamic(obj, r) => {
+                self.bound_computation(obj, |computation, obj| {
+                    for predicate in obj.iter() {
+                        match predicate {
+                            ty::ExistentialPredicate::Trait(tr) => {
+                                computation.add_substs(tr.substs)
+                            }
+                            ty::ExistentialPredicate::Projection(p) => {
+                                computation.add_existential_projection(&p);
+                            }
+                            ty::ExistentialPredicate::AutoTrait(_) => {}
                         }
-                        ty::ExistentialPredicate::AutoTrait(_) => {}
                     }
-                }
-                self.add_bound_computation(computation);
+                });
+
                 self.add_region(r);
             }
 
@@ -195,22 +201,21 @@
                 self.add_substs(substs);
             }
 
-            &ty::FnPtr(f) => {
-                self.add_fn_sig(f);
-            }
+            &ty::FnPtr(fn_sig) => self.bound_computation(fn_sig, |computation, fn_sig| {
+                computation.add_tys(fn_sig.inputs());
+                computation.add_ty(fn_sig.output());
+            }),
         }
     }
 
-    fn add_predicate_kind(&mut self, kind: &ty::PredicateKind<'_>) {
+    fn add_predicate_kind(&mut self, kind: ty::PredicateKind<'_>) {
         match kind {
             ty::PredicateKind::ForAll(binder) => {
-                let mut computation = FlagComputation::new();
-
-                computation.add_predicate_atom(binder.skip_binder());
-
-                self.add_bound_computation(computation);
+                self.bound_computation(binder, |computation, atom| {
+                    computation.add_predicate_atom(atom)
+                });
             }
-            &ty::PredicateKind::Atom(atom) => self.add_predicate_atom(atom),
+            ty::PredicateKind::Atom(atom) => self.add_predicate_atom(atom),
         }
     }
 
@@ -266,15 +271,6 @@
         }
     }
 
-    fn add_fn_sig(&mut self, fn_sig: ty::PolyFnSig<'_>) {
-        let mut computation = FlagComputation::new();
-
-        computation.add_tys(fn_sig.skip_binder().inputs());
-        computation.add_ty(fn_sig.skip_binder().output());
-
-        self.add_bound_computation(computation);
-    }
-
     fn add_region(&mut self, r: ty::Region<'_>) {
         self.add_flags(r.type_flags());
         if let ty::ReLateBound(debruijn, _) = *r {
diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs
index 84134be..70a8157 100644
--- a/compiler/rustc_middle/src/ty/fold.rs
+++ b/compiler/rustc_middle/src/ty/fold.rs
@@ -30,8 +30,6 @@
 //!
 //! These methods return true to indicate that the visitor has found what it is
 //! looking for, and does not need to visit anything else.
-
-use crate::ty::structural_impls::PredicateVisitor;
 use crate::ty::{self, flags::FlagComputation, Binder, Ty, TyCtxt, TypeFlags};
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
@@ -39,6 +37,7 @@
 use rustc_data_structures::fx::FxHashSet;
 use std::collections::BTreeMap;
 use std::fmt;
+use std::ops::ControlFlow;
 
 /// This trait is implemented for every type that can be folded.
 /// Basically, every type that has a corresponding method in `TypeFolder`.
@@ -50,8 +49,8 @@
         self.super_fold_with(folder)
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool;
-    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()>;
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         self.super_visit_with(visitor)
     }
 
@@ -60,7 +59,7 @@
     /// If `binder` is `ty::INNERMOST`, this indicates whether
     /// there are any late-bound regions that appear free.
     fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool {
-        self.visit_with(&mut HasEscapingVarsVisitor { outer_index: binder })
+        self.visit_with(&mut HasEscapingVarsVisitor { outer_index: binder }).is_break()
     }
 
     /// Returns `true` if this `self` has any regions that escape `binder` (and
@@ -74,7 +73,7 @@
     }
 
     fn has_type_flags(&self, flags: TypeFlags) -> bool {
-        self.visit_with(&mut HasTypeFlagsVisitor { flags })
+        self.visit_with(&mut HasTypeFlagsVisitor { flags }).is_break()
     }
     fn has_projections(&self) -> bool {
         self.has_type_flags(TypeFlags::HAS_PROJECTION)
@@ -97,9 +96,6 @@
     fn has_infer_types_or_consts(&self) -> bool {
         self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_CT_INFER)
     }
-    fn has_infer_consts(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_CT_INFER)
-    }
     fn needs_infer(&self) -> bool {
         self.has_type_flags(TypeFlags::NEEDS_INFER)
     }
@@ -113,9 +109,6 @@
     fn needs_subst(&self) -> bool {
         self.has_type_flags(TypeFlags::NEEDS_SUBST)
     }
-    fn has_re_placeholders(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_RE_PLACEHOLDER)
-    }
     /// "Free" regions in this context means that it has any region
     /// that is not (a) erased or (b) late-bound.
     fn has_free_regions(&self) -> bool {
@@ -151,11 +144,11 @@
     }
 
     /// A visitor that does not recurse into types, works like `fn walk_shallow` in `Ty`.
-    fn visit_tys_shallow(&self, visit: impl FnMut(Ty<'tcx>) -> bool) -> bool {
+    fn visit_tys_shallow(&self, visit: impl FnMut(Ty<'tcx>) -> ControlFlow<()>) -> ControlFlow<()> {
         pub struct Visitor<F>(F);
 
-        impl<'tcx, F: FnMut(Ty<'tcx>) -> bool> TypeVisitor<'tcx> for Visitor<F> {
-            fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+        impl<'tcx, F: FnMut(Ty<'tcx>) -> ControlFlow<()>> TypeVisitor<'tcx> for Visitor<F> {
+            fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
                 self.0(ty)
             }
         }
@@ -168,8 +161,8 @@
     fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self {
         *self
     }
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
-        false
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<()> {
+        ControlFlow::CONTINUE
     }
 }
 
@@ -202,21 +195,25 @@
 }
 
 pub trait TypeVisitor<'tcx>: Sized {
-    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> ControlFlow<()> {
         t.super_visit_with(self)
     }
 
-    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
         t.super_visit_with(self)
     }
 
-    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
         r.super_visit_with(self)
     }
 
-    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> {
         c.super_visit_with(self)
     }
+
+    fn visit_predicate(&mut self, p: ty::Predicate<'tcx>) -> ControlFlow<()> {
+        p.super_visit_with(self)
+    }
 }
 
 ///////////////////////////////////////////////////////////////////////////
@@ -306,8 +303,6 @@
         value: &impl TypeFoldable<'tcx>,
         callback: impl FnMut(ty::Region<'tcx>) -> bool,
     ) -> bool {
-        return value.visit_with(&mut RegionVisitor { outer_index: ty::INNERMOST, callback });
-
         struct RegionVisitor<F> {
             /// The index of a binder *just outside* the things we have
             /// traversed. If we encounter a bound region bound by this
@@ -334,31 +329,39 @@
         where
             F: FnMut(ty::Region<'tcx>) -> bool,
         {
-            fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+            fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> ControlFlow<()> {
                 self.outer_index.shift_in(1);
                 let result = t.as_ref().skip_binder().visit_with(self);
                 self.outer_index.shift_out(1);
                 result
             }
 
-            fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+            fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
                 match *r {
                     ty::ReLateBound(debruijn, _) if debruijn < self.outer_index => {
-                        false // ignore bound regions, keep visiting
+                        ControlFlow::CONTINUE
                     }
-                    _ => (self.callback)(r),
+                    _ => {
+                        if (self.callback)(r) {
+                            ControlFlow::BREAK
+                        } else {
+                            ControlFlow::CONTINUE
+                        }
+                    }
                 }
             }
 
-            fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+            fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
                 // We're only interested in types involving regions
                 if ty.flags().intersects(TypeFlags::HAS_FREE_REGIONS) {
                     ty.super_visit_with(self)
                 } else {
-                    false // keep visiting
+                    ControlFlow::CONTINUE
                 }
             }
         }
+
+        value.visit_with(&mut RegionVisitor { outer_index: ty::INNERMOST, callback }).is_break()
     }
 }
 
@@ -674,7 +677,7 @@
     {
         let mut collector = LateBoundRegionsCollector::new(just_constraint);
         let result = value.as_ref().skip_binder().visit_with(&mut collector);
-        assert!(!result); // should never have stopped early
+        assert!(result.is_continue()); // should never have stopped early
         collector.regions
     }
 
@@ -688,7 +691,7 @@
     }
 
     /// Rewrite any late-bound regions so that they are anonymous. Region numbers are
-    /// assigned starting at 1 and increasing monotonically in the order traversed
+    /// assigned starting at 0 and increasing monotonically in the order traversed
     /// by the fold operation.
     ///
     /// The chief purpose of this function is to canonicalize regions so that two
@@ -702,8 +705,9 @@
         let mut counter = 0;
         Binder::bind(
             self.replace_late_bound_regions(sig, |_| {
+                let r = self.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BrAnon(counter)));
                 counter += 1;
-                self.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BrAnon(counter)))
+                r
             })
             .0,
         )
@@ -719,21 +723,15 @@
 // vars. See comment on `shift_vars_through_binders` method in
 // `subst.rs` for more details.
 
-enum Direction {
-    In,
-    Out,
-}
-
 struct Shifter<'tcx> {
     tcx: TyCtxt<'tcx>,
     current_index: ty::DebruijnIndex,
     amount: u32,
-    direction: Direction,
 }
 
 impl Shifter<'tcx> {
-    pub fn new(tcx: TyCtxt<'tcx>, amount: u32, direction: Direction) -> Self {
-        Shifter { tcx, current_index: ty::INNERMOST, amount, direction }
+    pub fn new(tcx: TyCtxt<'tcx>, amount: u32) -> Self {
+        Shifter { tcx, current_index: ty::INNERMOST, amount }
     }
 }
 
@@ -755,13 +753,7 @@
                 if self.amount == 0 || debruijn < self.current_index {
                     r
                 } else {
-                    let debruijn = match self.direction {
-                        Direction::In => debruijn.shifted_in(self.amount),
-                        Direction::Out => {
-                            assert!(debruijn.as_u32() >= self.amount);
-                            debruijn.shifted_out(self.amount)
-                        }
-                    };
+                    let debruijn = debruijn.shifted_in(self.amount);
                     let shifted = ty::ReLateBound(debruijn, br);
                     self.tcx.mk_region(shifted)
                 }
@@ -776,13 +768,7 @@
                 if self.amount == 0 || debruijn < self.current_index {
                     ty
                 } else {
-                    let debruijn = match self.direction {
-                        Direction::In => debruijn.shifted_in(self.amount),
-                        Direction::Out => {
-                            assert!(debruijn.as_u32() >= self.amount);
-                            debruijn.shifted_out(self.amount)
-                        }
-                    };
+                    let debruijn = debruijn.shifted_in(self.amount);
                     self.tcx.mk_ty(ty::Bound(debruijn, bound_ty))
                 }
             }
@@ -796,13 +782,7 @@
             if self.amount == 0 || debruijn < self.current_index {
                 ct
             } else {
-                let debruijn = match self.direction {
-                    Direction::In => debruijn.shifted_in(self.amount),
-                    Direction::Out => {
-                        assert!(debruijn.as_u32() >= self.amount);
-                        debruijn.shifted_out(self.amount)
-                    }
-                };
+                let debruijn = debruijn.shifted_in(self.amount);
                 self.tcx.mk_const(ty::Const { val: ty::ConstKind::Bound(debruijn, bound_ct), ty })
             }
         } else {
@@ -830,16 +810,7 @@
 {
     debug!("shift_vars(value={:?}, amount={})", value, amount);
 
-    value.fold_with(&mut Shifter::new(tcx, amount, Direction::In))
-}
-
-pub fn shift_out_vars<'tcx, T>(tcx: TyCtxt<'tcx>, value: &T, amount: u32) -> T
-where
-    T: TypeFoldable<'tcx>,
-{
-    debug!("shift_out_vars(value={:?}, amount={})", value, amount);
-
-    value.fold_with(&mut Shifter::new(tcx, amount, Direction::Out))
+    value.fold_with(&mut Shifter::new(tcx, amount))
 }
 
 /// An "escaping var" is a bound var whose binder is not part of `t`. A bound var can be a
@@ -873,45 +844,55 @@
 }
 
 impl<'tcx> TypeVisitor<'tcx> for HasEscapingVarsVisitor {
-    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> ControlFlow<()> {
         self.outer_index.shift_in(1);
         let result = t.super_visit_with(self);
         self.outer_index.shift_out(1);
         result
     }
 
-    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
         // If the outer-exclusive-binder is *strictly greater* than
         // `outer_index`, that means that `t` contains some content
         // bound at `outer_index` or above (because
         // `outer_exclusive_binder` is always 1 higher than the
         // content in `t`). Therefore, `t` has some escaping vars.
-        t.outer_exclusive_binder > self.outer_index
+        if t.outer_exclusive_binder > self.outer_index {
+            ControlFlow::BREAK
+        } else {
+            ControlFlow::CONTINUE
+        }
     }
 
-    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
         // If the region is bound by `outer_index` or anything outside
         // of outer index, then it escapes the binders we have
         // visited.
-        r.bound_at_or_above_binder(self.outer_index)
+        if r.bound_at_or_above_binder(self.outer_index) {
+            ControlFlow::BREAK
+        } else {
+            ControlFlow::CONTINUE
+        }
     }
 
-    fn visit_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> bool {
+    fn visit_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> ControlFlow<()> {
         // we don't have a `visit_infer_const` callback, so we have to
         // hook in here to catch this case (annoying...), but
         // otherwise we do want to remember to visit the rest of the
         // const, as it has types/regions embedded in a lot of other
         // places.
         match ct.val {
-            ty::ConstKind::Bound(debruijn, _) if debruijn >= self.outer_index => true,
+            ty::ConstKind::Bound(debruijn, _) if debruijn >= self.outer_index => ControlFlow::BREAK,
             _ => ct.super_visit_with(self),
         }
     }
-}
 
-impl<'tcx> PredicateVisitor<'tcx> for HasEscapingVarsVisitor {
-    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool {
-        predicate.inner.outer_exclusive_binder > self.outer_index
+    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<()> {
+        if predicate.inner.outer_exclusive_binder > self.outer_index {
+            ControlFlow::BREAK
+        } else {
+            ControlFlow::CONTINUE
+        }
     }
 }
 
@@ -921,38 +902,41 @@
 }
 
 impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
-    fn visit_ty(&mut self, t: Ty<'_>) -> bool {
+    fn visit_ty(&mut self, t: Ty<'_>) -> ControlFlow<()> {
         debug!(
             "HasTypeFlagsVisitor: t={:?} t.flags={:?} self.flags={:?}",
             t,
             t.flags(),
             self.flags
         );
-        t.flags().intersects(self.flags)
+        if t.flags().intersects(self.flags) { ControlFlow::BREAK } else { ControlFlow::CONTINUE }
     }
 
-    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
         let flags = r.type_flags();
         debug!("HasTypeFlagsVisitor: r={:?} r.flags={:?} self.flags={:?}", r, flags, self.flags);
-        flags.intersects(self.flags)
+        if flags.intersects(self.flags) { ControlFlow::BREAK } else { ControlFlow::CONTINUE }
     }
 
-    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> {
         let flags = FlagComputation::for_const(c);
         debug!("HasTypeFlagsVisitor: c={:?} c.flags={:?} self.flags={:?}", c, flags, self.flags);
-        flags.intersects(self.flags)
+        if flags.intersects(self.flags) { ControlFlow::BREAK } else { ControlFlow::CONTINUE }
     }
-}
 
-impl<'tcx> PredicateVisitor<'tcx> for HasTypeFlagsVisitor {
-    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool {
+    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<()> {
         debug!(
             "HasTypeFlagsVisitor: predicate={:?} predicate.flags={:?} self.flags={:?}",
             predicate, predicate.inner.flags, self.flags
         );
-        predicate.inner.flags.intersects(self.flags)
+        if predicate.inner.flags.intersects(self.flags) {
+            ControlFlow::BREAK
+        } else {
+            ControlFlow::CONTINUE
+        }
     }
 }
+
 /// Collects all the late-bound regions at the innermost binding level
 /// into a hash set.
 struct LateBoundRegionsCollector {
@@ -980,45 +964,45 @@
 }
 
 impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector {
-    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> ControlFlow<()> {
         self.current_index.shift_in(1);
         let result = t.super_visit_with(self);
         self.current_index.shift_out(1);
         result
     }
 
-    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
         // if we are only looking for "constrained" region, we have to
         // ignore the inputs to a projection, as they may not appear
         // in the normalized form
         if self.just_constrained {
             if let ty::Projection(..) | ty::Opaque(..) = t.kind() {
-                return false;
+                return ControlFlow::CONTINUE;
             }
         }
 
         t.super_visit_with(self)
     }
 
-    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> {
         // if we are only looking for "constrained" region, we have to
         // ignore the inputs of an unevaluated const, as they may not appear
         // in the normalized form
         if self.just_constrained {
             if let ty::ConstKind::Unevaluated(..) = c.val {
-                return false;
+                return ControlFlow::CONTINUE;
             }
         }
 
         c.super_visit_with(self)
     }
 
-    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
         if let ty::ReLateBound(debruijn, br) = *r {
             if debruijn == self.current_index {
                 self.regions.insert(br);
             }
         }
-        false
+        ControlFlow::CONTINUE
     }
 }
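
Type visitors now return `std::ops::ControlFlow<()>` instead of `bool`, making the short-circuit explicit: `Break(())` means "found what I was looking for, stop", `Continue(())` means "keep walking", and callers such as `has_type_flags` convert back with `.is_break()`. A minimal standalone sketch of the idiom over a toy type tree (the `Ty` enum and `visit` function below are hypothetical, not rustc's `Ty<'tcx>` or `TypeVisitor`):

    use std::ops::ControlFlow;

    // Toy stand-in for rustc's type tree; not the real `Ty<'tcx>`.
    enum Ty {
        Bool,
        Infer,
        Ref(Box<Ty>),
    }

    // Returns Break as soon as an inference variable is found, mirroring how
    // visitors like HasTypeFlagsVisitor now signal a successful search.
    fn visit(ty: &Ty) -> ControlFlow<()> {
        match ty {
            Ty::Infer => ControlFlow::Break(()),
            Ty::Bool => ControlFlow::Continue(()),
            Ty::Ref(inner) => visit(inner),
        }
    }

    fn main() {
        let ty = Ty::Ref(Box::new(Ty::Infer));
        // `.is_break()` replaces the old check on the bool-returning API.
        assert!(visit(&ty).is_break());
    }
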
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
index 2c1179c..2f7707b 100644
--- a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
+++ b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
@@ -104,14 +104,6 @@
         // ```
         ty.uninhabited_from(self, param_env).contains(self, module)
     }
-
-    pub fn is_ty_uninhabited_from_any_module(
-        self,
-        ty: Ty<'tcx>,
-        param_env: ty::ParamEnv<'tcx>,
-    ) -> bool {
-        !ty.uninhabited_from(self, param_env).is_empty()
-    }
 }
 
 impl<'tcx> AdtDef {
@@ -209,13 +201,13 @@
             ),
 
             Array(ty, len) => match len.try_eval_usize(tcx, param_env) {
+                Some(0) | None => DefIdForest::empty(),
                 // If the array is definitely non-empty, it's uninhabited if
                 // the type of its elements is uninhabited.
-                Some(n) if n != 0 => ty.uninhabited_from(tcx, param_env),
-                _ => DefIdForest::empty(),
+                Some(1..) => ty.uninhabited_from(tcx, param_env),
             },
 
-            // References to uninitialised memory is valid for any type, including
+            // References to uninitialised memory are valid for any type, including
             // uninhabited types, in unsafe code, so we treat all references as
             // inhabited.
             // The precise semantics of inhabitedness with respect to references is currently
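
The rewritten `Array(ty, len)` arm above uses a half-open range pattern, `Some(1..)`, to match any statically known non-zero length, while a length of zero or an unknown length falls into the empty-forest arm. A small standalone example of the same pattern shape:

    // Minimal illustration of the half-open range pattern used in the
    // `Array(ty, len)` arm (`Some(1..)` matches any non-zero length).
    fn classify(len: Option<u64>) -> &'static str {
        match len {
            Some(0) | None => "possibly empty",
            Some(1..) => "definitely non-empty",
        }
    }

    fn main() {
        assert_eq!(classify(None), "possibly empty");
        assert_eq!(classify(Some(0)), "possibly empty");
        assert_eq!(classify(Some(3)), "definitely non-empty");
    }
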
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
index a6b6209..306cebd 100644
--- a/compiler/rustc_middle/src/ty/instance.rs
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -1,6 +1,6 @@
 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use crate::ty::print::{FmtPrinter, Printer};
-use crate::ty::subst::InternalSubsts;
+use crate::ty::subst::{InternalSubsts, Subst};
 use crate::ty::{self, SubstsRef, Ty, TyCtxt, TypeFoldable};
 use rustc_errors::ErrorReported;
 use rustc_hir::def::Namespace;
@@ -22,7 +22,8 @@
     pub substs: SubstsRef<'tcx>,
 }
 
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable)]
 pub enum InstanceDef<'tcx> {
     /// A user-defined callable item.
     ///
@@ -183,10 +184,10 @@
             ty::InstanceDef::DropGlue(_, Some(_)) => return false,
             _ => return true,
         };
-        match tcx.def_key(def_id).disambiguated_data.data {
-            DefPathData::Ctor | DefPathData::ClosureExpr => true,
-            _ => false,
-        }
+        matches!(
+            tcx.def_key(def_id).disambiguated_data.data,
+            DefPathData::Ctor | DefPathData::ClosureExpr
+        )
     }
 
     /// Returns `true` if the machine code for this instance is instantiated in
@@ -257,7 +258,7 @@
 impl<'tcx> fmt::Display for Instance<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         ty::tls::with(|tcx| {
-            let substs = tcx.lift(&self.substs).expect("could not lift for printing");
+            let substs = tcx.lift(self.substs).expect("could not lift for printing");
             FmtPrinter::new(tcx, &mut *f, Namespace::ValueNS)
                 .print_def_path(self.def_id(), substs)?;
             Ok(())
@@ -290,7 +291,17 @@
     }
 
     pub fn mono(tcx: TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> {
-        Instance::new(def_id, tcx.empty_substs_for_def_id(def_id))
+        let substs = InternalSubsts::for_item(tcx, def_id, |param, _| match param.kind {
+            ty::GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
+            ty::GenericParamDefKind::Type { .. } => {
+                bug!("Instance::mono: {:?} has type parameters", def_id)
+            }
+            ty::GenericParamDefKind::Const { .. } => {
+                bug!("Instance::mono: {:?} has const parameters", def_id)
+            }
+        });
+
+        Instance::new(def_id, substs)
     }
 
     #[inline]
@@ -459,10 +470,33 @@
     /// This function returns `Some(substs)` in the former case and `None` otherwise -- i.e., if
     /// this function returns `None`, then the MIR body does not require substitution during
     /// codegen.
-    pub fn substs_for_mir_body(&self) -> Option<SubstsRef<'tcx>> {
+    fn substs_for_mir_body(&self) -> Option<SubstsRef<'tcx>> {
         if self.def.has_polymorphic_mir_body() { Some(self.substs) } else { None }
     }
 
+    pub fn subst_mir<T>(&self, tcx: TyCtxt<'tcx>, v: &T) -> T
+    where
+        T: TypeFoldable<'tcx> + Copy,
+    {
+        if let Some(substs) = self.substs_for_mir_body() { v.subst(tcx, substs) } else { *v }
+    }
+
+    pub fn subst_mir_and_normalize_erasing_regions<T>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+        v: &T,
+    ) -> T
+    where
+        T: TypeFoldable<'tcx> + Clone,
+    {
+        if let Some(substs) = self.substs_for_mir_body() {
+            tcx.subst_and_normalize_erasing_regions(substs, param_env, v)
+        } else {
+            tcx.normalize_erasing_regions(param_env, v.clone())
+        }
+    }
+
     /// Returns a new `Instance` where generic parameters in `instance.substs` are replaced by
     /// identify parameters if they are determined to be unused in `instance.def`.
     pub fn polymorphize(self, tcx: TyCtxt<'tcx>) -> Self {
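
Note on the `matches!` rewrites in this file (and in ty/mod.rs below): a match whose arms only produce true or false collapses into a single matches! expression with the same patterns. A standalone sketch with an invented enum and function name:

    enum Kind { Ctor, Closure, Other }

    // Before: match kind { Kind::Ctor | Kind::Closure => true, _ => false }
    // After: one `matches!` expression with the same patterns.
    fn is_shim_like(kind: Kind) -> bool {
        matches!(kind, Kind::Ctor | Kind::Closure)
    }

    fn main() {
        assert!(is_shim_like(Kind::Ctor));
        assert!(is_shim_like(Kind::Closure));
        assert!(!is_shim_like(Kind::Other));
    }
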
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index ee669ed..1e93c36 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -106,7 +106,7 @@
         }
 
         if repr.c() {
-            match &tcx.sess.target.target.arch[..] {
+            match &tcx.sess.target.arch[..] {
                 // WARNING: the ARM EABI has two variants; the one corresponding
                 // to `at_least == I32` appears to be used on Linux and NetBSD,
                 // but some systems may use the variant corresponding to no
@@ -1894,7 +1894,7 @@
     }
 }
 
-pub type TyAndLayout<'tcx> = ::rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
+pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
 
 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
     type Ty = Ty<'tcx>;
@@ -2548,7 +2548,7 @@
         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
 
         use rustc_target::spec::abi::Abi::*;
-        let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
+        let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
 
             // It's the ABI's job to select this, not ours.
@@ -2600,20 +2600,16 @@
             extra_args.to_vec()
         };
 
-        let target = &cx.tcx().sess.target.target;
-        let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl");
-        let win_x64_gnu =
-            target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
+        let target = &cx.tcx().sess.target;
+        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
+        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
         let linux_s390x_gnu_like =
-            target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like;
+            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
         let linux_sparc64_gnu_like =
-            target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
+            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
         let linux_powerpc_gnu_like =
-            target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
-        let rust_abi = match sig.abi {
-            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
-            _ => false,
-        };
+            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
+        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
 
         // Handle safe Rust thin and fat pointers.
         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
@@ -2778,7 +2774,7 @@
                     // anyway, we control all calls to it in libstd.
                     Abi::Vector { .. }
                         if abi != SpecAbi::PlatformIntrinsic
-                            && cx.tcx().sess.target.target.options.simd_types_indirect =>
+                            && cx.tcx().sess.target.simd_types_indirect =>
                     {
                         arg.make_indirect();
                         return;
@@ -2787,8 +2783,9 @@
                     _ => return,
                 }
 
-                let max_by_val_size =
-                    if is_ret { call::max_ret_by_val(cx) } else { Pointer.size(cx) };
+                // Return structures up to 2 pointers in size by value, matching `ScalarPair`. LLVM
+                // will usually return these in 2 registers, which is more efficient than by-ref.
+                let max_by_val_size = if is_ret { Pointer.size(cx) * 2 } else { Pointer.size(cx) };
                 let size = arg.layout.size;
 
                 if arg.layout.is_unsized() || size > max_by_val_size {
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index b7530c0..0042b4a 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -7,7 +7,6 @@
 
 use crate::hir::exports::ExportMap;
 use crate::ich::StableHashingContext;
-use crate::infer::canonical::Canonical;
 use crate::middle::cstore::CrateStoreDyn;
 use crate::middle::resolve_lifetime::ObjectLifetimeDefault;
 use crate::mir::interpret::ErrorHandled;
@@ -47,7 +46,7 @@
 use std::cmp::Ordering;
 use std::fmt;
 use std::hash::{Hash, Hasher};
-use std::ops::Range;
+use std::ops::{ControlFlow, Range};
 use std::ptr;
 use std::str;
 
@@ -88,7 +87,7 @@
 
 pub use self::query::queries;
 
-pub use self::consts::{Const, ConstInt, ConstKind, InferConst};
+pub use self::consts::{Const, ConstInt, ConstKind, InferConst, ScalarInt};
 
 pub mod _match;
 pub mod adjustment;
@@ -126,6 +125,7 @@
 pub struct ResolverOutputs {
     pub definitions: rustc_hir::definitions::Definitions,
     pub cstore: Box<CrateStoreDyn>,
+    pub visibilities: FxHashMap<LocalDefId, Visibility>,
     pub extern_crate_map: FxHashMap<LocalDefId, CrateNum>,
     pub maybe_unused_trait_imports: FxHashSet<LocalDefId>,
     pub maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
@@ -266,6 +266,10 @@
         self.items.iter().map(|(_, v)| *v)
     }
 
+    pub fn len(&self) -> usize {
+        self.items.len()
+    }
+
     /// Returns an iterator over all associated items with the given name, ignoring hygiene.
     pub fn filter_by_name_unhygienic(
         &self,
@@ -656,8 +660,6 @@
 #[rustc_diagnostic_item = "Ty"]
 pub type Ty<'tcx> = &'tcx TyS<'tcx>;
 
-pub type CanonicalTy<'tcx> = Canonical<'tcx, Ty<'tcx>>;
-
 #[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
 pub struct UpvarPath {
     pub hir_id: hir::HirId,
@@ -767,10 +769,6 @@
 pub struct FloatVarValue(pub ast::FloatTy);
 
 impl ty::EarlyBoundRegion {
-    pub fn to_bound_region(&self) -> ty::BoundRegion {
-        ty::BoundRegion::BrNamed(self.def_id, self.name)
-    }
-
     /// Does this early bound region have a name? Early bound regions normally
     /// always have names except when using anonymous lifetimes (`'_`).
     pub fn has_name(&self) -> bool {
@@ -821,14 +819,6 @@
             bug!("cannot convert a non-lifetime parameter def to an early bound region")
         }
     }
-
-    pub fn to_bound_region(&self) -> ty::BoundRegion {
-        if let GenericParamDefKind::Lifetime = self.kind {
-            self.to_early_bound_region_data().to_bound_region()
-        } else {
-            bug!("cannot convert a non-lifetime parameter def to an early bound region")
-        }
-    }
 }
 
 #[derive(Default)]
@@ -1003,22 +993,6 @@
         instantiated.predicates.extend(self.predicates.iter().map(|(p, _)| p));
         instantiated.spans.extend(self.predicates.iter().map(|(_, s)| s));
     }
-
-    pub fn instantiate_supertrait(
-        &self,
-        tcx: TyCtxt<'tcx>,
-        poly_trait_ref: &ty::PolyTraitRef<'tcx>,
-    ) -> InstantiatedPredicates<'tcx> {
-        assert_eq!(self.parent, None);
-        InstantiatedPredicates {
-            predicates: self
-                .predicates
-                .iter()
-                .map(|(pred, _)| pred.subst_supertrait(tcx, poly_trait_ref))
-                .collect(),
-            spans: self.predicates.iter().map(|(_, sp)| *sp).collect(),
-        }
-    }
 }
 
 #[derive(Debug)]
@@ -1087,9 +1061,21 @@
         }
     }
 
+    /// Converts this to a `Binder<PredicateAtom<'tcx>>`. If the value was an
+    /// `Atom`, then it is not allowed to contain escaping bound vars.
+    pub fn bound_atom(self) -> Binder<PredicateAtom<'tcx>> {
+        match self.kind() {
+            &PredicateKind::ForAll(binder) => binder,
+            &PredicateKind::Atom(atom) => {
+                debug_assert!(!atom.has_escaping_bound_vars());
+                Binder::dummy(atom)
+            }
+        }
+    }
+
     /// Allows using a `Binder<PredicateAtom<'tcx>>` even if the given predicate previously
     /// contained unbound variables by shifting these variables outwards.
-    pub fn bound_atom(self, tcx: TyCtxt<'tcx>) -> Binder<PredicateAtom<'tcx>> {
+    pub fn bound_atom_with_opt_escaping(self, tcx: TyCtxt<'tcx>) -> Binder<PredicateAtom<'tcx>> {
         match self.kind() {
             &PredicateKind::ForAll(binder) => binder,
             &PredicateKind::Atom(atom) => Binder::wrap_nonbinding(tcx, atom),
@@ -1303,7 +1289,6 @@
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
 #[derive(HashStable, TypeFoldable)]
 pub struct OutlivesPredicate<A, B>(pub A, pub B); // `A: B`
-pub type PolyOutlivesPredicate<A, B> = ty::Binder<OutlivesPredicate<A, B>>;
 pub type RegionOutlivesPredicate<'tcx> = OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>;
 pub type TypeOutlivesPredicate<'tcx> = OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>;
 pub type PolyRegionOutlivesPredicate<'tcx> = ty::Binder<RegionOutlivesPredicate<'tcx>>;
@@ -1656,7 +1641,7 @@
 #[derive(Hash, HashStable)]
 pub struct WithOptConstParam<T> {
     pub did: T,
-    /// The `DefId` of the corresponding generic paramter in case `did` is
+    /// The `DefId` of the corresponding generic parameter in case `did` is
     /// a const argument.
     ///
     /// Note that even if `did` is a const argument, this may still be `None`.
@@ -1791,8 +1776,9 @@
         ParamEnv::new(self.caller_bounds().fold_with(folder), self.reveal().fold_with(folder))
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.caller_bounds().visit_with(visitor) || self.reveal().visit_with(visitor)
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.caller_bounds().visit_with(visitor)?;
+        self.reveal().visit_with(visitor)
     }
 }
 
@@ -2468,8 +2454,10 @@
         self.variants.iter().flat_map(|v| v.fields.iter())
     }
 
+    /// Whether the ADT lacks fields. Note that this includes uninhabited enums,
+    /// e.g., `enum Void {}` is considered payload-free as well.
     pub fn is_payloadfree(&self) -> bool {
-        !self.variants.is_empty() && self.variants.iter().all(|v| v.fields.is_empty())
+        self.variants.iter().all(|v| v.fields.is_empty())
     }
 
     /// Return a `VariantDef` given a variant id.
@@ -2681,15 +2669,15 @@
     /// Returns `true` if a type that impls this closure kind
     /// must also implement `other`.
     pub fn extends(self, other: ty::ClosureKind) -> bool {
-        match (self, other) {
-            (ClosureKind::Fn, ClosureKind::Fn) => true,
-            (ClosureKind::Fn, ClosureKind::FnMut) => true,
-            (ClosureKind::Fn, ClosureKind::FnOnce) => true,
-            (ClosureKind::FnMut, ClosureKind::FnMut) => true,
-            (ClosureKind::FnMut, ClosureKind::FnOnce) => true,
-            (ClosureKind::FnOnce, ClosureKind::FnOnce) => true,
-            _ => false,
-        }
+        matches!(
+            (self, other),
+            (ClosureKind::Fn, ClosureKind::Fn)
+                | (ClosureKind::Fn, ClosureKind::FnMut)
+                | (ClosureKind::Fn, ClosureKind::FnOnce)
+                | (ClosureKind::FnMut, ClosureKind::FnMut)
+                | (ClosureKind::FnMut, ClosureKind::FnOnce)
+                | (ClosureKind::FnOnce, ClosureKind::FnOnce)
+        )
     }
 
     /// Returns the representative scalar type for this closure kind.
@@ -2807,23 +2795,63 @@
             .filter(|item| item.kind == AssocKind::Fn && item.defaultness.has_value())
     }
 
+    fn item_name_from_hir(self, def_id: DefId) -> Option<Ident> {
+        self.hir().get_if_local(def_id).and_then(|node| node.ident())
+    }
+
+    fn item_name_from_def_id(self, def_id: DefId) -> Option<Symbol> {
+        if def_id.index == CRATE_DEF_INDEX {
+            Some(self.original_crate_name(def_id.krate))
+        } else {
+            let def_key = self.def_key(def_id);
+            match def_key.disambiguated_data.data {
+                // The name of a constructor is that of its parent.
+                rustc_hir::definitions::DefPathData::Ctor => self.item_name_from_def_id(DefId {
+                    krate: def_id.krate,
+                    index: def_key.parent.unwrap(),
+                }),
+                _ => def_key.disambiguated_data.data.get_opt_name(),
+            }
+        }
+    }
+
+    /// Look up the name of an item across crates. This does not look at HIR.
+    ///
+    /// When possible, this function should be used for cross-crate lookups over
+    /// [`opt_item_name`] to avoid invalidating the incremental cache. If you
+    /// need to handle items without a name, or HIR items that will not be
+    /// serialized cross-crate, or if you need the span of the item, use
+    /// [`opt_item_name`] instead.
+    ///
+    /// [`opt_item_name`]: Self::opt_item_name
+    pub fn item_name(self, id: DefId) -> Symbol {
+        // Look at cross-crate items first to avoid invalidating the incremental cache
+        // unless we have to.
+        self.item_name_from_def_id(id).unwrap_or_else(|| {
+            bug!("item_name: no name for {:?}", self.def_path(id));
+        })
+    }
+
+    /// Look up the name and span of an item or [`Node`].
+    ///
+    /// See [`item_name`][Self::item_name] for more information.
     pub fn opt_item_name(self, def_id: DefId) -> Option<Ident> {
-        def_id
-            .as_local()
-            .and_then(|def_id| self.hir().get(self.hir().local_def_id_to_hir_id(def_id)).ident())
+        // Look at the HIR first so the span will be correct if this is a local item.
+        self.item_name_from_hir(def_id)
+            .or_else(|| self.item_name_from_def_id(def_id).map(Ident::with_dummy_span))
     }
 
     pub fn opt_associated_item(self, def_id: DefId) -> Option<&'tcx AssocItem> {
         let is_associated_item = if let Some(def_id) = def_id.as_local() {
-            match self.hir().get(self.hir().local_def_id_to_hir_id(def_id)) {
-                Node::TraitItem(_) | Node::ImplItem(_) => true,
-                _ => false,
-            }
+            matches!(
+                self.hir().get(self.hir().local_def_id_to_hir_id(def_id)),
+                Node::TraitItem(_) | Node::ImplItem(_)
+            )
         } else {
-            match self.def_kind(def_id) {
-                DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy => true,
-                _ => false,
-            }
+            matches!(
+                self.def_kind(def_id),
+                DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy
+            )
         };
 
         is_associated_item.then(|| self.associated_item(def_id))
@@ -2933,33 +2961,10 @@
         }
     }
 
-    pub fn item_name(self, id: DefId) -> Symbol {
-        if id.index == CRATE_DEF_INDEX {
-            self.original_crate_name(id.krate)
-        } else {
-            let def_key = self.def_key(id);
-            match def_key.disambiguated_data.data {
-                // The name of a constructor is that of its parent.
-                rustc_hir::definitions::DefPathData::Ctor => {
-                    self.item_name(DefId { krate: id.krate, index: def_key.parent.unwrap() })
-                }
-                _ => def_key.disambiguated_data.data.get_opt_name().unwrap_or_else(|| {
-                    bug!("item_name: no name for {:?}", self.def_path(id));
-                }),
-            }
-        }
-    }
-
     /// Returns the possibly-auto-generated MIR of a `(DefId, Subst)` pair.
     pub fn instance_mir(self, instance: ty::InstanceDef<'tcx>) -> &'tcx Body<'tcx> {
         match instance {
-            ty::InstanceDef::Item(def) => {
-                if let Some((did, param_did)) = def.as_const_arg() {
-                    self.optimized_mir_of_const_arg((did, param_did))
-                } else {
-                    self.optimized_mir(def.did)
-                }
-            }
+            ty::InstanceDef::Item(def) => self.optimized_mir_opt_const_arg(def),
             ty::InstanceDef::VtableShim(..)
             | ty::InstanceDef::ReifyShim(..)
             | ty::InstanceDef::Intrinsic(..)
@@ -3034,10 +3039,12 @@
                 .hygienic_eq(def_name.span.ctxt(), self.expansion_that_defined(def_parent_def_id))
     }
 
-    fn expansion_that_defined(self, scope: DefId) -> ExpnId {
+    pub fn expansion_that_defined(self, scope: DefId) -> ExpnId {
         match scope.as_local() {
+            // Parsing and expansion aren't incremental, so we don't
+            // need to go through a query for the same-crate case.
             Some(scope) => self.hir().definitions().expansion_that_defined(scope),
-            None => ExpnId::root(),
+            None => self.expn_that_defined(scope),
         }
     }
 
diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
index 48a62b6..a594a8a 100644
--- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
+++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
@@ -23,7 +23,7 @@
     {
         debug!(
             "normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
-            ::std::any::type_name::<T>(),
+            std::any::type_name::<T>(),
             value,
             param_env,
         );
diff --git a/compiler/rustc_middle/src/ty/outlives.rs b/compiler/rustc_middle/src/ty/outlives.rs
index ca992d3..86750d5 100644
--- a/compiler/rustc_middle/src/ty/outlives.rs
+++ b/compiler/rustc_middle/src/ty/outlives.rs
@@ -4,7 +4,7 @@
 
 use crate::ty::subst::{GenericArg, GenericArgKind};
 use crate::ty::{self, Ty, TyCtxt, TypeFoldable};
-use rustc_data_structures::mini_set::MiniSet;
+use rustc_data_structures::sso::SsoHashSet;
 use smallvec::SmallVec;
 
 #[derive(Debug)]
@@ -51,7 +51,7 @@
     /// Push onto `out` all the things that must outlive `'a` for the condition
     /// `ty0: 'a` to hold. Note that `ty0` must be a **fully resolved type**.
     pub fn push_outlives_components(self, ty0: Ty<'tcx>, out: &mut SmallVec<[Component<'tcx>; 4]>) {
-        let mut visited = MiniSet::new();
+        let mut visited = SsoHashSet::new();
         compute_components(self, ty0, out, &mut visited);
         debug!("components({:?}) = {:?}", ty0, out);
     }
@@ -61,7 +61,7 @@
     tcx: TyCtxt<'tcx>,
     ty: Ty<'tcx>,
     out: &mut SmallVec<[Component<'tcx>; 4]>,
-    visited: &mut MiniSet<GenericArg<'tcx>>,
+    visited: &mut SsoHashSet<GenericArg<'tcx>>,
 ) {
     // Descend through the types, looking for the various "base"
     // components and collecting them into `out`. This is not written
@@ -96,16 +96,14 @@
             }
 
             ty::Closure(_, ref substs) => {
-                for upvar_ty in substs.as_closure().upvar_tys() {
-                    compute_components(tcx, upvar_ty, out, visited);
-                }
+                let tupled_ty = substs.as_closure().tupled_upvars_ty();
+                compute_components(tcx, tupled_ty, out, visited);
             }
 
             ty::Generator(_, ref substs, _) => {
                 // Same as the closure case
-                for upvar_ty in substs.as_generator().upvar_tys() {
-                    compute_components(tcx, upvar_ty, out, visited);
-                }
+                let tupled_ty = substs.as_generator().tupled_upvars_ty();
+                compute_components(tcx, tupled_ty, out, visited);
 
                 // We ignore regions in the generator interior as we don't
                 // want these to affect region inference
@@ -142,7 +140,7 @@
                     // OutlivesProjectionComponents.  Continue walking
                     // through and constrain Pi.
                     let mut subcomponents = smallvec![];
-                    let mut subvisited = MiniSet::new();
+                    let mut subvisited = SsoHashSet::new();
                     compute_components_recursive(tcx, ty.into(), &mut subcomponents, &mut subvisited);
                     out.push(Component::EscapingProjection(subcomponents.into_iter().collect()));
                 }
@@ -194,7 +192,7 @@
     tcx: TyCtxt<'tcx>,
     parent: GenericArg<'tcx>,
     out: &mut SmallVec<[Component<'tcx>; 4]>,
-    visited: &mut MiniSet<GenericArg<'tcx>>,
+    visited: &mut SsoHashSet<GenericArg<'tcx>>,
 ) {
     for child in parent.walk_shallow(visited) {
         match child.unpack() {
diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs
index 225ea23..2e00be2 100644
--- a/compiler/rustc_middle/src/ty/print/mod.rs
+++ b/compiler/rustc_middle/src/ty/print/mod.rs
@@ -2,7 +2,7 @@
 use crate::ty::{self, DefIdTree, Ty, TyCtxt};
 
 use rustc_data_structures::fx::FxHashSet;
-use rustc_data_structures::mini_set::MiniSet;
+use rustc_data_structures::sso::SsoHashSet;
 use rustc_hir::def_id::{CrateNum, DefId};
 use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
 
@@ -269,7 +269,7 @@
 /// deeply nested tuples that have no DefId.
 fn characteristic_def_id_of_type_cached<'a>(
     ty: Ty<'a>,
-    visited: &mut MiniSet<Ty<'a>>,
+    visited: &mut SsoHashSet<Ty<'a>>,
 ) -> Option<DefId> {
     match *ty.kind() {
         ty::Adt(adt_def, _) => Some(adt_def.did),
@@ -316,7 +316,7 @@
     }
 }
 pub fn characteristic_def_id_of_type(ty: Ty<'_>) -> Option<DefId> {
-    characteristic_def_id_of_type_cached(ty, &mut MiniSet::new())
+    characteristic_def_id_of_type_cached(ty, &mut SsoHashSet::new())
 }
 
 impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::RegionKind {
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 7b5cf68..1e4fd09 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -1,12 +1,9 @@
 use crate::middle::cstore::{ExternCrate, ExternCrateSource};
 use crate::mir::interpret::{AllocId, ConstValue, GlobalAlloc, Pointer, Scalar};
-use crate::ty::layout::IntegerExt;
 use crate::ty::subst::{GenericArg, GenericArgKind, Subst};
-use crate::ty::{self, ConstInt, DefIdTree, ParamConst, Ty, TyCtxt, TypeFoldable};
+use crate::ty::{self, ConstInt, DefIdTree, ParamConst, ScalarInt, Ty, TyCtxt, TypeFoldable};
 use rustc_apfloat::ieee::{Double, Single};
-use rustc_apfloat::Float;
 use rustc_ast as ast;
-use rustc_attr::{SignedInt, UnsignedInt};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir as hir;
 use rustc_hir::def::{self, CtorKind, DefKind, Namespace};
@@ -15,19 +12,23 @@
 use rustc_hir::ItemKind;
 use rustc_session::config::TrimmedDefPaths;
 use rustc_span::symbol::{kw, Ident, Symbol};
-use rustc_target::abi::{Integer, Size};
+use rustc_target::abi::Size;
 use rustc_target::spec::abi::Abi;
 
 use std::cell::Cell;
 use std::char;
 use std::collections::BTreeMap;
+use std::convert::TryFrom;
 use std::fmt::{self, Write as _};
-use std::ops::{Deref, DerefMut};
+use std::ops::{ControlFlow, Deref, DerefMut};
 
 // `pretty` is a separate module only for organization.
 use super::*;
 
 macro_rules! p {
+    (@$lit:literal) => {
+        write!(scoped_cx!(), $lit)?
+    };
     (@write($($data:expr),+)) => {
         write!(scoped_cx!(), $($data),+)?
     };
@@ -37,8 +38,8 @@
     (@$method:ident($($arg:expr),*)) => {
         scoped_cx!() = scoped_cx!().$method($($arg),*)?
     };
-    ($($kind:ident $data:tt),+) => {{
-        $(p!(@$kind $data);)+
+    ($($elem:tt $(($($args:tt)*))?),+) => {{
+        $(p!(@ $elem $(($($args)*))?);)+
     }};
 }
 macro_rules! define_scoped_cx {
@@ -62,7 +63,7 @@
 /// Avoids running any queries during any prints that occur
 /// during the closure. This may alter the appearance of some
 /// types (e.g. forcing verbose printing for opaque types).
-/// This method is used during some queries (e.g. `predicates_of`
+/// This method is used during some queries (e.g. `explicit_item_bounds`
 /// for opaque types), to ensure that any debug printing that
 /// occurs during the query computation does not end up recursively
 /// calling the same query.
@@ -478,7 +479,7 @@
 
             p!(print(self_ty));
             if let Some(trait_ref) = trait_ref {
-                p!(write(" as "), print(trait_ref.print_only_trait_path()));
+                p!(" as ", print(trait_ref.print_only_trait_path()));
             }
             Ok(cx)
         })
@@ -495,9 +496,9 @@
         self.generic_delimiters(|mut cx| {
             define_scoped_cx!(cx);
 
-            p!(write("impl "));
+            p!("impl ");
             if let Some(trait_ref) = trait_ref {
-                p!(print(trait_ref.print_only_trait_path()), write(" for "));
+                p!(print(trait_ref.print_only_trait_path()), " for ");
             }
             p!(print(self_ty));
 
@@ -509,8 +510,8 @@
         define_scoped_cx!(self);
 
         match *ty.kind() {
-            ty::Bool => p!(write("bool")),
-            ty::Char => p!(write("char")),
+            ty::Bool => p!("bool"),
+            ty::Char => p!("char"),
             ty::Int(t) => p!(write("{}", t.name_str())),
             ty::Uint(t) => p!(write("{}", t.name_str())),
             ty::Float(t) => p!(write("{}", t.name_str())),
@@ -525,23 +526,23 @@
                 p!(print(tm.ty))
             }
             ty::Ref(r, ty, mutbl) => {
-                p!(write("&"));
+                p!("&");
                 if self.region_should_not_be_omitted(r) {
-                    p!(print(r), write(" "));
+                    p!(print(r), " ");
                 }
                 p!(print(ty::TypeAndMut { ty, mutbl }))
             }
-            ty::Never => p!(write("!")),
+            ty::Never => p!("!"),
             ty::Tuple(ref tys) => {
-                p!(write("("), comma_sep(tys.iter()));
+                p!("(", comma_sep(tys.iter()));
                 if tys.len() == 1 {
-                    p!(write(","));
+                    p!(",");
                 }
-                p!(write(")"))
+                p!(")")
             }
             ty::FnDef(def_id, substs) => {
                 let sig = self.tcx().fn_sig(def_id).subst(self.tcx(), substs);
-                p!(print(sig), write(" {{"), print_value_path(def_id, substs), write("}}"));
+                p!(print(sig), " {{", print_value_path(def_id, substs), "}}");
             }
             ty::FnPtr(ref bare_fn) => p!(print(bare_fn)),
             ty::Infer(infer_ty) => {
@@ -555,7 +556,7 @@
                     p!(write("{}", infer_ty))
                 }
             }
-            ty::Error(_) => p!(write("[type error]")),
+            ty::Error(_) => p!("[type error]"),
             ty::Param(ref param_ty) => p!(write("{}", param_ty)),
             ty::Bound(debruijn, bound_ty) => match bound_ty.kind {
                 ty::BoundTyKind::Anon => self.pretty_print_bound_var(debruijn, bound_ty.var)?,
@@ -567,11 +568,11 @@
             ty::Dynamic(data, r) => {
                 let print_r = self.region_should_not_be_omitted(r);
                 if print_r {
-                    p!(write("("));
+                    p!("(");
                 }
-                p!(write("dyn "), print(data));
+                p!("dyn ", print(data));
                 if print_r {
-                    p!(write(" + "), print(r), write(")"));
+                    p!(" + ", print(r), ")");
                 }
             }
             ty::Foreign(def_id) => {
@@ -597,27 +598,27 @@
                         p!(write("{}", name));
                         // FIXME(eddyb) print this with `print_def_path`.
                         if !substs.is_empty() {
-                            p!(write("::"));
+                            p!("::");
                             p!(generic_delimiters(|cx| cx.comma_sep(substs.iter())));
                         }
                         return Ok(self);
                     }
                     // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`,
                     // by looking up the projections associated with the def_id.
-                    let bounds = self.tcx().predicates_of(def_id).instantiate(self.tcx(), substs);
+                    let bounds = self.tcx().explicit_item_bounds(def_id);
 
                     let mut first = true;
                     let mut is_sized = false;
-                    p!(write("impl"));
-                    for predicate in bounds.predicates {
+                    p!("impl");
+                    for (predicate, _) in bounds {
+                        let predicate = predicate.subst(self.tcx(), substs);
                         // Note: We can't use `to_opt_poly_trait_ref` here as `predicate`
                         // may contain unbound variables. We therefore do this manually.
                         //
                         // FIXME(lcnr): Find out why exactly this is the case :)
-                        if let ty::PredicateAtom::Trait(pred, _) =
-                            predicate.bound_atom(self.tcx()).skip_binder()
-                        {
-                            let trait_ref = ty::Binder::bind(pred.trait_ref);
+                        let bound_predicate = predicate.bound_atom_with_opt_escaping(self.tcx());
+                        if let ty::PredicateAtom::Trait(pred, _) = bound_predicate.skip_binder() {
+                            let trait_ref = bound_predicate.rebind(pred.trait_ref);
                             // Don't print +Sized, but rather +?Sized if absent.
                             if Some(trait_ref.def_id()) == self.tcx().lang_items().sized_trait() {
                                 is_sized = true;
@@ -634,50 +635,45 @@
                     if !is_sized {
                         p!(write("{}?Sized", if first { " " } else { "+" }));
                     } else if first {
-                        p!(write(" Sized"));
+                        p!(" Sized");
                     }
                     Ok(self)
                 })?);
             }
-            ty::Str => p!(write("str")),
+            ty::Str => p!("str"),
             ty::Generator(did, substs, movability) => {
                 p!(write("["));
                 match movability {
                     hir::Movability::Movable => {}
-                    hir::Movability::Static => p!(write("static ")),
+                    hir::Movability::Static => p!("static "),
                 }
 
                 if !self.tcx().sess.verbose() {
-                    p!(write("generator"));
+                    p!("generator");
                     // FIXME(eddyb) should use `def_span`.
                     if let Some(did) = did.as_local() {
                         let hir_id = self.tcx().hir().local_def_id_to_hir_id(did);
                         let span = self.tcx().hir().span(hir_id);
                         p!(write("@{}", self.tcx().sess.source_map().span_to_string(span)));
                     } else {
-                        p!(write("@{}", self.tcx().def_path_str(did)));
+                        p!(write("@"), print_def_path(did, substs));
                     }
                 } else {
                     p!(print_def_path(did, substs));
-                    if substs.as_generator().is_valid() {
-                        // Search for the first inference variable
-                        p!(write(" upvar_tys=("));
-                        let mut uninferred_ty =
-                            substs.as_generator().upvar_tys().filter(|ty| ty.is_ty_infer());
-                        if uninferred_ty.next().is_some() {
-                            p!(write("unavailable"));
-                        } else {
-                            self = self.comma_sep(substs.as_generator().upvar_tys())?;
-                        }
-                        p!(write(")"));
+                    p!(" upvar_tys=(");
+                    if !substs.as_generator().is_valid() {
+                        p!("unavailable");
+                    } else {
+                        self = self.comma_sep(substs.as_generator().upvar_tys())?;
                     }
+                    p!(")");
                 }
 
                 if substs.as_generator().is_valid() {
-                    p!(write(" "), print(substs.as_generator().witness()));
+                    p!(" ", print(substs.as_generator().witness()));
                 }
 
-                p!(write("]"));
+                p!("]")
             }
             ty::GeneratorWitness(types) => {
                 p!(in_binder(&types));
@@ -690,57 +686,50 @@
                     if let Some(did) = did.as_local() {
                         let hir_id = self.tcx().hir().local_def_id_to_hir_id(did);
                         if self.tcx().sess.opts.debugging_opts.span_free_formats {
-                            p!(write("@"), print_def_path(did.to_def_id(), substs));
+                            p!("@", print_def_path(did.to_def_id(), substs));
                         } else {
                             let span = self.tcx().hir().span(hir_id);
                             p!(write("@{}", self.tcx().sess.source_map().span_to_string(span)));
                         }
                     } else {
-                        p!(write("@{}", self.tcx().def_path_str(did)));
+                        p!(write("@"), print_def_path(did, substs));
                     }
                 } else {
                     p!(print_def_path(did, substs));
-                    if substs.as_closure().is_valid() {
-                        // Search for the first inference variable
-                        let mut uninferred_ty =
-                            substs.as_closure().upvar_tys().filter(|ty| ty.is_ty_infer());
-                        if uninferred_ty.next().is_some() {
-                            // If the upvar substs contain an inference variable we haven't
-                            // finished capture analysis.
-                            p!(write(" closure_substs=(unavailable)"));
-                        } else {
-                            p!(write(" closure_kind_ty="), print(substs.as_closure().kind_ty()));
-                            p!(
-                                write(" closure_sig_as_fn_ptr_ty="),
-                                print(substs.as_closure().sig_as_fn_ptr_ty())
-                            );
-                            p!(write(" upvar_tys=("));
-                            self = self.comma_sep(substs.as_closure().upvar_tys())?;
-                            p!(write(")"));
-                        }
+                    if !substs.as_closure().is_valid() {
+                        p!(" closure_substs=(unavailable)");
+                    } else {
+                        p!(" closure_kind_ty=", print(substs.as_closure().kind_ty()));
+                        p!(
+                            " closure_sig_as_fn_ptr_ty=",
+                            print(substs.as_closure().sig_as_fn_ptr_ty())
+                        );
+                        p!(" upvar_tys=(");
+                        self = self.comma_sep(substs.as_closure().upvar_tys())?;
+                        p!(")");
                     }
                 }
-                p!(write("]"));
+                p!("]");
             }
             ty::Array(ty, sz) => {
-                p!(write("["), print(ty), write("; "));
+                p!("[", print(ty), "; ");
                 if self.tcx().sess.verbose() {
                     p!(write("{:?}", sz));
                 } else if let ty::ConstKind::Unevaluated(..) = sz.val {
                     // Do not try to evaluate unevaluated constants. If we are const evaluating an
                     // array length anon const, rustc will (with debug assertions) print the
                     // constant's path. Which will end up here again.
-                    p!(write("_"));
+                    p!("_");
                 } else if let Some(n) = sz.val.try_to_bits(self.tcx().data_layout.pointer_size) {
                     p!(write("{}", n));
                 } else if let ty::ConstKind::Param(param) = sz.val {
                     p!(write("{}", param));
                 } else {
-                    p!(write("_"));
+                    p!("_");
                 }
-                p!(write("]"))
+                p!("]")
             }
-            ty::Slice(ty) => p!(write("["), print(ty), write("]")),
+            ty::Slice(ty) => p!("[", print(ty), "]"),
         }
 
         Ok(self)
@@ -847,7 +836,7 @@
 
         for (_, def_id) in auto_traits {
             if !first {
-                p!(write(" + "));
+                p!(" + ");
             }
             first = false;
 
@@ -865,16 +854,16 @@
     ) -> Result<Self, Self::Error> {
         define_scoped_cx!(self);
 
-        p!(write("("), comma_sep(inputs.iter().copied()));
+        p!("(", comma_sep(inputs.iter().copied()));
         if c_variadic {
             if !inputs.is_empty() {
-                p!(write(", "));
+                p!(", ");
             }
-            p!(write("..."));
+            p!("...");
         }
-        p!(write(")"));
+        p!(")");
         if !output.is_unit() {
-            p!(write(" -> "), print(output));
+            p!(" -> ", print(output));
         }
 
         Ok(self)
@@ -945,7 +934,7 @@
                 self.pretty_print_bound_var(debruijn, bound_var)?
             }
             ty::ConstKind::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
-            ty::ConstKind::Error(_) => p!(write("[const error]")),
+            ty::ConstKind::Error(_) => p!("[const error]"),
         };
         Ok(self)
     }
@@ -969,11 +958,7 @@
                             ty::Array(
                                 ty::TyS { kind: ty::Uint(ast::UintTy::U8), .. },
                                 ty::Const {
-                                    val:
-                                        ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw {
-                                            data,
-                                            ..
-                                        })),
+                                    val: ty::ConstKind::Value(ConstValue::Scalar(int)),
                                     ..
                                 },
                             ),
@@ -983,45 +968,42 @@
                 ),
             ) => match self.tcx().get_global_alloc(ptr.alloc_id) {
                 Some(GlobalAlloc::Memory(alloc)) => {
-                    if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), ptr, Size::from_bytes(*data))
-                    {
+                    let bytes = int.assert_bits(self.tcx().data_layout.pointer_size);
+                    let size = Size::from_bytes(bytes);
+                    if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), ptr, size) {
                         p!(pretty_print_byte_str(byte_str))
                     } else {
-                        p!(write("<too short allocation>"))
+                        p!("<too short allocation>")
                     }
                 }
                 // FIXME: for statics and functions, we could in principle print more detail.
                 Some(GlobalAlloc::Static(def_id)) => p!(write("<static({:?})>", def_id)),
-                Some(GlobalAlloc::Function(_)) => p!(write("<function>")),
-                None => p!(write("<dangling pointer>")),
+                Some(GlobalAlloc::Function(_)) => p!("<function>"),
+                None => p!("<dangling pointer>"),
             },
             // Bool
-            (Scalar::Raw { data: 0, .. }, ty::Bool) => p!(write("false")),
-            (Scalar::Raw { data: 1, .. }, ty::Bool) => p!(write("true")),
+            (Scalar::Int(int), ty::Bool) if int == ScalarInt::FALSE => p!("false"),
+            (Scalar::Int(int), ty::Bool) if int == ScalarInt::TRUE => p!("true"),
             // Float
-            (Scalar::Raw { data, .. }, ty::Float(ast::FloatTy::F32)) => {
-                p!(write("{}f32", Single::from_bits(data)))
+            (Scalar::Int(int), ty::Float(ast::FloatTy::F32)) => {
+                p!(write("{}f32", Single::try_from(int).unwrap()))
             }
-            (Scalar::Raw { data, .. }, ty::Float(ast::FloatTy::F64)) => {
-                p!(write("{}f64", Double::from_bits(data)))
+            (Scalar::Int(int), ty::Float(ast::FloatTy::F64)) => {
+                p!(write("{}f64", Double::try_from(int).unwrap()))
             }
             // Int
-            (Scalar::Raw { data, .. }, ty::Uint(ui)) => {
-                let size = Integer::from_attr(&self.tcx(), UnsignedInt(*ui)).size();
-                let int = ConstInt::new(data, size, false, ty.is_ptr_sized_integral());
-                if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
-            }
-            (Scalar::Raw { data, .. }, ty::Int(i)) => {
-                let size = Integer::from_attr(&self.tcx(), SignedInt(*i)).size();
-                let int = ConstInt::new(data, size, true, ty.is_ptr_sized_integral());
+            (Scalar::Int(int), ty::Uint(_) | ty::Int(_)) => {
+                let int =
+                    ConstInt::new(int, matches!(ty.kind(), ty::Int(_)), ty.is_ptr_sized_integral());
                 if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
             }
             // Char
-            (Scalar::Raw { data, .. }, ty::Char) if char::from_u32(data as u32).is_some() => {
-                p!(write("{:?}", char::from_u32(data as u32).unwrap()))
+            (Scalar::Int(int), ty::Char) if char::try_from(int).is_ok() => {
+                p!(write("{:?}", char::try_from(int).unwrap()))
             }
             // Raw pointers
-            (Scalar::Raw { data, .. }, ty::RawPtr(_)) => {
+            (Scalar::Int(int), ty::RawPtr(_)) => {
+                let data = int.assert_bits(self.tcx().data_layout.pointer_size);
                 self = self.typed_value(
                     |mut this| {
                         write!(this, "0x{:x}", data)?;
@@ -1043,14 +1025,16 @@
                 )?;
             }
             // For function type zsts just printing the path is enough
-            (Scalar::Raw { size: 0, .. }, ty::FnDef(d, s)) => p!(print_value_path(*d, s)),
+            (Scalar::Int(int), ty::FnDef(d, s)) if int == ScalarInt::ZST => {
+                p!(print_value_path(*d, s))
+            }
             // Nontrivial types with scalar bit representation
-            (Scalar::Raw { data, size }, _) => {
+            (Scalar::Int(int), _) => {
                 let print = |mut this: Self| {
-                    if size == 0 {
+                    if int.size() == Size::ZERO {
                         write!(this, "transmute(())")?;
                     } else {
-                        write!(this, "transmute(0x{:01$x})", data, size as usize * 2)?;
+                        write!(this, "transmute(0x{:x})", int)?;
                     }
                     Ok(this)
                 };
@@ -1093,13 +1077,13 @@
 
     fn pretty_print_byte_str(mut self, byte_str: &'tcx [u8]) -> Result<Self::Const, Self::Error> {
         define_scoped_cx!(self);
-        p!(write("b\""));
+        p!("b\"");
         for &c in byte_str {
             for e in std::ascii::escape_default(c) {
                 self.write_char(e as char)?;
             }
         }
-        p!(write("\""));
+        p!("\"");
         Ok(self)
     }
 
@@ -1112,7 +1096,7 @@
         define_scoped_cx!(self);
 
         if self.tcx().sess.verbose() {
-            p!(write("ConstValue({:?}: ", ct), print(ty), write(")"));
+            p!(write("ConstValue({:?}: ", ct), print(ty), ")");
             return Ok(self);
         }
 
@@ -1138,7 +1122,7 @@
                 // relocations (we have an active `str` reference here). We don't use this
                 // result to affect interpreter execution.
                 let slice = data.inspect_with_uninit_and_ptr_outside_interpreter(start..end);
-                let s = ::std::str::from_utf8(slice).expect("non utf8 str from miri");
+                let s = std::str::from_utf8(slice).expect("non utf8 str from miri");
                 p!(write("{:?}", s));
                 Ok(self)
             }
@@ -1149,7 +1133,7 @@
                 let ptr = Pointer::new(AllocId(0), offset);
 
                 let byte_str = alloc.get_bytes(&self.tcx(), ptr, n).unwrap();
-                p!(write("*"));
+                p!("*");
                 p!(pretty_print_byte_str(byte_str));
                 Ok(self)
             }
@@ -1173,14 +1157,14 @@
 
                 match *ty.kind() {
                     ty::Array(..) => {
-                        p!(write("["), comma_sep(fields), write("]"));
+                        p!("[", comma_sep(fields), "]");
                     }
                     ty::Tuple(..) => {
-                        p!(write("("), comma_sep(fields));
+                        p!("(", comma_sep(fields));
                         if contents.fields.len() == 1 {
-                            p!(write(","));
+                            p!(",");
                         }
-                        p!(write(")"));
+                        p!(")");
                     }
                     ty::Adt(def, substs) if def.variants.is_empty() => {
                         p!(print_value_path(def.did, substs));
@@ -1194,19 +1178,19 @@
                         match variant_def.ctor_kind {
                             CtorKind::Const => {}
                             CtorKind::Fn => {
-                                p!(write("("), comma_sep(fields), write(")"));
+                                p!("(", comma_sep(fields), ")");
                             }
                             CtorKind::Fictive => {
-                                p!(write(" {{ "));
+                                p!(" {{ ");
                                 let mut first = true;
                                 for (field_def, field) in variant_def.fields.iter().zip(fields) {
                                     if !first {
-                                        p!(write(", "));
+                                        p!(", ");
                                     }
                                     p!(write("{}: ", field_def.ident), print(field));
                                     first = false;
                                 }
-                                p!(write(" }}"));
+                                p!(" }}");
                             }
                         }
                     }
@@ -1224,7 +1208,7 @@
                 // fallback
                 p!(write("{:?}", ct));
                 if print_ty {
-                    p!(write(": "), print(ty));
+                    p!(": ", print(ty));
                 }
                 Ok(self)
             }
@@ -1637,7 +1621,7 @@
             if this.print_alloc_ids {
                 p!(write("{:?}", p));
             } else {
-                p!(write("&_"));
+                p!("&_");
             }
             Ok(this)
         };
@@ -1703,11 +1687,11 @@
             ty::ReVar(_) => {}
             ty::ReErased => {}
             ty::ReStatic => {
-                p!(write("'static"));
+                p!("'static");
                 return Ok(self);
             }
             ty::ReEmpty(ty::UniverseIndex::ROOT) => {
-                p!(write("'<empty>"));
+                p!("'<empty>");
                 return Ok(self);
             }
             ty::ReEmpty(ui) => {
@@ -1716,7 +1700,7 @@
             }
         }
 
-        p!(write("'_"));
+        p!("'_");
 
         Ok(self)
     }
@@ -1812,7 +1796,7 @@
     {
         struct LateBoundRegionNameCollector<'a>(&'a mut FxHashSet<Symbol>);
         impl<'tcx> ty::fold::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_> {
-            fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+            fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
                 if let ty::ReLateBound(_, ty::BrNamed(_, name)) = *r {
                     self.0.insert(name);
                 }
@@ -1847,7 +1831,7 @@
     type Error = P::Error;
     fn print(&self, mut cx: P) -> Result<Self::Output, Self::Error> {
         define_scoped_cx!(cx);
-        p!(print(self.0), write(": "), print(self.1));
+        p!(print(self.0), ": ", print(self.1));
         Ok(cx)
     }
 }
@@ -1857,7 +1841,7 @@
         $(impl fmt::Display for $ty {
             fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                 ty::tls::with(|tcx| {
-                    tcx.lift(self)
+                    tcx.lift(*self)
                         .expect("could not lift for printing")
                         .print(FmtPrinter::new(tcx, f, Namespace::TypeNS))?;
                     Ok(())
@@ -1945,7 +1929,7 @@
     (self, cx):
 
     &'tcx ty::List<Ty<'tcx>> {
-        p!(write("{{"), comma_sep(self.iter()), write("}}"))
+        p!("{{", comma_sep(self.iter()), "}}")
     }
 
     ty::TypeAndMut<'tcx> {
@@ -1981,7 +1965,7 @@
             p!(write("extern {} ", self.abi));
         }
 
-        p!(write("fn"), pretty_fn_sig(self.inputs(), self.c_variadic, self.output()));
+        p!("fn", pretty_fn_sig(self.inputs(), self.c_variadic, self.output()));
     }
 
     ty::InferTy {
@@ -1990,7 +1974,7 @@
             return Ok(cx);
         }
         match *self {
-            ty::TyVar(_) => p!(write("_")),
+            ty::TyVar(_) => p!("_"),
             ty::IntVar(_) => p!(write("{}", "{integer}")),
             ty::FloatVar(_) => p!(write("{}", "{float}")),
             ty::FreshTy(v) => p!(write("FreshTy({})", v)),
@@ -2016,16 +2000,16 @@
     }
 
     ty::SubtypePredicate<'tcx> {
-        p!(print(self.a), write(" <: "), print(self.b))
+        p!(print(self.a), " <: ", print(self.b))
     }
 
     ty::TraitPredicate<'tcx> {
-        p!(print(self.trait_ref.self_ty()), write(": "),
+        p!(print(self.trait_ref.self_ty()), ": ",
            print(self.trait_ref.print_only_trait_path()))
     }
 
     ty::ProjectionPredicate<'tcx> {
-        p!(print(self.projection_ty), write(" == "), print(self.ty))
+        p!(print(self.projection_ty), " == ", print(self.ty))
     }
 
     ty::ProjectionTy<'tcx> {
@@ -2034,9 +2018,9 @@
 
     ty::ClosureKind {
         match *self {
-            ty::ClosureKind::Fn => p!(write("Fn")),
-            ty::ClosureKind::FnMut => p!(write("FnMut")),
-            ty::ClosureKind::FnOnce => p!(write("FnOnce")),
+            ty::ClosureKind::Fn => p!("Fn"),
+            ty::ClosureKind::FnMut => p!("FnMut"),
+            ty::ClosureKind::FnOnce => p!("FnOnce"),
         }
     }
 
@@ -2051,7 +2035,7 @@
         match *self {
             ty::PredicateAtom::Trait(ref data, constness) => {
                 if let hir::Constness::Const = constness {
-                    p!(write("const "));
+                    p!("const ");
                 }
                 p!(print(data))
             }
@@ -2059,33 +2043,23 @@
             ty::PredicateAtom::RegionOutlives(predicate) => p!(print(predicate)),
             ty::PredicateAtom::TypeOutlives(predicate) => p!(print(predicate)),
             ty::PredicateAtom::Projection(predicate) => p!(print(predicate)),
-            ty::PredicateAtom::WellFormed(arg) => p!(print(arg), write(" well-formed")),
+            ty::PredicateAtom::WellFormed(arg) => p!(print(arg), " well-formed"),
             ty::PredicateAtom::ObjectSafe(trait_def_id) => {
-                p!(write("the trait `"),
-                print_def_path(trait_def_id, &[]),
-                write("` is object-safe"))
+                p!("the trait `", print_def_path(trait_def_id, &[]), "` is object-safe")
             }
             ty::PredicateAtom::ClosureKind(closure_def_id, _closure_substs, kind) => {
-                p!(write("the closure `"),
+                p!("the closure `",
                 print_value_path(closure_def_id, &[]),
                 write("` implements the trait `{}`", kind))
             }
             ty::PredicateAtom::ConstEvaluatable(def, substs) => {
-                p!(write("the constant `"),
-                print_value_path(def.did, substs),
-                write("` can be evaluated"))
+                p!("the constant `", print_value_path(def.did, substs), "` can be evaluated")
             }
             ty::PredicateAtom::ConstEquate(c1, c2) => {
-                p!(write("the constant `"),
-                print(c1),
-                write("` equals `"),
-                print(c2),
-                write("`"))
+                p!("the constant `", print(c1), "` equals `", print(c2), "`")
             }
             ty::PredicateAtom::TypeWellFormedFromEnv(ty) => {
-                p!(write("the type `"),
-                print(ty),
-                write("` is found in the environment"))
+                p!("the type `", print(ty), "` is found in the environment")
             }
         }
     }
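
Note on the p! macro hunk at the top of this file: the new `(@$lit:literal)` arm forwards bare string literals straight to write!, which is what lets the later hunks shorten p!(write("impl ")) to p!("impl "). A reduced standalone analogue (macro and function names invented, writing into an explicit String instead of the printer's scoped context):

    use std::fmt::{self, Write};

    macro_rules! emit {
        // Bare string literals are forwarded straight to `write!`.
        ($buf:expr, @$lit:literal) => { write!($buf, $lit)? };
        // `write(...)` keeps its formatted-argument behaviour.
        ($buf:expr, @write($($data:expr),+)) => { write!($buf, $($data),+)? };
        // Entry point: each element is re-dispatched to one of the arms above.
        ($buf:expr, $($elem:tt $(($($args:tt)*))?),+) => {{
            $(emit!($buf, @ $elem $(($($args)*))?);)+
        }};
    }

    fn render() -> Result<String, fmt::Error> {
        let mut out = String::new();
        emit!(out, "impl ", write("{} for ", "Trait"), "Type");
        Ok(out)
    }

    fn main() {
        assert_eq!(render().unwrap(), "impl Trait for Type");
    }
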
diff --git a/compiler/rustc_middle/src/ty/query/mod.rs b/compiler/rustc_middle/src/ty/query/mod.rs
index d3a7412..7ba4d5a 100644
--- a/compiler/rustc_middle/src/ty/query/mod.rs
+++ b/compiler/rustc_middle/src/ty/query/mod.rs
@@ -34,7 +34,6 @@
 use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
-use rustc_data_structures::profiling::ProfileCategory::*;
 use rustc_data_structures::stable_hasher::StableVec;
 use rustc_data_structures::svh::Svh;
 use rustc_data_structures::sync::Lrc;
@@ -169,26 +168,71 @@
         return false;
     }
 
-    rustc_dep_node_force!([dep_node, tcx]
-        // These are inputs that are expected to be pre-allocated and that
-        // should therefore always be red or green already.
-        DepKind::CrateMetadata |
+    macro_rules! force_from_dep_node {
+        ($($(#[$attr:meta])* [$($modifiers:tt)*] $name:ident($K:ty),)*) => {
+            match dep_node.kind {
+                // These are inputs that are expected to be pre-allocated and that
+                // should therefore always be red or green already.
+                DepKind::CrateMetadata |
 
-        // These are anonymous nodes.
-        DepKind::TraitSelect |
+                // These are anonymous nodes.
+                DepKind::TraitSelect |
 
-        // We don't have enough information to reconstruct the query key of
-        // these.
-        DepKind::CompileCodegenUnit => {
-            bug!("force_from_dep_node: encountered {:?}", dep_node)
+                // We don't have enough information to reconstruct the query key of
+                // these.
+                DepKind::CompileCodegenUnit |
+
+                // Forcing this makes no sense.
+                DepKind::Null => {
+                    bug!("force_from_dep_node: encountered {:?}", dep_node)
+                }
+
+                $(DepKind::$name => {
+                    debug_assert!(<$K as DepNodeParams<TyCtxt<'_>>>::can_reconstruct_query_key());
+
+                    if let Some(key) = <$K as DepNodeParams<TyCtxt<'_>>>::recover(tcx, dep_node) {
+                        force_query::<queries::$name<'_>, _>(
+                            tcx,
+                            key,
+                            DUMMY_SP,
+                            *dep_node
+                        );
+                        return true;
+                    }
+                })*
+            }
         }
-    );
+    }
+
+    rustc_dep_node_append! { [force_from_dep_node!][] }
 
     false
 }
 
 pub(crate) fn try_load_from_on_disk_cache<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) {
-    rustc_dep_node_try_load_from_on_disk_cache!(dep_node, tcx)
+    macro_rules! try_load_from_on_disk_cache {
+        ($($name:ident,)*) => {
+            match dep_node.kind {
+                $(DepKind::$name => {
+                    if <query_keys::$name<'tcx> as DepNodeParams<TyCtxt<'_>>>::can_reconstruct_query_key() {
+                        debug_assert!(tcx.dep_graph
+                                         .node_color(dep_node)
+                                         .map(|c| c.is_green())
+                                         .unwrap_or(false));
+
+                        let key = <query_keys::$name<'tcx> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, dep_node).unwrap();
+                        if queries::$name::cache_on_disk(tcx, &key, None) {
+                            let _ = tcx.$name(key);
+                        }
+                    }
+                })*
+
+                _ => (),
+            }
+        }
+    }
+
+    rustc_cached_queries!(try_load_from_on_disk_cache!);
 }
 
 mod sealed {
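A note on the shape of the change above: both rewrites drop an external helper macro in favor of a `macro_rules!` defined inside the function itself, expanding to one `match` arm per query kind. A minimal, self-contained sketch of that dispatch pattern, using toy names rather than rustc's actual types or queries:

    #[derive(Debug)]
    enum DepKind {
        CrateMetadata,
        TypeOf,
        MirBuilt,
    }

    struct DepNode {
        kind: DepKind,
        key: Option<u32>,
    }

    fn force_type_of(key: u32) {
        println!("forcing type_of({})", key);
    }

    fn force_mir_built(key: u32) {
        println!("forcing mir_built({})", key);
    }

    // Mirrors the shape of force_from_dep_node: non-forcible kinds hit the
    // explicit arm, the macro generates one arm per query, and a node whose
    // key cannot be recovered falls through to `false`.
    fn force_from_dep_node(node: &DepNode) -> bool {
        macro_rules! force {
            ($($kind:ident => $f:ident,)*) => {
                match node.kind {
                    DepKind::CrateMetadata => panic!("cannot force {:?}", node.kind),
                    $(DepKind::$kind => {
                        if let Some(key) = node.key {
                            $f(key);
                            return true;
                        }
                    })*
                }
            };
        }

        force! {
            TypeOf => force_type_of,
            MirBuilt => force_mir_built,
        }

        false
    }

    fn main() {
        assert!(force_from_dep_node(&DepNode { kind: DepKind::TypeOf, key: Some(7) }));
        assert!(!force_from_dep_node(&DepNode { kind: DepKind::MirBuilt, key: None }));
    }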
diff --git a/compiler/rustc_middle/src/ty/query/on_disk_cache.rs b/compiler/rustc_middle/src/ty/query/on_disk_cache.rs
index b0c48a8..173e9a3 100644
--- a/compiler/rustc_middle/src/ty/query/on_disk_cache.rs
+++ b/compiler/rustc_middle/src/ty/query/on_disk_cache.rs
@@ -543,7 +543,7 @@
 // tag matches and the correct amount of bytes was read.
 fn decode_tagged<D, T, V>(decoder: &mut D, expected_tag: T) -> Result<V, D::Error>
 where
-    T: Decodable<D> + Eq + ::std::fmt::Debug,
+    T: Decodable<D> + Eq + std::fmt::Debug,
     V: Decodable<D>,
     D: DecoderWithPosition,
 {
@@ -601,29 +601,6 @@
         Ok(ty)
     }
 
-    fn cached_predicate_for_shorthand<F>(
-        &mut self,
-        shorthand: usize,
-        or_insert_with: F,
-    ) -> Result<ty::Predicate<'tcx>, Self::Error>
-    where
-        F: FnOnce(&mut Self) -> Result<ty::Predicate<'tcx>, Self::Error>,
-    {
-        let tcx = self.tcx();
-
-        let cache_key =
-            ty::CReaderCacheKey { cnum: CrateNum::ReservedForIncrCompCache, pos: shorthand };
-
-        if let Some(&pred) = tcx.pred_rcache.borrow().get(&cache_key) {
-            return Ok(pred);
-        }
-
-        let pred = or_insert_with(self)?;
-        // This may overwrite the entry, but it should overwrite with the same value.
-        tcx.pred_rcache.borrow_mut().insert_same(cache_key, pred);
-        Ok(pred)
-    }
-
     fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
     where
         F: FnOnce(&mut Self) -> R,
@@ -1023,7 +1000,7 @@
     let _timer = tcx
         .sess
         .prof
-        .extra_verbose_generic_activity("encode_query_results_for", ::std::any::type_name::<Q>());
+        .extra_verbose_generic_activity("encode_query_results_for", std::any::type_name::<Q>());
 
     let state = Q::query_state(tcx);
     assert!(state.all_inactive());
diff --git a/compiler/rustc_middle/src/ty/query/plumbing.rs b/compiler/rustc_middle/src/ty/query/plumbing.rs
index f3fa363..d038695 100644
--- a/compiler/rustc_middle/src/ty/query/plumbing.rs
+++ b/compiler/rustc_middle/src/ty/query/plumbing.rs
@@ -40,7 +40,8 @@
 
     fn try_collect_active_jobs(
         &self,
-    ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>> {
+    ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self::DepKind, Self::Query>>>
+    {
         self.queries.try_collect_active_jobs()
     }
 
@@ -124,20 +125,23 @@
         })
     }
 
-    pub fn try_print_query_stack(handler: &Handler) {
+    pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
         eprintln!("query stack during panic:");
 
         // Be careful relying on global state here: this code is called from
         // a panic hook, which means that the global `Handler` may be in a weird
         // state if it was responsible for triggering the panic.
+        let mut i = 0;
         ty::tls::with_context_opt(|icx| {
             if let Some(icx) = icx {
                 let query_map = icx.tcx.queries.try_collect_active_jobs();
 
                 let mut current_query = icx.query;
-                let mut i = 0;
 
                 while let Some(query) = current_query {
+                    if Some(i) == num_frames {
+                        break;
+                    }
                     let query_info =
                         if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
                             info
@@ -163,7 +167,11 @@
             }
         });
 
-        eprintln!("end of query stack");
+        if num_frames == None || num_frames >= Some(i) {
+            eprintln!("end of query stack");
+        } else {
+            eprintln!("we're just showing a limited slice of the query stack");
+        }
     }
 }
 
@@ -234,25 +242,15 @@
     };
 }
 
-macro_rules! define_queries {
-    (<$tcx:tt> $($category:tt {
-        $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($($K:tt)*) -> $V:ty,)*
-    },)*) => {
-        define_queries_inner! { <$tcx>
-            $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($($K)*) -> $V,)*)*
-        }
-    }
-}
-
 macro_rules! query_helper_param_ty {
     (DefId) => { impl IntoQueryParam<DefId> };
     ($K:ty) => { $K };
 }
 
-macro_rules! define_queries_inner {
+macro_rules! define_queries {
     (<$tcx:tt>
-     $($(#[$attr:meta])* category<$category:tt>
-        [$($modifiers:tt)*] fn $name:ident: $node:ident($($K:tt)*) -> $V:ty,)*) => {
+     $($(#[$attr:meta])*
+        [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
 
         use std::mem;
         use crate::{
@@ -260,7 +258,6 @@
             rustc_data_structures::stable_hasher::StableHasher,
             ich::StableHashingContext
         };
-        use rustc_data_structures::profiling::ProfileCategory;
 
         define_queries_struct! {
             tcx: $tcx,
@@ -346,7 +343,7 @@
             $(pub type $name<$tcx> = $V;)*
         }
 
-        $(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> {
+        $(impl<$tcx> QueryConfig for queries::$name<$tcx> {
             type Key = $($K)*;
             type Value = $V;
             type Stored = <
@@ -354,18 +351,17 @@
                 as QueryStorage
             >::Stored;
             const NAME: &'static str = stringify!($name);
-            const CATEGORY: ProfileCategory = $category;
         }
 
         impl<$tcx> QueryAccessors<TyCtxt<$tcx>> for queries::$name<$tcx> {
             const ANON: bool = is_anon!([$($modifiers)*]);
             const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
-            const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node;
+            const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$name;
 
             type Cache = query_storage!([$($modifiers)*][$($K)*, $V]);
 
             #[inline(always)]
-            fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<TyCtxt<$tcx>, Self::Cache> {
+            fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query, Self::Cache> {
                 &tcx.queries.$name
             }
 
@@ -447,7 +443,7 @@
             #[inline(always)]
             #[must_use]
             pub fn $name(self, key: query_helper_param_ty!($($K)*))
-                -> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
+                -> <queries::$name<$tcx> as QueryConfig>::Stored
             {
                 self.at(DUMMY_SP).$name(key.into_query_param())
             })*
@@ -486,7 +482,7 @@
             $($(#[$attr])*
             #[inline(always)]
             pub fn $name(self, key: query_helper_param_ty!($($K)*))
-                -> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
+                -> <queries::$name<$tcx> as QueryConfig>::Stored
             {
                 get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param())
             })*
@@ -520,7 +516,8 @@
             fallback_extern_providers: Box<Providers>,
 
             $($(#[$attr])*  $name: QueryState<
-                TyCtxt<$tcx>,
+                crate::dep_graph::DepKind,
+                <TyCtxt<$tcx> as QueryContext>::Query,
                 <queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
             >,)*
         }
@@ -541,7 +538,7 @@
 
             pub(crate) fn try_collect_active_jobs(
                 &self
-            ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<TyCtxt<'tcx>>>> {
+            ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query>>> {
                 let mut jobs = FxHashMap::default();
 
                 $(
diff --git a/compiler/rustc_middle/src/ty/query/profiling_support.rs b/compiler/rustc_middle/src/ty/query/profiling_support.rs
index 4e8db31..cbcecb8 100644
--- a/compiler/rustc_middle/src/ty/query/profiling_support.rs
+++ b/compiler/rustc_middle/src/ty/query/profiling_support.rs
@@ -5,8 +5,7 @@
 use rustc_data_structures::profiling::SelfProfiler;
 use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
 use rustc_hir::definitions::DefPathData;
-use rustc_query_system::query::QueryCache;
-use rustc_query_system::query::QueryState;
+use rustc_query_system::query::{QueryCache, QueryContext, QueryState};
 use std::fmt::Debug;
 use std::io::Write;
 
@@ -231,7 +230,7 @@
 pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
     tcx: TyCtxt<'tcx>,
     query_name: &'static str,
-    query_state: &QueryState<TyCtxt<'tcx>, C>,
+    query_state: &QueryState<crate::dep_graph::DepKind, <TyCtxt<'tcx> as QueryContext>::Query, C>,
     string_cache: &mut QueryKeyStringCache,
 ) where
     C: QueryCache,
diff --git a/compiler/rustc_middle/src/ty/query/stats.rs b/compiler/rustc_middle/src/ty/query/stats.rs
index b496bf8..e0b44ce 100644
--- a/compiler/rustc_middle/src/ty/query/stats.rs
+++ b/compiler/rustc_middle/src/ty/query/stats.rs
@@ -1,11 +1,10 @@
 use crate::ty::query::queries;
 use crate::ty::TyCtxt;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
-use rustc_query_system::query::QueryCache;
-use rustc_query_system::query::QueryState;
-use rustc_query_system::query::{QueryAccessors, QueryContext};
+use rustc_query_system::query::{QueryAccessors, QueryCache, QueryContext, QueryState};
 
 use std::any::type_name;
+use std::hash::Hash;
 use std::mem;
 #[cfg(debug_assertions)]
 use std::sync::atomic::Ordering;
@@ -38,10 +37,12 @@
     local_def_id_keys: Option<usize>,
 }
 
-fn stats<CTX: QueryContext, C: QueryCache>(
-    name: &'static str,
-    map: &QueryState<CTX, C>,
-) -> QueryStats {
+fn stats<D, Q, C>(name: &'static str, map: &QueryState<D, Q, C>) -> QueryStats
+where
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
+    C: QueryCache,
+{
     let mut stats = QueryStats {
         name,
         #[cfg(debug_assertions)]
@@ -119,21 +120,22 @@
 }
 
 macro_rules! print_stats {
-    (<$tcx:tt> $($category:tt {
-        $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*
-    },)*) => {
+    (<$tcx:tt>
+        $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($K:ty) -> $V:ty,)*
+    ) => {
         fn query_stats(tcx: TyCtxt<'_>) -> Vec<QueryStats> {
             let mut queries = Vec::new();
 
-            $($(
+            $(
                 queries.push(stats::<
-                    TyCtxt<'_>,
+                    crate::dep_graph::DepKind,
+                    <TyCtxt<'_> as QueryContext>::Query,
                     <queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache,
                 >(
                     stringify!($name),
                     &tcx.queries.$name,
                 ));
-            )*)*
+            )*
 
             queries
         }
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
index c4df0bb..ef5034e2 100644
--- a/compiler/rustc_middle/src/ty/relate.rs
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -490,7 +490,7 @@
     let eagerly_eval = |x: &'tcx ty::Const<'tcx>| x.eval(tcx, relation.param_env()).val;
 
     // FIXME(eddyb) doesn't look like everything below checks that `a.ty == b.ty`.
-    // We could probably always assert it early, as `const` generic parameters
+    // We could probably always assert it early, as const generic parameters
     // are not allowed to depend on other generic parameters, i.e. are concrete.
     // (although there could be normalization differences)
 
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
index 597ceac..89fd803 100644
--- a/compiler/rustc_middle/src/ty/structural_impls.rs
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -14,6 +14,7 @@
 
 use smallvec::SmallVec;
 use std::fmt;
+use std::ops::ControlFlow;
 use std::rc::Rc;
 use std::sync::Arc;
 
@@ -299,6 +300,7 @@
     ::rustc_target::spec::abi::Abi,
     crate::mir::coverage::ExpressionOperandId,
     crate::mir::coverage::CounterValueReference,
+    crate::mir::coverage::InjectedExpressionId,
     crate::mir::coverage::InjectedExpressionIndex,
     crate::mir::coverage::MappedExpressionIndex,
     crate::mir::Local,
@@ -332,24 +334,23 @@
 // FIXME(eddyb) replace all the uses of `Option::map` with `?`.
 impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) {
     type Lifted = (A::Lifted, B::Lifted);
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.0).and_then(|a| tcx.lift(&self.1).map(|b| (a, b)))
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        Some((tcx.lift(self.0)?, tcx.lift(self.1)?))
     }
 }
 
 impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>, C: Lift<'tcx>> Lift<'tcx> for (A, B, C) {
     type Lifted = (A::Lifted, B::Lifted, C::Lifted);
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.0)
-            .and_then(|a| tcx.lift(&self.1).and_then(|b| tcx.lift(&self.2).map(|c| (a, b, c))))
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        Some((tcx.lift(self.0)?, tcx.lift(self.1)?, tcx.lift(self.2)?))
     }
 }
 
 impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> {
     type Lifted = Option<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        match *self {
-            Some(ref x) => tcx.lift(x).map(Some),
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match self {
+            Some(x) => tcx.lift(x).map(Some),
             None => Some(None),
         }
     }
@@ -357,89 +358,72 @@
 
 impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result<T, E> {
     type Lifted = Result<T::Lifted, E::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        match *self {
-            Ok(ref x) => tcx.lift(x).map(Ok),
-            Err(ref e) => tcx.lift(e).map(Err),
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match self {
+            Ok(x) => tcx.lift(x).map(Ok),
+            Err(e) => tcx.lift(e).map(Err),
         }
     }
 }
 
 impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Box<T> {
     type Lifted = Box<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&**self).map(Box::new)
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(*self).map(Box::new)
     }
 }
 
-impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Rc<T> {
+impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Rc<T> {
     type Lifted = Rc<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&**self).map(Rc::new)
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.as_ref().clone()).map(Rc::new)
     }
 }
 
-impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Arc<T> {
+impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Arc<T> {
     type Lifted = Arc<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&**self).map(Arc::new)
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.as_ref().clone()).map(Arc::new)
     }
 }
-
-impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for [T] {
-    type Lifted = Vec<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        // type annotation needed to inform `projection_must_outlive`
-        let mut result: Vec<<T as Lift<'tcx>>::Lifted> = Vec::with_capacity(self.len());
-        for x in self {
-            if let Some(value) = tcx.lift(x) {
-                result.push(value);
-            } else {
-                return None;
-            }
-        }
-        Some(result)
-    }
-}
-
 impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec<T> {
     type Lifted = Vec<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self[..])
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        self.into_iter().map(|v| tcx.lift(v)).collect()
     }
 }
 
 impl<'tcx, I: Idx, T: Lift<'tcx>> Lift<'tcx> for IndexVec<I, T> {
     type Lifted = IndexVec<I, T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        self.iter().map(|e| tcx.lift(e)).collect()
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        self.into_iter().map(|e| tcx.lift(e)).collect()
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> {
     type Lifted = ty::TraitRef<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.substs).map(|substs| ty::TraitRef { def_id: self.def_id, substs })
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.substs).map(|substs| ty::TraitRef { def_id: self.def_id, substs })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialTraitRef<'a> {
     type Lifted = ty::ExistentialTraitRef<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.substs).map(|substs| ty::ExistentialTraitRef { def_id: self.def_id, substs })
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.substs).map(|substs| ty::ExistentialTraitRef { def_id: self.def_id, substs })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialPredicate<'a> {
     type Lifted = ty::ExistentialPredicate<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
         match self {
             ty::ExistentialPredicate::Trait(x) => tcx.lift(x).map(ty::ExistentialPredicate::Trait),
             ty::ExistentialPredicate::Projection(x) => {
                 tcx.lift(x).map(ty::ExistentialPredicate::Projection)
             }
             ty::ExistentialPredicate::AutoTrait(def_id) => {
-                Some(ty::ExistentialPredicate::AutoTrait(*def_id))
+                Some(ty::ExistentialPredicate::AutoTrait(def_id))
             }
         }
     }
@@ -447,15 +431,15 @@
 
 impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> {
     type Lifted = ty::TraitPredicate<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> {
-        tcx.lift(&self.trait_ref).map(|trait_ref| ty::TraitPredicate { trait_ref })
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> {
+        tcx.lift(self.trait_ref).map(|trait_ref| ty::TraitPredicate { trait_ref })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::SubtypePredicate<'a> {
     type Lifted = ty::SubtypePredicate<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::SubtypePredicate<'tcx>> {
-        tcx.lift(&(self.a, self.b)).map(|(a, b)| ty::SubtypePredicate {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::SubtypePredicate<'tcx>> {
+        tcx.lift((self.a, self.b)).map(|(a, b)| ty::SubtypePredicate {
             a_is_expected: self.a_is_expected,
             a,
             b,
@@ -465,33 +449,33 @@
 
 impl<'tcx, A: Copy + Lift<'tcx>, B: Copy + Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> {
     type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b))
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift((self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b))
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionTy<'a> {
     type Lifted = ty::ProjectionTy<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionTy<'tcx>> {
-        tcx.lift(&self.substs)
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionTy<'tcx>> {
+        tcx.lift(self.substs)
             .map(|substs| ty::ProjectionTy { item_def_id: self.item_def_id, substs })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> {
     type Lifted = ty::ProjectionPredicate<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> {
-        tcx.lift(&(self.projection_ty, self.ty))
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> {
+        tcx.lift((self.projection_ty, self.ty))
             .map(|(projection_ty, ty)| ty::ProjectionPredicate { projection_ty, ty })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialProjection<'a> {
     type Lifted = ty::ExistentialProjection<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.substs).map(|substs| ty::ExistentialProjection {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.substs).map(|substs| ty::ExistentialProjection {
             substs,
-            ty: tcx.lift(&self.ty).expect("type must lift when substs do"),
+            ty: tcx.lift(self.ty).expect("type must lift when substs do"),
             item_def_id: self.item_def_id,
         })
     }
@@ -499,7 +483,7 @@
 
 impl<'a, 'tcx> Lift<'tcx> for ty::PredicateKind<'a> {
     type Lifted = ty::PredicateKind<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
         match self {
             ty::PredicateKind::ForAll(binder) => tcx.lift(binder).map(ty::PredicateKind::ForAll),
             ty::PredicateKind::Atom(atom) => tcx.lift(atom).map(ty::PredicateKind::Atom),
@@ -509,24 +493,24 @@
 
 impl<'a, 'tcx> Lift<'tcx> for ty::PredicateAtom<'a> {
     type Lifted = ty::PredicateAtom<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        match *self {
-            ty::PredicateAtom::Trait(ref data, constness) => {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match self {
+            ty::PredicateAtom::Trait(data, constness) => {
                 tcx.lift(data).map(|data| ty::PredicateAtom::Trait(data, constness))
             }
-            ty::PredicateAtom::Subtype(ref data) => tcx.lift(data).map(ty::PredicateAtom::Subtype),
-            ty::PredicateAtom::RegionOutlives(ref data) => {
+            ty::PredicateAtom::Subtype(data) => tcx.lift(data).map(ty::PredicateAtom::Subtype),
+            ty::PredicateAtom::RegionOutlives(data) => {
                 tcx.lift(data).map(ty::PredicateAtom::RegionOutlives)
             }
-            ty::PredicateAtom::TypeOutlives(ref data) => {
+            ty::PredicateAtom::TypeOutlives(data) => {
                 tcx.lift(data).map(ty::PredicateAtom::TypeOutlives)
             }
-            ty::PredicateAtom::Projection(ref data) => {
+            ty::PredicateAtom::Projection(data) => {
                 tcx.lift(data).map(ty::PredicateAtom::Projection)
             }
-            ty::PredicateAtom::WellFormed(ty) => tcx.lift(&ty).map(ty::PredicateAtom::WellFormed),
+            ty::PredicateAtom::WellFormed(ty) => tcx.lift(ty).map(ty::PredicateAtom::WellFormed),
             ty::PredicateAtom::ClosureKind(closure_def_id, closure_substs, kind) => {
-                tcx.lift(&closure_substs).map(|closure_substs| {
+                tcx.lift(closure_substs).map(|closure_substs| {
                     ty::PredicateAtom::ClosureKind(closure_def_id, closure_substs, kind)
                 })
             }
@@ -534,13 +518,13 @@
                 Some(ty::PredicateAtom::ObjectSafe(trait_def_id))
             }
             ty::PredicateAtom::ConstEvaluatable(def_id, substs) => {
-                tcx.lift(&substs).map(|substs| ty::PredicateAtom::ConstEvaluatable(def_id, substs))
+                tcx.lift(substs).map(|substs| ty::PredicateAtom::ConstEvaluatable(def_id, substs))
             }
             ty::PredicateAtom::ConstEquate(c1, c2) => {
-                tcx.lift(&(c1, c2)).map(|(c1, c2)| ty::PredicateAtom::ConstEquate(c1, c2))
+                tcx.lift((c1, c2)).map(|(c1, c2)| ty::PredicateAtom::ConstEquate(c1, c2))
             }
             ty::PredicateAtom::TypeWellFormedFromEnv(ty) => {
-                tcx.lift(&ty).map(ty::PredicateAtom::TypeWellFormedFromEnv)
+                tcx.lift(ty).map(ty::PredicateAtom::TypeWellFormedFromEnv)
             }
         }
     }
@@ -548,61 +532,62 @@
 
 impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<T> {
     type Lifted = ty::Binder<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(self.as_ref().skip_binder()).map(ty::Binder::bind)
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        self.map_bound(|v| tcx.lift(v)).transpose()
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> {
     type Lifted = ty::ParamEnv<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.caller_bounds())
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.caller_bounds())
             .map(|caller_bounds| ty::ParamEnv::new(caller_bounds, self.reveal()))
     }
 }
 
 impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::ParamEnvAnd<'a, T> {
     type Lifted = ty::ParamEnvAnd<'tcx, T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.param_env).and_then(|param_env| {
-            tcx.lift(&self.value).map(|value| ty::ParamEnvAnd { param_env, value })
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.param_env).and_then(|param_env| {
+            tcx.lift(self.value).map(|value| ty::ParamEnvAnd { param_env, value })
         })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> {
     type Lifted = ty::ClosureSubsts<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.substs).map(|substs| ty::ClosureSubsts { substs })
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.substs).map(|substs| ty::ClosureSubsts { substs })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorSubsts<'a> {
     type Lifted = ty::GeneratorSubsts<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.substs).map(|substs| ty::GeneratorSubsts { substs })
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.substs).map(|substs| ty::GeneratorSubsts { substs })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjustment<'a> {
     type Lifted = ty::adjustment::Adjustment<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.kind).and_then(|kind| {
-            tcx.lift(&self.target).map(|target| ty::adjustment::Adjustment { kind, target })
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        let ty::adjustment::Adjustment { kind, target } = self;
+        tcx.lift(kind).and_then(|kind| {
+            tcx.lift(target).map(|target| ty::adjustment::Adjustment { kind, target })
         })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjust<'a> {
     type Lifted = ty::adjustment::Adjust<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        match *self {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match self {
             ty::adjustment::Adjust::NeverToAny => Some(ty::adjustment::Adjust::NeverToAny),
             ty::adjustment::Adjust::Pointer(ptr) => Some(ty::adjustment::Adjust::Pointer(ptr)),
-            ty::adjustment::Adjust::Deref(ref overloaded) => {
+            ty::adjustment::Adjust::Deref(overloaded) => {
                 tcx.lift(overloaded).map(ty::adjustment::Adjust::Deref)
             }
-            ty::adjustment::Adjust::Borrow(ref autoref) => {
+            ty::adjustment::Adjust::Borrow(autoref) => {
                 tcx.lift(autoref).map(ty::adjustment::Adjust::Borrow)
             }
         }
@@ -611,8 +596,8 @@
 
 impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::OverloadedDeref<'a> {
     type Lifted = ty::adjustment::OverloadedDeref<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.region).map(|region| ty::adjustment::OverloadedDeref {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.region).map(|region| ty::adjustment::OverloadedDeref {
             region,
             mutbl: self.mutbl,
             span: self.span,
@@ -622,10 +607,10 @@
 
 impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoBorrow<'a> {
     type Lifted = ty::adjustment::AutoBorrow<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        match *self {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match self {
             ty::adjustment::AutoBorrow::Ref(r, m) => {
-                tcx.lift(&r).map(|r| ty::adjustment::AutoBorrow::Ref(r, m))
+                tcx.lift(r).map(|r| ty::adjustment::AutoBorrow::Ref(r, m))
             }
             ty::adjustment::AutoBorrow::RawPtr(m) => Some(ty::adjustment::AutoBorrow::RawPtr(m)),
         }
@@ -634,16 +619,16 @@
 
 impl<'a, 'tcx> Lift<'tcx> for ty::GenSig<'a> {
     type Lifted = ty::GenSig<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&(self.resume_ty, self.yield_ty, self.return_ty))
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift((self.resume_ty, self.yield_ty, self.return_ty))
             .map(|(resume_ty, yield_ty, return_ty)| ty::GenSig { resume_ty, yield_ty, return_ty })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> {
     type Lifted = ty::FnSig<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.inputs_and_output).map(|x| ty::FnSig {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(self.inputs_and_output).map(|x| ty::FnSig {
             inputs_and_output: x,
             c_variadic: self.c_variadic,
             unsafety: self.unsafety,
@@ -654,19 +639,20 @@
 
 impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound<T> {
     type Lifted = ty::error::ExpectedFound<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.expected).and_then(|expected| {
-            tcx.lift(&self.found).map(|found| ty::error::ExpectedFound { expected, found })
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        let ty::error::ExpectedFound { expected, found } = self;
+        tcx.lift(expected).and_then(|expected| {
+            tcx.lift(found).map(|found| ty::error::ExpectedFound { expected, found })
         })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> {
     type Lifted = ty::error::TypeError<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
         use crate::ty::error::TypeError::*;
 
-        Some(match *self {
+        Some(match self {
             Mismatch => Mismatch,
             UnsafetyMismatch(x) => UnsafetyMismatch(x),
             AbiMismatch(x) => AbiMismatch(x),
@@ -675,51 +661,51 @@
             FixedArraySize(x) => FixedArraySize(x),
             ArgCount => ArgCount,
             RegionsDoesNotOutlive(a, b) => {
-                return tcx.lift(&(a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b));
+                return tcx.lift((a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b));
             }
             RegionsInsufficientlyPolymorphic(a, b) => {
-                return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b));
+                return tcx.lift(b).map(|b| RegionsInsufficientlyPolymorphic(a, b));
             }
             RegionsOverlyPolymorphic(a, b) => {
-                return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b));
+                return tcx.lift(b).map(|b| RegionsOverlyPolymorphic(a, b));
             }
             RegionsPlaceholderMismatch => RegionsPlaceholderMismatch,
             IntMismatch(x) => IntMismatch(x),
             FloatMismatch(x) => FloatMismatch(x),
             Traits(x) => Traits(x),
             VariadicMismatch(x) => VariadicMismatch(x),
-            CyclicTy(t) => return tcx.lift(&t).map(|t| CyclicTy(t)),
-            CyclicConst(ct) => return tcx.lift(&ct).map(|ct| CyclicConst(ct)),
+            CyclicTy(t) => return tcx.lift(t).map(|t| CyclicTy(t)),
+            CyclicConst(ct) => return tcx.lift(ct).map(|ct| CyclicConst(ct)),
             ProjectionMismatched(x) => ProjectionMismatched(x),
-            Sorts(ref x) => return tcx.lift(x).map(Sorts),
-            ExistentialMismatch(ref x) => return tcx.lift(x).map(ExistentialMismatch),
-            ConstMismatch(ref x) => return tcx.lift(x).map(ConstMismatch),
+            Sorts(x) => return tcx.lift(x).map(Sorts),
+            ExistentialMismatch(x) => return tcx.lift(x).map(ExistentialMismatch),
+            ConstMismatch(x) => return tcx.lift(x).map(ConstMismatch),
             IntrinsicCast => IntrinsicCast,
-            TargetFeatureCast(ref x) => TargetFeatureCast(*x),
-            ObjectUnsafeCoercion(ref x) => return tcx.lift(x).map(ObjectUnsafeCoercion),
+            TargetFeatureCast(x) => TargetFeatureCast(x),
+            ObjectUnsafeCoercion(x) => return tcx.lift(x).map(ObjectUnsafeCoercion),
         })
     }
 }
 
 impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> {
     type Lifted = ty::InstanceDef<'tcx>;
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
-        match *self {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match self {
             ty::InstanceDef::Item(def_id) => Some(ty::InstanceDef::Item(def_id)),
             ty::InstanceDef::VtableShim(def_id) => Some(ty::InstanceDef::VtableShim(def_id)),
             ty::InstanceDef::ReifyShim(def_id) => Some(ty::InstanceDef::ReifyShim(def_id)),
             ty::InstanceDef::Intrinsic(def_id) => Some(ty::InstanceDef::Intrinsic(def_id)),
-            ty::InstanceDef::FnPtrShim(def_id, ref ty) => {
+            ty::InstanceDef::FnPtrShim(def_id, ty) => {
                 Some(ty::InstanceDef::FnPtrShim(def_id, tcx.lift(ty)?))
             }
             ty::InstanceDef::Virtual(def_id, n) => Some(ty::InstanceDef::Virtual(def_id, n)),
             ty::InstanceDef::ClosureOnceShim { call_once } => {
                 Some(ty::InstanceDef::ClosureOnceShim { call_once })
             }
-            ty::InstanceDef::DropGlue(def_id, ref ty) => {
+            ty::InstanceDef::DropGlue(def_id, ty) => {
                 Some(ty::InstanceDef::DropGlue(def_id, tcx.lift(ty)?))
             }
-            ty::InstanceDef::CloneShim(def_id, ref ty) => {
+            ty::InstanceDef::CloneShim(def_id, ty) => {
                 Some(ty::InstanceDef::CloneShim(def_id, tcx.lift(ty)?))
             }
         }
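The `Lift` changes in this file switch `lift_to_tcx` from `&self` to by-value `self`; that is what lets the tuple and wrapper impls above collapse their nested `and_then` chains into `?`. A small sketch of the same pattern with a toy trait (not rustc's `Lift`, and with no `tcx` parameter):

    trait Lift: Sized {
        type Lifted;
        fn lift(self) -> Option<Self::Lifted>;
    }

    impl Lift for i32 {
        type Lifted = i64;
        fn lift(self) -> Option<i64> {
            Some(self as i64)
        }
    }

    // Taking `self` by value lets each component be consumed and lifted with `?`,
    // instead of `lift(&self.0).and_then(|a| lift(&self.1).map(|b| (a, b)))`.
    impl<A: Lift, B: Lift> Lift for (A, B) {
        type Lifted = (A::Lifted, B::Lifted);
        fn lift(self) -> Option<Self::Lifted> {
            Some((self.0.lift()?, self.1.lift()?))
        }
    }

    fn main() {
        assert_eq!((1i32, 2i32).lift(), Some((1i64, 2i64)));
    }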
@@ -743,8 +729,8 @@
         *self
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
-        false
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> ControlFlow<()> {
+        ControlFlow::CONTINUE
     }
 }
 
@@ -753,8 +739,9 @@
         (self.0.fold_with(folder), self.1.fold_with(folder))
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.0.visit_with(visitor) || self.1.visit_with(visitor)
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.0.visit_with(visitor)?;
+        self.1.visit_with(visitor)
     }
 }
 
@@ -765,8 +752,10 @@
         (self.0.fold_with(folder), self.1.fold_with(folder), self.2.fold_with(folder))
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.0.visit_with(visitor) || self.1.visit_with(visitor) || self.2.visit_with(visitor)
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.0.visit_with(visitor)?;
+        self.1.visit_with(visitor)?;
+        self.2.visit_with(visitor)
     }
 }
 
@@ -789,7 +778,7 @@
         Rc::new((**self).fold_with(folder))
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         (**self).visit_with(visitor)
     }
 }
@@ -799,7 +788,7 @@
         Arc::new((**self).fold_with(folder))
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         (**self).visit_with(visitor)
     }
 }
@@ -810,7 +799,7 @@
         box content
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         (**self).visit_with(visitor)
     }
 }
@@ -820,8 +809,8 @@
         self.iter().map(|t| t.fold_with(folder)).collect()
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.iter().try_for_each(|t| t.visit_with(visitor))
     }
 }
 
@@ -830,8 +819,8 @@
         self.iter().map(|t| t.fold_with(folder)).collect::<Vec<_>>().into_boxed_slice()
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.iter().try_for_each(|t| t.visit_with(visitor))
     }
 }
 
@@ -844,11 +833,11 @@
         folder.fold_binder(self)
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         self.as_ref().skip_binder().visit_with(visitor)
     }
 
-    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         visitor.visit_binder(self)
     }
 }
@@ -858,8 +847,8 @@
         fold_list(*self, folder, |tcx, v| tcx.intern_existential_predicates(v))
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|p| p.visit_with(visitor))
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.iter().try_for_each(|p| p.visit_with(visitor))
     }
 }
 
@@ -868,8 +857,8 @@
         fold_list(*self, folder, |tcx, v| tcx.intern_type_list(v))
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.iter().try_for_each(|t| t.visit_with(visitor))
     }
 }
 
@@ -878,8 +867,8 @@
         fold_list(*self, folder, |tcx, v| tcx.intern_projs(v))
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.iter().try_for_each(|t| t.visit_with(visitor))
     }
 }
 
@@ -904,20 +893,24 @@
         }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         use crate::ty::InstanceDef::*;
-        self.substs.visit_with(visitor)
-            || match self.def {
-                Item(def) => def.visit_with(visitor),
-                VtableShim(did) | ReifyShim(did) | Intrinsic(did) | Virtual(did, _) => {
-                    did.visit_with(visitor)
-                }
-                FnPtrShim(did, ty) | CloneShim(did, ty) => {
-                    did.visit_with(visitor) || ty.visit_with(visitor)
-                }
-                DropGlue(did, ty) => did.visit_with(visitor) || ty.visit_with(visitor),
-                ClosureOnceShim { call_once } => call_once.visit_with(visitor),
+        self.substs.visit_with(visitor)?;
+        match self.def {
+            Item(def) => def.visit_with(visitor),
+            VtableShim(did) | ReifyShim(did) | Intrinsic(did) | Virtual(did, _) => {
+                did.visit_with(visitor)
             }
+            FnPtrShim(did, ty) | CloneShim(did, ty) => {
+                did.visit_with(visitor)?;
+                ty.visit_with(visitor)
+            }
+            DropGlue(did, ty) => {
+                did.visit_with(visitor)?;
+                ty.visit_with(visitor)
+            }
+            ClosureOnceShim { call_once } => call_once.visit_with(visitor),
+        }
     }
 }
 
@@ -926,7 +919,7 @@
         Self { instance: self.instance.fold_with(folder), promoted: self.promoted }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         self.instance.visit_with(visitor)
     }
 }
@@ -975,19 +968,26 @@
         folder.fold_ty(*self)
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         match self.kind() {
             ty::RawPtr(ref tm) => tm.visit_with(visitor),
-            ty::Array(typ, sz) => typ.visit_with(visitor) || sz.visit_with(visitor),
+            ty::Array(typ, sz) => {
+                typ.visit_with(visitor)?;
+                sz.visit_with(visitor)
+            }
             ty::Slice(typ) => typ.visit_with(visitor),
             ty::Adt(_, substs) => substs.visit_with(visitor),
             ty::Dynamic(ref trait_ty, ref reg) => {
-                trait_ty.visit_with(visitor) || reg.visit_with(visitor)
+                trait_ty.visit_with(visitor)?;
+                reg.visit_with(visitor)
             }
             ty::Tuple(ts) => ts.visit_with(visitor),
             ty::FnDef(_, substs) => substs.visit_with(visitor),
             ty::FnPtr(ref f) => f.visit_with(visitor),
-            ty::Ref(r, ty, _) => r.visit_with(visitor) || ty.visit_with(visitor),
+            ty::Ref(r, ty, _) => {
+                r.visit_with(visitor)?;
+                ty.visit_with(visitor)
+            }
             ty::Generator(_did, ref substs, _) => substs.visit_with(visitor),
             ty::GeneratorWitness(ref types) => types.visit_with(visitor),
             ty::Closure(_did, ref substs) => substs.visit_with(visitor),
@@ -1006,11 +1006,11 @@
             | ty::Placeholder(..)
             | ty::Param(..)
             | ty::Never
-            | ty::Foreign(..) => false,
+            | ty::Foreign(..) => ControlFlow::CONTINUE,
         }
     }
 
-    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         visitor.visit_ty(self)
     }
 }
@@ -1024,11 +1024,11 @@
         folder.fold_region(*self)
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
-        false
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> ControlFlow<()> {
+        ControlFlow::CONTINUE
     }
 
-    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         visitor.visit_region(*self)
     }
 }
@@ -1039,11 +1039,11 @@
         folder.tcx().reuse_or_mk_predicate(*self, new)
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         ty::PredicateKind::super_visit_with(&self.inner.kind, visitor)
     }
 
-    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         visitor.visit_predicate(*self)
     }
 
@@ -1056,23 +1056,13 @@
     }
 }
 
-pub(super) trait PredicateVisitor<'tcx>: TypeVisitor<'tcx> {
-    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool;
-}
-
-impl<T: TypeVisitor<'tcx>> PredicateVisitor<'tcx> for T {
-    default fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool {
-        predicate.super_visit_with(self)
-    }
-}
-
 impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
     fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
         fold_list(*self, folder, |tcx, v| tcx.intern_predicates(v))
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|p| p.visit_with(visitor))
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.iter().try_for_each(|p| p.visit_with(visitor))
     }
 }
 
@@ -1081,8 +1071,8 @@
         self.iter().map(|x| x.fold_with(folder)).collect()
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.iter().try_for_each(|t| t.visit_with(visitor))
     }
 }
 
@@ -1101,11 +1091,12 @@
         folder.fold_const(*self)
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.ty.visit_with(visitor) || self.val.visit_with(visitor)
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.ty.visit_with(visitor)?;
+        self.val.visit_with(visitor)
     }
 
-    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         visitor.visit_const(self)
     }
 }
@@ -1125,7 +1116,7 @@
         }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         match *self {
             ty::ConstKind::Infer(ic) => ic.visit_with(visitor),
             ty::ConstKind::Param(p) => p.visit_with(visitor),
@@ -1133,7 +1124,7 @@
             ty::ConstKind::Value(_)
             | ty::ConstKind::Bound(..)
             | ty::ConstKind::Placeholder(_)
-            | ty::ConstKind::Error(_) => false,
+            | ty::ConstKind::Error(_) => ControlFlow::CONTINUE,
         }
     }
 }
@@ -1143,8 +1134,8 @@
         *self
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
-        false
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> ControlFlow<()> {
+        ControlFlow::CONTINUE
     }
 }
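The visitor changes in this file replace `bool`-returning `super_visit_with`/`visit_with` (where `true` meant "stop visiting") with `std::ops::ControlFlow<()>`, so the hand-written `a || b` short-circuit chains become `?` and `try_for_each`. A self-contained sketch of that style with a toy tree type (not rustc's visitors; assumes a toolchain where `ControlFlow` is stable, 1.55+):

    use std::ops::ControlFlow;

    struct Node {
        value: i32,
        children: Vec<Node>,
    }

    // Break(()) as soon as a negative value is found, Continue(()) otherwise.
    fn visit(node: &Node) -> ControlFlow<()> {
        if node.value < 0 {
            return ControlFlow::Break(());
        }
        // `?` propagates a child's Break upward, like `self.0.visit_with(visitor)?;`
        // above, and try_for_each stops at the first child that breaks.
        node.children.iter().try_for_each(visit)?;
        ControlFlow::Continue(())
    }

    fn main() {
        let ok = Node { value: 1, children: vec![Node { value: 2, children: vec![] }] };
        let bad = Node { value: 1, children: vec![Node { value: -2, children: vec![] }] };
        assert_eq!(visit(&ok), ControlFlow::Continue(()));
        assert_eq!(visit(&bad), ControlFlow::Break(()));
    }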
 
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
index 724ec10..384d08f 100644
--- a/compiler/rustc_middle/src/ty/sty.rs
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -210,6 +210,18 @@
             _ => false,
         }
     }
+
+    /// Get the article ("a" or "an") to use with this type.
+    pub fn article(&self) -> &'static str {
+        match self {
+            Int(_) | Float(_) | Array(_, _) => "an",
+            Adt(def, _) if def.is_enum() => "an",
+            // This should never happen, but ICEing and causing the user's code
+            // to not compile felt too harsh.
+            Error(_) => "a",
+            _ => "a",
+        }
+    }
 }
 
 // `TyKind` is used a lot. Make sure it doesn't unintentionally get bigger.
@@ -376,9 +388,19 @@
         self.split().parent_substs
     }
 
+    /// Returns an iterator over the list of types of captured paths by the closure.
+    /// In case there was a type error in figuring out the types of the captured path, an
+    /// empty iterator is returned.
     #[inline]
     pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
-        self.tupled_upvars_ty().tuple_fields()
+        match self.tupled_upvars_ty().kind() {
+            TyKind::Error(_) => None,
+            TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+            TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
+            ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
+        }
+        .into_iter()
+        .flatten()
     }
 
     /// Returns the tuple type representing the upvars for this closure.
@@ -503,9 +525,19 @@
         self.split().witness.expect_ty()
     }
 
+    /// Returns an iterator over the list of types of captured paths by the generator.
+    /// In case there was a type error in figuring out the types of the captured path, an
+    /// empty iterator is returned.
     #[inline]
     pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
-        self.tupled_upvars_ty().tuple_fields()
+        match self.tupled_upvars_ty().kind() {
+            TyKind::Error(_) => None,
+            TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+            TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
+            ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
+        }
+        .into_iter()
+        .flatten()
     }
 
     /// Returns the tuple type representing the upvars for this generator.
@@ -648,13 +680,32 @@
 }
 
 impl<'tcx> UpvarSubsts<'tcx> {
+    /// Returns an iterator over the list of types of captured paths by the closure/generator.
+    /// In case there was a type error in figuring out the types of the captured path, an
+    /// empty iterator is returned.
     #[inline]
     pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
-        let tupled_upvars_ty = match self {
-            UpvarSubsts::Closure(substs) => substs.as_closure().split().tupled_upvars_ty,
-            UpvarSubsts::Generator(substs) => substs.as_generator().split().tupled_upvars_ty,
+        let tupled_tys = match self {
+            UpvarSubsts::Closure(substs) => substs.as_closure().tupled_upvars_ty(),
+            UpvarSubsts::Generator(substs) => substs.as_generator().tupled_upvars_ty(),
         };
-        tupled_upvars_ty.expect_ty().tuple_fields()
+
+        match tupled_tys.kind() {
+            TyKind::Error(_) => None,
+            TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+            TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
+            ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
+        }
+        .into_iter()
+        .flatten()
+    }
+
+    #[inline]
+    pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
+        match self {
+            UpvarSubsts::Closure(substs) => substs.as_closure().tupled_upvars_ty(),
+            UpvarSubsts::Generator(substs) => substs.as_generator().tupled_upvars_ty(),
+        }
     }
 }
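All three `upvar_tys` rewrites above use the same shape: build an `Option` of the tuple-field iterator (None when the capture types hit a type error), then `.into_iter().flatten()` so the caller always receives an iterator, merely an empty one in the error case. A toy sketch of that shape, with made-up types:

    // Err(()) stands in for the "type error" case that should yield nothing.
    fn field_names(fields: Result<Vec<&'static str>, ()>) -> impl Iterator<Item = &'static str> {
        match fields {
            Err(()) => None,
            Ok(names) => Some(names.into_iter()),
        }
        .into_iter()
        .flatten()
    }

    fn main() {
        assert_eq!(field_names(Ok(vec!["a", "b"])).count(), 2);
        assert_eq!(field_names(Err(())).count(), 0);
    }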
 
@@ -695,14 +746,16 @@
         use crate::ty::ToPredicate;
         match self.skip_binder() {
             ExistentialPredicate::Trait(tr) => {
-                Binder(tr).with_self_ty(tcx, self_ty).without_const().to_predicate(tcx)
+                self.rebind(tr).with_self_ty(tcx, self_ty).without_const().to_predicate(tcx)
             }
             ExistentialPredicate::Projection(p) => {
-                Binder(p.with_self_ty(tcx, self_ty)).to_predicate(tcx)
+                self.rebind(p.with_self_ty(tcx, self_ty)).to_predicate(tcx)
             }
             ExistentialPredicate::AutoTrait(did) => {
-                let trait_ref =
-                    Binder(ty::TraitRef { def_id: did, substs: tcx.mk_substs_trait(self_ty, &[]) });
+                let trait_ref = self.rebind(ty::TraitRef {
+                    def_id: did,
+                    substs: tcx.mk_substs_trait(self_ty, &[]),
+                });
                 trait_ref.without_const().to_predicate(tcx)
             }
         }
@@ -767,7 +820,7 @@
 
 impl<'tcx> Binder<&'tcx List<ExistentialPredicate<'tcx>>> {
     pub fn principal(&self) -> Option<ty::Binder<ExistentialTraitRef<'tcx>>> {
-        self.skip_binder().principal().map(Binder::bind)
+        self.map_bound(|b| b.principal()).transpose()
     }
 
     pub fn principal_def_id(&self) -> Option<DefId> {
@@ -850,8 +903,7 @@
     }
 
     pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> {
-        // Note that we preserve binding levels
-        Binder(ty::TraitPredicate { trait_ref: self.skip_binder() })
+        self.map_bound(|trait_ref| ty::TraitPredicate { trait_ref })
     }
 }
 
@@ -993,6 +1045,19 @@
         Binder(f(self.0))
     }
 
+    /// Wraps a `value` in a binder, using the same bound variables as the
+    /// current `Binder`. This should not be used if the new value *changes*
+    /// the bound variables. Note: the (old or new) value itself does not
+    /// necessarily need to *name* all the bound variables.
+    ///
+    /// This currently doesn't do anything different than `bind`, because we
+    /// don't actually track bound vars. However, semantically, it is different
+    /// because bound vars aren't allowed to change here, whereas they are
+    /// in `bind`. This may be (debug) asserted in the future.
+    pub fn rebind<U>(&self, value: U) -> Binder<U> {
+        Binder(value)
+    }
+
     /// Unwraps and returns the value within, but only if it contains
     /// no bound vars at all. (In other words, if this binder --
     /// and indeed any enclosing binder -- doesn't bind anything at
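For the `rebind` method added above, the doc comment describes a contract that the type does not yet enforce. A toy binder that actually records its bound variable names makes the intended bind/rebind distinction concrete (illustrative only; rustc's `Binder` tracks no such list at this point):

    #[derive(Debug, PartialEq)]
    struct Binder<T> {
        bound_vars: Vec<&'static str>,
        value: T,
    }

    impl<T> Binder<T> {
        // A fresh binder: the caller states which variables it binds.
        fn bind(bound_vars: Vec<&'static str>, value: T) -> Binder<T> {
            Binder { bound_vars, value }
        }

        // Same binder, new value: the bound variables are carried over unchanged,
        // so the new value may name fewer of them but must not require new ones.
        fn rebind<U>(&self, value: U) -> Binder<U> {
            Binder { bound_vars: self.bound_vars.clone(), value }
        }
    }

    fn main() {
        let sig = Binder::bind(vec!["'a"], "fn(&'a u8) -> &'a u8");
        assert_eq!(sig.value, "fn(&'a u8) -> &'a u8");
        let ret = sig.rebind("&'a u8");
        assert_eq!(ret.bound_vars, vec!["'a"]);
        assert_eq!(ret.value, "&'a u8");
    }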
@@ -1513,6 +1578,9 @@
     /// then this function would return a `exists T. T: Iterator` existential trait
     /// reference.
     pub fn trait_ref(&self, tcx: TyCtxt<'_>) -> ty::ExistentialTraitRef<'tcx> {
+        // FIXME(generic_associated_types): substs is the substs of the
+        // associated type, which should be truncated to get the correct substs
+        // for the trait.
         let def_id = tcx.associated_item(self.item_def_id).container.id();
         ty::ExistentialTraitRef { def_id, substs: self.substs }
     }
@@ -1763,10 +1831,7 @@
 
     #[inline]
     pub fn is_never(&self) -> bool {
-        match self.kind() {
-            Never => true,
-            _ => false,
-        }
+        matches!(self.kind(), Never)
     }
 
     /// Checks whether a type is definitely uninhabited. This is
@@ -1800,10 +1865,10 @@
             }
             ty::Array(ty, len) => {
                 match len.try_eval_usize(tcx, ParamEnv::empty()) {
+                    Some(0) | None => false,
                     // If the array is definitely non-empty, it's uninhabited if
                     // the type of its elements is uninhabited.
-                    Some(n) if n != 0 => ty.conservative_is_privately_uninhabited(tcx),
-                    _ => false,
+                    Some(1..) => ty.conservative_is_privately_uninhabited(tcx),
                 }
             }
             ty::Ref(..) => {
@@ -1823,34 +1888,22 @@
 
     #[inline]
     pub fn is_adt(&self) -> bool {
-        match self.kind() {
-            Adt(..) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Adt(..))
     }
 
     #[inline]
     pub fn is_ref(&self) -> bool {
-        match self.kind() {
-            Ref(..) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Ref(..))
     }
 
     #[inline]
     pub fn is_ty_var(&self) -> bool {
-        match self.kind() {
-            Infer(TyVar(_)) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Infer(TyVar(_)))
     }
 
     #[inline]
     pub fn is_ty_infer(&self) -> bool {
-        match self.kind() {
-            Infer(_) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Infer(_))
     }
 
     #[inline]
@@ -1880,20 +1933,14 @@
     #[inline]
     pub fn is_slice(&self) -> bool {
         match self.kind() {
-            RawPtr(TypeAndMut { ty, .. }) | Ref(_, ty, _) => match ty.kind() {
-                Slice(_) | Str => true,
-                _ => false,
-            },
+            RawPtr(TypeAndMut { ty, .. }) | Ref(_, ty, _) => matches!(ty.kind(), Slice(_) | Str),
             _ => false,
         }
     }
 
     #[inline]
     pub fn is_array(&self) -> bool {
-        match self.kind() {
-            Array(..) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Array(..))
     }
 
     #[inline]
@@ -1940,27 +1987,21 @@
 
     #[inline]
     pub fn is_region_ptr(&self) -> bool {
-        match self.kind() {
-            Ref(..) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Ref(..))
     }
 
     #[inline]
     pub fn is_mutable_ptr(&self) -> bool {
-        match self.kind() {
+        matches!(
+            self.kind(),
             RawPtr(TypeAndMut { mutbl: hir::Mutability::Mut, .. })
-            | Ref(_, _, hir::Mutability::Mut) => true,
-            _ => false,
-        }
+                | Ref(_, _, hir::Mutability::Mut)
+        )
     }
 
     #[inline]
     pub fn is_unsafe_ptr(&self) -> bool {
-        match self.kind() {
-            RawPtr(_) => true,
-            _ => false,
-        }
+        matches!(self.kind(), RawPtr(_))
     }
 
     /// Tests if this is any kind of primitive pointer type (reference, raw pointer, fn pointer).
@@ -1990,35 +2031,22 @@
     /// contents are abstract to rustc.)
     #[inline]
     pub fn is_scalar(&self) -> bool {
-        match self.kind() {
-            Bool
-            | Char
-            | Int(_)
-            | Float(_)
-            | Uint(_)
+        matches!(
+            self.kind(),
+            Bool | Char | Int(_) | Float(_) | Uint(_) | FnDef(..) | FnPtr(_) | RawPtr(_)
             | Infer(IntVar(_) | FloatVar(_))
-            | FnDef(..)
-            | FnPtr(_)
-            | RawPtr(_) => true,
-            _ => false,
-        }
+        )
     }
 
     /// Returns `true` if this type is a floating point type.
     #[inline]
     pub fn is_floating_point(&self) -> bool {
-        match self.kind() {
-            Float(_) | Infer(FloatVar(_)) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Float(_) | Infer(FloatVar(_)))
     }
 
     #[inline]
     pub fn is_trait(&self) -> bool {
-        match self.kind() {
-            Dynamic(..) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Dynamic(..))
     }
 
     #[inline]
@@ -2031,52 +2059,32 @@
 
     #[inline]
     pub fn is_closure(&self) -> bool {
-        match self.kind() {
-            Closure(..) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Closure(..))
     }
 
     #[inline]
     pub fn is_generator(&self) -> bool {
-        match self.kind() {
-            Generator(..) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Generator(..))
     }
 
     #[inline]
     pub fn is_integral(&self) -> bool {
-        match self.kind() {
-            Infer(IntVar(_)) | Int(_) | Uint(_) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Infer(IntVar(_)) | Int(_) | Uint(_))
     }
 
     #[inline]
     pub fn is_fresh_ty(&self) -> bool {
-        match self.kind() {
-            Infer(FreshTy(_)) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Infer(FreshTy(_)))
     }
 
     #[inline]
     pub fn is_fresh(&self) -> bool {
-        match self.kind() {
-            Infer(FreshTy(_)) => true,
-            Infer(FreshIntTy(_)) => true,
-            Infer(FreshFloatTy(_)) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Infer(FreshTy(_) | FreshIntTy(_) | FreshFloatTy(_)))
     }
 
     #[inline]
     pub fn is_char(&self) -> bool {
-        match self.kind() {
-            Char => true,
-            _ => false,
-        }
+        matches!(self.kind(), Char)
     }
 
     #[inline]
@@ -2086,34 +2094,22 @@
 
     #[inline]
     pub fn is_signed(&self) -> bool {
-        match self.kind() {
-            Int(_) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Int(_))
     }
 
     #[inline]
     pub fn is_ptr_sized_integral(&self) -> bool {
-        match self.kind() {
-            Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize))
     }
 
     #[inline]
     pub fn is_machine(&self) -> bool {
-        match self.kind() {
-            Int(..) | Uint(..) | Float(..) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Int(..) | Uint(..) | Float(..))
     }
 
     #[inline]
     pub fn has_concrete_skeleton(&self) -> bool {
-        match self.kind() {
-            Param(_) | Infer(_) | Error(_) => false,
-            _ => true,
-        }
+        !matches!(self.kind(), Param(_) | Infer(_) | Error(_))
     }
 
     /// Returns the type and mutability of `*ty`.
@@ -2156,26 +2152,17 @@
 
     #[inline]
     pub fn is_fn(&self) -> bool {
-        match self.kind() {
-            FnDef(..) | FnPtr(_) => true,
-            _ => false,
-        }
+        matches!(self.kind(), FnDef(..) | FnPtr(_))
     }
 
     #[inline]
     pub fn is_fn_ptr(&self) -> bool {
-        match self.kind() {
-            FnPtr(_) => true,
-            _ => false,
-        }
+        matches!(self.kind(), FnPtr(_))
     }
 
     #[inline]
     pub fn is_impl_trait(&self) -> bool {
-        match self.kind() {
-            Opaque(..) => true,
-            _ => false,
-        }
+        matches!(self.kind(), Opaque(..))
     }
 
     #[inline]
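Most of the predicate-method hunks above are a mechanical refactor from `match ... { Pattern => true, _ => false }` to the `matches!` macro. A tiny self-contained sketch of the equivalence, using a toy enum rather than rustc's `TyKind`:

```rust
// Toy enum standing in for TyKind.
enum Kind {
    Ref,
    RawPtr,
    Other,
}

// Old style: explicit match returning a bool.
fn is_ref_old(k: &Kind) -> bool {
    match k {
        Kind::Ref => true,
        _ => false,
    }
}

// New style: matches! expands to the same match.
fn is_ref_new(k: &Kind) -> bool {
    matches!(k, Kind::Ref)
}

fn main() {
    for k in [Kind::Ref, Kind::RawPtr, Kind::Other] {
        assert_eq!(is_ref_old(&k), is_ref_new(&k));
    }
}
```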
diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs
index 1bd3bcb..07f775c 100644
--- a/compiler/rustc_middle/src/ty/subst.rs
+++ b/compiler/rustc_middle/src/ty/subst.rs
@@ -1,6 +1,5 @@
 // Type substitutions.
 
-use crate::infer::canonical::Canonical;
 use crate::ty::codec::{TyDecoder, TyEncoder};
 use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
 use crate::ty::sty::{ClosureSubsts, GeneratorSubsts};
@@ -18,6 +17,7 @@
 use std::marker::PhantomData;
 use std::mem;
 use std::num::NonZeroUsize;
+use std::ops::ControlFlow;
 
 /// An entity in the Rust type system, which can be one of
 /// several kinds (types, lifetimes, and consts).
@@ -142,11 +142,11 @@
 impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> {
     type Lifted = GenericArg<'tcx>;
 
-    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
         match self.unpack() {
-            GenericArgKind::Lifetime(lt) => tcx.lift(&lt).map(|lt| lt.into()),
-            GenericArgKind::Type(ty) => tcx.lift(&ty).map(|ty| ty.into()),
-            GenericArgKind::Const(ct) => tcx.lift(&ct).map(|ct| ct.into()),
+            GenericArgKind::Lifetime(lt) => tcx.lift(lt).map(|lt| lt.into()),
+            GenericArgKind::Type(ty) => tcx.lift(ty).map(|ty| ty.into()),
+            GenericArgKind::Const(ct) => tcx.lift(ct).map(|ct| ct.into()),
         }
     }
 }
@@ -160,7 +160,7 @@
         }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
         match self.unpack() {
             GenericArgKind::Lifetime(lt) => lt.visit_with(visitor),
             GenericArgKind::Type(ty) => ty.visit_with(visitor),
@@ -392,8 +392,8 @@
         }
     }
 
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> {
+        self.iter().try_for_each(|t| t.visit_with(visitor))
     }
 }
 
@@ -648,8 +648,6 @@
     }
 }
 
-pub type CanonicalUserSubsts<'tcx> = Canonical<'tcx, UserSubsts<'tcx>>;
-
 /// Stores the user-given substs to reach some fully qualified path
 /// (e.g., `<T>::Item` or `<T as Trait>::Item`).
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
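The `TypeFoldable` changes in this file replace the old `bool`-returning visitor plumbing (`iter().any(...)`) with `std::ops::ControlFlow` and `Iterator::try_for_each`, which short-circuits as soon as a visit breaks. A minimal standalone sketch of that pattern over a plain slice (not rustc's actual visitor traits):

```rust
use std::ops::ControlFlow;

// Visit every element, stopping early if the callback breaks.
fn visit_all(xs: &[i32], mut visit: impl FnMut(i32) -> ControlFlow<()>) -> ControlFlow<()> {
    xs.iter().copied().try_for_each(|x| visit(x))
}

fn main() {
    // Break as soon as a negative number is seen.
    let flow = visit_all(&[1, 2, -3, 4], |x| {
        if x < 0 { ControlFlow::Break(()) } else { ControlFlow::Continue(()) }
    });
    assert!(matches!(flow, ControlFlow::Break(())));
}
```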
diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs
index 9d5b558..86476df 100644
--- a/compiler/rustc_middle/src/ty/trait_def.rs
+++ b/compiler/rustc_middle/src/ty/trait_def.rs
@@ -123,10 +123,26 @@
         self_ty: Ty<'tcx>,
         mut f: F,
     ) {
+        let _: Option<()> = self.find_map_relevant_impl(def_id, self_ty, |did| {
+            f(did);
+            None
+        });
+    }
+
+    /// Applies the function to every impl that could possibly match the self type `self_ty`, and
+    /// returns the first non-`None` value.
+    pub fn find_map_relevant_impl<T, F: FnMut(DefId) -> Option<T>>(
+        self,
+        def_id: DefId,
+        self_ty: Ty<'tcx>,
+        mut f: F,
+    ) -> Option<T> {
         let impls = self.trait_impls_of(def_id);
 
         for &impl_def_id in impls.blanket_impls.iter() {
-            f(impl_def_id);
+            if let result @ Some(_) = f(impl_def_id) {
+                return result;
+            }
         }
 
         // simplify_type(.., false) basically replaces type parameters and
@@ -157,14 +173,20 @@
         if let Some(simp) = fast_reject::simplify_type(self, self_ty, true) {
             if let Some(impls) = impls.non_blanket_impls.get(&simp) {
                 for &impl_def_id in impls {
-                    f(impl_def_id);
+                    if let result @ Some(_) = f(impl_def_id) {
+                        return result;
+                    }
                 }
             }
         } else {
             for &impl_def_id in impls.non_blanket_impls.values().flatten() {
-                f(impl_def_id);
+                if let result @ Some(_) = f(impl_def_id) {
+                    return result;
+                }
             }
         }
+
+        None
     }
 
     /// Returns an iterator containing all impls
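The new `find_map_relevant_impl` above generalizes `for_each_relevant_impl` into a short-circuiting visitor, and the old method is re-expressed on top of it. A minimal sketch of that pattern with hypothetical names (plain `u32` ids instead of `DefId`, a slice instead of the impl maps):

```rust
// Short-circuiting walk: return the first non-None value produced by `f`.
fn find_map_impls<T>(impls: &[u32], mut f: impl FnMut(u32) -> Option<T>) -> Option<T> {
    for &impl_id in impls {
        if let result @ Some(_) = f(impl_id) {
            return result;
        }
    }
    None
}

// The old "visit everything" API, reusing the short-circuiting walker by
// never returning Some.
fn for_each_impls(impls: &[u32], mut f: impl FnMut(u32)) {
    let _: Option<()> = find_map_impls(impls, |id| {
        f(id);
        None
    });
}

fn main() {
    let impls = [3, 7, 11];
    let first_big = find_map_impls(&impls, |id| if id > 5 { Some(id) } else { None });
    assert_eq!(first_big, Some(7));
    for_each_impls(&impls, |id| println!("impl {}", id));
}
```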
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index 4127b65..5f117e1 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -2,13 +2,12 @@
 
 use crate::ich::NodeIdHashingMode;
 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use crate::mir::interpret::{sign_extend, truncate};
 use crate::ty::fold::TypeFolder;
 use crate::ty::layout::IntegerExt;
 use crate::ty::query::TyCtxtAt;
-use crate::ty::subst::{GenericArgKind, InternalSubsts, Subst, SubstsRef};
+use crate::ty::subst::{GenericArgKind, Subst, SubstsRef};
 use crate::ty::TyKind::*;
-use crate::ty::{self, DefIdTree, GenericParamDefKind, List, Ty, TyCtxt, TypeFoldable};
+use crate::ty::{self, DefIdTree, List, Ty, TyCtxt, TypeFoldable};
 use rustc_apfloat::Float as _;
 use rustc_ast as ast;
 use rustc_attr::{self as attr, SignedInt, UnsignedInt};
@@ -38,7 +37,7 @@
                 let size = ty::tls::with(|tcx| Integer::from_attr(&tcx, SignedInt(ity)).size());
                 let x = self.val;
                 // sign extend the raw representation to be an i128
-                let x = sign_extend(x, size) as i128;
+                let x = size.sign_extend(x) as i128;
                 write!(fmt, "{}", x)
             }
             _ => write!(fmt, "{}", self.val),
@@ -47,7 +46,7 @@
 }
 
 fn signed_min(size: Size) -> i128 {
-    sign_extend(1_u128 << (size.bits() - 1), size) as i128
+    size.sign_extend(1_u128 << (size.bits() - 1)) as i128
 }
 
 fn signed_max(size: Size) -> i128 {
@@ -77,14 +76,14 @@
         let (val, oflo) = if signed {
             let min = signed_min(size);
             let max = signed_max(size);
-            let val = sign_extend(self.val, size) as i128;
+            let val = size.sign_extend(self.val) as i128;
             assert!(n < (i128::MAX as u128));
             let n = n as i128;
             let oflo = val > max - n;
             let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
             // zero the upper bits
             let val = val as u128;
-            let val = truncate(val, size);
+            let val = size.truncate(val);
             (val, oflo)
         } else {
             let max = unsigned_max(size);
@@ -341,19 +340,19 @@
     pub fn calculate_dtor(
         self,
         adt_did: DefId,
-        validate: &mut dyn FnMut(Self, DefId) -> Result<(), ErrorReported>,
+        validate: impl Fn(Self, DefId) -> Result<(), ErrorReported>,
     ) -> Option<ty::Destructor> {
         let drop_trait = self.lang_items().drop_trait()?;
         self.ensure().coherent_trait(drop_trait);
 
-        let mut dtor_did = None;
         let ty = self.type_of(adt_did);
-        self.for_each_relevant_impl(drop_trait, ty, |impl_did| {
+        let dtor_did = self.find_map_relevant_impl(drop_trait, ty, |impl_did| {
             if let Some(item) = self.associated_items(impl_did).in_definition_order().next() {
                 if validate(self, impl_did).is_ok() {
-                    dtor_did = Some(item.def_id);
+                    return Some(item.def_id);
                 }
             }
+            None
         });
 
         Some(ty::Destructor { did: dtor_did? })
@@ -509,20 +508,6 @@
         Some(ty::Binder::bind(env_ty))
     }
 
-    /// Given the `DefId` of some item that has no type or const parameters, make
-    /// a suitable "empty substs" for it.
-    pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> SubstsRef<'tcx> {
-        InternalSubsts::for_item(self, item_def_id, |param, _| match param.kind {
-            GenericParamDefKind::Lifetime => self.lifetimes.re_erased.into(),
-            GenericParamDefKind::Type { .. } => {
-                bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
-            }
-            GenericParamDefKind::Const { .. } => {
-                bug!("empty_substs_for_def_id: {:?} has const parameters", item_def_id)
-            }
-        })
-    }
-
     /// Returns `true` if the node pointed to by `def_id` is a `static` item.
     pub fn is_static(self, def_id: DefId) -> bool {
         self.static_mutability(def_id).is_some()
@@ -543,8 +528,12 @@
         // Make sure that any constants in the static's type are evaluated.
         let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));
 
+        // Make sure that accesses to unsafe statics end up using raw pointers.
+        // For thread-locals, this needs to be kept in sync with `Rvalue::ty`.
         if self.is_mutable_static(def_id) {
             self.mk_mut_ptr(static_ty)
+        } else if self.is_foreign_item(def_id) {
+            self.mk_imm_ptr(static_ty)
         } else {
             self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
         }
@@ -646,8 +635,8 @@
             }
             ty::Char => Some(std::char::MAX as u128),
             ty::Float(fty) => Some(match fty {
-                ast::FloatTy::F32 => ::rustc_apfloat::ieee::Single::INFINITY.to_bits(),
-                ast::FloatTy::F64 => ::rustc_apfloat::ieee::Double::INFINITY.to_bits(),
+                ast::FloatTy::F32 => rustc_apfloat::ieee::Single::INFINITY.to_bits(),
+                ast::FloatTy::F64 => rustc_apfloat::ieee::Double::INFINITY.to_bits(),
             }),
             _ => None,
         };
@@ -660,7 +649,7 @@
         let val = match self.kind() {
             ty::Int(_) | ty::Uint(_) => {
                 let (size, signed) = int_size_and_signed(tcx, self);
-                let val = if signed { truncate(signed_min(size) as u128, size) } else { 0 };
+                let val = if signed { size.truncate(signed_min(size) as u128) } else { 0 };
                 Some(val)
             }
             ty::Char => Some(0),
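The `sign_extend`/`truncate` calls above move from free functions to methods on `Size`, but the arithmetic is unchanged. A minimal standalone sketch of what that arithmetic does for a value occupying the low `bits` bits (assuming `1 <= bits <= 128`, as `Size` guarantees in practice):

```rust
// Interpret the low `bits` bits of `value` as a signed integer and
// sign-extend it to the full 128-bit width.
fn sign_extend(value: u128, bits: u32) -> u128 {
    let shift = 128 - bits;
    // Move the sign bit to the top, then arithmetic-shift back down.
    (((value << shift) as i128) >> shift) as u128
}

// Keep only the low `bits` bits, discarding anything above them.
fn truncate(value: u128, bits: u32) -> u128 {
    if bits == 128 { value } else { value & ((1u128 << bits) - 1) }
}

fn main() {
    // 0xFF as an 8-bit value is -1; sign-extended it becomes all ones.
    assert_eq!(sign_extend(0xFF, 8) as i128, -1);
    assert_eq!(sign_extend(0x7F, 8) as i128, 127);
    // Truncating back to 8 bits recovers the original payload.
    assert_eq!(truncate(sign_extend(0xFF, 8), 8), 0xFF);
}
```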
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs
index 80ade7d..357a0dd 100644
--- a/compiler/rustc_middle/src/ty/walk.rs
+++ b/compiler/rustc_middle/src/ty/walk.rs
@@ -3,7 +3,7 @@
 
 use crate::ty;
 use crate::ty::subst::{GenericArg, GenericArgKind};
-use rustc_data_structures::mini_set::MiniSet;
+use rustc_data_structures::sso::SsoHashSet;
 use smallvec::{self, SmallVec};
 
 // The TypeWalker's stack is hot enough that it's worth going to some effort to
@@ -13,7 +13,7 @@
 pub struct TypeWalker<'tcx> {
     stack: TypeWalkerStack<'tcx>,
     last_subtree: usize,
-    visited: MiniSet<GenericArg<'tcx>>,
+    visited: SsoHashSet<GenericArg<'tcx>>,
 }
 
 /// An iterator for walking the type tree.
@@ -26,7 +26,7 @@
 /// skips any types that are already there.
 impl<'tcx> TypeWalker<'tcx> {
     pub fn new(root: GenericArg<'tcx>) -> Self {
-        Self { stack: smallvec![root], last_subtree: 1, visited: MiniSet::new() }
+        Self { stack: smallvec![root], last_subtree: 1, visited: SsoHashSet::new() }
     }
 
     /// Skips the subtree corresponding to the last type
@@ -87,7 +87,7 @@
     /// and skips any types that are already there.
     pub fn walk_shallow(
         self,
-        visited: &mut MiniSet<GenericArg<'tcx>>,
+        visited: &mut SsoHashSet<GenericArg<'tcx>>,
     ) -> impl Iterator<Item = GenericArg<'tcx>> {
         let mut stack = SmallVec::new();
         push_inner(&mut stack, self);
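The walker above switches its `visited` set from `MiniSet` to `SsoHashSet`, a small-size-optimized set. This is a rough sketch of the idea only (not `rustc_data_structures`' actual implementation, and the inline capacity of 8 is an assumption): keep a few elements inline with linear search, and spill to a real `HashSet` once the set grows.

```rust
use std::collections::HashSet;
use std::hash::Hash;

// Sketch of a small-size-optimized set; threshold chosen arbitrarily.
enum SsoSet<T> {
    Small(Vec<T>),
    Large(HashSet<T>),
}

impl<T: Eq + Hash> SsoSet<T> {
    fn new() -> Self {
        SsoSet::Small(Vec::new())
    }

    /// Returns `true` if the value was newly inserted.
    fn insert(&mut self, value: T) -> bool {
        const SMALL_CAP: usize = 8; // assumed inline capacity for the sketch
        match self {
            SsoSet::Small(vec) => {
                if vec.contains(&value) {
                    return false;
                }
                if vec.len() < SMALL_CAP {
                    vec.push(value);
                } else {
                    // Spill the inline elements into a hash set and keep growing there.
                    let mut set: HashSet<T> = vec.drain(..).collect();
                    set.insert(value);
                    *self = SsoSet::Large(set);
                }
                true
            }
            SsoSet::Large(set) => set.insert(value),
        }
    }
}

fn main() {
    let mut s = SsoSet::new();
    assert!(s.insert(1));
    assert!(!s.insert(1));
}
```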
diff --git a/compiler/rustc_mir/Cargo.toml b/compiler/rustc_mir/Cargo.toml
index a6d2224..487668c 100644
--- a/compiler/rustc_mir/Cargo.toml
+++ b/compiler/rustc_mir/Cargo.toml
@@ -12,7 +12,6 @@
 rustc_graphviz = { path = "../rustc_graphviz" }
 itertools = "0.9"
 tracing = "0.1"
-log_settings = "0.1.1"
 polonius-engine = "0.12.0"
 regex = "1"
 rustc_middle = { path = "../rustc_middle" }
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs
index 11122b1..1474c7a 100644
--- a/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs
@@ -120,7 +120,7 @@
                 let move_out = self.move_data.moves[(*move_site).moi];
                 let moved_place = &self.move_data.move_paths[move_out.path].place;
                 // `*(_1)` where `_1` is a `Box` is actually a move out.
-                let is_box_move = moved_place.as_ref().projection == &[ProjectionElem::Deref]
+                let is_box_move = moved_place.as_ref().projection == [ProjectionElem::Deref]
                     && self.body.local_decls[moved_place.local].ty.is_box();
 
                 !is_box_move
@@ -336,10 +336,11 @@
                 };
                 if let ty::Param(param_ty) = ty.kind() {
                     let tcx = self.infcx.tcx;
-                    let generics = tcx.generics_of(self.mir_def_id);
+                    let generics = tcx.generics_of(self.mir_def_id());
                     let param = generics.type_param(&param_ty, tcx);
-                    if let Some(generics) =
-                        tcx.hir().get_generics(tcx.closure_base_def_id(self.mir_def_id.to_def_id()))
+                    if let Some(generics) = tcx
+                        .hir()
+                        .get_generics(tcx.closure_base_def_id(self.mir_def_id().to_def_id()))
                     {
                         suggest_constraining_type_param(
                             tcx,
@@ -1004,7 +1005,7 @@
                 format!("`{}` would have to be valid for `{}`...", name, region_name),
             );
 
-            let fn_hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
+            let fn_hir_id = self.mir_hir_id();
             err.span_label(
                 drop_span,
                 format!(
@@ -1019,7 +1020,7 @@
                             match &self
                                 .infcx
                                 .tcx
-                                .typeck(self.mir_def_id)
+                                .typeck(self.mir_def_id())
                                 .node_type(fn_hir_id)
                                 .kind()
                             {
@@ -1369,7 +1370,7 @@
     ) -> DiagnosticBuilder<'cx> {
         let tcx = self.infcx.tcx;
 
-        let (_, escapes_from) = tcx.article_and_description(self.mir_def_id.to_def_id());
+        let (_, escapes_from) = tcx.article_and_description(self.mir_def_id().to_def_id());
 
         let mut err =
             borrowck_errors::borrowed_data_escapes_closure(tcx, escape_span, escapes_from);
@@ -1708,15 +1709,15 @@
     ) -> Option<AnnotatedBorrowFnSignature<'tcx>> {
         // Define a fallback for when we can't match a closure.
         let fallback = || {
-            let is_closure = self.infcx.tcx.is_closure(self.mir_def_id.to_def_id());
+            let is_closure = self.infcx.tcx.is_closure(self.mir_def_id().to_def_id());
             if is_closure {
                 None
             } else {
-                let ty = self.infcx.tcx.type_of(self.mir_def_id);
+                let ty = self.infcx.tcx.type_of(self.mir_def_id());
                 match ty.kind() {
                     ty::FnDef(_, _) | ty::FnPtr(_) => self.annotate_fn_sig(
-                        self.mir_def_id.to_def_id(),
-                        self.infcx.tcx.fn_sig(self.mir_def_id),
+                        self.mir_def_id().to_def_id(),
+                        self.infcx.tcx.fn_sig(self.mir_def_id()),
                     ),
                     _ => None,
                 }
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs
index 629e9be..b1cebbd 100644
--- a/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs
@@ -331,7 +331,7 @@
                 self.cannot_move_out_of_interior_noncopy(span, ty, None)
             }
             ty::Closure(def_id, closure_substs)
-                if def_id.as_local() == Some(self.mir_def_id) && upvar_field.is_some() =>
+                if def_id.as_local() == Some(self.mir_def_id()) && upvar_field.is_some() =>
             {
                 let closure_kind_ty = closure_substs.as_closure().kind_ty();
                 let closure_kind = closure_kind_ty.to_opt_closure_kind();
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs
index d4cdf02..e1af6fc 100644
--- a/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs
@@ -1,11 +1,11 @@
 use rustc_hir as hir;
 use rustc_hir::Node;
 use rustc_index::vec::Idx;
-use rustc_middle::mir::{self, ClearCrossCrate, Local, LocalInfo, Location};
+use rustc_middle::mir::{self, ClearCrossCrate, Local, LocalDecl, LocalInfo, Location};
 use rustc_middle::mir::{Mutability, Place, PlaceRef, ProjectionElem};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::source_map::DesugaringKind;
-use rustc_span::symbol::kw;
+use rustc_span::symbol::{kw, Symbol};
 use rustc_span::Span;
 
 use crate::borrow_check::diagnostics::BorrowedContentSource;
@@ -211,36 +211,12 @@
 
             // Suggest removing a `&mut` from the use of a mutable reference.
             PlaceRef { local, projection: [] }
-                if {
-                    self.body
-                        .local_decls
-                        .get(local)
-                        .map(|local_decl| {
-                            if let Some(box LocalInfo::User(ClearCrossCrate::Set(
-                                mir::BindingForm::ImplicitSelf(kind),
-                            ))) = local_decl.local_info
-                            {
-                                // Check if the user variable is a `&mut self` and we can therefore
-                                // suggest removing the `&mut`.
-                                //
-                                // Deliberately fall into this case for all implicit self types,
-                                // so that we don't fall in to the next case with them.
-                                kind == mir::ImplicitSelfKind::MutRef
-                            } else if Some(kw::SelfLower) == self.local_names[local] {
-                                // Otherwise, check if the name is the self kewyord - in which case
-                                // we have an explicit self. Do the same thing in this case and check
-                                // for a `self: &mut Self` to suggest removing the `&mut`.
-                                if let ty::Ref(_, _, hir::Mutability::Mut) = local_decl.ty.kind() {
-                                    true
-                                } else {
-                                    false
-                                }
-                            } else {
-                                false
-                            }
-                        })
-                        .unwrap_or(false)
-                } =>
+                if self
+                    .body
+                    .local_decls
+                    .get(local)
+                    .map(|l| mut_borrow_of_mutable_ref(l, self.local_names[local]))
+                    .unwrap_or(false) =>
             {
                 err.span_label(span, format!("cannot {ACT}", ACT = act));
                 err.span_label(span, "try removing `&mut` here");
@@ -492,7 +468,7 @@
         err.span_label(sp, format!("cannot {}", act));
 
         let hir = self.infcx.tcx.hir();
-        let closure_id = hir.local_def_id_to_hir_id(self.mir_def_id);
+        let closure_id = self.mir_hir_id();
         let fn_call_id = hir.get_parent_node(closure_id);
         let node = hir.get(fn_call_id);
         let item_id = hir.enclosing_body_owner(fn_call_id);
@@ -581,6 +557,34 @@
     }
 }
 
+fn mut_borrow_of_mutable_ref(local_decl: &LocalDecl<'_>, local_name: Option<Symbol>) -> bool {
+    debug!("local_info: {:?}, ty.kind(): {:?}", local_decl.local_info, local_decl.ty.kind());
+
+    match local_decl.local_info.as_deref() {
+        // Check if mutably borrowing a mutable reference.
+        Some(LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::Var(
+            mir::VarBindingForm {
+                binding_mode: ty::BindingMode::BindByValue(Mutability::Not), ..
+            },
+        )))) => matches!(local_decl.ty.kind(), ty::Ref(_, _, hir::Mutability::Mut)),
+        Some(LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::ImplicitSelf(kind)))) => {
+            // Check if the user variable is a `&mut self` and we can therefore
+            // suggest removing the `&mut`.
+            //
+            // Deliberately fall into this case for all implicit self types,
+            // so that we don't fall into the next case with them.
+            *kind == mir::ImplicitSelfKind::MutRef
+        }
+        _ if Some(kw::SelfLower) == local_name => {
+            // Otherwise, check if the name is the `self` keyword - in which case
+            // we have an explicit self. Do the same thing in this case and check
+            // for a `self: &mut Self` to suggest removing the `&mut`.
+            matches!(local_decl.ty.kind(), ty::Ref(_, _, hir::Mutability::Mut))
+        }
+        _ => false,
+    }
+}
+
 fn suggest_ampmut_self<'tcx>(
     tcx: TyCtxt<'tcx>,
     local_decl: &mir::LocalDecl<'tcx>,
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/region_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/region_errors.rs
index eb1f700..e22dab0 100644
--- a/compiler/rustc_mir/src/borrow_check/diagnostics/region_errors.rs
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/region_errors.rs
@@ -6,6 +6,7 @@
     error_reporting::unexpected_hidden_region_diagnostic, NLLRegionVariableOrigin,
 };
 use rustc_middle::mir::{ConstraintCategory, ReturnConstraint};
+use rustc_middle::ty::subst::Subst;
 use rustc_middle::ty::{self, RegionVid, Ty};
 use rustc_span::symbol::{kw, sym};
 use rustc_span::Span;
@@ -515,7 +516,8 @@
         let mut diag =
             self.infcx.tcx.sess.struct_span_err(*span, "lifetime may not live long enough");
 
-        let (_, mir_def_name) = self.infcx.tcx.article_and_description(self.mir_def_id.to_def_id());
+        let (_, mir_def_name) =
+            self.infcx.tcx.article_and_description(self.mir_def_id().to_def_id());
 
         let fr_name = self.give_region_a_name(*fr).unwrap();
         fr_name.highlight_region_name(&mut diag);
@@ -584,14 +586,14 @@
                 //
                 // eg. check for `impl Trait + 'static` instead of `impl Trait`.
                 let has_static_predicate = {
-                    let predicates_of = self.infcx.tcx.predicates_of(did);
-                    let bounds = predicates_of.instantiate(self.infcx.tcx, substs);
+                    let bounds = self.infcx.tcx.explicit_item_bounds(did);
 
                     let mut found = false;
-                    for predicate in bounds.predicates {
+                    for (bound, _) in bounds {
                         if let ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(_, r)) =
-                            predicate.skip_binders()
+                            bound.skip_binders()
                         {
+                            let r = r.subst(self.infcx.tcx, substs);
                             if let ty::RegionKind::ReStatic = r {
                                 found = true;
                                 break;
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs
index 5f64eb3..2a90fb0 100644
--- a/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs
@@ -6,8 +6,8 @@
 use rustc_middle::ty::print::RegionHighlightMode;
 use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
 use rustc_middle::ty::{self, RegionVid, Ty};
-use rustc_span::symbol::kw;
-use rustc_span::{symbol::Symbol, Span, DUMMY_SP};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
 
 use crate::borrow_check::{nll::ToRegionVid, universal_regions::DefiningTy, MirBorrowckCtxt};
 
@@ -39,7 +39,7 @@
     /// The region corresponding to a closure upvar.
     AnonRegionFromUpvar(Span, String),
     /// The region corresponding to the return type of a closure.
-    AnonRegionFromOutput(Span, String, String),
+    AnonRegionFromOutput(RegionNameHighlight, String),
     /// The region from a type yielded by a generator.
     AnonRegionFromYieldTy(Span, String),
     /// An anonymous region from an async fn.
@@ -57,6 +57,10 @@
     /// The anonymous region corresponds to a region where the type annotation is completely missing
     /// from the code, e.g. in a closure arguments `|x| { ... }`, where `x` is a reference.
     CannotMatchHirTy(Span, String),
+    /// The anonymous region corresponds to a region where the type annotation is completely missing
+    /// from the code, and *even if* we print out the full name of the type, the region name won't
+    /// be included. This currently occurs for opaque types like `impl Future`.
+    Occluded(Span, String),
 }
 
 impl RegionName {
@@ -81,13 +85,14 @@
             | RegionNameSource::NamedFreeRegion(span)
             | RegionNameSource::SynthesizedFreeEnvRegion(span, _)
             | RegionNameSource::AnonRegionFromUpvar(span, _)
-            | RegionNameSource::AnonRegionFromOutput(span, _, _)
             | RegionNameSource::AnonRegionFromYieldTy(span, _)
             | RegionNameSource::AnonRegionFromAsyncFn(span) => Some(span),
-            RegionNameSource::AnonRegionFromArgument(ref highlight) => match *highlight {
+            RegionNameSource::AnonRegionFromArgument(ref highlight)
+            | RegionNameSource::AnonRegionFromOutput(ref highlight, _) => match *highlight {
                 RegionNameHighlight::MatchedHirTy(span)
                 | RegionNameHighlight::MatchedAdtAndSegment(span)
-                | RegionNameHighlight::CannotMatchHirTy(span, _) => Some(span),
+                | RegionNameHighlight::CannotMatchHirTy(span, _)
+                | RegionNameHighlight::Occluded(span, _) => Some(span),
             },
         }
     }
@@ -112,6 +117,7 @@
                 diag.span_label(*span, format!("has type `{}`", type_name));
             }
             RegionNameSource::AnonRegionFromArgument(RegionNameHighlight::MatchedHirTy(span))
+            | RegionNameSource::AnonRegionFromOutput(RegionNameHighlight::MatchedHirTy(span), _)
             | RegionNameSource::AnonRegionFromAsyncFn(span) => {
                 diag.span_label(
                     *span,
@@ -120,16 +126,44 @@
             }
             RegionNameSource::AnonRegionFromArgument(
                 RegionNameHighlight::MatchedAdtAndSegment(span),
+            )
+            | RegionNameSource::AnonRegionFromOutput(
+                RegionNameHighlight::MatchedAdtAndSegment(span),
+                _,
             ) => {
                 diag.span_label(*span, format!("let's call this `{}`", self));
             }
+            RegionNameSource::AnonRegionFromArgument(RegionNameHighlight::Occluded(
+                span,
+                type_name,
+            )) => {
+                diag.span_label(
+                    *span,
+                    format!("lifetime `{}` appears in the type {}", self, type_name),
+                );
+            }
+            RegionNameSource::AnonRegionFromOutput(
+                RegionNameHighlight::Occluded(span, type_name),
+                mir_description,
+            ) => {
+                diag.span_label(
+                    *span,
+                    format!(
+                        "return type{} `{}` contains a lifetime `{}`",
+                        mir_description, type_name, self
+                    ),
+                );
+            }
             RegionNameSource::AnonRegionFromUpvar(span, upvar_name) => {
                 diag.span_label(
                     *span,
                     format!("lifetime `{}` appears in the type of `{}`", self, upvar_name),
                 );
             }
-            RegionNameSource::AnonRegionFromOutput(span, mir_description, type_name) => {
+            RegionNameSource::AnonRegionFromOutput(
+                RegionNameHighlight::CannotMatchHirTy(span, type_name),
+                mir_description,
+            ) => {
                 diag.span_label(*span, format!("return type{} is {}", mir_description, type_name));
             }
             RegionNameSource::AnonRegionFromYieldTy(span, type_name) => {
@@ -147,6 +181,14 @@
 }
 
 impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
+    crate fn mir_def_id(&self) -> hir::def_id::LocalDefId {
+        self.body.source.def_id().as_local().unwrap()
+    }
+
+    crate fn mir_hir_id(&self) -> hir::HirId {
+        self.infcx.tcx.hir().local_def_id_to_hir_id(self.mir_def_id())
+    }
+
     /// Generate a synthetic region named `'N`, where `N` is the next value of the counter. Then,
     /// increment the counter.
     ///
@@ -266,12 +308,11 @@
                 }
 
                 ty::BoundRegion::BrEnv => {
-                    let mir_hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
                     let def_ty = self.regioncx.universal_regions().defining_ty;
 
                     if let DefiningTy::Closure(_, substs) = def_ty {
                         let args_span = if let hir::ExprKind::Closure(_, _, _, span, _) =
-                            tcx.hir().expect_expr(mir_hir_id).kind
+                            tcx.hir().expect_expr(self.mir_hir_id()).kind
                         {
                             span
                         } else {
@@ -342,27 +383,28 @@
             argument_index,
         );
 
-        self.get_argument_hir_ty_for_highlighting(argument_index)
+        let highlight = self
+            .get_argument_hir_ty_for_highlighting(argument_index)
             .and_then(|arg_hir_ty| self.highlight_if_we_can_match_hir_ty(fr, arg_ty, arg_hir_ty))
-            .or_else(|| {
+            .unwrap_or_else(|| {
                 // `highlight_if_we_cannot_match_hir_ty` needs to know the number we will give to
                 // the anonymous region. If it succeeds, the `synthesize_region_name` call below
                 // will increment the counter, "reserving" the number we just used.
                 let counter = *self.next_region_name.try_borrow().unwrap();
                 self.highlight_if_we_cannot_match_hir_ty(fr, arg_ty, span, counter)
-            })
-            .map(|highlight| RegionName {
-                name: self.synthesize_region_name(),
-                source: RegionNameSource::AnonRegionFromArgument(highlight),
-            })
+            });
+
+        Some(RegionName {
+            name: self.synthesize_region_name(),
+            source: RegionNameSource::AnonRegionFromArgument(highlight),
+        })
     }
 
     fn get_argument_hir_ty_for_highlighting(
         &self,
         argument_index: usize,
     ) -> Option<&hir::Ty<'tcx>> {
-        let mir_hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
-        let fn_decl = self.infcx.tcx.hir().fn_decl_by_hir_id(mir_hir_id)?;
+        let fn_decl = self.infcx.tcx.hir().fn_decl_by_hir_id(self.mir_hir_id())?;
         let argument_hir_ty: &hir::Ty<'_> = fn_decl.inputs.get(argument_index)?;
         match argument_hir_ty.kind {
             // This indicates a variable with no type annotation, like
@@ -393,7 +435,7 @@
         ty: Ty<'tcx>,
         span: Span,
         counter: usize,
-    ) -> Option<RegionNameHighlight> {
+    ) -> RegionNameHighlight {
         let mut highlight = RegionHighlightMode::default();
         highlight.highlighting_region_vid(needle_fr, counter);
         let type_name =
@@ -405,9 +447,9 @@
         );
         if type_name.find(&format!("'{}", counter)).is_some() {
             // Only add a label if we can confirm that a region was labelled.
-            Some(RegionNameHighlight::CannotMatchHirTy(span, type_name))
+            RegionNameHighlight::CannotMatchHirTy(span, type_name)
         } else {
-            None
+            RegionNameHighlight::Occluded(span, type_name)
         }
     }
 
@@ -637,6 +679,7 @@
     /// or be early bound (named, not in argument).
     fn give_name_if_anonymous_region_appears_in_output(&self, fr: RegionVid) -> Option<RegionName> {
         let tcx = self.infcx.tcx;
+        let hir = tcx.hir();
 
         let return_ty = self.regioncx.universal_regions().unnormalized_output_ty;
         debug!("give_name_if_anonymous_region_appears_in_output: return_ty = {:?}", return_ty);
@@ -644,44 +687,123 @@
             return None;
         }
 
-        let mut highlight = RegionHighlightMode::default();
-        highlight.highlighting_region_vid(fr, *self.next_region_name.try_borrow().unwrap());
-        let type_name =
-            self.infcx.extract_inference_diagnostics_data(return_ty.into(), Some(highlight)).name;
+        let mir_hir_id = self.mir_hir_id();
 
-        let mir_hir_id = tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
-
-        let (return_span, mir_description) = match tcx.hir().get(mir_hir_id) {
+        let (return_span, mir_description, hir_ty) = match hir.get(mir_hir_id) {
             hir::Node::Expr(hir::Expr {
-                kind: hir::ExprKind::Closure(_, return_ty, _, span, gen_move),
+                kind: hir::ExprKind::Closure(_, return_ty, body_id, span, _),
                 ..
-            }) => (
-                match return_ty.output {
-                    hir::FnRetTy::DefaultReturn(_) => tcx.sess.source_map().end_point(*span),
-                    hir::FnRetTy::Return(_) => return_ty.output.span(),
-                },
-                if gen_move.is_some() { " of generator" } else { " of closure" },
-            ),
-            hir::Node::ImplItem(hir::ImplItem {
-                kind: hir::ImplItemKind::Fn(method_sig, _),
-                ..
-            }) => (method_sig.decl.output.span(), ""),
-            _ => (self.body.span, ""),
+            }) => {
+                let (mut span, mut hir_ty) = match return_ty.output {
+                    hir::FnRetTy::DefaultReturn(_) => {
+                        (tcx.sess.source_map().end_point(*span), None)
+                    }
+                    hir::FnRetTy::Return(hir_ty) => (return_ty.output.span(), Some(hir_ty)),
+                };
+                let mir_description = match hir.body(*body_id).generator_kind {
+                    Some(hir::GeneratorKind::Async(gen)) => match gen {
+                        hir::AsyncGeneratorKind::Block => " of async block",
+                        hir::AsyncGeneratorKind::Closure => " of async closure",
+                        hir::AsyncGeneratorKind::Fn => {
+                            let parent_item = hir.get(hir.get_parent_item(mir_hir_id));
+                            let output = &parent_item
+                                .fn_decl()
+                                .expect("generator lowered from async fn should be in fn")
+                                .output;
+                            span = output.span();
+                            if let hir::FnRetTy::Return(ret) = output {
+                                hir_ty = Some(self.get_future_inner_return_ty(*ret));
+                            }
+                            " of async function"
+                        }
+                    },
+                    Some(hir::GeneratorKind::Gen) => " of generator",
+                    None => " of closure",
+                };
+                (span, mir_description, hir_ty)
+            }
+            node => match node.fn_decl() {
+                Some(fn_decl) => {
+                    let hir_ty = match fn_decl.output {
+                        hir::FnRetTy::DefaultReturn(_) => None,
+                        hir::FnRetTy::Return(ty) => Some(ty),
+                    };
+                    (fn_decl.output.span(), "", hir_ty)
+                }
+                None => (self.body.span, "", None),
+            },
         };
 
+        let highlight = hir_ty
+            .and_then(|hir_ty| self.highlight_if_we_can_match_hir_ty(fr, return_ty, hir_ty))
+            .unwrap_or_else(|| {
+                // `highlight_if_we_cannot_match_hir_ty` needs to know the number we will give to
+                // the anonymous region. If it succeeds, the `synthesize_region_name` call below
+                // will increment the counter, "reserving" the number we just used.
+                let counter = *self.next_region_name.try_borrow().unwrap();
+                self.highlight_if_we_cannot_match_hir_ty(fr, return_ty, return_span, counter)
+            });
+
         Some(RegionName {
-            // This counter value will already have been used, so this function will increment it
-            // so the next value will be used next and return the region name that would have been
-            // used.
             name: self.synthesize_region_name(),
-            source: RegionNameSource::AnonRegionFromOutput(
-                return_span,
-                mir_description.to_string(),
-                type_name,
-            ),
+            source: RegionNameSource::AnonRegionFromOutput(highlight, mir_description.to_string()),
         })
     }
 
+    /// From the [`hir::Ty`] of an async function's lowered return type,
+    /// retrieve the `hir::Ty` representing the type the user originally wrote.
+    ///
+    /// e.g. given the function:
+    ///
+    /// ```
+    /// async fn foo() -> i32 {}
+    /// ```
+    ///
+    /// this function, given the lowered return type of `foo`, an [`OpaqueDef`] that implements `Future<Output=i32>`,
+    /// returns the `i32`.
+    ///
+    /// [`OpaqueDef`]: hir::TyKind::OpaqueDef
+    fn get_future_inner_return_ty(&self, hir_ty: &'tcx hir::Ty<'tcx>) -> &'tcx hir::Ty<'tcx> {
+        let hir = self.infcx.tcx.hir();
+
+        if let hir::TyKind::OpaqueDef(id, _) = hir_ty.kind {
+            let opaque_ty = hir.item(id.id);
+            if let hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+                bounds:
+                    [hir::GenericBound::LangItemTrait(
+                        hir::LangItem::Future,
+                        _,
+                        _,
+                        hir::GenericArgs {
+                            bindings:
+                                [hir::TypeBinding {
+                                    ident: Ident { name: sym::Output, .. },
+                                    kind: hir::TypeBindingKind::Equality { ty },
+                                    ..
+                                }],
+                            ..
+                        },
+                    )],
+                ..
+            }) = opaque_ty.kind
+            {
+                ty
+            } else {
+                span_bug!(
+                    hir_ty.span,
+                    "bounds from lowered return type of async fn did not match expected format: {:?}",
+                    opaque_ty
+                );
+            }
+        } else {
+            span_bug!(
+                hir_ty.span,
+                "lowered return type of async fn is not OpaqueDef: {:?}",
+                hir_ty
+            );
+        }
+    }
+
     fn give_name_if_anonymous_region_appears_in_yield_ty(
         &self,
         fr: RegionVid,
@@ -702,9 +824,7 @@
         let type_name =
             self.infcx.extract_inference_diagnostics_data(yield_ty.into(), Some(highlight)).name;
 
-        let mir_hir_id = tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
-
-        let yield_span = match tcx.hir().get(mir_hir_id) {
+        let yield_span = match tcx.hir().get(self.mir_hir_id()) {
             hir::Node::Expr(hir::Expr {
                 kind: hir::ExprKind::Closure(_, _, _, span, _), ..
             }) => (tcx.sess.source_map().end_point(*span)),
diff --git a/compiler/rustc_mir/src/borrow_check/invalidation.rs b/compiler/rustc_mir/src/borrow_check/invalidation.rs
index c84ccaf..8c05e6f 100644
--- a/compiler/rustc_mir/src/borrow_check/invalidation.rs
+++ b/compiler/rustc_mir/src/borrow_check/invalidation.rs
@@ -117,7 +117,7 @@
         self.check_activations(location);
 
         match &terminator.kind {
-            TerminatorKind::SwitchInt { ref discr, switch_ty: _, values: _, targets: _ } => {
+            TerminatorKind::SwitchInt { ref discr, switch_ty: _, targets: _ } => {
                 self.consume_operand(location, discr);
             }
             TerminatorKind::Drop { place: drop_place, target: _, unwind: _ } => {
diff --git a/compiler/rustc_mir/src/borrow_check/mod.rs b/compiler/rustc_mir/src/borrow_check/mod.rs
index e423748..de54c55 100644
--- a/compiler/rustc_mir/src/borrow_check/mod.rs
+++ b/compiler/rustc_mir/src/borrow_check/mod.rs
@@ -17,7 +17,7 @@
 use rustc_middle::mir::{Field, ProjectionElem, Promoted, Rvalue, Statement, StatementKind};
 use rustc_middle::mir::{InlineAsmOperand, Terminator, TerminatorKind};
 use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::{self, InstanceDef, ParamEnv, RegionVid, TyCtxt};
+use rustc_middle::ty::{self, ParamEnv, RegionVid, TyCtxt};
 use rustc_session::lint::builtin::{MUTABLE_BORROW_RESERVATION_CONFLICT, UNUSED_MUT};
 use rustc_span::{Span, Symbol, DUMMY_SP};
 
@@ -36,7 +36,6 @@
 use crate::dataflow::move_paths::{InitLocation, LookupResult, MoveData, MoveError};
 use crate::dataflow::MoveDataParamEnv;
 use crate::dataflow::{Analysis, BorrowckFlowState as Flows, BorrowckResults};
-use crate::transform::MirSource;
 
 use self::diagnostics::{AccessKind, RegionName};
 use self::location::LocationTable;
@@ -112,7 +111,7 @@
     let opt_closure_req = tcx.infer_ctxt().enter(|infcx| {
         let input_body: &Body<'_> = &input_body.borrow();
         let promoted: &IndexVec<_, _> = &promoted.borrow();
-        do_mir_borrowck(&infcx, input_body, promoted, def)
+        do_mir_borrowck(&infcx, input_body, promoted)
     });
     debug!("mir_borrowck done");
 
@@ -123,8 +122,9 @@
     infcx: &InferCtxt<'a, 'tcx>,
     input_body: &Body<'tcx>,
     input_promoted: &IndexVec<Promoted, Body<'tcx>>,
-    def: ty::WithOptConstParam<LocalDefId>,
 ) -> BorrowCheckResult<'tcx> {
+    let def = input_body.source.with_opt_param().as_local().unwrap();
+
     debug!("do_mir_borrowck(def = {:?})", def);
 
     let tcx = infcx.tcx;
@@ -186,7 +186,7 @@
     // will have a lifetime tied to the inference context.
     let mut body = input_body.clone();
     let mut promoted = input_promoted.clone();
-    let free_regions = nll::replace_regions_in_mir(infcx, def, param_env, &mut body, &mut promoted);
+    let free_regions = nll::replace_regions_in_mir(infcx, param_env, &mut body, &mut promoted);
     let body = &body; // no further changes
 
     let location_table = &LocationTable::new(&body);
@@ -204,7 +204,7 @@
     let mdpe = MoveDataParamEnv { move_data, param_env };
 
     let mut flow_inits = MaybeInitializedPlaces::new(tcx, &body, &mdpe)
-        .into_engine(tcx, &body, def.did.to_def_id())
+        .into_engine(tcx, &body)
         .pass_name("borrowck")
         .iterate_to_fixpoint()
         .into_results_cursor(&body);
@@ -222,7 +222,6 @@
         nll_errors,
     } = nll::compute_regions(
         infcx,
-        def.did,
         free_regions,
         body,
         &promoted,
@@ -236,20 +235,13 @@
 
     // Dump MIR results into a file, if that is enabled. This let us
     // write unit-tests, as well as helping with debugging.
-    nll::dump_mir_results(
-        infcx,
-        MirSource { instance: InstanceDef::Item(def.to_global()), promoted: None },
-        &body,
-        &regioncx,
-        &opt_closure_req,
-    );
+    nll::dump_mir_results(infcx, &body, &regioncx, &opt_closure_req);
 
     // We also have a `#[rustc_regions]` annotation that causes us to dump
     // information.
     nll::dump_annotation(
         infcx,
         &body,
-        def.did.to_def_id(),
         &regioncx,
         &opt_closure_req,
         &opaque_type_values,
@@ -264,15 +256,15 @@
     let regioncx = Rc::new(regioncx);
 
     let flow_borrows = Borrows::new(tcx, &body, regioncx.clone(), &borrow_set)
-        .into_engine(tcx, &body, def.did.to_def_id())
+        .into_engine(tcx, &body)
         .pass_name("borrowck")
         .iterate_to_fixpoint();
     let flow_uninits = MaybeUninitializedPlaces::new(tcx, &body, &mdpe)
-        .into_engine(tcx, &body, def.did.to_def_id())
+        .into_engine(tcx, &body)
         .pass_name("borrowck")
         .iterate_to_fixpoint();
     let flow_ever_inits = EverInitializedPlaces::new(tcx, &body, &mdpe)
-        .into_engine(tcx, &body, def.did.to_def_id())
+        .into_engine(tcx, &body)
         .pass_name("borrowck")
         .iterate_to_fixpoint();
 
@@ -293,7 +285,6 @@
                 infcx,
                 param_env,
                 body: promoted_body,
-                mir_def_id: def.did,
                 move_data: &move_data,
                 location_table: &LocationTable::new(promoted_body),
                 movable_generator,
@@ -327,7 +318,6 @@
         infcx,
         param_env,
         body,
-        mir_def_id: def.did,
         move_data: &mdpe.move_data,
         location_table,
         movable_generator,
@@ -481,7 +471,6 @@
     crate infcx: &'cx InferCtxt<'cx, 'tcx>,
     param_env: ParamEnv<'tcx>,
     body: &'cx Body<'tcx>,
-    mir_def_id: LocalDefId,
     move_data: &'cx MoveData<'tcx>,
 
     /// Map from MIR `Location` to `LocationIndex`; created
@@ -682,32 +671,19 @@
         self.check_activations(loc, span, flow_state);
 
         match term.kind {
-            TerminatorKind::SwitchInt { ref discr, switch_ty: _, values: _, targets: _ } => {
+            TerminatorKind::SwitchInt { ref discr, switch_ty: _, targets: _ } => {
                 self.consume_operand(loc, (discr, span), flow_state);
             }
-            TerminatorKind::Drop { place: ref drop_place, target: _, unwind: _ } => {
-                let tcx = self.infcx.tcx;
-
-                // Compute the type with accurate region information.
-                let drop_place_ty = drop_place.ty(self.body, self.infcx.tcx);
-
-                // Erase the regions.
-                let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty).ty;
-
-                // "Lift" into the tcx -- once regions are erased, this type should be in the
-                // global arenas; this "lift" operation basically just asserts that is true, but
-                // that is useful later.
-                tcx.lift(&drop_place_ty).unwrap();
-
+            TerminatorKind::Drop { place, target: _, unwind: _ } => {
                 debug!(
                     "visit_terminator_drop \
-                     loc: {:?} term: {:?} drop_place: {:?} drop_place_ty: {:?} span: {:?}",
-                    loc, term, drop_place, drop_place_ty, span
+                     loc: {:?} term: {:?} place: {:?} span: {:?}",
+                    loc, term, place, span
                 );
 
                 self.access_place(
                     loc,
-                    (*drop_place, span),
+                    (place, span),
                     (AccessDepth::Drop, Write(WriteKind::StorageDeadOrDrop)),
                     LocalMutationIsAllowed::Yes,
                     flow_state,
diff --git a/compiler/rustc_mir/src/borrow_check/nll.rs b/compiler/rustc_mir/src/borrow_check/nll.rs
index 66a17cb..359c5f2 100644
--- a/compiler/rustc_mir/src/borrow_check/nll.rs
+++ b/compiler/rustc_mir/src/borrow_check/nll.rs
@@ -2,14 +2,14 @@
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::Diagnostic;
-use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::def_id::DefId;
 use rustc_index::vec::IndexVec;
 use rustc_infer::infer::InferCtxt;
 use rustc_middle::mir::{
     BasicBlock, Body, ClosureOutlivesSubject, ClosureRegionRequirements, LocalKind, Location,
     Promoted,
 };
-use rustc_middle::ty::{self, InstanceDef, RegionKind, RegionVid};
+use rustc_middle::ty::{self, RegionKind, RegionVid};
 use rustc_span::symbol::sym;
 use std::env;
 use std::fmt::Debug;
@@ -24,7 +24,6 @@
 use crate::dataflow::impls::MaybeInitializedPlaces;
 use crate::dataflow::move_paths::{InitKind, InitLocation, MoveData};
 use crate::dataflow::ResultsCursor;
-use crate::transform::MirSource;
 use crate::util as mir_util;
 use crate::util::pretty;
 
@@ -59,11 +58,12 @@
 /// `compute_regions`.
 pub(in crate::borrow_check) fn replace_regions_in_mir<'cx, 'tcx>(
     infcx: &InferCtxt<'cx, 'tcx>,
-    def: ty::WithOptConstParam<LocalDefId>,
     param_env: ty::ParamEnv<'tcx>,
     body: &mut Body<'tcx>,
     promoted: &mut IndexVec<Promoted, Body<'tcx>>,
 ) -> UniversalRegions<'tcx> {
+    let def = body.source.with_opt_param().as_local().unwrap();
+
     debug!("replace_regions_in_mir(def={:?})", def);
 
     // Compute named region information. This also renumbers the inputs/outputs.
@@ -72,8 +72,7 @@
     // Replace all remaining regions with fresh inference variables.
     renumber::renumber_mir(infcx, body, promoted);
 
-    let source = MirSource { instance: InstanceDef::Item(def.to_global()), promoted: None };
-    mir_util::dump_mir(infcx.tcx, None, "renumber", &0, source, body, |_, _| Ok(()));
+    mir_util::dump_mir(infcx.tcx, None, "renumber", &0, body, |_, _| Ok(()));
 
     universal_regions
 }
@@ -158,7 +157,6 @@
 /// This may result in errors being reported.
 pub(in crate::borrow_check) fn compute_regions<'cx, 'tcx>(
     infcx: &InferCtxt<'cx, 'tcx>,
-    def_id: LocalDefId,
     universal_regions: UniversalRegions<'tcx>,
     body: &Body<'tcx>,
     promoted: &IndexVec<Promoted, Body<'tcx>>,
@@ -182,7 +180,6 @@
             param_env,
             body,
             promoted,
-            def_id,
             &universal_regions,
             location_table,
             borrow_set,
@@ -272,12 +269,14 @@
     // Generate various additional constraints.
     invalidation::generate_invalidates(infcx.tcx, &mut all_facts, location_table, body, borrow_set);
 
+    let def_id = body.source.def_id();
+
     // Dump facts if requested.
     let polonius_output = all_facts.and_then(|all_facts| {
         if infcx.tcx.sess.opts.debugging_opts.nll_facts {
-            let def_path = infcx.tcx.def_path(def_id.to_def_id());
-            let dir_path =
-                PathBuf::from("nll-facts").join(def_path.to_filename_friendly_no_crate());
+            let def_path = infcx.tcx.def_path(def_id);
+            let dir_path = PathBuf::from(&infcx.tcx.sess.opts.debugging_opts.nll_facts_dir)
+                .join(def_path.to_filename_friendly_no_crate());
             all_facts.write_to_dir(dir_path, location_table).unwrap();
         }
 
@@ -295,7 +294,7 @@
 
     // Solve the region constraints.
     let (closure_region_requirements, nll_errors) =
-        regioncx.solve(infcx, &body, def_id.to_def_id(), polonius_output.clone());
+        regioncx.solve(infcx, &body, polonius_output.clone());
 
     if !nll_errors.is_empty() {
         // Suppress unhelpful extra errors in `infer_opaque_types`.
@@ -315,16 +314,15 @@
 
 pub(super) fn dump_mir_results<'a, 'tcx>(
     infcx: &InferCtxt<'a, 'tcx>,
-    source: MirSource<'tcx>,
     body: &Body<'tcx>,
     regioncx: &RegionInferenceContext<'tcx>,
     closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
 ) {
-    if !mir_util::dump_enabled(infcx.tcx, "nll", source.def_id()) {
+    if !mir_util::dump_enabled(infcx.tcx, "nll", body.source.def_id()) {
         return;
     }
 
-    mir_util::dump_mir(infcx.tcx, None, "nll", &0, source, body, |pass_where, out| {
+    mir_util::dump_mir(infcx.tcx, None, "nll", &0, body, |pass_where, out| {
         match pass_where {
             // Before the CFG, dump out the values for each region variable.
             PassWhere::BeforeCFG => {
@@ -352,14 +350,14 @@
     // Also dump the inference graph constraints as a graphviz file.
     let _: io::Result<()> = try {
         let mut file =
-            pretty::create_dump_file(infcx.tcx, "regioncx.all.dot", None, "nll", &0, source)?;
+            pretty::create_dump_file(infcx.tcx, "regioncx.all.dot", None, "nll", &0, body.source)?;
         regioncx.dump_graphviz_raw_constraints(&mut file)?;
     };
 
     // Also dump the inference graph constraints as a graphviz file.
     let _: io::Result<()> = try {
         let mut file =
-            pretty::create_dump_file(infcx.tcx, "regioncx.scc.dot", None, "nll", &0, source)?;
+            pretty::create_dump_file(infcx.tcx, "regioncx.scc.dot", None, "nll", &0, body.source)?;
         regioncx.dump_graphviz_scc_constraints(&mut file)?;
     };
 }
@@ -367,14 +365,13 @@
 pub(super) fn dump_annotation<'a, 'tcx>(
     infcx: &InferCtxt<'a, 'tcx>,
     body: &Body<'tcx>,
-    mir_def_id: DefId,
     regioncx: &RegionInferenceContext<'tcx>,
     closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
     opaque_type_values: &FxHashMap<DefId, ty::ResolvedOpaqueTy<'tcx>>,
     errors_buffer: &mut Vec<Diagnostic>,
 ) {
     let tcx = infcx.tcx;
-    let base_def_id = tcx.closure_base_def_id(mir_def_id);
+    let base_def_id = tcx.closure_base_def_id(body.source.def_id());
     if !tcx.has_attr(base_def_id, sym::rustc_regions) {
         return;
     }
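The hunks above stop threading `mir_def_id`/`def_id` parameters around and instead read the id from `body.source`, which the MIR body now carries itself. A minimal sketch of that pattern, using hypothetical `DefId`/`MirSource`/`Body` stand-ins rather than the real rustc types:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct DefId(u32);

#[derive(Clone, Copy, Debug)]
struct MirSource {
    def_id: DefId,
}

struct Body {
    source: MirSource, // statements, locals, etc. elided
}

// Before: the def-id is passed alongside the body it already describes.
fn dump_results_old(body: &Body, def_id: DefId) {
    assert_eq!(body.source.def_id, def_id); // the two can only ever agree
    println!("dumping results for {:?}", def_id);
}

// After: derive it from the body, dropping a redundant parameter.
fn dump_results_new(body: &Body) {
    println!("dumping results for {:?}", body.source.def_id);
}

fn main() {
    let body = Body { source: MirSource { def_id: DefId(42) } };
    dump_results_old(&body, DefId(42));
    dump_results_new(&body);
}
```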
diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs b/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs
index 3dc082a..a5a7012 100644
--- a/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs
+++ b/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs
@@ -548,9 +548,9 @@
         &mut self,
         infcx: &InferCtxt<'_, 'tcx>,
         body: &Body<'tcx>,
-        mir_def_id: DefId,
         polonius_output: Option<Rc<PoloniusOutput>>,
     ) -> (Option<ClosureRegionRequirements<'tcx>>, RegionErrors<'tcx>) {
+        let mir_def_id = body.source.def_id();
         self.propagate_constraints(body, infcx.tcx);
 
         let mut errors_buffer = RegionErrors::new();
@@ -582,7 +582,7 @@
             self.check_member_constraints(infcx, &mut errors_buffer);
         }
 
-        let outlives_requirements = outlives_requirements.unwrap_or(vec![]);
+        let outlives_requirements = outlives_requirements.unwrap_or_default();
 
         if outlives_requirements.is_empty() {
             (None, errors_buffer)
@@ -1225,7 +1225,9 @@
     /// it. However, it works pretty well in practice. In particular,
     /// this is needed to deal with projection outlives bounds like
     ///
-    ///     <T as Foo<'0>>::Item: '1
+    /// ```ignore (internal compiler representation so lifetime syntax is invalid)
+    /// <T as Foo<'0>>::Item: '1
+    /// ```
     ///
     /// In particular, this routine winds up being important when
     /// there are bounds like `where <T as Foo<'a>>::Item: 'b` in the
@@ -1362,7 +1364,7 @@
     /// terms that the "longer free region" `'a` outlived the "shorter free region" `'b`.
     ///
     /// More details can be found in this blog post by Niko:
-    /// http://smallcultfollowing.com/babysteps/blog/2019/01/17/polonius-and-region-errors/
+    /// <http://smallcultfollowing.com/babysteps/blog/2019/01/17/polonius-and-region-errors/>
     ///
     /// In the canonical example
     ///
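One small cleanup above replaces `unwrap_or(vec![])` with `unwrap_or_default()`. Both produce an empty `Vec` in the `None` case; the latter constructs the fallback only when it is needed and is the idiomatic spelling. A tiny illustration with plain std types:

```rust
fn main() {
    let maybe: Option<Vec<u32>> = None;

    // Same result either way; `unwrap_or_default` builds the fallback lazily.
    let eager: Vec<u32> = maybe.clone().unwrap_or(vec![]);
    let lazy: Vec<u32> = maybe.unwrap_or_default();
    assert_eq!(eager, lazy);
}
```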
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs
index 4846ef0..444f9fe 100644
--- a/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs
+++ b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs
@@ -28,42 +28,43 @@
         let (&normalized_output_ty, normalized_input_tys) =
             normalized_inputs_and_output.split_last().unwrap();
 
+        let mir_def_id = body.source.def_id().expect_local();
+
         // If the user explicitly annotated the input types, extract
         // those.
         //
         // e.g., `|x: FxHashMap<_, &'static u32>| ...`
         let user_provided_sig;
-        if !self.tcx().is_closure(self.mir_def_id.to_def_id()) {
+        if !self.tcx().is_closure(mir_def_id.to_def_id()) {
             user_provided_sig = None;
         } else {
-            let typeck_results = self.tcx().typeck(self.mir_def_id);
-            user_provided_sig =
-                match typeck_results.user_provided_sigs.get(&self.mir_def_id.to_def_id()) {
-                    None => None,
-                    Some(user_provided_poly_sig) => {
-                        // Instantiate the canonicalized variables from
-                        // user-provided signature (e.g., the `_` in the code
-                        // above) with fresh variables.
-                        let (poly_sig, _) =
-                            self.infcx.instantiate_canonical_with_fresh_inference_vars(
-                                body.span,
-                                &user_provided_poly_sig,
-                            );
+            let typeck_results = self.tcx().typeck(mir_def_id);
+            user_provided_sig = match typeck_results.user_provided_sigs.get(&mir_def_id.to_def_id())
+            {
+                None => None,
+                Some(user_provided_poly_sig) => {
+                    // Instantiate the canonicalized variables from
+                    // user-provided signature (e.g., the `_` in the code
+                    // above) with fresh variables.
+                    let (poly_sig, _) = self.infcx.instantiate_canonical_with_fresh_inference_vars(
+                        body.span,
+                        &user_provided_poly_sig,
+                    );
 
-                        // Replace the bound items in the fn sig with fresh
-                        // variables, so that they represent the view from
-                        // "inside" the closure.
-                        Some(
-                            self.infcx
-                                .replace_bound_vars_with_fresh_vars(
-                                    body.span,
-                                    LateBoundRegionConversionTime::FnCall,
-                                    &poly_sig,
-                                )
-                                .0,
-                        )
-                    }
+                    // Replace the bound items in the fn sig with fresh
+                    // variables, so that they represent the view from
+                    // "inside" the closure.
+                    Some(
+                        self.infcx
+                            .replace_bound_vars_with_fresh_vars(
+                                body.span,
+                                LateBoundRegionConversionTime::FnCall,
+                                &poly_sig,
+                            )
+                            .0,
+                    )
                 }
+            }
         };
 
         debug!(
@@ -72,7 +73,7 @@
         );
 
         // Equate expected input tys with those in the MIR.
-        for (&normalized_input_ty, argument_index) in normalized_input_tys.iter().zip(0..) {
+        for (argument_index, &normalized_input_ty) in normalized_input_tys.iter().enumerate() {
             // In MIR, argument N is stored in local N+1.
             let local = Local::new(argument_index + 1);
 
@@ -86,8 +87,8 @@
         }
 
         if let Some(user_provided_sig) = user_provided_sig {
-            for (&user_provided_input_ty, argument_index) in
-                user_provided_sig.inputs().iter().zip(0..)
+            for (argument_index, &user_provided_input_ty) in
+                user_provided_sig.inputs().iter().enumerate()
             {
                 // In MIR, closures begin with an implicit `self`, so
                 // argument N is stored in local N+2.
@@ -122,7 +123,7 @@
         if let Err(terr) = self.eq_opaque_type_and_type(
             mir_output_ty,
             normalized_output_ty,
-            self.mir_def_id,
+            mir_def_id,
             Locations::All(output_span),
             ConstraintCategory::BoringNoLocation,
         ) {
@@ -145,7 +146,7 @@
             if let Err(err) = self.eq_opaque_type_and_type(
                 mir_output_ty,
                 user_provided_output_ty,
-                self.mir_def_id,
+                mir_def_id,
                 Locations::All(output_span),
                 ConstraintCategory::BoringNoLocation,
             ) {
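The argument loops above switch from `.zip(0..)` to `.enumerate()`. Both visit the same index/element pairs; `enumerate` just states the intent directly and yields the index first. A minimal sketch:

```rust
fn main() {
    let inputs = ["a", "b", "c"];

    let zipped: Vec<(usize, &str)> =
        inputs.iter().zip(0usize..).map(|(&s, i)| (i, s)).collect();
    let enumerated: Vec<(usize, &str)> =
        inputs.iter().enumerate().map(|(i, &s)| (i, s)).collect();

    assert_eq!(zipped, enumerated);
}
```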
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/mod.rs b/compiler/rustc_mir/src/borrow_check/type_check/mod.rs
index 3ace146..4093990 100644
--- a/compiler/rustc_mir/src/borrow_check/type_check/mod.rs
+++ b/compiler/rustc_mir/src/borrow_check/type_check/mod.rs
@@ -73,7 +73,7 @@
             $context.last_span,
             &format!(
                 "broken MIR in {:?} ({:?}): {}",
-                $context.mir_def_id,
+                $context.body.source.def_id(),
                 $elem,
                 format_args!($($message)*),
             ),
@@ -113,7 +113,6 @@
 /// - `param_env` -- parameter environment to use for trait solving
 /// - `body` -- MIR body to type-check
 /// - `promoted` -- map of promoted constants within `body`
-/// - `mir_def_id` -- `LocalDefId` from which the MIR is derived
 /// - `universal_regions` -- the universal regions from `body`s function signature
 /// - `location_table` -- MIR location map of `body`
 /// - `borrow_set` -- information about borrows occurring in `body`
@@ -126,7 +125,6 @@
     param_env: ty::ParamEnv<'tcx>,
     body: &Body<'tcx>,
     promoted: &IndexVec<Promoted, Body<'tcx>>,
-    mir_def_id: LocalDefId,
     universal_regions: &Rc<UniversalRegions<'tcx>>,
     location_table: &LocationTable,
     borrow_set: &BorrowSet<'tcx>,
@@ -170,7 +168,6 @@
 
     let opaque_type_values = type_check_internal(
         infcx,
-        mir_def_id,
         param_env,
         body,
         promoted,
@@ -192,7 +189,6 @@
 
 fn type_check_internal<'a, 'tcx, R>(
     infcx: &'a InferCtxt<'a, 'tcx>,
-    mir_def_id: LocalDefId,
     param_env: ty::ParamEnv<'tcx>,
     body: &'a Body<'tcx>,
     promoted: &'a IndexVec<Promoted, Body<'tcx>>,
@@ -205,7 +201,6 @@
     let mut checker = TypeChecker::new(
         infcx,
         body,
-        mir_def_id,
         param_env,
         region_bound_pairs,
         implicit_region_bound,
@@ -272,7 +267,6 @@
     body: &'b Body<'tcx>,
     promoted: &'b IndexVec<Promoted, Body<'tcx>>,
     last_span: Span,
-    mir_def_id: LocalDefId,
     errors_reported: bool,
 }
 
@@ -460,14 +454,7 @@
         body: &'b Body<'tcx>,
         promoted: &'b IndexVec<Promoted, Body<'tcx>>,
     ) -> Self {
-        TypeVerifier {
-            body,
-            promoted,
-            mir_def_id: cx.mir_def_id,
-            cx,
-            last_span: body.span,
-            errors_reported: false,
-        }
+        TypeVerifier { body, promoted, cx, last_span: body.span, errors_reported: false }
     }
 
     fn tcx(&self) -> TyCtxt<'tcx> {
@@ -816,7 +803,6 @@
     /// User type annotations are shared between the main MIR and the MIR of
     /// all of the promoted items.
     user_type_annotations: &'a CanonicalUserTypeAnnotations<'tcx>,
-    mir_def_id: LocalDefId,
     region_bound_pairs: &'a RegionBoundPairs<'tcx>,
     implicit_region_bound: ty::Region<'tcx>,
     reported_errors: FxHashSet<(Ty<'tcx>, Span)>,
@@ -965,7 +951,6 @@
     fn new(
         infcx: &'a InferCtxt<'a, 'tcx>,
         body: &'a Body<'tcx>,
-        mir_def_id: LocalDefId,
         param_env: ty::ParamEnv<'tcx>,
         region_bound_pairs: &'a RegionBoundPairs<'tcx>,
         implicit_region_bound: ty::Region<'tcx>,
@@ -975,7 +960,6 @@
         let mut checker = Self {
             infcx,
             last_span: DUMMY_SP,
-            mir_def_id,
             body,
             user_type_annotations: &body.user_type_annotations,
             param_env,
@@ -990,6 +974,11 @@
         checker
     }
 
+    fn unsized_feature_enabled(&self) -> bool {
+        let features = self.tcx().features();
+        features.unsized_locals || features.unsized_fn_params
+    }
+
     /// Equate the inferred type and the annotated type for user type annotations
     fn check_user_type_annotations(&mut self) {
         debug!(
@@ -1145,7 +1134,7 @@
                 // the resulting inferred values are stored with the
                 // def-id of the base function.
                 let parent_def_id =
-                    self.tcx().closure_base_def_id(self.mir_def_id.to_def_id()).expect_local();
+                    self.tcx().closure_base_def_id(self.body.source.def_id()).expect_local();
                 return self.eq_opaque_type_and_type(sub, sup, parent_def_id, locations, category);
             } else {
                 return Err(terr);
@@ -1242,7 +1231,7 @@
         let concrete_opaque_types = &tcx.typeck(anon_owner_def_id).concrete_opaque_types;
         let mut opaque_type_values = Vec::new();
 
-        debug!("eq_opaque_type_and_type: mir_def_id={:?}", self.mir_def_id);
+        debug!("eq_opaque_type_and_type: mir_def_id={:?}", body.source.def_id());
         let opaque_type_map = self.fully_perform_op(
             locations,
             category,
@@ -1472,7 +1461,7 @@
                 }
 
                 self.check_rvalue(body, rv, location);
-                if !self.tcx().features().unsized_locals {
+                if !self.unsized_feature_enabled() {
                     let trait_ref = ty::TraitRef {
                         def_id: tcx.require_lang_item(LangItem::Sized, Some(self.last_span)),
                         substs: tcx.mk_substs_trait(place_ty, &[]),
@@ -1733,9 +1722,9 @@
                     );
                 }
 
-                // When `#![feature(unsized_locals)]` is not enabled,
+                // When `unsized_fn_params` and `unsized_locals` are both not enabled,
                 // this check is done at `check_local`.
-                if self.tcx().features().unsized_locals {
+                if self.unsized_feature_enabled() {
                     let span = term.source_info.span;
                     self.ensure_place_sized(dest_ty, span);
                 }
@@ -1793,7 +1782,7 @@
                 self.assert_iscleanup(body, block_data, target, is_cleanup)
             }
             TerminatorKind::SwitchInt { ref targets, .. } => {
-                for target in targets {
+                for target in targets.all_targets() {
                     self.assert_iscleanup(body, block_data, *target, is_cleanup);
                 }
             }
@@ -1896,9 +1885,9 @@
             LocalKind::Var | LocalKind::Temp => {}
         }
 
-        // When `#![feature(unsized_locals)]` is enabled, only function calls
+        // When `unsized_fn_params` or `unsized_locals` is enabled, only function calls
         // and nullary ops are checked in `check_call_dest`.
-        if !self.tcx().features().unsized_locals {
+        if !self.unsized_feature_enabled() {
             let span = local_decl.source_info.span;
             let ty = local_decl.ty;
             self.ensure_place_sized(ty, span);
@@ -2001,12 +1990,7 @@
                         let span = body.source_info(location).span;
                         let ty = operand.ty(body, tcx);
                         if !self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span) {
-                            let ccx = ConstCx::new_with_param_env(
-                                tcx,
-                                self.mir_def_id,
-                                body,
-                                self.param_env,
-                            );
+                            let ccx = ConstCx::new_with_param_env(tcx, body, self.param_env);
                             // To determine if `const_in_array_repeat_expressions` feature gate should
                             // be mentioned, need to check if the rvalue is promotable.
                             let should_suggest =
@@ -2015,11 +1999,12 @@
                                 );
                             debug!("check_rvalue: should_suggest={:?}", should_suggest);
 
+                            let def_id = body.source.def_id().expect_local();
                             self.infcx.report_selection_error(
                                 &traits::Obligation::new(
                                     ObligationCause::new(
                                         span,
-                                        self.tcx().hir().local_def_id_to_hir_id(self.mir_def_id),
+                                        self.tcx().hir().local_def_id_to_hir_id(def_id),
                                         traits::ObligationCauseCode::RepeatVec(should_suggest),
                                     ),
                                     self.param_env,
@@ -2044,7 +2029,7 @@
 
             Rvalue::NullaryOp(_, ty) => {
                 // Even with unsized locals, we cannot box an unsized value.
-                if self.tcx().features().unsized_locals {
+                if self.unsized_feature_enabled() {
                     let span = body.source_info(location).span;
                     self.ensure_place_sized(ty, span);
                 }
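The new `unsized_feature_enabled()` helper above folds the `unsized_locals`/`unsized_fn_params` gates into one place so the call sites stay in sync. A sketch of the same consolidation pattern with hypothetical `Features`/`Checker` types (not the rustc API):

```rust
struct Features {
    unsized_locals: bool,
    unsized_fn_params: bool,
}

struct Checker {
    features: Features,
}

impl Checker {
    // One helper instead of repeating the `||` check at every call site.
    fn unsized_feature_enabled(&self) -> bool {
        self.features.unsized_locals || self.features.unsized_fn_params
    }

    fn check_local(&self, ty_is_sized: bool) {
        // When neither gate is on, locals must be `Sized`.
        if !self.unsized_feature_enabled() {
            assert!(ty_is_sized, "unsized local requires a feature gate");
        }
    }
}

fn main() {
    let checker = Checker {
        features: Features { unsized_locals: false, unsized_fn_params: true },
    };
    checker.check_local(false); // allowed: `unsized_fn_params` is enabled
}
```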
diff --git a/compiler/rustc_mir/src/const_eval/error.rs b/compiler/rustc_mir/src/const_eval/error.rs
index 044d27a..39358e0 100644
--- a/compiler/rustc_mir/src/const_eval/error.rs
+++ b/compiler/rustc_mir/src/const_eval/error.rs
@@ -141,7 +141,7 @@
             err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
                 return ErrorHandled::TooGeneric;
             }
-            err_inval!(TypeckError(error_reported)) => {
+            err_inval!(AlreadyReported(error_reported)) => {
                 return ErrorHandled::Reported(error_reported);
             }
             // We must *always* hard error on these, even if the caller wants just a lint.
diff --git a/compiler/rustc_mir/src/const_eval/eval_queries.rs b/compiler/rustc_mir/src/const_eval/eval_queries.rs
index 57aa216..0cac7c0 100644
--- a/compiler/rustc_mir/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_mir/src/const_eval/eval_queries.rs
@@ -1,8 +1,8 @@
 use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr, MemoryExtra};
 use crate::interpret::eval_nullary_intrinsic;
 use crate::interpret::{
-    intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, GlobalId, Immediate,
-    InternKind, InterpCx, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, Scalar,
+    intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
+    Immediate, InternKind, InterpCx, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, Scalar,
     ScalarMaybeUninit, StackPopCleanup,
 };
 
@@ -14,7 +14,7 @@
 use rustc_middle::ty::{self, subst::Subst, TyCtxt};
 use rustc_span::source_map::Span;
 use rustc_target::abi::{Abi, LayoutOf};
-use std::convert::{TryFrom, TryInto};
+use std::convert::TryInto;
 
 pub fn note_on_undefined_behavior_error() -> &'static str {
     "The rules on what exactly is undefined behavior aren't clear, \
@@ -59,23 +59,15 @@
     ecx.run()?;
 
     // Intern the result
-    // FIXME: since the DefId of a promoted is the DefId of its owner, this
-    // means that promoteds in statics are actually interned like statics!
-    // However, this is also currently crucial because we promote mutable
-    // non-empty slices in statics to extend their lifetime, and this
-    // ensures that they are put into a mutable allocation.
-    // For other kinds of promoteds in statics (like array initializers), this is rather silly.
-    let intern_kind = match tcx.static_mutability(cid.instance.def_id()) {
-        Some(m) => InternKind::Static(m),
-        None if cid.promoted.is_some() => InternKind::Promoted,
-        _ => InternKind::Constant,
+    let intern_kind = if cid.promoted.is_some() {
+        InternKind::Promoted
+    } else {
+        match tcx.static_mutability(cid.instance.def_id()) {
+            Some(m) => InternKind::Static(m),
+            None => InternKind::Constant,
+        }
     };
-    intern_const_alloc_recursive(
-        ecx,
-        intern_kind,
-        ret,
-        body.ignore_interior_mut_in_const_validation,
-    );
+    intern_const_alloc_recursive(ecx, intern_kind, ret)?;
 
     debug!("eval_body_using_ecx done: {:?}", *ret);
     Ok(ret)
@@ -145,15 +137,16 @@
             let alloc = ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory();
             ConstValue::ByRef { alloc, offset: ptr.offset }
         }
-        Scalar::Raw { data, .. } => {
+        Scalar::Int(int) => {
             assert!(mplace.layout.is_zst());
             assert_eq!(
-                u64::try_from(data).unwrap() % mplace.layout.align.abi.bytes(),
+                int.assert_bits(ecx.tcx.data_layout.pointer_size)
+                    % u128::from(mplace.layout.align.abi.bytes()),
                 0,
                 "this MPlaceTy must come from a validated constant, thus we can assume the \
                 alignment is correct",
             );
-            ConstValue::Scalar(Scalar::zst())
+            ConstValue::Scalar(Scalar::ZST)
         }
     };
     match immediate {
@@ -169,7 +162,7 @@
                     Scalar::Ptr(ptr) => {
                         (ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory(), ptr.offset.bytes())
                     }
-                    Scalar::Raw { .. } => (
+                    Scalar::Int { .. } => (
                         ecx.tcx
                             .intern_const_alloc(Allocation::from_byte_aligned_bytes(b"" as &[u8])),
                         0,
@@ -343,7 +336,7 @@
                     // deny-by-default lint
                     _ => {
                         if let Some(p) = cid.promoted {
-                            let span = tcx.promoted_mir_of_opt_const_arg(def.to_global())[p].span;
+                            let span = tcx.promoted_mir_opt_const_arg(def.to_global())[p].span;
                             if let err_inval!(ReferencedConstant) = err.error {
                                 Err(err.report_as_error(
                                     tcx.at(span),
@@ -376,16 +369,23 @@
             // Since evaluation had no errors, validate the resulting constant:
             let validation = try {
                 // FIXME do not validate promoteds until a decision on
-                // https://github.com/rust-lang/rust/issues/67465 is made
+                // https://github.com/rust-lang/rust/issues/67465 and
+                // https://github.com/rust-lang/rust/issues/67534 is made.
+                // Promoteds can contain unexpected `UnsafeCell` and reference `static`s, but their
+                // otherwise restricted form ensures that this is still sound. We just lose the
+                // extra safety net of some of the dynamic checks. They can also contain invalid
+                // values, but since we do not usually check intermediate results of a computation
+                // for validity, it might be surprising to do that here.
                 if cid.promoted.is_none() {
                     let mut ref_tracking = RefTracking::new(mplace);
+                    let mut inner = false;
                     while let Some((mplace, path)) = ref_tracking.todo.pop() {
-                        ecx.const_validate_operand(
-                            mplace.into(),
-                            path,
-                            &mut ref_tracking,
-                            /*may_ref_to_static*/ ecx.memory.extra.can_access_statics,
-                        )?;
+                        let mode = match tcx.static_mutability(cid.instance.def_id()) {
+                            Some(_) => CtfeValidationMode::Regular, // a `static`
+                            None => CtfeValidationMode::Const { inner },
+                        };
+                        ecx.const_validate_operand(mplace.into(), path, &mut ref_tracking, mode)?;
+                        inner = true;
                     }
                 }
             };
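The new validation loop above validates the top-level constant with `inner: false` and everything reached through references with `inner: true`, while statics keep the `Regular` mode. A toy sketch of that control flow (illustrative types only, not the interpreter's `CtfeValidationMode` or `RefTracking`):

```rust
#[derive(Debug)]
enum ValidationMode {
    Regular,              // used for a `static`
    Const { inner: bool },
}

fn validate(path: &str, mode: ValidationMode) {
    println!("validating {} with {:?}", path, mode);
}

fn main() {
    let is_static = false;
    // Work list of places still to validate; the root is popped first.
    let mut todo = vec!["*root.field", "root.field", "root"];

    let mut inner = false;
    while let Some(path) = todo.pop() {
        let mode = if is_static {
            ValidationMode::Regular
        } else {
            ValidationMode::Const { inner }
        };
        validate(path, mode);
        inner = true; // everything after the first place is an inner value
    }
}
```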
diff --git a/compiler/rustc_mir/src/const_eval/machine.rs b/compiler/rustc_mir/src/const_eval/machine.rs
index 73ca7e0..c72089e 100644
--- a/compiler/rustc_mir/src/const_eval/machine.rs
+++ b/compiler/rustc_mir/src/const_eval/machine.rs
@@ -70,9 +70,10 @@
     ) -> InterpResult<'tcx> {
         let def_id = instance.def_id();
         if Some(def_id) == self.tcx.lang_items().panic_fn()
+            || Some(def_id) == self.tcx.lang_items().panic_str()
             || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
         {
-            // &'static str
+            // &str
             assert!(args.len() == 1);
 
             let msg_place = self.deref_operand(args[0])?;
@@ -180,9 +181,9 @@
     fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> bool {
         match (a, b) {
             // Comparisons between integers are always known.
-            (Scalar::Raw { .. }, Scalar::Raw { .. }) => a == b,
+            (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
             // Equality with integers can never be known for sure.
-            (Scalar::Raw { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Raw { .. }) => false,
+            (Scalar::Int { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Int { .. }) => false,
             // FIXME: return `true` for when both sides are the same pointer, *except* that
             // some things (like functions and vtables) do not have stable addresses
             // so we need to be careful around them (see e.g. #73722).
@@ -193,13 +194,13 @@
     fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> bool {
         match (a, b) {
             // Comparisons between integers are always known.
-            (Scalar::Raw { .. }, Scalar::Raw { .. }) => a != b,
+            (Scalar::Int(_), Scalar::Int(_)) => a != b,
             // Comparisons of abstract pointers with null pointers are known if the pointer
             // is in bounds, because if they are in bounds, the pointer can't be null.
-            (Scalar::Raw { data: 0, .. }, Scalar::Ptr(ptr))
-            | (Scalar::Ptr(ptr), Scalar::Raw { data: 0, .. }) => !self.memory.ptr_may_be_null(ptr),
             // Inequality with integers other than null can never be known for sure.
-            (Scalar::Raw { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Raw { .. }) => false,
+            (Scalar::Int(int), Scalar::Ptr(ptr)) | (Scalar::Ptr(ptr), Scalar::Int(int)) => {
+                int.is_null() && !self.memory.ptr_may_be_null(ptr)
+            }
             // FIXME: return `true` for at least some comparisons where we can reliably
             // determine the result of runtime inequality tests at compile-time.
             // Examples include comparison of addresses in different static items.
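The rewritten `guaranteed_ne` above only decides a pointer-vs-integer comparison in the null case, and only when the pointer is known to be in bounds (an in-bounds pointer cannot be null). A self-contained sketch with a toy `Scalar`; the real interpreter consults its memory map via `ptr_may_be_null` rather than an `in_bounds` flag:

```rust
#[derive(Clone, Copy)]
enum Scalar {
    Int(u128),
    Ptr { in_bounds: bool },
}

fn guaranteed_ne(a: Scalar, b: Scalar) -> bool {
    match (a, b) {
        // Integer comparisons are always decidable.
        (Scalar::Int(x), Scalar::Int(y)) => x != y,
        // Pointer vs. integer: only "in-bounds pointer != 0" can be decided.
        (Scalar::Int(i), Scalar::Ptr { in_bounds })
        | (Scalar::Ptr { in_bounds }, Scalar::Int(i)) => i == 0 && in_bounds,
        // Two abstract pointers: stay conservative.
        (Scalar::Ptr { .. }, Scalar::Ptr { .. }) => false,
    }
}

fn main() {
    assert!(guaranteed_ne(Scalar::Int(1), Scalar::Int(2)));
    assert!(guaranteed_ne(Scalar::Int(0), Scalar::Ptr { in_bounds: true }));
    assert!(!guaranteed_ne(Scalar::Int(0), Scalar::Ptr { in_bounds: false }));
}
```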
diff --git a/compiler/rustc_mir/src/const_eval/mod.rs b/compiler/rustc_mir/src/const_eval/mod.rs
index 978d2fe..9dd2a85 100644
--- a/compiler/rustc_mir/src/const_eval/mod.rs
+++ b/compiler/rustc_mir/src/const_eval/mod.rs
@@ -29,7 +29,9 @@
     let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false);
 
     let loc_place = ecx.alloc_caller_location(file, line, col);
-    intern_const_alloc_recursive(&mut ecx, InternKind::Constant, loc_place, false);
+    if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, loc_place).is_err() {
+        bug!("intern_const_alloc_recursive should not error in this case")
+    }
     ConstValue::Scalar(loc_place.ptr)
 }
 
@@ -50,7 +52,7 @@
     let (field_count, variant, down) = match val.ty.kind() {
         ty::Array(_, len) => (usize::try_from(len.eval_usize(tcx, param_env)).unwrap(), None, op),
         ty::Adt(def, _) if def.variants.is_empty() => {
-            return mir::DestructuredConst { variant: None, fields: tcx.arena.alloc_slice(&[]) };
+            return mir::DestructuredConst { variant: None, fields: &[] };
         }
         ty::Adt(def, _) => {
             let variant = ecx.read_discriminant(op).unwrap().1;
diff --git a/compiler/rustc_mir/src/dataflow/framework/direction.rs b/compiler/rustc_mir/src/dataflow/framework/direction.rs
index ca2bb6e..8a9ced9 100644
--- a/compiler/rustc_mir/src/dataflow/framework/direction.rs
+++ b/compiler/rustc_mir/src/dataflow/framework/direction.rs
@@ -1,5 +1,5 @@
 use rustc_index::bit_set::BitSet;
-use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::mir::{self, BasicBlock, Location, SwitchTargets};
 use rustc_middle::ty::TyCtxt;
 use std::ops::RangeInclusive;
 
@@ -488,11 +488,10 @@
                 }
             }
 
-            SwitchInt { ref targets, ref values, ref discr, switch_ty: _ } => {
+            SwitchInt { ref targets, ref discr, switch_ty: _ } => {
                 let mut applier = SwitchIntEdgeEffectApplier {
                     exit_state,
-                    targets: targets.as_ref(),
-                    values: values.as_ref(),
+                    targets,
                     propagate,
                     effects_applied: false,
                 };
@@ -504,8 +503,8 @@
                 } = applier;
 
                 if !effects_applied {
-                    for &target in targets.iter() {
-                        propagate(target, exit_state);
+                    for target in targets.all_targets() {
+                        propagate(*target, exit_state);
                     }
                 }
             }
@@ -515,8 +514,7 @@
 
 struct SwitchIntEdgeEffectApplier<'a, D, F> {
     exit_state: &'a mut D,
-    values: &'a [u128],
-    targets: &'a [BasicBlock],
+    targets: &'a SwitchTargets,
     propagate: F,
 
     effects_applied: bool,
@@ -531,7 +529,7 @@
         assert!(!self.effects_applied);
 
         let mut tmp = None;
-        for (&value, &target) in self.values.iter().zip(self.targets.iter()) {
+        for (value, target) in self.targets.iter() {
             let tmp = opt_clone_from_or_clone(&mut tmp, self.exit_state);
             apply_edge_effect(tmp, SwitchIntTarget { value: Some(value), target });
             (self.propagate)(target, tmp);
@@ -539,7 +537,7 @@
 
         // Once we get to the final, "otherwise" branch, there is no need to preserve `exit_state`,
         // so pass it directly to `apply_edge_effect` to save a clone of the dataflow state.
-        let otherwise = self.targets.last().copied().unwrap();
+        let otherwise = self.targets.otherwise();
         apply_edge_effect(self.exit_state, SwitchIntTarget { value: None, target: otherwise });
         (self.propagate)(otherwise, self.exit_state);
 
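Several hunks in this change lean on the new `SwitchTargets` abstraction, which bundles the old `values`/`targets` pair and exposes `iter()`, `all_targets()`, and `otherwise()`. A hypothetical, simplified version to show the shape of that API (block indices as plain `usize`, not the real MIR types):

```rust
struct SwitchTargets {
    values: Vec<u128>,
    // Invariant: `targets.len() == values.len() + 1`;
    // the final entry is the "otherwise" block.
    targets: Vec<usize>,
}

impl SwitchTargets {
    // Value/target pairs, excluding the otherwise branch.
    fn iter(&self) -> impl Iterator<Item = (u128, usize)> + '_ {
        self.values.iter().copied().zip(self.targets.iter().copied())
    }

    fn all_targets(&self) -> &[usize] {
        &self.targets
    }

    fn otherwise(&self) -> usize {
        *self.targets.last().unwrap()
    }
}

fn main() {
    let sw = SwitchTargets { values: vec![0, 1], targets: vec![10, 11, 99] };
    for (value, target) in sw.iter() {
        println!("value {} -> bb{}", value, target);
    }
    println!("otherwise -> bb{}", sw.otherwise());
    assert_eq!(sw.all_targets(), &[10, 11, 99][..]);
}
```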
diff --git a/compiler/rustc_mir/src/dataflow/framework/engine.rs b/compiler/rustc_mir/src/dataflow/framework/engine.rs
index f39c78f50..3f9f558 100644
--- a/compiler/rustc_mir/src/dataflow/framework/engine.rs
+++ b/compiler/rustc_mir/src/dataflow/framework/engine.rs
@@ -2,7 +2,6 @@
 
 use std::borrow::BorrowMut;
 use std::ffi::OsString;
-use std::fs;
 use std::path::PathBuf;
 
 use rustc_ast as ast;
@@ -12,7 +11,7 @@
 use rustc_index::bit_set::BitSet;
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::mir::{self, traversal, BasicBlock};
-use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::ty::TyCtxt;
 use rustc_span::symbol::{sym, Symbol};
 
 use super::fmt::DebugWithContext;
@@ -21,7 +20,7 @@
     visit_results, Analysis, Direction, GenKill, GenKillAnalysis, GenKillSet, JoinSemiLattice,
     ResultsCursor, ResultsVisitor,
 };
-use crate::util::pretty::dump_enabled;
+use crate::util::pretty::{create_dump_file, dump_enabled};
 
 /// A dataflow analysis that has converged to fixpoint.
 pub struct Results<'tcx, A>
@@ -63,15 +62,6 @@
         let blocks = mir::traversal::reachable(body);
         visit_results(body, blocks.map(|(bb, _)| bb), self, vis)
     }
-
-    pub fn visit_in_rpo_with(
-        &self,
-        body: &'mir mir::Body<'tcx>,
-        vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = A::Domain>,
-    ) {
-        let blocks = mir::traversal::reverse_postorder(body);
-        visit_results(body, blocks.map(|(bb, _)| bb), self, vis)
-    }
 }
 
 /// A solver for dataflow problems.
@@ -81,7 +71,6 @@
 {
     tcx: TyCtxt<'tcx>,
     body: &'a mir::Body<'tcx>,
-    def_id: DefId,
     dead_unwinds: Option<&'a BitSet<BasicBlock>>,
     entry_sets: IndexVec<BasicBlock, A::Domain>,
     pass_name: Option<&'static str>,
@@ -103,18 +92,13 @@
     T: Idx,
 {
     /// Creates a new `Engine` to solve a gen-kill dataflow problem.
-    pub fn new_gen_kill(
-        tcx: TyCtxt<'tcx>,
-        body: &'a mir::Body<'tcx>,
-        def_id: DefId,
-        analysis: A,
-    ) -> Self {
+    pub fn new_gen_kill(tcx: TyCtxt<'tcx>, body: &'a mir::Body<'tcx>, analysis: A) -> Self {
         // If there are no back-edges in the control-flow graph, we only ever need to apply the
         // transfer function for each block exactly once (assuming that we process blocks in RPO).
         //
         // In this case, there's no need to compute the block transfer functions ahead of time.
         if !body.is_cfg_cyclic() {
-            return Self::new(tcx, body, def_id, analysis, None);
+            return Self::new(tcx, body, analysis, None);
         }
 
         // Otherwise, compute and store the cumulative transfer function for each block.
@@ -131,7 +115,7 @@
             trans_for_block[bb].apply(state.borrow_mut());
         });
 
-        Self::new(tcx, body, def_id, analysis, Some(apply_trans as Box<_>))
+        Self::new(tcx, body, analysis, Some(apply_trans as Box<_>))
     }
 }
 
@@ -145,19 +129,13 @@
     ///
     /// Gen-kill problems should use `new_gen_kill`, which will coalesce transfer functions for
     /// better performance.
-    pub fn new_generic(
-        tcx: TyCtxt<'tcx>,
-        body: &'a mir::Body<'tcx>,
-        def_id: DefId,
-        analysis: A,
-    ) -> Self {
-        Self::new(tcx, body, def_id, analysis, None)
+    pub fn new_generic(tcx: TyCtxt<'tcx>, body: &'a mir::Body<'tcx>, analysis: A) -> Self {
+        Self::new(tcx, body, analysis, None)
     }
 
     fn new(
         tcx: TyCtxt<'tcx>,
         body: &'a mir::Body<'tcx>,
-        def_id: DefId,
         analysis: A,
         apply_trans_for_block: Option<Box<dyn Fn(BasicBlock, &mut A::Domain)>>,
     ) -> Self {
@@ -173,7 +151,6 @@
             analysis,
             tcx,
             body,
-            def_id,
             dead_unwinds: None,
             pass_name: None,
             entry_sets,
@@ -209,7 +186,6 @@
             analysis,
             body,
             dead_unwinds,
-            def_id,
             mut entry_sets,
             tcx,
             apply_trans_for_block,
@@ -232,12 +208,19 @@
             }
         }
 
+        // `state` is not actually used between iterations;
+        // this is just an optimization to avoid reallocating
+        // every iteration.
         let mut state = analysis.bottom_value(body);
         while let Some(bb) = dirty_queue.pop() {
             let bb_data = &body[bb];
 
-            // Apply the block transfer function, using the cached one if it exists.
+            // Set the state to the entry state of the block.
+            // This is equivalent to `state = entry_sets[bb].clone()`,
+            // but it saves an allocation, thus improving compile times.
             state.clone_from(&entry_sets[bb]);
+
+            // Apply the block transfer function, using the cached one if it exists.
             match &apply_trans_for_block {
                 Some(apply) => apply(bb, &mut state),
                 None => A::Direction::apply_effects_in_block(&analysis, &mut state, bb, bb_data),
@@ -261,9 +244,9 @@
 
         let results = Results { analysis, entry_sets };
 
-        let res = write_graphviz_results(tcx, def_id, &body, &results, pass_name);
+        let res = write_graphviz_results(tcx, &body, &results, pass_name);
         if let Err(e) = res {
-            warn!("Failed to write graphviz dataflow results: {}", e);
+            error!("Failed to write graphviz dataflow results: {}", e);
         }
 
         results
@@ -276,7 +259,6 @@
 /// `rustc_mir` attributes.
 fn write_graphviz_results<A>(
     tcx: TyCtxt<'tcx>,
-    def_id: DefId,
     body: &mir::Body<'tcx>,
     results: &Results<'tcx, A>,
     pass_name: Option<&'static str>,
@@ -285,6 +267,10 @@
     A: Analysis<'tcx>,
     A::Domain: DebugWithContext<A>,
 {
+    use std::fs;
+    use std::io::{self, Write};
+
+    let def_id = body.source.def_id();
     let attrs = match RustcMirAttrs::parse(tcx, def_id) {
         Ok(attrs) => attrs,
 
@@ -292,27 +278,29 @@
         Err(()) => return Ok(()),
     };
 
-    let path = match attrs.output_path(A::NAME) {
-        Some(path) => path,
+    let mut file = match attrs.output_path(A::NAME) {
+        Some(path) => {
+            debug!("printing dataflow results for {:?} to {}", def_id, path.display());
+            if let Some(parent) = path.parent() {
+                fs::create_dir_all(parent)?;
+            }
+            io::BufWriter::new(fs::File::create(&path)?)
+        }
 
         None if tcx.sess.opts.debugging_opts.dump_mir_dataflow
             && dump_enabled(tcx, A::NAME, def_id) =>
         {
-            // FIXME: Use some variant of `pretty::dump_path` for this
-            let mut path = PathBuf::from(&tcx.sess.opts.debugging_opts.dump_mir_dir);
-
-            let crate_name = tcx.crate_name(def_id.krate);
-            let item_name = ty::print::with_forced_impl_filename_line(|| {
-                tcx.def_path(def_id).to_filename_friendly_no_crate()
-            });
-
-            let pass_name = pass_name.map(|s| format!(".{}", s)).unwrap_or_default();
-
-            path.push(format!("{}.{}.{}{}.dot", crate_name, item_name, A::NAME, pass_name));
-            path
+            create_dump_file(
+                tcx,
+                ".dot",
+                None,
+                A::NAME,
+                &pass_name.unwrap_or("-----"),
+                body.source,
+            )?
         }
 
-        None => return Ok(()),
+        _ => return Ok(()),
     };
 
     let style = match attrs.formatter {
@@ -320,10 +308,9 @@
         _ => graphviz::OutputStyle::AfterOnly,
     };
 
-    debug!("printing dataflow results for {:?} to {}", def_id, path.display());
     let mut buf = Vec::new();
 
-    let graphviz = graphviz::Formatter::new(body, def_id, results, style);
+    let graphviz = graphviz::Formatter::new(body, results, style);
     let mut render_opts =
         vec![dot::RenderOption::Fontname(tcx.sess.opts.debugging_opts.graphviz_font.clone())];
     if tcx.sess.opts.debugging_opts.graphviz_dark_mode {
@@ -331,10 +318,7 @@
     }
     dot::render_opts(&graphviz, &mut buf, &render_opts)?;
 
-    if let Some(parent) = path.parent() {
-        fs::create_dir_all(parent)?;
-    }
-    fs::write(&path, buf)?;
+    file.write_all(&buf)?;
 
     Ok(())
 }
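The engine above now allocates `state` once and refreshes it with `clone_from`, as the added comments explain. A minimal standalone illustration of that reuse pattern:

```rust
fn main() {
    let entry_sets: Vec<Vec<u64>> = vec![vec![1, 2, 3], vec![4, 5], vec![6]];
    let dirty_queue = vec![2usize, 0, 1];

    // Allocated once, outside the loop.
    let mut state: Vec<u64> = Vec::new();

    for bb in dirty_queue {
        // Equivalent to `state = entry_sets[bb].clone()`, but reuses the
        // buffer's existing capacity where possible.
        state.clone_from(&entry_sets[bb]);
        // ...apply the block transfer function to `state` here...
        println!("block {} entry state: {:?}", bb, state);
    }
}
```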
diff --git a/compiler/rustc_mir/src/dataflow/framework/graphviz.rs b/compiler/rustc_mir/src/dataflow/framework/graphviz.rs
index 5d4c425..4e54257 100644
--- a/compiler/rustc_mir/src/dataflow/framework/graphviz.rs
+++ b/compiler/rustc_mir/src/dataflow/framework/graphviz.rs
@@ -6,7 +6,6 @@
 
 use regex::Regex;
 use rustc_graphviz as dot;
-use rustc_hir::def_id::DefId;
 use rustc_middle::mir::{self, BasicBlock, Body, Location};
 
 use super::fmt::{DebugDiffWithAdapter, DebugWithAdapter, DebugWithContext};
@@ -33,7 +32,6 @@
     A: Analysis<'tcx>,
 {
     body: &'a Body<'tcx>,
-    def_id: DefId,
     results: &'a Results<'tcx, A>,
     style: OutputStyle,
 }
@@ -42,13 +40,8 @@
 where
     A: Analysis<'tcx>,
 {
-    pub fn new(
-        body: &'a Body<'tcx>,
-        def_id: DefId,
-        results: &'a Results<'tcx, A>,
-        style: OutputStyle,
-    ) -> Self {
-        Formatter { body, def_id, results, style }
+    pub fn new(body: &'a Body<'tcx>, results: &'a Results<'tcx, A>, style: OutputStyle) -> Self {
+        Formatter { body, results, style }
     }
 }
 
@@ -77,7 +70,7 @@
     type Edge = CfgEdge;
 
     fn graph_id(&self) -> dot::Id<'_> {
-        let name = graphviz_safe_def_name(self.def_id);
+        let name = graphviz_safe_def_name(self.body.source.def_id());
         dot::Id::new(format!("graph_for_def_id_{}", name)).unwrap()
     }
 
diff --git a/compiler/rustc_mir/src/dataflow/framework/mod.rs b/compiler/rustc_mir/src/dataflow/framework/mod.rs
index 65c159e..524ad0a 100644
--- a/compiler/rustc_mir/src/dataflow/framework/mod.rs
+++ b/compiler/rustc_mir/src/dataflow/framework/mod.rs
@@ -13,9 +13,9 @@
 //! ```ignore(cross-crate-imports)
 //! use rustc_mir::dataflow::Analysis; // Makes `into_engine` available.
 //!
-//! fn do_my_analysis(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>, did: DefId) {
+//! fn do_my_analysis(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
 //!     let analysis = MyAnalysis::new()
-//!         .into_engine(tcx, body, did)
+//!         .into_engine(tcx, body)
 //!         .iterate_to_fixpoint()
 //!         .into_results_cursor(body);
 //!
@@ -33,7 +33,6 @@
 use std::borrow::BorrowMut;
 use std::cmp::Ordering;
 
-use rustc_hir::def_id::DefId;
 use rustc_index::bit_set::{BitSet, HybridBitSet};
 use rustc_index::vec::Idx;
 use rustc_middle::mir::{self, BasicBlock, Location};
@@ -218,16 +217,11 @@
     ///     .iterate_to_fixpoint()
     ///     .into_results_cursor(body);
     /// ```
-    fn into_engine(
-        self,
-        tcx: TyCtxt<'tcx>,
-        body: &'mir mir::Body<'tcx>,
-        def_id: DefId,
-    ) -> Engine<'mir, 'tcx, Self>
+    fn into_engine(self, tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Engine<'mir, 'tcx, Self>
     where
         Self: Sized,
     {
-        Engine::new_generic(tcx, body, def_id, self)
+        Engine::new_generic(tcx, body, self)
     }
 }
 
@@ -381,16 +375,11 @@
 
     /* Extension methods */
 
-    fn into_engine(
-        self,
-        tcx: TyCtxt<'tcx>,
-        body: &'mir mir::Body<'tcx>,
-        def_id: DefId,
-    ) -> Engine<'mir, 'tcx, Self>
+    fn into_engine(self, tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Engine<'mir, 'tcx, Self>
     where
         Self: Sized,
     {
-        Engine::new_gen_kill(tcx, body, def_id, self)
+        Engine::new_gen_kill(tcx, body, self)
     }
 }
 
diff --git a/compiler/rustc_mir/src/dataflow/impls/borrows.rs b/compiler/rustc_mir/src/dataflow/impls/borrows.rs
index 0be13b6..6b7889c 100644
--- a/compiler/rustc_mir/src/dataflow/impls/borrows.rs
+++ b/compiler/rustc_mir/src/dataflow/impls/borrows.rs
@@ -177,7 +177,7 @@
         //
         // We are careful always to call this function *before* we
         // set up the gen-bits for the statement or
-        // termanator. That way, if the effect of the statement or
+        // terminator. That way, if the effect of the statement or
         // terminator *does* introduce a new loan of the same
         // region, then setting that gen-bit will override any
         // potential kill introduced here.
diff --git a/compiler/rustc_mir/src/dataflow/impls/liveness.rs b/compiler/rustc_mir/src/dataflow/impls/liveness.rs
index b0da281..a2b0713 100644
--- a/compiler/rustc_mir/src/dataflow/impls/liveness.rs
+++ b/compiler/rustc_mir/src/dataflow/impls/liveness.rs
@@ -8,7 +8,7 @@
 ///
 /// This analysis considers references as being used only at the point of the
 /// borrow. In other words, this analysis does not track uses because of references that already
-/// exist. See [this `mir-datalow` test][flow-test] for an example. You almost never want to use
+/// exist. See [this `mir-dataflow` test][flow-test] for an example. You almost never want to use
 /// this analysis without also looking at the results of [`MaybeBorrowedLocals`].
 ///
 /// [`MaybeBorrowedLocals`]: ../struct.MaybeBorrowedLocals.html
@@ -134,7 +134,7 @@
 
             // `MutatingUseContext::Call` and `MutatingUseContext::Yield` indicate that this is the
             // destination place for a `Call` return or `Yield` resume respectively. Since this is
-            // only a `Def` when the function returns succesfully, we handle this case separately
+            // only a `Def` when the function returns successfully, we handle this case separately
             // in `call_return_effect` above.
             PlaceContext::MutatingUse(MutatingUseContext::Call | MutatingUseContext::Yield) => None,
 
diff --git a/compiler/rustc_mir/src/dataflow/move_paths/builder.rs b/compiler/rustc_mir/src/dataflow/move_paths/builder.rs
index 5c3e353..ab7fada 100644
--- a/compiler/rustc_mir/src/dataflow/move_paths/builder.rs
+++ b/compiler/rustc_mir/src/dataflow/move_paths/builder.rs
@@ -362,17 +362,18 @@
     fn gather_terminator(&mut self, term: &Terminator<'tcx>) {
         match term.kind {
             TerminatorKind::Goto { target: _ }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            // In some sense, returning moves the return place into the current
+            // call's destination. However, since there are no statements after
+            // this that could possibly access the return place, this doesn't
+            // need recording.
+            | TerminatorKind::Return
             | TerminatorKind::Resume
             | TerminatorKind::Abort
             | TerminatorKind::GeneratorDrop
-            | TerminatorKind::FalseEdge { .. }
-            | TerminatorKind::FalseUnwind { .. }
             | TerminatorKind::Unreachable => {}
 
-            TerminatorKind::Return => {
-                self.gather_move(Place::return_place());
-            }
-
             TerminatorKind::Assert { ref cond, .. } => {
                 self.gather_operand(cond);
             }
diff --git a/compiler/rustc_mir/src/interpret/cast.rs b/compiler/rustc_mir/src/interpret/cast.rs
index 0e16b0c..6d224bc 100644
--- a/compiler/rustc_mir/src/interpret/cast.rs
+++ b/compiler/rustc_mir/src/interpret/cast.rs
@@ -13,8 +13,7 @@
 use rustc_target::abi::{Integer, LayoutOf, Variants};
 
 use super::{
-    truncate, util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy,
-    PlaceTy,
+    util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
 };
 
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
@@ -139,9 +138,14 @@
 
         // # First handle non-scalar source values.
 
-        // Handle cast from a univariant (ZST) enum.
+        // Handle cast from a ZST enum (0 or 1 variants).
         match src.layout.variants {
             Variants::Single { index } => {
+                if src.layout.abi.is_uninhabited() {
+                    // This is dead code, because an uninhabited enum is UB to
+                    // instantiate.
+                    throw_ub!(Unreachable);
+                }
                 if let Some(discr) = src.layout.ty.discriminant_for_variant(*self.tcx, index) {
                     assert!(src.layout.is_zst());
                     let discr_layout = self.layout_of(discr.ty)?;
@@ -204,7 +208,7 @@
                     RawPtr(_) => self.pointer_size(),
                     _ => bug!(),
                 };
-                let v = truncate(v, size);
+                let v = size.truncate(v);
                 Scalar::from_uint(v, size)
             }
 
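The free functions `truncate`/`sign_extend` become methods on the size type (`size.truncate(v)`), but the arithmetic is unchanged: keep only the low bits of the value, i.e. reduce it modulo 2^bits. A small sketch of that operation with an illustrative helper (not the compiler's `Size` API):

```rust
// Keep only the low `bits` bits of `value`.
fn truncate(value: u128, bits: u32) -> u128 {
    if bits == 128 { value } else { value & ((1u128 << bits) - 1) }
}

fn main() {
    // 0x1FF truncated to 8 bits keeps only the low byte.
    assert_eq!(truncate(0x1FF, 8), 0xFF);
    // A value that already fits is unchanged.
    assert_eq!(truncate(0x7F, 8), 0x7F);
}
```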
diff --git a/compiler/rustc_mir/src/interpret/eval_context.rs b/compiler/rustc_mir/src/interpret/eval_context.rs
index f970969..0f86a18 100644
--- a/compiler/rustc_mir/src/interpret/eval_context.rs
+++ b/compiler/rustc_mir/src/interpret/eval_context.rs
@@ -9,9 +9,7 @@
 use rustc_macros::HashStable;
 use rustc_middle::ich::StableHashingContext;
 use rustc_middle::mir;
-use rustc_middle::mir::interpret::{
-    sign_extend, truncate, GlobalId, InterpResult, Pointer, Scalar,
-};
+use rustc_middle::mir::interpret::{GlobalId, InterpResult, Pointer, Scalar};
 use rustc_middle::ty::layout::{self, TyAndLayout};
 use rustc_middle::ty::{
     self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
@@ -48,8 +46,41 @@
         FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Pointer<M::PointerTag>>,
 }
 
+// The `PhantomData` exists to prevent this type from being `Send`. If it were sent across a thread
+// boundary and dropped in the other thread, it would exit the span in the other thread.
+struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
+
+impl SpanGuard {
+    /// By default a `SpanGuard` does nothing.
+    fn new() -> Self {
+        Self(tracing::Span::none(), std::marker::PhantomData)
+    }
+
+    /// If a span is entered, we exit the previous span (if any, normally none) and enter the
+    /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
+    /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
+    /// been pushed.
+    fn enter(&mut self, span: tracing::Span) {
+        // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
+        // we never enter more spans than we exit (and vice versa). If the guard is leaked via
+        // `mem::leak`, we can't protect the tracing stack, but that'll just lead to weird
+        // logging, not to actual problems.
+        *self = Self(span, std::marker::PhantomData);
+        self.0.with_subscriber(|(id, dispatch)| {
+            dispatch.enter(id);
+        });
+    }
+}
+
+impl Drop for SpanGuard {
+    fn drop(&mut self) {
+        self.0.with_subscriber(|(id, dispatch)| {
+            dispatch.exit(id);
+        });
+    }
+}
+
 /// A stack frame.
-#[derive(Clone)]
 pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> {
     ////////////////////////////////////////////////////////////////////////////////
     // Function and callsite information
@@ -80,6 +111,11 @@
     /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
     pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,
 
+    /// The span of the `tracing` crate is stored here.
+    /// When the guard is dropped, the span is exited. This gives us
+    /// a full stack trace on all tracing statements.
+    tracing_span: SpanGuard,
+
     ////////////////////////////////////////////////////////////////////////////////
     // Current position within the function
     ////////////////////////////////////////////////////////////////////////////////
@@ -184,6 +220,7 @@
             locals: self.locals,
             loc: self.loc,
             extra,
+            tracing_span: self.tracing_span,
         }
     }
 }
@@ -404,12 +441,12 @@
     #[inline(always)]
     pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
         assert!(ty.abi.is_signed());
-        sign_extend(value, ty.size)
+        ty.size.sign_extend(value)
     }
 
     #[inline(always)]
     pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
-        truncate(value, ty.size)
+        ty.size.truncate(value)
     }
 
     #[inline]
@@ -432,22 +469,18 @@
         if let Some(def) = def.as_local() {
             if self.tcx.has_typeck_results(def.did) {
                 if let Some(error_reported) = self.tcx.typeck_opt_const_arg(def).tainted_by_errors {
-                    throw_inval!(TypeckError(error_reported))
+                    throw_inval!(AlreadyReported(error_reported))
                 }
             }
         }
         trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
         if let Some(promoted) = promoted {
-            return Ok(&self.tcx.promoted_mir_of_opt_const_arg(def)[promoted]);
+            return Ok(&self.tcx.promoted_mir_opt_const_arg(def)[promoted]);
         }
         match instance {
             ty::InstanceDef::Item(def) => {
                 if self.tcx.is_mir_available(def.did) {
-                    if let Some((did, param_did)) = def.as_const_arg() {
-                        Ok(self.tcx.optimized_mir_of_const_arg((did, param_did)))
-                    } else {
-                        Ok(self.tcx.optimized_mir(def.did))
-                    }
+                    Ok(self.tcx.optimized_mir_opt_const_arg(def))
                 } else {
                     throw_unsup!(NoMirFor(def.did))
                 }
@@ -472,11 +505,7 @@
         frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
         value: T,
     ) -> T {
-        if let Some(substs) = frame.instance.substs_for_mir_body() {
-            self.tcx.subst_and_normalize_erasing_regions(substs, self.param_env, &value)
-        } else {
-            self.tcx.normalize_erasing_regions(self.param_env, value)
-        }
+        frame.instance.subst_mir_and_normalize_erasing_regions(*self.tcx, self.param_env, &value)
     }
 
     /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
@@ -492,8 +521,8 @@
             Ok(Some(instance)) => Ok(instance),
             Ok(None) => throw_inval!(TooGeneric),
 
-            // FIXME(eddyb) this could be a bit more specific than `TypeckError`.
-            Err(error_reported) => throw_inval!(TypeckError(error_reported)),
+            // FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
+            Err(error_reported) => throw_inval!(AlreadyReported(error_reported)),
         }
     }
 
@@ -637,11 +666,6 @@
         return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
         return_to_block: StackPopCleanup,
     ) -> InterpResult<'tcx> {
-        if !self.stack().is_empty() {
-            info!("PAUSING({}) {}", self.frame_idx(), self.frame().instance);
-        }
-        ::log_settings::settings().indentation += 1;
-
         // first push a stack frame so we have access to the local substs
         let pre_frame = Frame {
             body,
@@ -652,6 +676,7 @@
             // all methods actually know about the frame
             locals: IndexVec::new(),
             instance,
+            tracing_span: SpanGuard::new(),
             extra: (),
         };
         let frame = M::init_frame_extra(self, pre_frame)?;
@@ -696,7 +721,9 @@
         self.frame_mut().locals = locals;
         M::after_stack_push(self)?;
         self.frame_mut().loc = Ok(mir::Location::START);
-        info!("ENTERING({}) {}", self.frame_idx(), self.frame().instance);
+
+        let span = info_span!("frame", "{}", instance);
+        self.frame_mut().tracing_span.enter(span);
 
         Ok(())
     }
@@ -747,10 +774,8 @@
     /// cause us to continue unwinding.
     pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
         info!(
-            "LEAVING({}) {} (unwinding = {})",
-            self.frame_idx(),
-            self.frame().instance,
-            unwinding
+            "popping stack frame ({})",
+            if unwinding { "during unwinding" } else { "returning from function" }
         );
 
         // Sanity check `unwinding`.
@@ -766,7 +791,6 @@
             throw_ub_format!("unwinding past the topmost frame of the stack");
         }
 
-        ::log_settings::settings().indentation -= 1;
         let frame =
             self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
 
@@ -823,15 +847,6 @@
             }
         }
 
-        if !self.stack().is_empty() {
-            info!(
-                "CONTINUING({}) {} (unwinding = {})",
-                self.frame_idx(),
-                self.frame().instance,
-                unwinding
-            );
-        }
-
         Ok(())
     }
 
@@ -995,7 +1010,16 @@
 {
     fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
         // Exhaustive match on fields to make sure we forget no field.
-        let Frame { body, instance, return_to_block, return_place, locals, loc, extra } = self;
+        let Frame {
+            body,
+            instance,
+            return_to_block,
+            return_place,
+            locals,
+            loc,
+            extra,
+            tracing_span: _,
+        } = self;
         body.hash_stable(hcx, hasher);
         instance.hash_stable(hcx, hasher);
         return_to_block.hash_stable(hcx, hasher);
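
The exhaustive destructuring in `hash_stable` above is a deliberate idiom; a standalone sketch with a made-up `Frame` shows why it is robust against new fields:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Made-up stand-in for the interpreter frame.
struct Frame {
    body: u32,
    instance: u32,
    tracing_span: (), // deliberately not hashed
}

fn hash_frame(frame: &Frame, hasher: &mut impl Hasher) {
    // Listing every field means adding a field to `Frame` becomes a compile
    // error here, so the hashing code cannot silently forget it.
    let Frame { body, instance, tracing_span: _ } = frame;
    body.hash(hasher);
    instance.hash(hasher);
}

fn main() {
    let frame = Frame { body: 1, instance: 2, tracing_span: () };
    let mut hasher = DefaultHasher::new();
    hash_frame(&frame, &mut hasher);
    println!("hash = {}", hasher.finish());
}
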
diff --git a/compiler/rustc_mir/src/interpret/intern.rs b/compiler/rustc_mir/src/interpret/intern.rs
index dd5e9c9..413be42 100644
--- a/compiler/rustc_mir/src/interpret/intern.rs
+++ b/compiler/rustc_mir/src/interpret/intern.rs
@@ -2,12 +2,24 @@
 //!
 //! After a const evaluation has computed a value, before we destroy the const evaluator's session
 //! memory, we need to extract all memory allocations to the global memory pool so they stay around.
+//!
+//! In principle, this is not very complicated: we recursively walk the final value, follow all the
+//! pointers, and move all reachable allocations to the global `tcx` memory. The only complication
+//! is picking the right mutability for the allocations in a `static` initializer: we want to make
+//! as many allocations as possible immutable so LLVM can put them into read-only memory. At the
+//! same time, we need to make memory that could be mutated by the program mutable to avoid
+//! incorrect compilations. To achieve this, we do a type-based traversal of the final value,
+//! tracking mutable and shared references and `UnsafeCell` to determine the current mutability.
+//! (In principle, we could skip this type-based part for `const` and promoteds, as they need to be
+//! always immutable. At least for `const` however we use this opportunity to reject any `const`
+//! that contains allocations whose mutability we cannot identify.)
 
 use super::validity::RefTracking;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::ErrorReported;
 use rustc_hir as hir;
 use rustc_middle::mir::interpret::InterpResult;
-use rustc_middle::ty::{self, layout::TyAndLayout, query::TyCtxtAt, Ty};
+use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
 use rustc_target::abi::Size;
 
 use rustc_ast::Mutability;
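
A user-level illustration (ordinary Rust, not interpreter code) of the mutability tracking described in the module docs above: in this `static` initializer, only the allocation reachable through the `AtomicU32` reference needs to stay mutable.

use std::sync::atomic::{AtomicU32, Ordering};

static COUNTER: AtomicU32 = AtomicU32::new(0);

// The `&0u32` target contains no `UnsafeCell`, so interning can mark it
// read-only; the `&COUNTER` target must remain mutable.
static VIEW: (&u32, &AtomicU32) = (&0u32, &COUNTER);

fn main() {
    println!("{} {}", VIEW.0, VIEW.1.load(Ordering::Relaxed));
}
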
@@ -33,17 +45,13 @@
     /// A list of all encountered allocations. After type-based interning, we traverse this list to
     /// also intern allocations that are only referenced by a raw pointer or inside a union.
     leftover_allocations: &'rt mut FxHashSet<AllocId>,
-    /// The root kind of the value that we're looking at. This field is never mutated and only used
-    /// for sanity assertions that will ICE when `const_qualif` screws up.
+    /// The root kind of the value that we're looking at. This field is never mutated for a
+    /// particular allocation. It is primarily used to make as many allocations as possible
+    /// read-only so LLVM can place them in const memory.
     mode: InternMode,
     /// This field stores whether we are *currently* inside an `UnsafeCell`. This can affect
     /// the intern mode of references we encounter.
     inside_unsafe_cell: bool,
-
-    /// This flag is to avoid triggering UnsafeCells are not allowed behind references in constants
-    /// for promoteds.
-    /// It's a copy of `mir::Body`'s ignore_interior_mut_in_const_validation field
-    ignore_interior_mut_in_const: bool,
 }
 
 #[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
@@ -52,22 +60,14 @@
     /// this is *immutable*, and below mutable references inside an `UnsafeCell`, this
     /// is *mutable*.
     Static(hir::Mutability),
-    /// The "base value" of a const, which can have `UnsafeCell` (as in `const FOO: Cell<i32>`),
-    /// but that interior mutability is simply ignored.
-    ConstBase,
-    /// The "inner values" of a const with references, where `UnsafeCell` is an error.
-    ConstInner,
+    /// A `const`.
+    Const,
 }
 
 /// Signalling data structure to ensure we don't recurse
 /// into the memory of other constants or statics
 struct IsStaticOrFn;
 
-fn mutable_memory_in_const(tcx: TyCtxtAt<'_>, kind: &str) {
-    // FIXME: show this in validation instead so we can point at where in the value the error is?
-    tcx.sess.span_err(tcx.span, &format!("mutable memory ({}) is not allowed in constant", kind));
-}
-
 /// Intern an allocation without looking at its children.
 /// `mode` is the mode of the environment where we found this pointer.
 /// `mutability` is the mutability of the place to be interned; even if that says
@@ -113,8 +113,8 @@
         // For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume
         // no interior mutability.
         let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx, ecx.param_env));
-        // For statics, allocation mutability is the combination of the place mutability and
-        // the type mutability.
+        // For statics, allocation mutability is the combination of place mutability and
+        // type mutability.
         // The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere.
         let immutable = mutability == Mutability::Not && frozen;
         if immutable {
@@ -128,9 +128,7 @@
         // See const_eval::machine::MemoryExtra::can_access_statics for why
         // immutability is so important.
 
-        // There are no sensible checks we can do here; grep for `mutable_memory_in_const` to
-        // find the checks we are doing elsewhere to avoid even getting here for memory
-        // that "wants" to be mutable.
+        // Validation will ensure that there is no `UnsafeCell` on an immutable allocation.
         alloc.mutability = Mutability::Not;
     };
     // link the alloc id to the actual allocation
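
The `immutable` decision above reduces to a small truth table; a standalone sketch:

// An allocation is interned read-only only when the place is not mutable
// *and* its type is freeze (no `UnsafeCell` anywhere inside).
fn interned_immutable(place_is_mutable: bool, frozen: bool) -> bool {
    !place_is_mutable && frozen
}

fn main() {
    assert!(interned_immutable(false, true));   // e.g. `&u32`
    assert!(!interned_immutable(false, false)); // e.g. `&Cell<u32>`
    assert!(!interned_immutable(true, true));   // e.g. `static mut`
}
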
@@ -166,17 +164,13 @@
         mplace: MPlaceTy<'tcx>,
         fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
     ) -> InterpResult<'tcx> {
+        // ZSTs cannot contain pointers, so we can skip them.
+        if mplace.layout.is_zst() {
+            return Ok(());
+        }
+
         if let Some(def) = mplace.layout.ty.ty_adt_def() {
             if Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type() {
-                if self.mode == InternMode::ConstInner && !self.ignore_interior_mut_in_const {
-                    // We do not actually make this memory mutable.  But in case the user
-                    // *expected* it to be mutable, make sure we error.  This is just a
-                    // sanity check to prevent users from accidentally exploiting the UB
-                    // they caused.  It also helps us to find cases where const-checking
-                    // failed to prevent an `UnsafeCell` (but as `ignore_interior_mut_in_const`
-                    // shows that part is not airtight).
-                    mutable_memory_in_const(self.ecx.tcx, "`UnsafeCell`");
-                }
                 // We are crossing over an `UnsafeCell`, we can mutate again. This means that
                 // References we encounter inside here are interned as pointing to mutable
                 // allocations.
@@ -187,6 +181,7 @@
                 return walked;
             }
         }
+
         self.walk_aggregate(mplace, fields)
     }
 
@@ -203,13 +198,12 @@
             if let ty::Dynamic(..) =
                 tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
             {
-                // Validation will error (with a better message) on an invalid vtable pointer
-                // so we can safely not do anything if this is not a real pointer.
                 if let Scalar::Ptr(vtable) = mplace.meta.unwrap_meta() {
                     // Explicitly choose const mode here, since vtables are immutable, even
                     // if the reference of the fat pointer is mutable.
-                    self.intern_shallow(vtable.alloc_id, InternMode::ConstInner, None);
+                    self.intern_shallow(vtable.alloc_id, InternMode::Const, None);
                 } else {
+                    // Validation will error (with a better message) on an invalid vtable pointer.
                     // Let validation show the error message, but make sure it *does* error.
                     tcx.sess
                         .delay_span_bug(tcx.span, "vtables pointers cannot be integer pointers");
@@ -218,7 +212,8 @@
             // Check if we have encountered this pointer+layout combination before.
             // Only recurse for allocation-backed pointers.
             if let Scalar::Ptr(ptr) = mplace.ptr {
-                // Compute the mode with which we intern this.
+                // Compute the mode with which we intern this. Our goal here is to make as many
+                // statics as we can immutable so they can be placed in read-only memory by LLVM.
                 let ref_mode = match self.mode {
                     InternMode::Static(mutbl) => {
                         // In statics, merge outer mutability with reference mutability and
@@ -237,8 +232,13 @@
                             }
                             Mutability::Not => {
                                 // A shared reference, things become immutable.
-                                // We do *not* consier `freeze` here -- that is done more precisely
-                                // when traversing the referenced data (by tracking `UnsafeCell`).
+                                // We do *not* consider `freeze` here: `intern_shallow` considers
+                                // `freeze` for the actual mutability of this allocation; the intern
+                                // mode for references contained in this allocation is tracked more
+                                // precisely when traversing the referenced data (by tracking
+                                // `UnsafeCell`). This makes sure that `&(&i32, &Cell<i32>)` still
+                                // has the left inner reference interned into a read-only
+                                // allocation.
                                 InternMode::Static(Mutability::Not)
                             }
                             Mutability::Mut => {
@@ -247,27 +247,11 @@
                             }
                         }
                     }
-                    InternMode::ConstBase | InternMode::ConstInner => {
-                        // Ignore `UnsafeCell`, everything is immutable.  Do some sanity checking
-                        // for mutable references that we encounter -- they must all be ZST.
-                        // This helps to prevent users from accidentally exploiting UB that they
-                        // caused (by somehow getting a mutable reference in a `const`).
-                        if ref_mutability == Mutability::Mut {
-                            match referenced_ty.kind() {
-                                ty::Array(_, n) if n.eval_usize(*tcx, self.ecx.param_env) == 0 => {}
-                                ty::Slice(_)
-                                    if mplace.meta.unwrap_meta().to_machine_usize(self.ecx)?
-                                        == 0 => {}
-                                _ => mutable_memory_in_const(tcx, "`&mut`"),
-                            }
-                        } else {
-                            // A shared reference. We cannot check `freeze` here due to references
-                            // like `&dyn Trait` that are actually immutable.  We do check for
-                            // concrete `UnsafeCell` when traversing the pointee though (if it is
-                            // a new allocation, not yet interned).
-                        }
-                        // Go on with the "inner" rules.
-                        InternMode::ConstInner
+                    InternMode::Const => {
+                        // Ignore `UnsafeCell`, everything is immutable.  Validity does some sanity
+                        // checking for mutable references that we encounter -- they must all be
+                        // ZST.
+                        InternMode::Const
                     }
                 };
                 match self.intern_shallow(ptr.alloc_id, ref_mode, Some(referenced_ty)) {
@@ -302,12 +286,13 @@
 /// tracks where in the value we are and thus can show much better error messages.
 /// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
 /// are hard errors.
+#[tracing::instrument(skip(ecx))]
 pub fn intern_const_alloc_recursive<M: CompileTimeMachine<'mir, 'tcx>>(
     ecx: &mut InterpCx<'mir, 'tcx, M>,
     intern_kind: InternKind,
     ret: MPlaceTy<'tcx>,
-    ignore_interior_mut_in_const: bool,
-) where
+) -> Result<(), ErrorReported>
+where
     'tcx: 'mir,
 {
     let tcx = ecx.tcx;
@@ -315,7 +300,7 @@
         InternKind::Static(mutbl) => InternMode::Static(mutbl),
         // `Constant` includes array lengths.
         // `Promoted` includes non-`Copy` array initializers and `rustc_args_required_const` arguments.
-        InternKind::Constant | InternKind::Promoted => InternMode::ConstBase,
+        InternKind::Constant | InternKind::Promoted => InternMode::Const,
     };
 
     // Type based interning.
@@ -345,7 +330,6 @@
             ecx,
             mode,
             leftover_allocations,
-            ignore_interior_mut_in_const,
             inside_unsafe_cell: false,
         }
         .visit_value(mplace);
@@ -424,12 +408,14 @@
             // Codegen does not like dangling pointers, and generally `tcx` assumes that
             // all allocations referenced anywhere actually exist. So, make sure we error here.
             ecx.tcx.sess.span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
+            return Err(ErrorReported);
         } else if ecx.tcx.get_global_alloc(alloc_id).is_none() {
             // We have hit an `AllocId` that is neither in local or global memory and isn't
             // marked as dangling by local memory.  That should be impossible.
             span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
         }
     }
+    Ok(())
 }
 
 impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
diff --git a/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs
index d9be28c..5c917f0 100644
--- a/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs
@@ -15,38 +15,61 @@
     /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
     /// frame which is not `#[track_caller]`.
     crate fn find_closest_untracked_caller_location(&self) -> Span {
-        let frame = self
-            .stack()
-            .iter()
-            .rev()
-            // Find first non-`#[track_caller]` frame.
-            .find(|frame| {
+        for frame in self.stack().iter().rev() {
+            debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
+
+            // Assert that the frame we look at is actually executing code currently
+            // (`loc` is `Err` when we are unwinding and the frame does not require cleanup).
+            let loc = frame.loc.unwrap();
+
+            // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
+            // (such as `box`). Use the normal span by default.
+            let mut source_info = *frame.body.source_info(loc);
+
+            // If this is a `Call` terminator, use the `fn_span` instead.
+            let block = &frame.body.basic_blocks()[loc.block];
+            if loc.statement_index == block.statements.len() {
                 debug!(
-                    "find_closest_untracked_caller_location: checking frame {:?}",
-                    frame.instance
+                    "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
+                    block.terminator(),
+                    block.terminator().kind
                 );
-                !frame.instance.def.requires_caller_location(*self.tcx)
-            })
-            // Assert that there is always such a frame.
-            .unwrap();
-        // Assert that the frame we look at is actually executing code currently
-        // (`loc` is `Err` when we are unwinding and the frame does not require cleanup).
-        let loc = frame.loc.unwrap();
-        // If this is a `Call` terminator, use the `fn_span` instead.
-        let block = &frame.body.basic_blocks()[loc.block];
-        if loc.statement_index == block.statements.len() {
-            debug!(
-                "find_closest_untracked_caller_location:: got terminator {:?} ({:?})",
-                block.terminator(),
-                block.terminator().kind
-            );
-            if let TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
-                return fn_span;
+                if let TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
+                    source_info.span = fn_span;
+                }
+            }
+
+            // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+            // If so, the starting `source_info.span` is in the innermost inlined
+            // function, and will be replaced with outer callsite spans as long
+            // as the inlined functions were `#[track_caller]`.
+            loop {
+                let scope_data = &frame.body.source_scopes[source_info.scope];
+
+                if let Some((callee, callsite_span)) = scope_data.inlined {
+                    // Stop inside the most nested non-`#[track_caller]` function,
+                    // before ever reaching its caller (which is irrelevant).
+                    if !callee.def.requires_caller_location(*self.tcx) {
+                        return source_info.span;
+                    }
+                    source_info.span = callsite_span;
+                }
+
+                // Skip past all of the parents with `inlined: None`.
+                match scope_data.inlined_parent_scope {
+                    Some(parent) => source_info.scope = parent,
+                    None => break,
+                }
+            }
+
+            // Stop inside the most nested non-`#[track_caller]` function,
+            // before ever reaching its caller (which is irrelevant).
+            if !frame.instance.def.requires_caller_location(*self.tcx) {
+                return source_info.span;
             }
         }
-        // This is a different terminator (such as `Drop`) or not a terminator at all
-        // (such as `box`). Use the normal span.
-        frame.body.source_info(loc).span
+
+        bug!("no non-`#[track_caller]` frame found")
     }
 
     /// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
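
A user-level example of what `find_closest_untracked_caller_location` computes: with `#[track_caller]`, the reported location is the call site in the nearest frame that is not itself `#[track_caller]`, even through a chain of tracked calls.

use std::panic::Location;

#[track_caller]
fn inner() -> &'static Location<'static> {
    Location::caller()
}

#[track_caller]
fn middle() -> &'static Location<'static> {
    inner() // also `#[track_caller]`, so the search keeps walking up
}

fn main() {
    let loc = middle(); // this call site is the one that gets reported
    println!("{}:{}", loc.file(), loc.line());
}
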
diff --git a/compiler/rustc_mir/src/interpret/machine.rs b/compiler/rustc_mir/src/interpret/machine.rs
index 3718da1..66dbacb 100644
--- a/compiler/rustc_mir/src/interpret/machine.rs
+++ b/compiler/rustc_mir/src/interpret/machine.rs
@@ -3,6 +3,7 @@
 //! interpreting common C functions leak into CTFE.
 
 use std::borrow::{Borrow, Cow};
+use std::fmt::Debug;
 use std::hash::Hash;
 
 use rustc_middle::mir;
@@ -79,19 +80,19 @@
 /// and some use case dependent behaviour can instead be applied.
 pub trait Machine<'mir, 'tcx>: Sized {
     /// Additional memory kinds a machine wishes to distinguish from the builtin ones
-    type MemoryKind: ::std::fmt::Debug + ::std::fmt::Display + MayLeak + Eq + 'static;
+    type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
 
     /// Tag tracked alongside every pointer. This is used to implement "Stacked Borrows"
     /// <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>.
     /// The `default()` is used for pointers to consts, statics, vtables and functions.
     /// The `Debug` formatting is used for displaying pointers; we cannot use `Display`
     /// as `()` does not implement that, but it should be "nice" output.
-    type PointerTag: ::std::fmt::Debug + Copy + Eq + Hash + 'static;
+    type PointerTag: Debug + Copy + Eq + Hash + 'static;
 
     /// Machines can define extra (non-instance) things that represent values of function pointers.
     /// For example, Miri uses this to return a function pointer from `dlsym`
     /// that can later be called to execute the right thing.
-    type ExtraFnVal: ::std::fmt::Debug + Copy;
+    type ExtraFnVal: Debug + Copy;
 
     /// Extra data stored in every call frame.
     type FrameExtra;
diff --git a/compiler/rustc_mir/src/interpret/mod.rs b/compiler/rustc_mir/src/interpret/mod.rs
index a931b0b..a29ef11 100644
--- a/compiler/rustc_mir/src/interpret/mod.rs
+++ b/compiler/rustc_mir/src/interpret/mod.rs
@@ -24,7 +24,7 @@
 pub use self::memory::{AllocCheck, FnVal, Memory, MemoryKind};
 pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
 pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
-pub use self::validity::RefTracking;
+pub use self::validity::{CtfeValidationMode, RefTracking};
 pub use self::visitor::{MutValueVisitor, ValueVisitor};
 
 crate use self::intrinsics::eval_nullary_intrinsic;
diff --git a/compiler/rustc_mir/src/interpret/operand.rs b/compiler/rustc_mir/src/interpret/operand.rs
index 735f890..d9437a3 100644
--- a/compiler/rustc_mir/src/interpret/operand.rs
+++ b/compiler/rustc_mir/src/interpret/operand.rs
@@ -117,7 +117,7 @@
         ty::tls::with(|tcx| {
             match self.imm {
                 Immediate::Scalar(s) => {
-                    if let Some(ty) = tcx.lift(&self.layout.ty) {
+                    if let Some(ty) = tcx.lift(self.layout.ty) {
                         let cx = FmtPrinter::new(tcx, f, Namespace::ValueNS);
                         p(cx, s, ty)?;
                         return Ok(());
@@ -133,7 +133,7 @@
     }
 }
 
-impl<'tcx, Tag> ::std::ops::Deref for ImmTy<'tcx, Tag> {
+impl<'tcx, Tag> std::ops::Deref for ImmTy<'tcx, Tag> {
     type Target = Immediate<Tag>;
     #[inline(always)]
     fn deref(&self) -> &Immediate<Tag> {
@@ -156,7 +156,7 @@
     pub layout: TyAndLayout<'tcx>,
 }
 
-impl<'tcx, Tag> ::std::ops::Deref for OpTy<'tcx, Tag> {
+impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> {
     type Target = Operand<Tag>;
     #[inline(always)]
     fn deref(&self) -> &Operand<Tag> {
@@ -211,14 +211,8 @@
     #[inline]
     pub fn to_const_int(self) -> ConstInt {
         assert!(self.layout.ty.is_integral());
-        ConstInt::new(
-            self.to_scalar()
-                .expect("to_const_int doesn't work on scalar pairs")
-                .assert_bits(self.layout.size),
-            self.layout.size,
-            self.layout.ty.is_signed(),
-            self.layout.ty.is_ptr_sized_integral(),
-        )
+        let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
+        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
     }
 }
 
@@ -262,7 +256,7 @@
                 }
                 return Ok(Some(ImmTy {
                     // zero-sized type
-                    imm: Scalar::zst().into(),
+                    imm: Scalar::ZST.into(),
                     layout: mplace.layout,
                 }));
             }
@@ -340,7 +334,7 @@
     pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
         let len = mplace.len(self)?;
         let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
-        let str = ::std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
+        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
         Ok(str)
     }
 
@@ -361,7 +355,7 @@
 
         let field_layout = op.layout.field(self, field)?;
         if field_layout.is_zst() {
-            let immediate = Scalar::zst().into();
+            let immediate = Scalar::ZST.into();
             return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
         }
         let offset = op.layout.fields.offset(field);
@@ -446,7 +440,7 @@
         let layout = self.layout_of_local(frame, local, layout)?;
         let op = if layout.is_zst() {
             // Do not read from ZST, they might not be initialized
-            Operand::Immediate(Scalar::zst().into())
+            Operand::Immediate(Scalar::ZST.into())
         } else {
             M::access_local(&self, frame, local)?
         };
@@ -544,13 +538,13 @@
         let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
             Ok(match scalar {
                 Scalar::Ptr(ptr) => Scalar::Ptr(self.global_base_pointer(ptr)?),
-                Scalar::Raw { data, size } => Scalar::Raw { data, size },
+                Scalar::Int(int) => Scalar::Int(int),
             })
         };
         // Early-return cases.
         let val_val = match val.val {
             ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
-            ty::ConstKind::Error(_) => throw_inval!(TypeckError(ErrorReported)),
+            ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorReported)),
             ty::ConstKind::Unevaluated(def, substs, promoted) => {
                 let instance = self.resolve(def, substs)?;
                 return Ok(self.eval_to_allocation(GlobalId { instance, promoted })?.into());
diff --git a/compiler/rustc_mir/src/interpret/place.rs b/compiler/rustc_mir/src/interpret/place.rs
index 72551b2..a003380 100644
--- a/compiler/rustc_mir/src/interpret/place.rs
+++ b/compiler/rustc_mir/src/interpret/place.rs
@@ -3,6 +3,7 @@
 //! All high-level functions to write to memory work on places as destinations.
 
 use std::convert::TryFrom;
+use std::fmt::Debug;
 use std::hash::Hash;
 
 use rustc_macros::HashStable;
@@ -13,9 +14,9 @@
 use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants};
 
 use super::{
-    mir_assign_valid_types, truncate, AllocId, AllocMap, Allocation, AllocationExtra, ConstAlloc,
-    ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand,
-    Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit,
+    mir_assign_valid_types, AllocId, AllocMap, Allocation, AllocationExtra, ConstAlloc, ImmTy,
+    Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer,
+    PointerArithmetic, Scalar, ScalarMaybeUninit,
 };
 
 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
@@ -86,7 +87,7 @@
     pub layout: TyAndLayout<'tcx>,
 }
 
-impl<'tcx, Tag> ::std::ops::Deref for PlaceTy<'tcx, Tag> {
+impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> {
     type Target = Place<Tag>;
     #[inline(always)]
     fn deref(&self) -> &Place<Tag> {
@@ -101,7 +102,7 @@
     pub layout: TyAndLayout<'tcx>,
 }
 
-impl<'tcx, Tag> ::std::ops::Deref for MPlaceTy<'tcx, Tag> {
+impl<'tcx, Tag> std::ops::Deref for MPlaceTy<'tcx, Tag> {
     type Target = MemPlace<Tag>;
     #[inline(always)]
     fn deref(&self) -> &MemPlace<Tag> {
@@ -226,7 +227,7 @@
 }
 
 // These are defined here because they produce a place.
-impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> {
+impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> {
     #[inline(always)]
     /// Note: do not call `as_ref` on the resulting place. This function should only be used to
     /// read from the resulting mplace, not to get its address back.
@@ -251,7 +252,7 @@
     }
 }
 
-impl<Tag: ::std::fmt::Debug> Place<Tag> {
+impl<Tag: Debug> Place<Tag> {
     #[inline]
     pub fn assert_mem_place(self) -> MemPlace<Tag> {
         match self {
@@ -261,7 +262,7 @@
     }
 }
 
-impl<'tcx, Tag: ::std::fmt::Debug> PlaceTy<'tcx, Tag> {
+impl<'tcx, Tag: Debug> PlaceTy<'tcx, Tag> {
     #[inline]
     pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
         MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
@@ -272,7 +273,7 @@
 impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
 where
     // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
-    Tag: ::std::fmt::Debug + Copy + Eq + Hash + 'static,
+    Tag: Debug + Copy + Eq + Hash + 'static,
     M: Machine<'mir, 'tcx, PointerTag = Tag>,
     // FIXME: Working around https://github.com/rust-lang/rust/issues/24159
     M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKind>, Allocation<Tag, M::AllocExtra>)>,
@@ -720,12 +721,8 @@
                     dest.layout.size,
                     "Size mismatch when writing pointer"
                 ),
-                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Raw { size, .. })) => {
-                    assert_eq!(
-                        Size::from_bytes(size),
-                        dest.layout.size,
-                        "Size mismatch when writing bits"
-                    )
+                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => {
+                    assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits")
                 }
                 Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size
                 Immediate::ScalarPair(_, _) => {
@@ -1076,7 +1073,7 @@
                 // their computation, but the in-memory tag is the smallest possible
                 // representation
                 let size = tag_layout.value.size(self);
-                let tag_val = truncate(discr_val, size);
+                let tag_val = size.truncate(discr_val);
 
                 let tag_dest = self.place_field(dest, tag_field)?;
                 self.write_scalar(Scalar::from_uint(tag_val, size), tag_dest)?;
diff --git a/compiler/rustc_mir/src/interpret/terminator.rs b/compiler/rustc_mir/src/interpret/terminator.rs
index 9f200ca..bb11c2a 100644
--- a/compiler/rustc_mir/src/interpret/terminator.rs
+++ b/compiler/rustc_mir/src/interpret/terminator.rs
@@ -24,16 +24,16 @@
 
             Goto { target } => self.go_to_block(target),
 
-            SwitchInt { ref discr, ref values, ref targets, switch_ty } => {
+            SwitchInt { ref discr, ref targets, switch_ty } => {
                 let discr = self.read_immediate(self.eval_operand(discr, None)?)?;
                 trace!("SwitchInt({:?})", *discr);
                 assert_eq!(discr.layout.ty, switch_ty);
 
                 // Branch to the `otherwise` case by default, if no match is found.
-                assert!(!targets.is_empty());
-                let mut target_block = targets[targets.len() - 1];
+                assert!(!targets.iter().is_empty());
+                let mut target_block = targets.otherwise();
 
-                for (index, &const_int) in values.iter().enumerate() {
+                for (const_int, target) in targets.iter() {
                     // Compare using binary_op, to also support pointer values
                     let res = self
                         .overflowing_binary_op(
@@ -43,7 +43,7 @@
                         )?
                         .0;
                     if res.to_bool()? {
-                        target_block = targets[index];
+                        target_block = target;
                         break;
                     }
                 }
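
The branch selection above, rewritten around `SwitchTargets`, can be sketched with simplified stand-in types (the real type stores the `otherwise` block as the final entry of its target list):

// Simplified stand-in for `mir::SwitchTargets`.
struct SwitchTargets {
    values: Vec<u128>,
    targets: Vec<usize>, // one more entry than `values`: the `otherwise` block
}

impl SwitchTargets {
    fn otherwise(&self) -> usize {
        *self.targets.last().unwrap()
    }

    fn iter(&self) -> impl Iterator<Item = (u128, usize)> + '_ {
        self.values.iter().copied().zip(self.targets.iter().copied())
    }
}

fn pick_target(discr: u128, targets: &SwitchTargets) -> usize {
    for (value, target) in targets.iter() {
        if discr == value {
            return target;
        }
    }
    targets.otherwise()
}

fn main() {
    let t = SwitchTargets { values: vec![0, 1], targets: vec![10, 20, 99] };
    assert_eq!(pick_target(1, &t), 20);
    assert_eq!(pick_target(7, &t), 99); // no match: fall through to `otherwise`
}
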
diff --git a/compiler/rustc_mir/src/interpret/util.rs b/compiler/rustc_mir/src/interpret/util.rs
index fc5a25f..fce5553c9 100644
--- a/compiler/rustc_mir/src/interpret/util.rs
+++ b/compiler/rustc_mir/src/interpret/util.rs
@@ -1,6 +1,7 @@
 use rustc_middle::mir::interpret::InterpResult;
 use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeVisitor};
 use std::convert::TryInto;
+use std::ops::ControlFlow;
 
 /// Returns `true` if a used generic parameter requires substitution.
 crate fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
@@ -17,24 +18,24 @@
     };
 
     impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> {
-        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> {
             if !c.needs_subst() {
-                return false;
+                return ControlFlow::CONTINUE;
             }
 
             match c.val {
-                ty::ConstKind::Param(..) => true,
+                ty::ConstKind::Param(..) => ControlFlow::BREAK,
                 _ => c.super_visit_with(self),
             }
         }
 
-        fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+        fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
             if !ty.needs_subst() {
-                return false;
+                return ControlFlow::CONTINUE;
             }
 
             match *ty.kind() {
-                ty::Param(_) => true,
+                ty::Param(_) => ControlFlow::BREAK,
                 ty::Closure(def_id, substs)
                 | ty::Generator(def_id, substs, ..)
                 | ty::FnDef(def_id, substs) => {
@@ -50,11 +51,7 @@
                         match (is_used, subst.needs_subst()) {
                             // Just in case there are closures or generators within this subst,
                             // recurse.
-                            (true, true) if subst.super_visit_with(self) => {
-                                // Only return when we find a parameter so the remaining substs
-                                // are not skipped.
-                                return true;
-                            }
+                            (true, true) => return subst.super_visit_with(self),
                             // Confirm that polymorphization replaced the parameter with
                             // `ty::Param`/`ty::ConstKind::Param`.
                             (false, true) if cfg!(debug_assertions) => match subst.unpack() {
@@ -69,7 +66,7 @@
                             _ => {}
                         }
                     }
-                    false
+                    ControlFlow::CONTINUE
                 }
                 _ => ty.super_visit_with(self),
             }
@@ -77,7 +74,7 @@
     }
 
     let mut vis = UsedParamsNeedSubstVisitor { tcx };
-    if ty.visit_with(&mut vis) {
+    if ty.visit_with(&mut vis).is_break() {
         throw_inval!(TooGeneric);
     } else {
         Ok(())
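
The visitor methods above now return `std::ops::ControlFlow` instead of `bool`. A minimal standalone example of the convention, using the stable `Break(())`/`Continue(())` variants (the compiler code uses the nightly `CONTINUE`/`BREAK` constants behind `#![feature(control_flow_enum)]`):

use std::ops::ControlFlow;

// Returns `Break` as soon as a "parameter" is found, `Continue` otherwise,
// mirroring how the visitors above signal "too generic".
fn contains_param(parts: &[&str]) -> ControlFlow<()> {
    for part in parts {
        if *part == "param" {
            return ControlFlow::Break(());
        }
    }
    ControlFlow::Continue(())
}

fn main() {
    assert!(contains_param(&["i32", "param"]).is_break());
    assert!(!contains_param(&["i32", "u8"]).is_break());
}
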
diff --git a/compiler/rustc_mir/src/interpret/validity.rs b/compiler/rustc_mir/src/interpret/validity.rs
index 2b83e1c..2d235d6 100644
--- a/compiler/rustc_mir/src/interpret/validity.rs
+++ b/compiler/rustc_mir/src/interpret/validity.rs
@@ -113,6 +113,17 @@
     DynDowncast,
 }
 
+/// Extra things to check for during validation of CTFE results.
+pub enum CtfeValidationMode {
+    /// Regular validation, nothing special happening.
+    Regular,
+    /// Validation of a `const`. `inner` says if this is an inner, indirect allocation (as opposed
+    /// to the top-level const allocation).
+    /// Being an inner allocation makes a difference because the top-level allocation of a `const`
+    /// is copied for each use, but the inner allocations are implicitly shared.
+    Const { inner: bool },
+}
+
 /// State for tracking recursive validation of references
 pub struct RefTracking<T, PATH = ()> {
     pub seen: FxHashSet<T>,
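
A user-level illustration of the top-level vs. inner distinction documented on `CtfeValidationMode::Const`:

const C: (i32, &i32) = (0, &42);

fn main() {
    let a = C; // each use of `C` gets its own copy of the top-level pair...
    let b = C;
    // ...while the allocation behind the inner reference is typically shared
    // between all uses, which is why "inner" data is validated more strictly.
    println!("{:p} {:p}", a.1, b.1);
}
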
@@ -202,9 +213,9 @@
     /// starts must not be changed!  `visit_fields` and `visit_array` rely on
     /// this stack discipline.
     path: Vec<PathElem>,
-    ref_tracking_for_consts:
-        Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
-    may_ref_to_static: bool,
+    ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
+    /// `None` indicates this is not validating for CTFE (but for runtime).
+    ctfe_mode: Option<CtfeValidationMode>,
     ecx: &'rt InterpCx<'mir, 'tcx, M>,
 }
 
@@ -418,7 +429,7 @@
                 { "a dangling {} (use-after-free)", kind },
         );
         // Recursive checking
-        if let Some(ref mut ref_tracking) = self.ref_tracking_for_consts {
+        if let Some(ref mut ref_tracking) = self.ref_tracking {
             if let Some(ptr) = ptr {
                 // not a ZST
                 // Skip validation entirely for some external statics
@@ -426,19 +437,7 @@
                 if let Some(GlobalAlloc::Static(did)) = alloc_kind {
                     assert!(!self.ecx.tcx.is_thread_local_static(did));
                     assert!(self.ecx.tcx.is_static(did));
-                    if self.may_ref_to_static {
-                        // We skip checking other statics. These statics must be sound by
-                        // themselves, and the only way to get broken statics here is by using
-                        // unsafe code.
-                        // The reasons we don't check other statics is twofold. For one, in all
-                        // sound cases, the static was already validated on its own, and second, we
-                        // trigger cycle errors if we try to compute the value of the other static
-                        // and that static refers back to us.
-                        // We might miss const-invalid data,
-                        // but things are still sound otherwise (in particular re: consts
-                        // referring to statics).
-                        return Ok(());
-                    } else {
+                    if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. })) {
                         // See const_eval::machine::MemoryExtra::can_access_statics for why
                         // this check is so important.
                         // This check is reachable when the const just referenced the static,
@@ -447,6 +446,17 @@
                             { "a {} pointing to a static variable", kind }
                         );
                     }
+                    // We skip checking other statics. These statics must be sound by
+                    // themselves, and the only way to get broken statics here is by using
+                    // unsafe code.
+                    // The reasons we don't check other statics is twofold. For one, in all
+                    // sound cases, the static was already validated on its own, and second, we
+                    // trigger cycle errors if we try to compute the value of the other static
+                    // and that static refers back to us.
+                    // We might miss const-invalid data,
+                    // but things are still sound otherwise (in particular re: consts
+                    // referring to statics).
+                    return Ok(());
                 }
             }
             // Proceed recursively even for ZST, no reason to skip them!
@@ -504,7 +514,7 @@
                 let value = self.ecx.read_scalar(value)?;
                 // NOTE: Keep this in sync with the array optimization for int/float
                 // types below!
-                if self.ref_tracking_for_consts.is_some() {
+                if self.ctfe_mode.is_some() {
                     // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
                     let is_bits = value.check_init().map_or(false, |v| v.is_bits());
                     if !is_bits {
@@ -532,7 +542,17 @@
                 }
                 Ok(true)
             }
-            ty::Ref(..) => {
+            ty::Ref(_, ty, mutbl) => {
+                if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
+                    && *mutbl == hir::Mutability::Mut
+                {
+                    // A mutable reference inside a const? That does not seem right (except if it is
+                    // a ZST).
+                    let layout = self.ecx.layout_of(ty)?;
+                    if !layout.is_zst() {
+                        throw_validation_failure!(self.path, { "mutable reference in a `const`" });
+                    }
+                }
                 self.check_safe_pointer(value, "reference")?;
                 Ok(true)
             }
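
A user-level illustration of the rule enforced above (this relies on the long-standing allowance for `&mut` to zero-sized data in a `const`, an assumption about stable behaviour rather than part of this change):

// Tolerated: the pointee is zero-sized, so nothing can be mutated through it.
const EMPTY: &mut [i32] = &mut [];

// Rejected: a non-ZST `&mut` would hand every user of the `const` aliasing
// mutable access to shared data.
// const BAD: &mut i32 = &mut 0;

fn main() {
    assert_eq!(EMPTY.len(), 0);
}
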
@@ -559,9 +579,8 @@
                 // Nothing to check.
                 Ok(true)
             }
-            // The above should be all the (inhabited) primitive types. The rest is compound, we
+            // The above should be all the primitive types. The rest is compound, we
             // check them by visiting their fields/variants.
-            // (`Str` UTF-8 check happens in `visit_aggregate`, too.)
             ty::Adt(..)
             | ty::Tuple(..)
             | ty::Array(..)
@@ -723,6 +742,15 @@
         // Sanity check: `builtin_deref` does not know any pointers that are not primitive.
         assert!(op.layout.ty.builtin_deref(true).is_none());
 
+        // Special check preventing `UnsafeCell` in constants
+        if let Some(def) = op.layout.ty.ty_adt_def() {
+            if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true }))
+                && Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type()
+            {
+                throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
+            }
+        }
+
         // Recursively walk the value at its type.
         self.walk_value(op)?;
 
@@ -775,17 +803,13 @@
                 );
             }
             ty::Array(tys, ..) | ty::Slice(tys)
-                if {
-                    // This optimization applies for types that can hold arbitrary bytes (such as
-                    // integer and floating point types) or for structs or tuples with no fields.
-                    // FIXME(wesleywiser) This logic could be extended further to arbitrary structs
-                    // or tuples made up of integer/floating point types or inhabited ZSTs with no
-                    // padding.
-                    match tys.kind() {
-                        ty::Int(..) | ty::Uint(..) | ty::Float(..) => true,
-                        _ => false,
-                    }
-                } =>
+                // This optimization applies for types that can hold arbitrary bytes (such as
+                // integer and floating point types) or for structs or tuples with no fields.
+                // FIXME(wesleywiser) This logic could be extended further to arbitrary structs
+                // or tuples made up of integer/floating point types or inhabited ZSTs with no
+                // padding.
+                if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..))
+                =>
             {
                 // Optimized handling for arrays of integer/float type.
 
@@ -818,7 +842,7 @@
                     self.ecx,
                     ptr,
                     size,
-                    /*allow_uninit_and_ptr*/ self.ref_tracking_for_consts.is_none(),
+                    /*allow_uninit_and_ptr*/ self.ctfe_mode.is_none(),
                 ) {
                     // In the happy case, we needn't check anything else.
                     Ok(()) => {}
@@ -853,7 +877,7 @@
             // of an array and not all of them, because there's only a single value of a specific
             // ZST type, so either validation fails for all elements or none.
             ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(tys)?.is_zst() => {
-                // Validate just the first element
+                // Validate just the first element (if any).
                 self.walk_aggregate(op, fields.take(1))?
             }
             _ => {
@@ -869,16 +893,13 @@
         &self,
         op: OpTy<'tcx, M::PointerTag>,
         path: Vec<PathElem>,
-        ref_tracking_for_consts: Option<
-            &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>,
-        >,
-        may_ref_to_static: bool,
+        ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
+        ctfe_mode: Option<CtfeValidationMode>,
     ) -> InterpResult<'tcx> {
         trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
 
         // Construct a visitor
-        let mut visitor =
-            ValidityVisitor { path, ref_tracking_for_consts, may_ref_to_static, ecx: self };
+        let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
 
         // Try to cast to ptr *once* instead of all the time.
         let op = self.force_op_ptr(op).unwrap_or(op);
@@ -906,16 +927,18 @@
     /// `ref_tracking` is used to record references that we encounter so that they
     /// can be checked recursively by an outside driving loop.
     ///
-    /// `may_ref_to_static` controls whether references are allowed to point to statics.
+    /// `ctfe_mode` controls whether this must satisfy the rules for constants:
+    /// - no pointers to statics.
+    /// - no `UnsafeCell` or non-ZST `&mut`.
     #[inline(always)]
     pub fn const_validate_operand(
         &self,
         op: OpTy<'tcx, M::PointerTag>,
         path: Vec<PathElem>,
         ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>,
-        may_ref_to_static: bool,
+        ctfe_mode: CtfeValidationMode,
     ) -> InterpResult<'tcx> {
-        self.validate_operand_internal(op, path, Some(ref_tracking), may_ref_to_static)
+        self.validate_operand_internal(op, path, Some(ref_tracking), Some(ctfe_mode))
     }
 
     /// This function checks the data at `op` to be runtime-valid.
@@ -923,6 +946,6 @@
     /// It will error if the bits at the destination do not match the ones described by the layout.
     #[inline(always)]
     pub fn validate_operand(&self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
-        self.validate_operand_internal(op, vec![], None, false)
+        self.validate_operand_internal(op, vec![], None, None)
     }
 }
diff --git a/compiler/rustc_mir/src/lib.rs b/compiler/rustc_mir/src/lib.rs
index c00c686..2ed115b 100644
--- a/compiler/rustc_mir/src/lib.rs
+++ b/compiler/rustc_mir/src/lib.rs
@@ -27,6 +27,7 @@
 #![feature(option_expect_none)]
 #![feature(or_patterns)]
 #![feature(once_cell)]
+#![feature(control_flow_enum)]
 #![recursion_limit = "256"]
 
 #[macro_use]
diff --git a/compiler/rustc_mir/src/monomorphize/collector.rs b/compiler/rustc_mir/src/monomorphize/collector.rs
index 7e12cc9..938181a 100644
--- a/compiler/rustc_mir/src/monomorphize/collector.rs
+++ b/compiler/rustc_mir/src/monomorphize/collector.rs
@@ -197,6 +197,7 @@
 use rustc_span::source_map::{dummy_spanned, respan, Span, Spanned, DUMMY_SP};
 use smallvec::SmallVec;
 use std::iter;
+use std::ops::Range;
 use std::path::PathBuf;
 
 #[derive(PartialEq)]
@@ -210,9 +211,8 @@
 pub struct InliningMap<'tcx> {
     // Maps a source mono item to the range of mono items
     // accessed by it.
-    // The two numbers in the tuple are the start (inclusive) and
-    // end index (exclusive) within the `targets` vecs.
-    index: FxHashMap<MonoItem<'tcx>, (usize, usize)>,
+    // The range selects elements within the `targets` vecs.
+    index: FxHashMap<MonoItem<'tcx>, Range<usize>>,
     targets: Vec<MonoItem<'tcx>>,
 
     // Contains one bit per mono item in the `targets` field. That bit
@@ -245,7 +245,7 @@
         }
 
         let end_index = self.targets.len();
-        assert!(self.index.insert(source, (start_index, end_index)).is_none());
+        assert!(self.index.insert(source, start_index..end_index).is_none());
     }
 
     // Internally iterate over all items referenced by `source` which will be
@@ -254,9 +254,9 @@
     where
         F: FnMut(MonoItem<'tcx>),
     {
-        if let Some(&(start_index, end_index)) = self.index.get(&source) {
-            for (i, candidate) in self.targets[start_index..end_index].iter().enumerate() {
-                if self.inlines.contains(start_index + i) {
+        if let Some(range) = self.index.get(&source) {
+            for (i, candidate) in self.targets[range.clone()].iter().enumerate() {
+                if self.inlines.contains(range.start + i) {
                     f(*candidate);
                 }
             }
@@ -268,8 +268,8 @@
     where
         F: FnMut(MonoItem<'tcx>, &[MonoItem<'tcx>]),
     {
-        for (&accessor, &(start_index, end_index)) in &self.index {
-            f(accessor, &self.targets[start_index..end_index])
+        for (&accessor, range) in &self.index {
+            f(accessor, &self.targets[range.clone()])
         }
     }
 }
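
The switch from a `(start, end)` tuple to `Range<usize>` in `InliningMap` can be sketched with simplified stand-in types:

use std::collections::HashMap;
use std::ops::Range;

// Simplified stand-in for `InliningMap`: `index` maps an item to the range of
// its accesses inside the flat `targets` vector.
struct InliningMap {
    index: HashMap<String, Range<usize>>,
    targets: Vec<String>,
}

impl InliningMap {
    fn with_accesses_from(&self, source: &str, mut f: impl FnMut(&str)) {
        // The stored range slices `targets` directly instead of being
        // reassembled from a separate start and end index.
        if let Some(range) = self.index.get(source) {
            for target in &self.targets[range.clone()] {
                f(target);
            }
        }
    }
}

fn main() {
    let map = InliningMap {
        index: [("root".to_string(), 0..2)].into_iter().collect(),
        targets: vec!["a".into(), "b".into(), "c".into()],
    };
    map.with_accesses_from("root", |t| println!("{t}"));
}
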
@@ -543,11 +543,11 @@
         T: TypeFoldable<'tcx>,
     {
         debug!("monomorphize: self.instance={:?}", self.instance);
-        if let Some(substs) = self.instance.substs_for_mir_body() {
-            self.tcx.subst_and_normalize_erasing_regions(substs, ty::ParamEnv::reveal_all(), &value)
-        } else {
-            self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), value)
-        }
+        self.instance.subst_mir_and_normalize_erasing_regions(
+            self.tcx,
+            ty::ParamEnv::reveal_all(),
+            &value,
+        )
     }
 }
 
diff --git a/compiler/rustc_mir/src/monomorphize/partitioning/default.rs b/compiler/rustc_mir/src/monomorphize/partitioning/default.rs
index 3c89111..037b80e 100644
--- a/compiler/rustc_mir/src/monomorphize/partitioning/default.rs
+++ b/compiler/rustc_mir/src/monomorphize/partitioning/default.rs
@@ -532,7 +532,7 @@
 }
 
 fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibility {
-    if !tcx.sess.target.target.options.default_hidden_visibility {
+    if !tcx.sess.target.default_hidden_visibility {
         return Visibility::Default;
     }
 
diff --git a/compiler/rustc_mir/src/monomorphize/polymorphize.rs b/compiler/rustc_mir/src/monomorphize/polymorphize.rs
index 3f6f117..c2ebc95 100644
--- a/compiler/rustc_mir/src/monomorphize/polymorphize.rs
+++ b/compiler/rustc_mir/src/monomorphize/polymorphize.rs
@@ -20,6 +20,7 @@
 };
 use rustc_span::symbol::sym;
 use std::convert::TryInto;
+use std::ops::ControlFlow;
 
 /// Provide implementations of queries relating to polymorphization analysis.
 pub fn provide(providers: &mut Providers) {
@@ -138,7 +139,7 @@
             // predicate is used.
             let any_param_used = {
                 let mut vis = HasUsedGenericParams { unused_parameters };
-                predicate.visit_with(&mut vis)
+                predicate.visit_with(&mut vis).is_break()
             };
 
             if any_param_used {
@@ -249,17 +250,17 @@
 }
 
 impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
-    fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> bool {
+    fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> ControlFlow<()> {
         debug!("visit_const: c={:?}", c);
         if !c.has_param_types_or_consts() {
-            return false;
+            return ControlFlow::CONTINUE;
         }
 
         match c.val {
             ty::ConstKind::Param(param) => {
                 debug!("visit_const: param={:?}", param);
                 self.unused_parameters.clear(param.index);
-                false
+                ControlFlow::CONTINUE
             }
             ty::ConstKind::Unevaluated(def, _, Some(p))
                 // Avoid considering `T` unused when constants are of the form:
@@ -270,22 +271,22 @@
                 // the generic parameters, instead, traverse the promoted MIR.
                 let promoted = self.tcx.promoted_mir(def.did);
                 self.visit_body(&promoted[p]);
-                false
+                ControlFlow::CONTINUE
             }
             ty::ConstKind::Unevaluated(def, unevaluated_substs, None)
                 if self.tcx.def_kind(def.did) == DefKind::AnonConst =>
             {
                 self.visit_child_body(def.did, unevaluated_substs);
-                false
+                ControlFlow::CONTINUE
             }
             _ => c.super_visit_with(self),
         }
     }
 
-    fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
         debug!("visit_ty: ty={:?}", ty);
         if !ty.has_param_types_or_consts() {
-            return false;
+            return ControlFlow::CONTINUE;
         }
 
         match *ty.kind() {
@@ -293,18 +294,18 @@
                 debug!("visit_ty: def_id={:?}", def_id);
                 // Avoid cycle errors with generators.
                 if def_id == self.def_id {
-                    return false;
+                    return ControlFlow::CONTINUE;
                 }
 
                 // Consider any generic parameters used by any closures/generators as used in the
                 // parent.
                 self.visit_child_body(def_id, substs);
-                false
+                ControlFlow::CONTINUE
             }
             ty::Param(param) => {
                 debug!("visit_ty: param={:?}", param);
                 self.unused_parameters.clear(param.index);
-                false
+                ControlFlow::CONTINUE
             }
             _ => ty.super_visit_with(self),
         }
@@ -317,28 +318,38 @@
 }
 
 impl<'a, 'tcx> TypeVisitor<'tcx> for HasUsedGenericParams<'a> {
-    fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> bool {
+    fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> ControlFlow<()> {
         debug!("visit_const: c={:?}", c);
         if !c.has_param_types_or_consts() {
-            return false;
+            return ControlFlow::CONTINUE;
         }
 
         match c.val {
             ty::ConstKind::Param(param) => {
-                !self.unused_parameters.contains(param.index).unwrap_or(false)
+                if self.unused_parameters.contains(param.index).unwrap_or(false) {
+                    ControlFlow::CONTINUE
+                } else {
+                    ControlFlow::BREAK
+                }
             }
             _ => c.super_visit_with(self),
         }
     }
 
-    fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
         debug!("visit_ty: ty={:?}", ty);
         if !ty.has_param_types_or_consts() {
-            return false;
+            return ControlFlow::CONTINUE;
         }
 
         match ty.kind() {
-            ty::Param(param) => !self.unused_parameters.contains(param.index).unwrap_or(false),
+            ty::Param(param) => {
+                if self.unused_parameters.contains(param.index).unwrap_or(false) {
+                    ControlFlow::CONTINUE
+                } else {
+                    ControlFlow::BREAK
+                }
+            }
             _ => ty.super_visit_with(self),
         }
     }
diff --git a/compiler/rustc_mir/src/shim.rs b/compiler/rustc_mir/src/shim.rs
index 7e4d189..b2fa4b1 100644
--- a/compiler/rustc_mir/src/shim.rs
+++ b/compiler/rustc_mir/src/shim.rs
@@ -78,8 +78,6 @@
     run_passes(
         tcx,
         &mut result,
-        instance,
-        None,
         MirPhase::Const,
         &[&[
             &add_moves_for_packed_drops::AddMovesForPackedDrops,
@@ -163,7 +161,9 @@
     block(&mut blocks, TerminatorKind::Goto { target: return_block });
     block(&mut blocks, TerminatorKind::Return);
 
-    let mut body = new_body(blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
+    let source = MirSource::from_instance(ty::InstanceDef::DropGlue(def_id, ty));
+    let mut body =
+        new_body(source, blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
 
     if let Some(..) = ty {
         // The first argument (index 0), but add 1 for the return value.
@@ -202,15 +202,23 @@
 }
 
 fn new_body<'tcx>(
+    source: MirSource<'tcx>,
     basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
     local_decls: IndexVec<Local, LocalDecl<'tcx>>,
     arg_count: usize,
     span: Span,
 ) -> Body<'tcx> {
     Body::new(
+        source,
         basic_blocks,
         IndexVec::from_elem_n(
-            SourceScopeData { span, parent_scope: None, local_data: ClearCrossCrate::Clear },
+            SourceScopeData {
+                span,
+                parent_scope: None,
+                inlined: None,
+                inlined_parent_scope: None,
+                local_data: ClearCrossCrate::Clear,
+            },
             1,
         ),
         local_decls,
@@ -344,7 +352,11 @@
     }
 
     fn into_mir(self) -> Body<'tcx> {
-        new_body(self.blocks, self.local_decls, self.sig.inputs().len(), self.span)
+        let source = MirSource::from_instance(ty::InstanceDef::CloneShim(
+            self.def_id,
+            self.sig.inputs_and_output[0],
+        ));
+        new_body(source, self.blocks, self.local_decls, self.sig.inputs().len(), self.span)
     }
 
     fn source_info(&self) -> SourceInfo {
@@ -834,7 +846,8 @@
         block(&mut blocks, vec![], TerminatorKind::Resume, true);
     }
 
-    let mut body = new_body(blocks, local_decls, sig.inputs().len(), span);
+    let mut body =
+        new_body(MirSource::from_instance(instance), blocks, local_decls, sig.inputs().len(), span);
 
     if let Abi::RustCall = sig.abi {
         body.spread_arg = Some(Local::new(sig.inputs().len()));
@@ -897,18 +910,16 @@
         is_cleanup: false,
     };
 
-    let body =
-        new_body(IndexVec::from_elem_n(start_block, 1), local_decls, sig.inputs().len(), span);
-
-    crate::util::dump_mir(
-        tcx,
-        None,
-        "mir_map",
-        &0,
-        crate::transform::MirSource::item(ctor_id),
-        &body,
-        |_, _| Ok(()),
+    let source = MirSource::item(ctor_id);
+    let body = new_body(
+        source,
+        IndexVec::from_elem_n(start_block, 1),
+        local_decls,
+        sig.inputs().len(),
+        span,
     );
 
+    crate::util::dump_mir(tcx, None, "mir_map", &0, &body, |_, _| Ok(()));
+
     body
 }
diff --git a/compiler/rustc_mir/src/transform/add_call_guards.rs b/compiler/rustc_mir/src/transform/add_call_guards.rs
index 3385911..1dddaeb 100644
--- a/compiler/rustc_mir/src/transform/add_call_guards.rs
+++ b/compiler/rustc_mir/src/transform/add_call_guards.rs
@@ -1,4 +1,4 @@
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
@@ -31,7 +31,7 @@
  */
 
 impl<'tcx> MirPass<'tcx> for AddCallGuards {
-    fn run_pass(&self, _tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         self.add_call_guards(body);
     }
 }
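
This file shows the shape of the refactor that runs through the remaining transform passes: `Body` now carries its own `MirSource` (see `shim.rs` above, where `new_body` takes a `source` argument), so `MirPass::run_pass` drops the separate `source` parameter and passes read `body.source` instead. A toy model of that design, with every type invented for illustration rather than taken from rustc:

    #[derive(Clone, Copy, Debug)]
    struct DefId(u32);

    struct Source {
        def_id: DefId,
        promoted: Option<u32>,
    }

    struct Body {
        source: Source,
        statements: Vec<String>,
    }

    trait Pass {
        // Before the refactor this took an extra `source` argument; now a pass
        // asks the body for its own provenance.
        fn run_pass(&self, body: &mut Body);
    }

    struct AddCallGuards;

    impl Pass for AddCallGuards {
        fn run_pass(&self, body: &mut Body) {
            // Passes that used to check `src.promoted` / `src.def_id()` now use
            // `body.source`, as ConstProp does further down in this patch.
            if body.source.promoted.is_some() {
                return;
            }
            println!("running on {:?}", body.source.def_id);
            body.statements.push("call guard".to_string());
        }
    }

    fn main() {
        let mut body = Body {
            source: Source { def_id: DefId(7), promoted: None },
            statements: Vec::new(),
        };
        AddCallGuards.run_pass(&mut body);
        assert_eq!(body.statements.len(), 1);
    }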
diff --git a/compiler/rustc_mir/src/transform/add_moves_for_packed_drops.rs b/compiler/rustc_mir/src/transform/add_moves_for_packed_drops.rs
index a02d0f6..417e0a5 100644
--- a/compiler/rustc_mir/src/transform/add_moves_for_packed_drops.rs
+++ b/compiler/rustc_mir/src/transform/add_moves_for_packed_drops.rs
@@ -1,8 +1,7 @@
-use rustc_hir::def_id::DefId;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use crate::util;
 use crate::util::patch::MirPatch;
 
@@ -40,22 +39,19 @@
 pub struct AddMovesForPackedDrops;
 
 impl<'tcx> MirPass<'tcx> for AddMovesForPackedDrops {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        debug!("add_moves_for_packed_drops({:?} @ {:?})", src, body.span);
-        add_moves_for_packed_drops(tcx, body, src.def_id());
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        debug!("add_moves_for_packed_drops({:?} @ {:?})", body.source, body.span);
+        add_moves_for_packed_drops(tcx, body);
     }
 }
 
-pub fn add_moves_for_packed_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, def_id: DefId) {
-    let patch = add_moves_for_packed_drops_patch(tcx, body, def_id);
+pub fn add_moves_for_packed_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    let patch = add_moves_for_packed_drops_patch(tcx, body);
     patch.apply(body);
 }
 
-fn add_moves_for_packed_drops_patch<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    body: &Body<'tcx>,
-    def_id: DefId,
-) -> MirPatch<'tcx> {
+fn add_moves_for_packed_drops_patch<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> MirPatch<'tcx> {
+    let def_id = body.source.def_id();
     let mut patch = MirPatch::new(body);
     let param_env = tcx.param_env(def_id);
 
diff --git a/compiler/rustc_mir/src/transform/add_retag.rs b/compiler/rustc_mir/src/transform/add_retag.rs
index 0c596ba..6fe9f64 100644
--- a/compiler/rustc_mir/src/transform/add_retag.rs
+++ b/compiler/rustc_mir/src/transform/add_retag.rs
@@ -4,7 +4,7 @@
 //! of MIR building, and only after this pass we think of the program as having the
 //! normal MIR semantics.
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 
@@ -58,13 +58,13 @@
 }
 
 impl<'tcx> MirPass<'tcx> for AddRetag {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         if !tcx.sess.opts.debugging_opts.mir_emit_retag {
             return;
         }
 
         // We need an `AllCallEdges` pass before we can do any work.
-        super::add_call_guards::AllCallEdges.run_pass(tcx, src, body);
+        super::add_call_guards::AllCallEdges.run_pass(tcx, body);
 
         let (span, arg_count) = (body.span, body.arg_count);
         let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
@@ -73,6 +73,19 @@
             // a temporary and retag on that.
             is_stable(place.as_ref()) && may_be_reference(place.ty(&*local_decls, tcx).ty)
         };
+        let place_base_raw = |place: &Place<'tcx>| {
+            // If this is a `Deref`, get the type of what we are deref'ing.
+            let deref_base =
+                place.projection.iter().rposition(|p| matches!(p, ProjectionElem::Deref));
+            if let Some(deref_base) = deref_base {
+                let base_proj = &place.projection[..deref_base];
+                let ty = Place::ty_from(place.local, base_proj, &*local_decls, tcx).ty;
+                ty.is_unsafe_ptr()
+            } else {
+                // Not a deref, and thus not raw.
+                false
+            }
+        };
 
         // PART 1
         // Retag arguments at the beginning of the start block.
@@ -136,13 +149,14 @@
             // iterate backwards using indices.
             for i in (0..block_data.statements.len()).rev() {
                 let (retag_kind, place) = match block_data.statements[i].kind {
-                    // Retag-as-raw after escaping to a raw pointer.
-                    StatementKind::Assign(box (place, Rvalue::AddressOf(..))) => {
-                        (RetagKind::Raw, place)
+                    // Retag-as-raw after escaping to a raw pointer, if the referent
+                    // is not already a raw pointer.
+                    StatementKind::Assign(box (lplace, Rvalue::AddressOf(_, ref rplace)))
+                        if !place_base_raw(rplace) =>
+                    {
+                        (RetagKind::Raw, lplace)
                     }
-                    // Assignments of reference or ptr type are the ones where we may have
-                    // to update tags.  This includes `x = &[mut] ...` and hence
-                    // we also retag after taking a reference!
+                    // Retag after assignments of reference type.
                     StatementKind::Assign(box (ref place, ref rvalue)) if needs_retag(place) => {
                         let kind = match rvalue {
                             Rvalue::Ref(_, borrow_kind, _)
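
The new `place_base_raw` closure skips the Retag-as-raw when the address-of expression already reads through a raw pointer: it finds the last `Deref` projection and checks whether the prefix before it has raw-pointer type. A simplified stand-alone sketch of that lookup; the projection model is invented, and the `prefix_is_raw_ptr` callback stands in for `Place::ty_from(..).is_unsafe_ptr()`.

    #[derive(Debug)]
    enum Proj {
        Deref,
        Field(usize),
    }

    fn base_is_raw(projection: &[Proj], prefix_is_raw_ptr: impl Fn(&[Proj]) -> bool) -> bool {
        match projection.iter().rposition(|p| matches!(p, Proj::Deref)) {
            // Ask about the type of whatever the last Deref dereferences.
            Some(i) => prefix_is_raw_ptr(&projection[..i]),
            // No deref at all: nothing is being escaped through a raw pointer.
            None => false,
        }
    }

    fn main() {
        // `&raw mut (*x).field` where `x: *mut T`: the base is already raw,
        // so the pass skips the extra Retag-as-raw.
        let projs = [Proj::Deref, Proj::Field(0)];
        assert!(base_is_raw(&projs, |prefix| prefix.is_empty()));
        // `&raw mut x.field`: no deref, so it still gets Retag-as-raw.
        assert!(!base_is_raw(&[Proj::Field(0)], |_| false));
    }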
diff --git a/compiler/rustc_mir/src/transform/check_const_item_mutation.rs b/compiler/rustc_mir/src/transform/check_const_item_mutation.rs
index b6d57b8..a845704 100644
--- a/compiler/rustc_mir/src/transform/check_const_item_mutation.rs
+++ b/compiler/rustc_mir/src/transform/check_const_item_mutation.rs
@@ -6,12 +6,12 @@
 use rustc_session::lint::builtin::CONST_ITEM_MUTATION;
 use rustc_span::def_id::DefId;
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 
 pub struct CheckConstItemMutation;
 
 impl<'tcx> MirPass<'tcx> for CheckConstItemMutation {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let mut checker = ConstMutationChecker { body, tcx, target_local: None };
         checker.visit_body(&body);
     }
@@ -34,7 +34,6 @@
 
     fn is_const_item_without_destructor(&self, local: Local) -> Option<DefId> {
         let def_id = self.is_const_item(local)?;
-        let mut any_dtor = |_tcx, _def_id| Ok(());
 
         // We avoid linting mutation of a const item if the const's type has a
         // Drop impl. The Drop logic observes the mutation which was performed.
@@ -54,7 +53,7 @@
         //
         //     #[const_mutation_allowed]
         //     pub const LOG: Log = Log { msg: "" };
-        match self.tcx.calculate_dtor(def_id, &mut any_dtor) {
+        match self.tcx.calculate_dtor(def_id, |_, _| Ok(())) {
             Some(_) => None,
             None => Some(def_id),
         }
@@ -62,22 +61,35 @@
 
     fn lint_const_item_usage(
         &self,
+        place: &Place<'tcx>,
         const_item: DefId,
         location: Location,
         decorate: impl for<'b> FnOnce(LintDiagnosticBuilder<'b>) -> DiagnosticBuilder<'b>,
     ) {
-        let source_info = self.body.source_info(location);
-        let lint_root = self.body.source_scopes[source_info.scope]
-            .local_data
-            .as_ref()
-            .assert_crate_local()
-            .lint_root;
+        // Don't lint on borrowing/assigning to a dereference
+        // e.g:
+        //
+        // `unsafe { *FOO = 0; *BAR.field = 1; }`
+        // `unsafe { &mut *FOO }`
+        if !matches!(place.projection.last(), Some(PlaceElem::Deref)) {
+            let source_info = self.body.source_info(location);
+            let lint_root = self.body.source_scopes[source_info.scope]
+                .local_data
+                .as_ref()
+                .assert_crate_local()
+                .lint_root;
 
-        self.tcx.struct_span_lint_hir(CONST_ITEM_MUTATION, lint_root, source_info.span, |lint| {
-            decorate(lint)
-                .span_note(self.tcx.def_span(const_item), "`const` item defined here")
-                .emit()
-        });
+            self.tcx.struct_span_lint_hir(
+                CONST_ITEM_MUTATION,
+                lint_root,
+                source_info.span,
+                |lint| {
+                    decorate(lint)
+                        .span_note(self.tcx.def_span(const_item), "`const` item defined here")
+                        .emit()
+                },
+            );
+        }
     }
 }
 
@@ -89,15 +101,11 @@
             // so emitting a lint would be redundant.
             if !lhs.projection.is_empty() {
                 if let Some(def_id) = self.is_const_item_without_destructor(lhs.local) {
-                    // Don't lint on writes through a pointer
-                    // (e.g. `unsafe { *FOO = 0; *BAR.field = 1; }`)
-                    if !matches!(lhs.projection.last(), Some(PlaceElem::Deref)) {
-                        self.lint_const_item_usage(def_id, loc, |lint| {
-                            let mut lint = lint.build("attempting to modify a `const` item");
-                            lint.note("each usage of a `const` item creates a new temporary - the original `const` item will not be modified");
-                            lint
-                        })
-                    }
+                    self.lint_const_item_usage(&lhs, def_id, loc, |lint| {
+                        let mut lint = lint.build("attempting to modify a `const` item");
+                        lint.note("each usage of a `const` item creates a new temporary; the original `const` item will not be modified");
+                        lint
+                    })
                 }
             }
             // We are looking for MIR of the form:
@@ -128,7 +136,7 @@
                 });
                 let lint_loc =
                     if method_did.is_some() { self.body.terminator_loc(loc.block) } else { loc };
-                self.lint_const_item_usage(def_id, lint_loc, |lint| {
+                self.lint_const_item_usage(place, def_id, lint_loc, |lint| {
                     let mut lint = lint.build("taking a mutable reference to a `const` item");
                     lint
                         .note("each usage of a `const` item creates a new temporary")
diff --git a/compiler/rustc_mir/src/transform/check_consts/mod.rs b/compiler/rustc_mir/src/transform/check_consts/mod.rs
index 8df1348..ba7bea4 100644
--- a/compiler/rustc_mir/src/transform/check_consts/mod.rs
+++ b/compiler/rustc_mir/src/transform/check_consts/mod.rs
@@ -24,25 +24,28 @@
 pub struct ConstCx<'mir, 'tcx> {
     pub body: &'mir mir::Body<'tcx>,
     pub tcx: TyCtxt<'tcx>,
-    pub def_id: LocalDefId,
     pub param_env: ty::ParamEnv<'tcx>,
     pub const_kind: Option<hir::ConstContext>,
 }
 
 impl ConstCx<'mir, 'tcx> {
-    pub fn new(tcx: TyCtxt<'tcx>, def_id: LocalDefId, body: &'mir mir::Body<'tcx>) -> Self {
+    pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
+        let def_id = body.source.def_id().expect_local();
         let param_env = tcx.param_env(def_id);
-        Self::new_with_param_env(tcx, def_id, body, param_env)
+        Self::new_with_param_env(tcx, body, param_env)
     }
 
     pub fn new_with_param_env(
         tcx: TyCtxt<'tcx>,
-        def_id: LocalDefId,
         body: &'mir mir::Body<'tcx>,
         param_env: ty::ParamEnv<'tcx>,
     ) -> Self {
-        let const_kind = tcx.hir().body_const_context(def_id);
-        ConstCx { body, tcx, def_id: def_id, param_env, const_kind }
+        let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
+        ConstCx { body, tcx, param_env, const_kind }
+    }
+
+    pub fn def_id(&self) -> LocalDefId {
+        self.body.source.def_id().expect_local()
     }
 
     /// Returns the kind of const context this `Item` represents (`const`, `static`, etc.).
@@ -55,7 +58,7 @@
     pub fn is_const_stable_const_fn(&self) -> bool {
         self.const_kind == Some(hir::ConstContext::ConstFn)
             && self.tcx.features().staged_api
-            && is_const_stable_const_fn(self.tcx, self.def_id.to_def_id())
+            && is_const_stable_const_fn(self.tcx, self.def_id().to_def_id())
     }
 
     /// Returns the function signature of the item being const-checked if it is a `fn` or `const fn`.
@@ -64,19 +67,25 @@
         //
         // FIXME: Is this still an issue?
         let hir_map = self.tcx.hir();
-        let hir_id = hir_map.local_def_id_to_hir_id(self.def_id);
+        let hir_id = hir_map.local_def_id_to_hir_id(self.def_id());
         hir_map.fn_sig_by_hir_id(hir_id)
     }
 }
 
 /// Returns `true` if this `DefId` points to one of the official `panic` lang items.
 pub fn is_lang_panic_fn(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
-    Some(def_id) == tcx.lang_items().panic_fn() || Some(def_id) == tcx.lang_items().begin_panic_fn()
+    Some(def_id) == tcx.lang_items().panic_fn()
+        || Some(def_id) == tcx.lang_items().panic_str()
+        || Some(def_id) == tcx.lang_items().begin_panic_fn()
 }
 
-pub fn allow_internal_unstable(tcx: TyCtxt<'tcx>, def_id: DefId, feature_gate: Symbol) -> bool {
+pub fn rustc_allow_const_fn_unstable(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    feature_gate: Symbol,
+) -> bool {
     let attrs = tcx.get_attrs(def_id);
-    attr::allow_internal_unstable(&tcx.sess, attrs)
+    attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs)
         .map_or(false, |mut features| features.any(|name| name == feature_gate))
 }
 
diff --git a/compiler/rustc_mir/src/transform/check_consts/ops.rs b/compiler/rustc_mir/src/transform/check_consts/ops.rs
index 32e233e..bd51136 100644
--- a/compiler/rustc_mir/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_mir/src/transform/check_consts/ops.rs
@@ -42,18 +42,6 @@
 }
 
 #[derive(Debug)]
-pub struct Abort;
-impl NonConstOp for Abort {
-    fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
-        mcf_status_in_item(ccx)
-    }
-
-    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
-        mcf_build_error(ccx, span, "abort is not stable in const fn")
-    }
-}
-
-#[derive(Debug)]
 pub struct FloatingPointOp;
 impl NonConstOp for FloatingPointOp {
     fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
@@ -236,7 +224,8 @@
 }
 
 #[derive(Debug)]
-pub struct MutBorrow;
+pub struct MutBorrow(pub hir::BorrowKind);
+
 impl NonConstOp for MutBorrow {
     fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
         // Forbid everywhere except in const fn with a feature gate
@@ -248,22 +237,28 @@
     }
 
     fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let raw = match self.0 {
+            hir::BorrowKind::Raw => "raw ",
+            hir::BorrowKind::Ref => "",
+        };
+
         let mut err = if ccx.const_kind() == hir::ConstContext::ConstFn {
             feature_err(
                 &ccx.tcx.sess.parse_sess,
                 sym::const_mut_refs,
                 span,
-                &format!("mutable references are not allowed in {}s", ccx.const_kind()),
+                &format!("{}mutable references are not allowed in {}s", raw, ccx.const_kind()),
             )
         } else {
             let mut err = struct_span_err!(
                 ccx.tcx.sess,
                 span,
                 E0764,
-                "mutable references are not allowed in {}s",
+                "{}mutable references are not allowed in {}s",
+                raw,
                 ccx.const_kind(),
             );
-            err.span_label(span, format!("`&mut` is only allowed in `const fn`"));
+            err.span_label(span, format!("`&{}mut` is only allowed in `const fn`", raw));
             err
         };
         if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
@@ -282,29 +277,6 @@
     }
 }
 
-// FIXME(ecstaticmorse): Unify this with `MutBorrow`. It has basically the same issues.
-#[derive(Debug)]
-pub struct MutAddressOf;
-impl NonConstOp for MutAddressOf {
-    fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
-        // Forbid everywhere except in const fn with a feature gate
-        if ccx.const_kind() == hir::ConstContext::ConstFn {
-            Status::Unstable(sym::const_mut_refs)
-        } else {
-            Status::Forbidden
-        }
-    }
-
-    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
-        feature_err(
-            &ccx.tcx.sess.parse_sess,
-            sym::const_mut_refs,
-            span,
-            &format!("`&raw mut` is not allowed in {}s", ccx.const_kind()),
-        )
-    }
-}
-
 #[derive(Debug)]
 pub struct MutDeref;
 impl NonConstOp for MutDeref {
@@ -570,12 +542,17 @@
     #[derive(Debug)]
     pub struct ImplTrait;
     impl NonConstOp for ImplTrait {
-        fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
-            mcf_status_in_item(ccx)
+        fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+            Status::Unstable(sym::const_impl_trait)
         }
 
         fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
-            mcf_build_error(ccx, span, "`impl Trait` in const fn is unstable")
+            feature_err(
+                &ccx.tcx.sess.parse_sess,
+                sym::const_impl_trait,
+                span,
+                &format!("`impl Trait` is not allowed in {}s", ccx.const_kind()),
+            )
         }
     }
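
With `MutAddressOf` gone, a single `MutBorrow(hir::BorrowKind)` op reports both `&mut` and `&raw mut`; only the message prefix differs. A small illustrative sketch of that message building (the enum and wording are stand-ins, not the real diagnostics machinery):

    enum BorrowKind {
        Ref,
        Raw,
    }

    fn mut_borrow_message(kind: BorrowKind, const_kind: &str) -> String {
        // The only difference between the two cases is the "raw " prefix.
        let raw = match kind {
            BorrowKind::Raw => "raw ",
            BorrowKind::Ref => "",
        };
        format!("{}mutable references are not allowed in {}s", raw, const_kind)
    }

    fn main() {
        assert_eq!(
            mut_borrow_message(BorrowKind::Raw, "constant function"),
            "raw mutable references are not allowed in constant functions"
        );
        assert_eq!(
            mut_borrow_message(BorrowKind::Ref, "constant"),
            "mutable references are not allowed in constants"
        );
    }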
 
diff --git a/compiler/rustc_mir/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_mir/src/transform/check_consts/post_drop_elaboration.rs
index 9b2568d..1a2d932 100644
--- a/compiler/rustc_mir/src/transform/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_mir/src/transform/check_consts/post_drop_elaboration.rs
@@ -1,4 +1,3 @@
-use rustc_hir::def_id::LocalDefId;
 use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::{self, BasicBlock, Location};
 use rustc_middle::ty::TyCtxt;
@@ -24,13 +23,14 @@
 ///
 /// This is separate from the rest of the const checking logic because it must run after drop
 /// elaboration.
-pub fn check_live_drops(tcx: TyCtxt<'tcx>, def_id: LocalDefId, body: &mir::Body<'tcx>) {
+pub fn check_live_drops(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
+    let def_id = body.source.def_id().expect_local();
     let const_kind = tcx.hir().body_const_context(def_id);
     if const_kind.is_none() {
         return;
     }
 
-    let ccx = ConstCx { body, tcx, def_id, const_kind, param_env: tcx.param_env(def_id) };
+    let ccx = ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def_id) };
     if !checking_enabled(&ccx) {
         return;
     }
diff --git a/compiler/rustc_mir/src/transform/check_consts/qualifs.rs b/compiler/rustc_mir/src/transform/check_consts/qualifs.rs
index 3f4b3ca..b3d9beb 100644
--- a/compiler/rustc_mir/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_mir/src/transform/check_consts/qualifs.rs
@@ -126,7 +126,7 @@
         // because that component may be part of an enum variant (e.g.,
         // `Option::<NonStructuralMatchTy>::Some`), in which case some values of this type may be
         // structural-match (`Option::None`).
-        let id = cx.tcx.hir().local_def_id_to_hir_id(cx.def_id);
+        let id = cx.tcx.hir().local_def_id_to_hir_id(cx.def_id());
         traits::search_for_structural_match_violation(id, cx.body.span, cx.tcx, ty).is_some()
     }
 
diff --git a/compiler/rustc_mir/src/transform/check_consts/validation.rs b/compiler/rustc_mir/src/transform/check_consts/validation.rs
index 4e714bf..4139b54 100644
--- a/compiler/rustc_mir/src/transform/check_consts/validation.rs
+++ b/compiler/rustc_mir/src/transform/check_consts/validation.rs
@@ -50,7 +50,7 @@
         location: Location,
     ) -> bool {
         let indirectly_mutable = self.indirectly_mutable.get_or_insert_with(|| {
-            let ConstCx { tcx, body, def_id, param_env, .. } = *ccx;
+            let ConstCx { tcx, body, param_env, .. } = *ccx;
 
             // We can use `unsound_ignore_borrow_on_drop` here because custom drop impls are not
             // allowed in a const.
@@ -59,7 +59,7 @@
             // without breaking stable code?
             MaybeMutBorrowedLocals::mut_borrows_only(tcx, &body, param_env)
                 .unsound_ignore_borrow_on_drop()
-                .into_engine(tcx, &body, def_id.to_def_id())
+                .into_engine(tcx, &body)
                 .pass_name("const_qualification")
                 .iterate_to_fixpoint()
                 .into_results_cursor(&body)
@@ -84,10 +84,10 @@
         }
 
         let needs_drop = self.needs_drop.get_or_insert_with(|| {
-            let ConstCx { tcx, body, def_id, .. } = *ccx;
+            let ConstCx { tcx, body, .. } = *ccx;
 
             FlowSensitiveAnalysis::new(NeedsDrop, ccx)
-                .into_engine(tcx, &body, def_id.to_def_id())
+                .into_engine(tcx, &body)
                 .iterate_to_fixpoint()
                 .into_results_cursor(&body)
         });
@@ -111,10 +111,10 @@
         }
 
         let has_mut_interior = self.has_mut_interior.get_or_insert_with(|| {
-            let ConstCx { tcx, body, def_id, .. } = *ccx;
+            let ConstCx { tcx, body, .. } = *ccx;
 
             FlowSensitiveAnalysis::new(HasMutInterior, ccx)
-                .into_engine(tcx, &body, def_id.to_def_id())
+                .into_engine(tcx, &body)
                 .iterate_to_fixpoint()
                 .into_results_cursor(&body)
         });
@@ -157,7 +157,7 @@
 
             hir::ConstContext::Const | hir::ConstContext::Static(_) => {
                 let mut cursor = FlowSensitiveAnalysis::new(CustomEq, ccx)
-                    .into_engine(ccx.tcx, &ccx.body, ccx.def_id.to_def_id())
+                    .into_engine(ccx.tcx, &ccx.body)
                     .iterate_to_fixpoint()
                     .into_results_cursor(&ccx.body);
 
@@ -205,7 +205,8 @@
     }
 
     pub fn check_body(&mut self) {
-        let ConstCx { tcx, body, def_id, .. } = *self.ccx;
+        let ConstCx { tcx, body, .. } = *self.ccx;
+        let def_id = self.ccx.def_id();
 
         // `async` functions cannot be `const fn`. This is checked during AST lowering, so there's
         // no need to emit duplicate errors here.
@@ -219,7 +220,7 @@
             // Prevent const trait methods from being annotated as `stable`.
             // FIXME: Do this as part of stability checking.
             if self.is_const_stable_const_fn() {
-                let hir_id = tcx.hir().local_def_id_to_hir_id(self.def_id);
+                let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
                 if crate::const_eval::is_parent_const_impl_raw(tcx, hir_id) {
                     struct_span_err!(
                         self.ccx.tcx.sess,
@@ -291,7 +292,11 @@
 
             Status::Unstable(gate) if self.tcx.features().enabled(gate) => {
                 let unstable_in_stable = self.ccx.is_const_stable_const_fn()
-                    && !super::allow_internal_unstable(self.tcx, self.def_id.to_def_id(), gate);
+                    && !super::rustc_allow_const_fn_unstable(
+                        self.tcx,
+                        self.def_id().to_def_id(),
+                        gate,
+                    );
                 if unstable_in_stable {
                     emit_unstable_in_stable_error(self.ccx, span, gate);
                 }
@@ -367,9 +372,9 @@
     }
 
     fn check_item_predicates(&mut self) {
-        let ConstCx { tcx, def_id, .. } = *self.ccx;
+        let ConstCx { tcx, .. } = *self.ccx;
 
-        let mut current = def_id.to_def_id();
+        let mut current = self.def_id().to_def_id();
         loop {
             let predicates = tcx.predicates_of(current);
             for (predicate, _) in predicates.predicates {
@@ -434,11 +439,13 @@
     fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &BasicBlockData<'tcx>) {
         trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
 
-        // Just as the old checker did, we skip const-checking basic blocks on the unwind path.
-        // These blocks often drop locals that would otherwise be returned from the function.
+        // We don't const-check basic blocks on the cleanup path since we never unwind during
+        // const-eval: a panic causes an immediate compile error. In other words, cleanup blocks
+        // are unreachable during const-eval.
         //
-        // FIXME: This shouldn't be unsound since a panic at compile time will cause a compiler
-        // error anyway, but maybe we should do more here?
+        // We can't be more conservative (e.g., by const-checking cleanup blocks anyways) because
+        // locals that would never be dropped during normal execution are sometimes dropped during
+        // unwinding, which means backwards-incompatible live-drop errors.
         if block.is_cleanup {
             return;
         }
@@ -522,14 +529,16 @@
 
                 if !is_allowed {
                     if let BorrowKind::Mut { .. } = kind {
-                        self.check_op(ops::MutBorrow);
+                        self.check_op(ops::MutBorrow(hir::BorrowKind::Ref));
                     } else {
                         self.check_op(ops::CellBorrow);
                     }
                 }
             }
 
-            Rvalue::AddressOf(Mutability::Mut, _) => self.check_op(ops::MutAddressOf),
+            Rvalue::AddressOf(Mutability::Mut, _) => {
+                self.check_op(ops::MutBorrow(hir::BorrowKind::Raw))
+            }
 
             Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Shallow, ref place)
             | Rvalue::AddressOf(Mutability::Not, ref place) => {
@@ -734,8 +743,8 @@
 
         match &terminator.kind {
             TerminatorKind::Call { func, .. } => {
-                let ConstCx { tcx, body, def_id: caller, param_env, .. } = *self.ccx;
-                let caller = caller.to_def_id();
+                let ConstCx { tcx, body, param_env, .. } = *self.ccx;
+                let caller = self.def_id().to_def_id();
 
                 let fn_ty = func.ty(body, tcx);
 
@@ -802,7 +811,7 @@
                     }
 
                     // Calling an unstable function *always* requires that the corresponding gate
-                    // be enabled, even if the function has `#[allow_internal_unstable(the_gate)]`.
+                    // be enabled, even if the function has `#[rustc_allow_const_fn_unstable(the_gate)]`.
                     if !tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == gate) {
                         self.check_op(ops::FnCallUnstable(callee, Some(gate)));
                         return;
@@ -816,7 +825,7 @@
 
                     // Otherwise, we are something const-stable calling a const-unstable fn.
 
-                    if super::allow_internal_unstable(tcx, caller, gate) {
+                    if super::rustc_allow_const_fn_unstable(tcx, caller, gate) {
                         return;
                     }
 
@@ -874,12 +883,16 @@
             }
 
             TerminatorKind::InlineAsm { .. } => self.check_op(ops::InlineAsm),
-            TerminatorKind::Abort => self.check_op(ops::Abort),
 
             TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => {
                 self.check_op(ops::Generator(hir::GeneratorKind::Gen))
             }
 
+            TerminatorKind::Abort => {
+                // Cleanup blocks are skipped for const checking (see `visit_basic_block_data`).
+                span_bug!(self.span, "`Abort` terminator outside of cleanup block")
+            }
+
             TerminatorKind::Assert { .. }
             | TerminatorKind::FalseEdge { .. }
             | TerminatorKind::FalseUnwind { .. }
@@ -958,8 +971,8 @@
         )
         .span_suggestion(
             attr_span,
-            "otherwise `#[allow_internal_unstable]` can be used to bypass stability checks",
-            format!("#[allow_internal_unstable({})]\n", gate),
+            "otherwise `#[rustc_allow_const_fn_unstable]` can be used to bypass stability checks",
+            format!("#[rustc_allow_const_fn_unstable({})]\n", gate),
             Applicability::MaybeIncorrect,
         )
         .emit();
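
The updated suggestion points at the renamed opt-in: a const-stable `const fn` may rely on an unstable gate only if that gate is listed in its `#[rustc_allow_const_fn_unstable(...)]` attribute. A stand-alone sketch of the membership check this boils down to, mirroring the `features.any(...)` test in `check_consts/mod.rs`; the helper name and gate strings below are illustrative only.

    fn gate_is_allowed(listed_gates: &[&str], gate: &str) -> bool {
        // The attribute carries a list of feature names; the query simply asks
        // whether the gate in question is among them.
        listed_gates.iter().any(|name| *name == gate)
    }

    fn main() {
        let listed = ["const_mut_refs", "const_fn_transmute"];
        assert!(gate_is_allowed(&listed, "const_mut_refs"));
        // Not listed: the use is "unstable in stable" and produces the error
        // with the suggestion shown in the hunk above.
        assert!(!gate_is_allowed(&listed, "const_impl_trait"));
    }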
diff --git a/compiler/rustc_mir/src/transform/check_packed_ref.rs b/compiler/rustc_mir/src/transform/check_packed_ref.rs
index 043b2d0..ee88daa 100644
--- a/compiler/rustc_mir/src/transform/check_packed_ref.rs
+++ b/compiler/rustc_mir/src/transform/check_packed_ref.rs
@@ -3,14 +3,14 @@
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_session::lint::builtin::UNALIGNED_REFERENCES;
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use crate::util;
 
 pub struct CheckPackedRef;
 
 impl<'tcx> MirPass<'tcx> for CheckPackedRef {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        let param_env = tcx.param_env(src.instance.def_id());
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let param_env = tcx.param_env(body.source.def_id());
         let source_info = SourceInfo::outermost(body.span);
         let mut checker = PackedRefChecker { body, tcx, param_env, source_info };
         checker.visit_body(&body);
diff --git a/compiler/rustc_mir/src/transform/check_unsafety.rs b/compiler/rustc_mir/src/transform/check_unsafety.rs
index 7309a41..acec3e8 100644
--- a/compiler/rustc_mir/src/transform/check_unsafety.rs
+++ b/compiler/rustc_mir/src/transform/check_unsafety.rs
@@ -204,6 +204,9 @@
             if let [] = proj_base {
                 let decl = &self.body.local_decls[place.local];
                 if decl.internal {
+                    // If the projection root is an artificial local that we introduced when
+                    // desugaring `static`, give a more specific error message
+                    // (avoid the general "raw pointer" clause below, which would only be confusing).
                     if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
                         if self.tcx.is_mutable_static(def_id) {
                             self.require_unsafe(
@@ -690,7 +693,7 @@
                 // should only issue a warning for the sake of backwards compatibility.
                 //
                 // The solution to those 2 expectations is to always take the minimum of both lints.
-                // This prevent any new errors (unless both lints are explicitely set to `deny`).
+                // This prevents any new errors (unless both lints are explicitly set to `deny`).
                 let lint = if tcx.lint_level_at_node(SAFE_PACKED_BORROWS, lint_root).0
                     <= tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, lint_root).0
                 {
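
The corrected comment describes picking whichever of the two lints is configured less severely, so a new hard error only appears when both lints are set to `deny`. A sketch of that "take the minimum" rule, using an invented `Level` ordering (rustc's own `Level` type differs):

    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
    enum Level {
        Allow,
        Warn,
        Deny,
    }

    fn effective_level(safe_packed_borrows: Level, unsafe_op_in_unsafe_fn: Level) -> Level {
        // Whichever lint is configured less severely wins.
        std::cmp::min(safe_packed_borrows, unsafe_op_in_unsafe_fn)
    }

    fn main() {
        assert_eq!(effective_level(Level::Deny, Level::Warn), Level::Warn);
        assert_eq!(effective_level(Level::Allow, Level::Deny), Level::Allow);
        // Only deny + deny yields a hard error.
        assert_eq!(effective_level(Level::Deny, Level::Deny), Level::Deny);
    }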
diff --git a/compiler/rustc_mir/src/transform/cleanup_post_borrowck.rs b/compiler/rustc_mir/src/transform/cleanup_post_borrowck.rs
index 3f3d247..8ff0fae 100644
--- a/compiler/rustc_mir/src/transform/cleanup_post_borrowck.rs
+++ b/compiler/rustc_mir/src/transform/cleanup_post_borrowck.rs
@@ -18,7 +18,7 @@
 //! [`ForMatchGuard`]: rustc_middle::mir::FakeReadCause::ForMatchGuard
 //! [`Nop`]: rustc_middle::mir::StatementKind::Nop
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use rustc_middle::mir::visit::MutVisitor;
 use rustc_middle::mir::{Body, BorrowKind, Location, Rvalue};
 use rustc_middle::mir::{Statement, StatementKind};
@@ -31,7 +31,7 @@
 }
 
 impl<'tcx> MirPass<'tcx> for CleanupNonCodegenStatements {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let mut delete = DeleteNonCodegenStatements { tcx };
         delete.visit_body(body);
         body.user_type_annotations.raw.clear();
diff --git a/compiler/rustc_mir/src/transform/const_prop.rs b/compiler/rustc_mir/src/transform/const_prop.rs
index 0f04ead..aeb9920 100644
--- a/compiler/rustc_mir/src/transform/const_prop.rs
+++ b/compiler/rustc_mir/src/transform/const_prop.rs
@@ -9,7 +9,6 @@
 use rustc_hir::HirId;
 use rustc_index::bit_set::BitSet;
 use rustc_index::vec::IndexVec;
-use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::mir::visit::{
     MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor,
 };
@@ -20,7 +19,9 @@
 };
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutError, TyAndLayout};
 use rustc_middle::ty::subst::{InternalSubsts, Subst};
-use rustc_middle::ty::{self, ConstInt, ConstKind, Instance, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{
+    self, ConstInt, ConstKind, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeFoldable,
+};
 use rustc_session::lint;
 use rustc_span::{def_id::DefId, Span};
 use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TargetDataLayout};
@@ -28,11 +29,11 @@
 
 use crate::const_eval::ConstEvalErr;
 use crate::interpret::{
-    self, compile_time_machine, truncate, AllocId, Allocation, ConstValue, Frame, ImmTy, Immediate,
-    InterpCx, LocalState, LocalValue, MemPlace, Memory, MemoryKind, OpTy, Operand as InterpOperand,
-    PlaceTy, Pointer, ScalarMaybeUninit, StackPopCleanup,
+    self, compile_time_machine, AllocId, Allocation, ConstValue, CtfeValidationMode, Frame, ImmTy,
+    Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, Memory, MemoryKind, OpTy,
+    Operand as InterpOperand, PlaceTy, Pointer, Scalar, ScalarMaybeUninit, StackPopCleanup,
 };
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 
 /// The maximum number of bytes that we'll allocate space for a local or the return value.
 /// Needed for #66397, because otherwise we eval into large places and that can cause OOM or just
@@ -60,30 +61,31 @@
 pub struct ConstProp;
 
 impl<'tcx> MirPass<'tcx> for ConstProp {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // will be evaluated by miri and produce its errors there
-        if source.promoted.is_some() {
+        if body.source.promoted.is_some() {
             return;
         }
 
         use rustc_middle::hir::map::blocks::FnLikeNode;
-        let hir_id = tcx.hir().local_def_id_to_hir_id(source.def_id().expect_local());
+        let def_id = body.source.def_id().expect_local();
+        let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
 
         let is_fn_like = FnLikeNode::from_node(tcx.hir().get(hir_id)).is_some();
-        let is_assoc_const = tcx.def_kind(source.def_id()) == DefKind::AssocConst;
+        let is_assoc_const = tcx.def_kind(def_id.to_def_id()) == DefKind::AssocConst;
 
         // Only run const prop on functions, methods, closures and associated constants
         if !is_fn_like && !is_assoc_const {
             // skip anon_const/statics/consts because they'll be evaluated by miri anyway
-            trace!("ConstProp skipped for {:?}", source.def_id());
+            trace!("ConstProp skipped for {:?}", def_id);
             return;
         }
 
-        let is_generator = tcx.type_of(source.def_id()).is_generator();
+        let is_generator = tcx.type_of(def_id.to_def_id()).is_generator();
         // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
         // computing their layout.
         if is_generator {
-            trace!("ConstProp skipped for generator {:?}", source.def_id());
+            trace!("ConstProp skipped for generator {:?}", def_id);
             return;
         }
 
@@ -114,7 +116,7 @@
         // the normalization code (leading to cycle errors), since
         // it's usually never invoked in this way.
         let predicates = tcx
-            .predicates_of(source.def_id())
+            .predicates_of(def_id.to_def_id())
             .predicates
             .iter()
             .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None });
@@ -122,20 +124,21 @@
             tcx,
             traits::elaborate_predicates(tcx, predicates).map(|o| o.predicate).collect(),
         ) {
-            trace!("ConstProp skipped for {:?}: found unsatisfiable predicates", source.def_id());
+            trace!("ConstProp skipped for {:?}: found unsatisfiable predicates", def_id);
             return;
         }
 
-        trace!("ConstProp starting for {:?}", source.def_id());
+        trace!("ConstProp starting for {:?}", def_id);
 
         let dummy_body = &Body::new(
+            body.source,
             body.basic_blocks().clone(),
             body.source_scopes.clone(),
             body.local_decls.clone(),
             Default::default(),
             body.arg_count,
             Default::default(),
-            tcx.def_span(source.def_id()),
+            tcx.def_span(def_id),
             body.generator_kind,
         );
 
@@ -143,10 +146,10 @@
         // constants, instead of just checking for const-folding succeeding.
         // That would require an uniform one-def no-mutation analysis
         // and RPO (or recursing when needing the value of a local).
-        let mut optimization_finder = ConstPropagator::new(body, dummy_body, tcx, source);
+        let mut optimization_finder = ConstPropagator::new(body, dummy_body, tcx);
         optimization_finder.visit_body(body);
 
-        trace!("ConstProp done for {:?}", source.def_id());
+        trace!("ConstProp done for {:?}", def_id);
     }
 }
 
@@ -311,7 +314,7 @@
     param_env: ParamEnv<'tcx>,
     // FIXME(eddyb) avoid cloning these two fields more than once,
     // by accessing them through `ecx` instead.
-    source_scopes: IndexVec<SourceScope, SourceScopeData>,
+    source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
     local_decls: IndexVec<Local, LocalDecl<'tcx>>,
     // Because we have `MutVisitor` we can't obtain the `SourceInfo` from a `Location`. So we store
     // the last known `SourceInfo` here and just keep revisiting it.
@@ -346,9 +349,8 @@
         body: &Body<'tcx>,
         dummy_body: &'mir Body<'tcx>,
         tcx: TyCtxt<'tcx>,
-        source: MirSource<'tcx>,
     ) -> ConstPropagator<'mir, 'tcx> {
-        let def_id = source.def_id();
+        let def_id = body.source.def_id();
         let substs = &InternalSubsts::identity_for_item(tcx, def_id);
         let param_env = tcx.param_env_reveal_all_normalized(def_id);
 
@@ -577,8 +579,7 @@
                             Some(l) => l.to_const_int(),
                             // Invent a dummy value, the diagnostic ignores it anyway
                             None => ConstInt::new(
-                                1,
-                                left_size,
+                                ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
                                 left_ty.is_signed(),
                                 left_ty.is_ptr_sized_integral(),
                             ),
@@ -744,7 +745,7 @@
                             }
                         }
                         BinOp::BitOr => {
-                            if arg_value == truncate(u128::MAX, const_arg.layout.size)
+                            if arg_value == const_arg.layout.size.truncate(u128::MAX)
                                 || (const_arg.layout.ty.is_bool() && arg_value == 1)
                             {
                                 this.ecx.write_immediate(*const_arg, dest)?;
@@ -804,8 +805,9 @@
             value,
             vec![],
             // FIXME: is ref tracking too expensive?
+            // FIXME: what is the point of ref tracking if we do not even check the tracked refs?
             &mut interpret::RefTracking::empty(),
-            /*may_ref_to_static*/ true,
+            CtfeValidationMode::Regular,
         ) {
             trace!("validation error, attempt failed: {:?}", e);
             return;
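
Two details in this file are worth spelling out: `truncate` became a method on `Size` (`layout.size.truncate(u128::MAX)` instead of the free function), and the `BitOr` arm relies on the identity `x | all_ones == all_ones` at the operand's width. A stand-alone sketch of both, with a hand-rolled `truncate` standing in for the `Size` method:

    // Keep only the low `bits` bits of `value`, like truncating to the
    // operand's layout size.
    fn truncate(value: u128, bits: u32) -> u128 {
        if bits == 128 { value } else { value & ((1u128 << bits) - 1) }
    }

    fn main() {
        let bits = 8; // e.g. a u8 operand
        let all_ones = truncate(u128::MAX, bits);
        assert_eq!(all_ones, 0xFF);
        let x: u128 = 0b1010_1010;
        // If one operand of `|` is all ones, the result is known without
        // looking at the other operand, so const prop can fold it.
        assert_eq!(x | all_ones, all_ones);
    }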
diff --git a/compiler/rustc_mir/src/transform/copy_prop.rs b/compiler/rustc_mir/src/transform/copy_prop.rs
deleted file mode 100644
index 7419446..0000000
--- a/compiler/rustc_mir/src/transform/copy_prop.rs
+++ /dev/null
@@ -1,384 +0,0 @@
-//! Trivial copy propagation pass.
-//!
-//! This uses def-use analysis to remove values that have exactly one def and one use, which must
-//! be an assignment.
-//!
-//! To give an example, we look for patterns that look like:
-//!
-//!     DEST = SRC
-//!     ...
-//!     USE(DEST)
-//!
-//! where `DEST` and `SRC` are both locals of some form. We replace that with:
-//!
-//!     NOP
-//!     ...
-//!     USE(SRC)
-//!
-//! The assignment `DEST = SRC` must be (a) the only mutation of `DEST` and (b) the only
-//! (non-mutating) use of `SRC`. These restrictions are conservative and may be relaxed in the
-//! future.
-
-use crate::transform::{MirPass, MirSource};
-use crate::util::def_use::DefUseAnalysis;
-use rustc_middle::mir::visit::MutVisitor;
-use rustc_middle::mir::{
-    Body, Constant, Local, LocalKind, Location, Operand, Place, Rvalue, StatementKind,
-};
-use rustc_middle::ty::TyCtxt;
-
-pub struct CopyPropagation;
-
-impl<'tcx> MirPass<'tcx> for CopyPropagation {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        let opts = &tcx.sess.opts.debugging_opts;
-        // We only run when the MIR optimization level is > 1.
-        // This avoids a slow pass, and messing up debug info.
-        // FIXME(76740): This optimization is buggy and can cause unsoundness.
-        if opts.mir_opt_level <= 1 || !opts.unsound_mir_opts {
-            return;
-        }
-
-        let mut def_use_analysis = DefUseAnalysis::new(body);
-        loop {
-            def_use_analysis.analyze(body);
-
-            if eliminate_self_assignments(body, &def_use_analysis) {
-                def_use_analysis.analyze(body);
-            }
-
-            let mut changed = false;
-            for dest_local in body.local_decls.indices() {
-                debug!("considering destination local: {:?}", dest_local);
-
-                let action;
-                let location;
-                {
-                    // The destination must have exactly one def.
-                    let dest_use_info = def_use_analysis.local_info(dest_local);
-                    let dest_def_count = dest_use_info.def_count_not_including_drop();
-                    if dest_def_count == 0 {
-                        debug!("  Can't copy-propagate local: dest {:?} undefined", dest_local);
-                        continue;
-                    }
-                    if dest_def_count > 1 {
-                        debug!(
-                            "  Can't copy-propagate local: dest {:?} defined {} times",
-                            dest_local,
-                            dest_use_info.def_count()
-                        );
-                        continue;
-                    }
-                    if dest_use_info.use_count() == 0 {
-                        debug!("  Can't copy-propagate local: dest {:?} unused", dest_local);
-                        continue;
-                    }
-                    // Conservatively gives up if the dest is an argument,
-                    // because there may be uses of the original argument value.
-                    // Also gives up on the return place, as we cannot propagate into its implicit
-                    // use by `return`.
-                    if matches!(
-                        body.local_kind(dest_local),
-                        LocalKind::Arg | LocalKind::ReturnPointer
-                    ) {
-                        debug!("  Can't copy-propagate local: dest {:?} (argument)", dest_local);
-                        continue;
-                    }
-                    let dest_place_def = dest_use_info.defs_not_including_drop().next().unwrap();
-                    location = dest_place_def.location;
-
-                    let basic_block = &body[location.block];
-                    let statement_index = location.statement_index;
-                    let statement = match basic_block.statements.get(statement_index) {
-                        Some(statement) => statement,
-                        None => {
-                            debug!("  Can't copy-propagate local: used in terminator");
-                            continue;
-                        }
-                    };
-
-                    // That use of the source must be an assignment.
-                    match &statement.kind {
-                        StatementKind::Assign(box (place, Rvalue::Use(operand))) => {
-                            if let Some(local) = place.as_local() {
-                                if local == dest_local {
-                                    let maybe_action = match operand {
-                                        Operand::Copy(src_place) | Operand::Move(src_place) => {
-                                            Action::local_copy(&body, &def_use_analysis, *src_place)
-                                        }
-                                        Operand::Constant(ref src_constant) => {
-                                            Action::constant(src_constant)
-                                        }
-                                    };
-                                    match maybe_action {
-                                        Some(this_action) => action = this_action,
-                                        None => continue,
-                                    }
-                                } else {
-                                    debug!(
-                                        "  Can't copy-propagate local: source use is not an \
-                                    assignment"
-                                    );
-                                    continue;
-                                }
-                            } else {
-                                debug!(
-                                    "  Can't copy-propagate local: source use is not an \
-                                    assignment"
-                                );
-                                continue;
-                            }
-                        }
-                        _ => {
-                            debug!(
-                                "  Can't copy-propagate local: source use is not an \
-                                    assignment"
-                            );
-                            continue;
-                        }
-                    }
-                }
-
-                changed =
-                    action.perform(body, &def_use_analysis, dest_local, location, tcx) || changed;
-                // FIXME(pcwalton): Update the use-def chains to delete the instructions instead of
-                // regenerating the chains.
-                break;
-            }
-            if !changed {
-                break;
-            }
-        }
-    }
-}
-
-fn eliminate_self_assignments(body: &mut Body<'_>, def_use_analysis: &DefUseAnalysis) -> bool {
-    let mut changed = false;
-
-    for dest_local in body.local_decls.indices() {
-        let dest_use_info = def_use_analysis.local_info(dest_local);
-
-        for def in dest_use_info.defs_not_including_drop() {
-            let location = def.location;
-            if let Some(stmt) = body[location.block].statements.get(location.statement_index) {
-                match &stmt.kind {
-                    StatementKind::Assign(box (
-                        place,
-                        Rvalue::Use(Operand::Copy(src_place) | Operand::Move(src_place)),
-                    )) => {
-                        if let (Some(local), Some(src_local)) =
-                            (place.as_local(), src_place.as_local())
-                        {
-                            if local == dest_local && dest_local == src_local {
-                            } else {
-                                continue;
-                            }
-                        } else {
-                            continue;
-                        }
-                    }
-                    _ => {
-                        continue;
-                    }
-                }
-            } else {
-                continue;
-            }
-            debug!("deleting a self-assignment for {:?}", dest_local);
-            body.make_statement_nop(location);
-            changed = true;
-        }
-    }
-
-    changed
-}
-
-enum Action<'tcx> {
-    PropagateLocalCopy(Local),
-    PropagateConstant(Constant<'tcx>),
-}
-
-impl<'tcx> Action<'tcx> {
-    fn local_copy(
-        body: &Body<'tcx>,
-        def_use_analysis: &DefUseAnalysis,
-        src_place: Place<'tcx>,
-    ) -> Option<Action<'tcx>> {
-        // The source must be a local.
-        let src_local = if let Some(local) = src_place.as_local() {
-            local
-        } else {
-            debug!("  Can't copy-propagate local: source is not a local");
-            return None;
-        };
-
-        // We're trying to copy propagate a local.
-        // There must be exactly one use of the source used in a statement (not in a terminator).
-        let src_use_info = def_use_analysis.local_info(src_local);
-        let src_use_count = src_use_info.use_count();
-        if src_use_count == 0 {
-            debug!("  Can't copy-propagate local: no uses");
-            return None;
-        }
-        if src_use_count != 1 {
-            debug!("  Can't copy-propagate local: {} uses", src_use_info.use_count());
-            return None;
-        }
-
-        // Verify that the source doesn't change in between. This is done conservatively for now,
-        // by ensuring that the source has exactly one mutation. The goal is to prevent things
-        // like:
-        //
-        //     DEST = SRC;
-        //     SRC = X;
-        //     USE(DEST);
-        //
-        // From being misoptimized into:
-        //
-        //     SRC = X;
-        //     USE(SRC);
-        let src_def_count = src_use_info.def_count_not_including_drop();
-        // allow function arguments to be propagated
-        let is_arg = body.local_kind(src_local) == LocalKind::Arg;
-        if (is_arg && src_def_count != 0) || (!is_arg && src_def_count != 1) {
-            debug!(
-                "  Can't copy-propagate local: {} defs of src{}",
-                src_def_count,
-                if is_arg { " (argument)" } else { "" },
-            );
-            return None;
-        }
-
-        Some(Action::PropagateLocalCopy(src_local))
-    }
-
-    fn constant(src_constant: &Constant<'tcx>) -> Option<Action<'tcx>> {
-        Some(Action::PropagateConstant(*src_constant))
-    }
-
-    fn perform(
-        self,
-        body: &mut Body<'tcx>,
-        def_use_analysis: &DefUseAnalysis,
-        dest_local: Local,
-        location: Location,
-        tcx: TyCtxt<'tcx>,
-    ) -> bool {
-        match self {
-            Action::PropagateLocalCopy(src_local) => {
-                // Eliminate the destination and the assignment.
-                //
-                // First, remove all markers.
-                //
-                // FIXME(pcwalton): Don't do this. Merge live ranges instead.
-                debug!("  Replacing all uses of {:?} with {:?} (local)", dest_local, src_local);
-                for place_use in &def_use_analysis.local_info(dest_local).defs_and_uses {
-                    if place_use.context.is_storage_marker() {
-                        body.make_statement_nop(place_use.location)
-                    }
-                }
-                for place_use in &def_use_analysis.local_info(src_local).defs_and_uses {
-                    if place_use.context.is_storage_marker() {
-                        body.make_statement_nop(place_use.location)
-                    }
-                }
-
-                // Replace all uses of the destination local with the source local.
-                def_use_analysis.replace_all_defs_and_uses_with(dest_local, body, src_local, tcx);
-
-                // Finally, zap the now-useless assignment instruction.
-                debug!("  Deleting assignment");
-                body.make_statement_nop(location);
-
-                true
-            }
-            Action::PropagateConstant(src_constant) => {
-                // First, remove all markers.
-                //
-                // FIXME(pcwalton): Don't do this. Merge live ranges instead.
-                debug!(
-                    "  Replacing all uses of {:?} with {:?} (constant)",
-                    dest_local, src_constant
-                );
-                let dest_local_info = def_use_analysis.local_info(dest_local);
-                for place_use in &dest_local_info.defs_and_uses {
-                    if place_use.context.is_storage_marker() {
-                        body.make_statement_nop(place_use.location)
-                    }
-                }
-
-                // Replace all uses of the destination local with the constant.
-                let mut visitor = ConstantPropagationVisitor::new(dest_local, src_constant, tcx);
-                for dest_place_use in &dest_local_info.defs_and_uses {
-                    visitor.visit_location(body, dest_place_use.location)
-                }
-
-                // Zap the assignment instruction if we eliminated all the uses. We won't have been
-                // able to do that if the destination was used in a projection, because projections
-                // must have places on their LHS.
-                let use_count = dest_local_info.use_count();
-                if visitor.uses_replaced == use_count {
-                    debug!(
-                        "  {} of {} use(s) replaced; deleting assignment",
-                        visitor.uses_replaced, use_count
-                    );
-                    body.make_statement_nop(location);
-                    true
-                } else if visitor.uses_replaced == 0 {
-                    debug!("  No uses replaced; not deleting assignment");
-                    false
-                } else {
-                    debug!(
-                        "  {} of {} use(s) replaced; not deleting assignment",
-                        visitor.uses_replaced, use_count
-                    );
-                    true
-                }
-            }
-        }
-    }
-}
-
-struct ConstantPropagationVisitor<'tcx> {
-    dest_local: Local,
-    constant: Constant<'tcx>,
-    tcx: TyCtxt<'tcx>,
-    uses_replaced: usize,
-}
-
-impl<'tcx> ConstantPropagationVisitor<'tcx> {
-    fn new(
-        dest_local: Local,
-        constant: Constant<'tcx>,
-        tcx: TyCtxt<'tcx>,
-    ) -> ConstantPropagationVisitor<'tcx> {
-        ConstantPropagationVisitor { dest_local, constant, tcx, uses_replaced: 0 }
-    }
-}
-
-impl<'tcx> MutVisitor<'tcx> for ConstantPropagationVisitor<'tcx> {
-    fn tcx(&self) -> TyCtxt<'tcx> {
-        self.tcx
-    }
-
-    fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
-        self.super_operand(operand, location);
-
-        match operand {
-            Operand::Copy(place) | Operand::Move(place) => {
-                if let Some(local) = place.as_local() {
-                    if local == self.dest_local {
-                    } else {
-                        return;
-                    }
-                } else {
-                    return;
-                }
-            }
-            _ => return,
-        }
-
-        *operand = Operand::Constant(box self.constant);
-        self.uses_replaced += 1
-    }
-}
diff --git a/compiler/rustc_mir/src/transform/coverage/counters.rs b/compiler/rustc_mir/src/transform/coverage/counters.rs
new file mode 100644
index 0000000..d6c2f7f
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/coverage/counters.rs
@@ -0,0 +1,615 @@
+use super::Error;
+
+use super::debug;
+use super::graph;
+use super::spans;
+
+use debug::{DebugCounters, NESTED_INDENT};
+use graph::{BasicCoverageBlock, BcbBranch, CoverageGraph, TraverseCoverageGraphWithLoops};
+use spans::CoverageSpan;
+
+use rustc_data_structures::graph::WithNumNodes;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::coverage::*;
+
+/// Manages the counter and expression indexes/IDs to generate `CoverageKind` components for MIR
+/// `Coverage` statements.
+pub(crate) struct CoverageCounters {
+    function_source_hash: u64,
+    next_counter_id: u32,
+    num_expressions: u32,
+    pub debug_counters: DebugCounters,
+}
+
+impl CoverageCounters {
+    pub fn new(function_source_hash: u64) -> Self {
+        Self {
+            function_source_hash,
+            next_counter_id: CounterValueReference::START.as_u32(),
+            num_expressions: 0,
+            debug_counters: DebugCounters::new(),
+        }
+    }
+
+    /// Activate the `DebugCounters` data structures, to provide additional debug formatting
+    /// features when formatting `CoverageKind` (counter) values.
+    pub fn enable_debug(&mut self) {
+        self.debug_counters.enable();
+    }
+
+    /// Makes `CoverageKind` `Counter`s and `Expressions` for the `BasicCoverageBlocks` directly or
+    /// indirectly associated with `CoverageSpans`, and returns additional `Expression`s
+    /// representing intermediate values.
+    pub fn make_bcb_counters(
+        &mut self,
+        basic_coverage_blocks: &mut CoverageGraph,
+        coverage_spans: &Vec<CoverageSpan>,
+    ) -> Result<Vec<CoverageKind>, Error> {
+        let mut bcb_counters = BcbCounters::new(self, basic_coverage_blocks);
+        bcb_counters.make_bcb_counters(coverage_spans)
+    }
+
+    fn make_counter<F>(&mut self, debug_block_label_fn: F) -> CoverageKind
+    where
+        F: Fn() -> Option<String>,
+    {
+        let counter = CoverageKind::Counter {
+            function_source_hash: self.function_source_hash,
+            id: self.next_counter(),
+        };
+        if self.debug_counters.is_enabled() {
+            self.debug_counters.add_counter(&counter, (debug_block_label_fn)());
+        }
+        counter
+    }
+
+    fn make_expression<F>(
+        &mut self,
+        lhs: ExpressionOperandId,
+        op: Op,
+        rhs: ExpressionOperandId,
+        debug_block_label_fn: F,
+    ) -> CoverageKind
+    where
+        F: Fn() -> Option<String>,
+    {
+        let id = self.next_expression();
+        let expression = CoverageKind::Expression { id, lhs, op, rhs };
+        if self.debug_counters.is_enabled() {
+            self.debug_counters.add_counter(&expression, (debug_block_label_fn)());
+        }
+        expression
+    }
+
+    pub fn make_identity_counter(&mut self, counter_operand: ExpressionOperandId) -> CoverageKind {
+        let some_debug_block_label = if self.debug_counters.is_enabled() {
+            self.debug_counters.some_block_label(counter_operand).cloned()
+        } else {
+            None
+        };
+        self.make_expression(counter_operand, Op::Add, ExpressionOperandId::ZERO, || {
+            some_debug_block_label.clone()
+        })
+    }
+
+    /// Counter IDs start from one and go up.
+    fn next_counter(&mut self) -> CounterValueReference {
+        assert!(self.next_counter_id < u32::MAX - self.num_expressions);
+        let next = self.next_counter_id;
+        self.next_counter_id += 1;
+        CounterValueReference::from(next)
+    }
+
+    /// Expression IDs start from u32::MAX and go down because an Expression can reference
+    /// (add or subtract the counts of) both Counter regions and Expression regions. The counter
+    /// expression operand IDs must be unique across both types.
+    fn next_expression(&mut self) -> InjectedExpressionId {
+        assert!(self.next_counter_id < u32::MAX - self.num_expressions);
+        let next = u32::MAX - self.num_expressions;
+        self.num_expressions += 1;
+        InjectedExpressionId::from(next)
+    }
+}
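To illustrate the two-ended ID scheme described in the comments above (counter IDs ascending from 1, expression IDs descending from `u32::MAX`, with the shared assertion keeping the two ranges from colliding), here is a minimal standalone sketch using plain `u32`s. The `IdAllocator` name and the `main` driver are hypothetical and exist only for this illustration; they are not part of the pass.

```rust
// Hypothetical sketch of the ID allocation used above: counters count up from 1,
// expressions count down from u32::MAX, and the assertion fails before the two
// ranges can ever overlap.
struct IdAllocator {
    next_counter_id: u32, // next ascending counter ID (starts at 1)
    num_expressions: u32, // expression k receives ID u32::MAX - k
}

impl IdAllocator {
    fn new() -> Self {
        Self { next_counter_id: 1, num_expressions: 0 }
    }

    fn next_counter(&mut self) -> u32 {
        assert!(self.next_counter_id < u32::MAX - self.num_expressions);
        let id = self.next_counter_id;
        self.next_counter_id += 1;
        id
    }

    fn next_expression(&mut self) -> u32 {
        assert!(self.next_counter_id < u32::MAX - self.num_expressions);
        let id = u32::MAX - self.num_expressions;
        self.num_expressions += 1;
        id
    }
}

fn main() {
    let mut ids = IdAllocator::new();
    assert_eq!(ids.next_counter(), 1);
    assert_eq!(ids.next_counter(), 2);
    assert_eq!(ids.next_expression(), u32::MAX);
    assert_eq!(ids.next_expression(), u32::MAX - 1);
}
```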
+
+/// Traverse the `CoverageGraph` and add either a `Counter` or `Expression` to every BCB, to be
+/// injected with `CoverageSpan`s. `Expressions` have no runtime overhead, so if a viable expression
+/// (adding or subtracting two other counters or expressions) can compute the same result as an
+/// embedded counter, an `Expression` should be used.
+struct BcbCounters<'a> {
+    coverage_counters: &'a mut CoverageCounters,
+    basic_coverage_blocks: &'a mut CoverageGraph,
+}
+
+// FIXME(richkadel): Add unit tests for `BcbCounters` functions/algorithms.
+impl<'a> BcbCounters<'a> {
+    fn new(
+        coverage_counters: &'a mut CoverageCounters,
+        basic_coverage_blocks: &'a mut CoverageGraph,
+    ) -> Self {
+        Self { coverage_counters, basic_coverage_blocks }
+    }
+
+    /// If two `BasicCoverageBlock`s branch from another `BasicCoverageBlock`, one of the branches
+    /// can be counted by `Expression` by subtracting the other branch from the branching
+    /// block. Otherwise, the `BasicCoverageBlock` executed the least should have the `Counter`.
+    /// One way to predict which branch executes the least is by considering loops. A loop is exited
+    /// at a branch, so the branch that jumps to a `BasicCoverageBlock` outside the loop is almost
+    /// always executed less than the branch that does not exit the loop.
+    ///
+    /// Returns any non-code-span expressions created to represent intermediate values (such as to
+    /// add two counters so the result can be subtracted from another counter), or an Error with
+    /// message for subsequent debugging.
+    fn make_bcb_counters(
+        &mut self,
+        coverage_spans: &Vec<CoverageSpan>,
+    ) -> Result<Vec<CoverageKind>, Error> {
+        debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock");
+        let num_bcbs = self.basic_coverage_blocks.num_nodes();
+        let mut collect_intermediate_expressions = Vec::with_capacity(num_bcbs);
+
+        let mut bcbs_with_coverage = BitSet::new_empty(num_bcbs);
+        for covspan in coverage_spans {
+            bcbs_with_coverage.insert(covspan.bcb);
+        }
+
+        // Walk the `CoverageGraph`. For each `BasicCoverageBlock` node with an associated
+        // `CoverageSpan`, add a counter. If the `BasicCoverageBlock` branches, add a counter or
+        // expression to each branch `BasicCoverageBlock` (if the branch BCB has only one incoming
+        // edge) or edge from the branching BCB to the branch BCB (if the branch BCB has multiple
+        // incoming edges).
+        //
+        // The `TraverseCoverageGraphWithLoops` traversal ensures that, when a loop is encountered,
+        // all `BasicCoverageBlock` nodes in the loop are visited before visiting any node outside
+        // the loop. The `traversal` state includes a `context_stack`, providing a way to know if
+        // the current BCB is in one or more nested loops or not.
+        let mut traversal = TraverseCoverageGraphWithLoops::new(&self.basic_coverage_blocks);
+        while let Some(bcb) = traversal.next(self.basic_coverage_blocks) {
+            if bcbs_with_coverage.contains(bcb) {
+                debug!("{:?} has at least one `CoverageSpan`. Get or make its counter", bcb);
+                let branching_counter_operand =
+                    self.get_or_make_counter_operand(bcb, &mut collect_intermediate_expressions)?;
+
+                if self.bcb_needs_branch_counters(bcb) {
+                    self.make_branch_counters(
+                        &mut traversal,
+                        bcb,
+                        branching_counter_operand,
+                        &mut collect_intermediate_expressions,
+                    )?;
+                }
+            } else {
+                debug!(
+                    "{:?} does not have any `CoverageSpan`s. A counter will only be added if \
+                    and when a covered BCB has an expression dependency.",
+                    bcb,
+                );
+            }
+        }
+
+        if traversal.is_complete() {
+            Ok(collect_intermediate_expressions)
+        } else {
+            Error::from_string(format!(
+                "`TraverseCoverageGraphWithLoops` missed some `BasicCoverageBlock`s: {:?}",
+                traversal.unvisited(),
+            ))
+        }
+    }
+
+    fn make_branch_counters(
+        &mut self,
+        traversal: &mut TraverseCoverageGraphWithLoops,
+        branching_bcb: BasicCoverageBlock,
+        branching_counter_operand: ExpressionOperandId,
+        collect_intermediate_expressions: &mut Vec<CoverageKind>,
+    ) -> Result<(), Error> {
+        let branches = self.bcb_branches(branching_bcb);
+        debug!(
+            "{:?} has some branch(es) without counters:\n  {}",
+            branching_bcb,
+            branches
+                .iter()
+                .map(|branch| {
+                    format!("{:?}: {:?}", branch, branch.counter(&self.basic_coverage_blocks))
+                })
+                .collect::<Vec<_>>()
+                .join("\n  "),
+        );
+
+        // Use the `traversal` state to decide if a subset of the branches exit a loop, since a
+        // branch that exits a loop is likely executed less often than branches that stay in it. In this
+        // case, any branch that does not exit the loop (and has not already been assigned a
+        // counter) should be counted by expression, if possible. (If a preferred expression branch
+        // is not selected based on the loop context, select any branch without an existing
+        // counter.)
+        let expression_branch = self.choose_preferred_expression_branch(traversal, &branches);
+
+        // Assign a Counter or Expression to each branch, plus additional `Expression`s, as needed,
+        // to sum up intermediate results.
+        let mut some_sumup_counter_operand = None;
+        for branch in branches {
+            // Skip the selected `expression_branch`, if any. Its expression will be assigned after
+            // all others.
+            if branch != expression_branch {
+                let branch_counter_operand = if branch.is_only_path_to_target() {
+                    debug!(
+                        "  {:?} has only one incoming edge (from {:?}), so adding a \
+                        counter",
+                        branch, branching_bcb
+                    );
+                    self.get_or_make_counter_operand(
+                        branch.target_bcb,
+                        collect_intermediate_expressions,
+                    )?
+                } else {
+                    debug!("  {:?} has multiple incoming edges, so adding an edge counter", branch);
+                    self.get_or_make_edge_counter_operand(
+                        branching_bcb,
+                        branch.target_bcb,
+                        collect_intermediate_expressions,
+                    )?
+                };
+                if let Some(sumup_counter_operand) =
+                    some_sumup_counter_operand.replace(branch_counter_operand)
+                {
+                    let intermediate_expression = self.coverage_counters.make_expression(
+                        branch_counter_operand,
+                        Op::Add,
+                        sumup_counter_operand,
+                        || None,
+                    );
+                    debug!(
+                        "  [new intermediate expression: {}]",
+                        self.format_counter(&intermediate_expression)
+                    );
+                    let intermediate_expression_operand = intermediate_expression.as_operand_id();
+                    collect_intermediate_expressions.push(intermediate_expression);
+                    some_sumup_counter_operand.replace(intermediate_expression_operand);
+                }
+            }
+        }
+
+        // Assign the final expression to the `expression_branch` by subtracting the total of all
+        // other branches from the counter of the branching BCB.
+        let sumup_counter_operand =
+            some_sumup_counter_operand.expect("sumup_counter_operand should have a value");
+        debug!(
+            "Making an expression for the selected expression_branch: {:?} \
+            (expression_branch predecessors: {:?})",
+            expression_branch,
+            self.bcb_predecessors(expression_branch.target_bcb),
+        );
+        let expression = self.coverage_counters.make_expression(
+            branching_counter_operand,
+            Op::Subtract,
+            sumup_counter_operand,
+            || Some(format!("{:?}", expression_branch)),
+        );
+        debug!("{:?} gets an expression: {}", expression_branch, self.format_counter(&expression));
+        let bcb = expression_branch.target_bcb;
+        if expression_branch.is_only_path_to_target() {
+            self.basic_coverage_blocks[bcb].set_counter(expression)?;
+        } else {
+            self.basic_coverage_blocks[bcb].set_edge_counter_from(branching_bcb, expression)?;
+        }
+        Ok(())
+    }
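To make the subtraction performed at the end of `make_branch_counters` concrete: the selected `expression_branch` is counted as the branching block's count minus the sum of every other branch's count. A small hypothetical sketch over plain integers (not rustc types):

```rust
// Hypothetical worked example: the branch chosen for the expression needs no
// physical counter because its count is branching_count - sum(other branches).
fn expression_branch_count(branching_count: u64, other_branch_counts: &[u64]) -> u64 {
    let sum_of_counted_branches: u64 = other_branch_counts.iter().sum();
    branching_count - sum_of_counted_branches
}

fn main() {
    // A branching BCB executed 10 times with three outgoing branches; two of
    // them carry counters (7 and 1), so the third is computed as 10 - (7 + 1).
    assert_eq!(expression_branch_count(10, &[7, 1]), 2);
}
```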
+
+    fn get_or_make_counter_operand(
+        &mut self,
+        bcb: BasicCoverageBlock,
+        collect_intermediate_expressions: &mut Vec<CoverageKind>,
+    ) -> Result<ExpressionOperandId, Error> {
+        self.recursive_get_or_make_counter_operand(bcb, collect_intermediate_expressions, 1)
+    }
+
+    fn recursive_get_or_make_counter_operand(
+        &mut self,
+        bcb: BasicCoverageBlock,
+        collect_intermediate_expressions: &mut Vec<CoverageKind>,
+        debug_indent_level: usize,
+    ) -> Result<ExpressionOperandId, Error> {
+        // If the BCB already has a counter, return it.
+        if let Some(counter_kind) = self.basic_coverage_blocks[bcb].counter() {
+            debug!(
+                "{}{:?} already has a counter: {}",
+                NESTED_INDENT.repeat(debug_indent_level),
+                bcb,
+                self.format_counter(counter_kind),
+            );
+            return Ok(counter_kind.as_operand_id());
+        }
+
+        // A BCB with only one incoming edge gets a simple `Counter` (via `make_counter()`).
+        // Also, a BCB that loops back to itself gets a simple `Counter`. This may indicate the
+        // program results in a tight infinite loop, but it should still compile.
+        let one_path_to_target = self.bcb_has_one_path_to_target(bcb);
+        if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
+            let counter_kind = self.coverage_counters.make_counter(|| Some(format!("{:?}", bcb)));
+            if one_path_to_target {
+                debug!(
+                    "{}{:?} gets a new counter: {}",
+                    NESTED_INDENT.repeat(debug_indent_level),
+                    bcb,
+                    self.format_counter(&counter_kind),
+                );
+            } else {
+                debug!(
+                    "{}{:?} has itself as its own predecessor. It can't be part of its own \
+                    Expression sum, so it will get its own new counter: {}. (Note, the compiled \
+                    code will generate an infinite loop.)",
+                    NESTED_INDENT.repeat(debug_indent_level),
+                    bcb,
+                    self.format_counter(&counter_kind),
+                );
+            }
+            return self.basic_coverage_blocks[bcb].set_counter(counter_kind);
+        }
+
+        // A BCB with multiple incoming edges can compute its count by `Expression`, summing up the
+        // counters and/or expressions of its incoming edges. This will recursively get or create
+        // counters for those incoming edges first, then call `make_expression()` to sum them up,
+        // with additional intermediate expressions as needed.
+        let mut predecessors = self.bcb_predecessors(bcb).clone().into_iter();
+        debug!(
+            "{}{:?} has multiple incoming edges and will get an expression that sums them up...",
+            NESTED_INDENT.repeat(debug_indent_level),
+            bcb,
+        );
+        let first_edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
+            predecessors.next().unwrap(),
+            bcb,
+            collect_intermediate_expressions,
+            debug_indent_level + 1,
+        )?;
+        let mut some_sumup_edge_counter_operand = None;
+        for predecessor in predecessors {
+            let edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
+                predecessor,
+                bcb,
+                collect_intermediate_expressions,
+                debug_indent_level + 1,
+            )?;
+            if let Some(sumup_edge_counter_operand) =
+                some_sumup_edge_counter_operand.replace(edge_counter_operand)
+            {
+                let intermediate_expression = self.coverage_counters.make_expression(
+                    sumup_edge_counter_operand,
+                    Op::Add,
+                    edge_counter_operand,
+                    || None,
+                );
+                debug!(
+                    "{}new intermediate expression: {}",
+                    NESTED_INDENT.repeat(debug_indent_level),
+                    self.format_counter(&intermediate_expression)
+                );
+                let intermediate_expression_operand = intermediate_expression.as_operand_id();
+                collect_intermediate_expressions.push(intermediate_expression);
+                some_sumup_edge_counter_operand.replace(intermediate_expression_operand);
+            }
+        }
+        let counter_kind = self.coverage_counters.make_expression(
+            first_edge_counter_operand,
+            Op::Add,
+            some_sumup_edge_counter_operand.unwrap(),
+            || Some(format!("{:?}", bcb)),
+        );
+        debug!(
+            "{}{:?} gets a new counter (sum of predecessor counters): {}",
+            NESTED_INDENT.repeat(debug_indent_level),
+            bcb,
+            self.format_counter(&counter_kind)
+        );
+        self.basic_coverage_blocks[bcb].set_counter(counter_kind)
+    }
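The mirror-image case above sums incoming edges instead: a BCB with several predecessors is counted by folding its edge counters pairwise, and each pairwise `Op::Add` becomes one intermediate expression. A hypothetical string-based sketch of the fold (the exact operand grouping in the pass differs slightly, but the number of intermediates is the same):

```rust
// Hypothetical sketch: folding N incoming edge counters produces N - 2
// intermediate Add expressions plus one final expression for the BCB itself.
fn sum_edges_with_intermediates(edge_labels: &[&str]) -> (String, Vec<String>) {
    let mut intermediates = Vec::new();
    let mut acc = edge_labels[0].to_string();
    for label in &edge_labels[1..] {
        acc = format!("({} + {})", acc, label);
        intermediates.push(acc.clone());
    }
    // The last accumulated sum is the BCB's own expression; everything before it
    // corresponds to the intermediate expressions collected by the pass.
    let bcb_expression = intermediates.pop().unwrap_or_else(|| acc.clone());
    (bcb_expression, intermediates)
}

fn main() {
    let (bcb_expr, intermediates) = sum_edges_with_intermediates(&["e0", "e1", "e2"]);
    assert_eq!(bcb_expr, "((e0 + e1) + e2)");
    assert_eq!(intermediates, vec!["(e0 + e1)".to_string()]);
}
```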
+
+    fn get_or_make_edge_counter_operand(
+        &mut self,
+        from_bcb: BasicCoverageBlock,
+        to_bcb: BasicCoverageBlock,
+        collect_intermediate_expressions: &mut Vec<CoverageKind>,
+    ) -> Result<ExpressionOperandId, Error> {
+        self.recursive_get_or_make_edge_counter_operand(
+            from_bcb,
+            to_bcb,
+            collect_intermediate_expressions,
+            1,
+        )
+    }
+
+    fn recursive_get_or_make_edge_counter_operand(
+        &mut self,
+        from_bcb: BasicCoverageBlock,
+        to_bcb: BasicCoverageBlock,
+        collect_intermediate_expressions: &mut Vec<CoverageKind>,
+        debug_indent_level: usize,
+    ) -> Result<ExpressionOperandId, Error> {
+        // If the source BCB has only one successor (assumed to be the given target), an edge
+        // counter is unnecessary. Just get or make a counter for the source BCB.
+        let successors = self.bcb_successors(from_bcb).iter();
+        if successors.len() == 1 {
+            return self.recursive_get_or_make_counter_operand(
+                from_bcb,
+                collect_intermediate_expressions,
+                debug_indent_level + 1,
+            );
+        }
+
+        // If the edge already has a counter, return it.
+        if let Some(counter_kind) = self.basic_coverage_blocks[to_bcb].edge_counter_from(from_bcb) {
+            debug!(
+                "{}Edge {:?}->{:?} already has a counter: {}",
+                NESTED_INDENT.repeat(debug_indent_level),
+                from_bcb,
+                to_bcb,
+                self.format_counter(counter_kind)
+            );
+            return Ok(counter_kind.as_operand_id());
+        }
+
+        // Make a new counter to count this edge.
+        let counter_kind =
+            self.coverage_counters.make_counter(|| Some(format!("{:?}->{:?}", from_bcb, to_bcb)));
+        debug!(
+            "{}Edge {:?}->{:?} gets a new counter: {}",
+            NESTED_INDENT.repeat(debug_indent_level),
+            from_bcb,
+            to_bcb,
+            self.format_counter(&counter_kind)
+        );
+        self.basic_coverage_blocks[to_bcb].set_edge_counter_from(from_bcb, counter_kind)
+    }
+
+    /// Selects a branch for the expression: the recommended `reloop_branch` if one was found,
+    /// otherwise any branch without an existing counter.
+    fn choose_preferred_expression_branch(
+        &self,
+        traversal: &TraverseCoverageGraphWithLoops,
+        branches: &Vec<BcbBranch>,
+    ) -> BcbBranch {
+        let branch_needs_a_counter =
+            |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+
+        let some_reloop_branch = self.find_some_reloop_branch(traversal, &branches);
+        if let Some(reloop_branch_without_counter) =
+            some_reloop_branch.filter(branch_needs_a_counter)
+        {
+            debug!(
+                "Selecting reloop_branch={:?} that still needs a counter, to get the \
+                `Expression`",
+                reloop_branch_without_counter
+            );
+            reloop_branch_without_counter
+        } else {
+            let &branch_without_counter = branches
+                .iter()
+                .find(|&&branch| branch.counter(&self.basic_coverage_blocks).is_none())
+                .expect(
+                    "needs_branch_counters was `true` so there should be at least one \
+                    branch",
+                );
+            debug!(
+                "Selecting any branch={:?} that still needs a counter, to get the \
+                `Expression` because there was no `reloop_branch`, or it already had a \
+                counter",
+                branch_without_counter
+            );
+            branch_without_counter
+        }
+    }
+
+    /// At most one of the branches (or its edge from the branching_bcb, if the branch has
+    /// multiple incoming edges) can have a counter computed by expression.
+    ///
+    /// If at least one of the branches leads outside of a loop (`found_loop_exit` is
+    /// true), and at least one other branch does not exit the loop (the first of which
+    /// is captured in `some_reloop_branch`), it's likely any reloop branch will be
+    /// executed far more often than the loop exit branch, making the reloop branch a better
+    /// candidate for an expression.
+    fn find_some_reloop_branch(
+        &self,
+        traversal: &TraverseCoverageGraphWithLoops,
+        branches: &Vec<BcbBranch>,
+    ) -> Option<BcbBranch> {
+        let branch_needs_a_counter =
+            |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+
+        let mut some_reloop_branch: Option<BcbBranch> = None;
+        for context in traversal.context_stack.iter().rev() {
+            if let Some((backedge_from_bcbs, _)) = &context.loop_backedges {
+                let mut found_loop_exit = false;
+                for &branch in branches.iter() {
+                    if backedge_from_bcbs.iter().any(|&backedge_from_bcb| {
+                        self.bcb_is_dominated_by(backedge_from_bcb, branch.target_bcb)
+                    }) {
+                        if let Some(reloop_branch) = some_reloop_branch {
+                            if reloop_branch.counter(&self.basic_coverage_blocks).is_none() {
+                                // we already found a candidate reloop_branch that still
+                                // needs a counter
+                                continue;
+                            }
+                        }
+                        // The path from branch leads back to the top of the loop. Set this
+                        // branch as the `reloop_branch`. If this branch already has a
+                        // counter, and we find another reloop branch that doesn't have a
+                        // counter yet, that branch will be selected as the `reloop_branch`
+                        // instead.
+                        some_reloop_branch = Some(branch);
+                    } else {
+                        // The path from branch leads outside this loop
+                        found_loop_exit = true;
+                    }
+                    if found_loop_exit
+                        && some_reloop_branch.filter(branch_needs_a_counter).is_some()
+                    {
+                        // Found both a branch that exits the loop and a branch that returns
+                        // to the top of the loop (`reloop_branch`), and the `reloop_branch`
+                        // doesn't already have a counter.
+                        break;
+                    }
+                }
+                if !found_loop_exit {
+                    debug!(
+                        "No branches exit the loop, so any branch without an existing \
+                        counter can have the `Expression`."
+                    );
+                    break;
+                }
+                if some_reloop_branch.is_some() {
+                    debug!(
+                        "Found a branch that exits the loop and a branch that loops back to \
+                        the top of the loop (`reloop_branch`). The `reloop_branch` will \
+                        get the `Expression`, as long as it still needs a counter."
+                    );
+                    break;
+                }
+                // else all branches exited this loop context, so run the same checks with
+                // the outer loop(s)
+            }
+        }
+        some_reloop_branch
+    }
+
+    #[inline]
+    fn bcb_predecessors(&self, bcb: BasicCoverageBlock) -> &Vec<BasicCoverageBlock> {
+        &self.basic_coverage_blocks.predecessors[bcb]
+    }
+
+    #[inline]
+    fn bcb_successors(&self, bcb: BasicCoverageBlock) -> &Vec<BasicCoverageBlock> {
+        &self.basic_coverage_blocks.successors[bcb]
+    }
+
+    #[inline]
+    fn bcb_branches(&self, from_bcb: BasicCoverageBlock) -> Vec<BcbBranch> {
+        self.bcb_successors(from_bcb)
+            .iter()
+            .map(|&to_bcb| BcbBranch::from_to(from_bcb, to_bcb, &self.basic_coverage_blocks))
+            .collect::<Vec<_>>()
+    }
+
+    fn bcb_needs_branch_counters(&self, bcb: BasicCoverageBlock) -> bool {
+        let branch_needs_a_counter =
+            |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+        let branches = self.bcb_branches(bcb);
+        branches.len() > 1 && branches.iter().any(branch_needs_a_counter)
+    }
+
+    /// Returns true if the BasicCoverageBlock has zero or one incoming edge. (If zero, it should be
+    /// the entry point for the function.)
+    #[inline]
+    fn bcb_has_one_path_to_target(&self, bcb: BasicCoverageBlock) -> bool {
+        self.bcb_predecessors(bcb).len() <= 1
+    }
+
+    #[inline]
+    fn bcb_is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
+        self.basic_coverage_blocks.is_dominated_by(node, dom)
+    }
+
+    #[inline]
+    fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+        self.coverage_counters.debug_counters.format_counter(counter_kind)
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/coverage/debug.rs b/compiler/rustc_mir/src/transform/coverage/debug.rs
new file mode 100644
index 0000000..ffa7951
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/coverage/debug.rs
@@ -0,0 +1,836 @@
+//! The `InstrumentCoverage` MIR pass implementation includes debugging tools and options
+//! to help developers understand and/or improve the analysis and instrumentation of a MIR.
+//!
+//! To enable coverage, include the rustc command line option:
+//!
+//!   * `-Z instrument-coverage`
+//!
+//! MIR Dump Files, with additional `CoverageGraph` graphviz and `CoverageSpan` spanview
+//! ------------------------------------------------------------------------------------
+//!
+//! Additional debugging options include:
+//!
+//!   * `-Z dump-mir=InstrumentCoverage` - Generate `.mir` files showing the state of the MIR,
+//!     before and after the `InstrumentCoverage` pass, for each compiled function.
+//!
+//!   * `-Z dump-mir-graphviz` - If `-Z dump-mir` is also enabled for the current MIR node path,
+//!     each MIR dump is accompanied by a before-and-after graphical view of the MIR, in Graphviz
+//!     `.dot` file format (which can be visually rendered as a graph using any of a number of free
+//!     Graphviz viewers and IDE extensions).
+//!
+//!     For the `InstrumentCoverage` pass, this option also enables generation of an additional
+//!     Graphviz `.dot` file for each function, rendering the `CoverageGraph`: the control flow
+//!     graph (CFG) of `BasicCoverageBlocks` (BCBs), as nodes, internally labeled to show the
+//!     `CoverageSpan`-based MIR elements each BCB represents (`BasicBlock`s, `Statement`s and
+//!     `Terminator`s), assigned coverage counters and/or expressions, and edge counters, as needed.
+//!
+//!     (Note: the additional option `-Z graphviz-dark-mode` can be added to change the rendered
+//!     output from its default black-on-white background to a dark color theme, if desired.)
+//!
+//!   * `-Z dump-mir-spanview` - If `-Z dump-mir` is also enabled for the current MIR node path,
+//!     each MIR dump is accompanied by a before-and-after `.html` document showing the function's
+//!     original source code, highlighted by its MIR spans, at the `statement` level (by default),
+//!     `terminator` only, or encompassing span for the `Terminator` plus all `Statement`s, in each
+//!     `block` (`BasicBlock`).
+//!
+//!     For the `InstrumentCoverage` pass, this option also enables generation of an additional
+//!     spanview `.html` file for each function, showing the aggregated `CoverageSpan`s that will
+//!     require counters (or counter expressions) for accurate coverage analysis.
+//!
+//! Debug Logging
+//! -------------
+//!
+//! The `InstrumentCoverage` pass includes debug logging messages at various phases and decision
+//! points, which can be enabled via environment variable:
+//!
+//! ```shell
+//! RUSTC_LOG=rustc_mir::transform::coverage=debug
+//! ```
+//!
+//! Other module paths with coverage-related debug logs may also be of interest, particularly for
+//! debugging the coverage map data, injected as global variables in the LLVM IR (during rustc's
+//! code generation pass). For example:
+//!
+//! ```shell
+//! RUSTC_LOG=rustc_mir::transform::coverage,rustc_codegen_ssa::coverageinfo,rustc_codegen_llvm::coverageinfo=debug
+//! ```
+//!
+//! Coverage Debug Options
+//! ----------------------
+//!
+//! Additional debugging options can be enabled using the environment variable:
+//!
+//! ```shell
+//! RUSTC_COVERAGE_DEBUG_OPTIONS=<options>
+//! ```
+//!
+//! These options are comma-separated, and specified in the format `option-name=value`. For example:
+//!
+//! ```shell
+//! $ RUSTC_COVERAGE_DEBUG_OPTIONS=counter-format=id+operation,allow-unused-expressions=yes cargo build
+//! ```
+//!
+//! Coverage debug options include:
+//!
+//!   * `allow-unused-expressions=yes` or `no` (default: `no`)
+//!
+//!     The `InstrumentCoverage` algorithms _should_ only create and assign expressions to a
+//!     `BasicCoverageBlock`, or an incoming edge, if that expression is either (a) required to
+//!     count a `CoverageSpan`, or (b) a dependency of some other required counter expression.
+//!
+//!     If an expression is generated that does not map to a `CoverageSpan` or dependency, this
+//!     probably indicates there was a bug in the algorithm that creates and assigns counters
+//!     and expressions.
+//!
+//!     When this kind of bug is encountered, the rustc compiler will panic by default. Setting:
+//!     `allow-unused-expressions=yes` will log a warning message instead of panicking (effectively
+//!     ignoring the unused expressions), which may be helpful when debugging the root cause of
+//!     the problem.
+//!
+//!   * `counter-format=<choices>`, where `<choices>` can be any plus-separated combination of `id`,
+//!     `block`, and/or `operation` (default: `block+operation`)
+//!
+//!     This option affects both the `CoverageGraph` (graphviz `.dot` files) and debug logging, when
+//!     generating labels for counters and expressions.
+//!
+//!     Depending on the values and combinations, counters can be labeled by:
+//!
+//!         * `id` - counter or expression ID (ascending counter IDs, starting at 1, or descending
+//!           expression IDs, starting at `u32::MAX`)
+//!         * `block` - the `BasicCoverageBlock` label (for example, `bcb0`) or edge label (for
+//!           example `bcb0->bcb1`), for counters or expressions assigned to count a
+//!           `BasicCoverageBlock` or edge. Intermediate expressions (not directly associated with
+//!           a BCB or edge) will be labeled by their expression ID, unless `operation` is also
+//!           specified.
+//!         * `operation` - applied to expressions only, labels include the left-hand-side counter
+//!           or expression label (lhs operand), the operator (`+` or `-`), and the right-hand-side
+//!           counter or expression (rhs operand). Expression operand labels are generated
+//!           recursively, generating labels with nested operations, enclosed in parentheses
+//!           (for example: `bcb2 + (bcb0 - bcb1)`).
+
+use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
+use super::spans::CoverageSpan;
+
+use crate::util::generic_graphviz::GraphvizWriter;
+use crate::util::pretty;
+use crate::util::spanview::{self, SpanViewable};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::{self, BasicBlock, TerminatorKind};
+use rustc_middle::ty::TyCtxt;
+
+use std::lazy::SyncOnceCell;
+
+pub const NESTED_INDENT: &str = "    ";
+
+const RUSTC_COVERAGE_DEBUG_OPTIONS: &str = "RUSTC_COVERAGE_DEBUG_OPTIONS";
+
+pub(crate) fn debug_options<'a>() -> &'a DebugOptions {
+    static DEBUG_OPTIONS: SyncOnceCell<DebugOptions> = SyncOnceCell::new();
+
+    &DEBUG_OPTIONS.get_or_init(|| DebugOptions::from_env())
+}
+
+/// Parses and maintains coverage-specific debug options captured from the environment variable
+/// "RUSTC_COVERAGE_DEBUG_OPTIONS", if set.
+#[derive(Debug, Clone)]
+pub(crate) struct DebugOptions {
+    pub allow_unused_expressions: bool,
+    counter_format: ExpressionFormat,
+}
+
+impl DebugOptions {
+    fn from_env() -> Self {
+        let mut allow_unused_expressions = true;
+        let mut counter_format = ExpressionFormat::default();
+
+        if let Ok(env_debug_options) = std::env::var(RUSTC_COVERAGE_DEBUG_OPTIONS) {
+            for setting_str in env_debug_options.replace(" ", "").replace("-", "_").split(',') {
+                let mut setting = setting_str.splitn(2, '=');
+                match setting.next() {
+                    Some(option) if option == "allow_unused_expressions" => {
+                        allow_unused_expressions = bool_option_val(option, setting.next());
+                        debug!(
+                            "{} env option `allow_unused_expressions` is set to {}",
+                            RUSTC_COVERAGE_DEBUG_OPTIONS, allow_unused_expressions
+                        );
+                    }
+                    Some(option) if option == "counter_format" => {
+                        if let Some(strval) = setting.next() {
+                            counter_format = counter_format_option_val(strval);
+                            debug!(
+                                "{} env option `counter_format` is set to {:?}",
+                                RUSTC_COVERAGE_DEBUG_OPTIONS, counter_format
+                            );
+                        } else {
+                            bug!(
+                                "`{}` option in environment variable {} requires one or more \
+                                plus-separated choices (a non-empty subset of \
+                                `id+block+operation`)",
+                                option,
+                                RUSTC_COVERAGE_DEBUG_OPTIONS
+                            );
+                        }
+                    }
+                    Some("") => {}
+                    Some(invalid) => bug!(
+                        "Unsupported setting `{}` in environment variable {}",
+                        invalid,
+                        RUSTC_COVERAGE_DEBUG_OPTIONS
+                    ),
+                    None => {}
+                }
+            }
+        }
+
+        Self { allow_unused_expressions, counter_format }
+    }
+}
+
+fn bool_option_val(option: &str, some_strval: Option<&str>) -> bool {
+    if let Some(val) = some_strval {
+        if vec!["yes", "y", "on", "true"].contains(&val) {
+            true
+        } else if vec!["no", "n", "off", "false"].contains(&val) {
+            false
+        } else {
+            bug!(
+                "Unsupported value `{}` for option `{}` in environment variable {}",
+                val,
+                option,
+                RUSTC_COVERAGE_DEBUG_OPTIONS
+            )
+        }
+    } else {
+        true
+    }
+}
+
+fn counter_format_option_val(strval: &str) -> ExpressionFormat {
+    let mut counter_format = ExpressionFormat { id: false, block: false, operation: false };
+    let components = strval.splitn(3, '+');
+    for component in components {
+        match component {
+            "id" => counter_format.id = true,
+            "block" => counter_format.block = true,
+            "operation" => counter_format.operation = true,
+            _ => bug!(
+                "Unsupported counter_format choice `{}` in environment variable {}",
+                component,
+                RUSTC_COVERAGE_DEBUG_OPTIONS
+            ),
+        }
+    }
+    counter_format
+}
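For reference, the option string these two helpers consume is a comma-separated list of `name=value` settings, with dashes and spaces normalized to underscores and a plus-separated value for `counter-format`. A minimal, illustrative parser (the `parse_debug_options` helper is hypothetical, deliberately simpler than the code above, and has none of its error handling):

```rust
// Illustrative parser for strings such as
// "counter-format=id+operation,allow-unused-expressions=yes".
// Not part of the pass; it only demonstrates the expected option-string shape.
fn parse_debug_options(raw: &str) -> Vec<(String, Vec<String>)> {
    raw.replace(' ', "")
        .replace('-', "_")
        .split(',')
        .filter(|setting| !setting.is_empty())
        .map(|setting| {
            let mut parts = setting.splitn(2, '=');
            let name = parts.next().unwrap_or("").to_string();
            let values = parts.next().unwrap_or("").split('+').map(str::to_string).collect();
            (name, values)
        })
        .collect()
}

fn main() {
    let parsed = parse_debug_options("counter-format=id+operation,allow-unused-expressions=yes");
    assert_eq!(parsed[0].0, "counter_format");
    assert_eq!(parsed[0].1, vec!["id".to_string(), "operation".to_string()]);
    assert_eq!(parsed[1].0, "allow_unused_expressions");
    assert_eq!(parsed[1].1, vec!["yes".to_string()]);
}
```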
+
+#[derive(Debug, Clone)]
+struct ExpressionFormat {
+    id: bool,
+    block: bool,
+    operation: bool,
+}
+
+impl Default for ExpressionFormat {
+    fn default() -> Self {
+        Self { id: false, block: true, operation: true }
+    }
+}
+
+/// If enabled, this struct maintains a map from `CoverageKind` IDs (as `ExpressionOperandId`) to
+/// the `CoverageKind` data and optional label (normally, the counter's associated
+/// `BasicCoverageBlock` format string, if any).
+///
+/// Use `format_counter` to convert one of these `CoverageKind` counters to a debug output string,
+/// as directed by the `DebugOptions`. This allows the format of counter labels in logs and dump
+/// files (including the `CoverageGraph` graphviz file) to be changed at runtime, via environment
+/// variable.
+///
+/// `DebugCounters` supports a recursive rendering of `Expression` counters, so they can be
+/// presented as nested expressions such as `(bcb3 - (bcb0 + bcb1))`.
+pub(crate) struct DebugCounters {
+    some_counters: Option<FxHashMap<ExpressionOperandId, DebugCounter>>,
+}
+
+impl DebugCounters {
+    pub fn new() -> Self {
+        Self { some_counters: None }
+    }
+
+    pub fn enable(&mut self) {
+        debug_assert!(!self.is_enabled());
+        self.some_counters.replace(FxHashMap::default());
+    }
+
+    pub fn is_enabled(&self) -> bool {
+        self.some_counters.is_some()
+    }
+
+    pub fn add_counter(&mut self, counter_kind: &CoverageKind, some_block_label: Option<String>) {
+        if let Some(counters) = &mut self.some_counters {
+            let id: ExpressionOperandId = match *counter_kind {
+                CoverageKind::Counter { id, .. } => id.into(),
+                CoverageKind::Expression { id, .. } => id.into(),
+                _ => bug!(
+                    "the given `CoverageKind` is not a counter or expression: {:?}",
+                    counter_kind
+                ),
+            };
+            counters
+                .insert(id.into(), DebugCounter::new(counter_kind.clone(), some_block_label))
+                .expect_none(
+                    "attempt to add the same counter_kind to DebugCounters more than once",
+                );
+        }
+    }
+
+    pub fn some_block_label(&self, operand: ExpressionOperandId) -> Option<&String> {
+        self.some_counters.as_ref().map_or(None, |counters| {
+            counters
+                .get(&operand)
+                .map_or(None, |debug_counter| debug_counter.some_block_label.as_ref())
+        })
+    }
+
+    pub fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+        match *counter_kind {
+            CoverageKind::Counter { .. } => {
+                format!("Counter({})", self.format_counter_kind(counter_kind))
+            }
+            CoverageKind::Expression { .. } => {
+                format!("Expression({})", self.format_counter_kind(counter_kind))
+            }
+            CoverageKind::Unreachable { .. } => "Unreachable".to_owned(),
+        }
+    }
+
+    fn format_counter_kind(&self, counter_kind: &CoverageKind) -> String {
+        let counter_format = &debug_options().counter_format;
+        if let CoverageKind::Expression { id, lhs, op, rhs } = *counter_kind {
+            if counter_format.operation {
+                return format!(
+                    "{}{} {} {}",
+                    if counter_format.id || self.some_counters.is_none() {
+                        format!("#{} = ", id.index())
+                    } else {
+                        String::new()
+                    },
+                    self.format_operand(lhs),
+                    if op == Op::Add { "+" } else { "-" },
+                    self.format_operand(rhs),
+                );
+            }
+        }
+
+        let id: ExpressionOperandId = match *counter_kind {
+            CoverageKind::Counter { id, .. } => id.into(),
+            CoverageKind::Expression { id, .. } => id.into(),
+            _ => {
+                bug!("the given `CoverageKind` is not a counter or expression: {:?}", counter_kind)
+            }
+        };
+        if self.some_counters.is_some() && (counter_format.block || !counter_format.id) {
+            let counters = self.some_counters.as_ref().unwrap();
+            if let Some(DebugCounter { some_block_label: Some(block_label), .. }) =
+                counters.get(&id.into())
+            {
+                return if counter_format.id {
+                    format!("{}#{}", block_label, id.index())
+                } else {
+                    format!("{}", block_label)
+                };
+            }
+        }
+        format!("#{}", id.index())
+    }
+
+    fn format_operand(&self, operand: ExpressionOperandId) -> String {
+        if operand.index() == 0 {
+            return String::from("0");
+        }
+        if let Some(counters) = &self.some_counters {
+            if let Some(DebugCounter { counter_kind, some_block_label }) = counters.get(&operand) {
+                if let CoverageKind::Expression { .. } = counter_kind {
+                    if let Some(block_label) = some_block_label {
+                        if debug_options().counter_format.block {
+                            return format!(
+                                "{}:({})",
+                                block_label,
+                                self.format_counter_kind(counter_kind)
+                            );
+                        }
+                    }
+                    return format!("({})", self.format_counter_kind(counter_kind));
+                }
+                return format!("{}", self.format_counter_kind(counter_kind));
+            }
+        }
+        format!("#{}", operand.index())
+    }
+}
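The recursive label rendering above bottoms out at plain counters and wraps nested expressions in parentheses, which is how labels like `(bcb3 - (bcb0 + bcb1))` from the type-level docs are produced. A toy model of that recursion, using a hypothetical `ToyKind` enum and integer IDs in place of `CoverageKind` and `ExpressionOperandId`:

```rust
use std::collections::HashMap;

// Toy model of the recursive formatting: counters render as their block label,
// expressions render as "(lhs op rhs)", recursing through the operand table.
enum ToyKind {
    Counter { label: String },
    Expression { lhs: u32, op: char, rhs: u32 },
}

fn format_operand(id: u32, table: &HashMap<u32, ToyKind>) -> String {
    match table.get(&id) {
        Some(ToyKind::Counter { label }) => label.clone(),
        Some(ToyKind::Expression { lhs, op, rhs }) => {
            format!("({} {} {})", format_operand(*lhs, table), op, format_operand(*rhs, table))
        }
        // Unknown operands fall back to a bare ID, like the `#<id>` fallback above.
        None => format!("#{}", id),
    }
}

fn main() {
    let mut table = HashMap::new();
    table.insert(0, ToyKind::Counter { label: "bcb0".to_string() });
    table.insert(1, ToyKind::Counter { label: "bcb1".to_string() });
    table.insert(2, ToyKind::Expression { lhs: 0, op: '+', rhs: 1 });
    table.insert(3, ToyKind::Counter { label: "bcb3".to_string() });
    table.insert(4, ToyKind::Expression { lhs: 3, op: '-', rhs: 2 });
    assert_eq!(format_operand(4, &table), "(bcb3 - (bcb0 + bcb1))");
}
```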
+
+/// A non-public support class to `DebugCounters`.
+#[derive(Debug)]
+struct DebugCounter {
+    counter_kind: CoverageKind,
+    some_block_label: Option<String>,
+}
+
+impl DebugCounter {
+    fn new(counter_kind: CoverageKind, some_block_label: Option<String>) -> Self {
+        Self { counter_kind, some_block_label }
+    }
+}
+
+/// If enabled, this data structure captures additional debugging information used when generating
+/// a Graphviz (.dot file) representation of the `CoverageGraph`, for debugging purposes.
+pub(crate) struct GraphvizData {
+    some_bcb_to_coverage_spans_with_counters:
+        Option<FxHashMap<BasicCoverageBlock, Vec<(CoverageSpan, CoverageKind)>>>,
+    some_bcb_to_dependency_counters: Option<FxHashMap<BasicCoverageBlock, Vec<CoverageKind>>>,
+    some_edge_to_counter: Option<FxHashMap<(BasicCoverageBlock, BasicBlock), CoverageKind>>,
+}
+
+impl GraphvizData {
+    pub fn new() -> Self {
+        Self {
+            some_bcb_to_coverage_spans_with_counters: None,
+            some_bcb_to_dependency_counters: None,
+            some_edge_to_counter: None,
+        }
+    }
+
+    pub fn enable(&mut self) {
+        debug_assert!(!self.is_enabled());
+        self.some_bcb_to_coverage_spans_with_counters = Some(FxHashMap::default());
+        self.some_bcb_to_dependency_counters = Some(FxHashMap::default());
+        self.some_edge_to_counter = Some(FxHashMap::default());
+    }
+
+    pub fn is_enabled(&self) -> bool {
+        self.some_bcb_to_coverage_spans_with_counters.is_some()
+    }
+
+    pub fn add_bcb_coverage_span_with_counter(
+        &mut self,
+        bcb: BasicCoverageBlock,
+        coverage_span: &CoverageSpan,
+        counter_kind: &CoverageKind,
+    ) {
+        if let Some(bcb_to_coverage_spans_with_counters) =
+            self.some_bcb_to_coverage_spans_with_counters.as_mut()
+        {
+            bcb_to_coverage_spans_with_counters
+                .entry(bcb)
+                .or_insert_with(|| Vec::new())
+                .push((coverage_span.clone(), counter_kind.clone()));
+        }
+    }
+
+    pub fn get_bcb_coverage_spans_with_counters(
+        &self,
+        bcb: BasicCoverageBlock,
+    ) -> Option<&Vec<(CoverageSpan, CoverageKind)>> {
+        if let Some(bcb_to_coverage_spans_with_counters) =
+            self.some_bcb_to_coverage_spans_with_counters.as_ref()
+        {
+            bcb_to_coverage_spans_with_counters.get(&bcb)
+        } else {
+            None
+        }
+    }
+
+    pub fn add_bcb_dependency_counter(
+        &mut self,
+        bcb: BasicCoverageBlock,
+        counter_kind: &CoverageKind,
+    ) {
+        if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_mut() {
+            bcb_to_dependency_counters
+                .entry(bcb)
+                .or_insert_with(|| Vec::new())
+                .push(counter_kind.clone());
+        }
+    }
+
+    pub fn get_bcb_dependency_counters(
+        &self,
+        bcb: BasicCoverageBlock,
+    ) -> Option<&Vec<CoverageKind>> {
+        if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_ref() {
+            bcb_to_dependency_counters.get(&bcb)
+        } else {
+            None
+        }
+    }
+
+    pub fn set_edge_counter(
+        &mut self,
+        from_bcb: BasicCoverageBlock,
+        to_bb: BasicBlock,
+        counter_kind: &CoverageKind,
+    ) {
+        if let Some(edge_to_counter) = self.some_edge_to_counter.as_mut() {
+            edge_to_counter.insert((from_bcb, to_bb), counter_kind.clone()).expect_none(
+                "invalid attempt to insert more than one edge counter for the same edge",
+            );
+        }
+    }
+
+    pub fn get_edge_counter(
+        &self,
+        from_bcb: BasicCoverageBlock,
+        to_bb: BasicBlock,
+    ) -> Option<&CoverageKind> {
+        if let Some(edge_to_counter) = self.some_edge_to_counter.as_ref() {
+            edge_to_counter.get(&(from_bcb, to_bb))
+        } else {
+            None
+        }
+    }
+}
+
+/// If enabled, this struct captures additional data used to track whether expressions were used,
+/// directly or indirectly, to compute the coverage counts for all `CoverageSpan`s, and any that are
+/// _not_ used are retained in the `unused_expressions` Vec, to be included in debug output (logs
+/// and/or a `CoverageGraph` graphviz output).
+pub(crate) struct UsedExpressions {
+    some_used_expression_operands:
+        Option<FxHashMap<ExpressionOperandId, Vec<InjectedExpressionId>>>,
+    some_unused_expressions:
+        Option<Vec<(CoverageKind, Option<BasicCoverageBlock>, BasicCoverageBlock)>>,
+}
+
+impl UsedExpressions {
+    pub fn new() -> Self {
+        Self { some_used_expression_operands: None, some_unused_expressions: None }
+    }
+
+    pub fn enable(&mut self) {
+        debug_assert!(!self.is_enabled());
+        self.some_used_expression_operands = Some(FxHashMap::default());
+        self.some_unused_expressions = Some(Vec::new());
+    }
+
+    pub fn is_enabled(&self) -> bool {
+        self.some_used_expression_operands.is_some()
+    }
+
+    pub fn add_expression_operands(&mut self, expression: &CoverageKind) {
+        if let Some(used_expression_operands) = self.some_used_expression_operands.as_mut() {
+            if let CoverageKind::Expression { id, lhs, rhs, .. } = *expression {
+                used_expression_operands.entry(lhs).or_insert_with(|| Vec::new()).push(id);
+                used_expression_operands.entry(rhs).or_insert_with(|| Vec::new()).push(id);
+            }
+        }
+    }
+
+    pub fn expression_is_used(&self, expression: &CoverageKind) -> bool {
+        if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
+            used_expression_operands.contains_key(&expression.as_operand_id())
+        } else {
+            false
+        }
+    }
+
+    pub fn add_unused_expression_if_not_found(
+        &mut self,
+        expression: &CoverageKind,
+        edge_from_bcb: Option<BasicCoverageBlock>,
+        target_bcb: BasicCoverageBlock,
+    ) {
+        if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
+            if !used_expression_operands.contains_key(&expression.as_operand_id()) {
+                self.some_unused_expressions.as_mut().unwrap().push((
+                    expression.clone(),
+                    edge_from_bcb,
+                    target_bcb,
+                ));
+            }
+        }
+    }
+
+    /// Returns the list of unused counters (if any), each as a tuple of the counter
+    /// (`CoverageKind`), the optional `from_bcb` (if it was an edge counter), and the `target_bcb`.
+    pub fn get_unused_expressions(
+        &self,
+    ) -> Vec<(CoverageKind, Option<BasicCoverageBlock>, BasicCoverageBlock)> {
+        if let Some(unused_expressions) = self.some_unused_expressions.as_ref() {
+            unused_expressions.clone()
+        } else {
+            Vec::new()
+        }
+    }
+
+    /// If enabled, validate that every BCB or edge counter not directly associated with a coverage
+    /// span is at least indirectly associated (it is a dependency of a BCB counter that _is_
+    /// associated with a coverage span).
+    pub fn validate(
+        &mut self,
+        bcb_counters_without_direct_coverage_spans: &Vec<(
+            Option<BasicCoverageBlock>,
+            BasicCoverageBlock,
+            CoverageKind,
+        )>,
+    ) {
+        if self.is_enabled() {
+            let mut not_validated = bcb_counters_without_direct_coverage_spans
+                .iter()
+                .map(|(_, _, counter_kind)| counter_kind)
+                .collect::<Vec<_>>();
+            let mut validating_count = 0;
+            while not_validated.len() != validating_count {
+                let to_validate = not_validated.split_off(0);
+                validating_count = to_validate.len();
+                for counter_kind in to_validate {
+                    if self.expression_is_used(counter_kind) {
+                        self.add_expression_operands(counter_kind);
+                    } else {
+                        not_validated.push(counter_kind);
+                    }
+                }
+            }
+        }
+    }
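The `validate` loop above is a fixed-point sweep: any counter whose ID is already reachable from a used operand becomes validated, its own operands join the used set, and the sweeps repeat until one of them makes no progress, leaving only the genuinely unused counters. A self-contained sketch of the same idea over plain integer IDs (the `indirectly_unused` helper is hypothetical):

```rust
use std::collections::HashSet;

// Fixed-point sketch: `used` starts as the directly-used operand IDs; each
// expression is (id, lhs, rhs). A sweep validates any expression whose own id
// is used and then marks its operands as used; sweeps repeat until nothing
// changes, and whatever is left over is reported as unused.
fn indirectly_unused(mut used: HashSet<u32>, expressions: &[(u32, u32, u32)]) -> Vec<u32> {
    let mut pending: Vec<(u32, u32, u32)> = expressions.to_vec();
    loop {
        let before = pending.len();
        pending.retain(|&(id, lhs, rhs)| {
            if used.contains(&id) {
                used.insert(lhs);
                used.insert(rhs);
                false // validated: drop from the pending list
            } else {
                true // not (yet) proven used
            }
        });
        if pending.len() == before {
            break; // a full sweep made no progress: fixed point reached
        }
    }
    pending.into_iter().map(|(id, _, _)| id).collect()
}

fn main() {
    // Expression 10 is used directly; it depends on 11, which depends on 12.
    // Expression 13 is reachable from nothing, so it is reported as unused.
    // The deliberately reversed order forces several sweeps before settling.
    let used: HashSet<u32> = [10].iter().copied().collect();
    let exprs = [(12, 1, 2), (11, 12, 0), (10, 11, 0), (13, 3, 4)];
    assert_eq!(indirectly_unused(used, &exprs), vec![13]);
}
```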
+
+    pub fn alert_on_unused_expressions(&self, debug_counters: &DebugCounters) {
+        if let Some(unused_expressions) = self.some_unused_expressions.as_ref() {
+            for (counter_kind, edge_from_bcb, target_bcb) in unused_expressions {
+                let unused_counter_message = if let Some(from_bcb) = edge_from_bcb.as_ref() {
+                    format!(
+                        "non-coverage edge counter found without a dependent expression, in \
+                        {:?}->{:?}; counter={}",
+                        from_bcb,
+                        target_bcb,
+                        debug_counters.format_counter(&counter_kind),
+                    )
+                } else {
+                    format!(
+                        "non-coverage counter found without a dependent expression, in {:?}; \
+                        counter={}",
+                        target_bcb,
+                        debug_counters.format_counter(&counter_kind),
+                    )
+                };
+
+                if debug_options().allow_unused_expressions {
+                    debug!("WARNING: {}", unused_counter_message);
+                } else {
+                    bug!("{}", unused_counter_message);
+                }
+            }
+        }
+    }
+}
+
+/// Generates the MIR pass `CoverageSpan`-specific spanview dump file.
+pub(crate) fn dump_coverage_spanview(
+    tcx: TyCtxt<'tcx>,
+    mir_body: &mir::Body<'tcx>,
+    basic_coverage_blocks: &CoverageGraph,
+    pass_name: &str,
+    coverage_spans: &Vec<CoverageSpan>,
+) {
+    let mir_source = mir_body.source;
+    let def_id = mir_source.def_id();
+
+    let span_viewables = span_viewables(tcx, mir_body, basic_coverage_blocks, &coverage_spans);
+    let mut file = pretty::create_dump_file(tcx, "html", None, pass_name, &0, mir_source)
+        .expect("Unexpected error creating MIR spanview HTML file");
+    let crate_name = tcx.crate_name(def_id.krate);
+    let item_name = tcx.def_path(def_id).to_filename_friendly_no_crate();
+    let title = format!("{}.{} - Coverage Spans", crate_name, item_name);
+    spanview::write_document(tcx, def_id, span_viewables, &title, &mut file)
+        .expect("Unexpected IO error dumping coverage spans as HTML");
+}
+
+/// Converts the computed `BasicCoverageBlockData`s into `SpanViewable`s.
+fn span_viewables(
+    tcx: TyCtxt<'tcx>,
+    mir_body: &mir::Body<'tcx>,
+    basic_coverage_blocks: &CoverageGraph,
+    coverage_spans: &Vec<CoverageSpan>,
+) -> Vec<SpanViewable> {
+    let mut span_viewables = Vec::new();
+    for coverage_span in coverage_spans {
+        let tooltip = coverage_span.format_coverage_statements(tcx, mir_body);
+        let CoverageSpan { span, bcb, .. } = coverage_span;
+        let bcb_data = &basic_coverage_blocks[*bcb];
+        let id = bcb_data.id();
+        let leader_bb = bcb_data.leader_bb();
+        span_viewables.push(SpanViewable { bb: leader_bb, span: *span, id, tooltip });
+    }
+    span_viewables
+}
+
+/// Generates the MIR pass coverage-specific graphviz dump file.
+pub(crate) fn dump_coverage_graphviz(
+    tcx: TyCtxt<'tcx>,
+    mir_body: &mir::Body<'tcx>,
+    pass_name: &str,
+    basic_coverage_blocks: &CoverageGraph,
+    debug_counters: &DebugCounters,
+    graphviz_data: &GraphvizData,
+    intermediate_expressions: &Vec<CoverageKind>,
+    debug_used_expressions: &UsedExpressions,
+) {
+    let mir_source = mir_body.source;
+    let def_id = mir_source.def_id();
+    let node_content = |bcb| {
+        bcb_to_string_sections(
+            tcx,
+            mir_body,
+            debug_counters,
+            &basic_coverage_blocks[bcb],
+            graphviz_data.get_bcb_coverage_spans_with_counters(bcb),
+            graphviz_data.get_bcb_dependency_counters(bcb),
+            // intermediate_expressions are injected into the mir::START_BLOCK, so
+            // include them in the first BCB.
+            if bcb.index() == 0 { Some(&intermediate_expressions) } else { None },
+        )
+    };
+    let edge_labels = |from_bcb| {
+        let from_bcb_data = &basic_coverage_blocks[from_bcb];
+        let from_terminator = from_bcb_data.terminator(mir_body);
+        let mut edge_labels = from_terminator.kind.fmt_successor_labels();
+        edge_labels.retain(|label| label != "unreachable");
+        let edge_counters = from_terminator
+            .successors()
+            .map(|&successor_bb| graphviz_data.get_edge_counter(from_bcb, successor_bb));
+        edge_labels
+            .iter()
+            .zip(edge_counters)
+            .map(|(label, some_counter)| {
+                if let Some(counter) = some_counter {
+                    format!("{}\n{}", label, debug_counters.format_counter(counter))
+                } else {
+                    label.to_string()
+                }
+            })
+            .collect::<Vec<_>>()
+    };
+    let graphviz_name = format!("Cov_{}_{}", def_id.krate.index(), def_id.index.index());
+    let mut graphviz_writer =
+        GraphvizWriter::new(basic_coverage_blocks, &graphviz_name, node_content, edge_labels);
+    let unused_expressions = debug_used_expressions.get_unused_expressions();
+    if unused_expressions.len() > 0 {
+        graphviz_writer.set_graph_label(&format!(
+            "Unused expressions:\n  {}",
+            unused_expressions
+                .as_slice()
+                .iter()
+                .map(|(counter_kind, edge_from_bcb, target_bcb)| {
+                    if let Some(from_bcb) = edge_from_bcb.as_ref() {
+                        format!(
+                            "{:?}->{:?}: {}",
+                            from_bcb,
+                            target_bcb,
+                            debug_counters.format_counter(&counter_kind),
+                        )
+                    } else {
+                        format!(
+                            "{:?}: {}",
+                            target_bcb,
+                            debug_counters.format_counter(&counter_kind),
+                        )
+                    }
+                })
+                .collect::<Vec<_>>()
+                .join("\n  ")
+        ));
+    }
+    let mut file = pretty::create_dump_file(tcx, "dot", None, pass_name, &0, mir_source)
+        .expect("Unexpected error creating BasicCoverageBlock graphviz DOT file");
+    graphviz_writer
+        .write_graphviz(tcx, &mut file)
+        .expect("Unexpected error writing BasicCoverageBlock graphviz DOT file");
+}
+
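+/// Builds the list of text sections shown in a coverage graphviz node for the given BCB: any
+/// intermediate expressions (when provided), the `CoverageSpan`s with their counters, any
+/// "non-coverage" dependency counters, the BCB's own counter, and one line per `BasicBlock`
+/// with its terminator kind.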
+fn bcb_to_string_sections(
+    tcx: TyCtxt<'tcx>,
+    mir_body: &mir::Body<'tcx>,
+    debug_counters: &DebugCounters,
+    bcb_data: &BasicCoverageBlockData,
+    some_coverage_spans_with_counters: Option<&Vec<(CoverageSpan, CoverageKind)>>,
+    some_dependency_counters: Option<&Vec<CoverageKind>>,
+    some_intermediate_expressions: Option<&Vec<CoverageKind>>,
+) -> Vec<String> {
+    let len = bcb_data.basic_blocks.len();
+    let mut sections = Vec::new();
+    if let Some(collect_intermediate_expressions) = some_intermediate_expressions {
+        sections.push(
+            collect_intermediate_expressions
+                .iter()
+                .map(|expression| {
+                    format!("Intermediate {}", debug_counters.format_counter(expression))
+                })
+                .collect::<Vec<_>>()
+                .join("\n"),
+        );
+    }
+    if let Some(coverage_spans_with_counters) = some_coverage_spans_with_counters {
+        sections.push(
+            coverage_spans_with_counters
+                .iter()
+                .map(|(covspan, counter)| {
+                    format!(
+                        "{} at {}",
+                        debug_counters.format_counter(counter),
+                        covspan.format(tcx, mir_body)
+                    )
+                })
+                .collect::<Vec<_>>()
+                .join("\n"),
+        );
+    }
+    if let Some(dependency_counters) = some_dependency_counters {
+        sections.push(format!(
+            "Non-coverage counters:\n  {}",
+            dependency_counters
+                .iter()
+                .map(|counter| debug_counters.format_counter(counter))
+                .collect::<Vec<_>>()
+                .join("  \n"),
+        ));
+    }
+    if let Some(counter_kind) = &bcb_data.counter_kind {
+        sections.push(format!("{:?}", counter_kind));
+    }
+    let non_term_blocks = bcb_data.basic_blocks[0..len - 1]
+        .iter()
+        .map(|&bb| format!("{:?}: {}", bb, term_type(&mir_body[bb].terminator().kind)))
+        .collect::<Vec<_>>();
+    if non_term_blocks.len() > 0 {
+        sections.push(non_term_blocks.join("\n"));
+    }
+    sections.push(format!(
+        "{:?}: {}",
+        bcb_data.basic_blocks.last().unwrap(),
+        term_type(&bcb_data.terminator(mir_body).kind)
+    ));
+    sections
+}
+
+/// Returns a simple string representation of a `TerminatorKind` variant, independent of any
+/// values it might hold.
+pub(crate) fn term_type(kind: &TerminatorKind<'tcx>) -> &'static str {
+    match kind {
+        TerminatorKind::Goto { .. } => "Goto",
+        TerminatorKind::SwitchInt { .. } => "SwitchInt",
+        TerminatorKind::Resume => "Resume",
+        TerminatorKind::Abort => "Abort",
+        TerminatorKind::Return => "Return",
+        TerminatorKind::Unreachable => "Unreachable",
+        TerminatorKind::Drop { .. } => "Drop",
+        TerminatorKind::DropAndReplace { .. } => "DropAndReplace",
+        TerminatorKind::Call { .. } => "Call",
+        TerminatorKind::Assert { .. } => "Assert",
+        TerminatorKind::Yield { .. } => "Yield",
+        TerminatorKind::GeneratorDrop => "GeneratorDrop",
+        TerminatorKind::FalseEdge { .. } => "FalseEdge",
+        TerminatorKind::FalseUnwind { .. } => "FalseUnwind",
+        TerminatorKind::InlineAsm { .. } => "InlineAsm",
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/coverage/graph.rs b/compiler/rustc_mir/src/transform/coverage/graph.rs
new file mode 100644
index 0000000..c2ed2cb
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/coverage/graph.rs
@@ -0,0 +1,759 @@
+use super::Error;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::graph::dominators::{self, Dominators};
+use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind};
+
+use std::ops::{Index, IndexMut};
+
+const ID_SEPARATOR: &str = ",";
+
+/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`'s
+/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
+/// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
+/// set of additional counters--if needed--to count incoming edges, when there is more than one.
+/// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
+pub(crate) struct CoverageGraph {
+    bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
+    bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
+    pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+    pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+    dominators: Option<Dominators<BasicCoverageBlock>>,
+}
+
+impl CoverageGraph {
+    pub fn from_mir(mir_body: &mir::Body<'tcx>) -> Self {
+        let (bcbs, bb_to_bcb) = Self::compute_basic_coverage_blocks(mir_body);
+
+        // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
+        // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified, each
+        // predecessor of a BCB leader_bb should be in a unique BCB, and each successor of a
+        // BCB last_bb should be in its own unique BCB. Therefore, collecting the BCBs using
+        // `bb_to_bcb` should work without requiring a deduplication step.
+
+        let successors = IndexVec::from_fn_n(
+            |bcb| {
+                let bcb_data = &bcbs[bcb];
+                let bcb_successors =
+                    bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind)
+                        .filter_map(|&successor_bb| bb_to_bcb[successor_bb])
+                        .collect::<Vec<_>>();
+                debug_assert!({
+                    let mut sorted = bcb_successors.clone();
+                    sorted.sort_unstable();
+                    let initial_len = sorted.len();
+                    sorted.dedup();
+                    sorted.len() == initial_len
+                });
+                bcb_successors
+            },
+            bcbs.len(),
+        );
+
+        let mut predecessors = IndexVec::from_elem_n(Vec::new(), bcbs.len());
+        for (bcb, bcb_successors) in successors.iter_enumerated() {
+            for &successor in bcb_successors {
+                predecessors[successor].push(bcb);
+            }
+        }
+
+        let mut basic_coverage_blocks =
+            Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None };
+        let dominators = dominators::dominators(&basic_coverage_blocks);
+        basic_coverage_blocks.dominators = Some(dominators);
+        basic_coverage_blocks
+    }
+
+    fn compute_basic_coverage_blocks(
+        mir_body: &mir::Body<'tcx>,
+    ) -> (
+        IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
+        IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
+    ) {
+        let num_basic_blocks = mir_body.num_nodes();
+        let mut bcbs = IndexVec::with_capacity(num_basic_blocks);
+        let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks);
+
+        // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows
+        // each block terminator's `successors()`. Coverage spans must map to actual source code,
+        // so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
+        // intentionally omits unwind paths.
+        // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
+        // `catch_unwind()` handlers.
+        let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors);
+
+        let mut basic_blocks = Vec::new();
+        for (bb, data) in mir_cfg_without_unwind {
+            if let Some(last) = basic_blocks.last() {
+                let predecessors = &mir_body.predecessors()[bb];
+                if predecessors.len() > 1 || !predecessors.contains(last) {
+                    // The `bb` has more than one _incoming_ edge, and should start its own
+                    // `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet
+                    // include `bb`; it contains a sequence of one or more sequential basic_blocks
+                    // with no intermediate branches in or out. Save these as a new
+                    // `BasicCoverageBlockData` before starting the new one.)
+                    Self::add_basic_coverage_block(
+                        &mut bcbs,
+                        &mut bb_to_bcb,
+                        basic_blocks.split_off(0),
+                    );
+                    debug!(
+                        "  because {}",
+                        if predecessors.len() > 1 {
+                            "predecessors.len() > 1".to_owned()
+                        } else {
+                            format!("bb {} is not in precessors: {:?}", bb.index(), predecessors)
+                        }
+                    );
+                }
+            }
+            basic_blocks.push(bb);
+
+            let term = data.terminator();
+
+            match term.kind {
+                TerminatorKind::Return { .. }
+                // FIXME(richkadel): Add test(s) for `Abort` coverage.
+                | TerminatorKind::Abort
+                // FIXME(richkadel): Add test(s) for `Assert` coverage.
+                // Should `Assert` be handled like `FalseUnwind` instead? Since we filter out unwind
+                // branches when creating the BCB CFG, aren't `Assert`s (without unwinds) just like
+                // `FalseUnwinds` (which are kind of like `Goto`s)?
+                | TerminatorKind::Assert { .. }
+                // FIXME(richkadel): Add test(s) for `Yield` coverage, and confirm coverage is
+                // sensible for code using the `yield` keyword.
+                | TerminatorKind::Yield { .. }
+                // FIXME(richkadel): Also add coverage tests using async/await, and threading.
+
+                | TerminatorKind::SwitchInt { .. } => {
+                    // The `bb` has more than one _outgoing_ edge, or exits the function. Save the
+                    // current sequence of `basic_blocks` gathered to this point, as a new
+                    // `BasicCoverageBlockData`.
+                    Self::add_basic_coverage_block(
+                        &mut bcbs,
+                        &mut bb_to_bcb,
+                        basic_blocks.split_off(0),
+                    );
+                    debug!("  because term.kind = {:?}", term.kind);
+                    // Note that this condition is based on `TerminatorKind`, even though it
+                    // theoretically boils down to `successors().len() != 1`; that is, either zero
+                    // (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but
+                    // since the BCB CFG ignores things like unwind branches (which exist in the
+                    // `Terminator`'s `successors()` list), checking the number of successors won't
+                    // work.
+                }
+                TerminatorKind::Goto { .. }
+                | TerminatorKind::Resume
+                | TerminatorKind::Unreachable
+                | TerminatorKind::Drop { .. }
+                | TerminatorKind::DropAndReplace { .. }
+                | TerminatorKind::Call { .. }
+                | TerminatorKind::GeneratorDrop
+                | TerminatorKind::FalseEdge { .. }
+                | TerminatorKind::FalseUnwind { .. }
+                | TerminatorKind::InlineAsm { .. } => {}
+            }
+        }
+
+        if !basic_blocks.is_empty() {
+            // process any remaining basic_blocks into a final `BasicCoverageBlockData`
+            Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0));
+            debug!("  because the end of the MIR CFG was reached while traversing");
+        }
+
+        (bcbs, bb_to_bcb)
+    }
+
+    fn add_basic_coverage_block(
+        bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
+        bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
+        basic_blocks: Vec<BasicBlock>,
+    ) {
+        let bcb = BasicCoverageBlock::from_usize(bcbs.len());
+        for &bb in basic_blocks.iter() {
+            bb_to_bcb[bb] = Some(bcb);
+        }
+        let bcb_data = BasicCoverageBlockData::from(basic_blocks);
+        debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
+        bcbs.push(bcb_data);
+    }
+
+    #[inline(always)]
+    pub fn iter_enumerated(
+        &self,
+    ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
+        self.bcbs.iter_enumerated()
+    }
+
+    #[inline(always)]
+    pub fn iter_enumerated_mut(
+        &mut self,
+    ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
+        self.bcbs.iter_enumerated_mut()
+    }
+
+    #[inline(always)]
+    pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
+        if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
+    }
+
+    #[inline(always)]
+    pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
+        self.dominators.as_ref().unwrap().is_dominated_by(node, dom)
+    }
+
+    #[inline(always)]
+    pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> {
+        self.dominators.as_ref().unwrap()
+    }
+}
+
+impl Index<BasicCoverageBlock> for CoverageGraph {
+    type Output = BasicCoverageBlockData;
+
+    #[inline]
+    fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
+        &self.bcbs[index]
+    }
+}
+
+impl IndexMut<BasicCoverageBlock> for CoverageGraph {
+    #[inline]
+    fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
+        &mut self.bcbs[index]
+    }
+}
+
+impl graph::DirectedGraph for CoverageGraph {
+    type Node = BasicCoverageBlock;
+}
+
+impl graph::WithNumNodes for CoverageGraph {
+    #[inline]
+    fn num_nodes(&self) -> usize {
+        self.bcbs.len()
+    }
+}
+
+impl graph::WithStartNode for CoverageGraph {
+    #[inline]
+    fn start_node(&self) -> Self::Node {
+        self.bcb_from_bb(mir::START_BLOCK)
+            .expect("mir::START_BLOCK should be in a BasicCoverageBlock")
+    }
+}
+
+type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;
+
+impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
+    type Item = BasicCoverageBlock;
+    type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
+}
+
+impl graph::WithSuccessors for CoverageGraph {
+    #[inline]
+    fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
+        self.successors[node].iter().cloned()
+    }
+}
+
+impl graph::GraphPredecessors<'graph> for CoverageGraph {
+    type Item = BasicCoverageBlock;
+    type Iter = std::vec::IntoIter<BasicCoverageBlock>;
+}
+
+impl graph::WithPredecessors for CoverageGraph {
+    #[inline]
+    fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
+        self.predecessors[node].clone().into_iter()
+    }
+}
+
+rustc_index::newtype_index! {
+    /// A node in the [control-flow graph][CFG] of CoverageGraph.
+    pub(crate) struct BasicCoverageBlock {
+        DEBUG_FORMAT = "bcb{}",
+    }
+}
+
+/// A BasicCoverageBlockData (BCB) represents the maximal-length sequence of MIR BasicBlocks without
+/// conditional branches; together, the BCBs form a new, simplified, coverage-specific Control Flow
+/// Graph, without altering the original MIR CFG.
+///
+/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
+/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
+///
+///   * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code
+///     that is injected by the Rust compiler but has no physical source code to count. This also
+///     means a BasicBlock with a `Call` terminator can be merged into its primary successor target
+///     block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
+///     of `#[should_panic]` tests and `catch_unwind()` handlers")
+///   * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
+///     not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
+///     a `Goto`, and merged with its successor into the same BCB.
+///
+/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
+/// In some cases, a BCB's execution count can be computed by an `Expression`. Additional
+/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression`s (by adding `ZERO`
+/// to the BCB's primary counter or expression).
+///
+/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
+/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
+/// significance.
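+///
+/// An illustrative sketch (block numbers are hypothetical): a MIR chain
+/// `bb0 (Call) -> bb1 (Goto) -> bb2 (SwitchInt)` collapses into one BCB, because `Call` and
+/// `Goto` each contribute a single non-unwind successor; the `SwitchInt` ends the BCB, and each
+/// of its targets begins a new one.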
+#[derive(Debug, Clone)]
+pub(crate) struct BasicCoverageBlockData {
+    pub basic_blocks: Vec<BasicBlock>,
+    pub counter_kind: Option<CoverageKind>,
+    edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
+}
+
+impl BasicCoverageBlockData {
+    pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
+        assert!(basic_blocks.len() > 0);
+        Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
+    }
+
+    #[inline(always)]
+    pub fn leader_bb(&self) -> BasicBlock {
+        self.basic_blocks[0]
+    }
+
+    #[inline(always)]
+    pub fn last_bb(&self) -> BasicBlock {
+        *self.basic_blocks.last().unwrap()
+    }
+
+    #[inline(always)]
+    pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
+        &mir_body[self.last_bb()].terminator()
+    }
+
+    pub fn set_counter(
+        &mut self,
+        counter_kind: CoverageKind,
+    ) -> Result<ExpressionOperandId, Error> {
+        debug_assert!(
+            // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
+            // have an expression (to be injected into an existing `BasicBlock` represented by this
+            // `BasicCoverageBlock`).
+            self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
+            "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
+        );
+        let operand = counter_kind.as_operand_id();
+        if let Some(replaced) = self.counter_kind.replace(counter_kind) {
+            Error::from_string(format!(
+                "attempt to set a BasicCoverageBlock coverage counter more than once; \
+                {:?} already had counter {:?}",
+                self, replaced,
+            ))
+        } else {
+            Ok(operand)
+        }
+    }
+
+    #[inline(always)]
+    pub fn counter(&self) -> Option<&CoverageKind> {
+        self.counter_kind.as_ref()
+    }
+
+    #[inline(always)]
+    pub fn take_counter(&mut self) -> Option<CoverageKind> {
+        self.counter_kind.take()
+    }
+
+    pub fn set_edge_counter_from(
+        &mut self,
+        from_bcb: BasicCoverageBlock,
+        counter_kind: CoverageKind,
+    ) -> Result<ExpressionOperandId, Error> {
+        if level_enabled!(tracing::Level::DEBUG) {
+            // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
+            // have an expression (to be injected into an existing `BasicBlock` represented by this
+            // `BasicCoverageBlock`).
+            if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) {
+                return Error::from_string(format!(
+                    "attempt to add an incoming edge counter from {:?} when the target BCB already \
+                    has a `Counter`",
+                    from_bcb
+                ));
+            }
+        }
+        let operand = counter_kind.as_operand_id();
+        if let Some(replaced) = self
+            .edge_from_bcbs
+            .get_or_insert_with(|| FxHashMap::default())
+            .insert(from_bcb, counter_kind)
+        {
+            Error::from_string(format!(
+                "attempt to set an edge counter more than once; from_bcb: \
+                {:?} already had counter {:?}",
+                from_bcb, replaced,
+            ))
+        } else {
+            Ok(operand)
+        }
+    }
+
+    #[inline]
+    pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> {
+        if let Some(edge_from_bcbs) = &self.edge_from_bcbs {
+            edge_from_bcbs.get(&from_bcb)
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub fn take_edge_counters(
+        &mut self,
+    ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
+        self.edge_from_bcbs.take().map_or(None, |m| Some(m.into_iter()))
+    }
+
+    pub fn id(&self) -> String {
+        format!(
+            "@{}",
+            self.basic_blocks
+                .iter()
+                .map(|bb| bb.index().to_string())
+                .collect::<Vec<_>>()
+                .join(ID_SEPARATOR)
+        )
+    }
+}
+
+/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
+/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
+/// the specific branching BCB, representing the edge between the two. The latter case
+/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
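+///
+/// For example (hypothetical BCB numbers): if a `SwitchInt` in `bcb2` branches to `bcb3` (which
+/// has only this one incoming edge) and to `bcb4` (which has several incoming edges), the two
+/// branches are represented as `bcb3` and `bcb2->bcb4`, respectively.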
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub(crate) struct BcbBranch {
+    pub edge_from_bcb: Option<BasicCoverageBlock>,
+    pub target_bcb: BasicCoverageBlock,
+}
+
+impl BcbBranch {
+    pub fn from_to(
+        from_bcb: BasicCoverageBlock,
+        to_bcb: BasicCoverageBlock,
+        basic_coverage_blocks: &CoverageGraph,
+    ) -> Self {
+        let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 {
+            Some(from_bcb)
+        } else {
+            None
+        };
+        Self { edge_from_bcb, target_bcb: to_bcb }
+    }
+
+    pub fn counter<'a>(
+        &self,
+        basic_coverage_blocks: &'a CoverageGraph,
+    ) -> Option<&'a CoverageKind> {
+        if let Some(from_bcb) = self.edge_from_bcb {
+            basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb)
+        } else {
+            basic_coverage_blocks[self.target_bcb].counter()
+        }
+    }
+
+    pub fn is_only_path_to_target(&self) -> bool {
+        self.edge_from_bcb.is_none()
+    }
+}
+
+impl std::fmt::Debug for BcbBranch {
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if let Some(from_bcb) = self.edge_from_bcb {
+            write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
+        } else {
+            write!(fmt, "{:?}", self.target_bcb)
+        }
+    }
+}
+
+// Returns the `Terminator`'s non-unwind successors.
+// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
+// `catch_unwind()` handlers.
+fn bcb_filtered_successors<'a, 'tcx>(
+    body: &'tcx &'a mir::Body<'tcx>,
+    term_kind: &'tcx TerminatorKind<'tcx>,
+) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a> {
+    let mut successors = term_kind.successors();
+    box match &term_kind {
+        // SwitchInt successors are never unwind, and all of them should be traversed.
+        TerminatorKind::SwitchInt { .. } => successors,
+        // For all other kinds, return only the first successor, if any, and ignore unwinds.
+        // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
+        // `next().into_iter()`) into the `mir::Successors` aliased type.
+        _ => successors.next().into_iter().chain(&[]),
+    }
+    .filter(move |&&successor| body[successor].terminator().kind != TerminatorKind::Unreachable)
+}
+
+/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
+/// parts of the CoverageGraph outside all loops. This supports traversing the BCB CFG in a way
+/// that ensures a loop is completely traversed before processing blocks after the end of the
+/// loop.
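+///
+/// Sketch of the behavior (see `extend_worklist`): when the traversal reaches a loop header, a
+/// new context is pushed whose worklist holds the blocks of that loop; a successor that is not
+/// dominated by the loop header (a loop exit) is instead queued on an enclosing context, and
+/// branching successors are placed at the front of their worklist so they are processed first.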
+// FIXME(richkadel): Add unit tests for TraversalContext.
+#[derive(Debug)]
+pub(crate) struct TraversalContext {
+    /// From one or more backedges returning to a loop header.
+    pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,
+
+    /// The worklist of CoverageGraph nodes to be traversed in the loop with the given loop
+    /// backedges, such that the loop is the innermost loop containing these
+    /// nodes.
+    pub worklist: Vec<BasicCoverageBlock>,
+}
+
+pub(crate) struct TraverseCoverageGraphWithLoops {
+    pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+    pub context_stack: Vec<TraversalContext>,
+    visited: BitSet<BasicCoverageBlock>,
+}
+
+impl TraverseCoverageGraphWithLoops {
+    pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
+        let start_bcb = basic_coverage_blocks.start_node();
+        let backedges = find_loop_backedges(basic_coverage_blocks);
+        let mut context_stack = Vec::new();
+        context_stack.push(TraversalContext { loop_backedges: None, worklist: vec![start_bcb] });
+        // `context_stack` starts with a `TraversalContext` for the main function context (beginning
+        // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
+        // of the stack as loops are entered, and popped off of the stack when a loop's worklist is
+        // exhausted.
+        let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
+        Self { backedges, context_stack, visited }
+    }
+
+    pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
+        debug!(
+            "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
+            self.context_stack.iter().rev().collect::<Vec<_>>()
+        );
+        while let Some(next_bcb) = {
+            // Strip contexts with empty worklists from the top of the stack
+            while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) {
+                self.context_stack.pop();
+            }
+            // Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
+            self.context_stack.last_mut().map_or(None, |context| context.worklist.pop())
+        } {
+            if !self.visited.insert(next_bcb) {
+                debug!("Already visited: {:?}", next_bcb);
+                continue;
+            }
+            debug!("Visiting {:?}", next_bcb);
+            if self.backedges[next_bcb].len() > 0 {
+                debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
+                self.context_stack.push(TraversalContext {
+                    loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
+                    worklist: Vec::new(),
+                });
+            }
+            self.extend_worklist(basic_coverage_blocks, next_bcb);
+            return Some(next_bcb);
+        }
+        None
+    }
+
+    pub fn extend_worklist(
+        &mut self,
+        basic_coverage_blocks: &CoverageGraph,
+        bcb: BasicCoverageBlock,
+    ) {
+        let successors = &basic_coverage_blocks.successors[bcb];
+        debug!("{:?} has {} successors:", bcb, successors.len());
+        for &successor in successors {
+            if successor == bcb {
+                debug!(
+                    "{:?} has itself as its own successor. (Note, the compiled code will \
+                    generate an infinite loop.)",
+                    bcb
+                );
+                // Don't re-add this successor to the worklist. We are already processing it.
+                break;
+            }
+            for context in self.context_stack.iter_mut().rev() {
+                // Add successors of the current BCB to the appropriate context. Successors that
+                // stay within a loop are added to that loop's context worklist. Successors that
+                // exit the loop (they are not dominated by the loop header) must be reachable
+                // from other BCBs outside the loop, and they will be added to a different
+                // worklist.
+                //
+                // Branching blocks (with more than one successor) must be processed before
+                // blocks with only one successor, to prevent unnecessarily complicating
+                // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
+                // branching block would have given an `Expression` (or vice versa).
+                let (some_successor_to_add, some_loop_header) =
+                    if let Some((_, loop_header)) = context.loop_backedges {
+                        if basic_coverage_blocks.is_dominated_by(successor, loop_header) {
+                            (Some(successor), Some(loop_header))
+                        } else {
+                            (None, None)
+                        }
+                    } else {
+                        (Some(successor), None)
+                    };
+                if let Some(successor_to_add) = some_successor_to_add {
+                    if basic_coverage_blocks.successors[successor_to_add].len() > 1 {
+                        debug!(
+                            "{:?} successor is branching. Prioritize it at the beginning of \
+                            the {}",
+                            successor_to_add,
+                            if let Some(loop_header) = some_loop_header {
+                                format!("worklist for the loop headed by {:?}", loop_header)
+                            } else {
+                                String::from("non-loop worklist")
+                            },
+                        );
+                        context.worklist.insert(0, successor_to_add);
+                    } else {
+                        debug!(
+                            "{:?} successor is non-branching. Defer it to the end of the {}",
+                            successor_to_add,
+                            if let Some(loop_header) = some_loop_header {
+                                format!("worklist for the loop headed by {:?}", loop_header)
+                            } else {
+                                String::from("non-loop worklist")
+                            },
+                        );
+                        context.worklist.push(successor_to_add);
+                    }
+                    break;
+                }
+            }
+        }
+    }
+
+    pub fn is_complete(&self) -> bool {
+        self.visited.count() == self.visited.domain_size()
+    }
+
+    pub fn unvisited(&self) -> Vec<BasicCoverageBlock> {
+        let mut unvisited_set: BitSet<BasicCoverageBlock> =
+            BitSet::new_filled(self.visited.domain_size());
+        unvisited_set.subtract(&self.visited);
+        unvisited_set.iter().collect::<Vec<_>>()
+    }
+}
+
+fn find_loop_backedges(
+    basic_coverage_blocks: &CoverageGraph,
+) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> {
+    let num_bcbs = basic_coverage_blocks.num_nodes();
+    let mut backedges = IndexVec::from_elem_n(Vec::<BasicCoverageBlock>::new(), num_bcbs);
+
+    // Identify loops by their backedges.
+    //
+    // The computational complexity is bounded by `n * s * d`, where `n` is the number of
+    // `BasicCoverageBlock` nodes (the simplified/reduced representation of the CFG derived from the
+    // MIR); `s` is the average number of successors per node (which is most likely less than 2, and
+    // independent of the size of the function, so it can be treated as a constant);
+    // and `d` is the average number of dominators per node.
+    //
+    // The average number of dominators depends on the size and complexity of the function, and
+    // nodes near the start of the function's control flow graph typically have fewer dominators
+    // than nodes near the end of the CFG. Without doing a detailed mathematical analysis, I
+    // think the resulting complexity has the characteristics of O(n log n).
+    //
+    // The overall complexity appears to be comparable to many other MIR transform algorithms, and I
+    // don't expect that this function is creating a performance hot spot, but if this becomes an
+    // issue, there may be ways to optimize the `is_dominated_by` algorithm (as indicated by an
+    // existing `FIXME` comment in that code), or possibly ways to optimize its usage here, perhaps
+    // by keeping track of results for visited `BasicCoverageBlock`s if they can be used to short
+    // circuit downstream `is_dominated_by` checks.
+    //
+    // For now, that kind of optimization seems unnecessarily complicated.
+    for (bcb, _) in basic_coverage_blocks.iter_enumerated() {
+        for &successor in &basic_coverage_blocks.successors[bcb] {
+            if basic_coverage_blocks.is_dominated_by(bcb, successor) {
+                let loop_header = successor;
+                let backedge_from_bcb = bcb;
+                debug!(
+                    "Found BCB backedge: {:?} -> loop_header: {:?}",
+                    backedge_from_bcb, loop_header
+                );
+                backedges[loop_header].push(backedge_from_bcb);
+            }
+        }
+    }
+    backedges
+}
+
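+/// A preorder traversal over the MIR CFG that only follows the successors returned by the given
+/// `filtered_successors` function (here, `bcb_filtered_successors`, which skips unwind edges and
+/// successors that are `Unreachable`), "short-circuiting" paths that coverage ignores.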
+pub struct ShortCircuitPreorder<
+    'a,
+    'tcx,
+    F: Fn(
+        &'tcx &'a mir::Body<'tcx>,
+        &'tcx TerminatorKind<'tcx>,
+    ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
+> {
+    body: &'tcx &'a mir::Body<'tcx>,
+    visited: BitSet<BasicBlock>,
+    worklist: Vec<BasicBlock>,
+    filtered_successors: F,
+}
+
+impl<
+    'a,
+    'tcx,
+    F: Fn(
+        &'tcx &'a mir::Body<'tcx>,
+        &'tcx TerminatorKind<'tcx>,
+    ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
+> ShortCircuitPreorder<'a, 'tcx, F>
+{
+    pub fn new(
+        body: &'tcx &'a mir::Body<'tcx>,
+        filtered_successors: F,
+    ) -> ShortCircuitPreorder<'a, 'tcx, F> {
+        let worklist = vec![mir::START_BLOCK];
+
+        ShortCircuitPreorder {
+            body,
+            visited: BitSet::new_empty(body.basic_blocks().len()),
+            worklist,
+            filtered_successors,
+        }
+    }
+}
+
+impl<
+    'a: 'tcx,
+    'tcx,
+    F: Fn(
+        &'tcx &'a mir::Body<'tcx>,
+        &'tcx TerminatorKind<'tcx>,
+    ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
+> Iterator for ShortCircuitPreorder<'a, 'tcx, F>
+{
+    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+        while let Some(idx) = self.worklist.pop() {
+            if !self.visited.insert(idx) {
+                continue;
+            }
+
+            let data = &self.body[idx];
+
+            if let Some(ref term) = data.terminator {
+                self.worklist.extend((self.filtered_successors)(&self.body, &term.kind));
+            }
+
+            return Some((idx, data));
+        }
+
+        None
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let size = self.body.basic_blocks().len() - self.visited.count();
+        (size, Some(size))
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/coverage/mod.rs b/compiler/rustc_mir/src/transform/coverage/mod.rs
new file mode 100644
index 0000000..c553492
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/coverage/mod.rs
@@ -0,0 +1,539 @@
+pub mod query;
+
+mod counters;
+mod debug;
+mod graph;
+mod spans;
+
+use counters::CoverageCounters;
+use graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
+use spans::{CoverageSpan, CoverageSpans};
+
+use crate::transform::MirPass;
+use crate::util::pretty;
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::graph::WithNumNodes;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::Lrc;
+use rustc_index::vec::IndexVec;
+use rustc_middle::hir;
+use rustc_middle::hir::map::blocks::FnLikeNode;
+use rustc_middle::ich::StableHashingContext;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::{
+    self, BasicBlock, BasicBlockData, Coverage, SourceInfo, Statement, StatementKind, Terminator,
+    TerminatorKind,
+};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::def_id::DefId;
+use rustc_span::{CharPos, Pos, SourceFile, Span, Symbol};
+
+/// A simple error message wrapper for `coverage::Error`s.
+#[derive(Debug)]
+pub(crate) struct Error {
+    message: String,
+}
+
+impl Error {
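+    /// Builds the error and returns it already wrapped in `Err`, so call sites can simply
+    /// `return Error::from_string(...)` from a function returning `Result<_, Error>`.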
+    pub fn from_string<T>(message: String) -> Result<T, Error> {
+        Err(Self { message })
+    }
+}
+
+/// Inserts `StatementKind::Coverage` statements that instrument the binary with injected
+/// counters, via the intrinsic `llvm.instrprof.increment`, and/or inject metadata used during codegen
+/// to construct the coverage map.
+pub struct InstrumentCoverage;
+
+impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, mir_body: &mut mir::Body<'tcx>) {
+        let mir_source = mir_body.source;
+
+        // If the InstrumentCoverage pass is called on promoted MIRs, skip them.
+        // See: https://github.com/rust-lang/rust/pull/73011#discussion_r438317601
+        if mir_source.promoted.is_some() {
+            trace!(
+                "InstrumentCoverage skipped for {:?} (already promoted for Miri evaluation)",
+                mir_source.def_id()
+            );
+            return;
+        }
+
+        let hir_id = tcx.hir().local_def_id_to_hir_id(mir_source.def_id().expect_local());
+        let is_fn_like = FnLikeNode::from_node(tcx.hir().get(hir_id)).is_some();
+
+        // Only instrument functions, methods, and closures (not constants since they are evaluated
+        // at compile time by Miri).
+        // FIXME(#73156): Handle source code coverage in const eval, but note, if and when const
+        // expressions get coverage spans, we will probably have to "carve out" space for const
+        // expressions from coverage spans in enclosing MIR's, like we do for closures. (That might
+        // be tricky if const expressions have no corresponding statements in the enclosing MIR.
+        // Closures are carved out by their initial `Assign` statement.)
+        if !is_fn_like {
+            trace!("InstrumentCoverage skipped for {:?} (not an FnLikeNode)", mir_source.def_id());
+            return;
+        }
+
+        trace!("InstrumentCoverage starting for {:?}", mir_source.def_id());
+        Instrumentor::new(&self.name(), tcx, mir_body).inject_counters();
+        trace!("InstrumentCoverage starting for {:?}", mir_source.def_id());
+    }
+}
+
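+/// Per-function state for the `InstrumentCoverage` pass: the MIR body being instrumented, the
+/// span of its corresponding HIR body, the derived `CoverageGraph`, and the `CoverageCounters`
+/// used to create counters and expressions.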
+struct Instrumentor<'a, 'tcx> {
+    pass_name: &'a str,
+    tcx: TyCtxt<'tcx>,
+    mir_body: &'a mut mir::Body<'tcx>,
+    body_span: Span,
+    basic_coverage_blocks: CoverageGraph,
+    coverage_counters: CoverageCounters,
+}
+
+impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
+    fn new(pass_name: &'a str, tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
+        let hir_body = hir_body(tcx, mir_body.source.def_id());
+        let body_span = hir_body.value.span;
+        let function_source_hash = hash_mir_source(tcx, hir_body);
+        let basic_coverage_blocks = CoverageGraph::from_mir(mir_body);
+        Self {
+            pass_name,
+            tcx,
+            mir_body,
+            body_span,
+            basic_coverage_blocks,
+            coverage_counters: CoverageCounters::new(function_source_hash),
+        }
+    }
+
+    fn inject_counters(&'a mut self) {
+        let tcx = self.tcx;
+        let source_map = tcx.sess.source_map();
+        let mir_source = self.mir_body.source;
+        let def_id = mir_source.def_id();
+        let body_span = self.body_span;
+
+        debug!("instrumenting {:?}, span: {}", def_id, source_map.span_to_string(body_span));
+
+        let mut graphviz_data = debug::GraphvizData::new();
+        let mut debug_used_expressions = debug::UsedExpressions::new();
+
+        let dump_mir = pretty::dump_enabled(tcx, self.pass_name, def_id);
+        let dump_graphviz = dump_mir && tcx.sess.opts.debugging_opts.dump_mir_graphviz;
+        let dump_spanview = dump_mir && tcx.sess.opts.debugging_opts.dump_mir_spanview.is_some();
+
+        if dump_graphviz {
+            graphviz_data.enable();
+            self.coverage_counters.enable_debug();
+        }
+
+        if dump_graphviz || level_enabled!(tracing::Level::DEBUG) {
+            debug_used_expressions.enable();
+        }
+
+        ////////////////////////////////////////////////////
+        // Compute `CoverageSpan`s from the `CoverageGraph`.
+        let coverage_spans = CoverageSpans::generate_coverage_spans(
+            &self.mir_body,
+            body_span,
+            &self.basic_coverage_blocks,
+        );
+
+        if dump_spanview {
+            debug::dump_coverage_spanview(
+                tcx,
+                self.mir_body,
+                &self.basic_coverage_blocks,
+                self.pass_name,
+                &coverage_spans,
+            );
+        }
+
+        ////////////////////////////////////////////////////
+        // Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
+        // every `CoverageSpan` has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
+        // and all `Expression` dependencies (operands) are also generated, for any other
+        // `BasicCoverageBlock`s not already associated with a `CoverageSpan`.
+        //
+        // Intermediate expressions (used to compute other `Expression` values), which have no
+        // direct association with any `BasicCoverageBlock`, are returned in the method's `Result`.
+        let intermediate_expressions_or_error = self
+            .coverage_counters
+            .make_bcb_counters(&mut self.basic_coverage_blocks, &coverage_spans);
+
+        let (result, intermediate_expressions) = match intermediate_expressions_or_error {
+            Ok(intermediate_expressions) => {
+                // If debugging, add any intermediate expressions (which are not associated with any
+                // BCB) to the `debug_used_expressions` map.
+                if debug_used_expressions.is_enabled() {
+                    for intermediate_expression in &intermediate_expressions {
+                        debug_used_expressions.add_expression_operands(intermediate_expression);
+                    }
+                }
+
+                ////////////////////////////////////////////////////
+                // Remove the counter or edge counter from each `CoverageSpan`'s associated
+                // `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR.
+                //
+                // `Coverage` statements injected from `CoverageSpan`s will include the code regions
+                // (source code start and end positions) to be counted by the associated counter.
+                //
+                // These `CoverageSpan`-associated counters are removed from their associated
+                // `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph`
+                // are indirect counters (to be injected next, without associated code regions).
+                self.inject_coverage_span_counters(
+                    coverage_spans,
+                    &mut graphviz_data,
+                    &mut debug_used_expressions,
+                );
+
+                ////////////////////////////////////////////////////
+                // For any remaining `BasicCoverageBlock` counters (that were not associated with
+                // any `CoverageSpan`), inject `Coverage` statements (_without_ code region `Span`s)
+                // to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on
+                // are in fact counted, even though they don't directly contribute to counting
+                // their own independent code region's coverage.
+                self.inject_indirect_counters(&mut graphviz_data, &mut debug_used_expressions);
+
+                // Intermediate expressions will be injected as the final step, after generating
+                // debug output, if any.
+                ////////////////////////////////////////////////////
+
+                (Ok(()), intermediate_expressions)
+            }
+            Err(e) => (Err(e), Vec::new()),
+        };
+
+        if graphviz_data.is_enabled() {
+            // Even if there was an error, a partial CoverageGraph can still generate a useful
+            // graphviz output.
+            debug::dump_coverage_graphviz(
+                tcx,
+                self.mir_body,
+                self.pass_name,
+                &self.basic_coverage_blocks,
+                &self.coverage_counters.debug_counters,
+                &graphviz_data,
+                &intermediate_expressions,
+                &debug_used_expressions,
+            );
+        }
+
+        if let Err(e) = result {
+            bug!("Error processing: {:?}: {:?}", self.mir_body.source.def_id(), e)
+        };
+
+        // Depending on current `debug_options()`, `alert_on_unused_expressions()` could panic, so
+        // this check is performed as late as possible, to allow other debug output (logs and dump
+        // files), which might be helpful in analyzing unused expressions, to still be generated.
+        debug_used_expressions.alert_on_unused_expressions(&self.coverage_counters.debug_counters);
+
+        ////////////////////////////////////////////////////
+        // Finally, inject the intermediate expressions collected along the way.
+        for intermediate_expression in intermediate_expressions {
+            inject_intermediate_expression(self.mir_body, intermediate_expression);
+        }
+    }
+
+    /// Inject a counter for each `CoverageSpan`. There can be multiple `CoverageSpan`s for a given
+    /// BCB, but only one actual counter needs to be incremented per BCB. `bcb_counters` maps each
+    /// `bcb` to its `Counter`, when injected. Subsequent `CoverageSpan`s for a BCB that already has
+    /// a `Counter` will inject an `Expression` instead, and compute its value by adding `ZERO` to
+    /// the BCB `Counter` value.
+    ///
+    /// If debugging, add every BCB `Expression` associated with a `CoverageSpan` to the
+    /// `used_expression_operands` map.
+    fn inject_coverage_span_counters(
+        &mut self,
+        coverage_spans: Vec<CoverageSpan>,
+        graphviz_data: &mut debug::GraphvizData,
+        debug_used_expressions: &mut debug::UsedExpressions,
+    ) {
+        let tcx = self.tcx;
+        let source_map = tcx.sess.source_map();
+        let body_span = self.body_span;
+        let source_file = source_map.lookup_source_file(body_span.lo());
+        let file_name = Symbol::intern(&source_file.name.to_string());
+
+        let mut bcb_counters = IndexVec::from_elem_n(None, self.basic_coverage_blocks.num_nodes());
+        for covspan in coverage_spans {
+            let bcb = covspan.bcb;
+            let span = covspan.span;
+            let counter_kind = if let Some(&counter_operand) = bcb_counters[bcb].as_ref() {
+                self.coverage_counters.make_identity_counter(counter_operand)
+            } else if let Some(counter_kind) = self.bcb_data_mut(bcb).take_counter() {
+                bcb_counters[bcb] = Some(counter_kind.as_operand_id());
+                debug_used_expressions.add_expression_operands(&counter_kind);
+                counter_kind
+            } else {
+                bug!("Every BasicCoverageBlock should have a Counter or Expression");
+            };
+            graphviz_data.add_bcb_coverage_span_with_counter(bcb, &covspan, &counter_kind);
+            // FIXME(#78542): Can spans for `TerminatorKind::Goto` be improved to avoid special
+            // cases?
+            let some_code_region = if self.is_code_region_redundant(bcb, span, body_span) {
+                None
+            } else {
+                Some(make_code_region(file_name, &source_file, span, body_span))
+            };
+            inject_statement(self.mir_body, counter_kind, self.bcb_last_bb(bcb), some_code_region);
+        }
+    }
+
+    /// Returns true if the type of `BasicCoverageBlock` (specifically, the `TerminatorKind` of its
+    /// last `BasicBlock`) with the given `Span` (relative to the `body_span`) is known to produce
+    /// a redundant coverage count.
+    ///
+    /// There is at least one case for this, and if it's not handled, the last line in a function
+    /// will be double-counted.
+    ///
+    /// If this method returns `true`, the counter (which other `Expressions` may depend on) is
+    /// still injected, but without an associated code region.
+    // FIXME(#78542): Can spans for `TerminatorKind::Goto` be improved to avoid special cases?
+    fn is_code_region_redundant(
+        &self,
+        bcb: BasicCoverageBlock,
+        span: Span,
+        body_span: Span,
+    ) -> bool {
+        if span.hi() == body_span.hi() {
+            // All functions execute a `Return`-terminated `BasicBlock`, regardless of how the
+            // function returns; but only some functions also _can_ return after a `Goto` block
+            // that ends on the closing brace of the function (with the `Return`). When this
+            // happens, the last character is counted 2 (or possibly more) times, even though we
+            // know the function returned only once. By giving all `Goto` terminators at
+            // the end of a function a `non-reportable` code region, they are still counted
+            // if appropriate, but they don't increment the line counter, as long as there is
+            // also a `Return` on that last line.
+            if let TerminatorKind::Goto { .. } = self.bcb_terminator(bcb).kind {
+                return true;
+            }
+        }
+        false
+    }
+
+    /// `inject_coverage_span_counters()` looped through the `CoverageSpan`s and injected the
+    /// counter from the `CoverageSpan`s `BasicCoverageBlock`, removing it from the BCB in the
+    /// process (via `take_counter()`).
+    ///
+    /// Any other counter associated with a `BasicCoverageBlock`, or its incoming edge, but not
+    /// associated with a `CoverageSpan`, should only exist if the counter is an `Expression`
+    /// dependency (one of the expression operands). Collect them, and inject the additional
+    /// counters into the MIR, without a reportable coverage span.
+    fn inject_indirect_counters(
+        &mut self,
+        graphviz_data: &mut debug::GraphvizData,
+        debug_used_expressions: &mut debug::UsedExpressions,
+    ) {
+        let mut bcb_counters_without_direct_coverage_spans = Vec::new();
+        for (target_bcb, target_bcb_data) in self.basic_coverage_blocks.iter_enumerated_mut() {
+            if let Some(counter_kind) = target_bcb_data.take_counter() {
+                bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind));
+            }
+            if let Some(edge_counters) = target_bcb_data.take_edge_counters() {
+                for (from_bcb, counter_kind) in edge_counters {
+                    bcb_counters_without_direct_coverage_spans.push((
+                        Some(from_bcb),
+                        target_bcb,
+                        counter_kind,
+                    ));
+                }
+            }
+        }
+
+        // If debug is enabled, validate that every BCB or edge counter not directly associated
+        // with a coverage span is at least indirectly associated (it is a dependency of a BCB
+        // counter that _is_ associated with a coverage span).
+        debug_used_expressions.validate(&bcb_counters_without_direct_coverage_spans);
+
+        for (edge_from_bcb, target_bcb, counter_kind) in bcb_counters_without_direct_coverage_spans
+        {
+            debug_used_expressions.add_unused_expression_if_not_found(
+                &counter_kind,
+                edge_from_bcb,
+                target_bcb,
+            );
+
+            match counter_kind {
+                CoverageKind::Counter { .. } => {
+                    let inject_to_bb = if let Some(from_bcb) = edge_from_bcb {
+                        // The MIR edge starts at `from_bb` (the outgoing / last BasicBlock in
+                        // `from_bcb`) and ends at `to_bb` (the incoming / first BasicBlock in the
+                        // `target_bcb`; also called the `leader_bb`).
+                        let from_bb = self.bcb_last_bb(from_bcb);
+                        let to_bb = self.bcb_leader_bb(target_bcb);
+
+                        let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
+                        graphviz_data.set_edge_counter(from_bcb, new_bb, &counter_kind);
+                        debug!(
+                            "Edge {:?} (last {:?}) -> {:?} (leader {:?}) requires a new MIR \
+                            BasicBlock {:?}, for unclaimed edge counter {}",
+                            edge_from_bcb,
+                            from_bb,
+                            target_bcb,
+                            to_bb,
+                            new_bb,
+                            self.format_counter(&counter_kind),
+                        );
+                        new_bb
+                    } else {
+                        let target_bb = self.bcb_last_bb(target_bcb);
+                        graphviz_data.add_bcb_dependency_counter(target_bcb, &counter_kind);
+                        debug!(
+                            "{:?} ({:?}) gets a new Coverage statement for unclaimed counter {}",
+                            target_bcb,
+                            target_bb,
+                            self.format_counter(&counter_kind),
+                        );
+                        target_bb
+                    };
+
+                    inject_statement(self.mir_body, counter_kind, inject_to_bb, None);
+                }
+                CoverageKind::Expression { .. } => {
+                    inject_intermediate_expression(self.mir_body, counter_kind)
+                }
+                _ => bug!("CoverageKind should be a counter"),
+            }
+        }
+    }
+
+    #[inline]
+    fn bcb_leader_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
+        self.bcb_data(bcb).leader_bb()
+    }
+
+    #[inline]
+    fn bcb_last_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
+        self.bcb_data(bcb).last_bb()
+    }
+
+    #[inline]
+    fn bcb_terminator(&self, bcb: BasicCoverageBlock) -> &Terminator<'tcx> {
+        self.bcb_data(bcb).terminator(self.mir_body)
+    }
+
+    #[inline]
+    fn bcb_data(&self, bcb: BasicCoverageBlock) -> &BasicCoverageBlockData {
+        &self.basic_coverage_blocks[bcb]
+    }
+
+    #[inline]
+    fn bcb_data_mut(&mut self, bcb: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
+        &mut self.basic_coverage_blocks[bcb]
+    }
+
+    #[inline]
+    fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+        self.coverage_counters.debug_counters.format_counter(counter_kind)
+    }
+}
+
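+// Splices a new, empty `BasicBlock` into the MIR edge `from_bb -> to_bb`, so an edge counter
+// can be attached to it. Illustrative sketch (not part of the original comments):
+//
+//     before:  from_bb ----------------------------------> to_bb
+//     after:   from_bb --> new_bb [Goto { target: to_bb }] --> to_bb
+//
+// The counter statement itself is pushed into `new_bb` afterwards, by
+// `inject_indirect_counters` via `inject_statement`.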
+fn inject_edge_counter_basic_block(
+    mir_body: &mut mir::Body<'tcx>,
+    from_bb: BasicBlock,
+    to_bb: BasicBlock,
+) -> BasicBlock {
+    let span = mir_body[from_bb].terminator().source_info.span.shrink_to_hi();
+    let new_bb = mir_body.basic_blocks_mut().push(BasicBlockData {
+        statements: vec![], // counter will be injected here
+        terminator: Some(Terminator {
+            source_info: SourceInfo::outermost(span),
+            kind: TerminatorKind::Goto { target: to_bb },
+        }),
+        is_cleanup: false,
+    });
+    let edge_ref = mir_body[from_bb]
+        .terminator_mut()
+        .successors_mut()
+        .find(|successor| **successor == to_bb)
+        .expect("from_bb should have a successor for to_bb");
+    *edge_ref = new_bb;
+    new_bb
+}
+
+fn inject_statement(
+    mir_body: &mut mir::Body<'tcx>,
+    counter_kind: CoverageKind,
+    bb: BasicBlock,
+    some_code_region: Option<CodeRegion>,
+) {
+    debug!(
+        "  injecting statement {:?} for {:?} at code region: {:?}",
+        counter_kind, bb, some_code_region
+    );
+    let data = &mut mir_body[bb];
+    let source_info = data.terminator().source_info;
+    let statement = Statement {
+        source_info,
+        kind: StatementKind::Coverage(box Coverage {
+            kind: counter_kind,
+            code_region: some_code_region,
+        }),
+    };
+    data.statements.push(statement);
+}
+
+// Non-code expressions are injected into the coverage map, without generating executable code.
+fn inject_intermediate_expression(mir_body: &mut mir::Body<'tcx>, expression: CoverageKind) {
+    debug_assert!(if let CoverageKind::Expression { .. } = expression { true } else { false });
+    debug!("  injecting non-code expression {:?}", expression);
+    let inject_in_bb = mir::START_BLOCK;
+    let data = &mut mir_body[inject_in_bb];
+    let source_info = data.terminator().source_info;
+    let statement = Statement {
+        source_info,
+        kind: StatementKind::Coverage(box Coverage { kind: expression, code_region: None }),
+    };
+    data.statements.push(statement);
+}
+
+/// Converts the `Span` into its file name, start line and column, and end line and column
+fn make_code_region(
+    file_name: Symbol,
+    source_file: &Lrc<SourceFile>,
+    span: Span,
+    body_span: Span,
+) -> CodeRegion {
+    let (start_line, mut start_col) = source_file.lookup_file_pos(span.lo());
+    let (end_line, end_col) = if span.hi() == span.lo() {
+        let (end_line, mut end_col) = (start_line, start_col);
+        // Extend an empty span by one character so the region will be counted.
+        let CharPos(char_pos) = start_col;
+        if span.hi() == body_span.hi() {
+            start_col = CharPos(char_pos - 1);
+        } else {
+            end_col = CharPos(char_pos + 1);
+        }
+        (end_line, end_col)
+    } else {
+        source_file.lookup_file_pos(span.hi())
+    };
+    CodeRegion {
+        file_name,
+        start_line: start_line as u32,
+        start_col: start_col.to_u32() + 1,
+        end_line: end_line as u32,
+        end_col: end_col.to_u32() + 1,
+    }
+}
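+
+// Worked example for `make_code_region` (values are assumed, for illustration only): a
+// non-empty span from line 3, `CharPos(4)` to line 3, `CharPos(9)` produces
+// `start_line: 3, start_col: 5, end_line: 3, end_col: 10` (columns are converted to 1-based).
+// An empty span (`lo == hi`) is widened by one column (to the right in the common case, or to
+// the left when it falls at the very end of the `body_span`) so the region is still counted.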
+
+fn hir_body<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx rustc_hir::Body<'tcx> {
+    let hir_node = tcx.hir().get_if_local(def_id).expect("expected DefId is local");
+    let fn_body_id = hir::map::associated_body(hir_node).expect("HIR node is a function with body");
+    tcx.hir().body(fn_body_id)
+}
+
+fn hash_mir_source<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &'tcx rustc_hir::Body<'tcx>) -> u64 {
+    let mut hcx = tcx.create_no_span_stable_hashing_context();
+    hash(&mut hcx, &hir_body.value).to_smaller_hash()
+}
+
+fn hash(
+    hcx: &mut StableHashingContext<'tcx>,
+    node: &impl HashStable<StableHashingContext<'tcx>>,
+) -> Fingerprint {
+    let mut stable_hasher = StableHasher::new();
+    node.hash_stable(hcx, &mut stable_hasher);
+    stable_hasher.finish()
+}
diff --git a/compiler/rustc_mir/src/transform/coverage/query.rs b/compiler/rustc_mir/src/transform/coverage/query.rs
new file mode 100644
index 0000000..e86bb96
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/coverage/query.rs
@@ -0,0 +1,125 @@
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{Coverage, CoverageInfo, Location};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::def_id::DefId;
+
+/// The `query` provider for `CoverageInfo`, requested by `codegen_coverage()` (to inject each
+/// counter) and `FunctionCoverage::new()` (to extract the coverage map metadata from the MIR).
+pub(crate) fn provide(providers: &mut Providers) {
+    providers.coverageinfo = |tcx, def_id| coverageinfo_from_mir(tcx, def_id);
+}
+
+/// The `num_counters` argument to `llvm.instrprof.increment` is the max counter_id + 1, or in
+/// other words, the number of counter value references injected into the MIR (plus 1 for the
+/// reserved `ZERO` counter, which uses counter ID `0` when included in an expression). Injected
+/// counters have a counter ID in the range `1..num_counters`.
+///
+/// `num_expressions` is the number of counter expressions added to the MIR body.
+///
+/// Both `num_counters` and `num_expressions` are used to initialize new vectors, during backend
+/// code generation, to look up counters and expressions by simple u32 indexes.
+///
+/// MIR optimization may split and duplicate some BasicBlock sequences, or optimize out some code
+/// including injected counters. (It is OK if some counters are optimized out, but those counters
+/// are still included in the total `num_counters` or `num_expressions`.) Simply counting the
+/// calls may not work; but computing the number of counters or expressions by adding `1` to the
+/// highest ID (for a given instrumented function) is valid.
+///
+/// This visitor runs twice, first with `add_missing_operands` set to `false`, to find the maximum
+/// counter ID and maximum expression ID based on their enum variant `id` fields; then, as a
+/// safeguard, with `add_missing_operands` set to `true`, to find any other counter or expression
+/// IDs referenced by expression operands, if not already seen.
+///
+/// Ideally, each operand ID in a MIR `CoverageKind::Expression` will have a separate MIR `Coverage`
+/// statement for the `Counter` or `Expression` with the referenced ID, but since current or future
+/// MIR optimizations can theoretically optimize out segments of a MIR, it may not be possible to
+/// guarantee this, so the second pass ensures the `CoverageInfo` counts include all referenced IDs.
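+//
+// For example (illustrative, not from the original comments): if MIR optimization removed the
+// `Coverage` statement for `Counter` id `5`, but some `Expression` still names `5` as an
+// operand, the second pass bumps `num_counters` to at least `6`, so the codegen lookup arrays
+// are large enough.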
+struct CoverageVisitor {
+    info: CoverageInfo,
+    add_missing_operands: bool,
+}
+
+impl CoverageVisitor {
+    /// Updates `num_counters` to the maximum encountered zero-based counter_id plus 1. Note the
+    /// final computed number of counters should be the number of all `CoverageKind::Counter`
+    /// statements in the MIR *plus one* for the implicit `ZERO` counter.
+    #[inline(always)]
+    fn update_num_counters(&mut self, counter_id: u32) {
+        self.info.num_counters = std::cmp::max(self.info.num_counters, counter_id + 1);
+    }
+
+    /// Computes an expression index for each expression ID, and updates `num_expressions` to the
+    /// maximum encountered index plus 1.
+    #[inline(always)]
+    fn update_num_expressions(&mut self, expression_id: u32) {
+        let expression_index = u32::MAX - expression_id;
+        self.info.num_expressions = std::cmp::max(self.info.num_expressions, expression_index + 1);
+    }
+
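+    // Illustrative note (not part of the original comments): counter IDs grow upward from `1`,
+    // while expression IDs grow downward from `u32::MAX`, so an expression with
+    // `id == u32::MAX` maps to index `0`, `u32::MAX - 1` maps to index `1`, and so on.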
+    fn update_from_expression_operand(&mut self, operand_id: u32) {
+        if operand_id >= self.info.num_counters {
+            let operand_as_expression_index = u32::MAX - operand_id;
+            if operand_as_expression_index >= self.info.num_expressions {
+                // The operand ID is outside the known range of counter IDs and also outside the
+                // known range of expression IDs. In either case, the result of a missing operand
+                // (if and when used in an expression) will be zero, so from a computation
+                // perspective, it doesn't matter whether it is interpreted as a counter or an
+                // expression.
+                //
+                // However, the `num_counters` and `num_expressions` query results are used to
+                // allocate arrays when generating the coverage map (during codegen), so choose
+                // the type that grows either `num_counters` or `num_expressions` the least.
+                if operand_id - self.info.num_counters
+                    < operand_as_expression_index - self.info.num_expressions
+                {
+                    self.update_num_counters(operand_id)
+                } else {
+                    self.update_num_expressions(operand_id)
+                }
+            }
+        }
+    }
+}
+
+impl Visitor<'_> for CoverageVisitor {
+    fn visit_coverage(&mut self, coverage: &Coverage, _location: Location) {
+        if self.add_missing_operands {
+            match coverage.kind {
+                CoverageKind::Expression { lhs, rhs, .. } => {
+                    self.update_from_expression_operand(u32::from(lhs));
+                    self.update_from_expression_operand(u32::from(rhs));
+                }
+                _ => {}
+            }
+        } else {
+            match coverage.kind {
+                CoverageKind::Counter { id, .. } => {
+                    self.update_num_counters(u32::from(id));
+                }
+                CoverageKind::Expression { id, .. } => {
+                    self.update_num_expressions(u32::from(id));
+                }
+                _ => {}
+            }
+        }
+    }
+}
+
+fn coverageinfo_from_mir<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> CoverageInfo {
+    let mir_body = tcx.optimized_mir(def_id);
+
+    let mut coverage_visitor = CoverageVisitor {
+        // num_counters always has at least the `ZERO` counter.
+        info: CoverageInfo { num_counters: 1, num_expressions: 0 },
+        add_missing_operands: false,
+    };
+
+    coverage_visitor.visit_body(mir_body);
+
+    coverage_visitor.add_missing_operands = true;
+    coverage_visitor.visit_body(mir_body);
+
+    coverage_visitor.info
+}
diff --git a/compiler/rustc_mir/src/transform/coverage/spans.rs b/compiler/rustc_mir/src/transform/coverage/spans.rs
new file mode 100644
index 0000000..cda4fc1
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/coverage/spans.rs
@@ -0,0 +1,753 @@
+use super::debug::term_type;
+use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
+
+use crate::util::spanview::source_range_no_file;
+
+use rustc_data_structures::graph::WithNumNodes;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::{
+    self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
+    TerminatorKind,
+};
+use rustc_middle::ty::TyCtxt;
+
+use rustc_span::source_map::original_sp;
+use rustc_span::{BytePos, Span, SyntaxContext};
+
+use std::cmp::Ordering;
+
+#[derive(Debug, Copy, Clone)]
+pub(crate) enum CoverageStatement {
+    Statement(BasicBlock, Span, usize),
+    Terminator(BasicBlock, Span),
+}
+
+impl CoverageStatement {
+    pub fn format(&self, tcx: TyCtxt<'tcx>, mir_body: &'a mir::Body<'tcx>) -> String {
+        match *self {
+            Self::Statement(bb, span, stmt_index) => {
+                let stmt = &mir_body[bb].statements[stmt_index];
+                format!(
+                    "{}: @{}[{}]: {:?}",
+                    source_range_no_file(tcx, &span),
+                    bb.index(),
+                    stmt_index,
+                    stmt
+                )
+            }
+            Self::Terminator(bb, span) => {
+                let term = mir_body[bb].terminator();
+                format!(
+                    "{}: @{}.{}: {:?}",
+                    source_range_no_file(tcx, &span),
+                    bb.index(),
+                    term_type(&term.kind),
+                    term.kind
+                )
+            }
+        }
+    }
+
+    pub fn span(&self) -> &Span {
+        match self {
+            Self::Statement(_, span, _) | Self::Terminator(_, span) => span,
+        }
+    }
+}
+
+/// A BCB is deconstructed into one or more `Span`s. Each `Span` maps to a `CoverageSpan` that
+/// references the originating BCB and one or more MIR `Statement`s and/or `Terminator`s.
+/// Initially, the `Span`s come from the `Statement`s and `Terminator`s, but subsequent
+/// transforms can combine adjacent `Span`s and `CoverageSpan`s from the same BCB, merging their
+/// `CoverageStatement` vectors and extending the `Span` to cover the extent of the combined `Span`s.
+///
+/// Note: A `CoverageStatement` merged into another `CoverageSpan` may come from a `BasicBlock`
+/// that is not part of the `CoverageSpan`'s bcb if the statement was included because its `Span`
+/// matches or is subsumed by the `Span` associated with this `CoverageSpan`, and its `BasicBlock`
+/// `is_dominated_by()` the `BasicBlock`s in this `CoverageSpan`.
+#[derive(Debug, Clone)]
+pub(crate) struct CoverageSpan {
+    pub span: Span,
+    pub bcb: BasicCoverageBlock,
+    pub coverage_statements: Vec<CoverageStatement>,
+    pub is_closure: bool,
+}
+
+impl CoverageSpan {
+    pub fn for_statement(
+        statement: &Statement<'tcx>,
+        span: Span,
+        bcb: BasicCoverageBlock,
+        bb: BasicBlock,
+        stmt_index: usize,
+    ) -> Self {
+        let is_closure = match statement.kind {
+            StatementKind::Assign(box (
+                _,
+                Rvalue::Aggregate(box AggregateKind::Closure(_, _), _),
+            )) => true,
+            _ => false,
+        };
+
+        Self {
+            span,
+            bcb,
+            coverage_statements: vec![CoverageStatement::Statement(bb, span, stmt_index)],
+            is_closure,
+        }
+    }
+
+    pub fn for_terminator(span: Span, bcb: BasicCoverageBlock, bb: BasicBlock) -> Self {
+        Self {
+            span,
+            bcb,
+            coverage_statements: vec![CoverageStatement::Terminator(bb, span)],
+            is_closure: false,
+        }
+    }
+
+    pub fn merge_from(&mut self, mut other: CoverageSpan) {
+        debug_assert!(self.is_mergeable(&other));
+        self.span = self.span.to(other.span);
+        if other.is_closure {
+            self.is_closure = true;
+        }
+        self.coverage_statements.append(&mut other.coverage_statements);
+    }
+
+    pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
+        self.coverage_statements.retain(|covstmt| covstmt.span().hi() <= cutoff_pos);
+        if let Some(highest_covstmt) =
+            self.coverage_statements.iter().max_by_key(|covstmt| covstmt.span().hi())
+        {
+            self.span = self.span.with_hi(highest_covstmt.span().hi());
+        }
+    }
+
+    #[inline]
+    pub fn is_mergeable(&self, other: &Self) -> bool {
+        self.is_in_same_bcb(other) && !(self.is_closure || other.is_closure)
+    }
+
+    #[inline]
+    pub fn is_in_same_bcb(&self, other: &Self) -> bool {
+        self.bcb == other.bcb
+    }
+
+    pub fn format(&self, tcx: TyCtxt<'tcx>, mir_body: &'a mir::Body<'tcx>) -> String {
+        format!(
+            "{}\n    {}",
+            source_range_no_file(tcx, &self.span),
+            self.format_coverage_statements(tcx, mir_body).replace("\n", "\n    "),
+        )
+    }
+
+    pub fn format_coverage_statements(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        mir_body: &'a mir::Body<'tcx>,
+    ) -> String {
+        let mut sorted_coverage_statements = self.coverage_statements.clone();
+        sorted_coverage_statements.sort_unstable_by_key(|covstmt| match *covstmt {
+            CoverageStatement::Statement(bb, _, index) => (bb, index),
+            CoverageStatement::Terminator(bb, _) => (bb, usize::MAX),
+        });
+        sorted_coverage_statements
+            .iter()
+            .map(|covstmt| covstmt.format(tcx, mir_body))
+            .collect::<Vec<_>>()
+            .join("\n")
+    }
+}
+
+/// Converts the initial set of `CoverageSpan`s (one per MIR `Statement` or `Terminator`) into a
+/// minimal set of `CoverageSpan`s, using the BCB CFG to determine where it is safe and useful to:
+///
+///  * Remove duplicate source code coverage regions
+///  * Merge spans that represent continuous (both in source code and control flow), non-branching
+///    execution
+///  * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures)
+pub struct CoverageSpans<'a, 'tcx> {
+    /// The MIR, used to look up `BasicBlockData`.
+    mir_body: &'a mir::Body<'tcx>,
+
+    /// A `Span` covering the function body of the MIR (typically from left curly brace to right
+    /// curly brace).
+    body_span: Span,
+
+    /// The BasicCoverageBlock Control Flow Graph (BCB CFG).
+    basic_coverage_blocks: &'a CoverageGraph,
+
+    /// The initial set of `CoverageSpan`s, sorted by `Span` (`lo` and `hi`) and by relative
+    /// dominance between the `BasicCoverageBlock`s of equal `Span`s.
+    sorted_spans_iter: Option<std::vec::IntoIter<CoverageSpan>>,
+
+    /// The current `CoverageSpan` to compare to its `prev`, to possibly merge, discard, force the
+    /// discard of the `prev` (and/or `pending_dups`), or keep both (with `prev` moved to
+    /// `pending_dups`). If `curr` is not discarded or merged, it becomes `prev` for the next
+    /// iteration.
+    some_curr: Option<CoverageSpan>,
+
+    /// The original `span` for `curr`, in case the `curr` span is modified.
+    curr_original_span: Span,
+
+    /// The CoverageSpan from a prior iteration; typically assigned from that iteration's `curr`.
+    /// If that `curr` was discarded, `prev` retains its value from the previous iteration.
+    some_prev: Option<CoverageSpan>,
+
+    /// Assigned from `curr_original_span` from the previous iteration.
+    prev_original_span: Span,
+
+    /// One or more `CoverageSpan`s with the same `Span` but different `BasicCoverageBlock`s, and
+    /// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
+    /// If a new `curr` span also fits this criterion (compared to an existing list of
+    /// `pending_dups`), that `curr` `CoverageSpan` moves to `prev` before possibly being added to
+    /// the `pending_dups` list, on the next iteration. As a result, if `prev` and `pending_dups`
+    /// have the same `Span`, the criteria for `pending_dups` holds for `prev` as well: a `prev`
+    /// with a matching `Span` does not dominate any `pending_dup` and no `pending_dup` dominates a
+    /// `prev` with a matching `Span`.
+    pending_dups: Vec<CoverageSpan>,
+
+    /// The final `CoverageSpan`s to add to the coverage map. A `Counter` or `Expression`
+    /// will also be injected into the MIR for each `CoverageSpan`.
+    refined_spans: Vec<CoverageSpan>,
+}
+
+impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
+    pub(crate) fn generate_coverage_spans(
+        mir_body: &'a mir::Body<'tcx>,
+        body_span: Span,
+        basic_coverage_blocks: &'a CoverageGraph,
+    ) -> Vec<CoverageSpan> {
+        let mut coverage_spans = CoverageSpans {
+            mir_body,
+            body_span,
+            basic_coverage_blocks,
+            sorted_spans_iter: None,
+            refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
+            some_curr: None,
+            curr_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
+            some_prev: None,
+            prev_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
+            pending_dups: Vec::new(),
+        };
+
+        let sorted_spans = coverage_spans.mir_to_initial_sorted_coverage_spans();
+
+        coverage_spans.sorted_spans_iter = Some(sorted_spans.into_iter());
+        coverage_spans.some_prev = coverage_spans.sorted_spans_iter.as_mut().unwrap().next();
+        coverage_spans.prev_original_span =
+            coverage_spans.some_prev.as_ref().expect("at least one span").span;
+
+        coverage_spans.to_refined_spans()
+    }
+
+    /// Generate a minimal set of `CoverageSpan`s, each representing a contiguous code region to be
+    /// counted.
+    ///
+    /// The basic steps are:
+    ///
+    /// 1. Extract an initial set of spans from the `Statement`s and `Terminator`s of each
+    ///    `BasicCoverageBlockData`.
+    /// 2. Sort the spans by span.lo() (starting position). Spans that start at the same position
+    ///    are sorted with longer spans before shorter spans; and equal spans are sorted
+    ///    (deterministically) based on "dominator" relationship (if any).
+    /// 3. Traverse the spans in sorted order to identify spans that can be dropped (for instance,
+    ///    if another span or spans are already counting the same code region), or should be merged
+    ///    into a broader combined span (because it represents a contiguous, non-branching, and
+    ///    uninterrupted region of source code).
+    ///
+    ///    Closures are exposed in their enclosing functions as `Assign` `Rvalue`s, and since
+    ///    closures have their own MIR, their `Span` in their enclosing function should be left
+    ///    "uncovered".
+    ///
+    /// Note the resulting vector of `CoverageSpan`s may not be fully sorted (and does not need
+    /// to be).
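+    //
+    // Illustrative ordering (byte positions are made up): spans starting at the same position
+    // are sorted longest first, so a span covering bytes `10..30` sorts before `10..20`, which
+    // in turn sorts before `15..18`.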
+    fn mir_to_initial_sorted_coverage_spans(&self) -> Vec<CoverageSpan> {
+        let mut initial_spans = Vec::<CoverageSpan>::with_capacity(self.mir_body.num_nodes() * 2);
+        for (bcb, bcb_data) in self.basic_coverage_blocks.iter_enumerated() {
+            for coverage_span in self.bcb_to_initial_coverage_spans(bcb, bcb_data) {
+                initial_spans.push(coverage_span);
+            }
+        }
+
+        if initial_spans.is_empty() {
+            // This can happen if, for example, the function is unreachable (contains only
+            // `BasicBlock`(s) with an `Unreachable` terminator).
+            return initial_spans;
+        }
+
+        initial_spans.sort_unstable_by(|a, b| {
+            if a.span.lo() == b.span.lo() {
+                if a.span.hi() == b.span.hi() {
+                    if a.is_in_same_bcb(b) {
+                        Some(Ordering::Equal)
+                    } else {
+                        // Sort equal spans by dominator relationship, in reverse order (so
+                        // dominators always come after the dominated equal spans). When later
+                        // comparing two spans in order, the first will either dominate the second,
+                        // or they will have no dominator relationship.
+                        self.basic_coverage_blocks.dominators().rank_partial_cmp(b.bcb, a.bcb)
+                    }
+                } else {
+                    // Sort hi() in reverse order so shorter spans are attempted after longer spans.
+                    // This guarantees that, if a `prev` span overlaps, and is not equal to, a
+                    // `curr` span, the prev span either extends further left of the curr span, or
+                    // they start at the same position and the prev span extends further right of
+                    // the end of the curr span.
+                    b.span.hi().partial_cmp(&a.span.hi())
+                }
+            } else {
+                a.span.lo().partial_cmp(&b.span.lo())
+            }
+            .unwrap()
+        });
+
+        initial_spans
+    }
+
+    /// Iterate through the sorted `CoverageSpan`s, and return the refined list of merged and
+    /// de-duplicated `CoverageSpan`s.
+    fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
+        while self.next_coverage_span() {
+            if self.curr().is_mergeable(self.prev()) {
+                debug!("  same bcb (and neither is a closure), merge with prev={:?}", self.prev());
+                let prev = self.take_prev();
+                self.curr_mut().merge_from(prev);
+            // Note that curr.span may now differ from curr_original_span
+            } else if self.prev_ends_before_curr() {
+                debug!(
+                    "  different bcbs and disjoint spans, so keep curr for next iter, and add \
+                    prev={:?}",
+                    self.prev()
+                );
+                let prev = self.take_prev();
+                self.refined_spans.push(prev);
+            } else if self.prev().is_closure {
+                // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
+                // next iter
+                debug!(
+                    "  curr overlaps a closure (prev). Drop curr and keep prev for next iter. \
+                    prev={:?}",
+                    self.prev()
+                );
+                self.discard_curr();
+            } else if self.curr().is_closure {
+                self.carve_out_span_for_closure();
+            } else if self.prev_original_span == self.curr().span {
+                // Note that this compares the new span to `prev_original_span`, which may not
+                // be the full `prev.span` (if merged during the previous iteration).
+                self.hold_pending_dups_unless_dominated();
+            } else {
+                self.cutoff_prev_at_overlapping_curr();
+            }
+        }
+
+        debug!("    AT END, adding last prev={:?}", self.prev());
+        let prev = self.take_prev();
+        let CoverageSpans {
+            mir_body, basic_coverage_blocks, pending_dups, mut refined_spans, ..
+        } = self;
+        for dup in pending_dups {
+            debug!("    ...adding at least one pending dup={:?}", dup);
+            refined_spans.push(dup);
+        }
+        refined_spans.push(prev);
+
+        // Remove `CoverageSpan`s with empty spans ONLY if the empty `CoverageSpan`'s BCB also has at
+        // least one other non-empty `CoverageSpan`.
+        let mut has_coverage = BitSet::new_empty(basic_coverage_blocks.num_nodes());
+        for covspan in &refined_spans {
+            if !covspan.span.is_empty() {
+                has_coverage.insert(covspan.bcb);
+            }
+        }
+        refined_spans.retain(|covspan| {
+            !(covspan.span.is_empty()
+                && is_goto(&basic_coverage_blocks[covspan.bcb].terminator(mir_body).kind)
+                && has_coverage.contains(covspan.bcb))
+        });
+
+        // Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage
+        // regions for the current function leave room for the closure's own coverage regions
+        // (injected separately, from the closure's own MIR).
+        refined_spans.retain(|covspan| !covspan.is_closure);
+        refined_spans
+    }
+
+    // Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
+    // the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
+    // for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
+    // merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
+    // `Statement`s and/or `Terminator`s.)
+    fn bcb_to_initial_coverage_spans(
+        &self,
+        bcb: BasicCoverageBlock,
+        bcb_data: &'a BasicCoverageBlockData,
+    ) -> Vec<CoverageSpan> {
+        bcb_data
+            .basic_blocks
+            .iter()
+            .flat_map(|&bb| {
+                let data = &self.mir_body[bb];
+                data.statements
+                    .iter()
+                    .enumerate()
+                    .filter_map(move |(index, statement)| {
+                        filtered_statement_span(statement, self.body_span).map(|span| {
+                            CoverageSpan::for_statement(statement, span, bcb, bb, index)
+                        })
+                    })
+                    .chain(
+                        filtered_terminator_span(data.terminator(), self.body_span)
+                            .map(|span| CoverageSpan::for_terminator(span, bcb, bb)),
+                    )
+            })
+            .collect()
+    }
+
+    fn curr(&self) -> &CoverageSpan {
+        self.some_curr
+            .as_ref()
+            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+    }
+
+    fn curr_mut(&mut self) -> &mut CoverageSpan {
+        self.some_curr
+            .as_mut()
+            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+    }
+
+    fn prev(&self) -> &CoverageSpan {
+        self.some_prev
+            .as_ref()
+            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+    }
+
+    fn prev_mut(&mut self) -> &mut CoverageSpan {
+        self.some_prev
+            .as_mut()
+            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+    }
+
+    fn take_prev(&mut self) -> CoverageSpan {
+        self.some_prev.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+    }
+
+    /// If there are `pending_dups` but `prev` is not a matching dup (`prev.span` doesn't match the
+    /// `pending_dups` spans), then one of the following two things happened during the previous
+    /// iteration:
+    ///   * the previous `curr` span (which is now `prev`) was not a duplicate of the pending_dups
+    ///     (in which case there should be at least two spans in `pending_dups`); or
+    ///   * the `span` of `prev` was modified by `curr_mut().merge_from(prev)` (in which case
+    ///     `pending_dups` could have as few as one span)
+    /// In either case, no more spans will match the span of `pending_dups`, so
+    /// add the `pending_dups` if they don't overlap `curr`, and clear the list.
+    fn check_pending_dups(&mut self) {
+        if let Some(dup) = self.pending_dups.last() {
+            if dup.span != self.prev().span {
+                debug!(
+                    "    SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
+                    previous iteration, or prev started a new disjoint span"
+                );
+                if dup.span.hi() <= self.curr().span.lo() {
+                    let pending_dups = self.pending_dups.split_off(0);
+                    for dup in pending_dups.into_iter() {
+                        debug!("    ...adding at least one pending={:?}", dup);
+                        self.refined_spans.push(dup);
+                    }
+                } else {
+                    self.pending_dups.clear();
+                }
+            }
+        }
+    }
+
+    /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
+    fn next_coverage_span(&mut self) -> bool {
+        if let Some(curr) = self.some_curr.take() {
+            self.some_prev = Some(curr);
+            self.prev_original_span = self.curr_original_span;
+        }
+        while let Some(curr) = self.sorted_spans_iter.as_mut().unwrap().next() {
+            debug!("FOR curr={:?}", curr);
+            if self.prev_starts_after_next(&curr) {
+                debug!(
+                    "  prev.span starts after curr.span, so curr will be dropped (skipping past \
+                    closure?); prev={:?}",
+                    self.prev()
+                );
+            } else {
+                // Save a copy of the original span for `curr` in case the `CoverageSpan` is changed
+                // by `self.curr_mut().merge_from(prev)`.
+                self.curr_original_span = curr.span;
+                self.some_curr.replace(curr);
+                self.check_pending_dups();
+                return true;
+            }
+        }
+        false
+    }
+
+    /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
+    /// `curr` coverage span.
+    fn discard_curr(&mut self) {
+        self.some_curr = None;
+    }
+
+    /// Returns true if the curr span should be skipped because prev has already advanced beyond the
+    /// end of curr. This can only happen if a prior iteration updated `prev` to skip past a region
+    /// of code, such as skipping past a closure.
+    fn prev_starts_after_next(&self, next_curr: &CoverageSpan) -> bool {
+        self.prev().span.lo() > next_curr.span.lo()
+    }
+
+    /// Returns true if the curr span starts past the end of the prev span, which means they don't
+    /// overlap, so we now know the prev can be added to the refined coverage spans.
+    fn prev_ends_before_curr(&self) -> bool {
+        self.prev().span.hi() <= self.curr().span.lo()
+    }
+
+    /// If `prev`'s span extends left of the closure (`curr`), carve out the closure's
+    /// span from `prev`'s span. (The closure's coverage counters will be injected when
+    /// processing the closure's own MIR.) Add the portion of the span to the left of the
+    /// closure; and if the span extends to the right of the closure, update `prev` to
+    /// that portion of the span. For any `pending_dups`, repeat the same process.
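+    //
+    // Illustrative sketch (positions are made up, not from the original comments):
+    //
+    //     prev:   |------------- prev.span -------------|
+    //     curr:             |---- closure ----|
+    //     result: |--pre---|                   |--post--|   (`post` becomes the new `prev`)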
+    fn carve_out_span_for_closure(&mut self) {
+        let curr_span = self.curr().span;
+        let left_cutoff = curr_span.lo();
+        let right_cutoff = curr_span.hi();
+        let has_pre_closure_span = self.prev().span.lo() < right_cutoff;
+        let has_post_closure_span = self.prev().span.hi() > right_cutoff;
+        let mut pending_dups = self.pending_dups.split_off(0);
+        if has_pre_closure_span {
+            let mut pre_closure = self.prev().clone();
+            pre_closure.span = pre_closure.span.with_hi(left_cutoff);
+            debug!("  prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);
+            if !pending_dups.is_empty() {
+                for mut dup in pending_dups.iter().cloned() {
+                    dup.span = dup.span.with_hi(left_cutoff);
+                    debug!("    ...and at least one pre_closure dup={:?}", dup);
+                    self.refined_spans.push(dup);
+                }
+            }
+            self.refined_spans.push(pre_closure);
+        }
+        if has_post_closure_span {
+            // Update prev.span to start after the closure (and discard curr)
+            self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
+            self.prev_original_span = self.prev().span;
+            for dup in pending_dups.iter_mut() {
+                dup.span = dup.span.with_lo(right_cutoff);
+            }
+            self.pending_dups.append(&mut pending_dups);
+            self.discard_curr(); // since self.prev() was already updated
+        } else {
+            pending_dups.clear();
+        }
+    }
+
+    /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
+    /// `pending_dups` spans, if any); but keep in mind, `prev.span` may start at a `Span.lo()` that
+    /// is less than (further left of) `prev_original_span.lo()`.
+    ///
+    /// When two `CoverageSpan`s have the same `Span`, dominated spans can be discarded; but if
+    /// neither `CoverageSpan` dominates the other, both (or possibly more than two) are held,
+    /// until their disposition is determined. In this latter case, the `prev` dup is moved into
+    /// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
+    fn hold_pending_dups_unless_dominated(&mut self) {
+        // Equal coverage spans are ordered by dominators before dominated (if any), so it should be
+        // impossible for `curr` to dominate any previous `CoverageSpan`.
+        debug_assert!(!self.span_bcb_is_dominated_by(self.prev(), self.curr()));
+
+        let initial_pending_count = self.pending_dups.len();
+        if initial_pending_count > 0 {
+            let mut pending_dups = self.pending_dups.split_off(0);
+            pending_dups.retain(|dup| !self.span_bcb_is_dominated_by(self.curr(), dup));
+            self.pending_dups.append(&mut pending_dups);
+            if self.pending_dups.len() < initial_pending_count {
+                debug!(
+                    "  discarded {} of {} pending_dups that dominated curr",
+                    initial_pending_count - self.pending_dups.len(),
+                    initial_pending_count
+                );
+            }
+        }
+
+        if self.span_bcb_is_dominated_by(self.curr(), self.prev()) {
+            debug!(
+                "  different bcbs but SAME spans, and prev dominates curr. Discard prev={:?}",
+                self.prev()
+            );
+            self.cutoff_prev_at_overlapping_curr();
+        // If one span dominates the other, associate the span with the code from the dominated
+        // block only (`curr`), and discard the overlapping portion of the `prev` span. (Note
+        // that if `prev.span` is wider than `prev_original_span`, a `CoverageSpan` will still
+        // be created for `prev`s block, for the non-overlapping portion, left of `curr.span`.)
+        //
+        // For example:
+        //     match somenum {
+        //         x if x < 1 => { ... }
+        //     }...
+        //
+        // The span for the first `x` is referenced by both the pattern block (every time it is
+        // evaluated) and the arm code (only when matched). The counter will be applied only to
+        // the dominated block. This allows coverage to track and highlight things like the
+        // assignment of `x` above, if the branch is matched, making `x` available to the arm
+        // code; and to track and highlight the question mark `?` "try" operator at the end of
+        // a function call returning a `Result`, so the `?` is covered when the function returns
+        // an `Err`, and not counted as covered if the function always returns `Ok`.
+        } else {
+            // Save `prev` in `pending_dups`. (`curr` will become `prev` in the next iteration.)
+            // If the `curr` CoverageSpan is later discarded, `pending_dups` can be discarded as
+            // well; but if `curr` is added to refined_spans, the `pending_dups` will also be added.
+            debug!(
+                "  different bcbs but SAME spans, and neither dominates, so keep curr for \
+                next iter, and, pending upcoming spans (unless overlapping) add prev={:?}",
+                self.prev()
+            );
+            let prev = self.take_prev();
+            self.pending_dups.push(prev);
+        }
+    }
+
+    /// `curr` overlaps `prev`. If `prev`'s span extends left of `curr`'s span, keep _only_
+    /// statements that end before `curr.lo()` (if any), and add the portion of the
+    /// combined span for those statements. Any other statements have overlapping spans
+    /// that can be ignored because `curr` and/or other upcoming statements/spans inside
+    /// the overlap area will produce their own counters. This disambiguation process
+    /// avoids injecting multiple counters for overlapping spans, and the potential for
+    /// double-counting.
+    fn cutoff_prev_at_overlapping_curr(&mut self) {
+        debug!(
+            "  different bcbs, overlapping spans, so ignore/drop pending and only add prev \
+            if it has statements that end before curr; prev={:?}",
+            self.prev()
+        );
+        if self.pending_dups.is_empty() {
+            let curr_span = self.curr().span;
+            self.prev_mut().cutoff_statements_at(curr_span.lo());
+            if self.prev().coverage_statements.is_empty() {
+                debug!("  ... no non-overlapping statements to add");
+            } else {
+                debug!("  ... adding modified prev={:?}", self.prev());
+                let prev = self.take_prev();
+                self.refined_spans.push(prev);
+            }
+        } else {
+            // with `pending_dups`, `prev` cannot have any statements that don't overlap
+            self.pending_dups.clear();
+        }
+    }
+
+    fn span_bcb_is_dominated_by(&self, covspan: &CoverageSpan, dom_covspan: &CoverageSpan) -> bool {
+        self.basic_coverage_blocks.is_dominated_by(covspan.bcb, dom_covspan.bcb)
+    }
+}
+
+fn filtered_statement_span(statement: &'a Statement<'tcx>, body_span: Span) -> Option<Span> {
+    match statement.kind {
+        // These statements have spans that are often outside the scope of the executed source code
+        // for their parent `BasicBlock`.
+        StatementKind::StorageLive(_)
+        | StatementKind::StorageDead(_)
+        // `Coverage` statements should not be encountered here, but if one is, don't inject coverage for it
+        | StatementKind::Coverage(_)
+        // Ignore `Nop`s
+        | StatementKind::Nop => None,
+
+        // FIXME(#78546): MIR InstrumentCoverage - Can the source_info.span for `FakeRead`
+        // statements be more consistent?
+        //
+        // FakeReadCause::ForGuardBinding, in this example:
+        //     match somenum {
+        //         x if x < 1 => { ... }
+        //     }...
+        // The BasicBlock within the match arm code included one of these statements, but the span
+        // for it covered the `1` in this source. The actual statements have nothing to do with that
+        // source span:
+        //     FakeRead(ForGuardBinding, _4);
+        // where `_4` is:
+        //     _4 = &_1; (at the span for the first `x`)
+        // and `_1` is the `Place` for `somenum`.
+        //
+        // If and when the Issue is resolved, remove this special case match pattern:
+        StatementKind::FakeRead(cause, _) if cause == FakeReadCause::ForGuardBinding => None,
+
+        // Retain spans from all other statements
+        StatementKind::FakeRead(_, _) // Not including `ForGuardBinding`
+        | StatementKind::Assign(_)
+        | StatementKind::SetDiscriminant { .. }
+        | StatementKind::LlvmInlineAsm(_)
+        | StatementKind::Retag(_, _)
+        | StatementKind::AscribeUserType(_, _) => {
+            Some(function_source_span(statement.source_info.span, body_span))
+        }
+    }
+}
+
+fn filtered_terminator_span(terminator: &'a Terminator<'tcx>, body_span: Span) -> Option<Span> {
+    match terminator.kind {
+        // These terminators have spans that don't positively contribute to computing a reasonable
+        // span of actually executed source code. (For example, SwitchInt terminators extracted from
+        // an `if condition { block }` have a span that includes the executed block, if true,
+        // but for coverage, the code region executed, up to *and* through the SwitchInt,
+        // actually stops before the if's block.)
+        TerminatorKind::Unreachable // Unreachable blocks are not connected to the MIR CFG
+        | TerminatorKind::Assert { .. }
+        | TerminatorKind::Drop { .. }
+        | TerminatorKind::DropAndReplace { .. }
+        | TerminatorKind::SwitchInt { .. }
+        // For `FalseEdge`, only the `real` branch is taken, so it is similar to a `Goto`.
+        // FIXME(richkadel): Note that `Goto` was moved to its own match arm, for the reasons
+        // described below. Add tests to confirm whether or not similar cases also apply to
+        // `FalseEdge`.
+        | TerminatorKind::FalseEdge { .. } => None,
+
+        // FIXME(#78542): Can spans for `TerminatorKind::Goto` be improved to avoid special cases?
+        //
+        // `Goto`s are often the targets of `SwitchInt` branches, and certain important
+        // optimizations to replace some `Counter`s with `Expression`s require a separate
+        // `BasicCoverageBlock` for each branch, to support the `Counter`, when needed.
+        //
+        // Also, some test cases showed that `Goto` terminators, and to some degree their `Span`s,
+        // provided useful context for coverage, such as to count and show when `if` blocks
+        // _without_ `else` blocks execute the `false` case (counting when the body of the `if`
+        // was _not_ taken). In these cases, the `Goto` span is ultimately given a `CoverageSpan`
+        // of 1 character, at the end of its original `Span`.
+        //
+        // However, in other cases, a visible `CoverageSpan` is not wanted, but the `Goto`
+        // block must still be counted (for example, to contribute its count to an `Expression`
+        // that reports the execution count for some other block). In these cases, the code region
+        // is set to `None`. (See `Instrumentor::is_code_region_redundant()`.)
+        TerminatorKind::Goto { .. } => {
+            Some(function_source_span(terminator.source_info.span.shrink_to_hi(), body_span))
+        }
+
+        // Retain spans from all other terminators
+        TerminatorKind::Resume
+        | TerminatorKind::Abort
+        | TerminatorKind::Return
+        | TerminatorKind::Call { .. }
+        | TerminatorKind::Yield { .. }
+        | TerminatorKind::GeneratorDrop
+        | TerminatorKind::FalseUnwind { .. }
+        | TerminatorKind::InlineAsm { .. } => {
+            Some(function_source_span(terminator.source_info.span, body_span))
+        }
+    }
+}
+
+#[inline]
+fn function_source_span(span: Span, body_span: Span) -> Span {
+    let span = original_sp(span, body_span).with_ctxt(SyntaxContext::root());
+    if body_span.contains(span) { span } else { body_span }
+}
+
+#[inline(always)]
+fn is_goto(term_kind: &TerminatorKind<'tcx>) -> bool {
+    match term_kind {
+        TerminatorKind::Goto { .. } => true,
+        _ => false,
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/deaggregator.rs b/compiler/rustc_mir/src/transform/deaggregator.rs
index 66989a9..5bd7256 100644
--- a/compiler/rustc_mir/src/transform/deaggregator.rs
+++ b/compiler/rustc_mir/src/transform/deaggregator.rs
@@ -1,4 +1,4 @@
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use crate::util::expand_aggregate;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
@@ -6,7 +6,7 @@
 pub struct Deaggregator;
 
 impl<'tcx> MirPass<'tcx> for Deaggregator {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
         let local_decls = &*local_decls;
         for bb in basic_blocks {
diff --git a/compiler/rustc_mir/src/transform/dest_prop.rs b/compiler/rustc_mir/src/transform/dest_prop.rs
index 97d2617..46de5db 100644
--- a/compiler/rustc_mir/src/transform/dest_prop.rs
+++ b/compiler/rustc_mir/src/transform/dest_prop.rs
@@ -8,7 +8,7 @@
 //! inside a single block to shuffle a value around unnecessarily.
 //!
 //! LLVM by itself is not good enough at eliminating these redundant copies (eg. see
-//! https://github.com/rust-lang/rust/issues/32966), so this leaves some performance on the table
+//! <https://github.com/rust-lang/rust/issues/32966>), so this leaves some performance on the table
 //! that we can regain by implementing an optimization for removing these assign statements in rustc
 //! itself. When this optimization runs fast enough, it can also speed up the constant evaluation
 //! and code generation phases of rustc due to the reduced number of statements and locals.
@@ -99,7 +99,7 @@
 use crate::dataflow::impls::{MaybeInitializedLocals, MaybeLiveLocals};
 use crate::dataflow::Analysis;
 use crate::{
-    transform::{MirPass, MirSource},
+    transform::MirPass,
     util::{dump_mir, PassWhere},
 };
 use itertools::Itertools;
@@ -126,16 +126,18 @@
 pub struct DestinationPropagation;
 
 impl<'tcx> MirPass<'tcx> for DestinationPropagation {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // Only run at mir-opt-level=2 or higher for now (we don't fix up debuginfo and remove
         // storage statements at the moment).
         if tcx.sess.opts.debugging_opts.mir_opt_level <= 1 {
             return;
         }
 
+        let def_id = body.source.def_id();
+
         let candidates = find_candidates(tcx, body);
         if candidates.is_empty() {
-            debug!("{:?}: no dest prop candidates, done", source.def_id());
+            debug!("{:?}: no dest prop candidates, done", def_id);
             return;
         }
 
@@ -152,7 +154,7 @@
         let relevant = relevant_locals.count();
         debug!(
             "{:?}: {} locals ({} relevant), {} blocks",
-            source.def_id(),
+            def_id,
             body.local_decls.len(),
             relevant,
             body.basic_blocks().len()
@@ -160,23 +162,21 @@
         if relevant > MAX_LOCALS {
             warn!(
                 "too many candidate locals in {:?} ({}, max is {}), not optimizing",
-                source.def_id(),
-                relevant,
-                MAX_LOCALS
+                def_id, relevant, MAX_LOCALS
             );
             return;
         }
         if body.basic_blocks().len() > MAX_BLOCKS {
             warn!(
                 "too many blocks in {:?} ({}, max is {}), not optimizing",
-                source.def_id(),
+                def_id,
                 body.basic_blocks().len(),
                 MAX_BLOCKS
             );
             return;
         }
 
-        let mut conflicts = Conflicts::build(tcx, body, source, &relevant_locals);
+        let mut conflicts = Conflicts::build(tcx, body, &relevant_locals);
 
         let mut replacements = Replacements::new(body.local_decls.len());
         for candidate @ CandidateAssignment { dest, src, loc } in candidates {
@@ -192,7 +192,7 @@
             }
 
             if !tcx.consider_optimizing(|| {
-                format!("DestinationPropagation {:?} {:?}", source.def_id(), candidate)
+                format!("DestinationPropagation {:?} {:?}", def_id, candidate)
             }) {
                 break;
             }
@@ -398,7 +398,6 @@
     fn build<'tcx>(
         tcx: TyCtxt<'tcx>,
         body: &'_ Body<'tcx>,
-        source: MirSource<'tcx>,
         relevant_locals: &'a BitSet<Local>,
     ) -> Self {
         // We don't have to look out for locals that have their address taken, since
@@ -409,69 +408,57 @@
             body.local_decls.len(),
         );
 
-        let def_id = source.def_id();
         let mut init = MaybeInitializedLocals
-            .into_engine(tcx, body, def_id)
+            .into_engine(tcx, body)
             .iterate_to_fixpoint()
             .into_results_cursor(body);
-        let mut live = MaybeLiveLocals
-            .into_engine(tcx, body, def_id)
-            .iterate_to_fixpoint()
-            .into_results_cursor(body);
+        let mut live =
+            MaybeLiveLocals.into_engine(tcx, body).iterate_to_fixpoint().into_results_cursor(body);
 
         let mut reachable = None;
-        dump_mir(
-            tcx,
-            None,
-            "DestinationPropagation-dataflow",
-            &"",
-            source,
-            body,
-            |pass_where, w| {
-                let reachable =
-                    reachable.get_or_insert_with(|| traversal::reachable_as_bitset(body));
+        dump_mir(tcx, None, "DestinationPropagation-dataflow", &"", body, |pass_where, w| {
+            let reachable = reachable.get_or_insert_with(|| traversal::reachable_as_bitset(body));
 
-                match pass_where {
-                    PassWhere::BeforeLocation(loc) if reachable.contains(loc.block) => {
-                        init.seek_before_primary_effect(loc);
-                        live.seek_after_primary_effect(loc);
+            match pass_where {
+                PassWhere::BeforeLocation(loc) if reachable.contains(loc.block) => {
+                    init.seek_before_primary_effect(loc);
+                    live.seek_after_primary_effect(loc);
 
-                        writeln!(w, "        // init: {:?}", init.get())?;
-                        writeln!(w, "        // live: {:?}", live.get())?;
-                    }
-                    PassWhere::AfterTerminator(bb) if reachable.contains(bb) => {
-                        let loc = body.terminator_loc(bb);
-                        init.seek_after_primary_effect(loc);
-                        live.seek_before_primary_effect(loc);
+                    writeln!(w, "        // init: {:?}", init.get())?;
+                    writeln!(w, "        // live: {:?}", live.get())?;
+                }
+                PassWhere::AfterTerminator(bb) if reachable.contains(bb) => {
+                    let loc = body.terminator_loc(bb);
+                    init.seek_after_primary_effect(loc);
+                    live.seek_before_primary_effect(loc);
 
-                        writeln!(w, "        // init: {:?}", init.get())?;
-                        writeln!(w, "        // live: {:?}", live.get())?;
-                    }
-
-                    PassWhere::BeforeBlock(bb) if reachable.contains(bb) => {
-                        init.seek_to_block_start(bb);
-                        live.seek_to_block_start(bb);
-
-                        writeln!(w, "    // init: {:?}", init.get())?;
-                        writeln!(w, "    // live: {:?}", live.get())?;
-                    }
-
-                    PassWhere::BeforeCFG | PassWhere::AfterCFG | PassWhere::AfterLocation(_) => {}
-
-                    PassWhere::BeforeLocation(_) | PassWhere::AfterTerminator(_) => {
-                        writeln!(w, "        // init: <unreachable>")?;
-                        writeln!(w, "        // live: <unreachable>")?;
-                    }
-
-                    PassWhere::BeforeBlock(_) => {
-                        writeln!(w, "    // init: <unreachable>")?;
-                        writeln!(w, "    // live: <unreachable>")?;
-                    }
+                    writeln!(w, "        // init: {:?}", init.get())?;
+                    writeln!(w, "        // live: {:?}", live.get())?;
                 }
 
-                Ok(())
-            },
-        );
+                PassWhere::BeforeBlock(bb) if reachable.contains(bb) => {
+                    init.seek_to_block_start(bb);
+                    live.seek_to_block_start(bb);
+
+                    writeln!(w, "    // init: {:?}", init.get())?;
+                    writeln!(w, "    // live: {:?}", live.get())?;
+                }
+
+                PassWhere::BeforeCFG | PassWhere::AfterCFG | PassWhere::AfterLocation(_) => {}
+
+                PassWhere::BeforeLocation(_) | PassWhere::AfterTerminator(_) => {
+                    writeln!(w, "        // init: <unreachable>")?;
+                    writeln!(w, "        // live: <unreachable>")?;
+                }
+
+                PassWhere::BeforeBlock(_) => {
+                    writeln!(w, "    // init: <unreachable>")?;
+                    writeln!(w, "    // live: <unreachable>")?;
+                }
+            }
+
+            Ok(())
+        });
 
         let mut this = Self {
             relevant_locals,
diff --git a/compiler/rustc_mir/src/transform/dump_mir.rs b/compiler/rustc_mir/src/transform/dump_mir.rs
index 5ce6f4f..5b6edf1 100644
--- a/compiler/rustc_mir/src/transform/dump_mir.rs
+++ b/compiler/rustc_mir/src/transform/dump_mir.rs
@@ -5,7 +5,7 @@
 use std::fs::File;
 use std::io;
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use crate::util as mir_util;
 use rustc_middle::mir::Body;
 use rustc_middle::ty::TyCtxt;
@@ -18,7 +18,7 @@
         Cow::Borrowed(self.0)
     }
 
-    fn run_pass(&self, _tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, _body: &mut Body<'tcx>) {}
+    fn run_pass(&self, _tcx: TyCtxt<'tcx>, _body: &mut Body<'tcx>) {}
 }
 
 pub struct Disambiguator {
@@ -36,17 +36,15 @@
     tcx: TyCtxt<'tcx>,
     pass_num: &dyn fmt::Display,
     pass_name: &str,
-    source: MirSource<'tcx>,
     body: &Body<'tcx>,
     is_after: bool,
 ) {
-    if mir_util::dump_enabled(tcx, pass_name, source.def_id()) {
+    if mir_util::dump_enabled(tcx, pass_name, body.source.def_id()) {
         mir_util::dump_mir(
             tcx,
             Some(pass_num),
             pass_name,
             &Disambiguator { is_after },
-            source,
             body,
             |_, _| Ok(()),
         );
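
A recurring change across the files above is that `MirSource` is no longer passed around separately: a MIR `Body` now carries its own source, so passes, dataflow engines, and `dump_mir` recover the `DefId` from `body.source` instead of taking an extra argument. A minimal standalone sketch of that shape, using hypothetical stand-in types rather than the real rustc definitions:

// Hypothetical stand-ins mirroring the refactor: the body owns its source,
// so a pass only needs the body itself (the tcx argument is elided here).
#[derive(Clone, Copy, Debug)]
struct DefId(u32);

#[derive(Clone, Copy, Debug)]
struct MirSource {
    def_id: DefId,
}

struct Body {
    source: MirSource,
}

trait MirPass {
    // Previously: run_pass(&self, tcx, source: MirSource, body: &mut Body).
    fn run_pass(&self, body: &mut Body);
}

struct Marker;

impl MirPass for Marker {
    fn run_pass(&self, body: &mut Body) {
        // The def id is read off the body rather than a separate parameter.
        let _def_id = body.source.def_id;
    }
}

fn main() {
    Marker.run_pass(&mut Body { source: MirSource { def_id: DefId(0) } });
}
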
diff --git a/compiler/rustc_mir/src/transform/early_otherwise_branch.rs b/compiler/rustc_mir/src/transform/early_otherwise_branch.rs
index ba64e6c..f97dcf4 100644
--- a/compiler/rustc_mir/src/transform/early_otherwise_branch.rs
+++ b/compiler/rustc_mir/src/transform/early_otherwise_branch.rs
@@ -1,10 +1,7 @@
-use crate::{
-    transform::{MirPass, MirSource},
-    util::patch::MirPatch,
-};
+use crate::{transform::MirPass, util::patch::MirPatch};
 use rustc_middle::mir::*;
 use rustc_middle::ty::{Ty, TyCtxt};
-use std::{borrow::Cow, fmt::Debug};
+use std::fmt::Debug;
 
 use super::simplify::simplify_cfg;
 
@@ -28,11 +25,11 @@
 pub struct EarlyOtherwiseBranch;
 
 impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         if tcx.sess.opts.debugging_opts.mir_opt_level < 2 {
             return;
         }
-        trace!("running EarlyOtherwiseBranch on {:?}", source);
+        trace!("running EarlyOtherwiseBranch on {:?}", body.source);
         // we are only interested in this bb if the terminator is a switchInt
         let bbs_with_switch =
             body.basic_blocks().iter_enumerated().filter(|(_, bb)| is_switch(bb.terminator()));
@@ -98,15 +95,17 @@
                 StatementKind::Assign(box (Place::from(not_equal_temp), not_equal_rvalue)),
             );
 
-            let (mut targets_to_jump_to, values_to_jump_to): (Vec<_>, Vec<_>) = opt_to_apply
+            let new_targets = opt_to_apply
                 .infos
                 .iter()
                 .flat_map(|x| x.second_switch_info.targets_with_values.iter())
-                .cloned()
-                .unzip();
+                .cloned();
 
-            // add otherwise case in the end
-            targets_to_jump_to.push(opt_to_apply.infos[0].first_switch_info.otherwise_bb);
+            let targets = SwitchTargets::new(
+                new_targets,
+                opt_to_apply.infos[0].first_switch_info.otherwise_bb,
+            );
+
             // new block that jumps to the correct discriminant case. This block is switched to if the discriminants are equal
             let new_switch_data = BasicBlockData::new(Some(Terminator {
                 source_info: opt_to_apply.infos[0].second_switch_info.discr_source_info,
@@ -114,8 +113,7 @@
                     // the first and second discriminants are equal, so just pick one
                     discr: Operand::Copy(first_descriminant_place),
                     switch_ty: discr_type,
-                    values: Cow::from(values_to_jump_to),
-                    targets: targets_to_jump_to,
+                    targets,
                 },
             }));
 
@@ -179,7 +177,7 @@
     /// The basic block that the otherwise branch points to
     otherwise_bb: BasicBlock,
     /// Target along with the value being branched from. Otherwise is not included
-    targets_with_values: Vec<(BasicBlock, u128)>,
+    targets_with_values: Vec<(u128, BasicBlock)>,
     discr_source_info: SourceInfo,
     /// The place of the discriminant used in the switch
     discr_used_in_switch: Place<'tcx>,
@@ -214,7 +212,7 @@
         let discr = self.find_switch_discriminant_info(bb, switch)?;
 
         // go through each target, finding a discriminant read, and a switch
-        let results = discr.targets_with_values.iter().map(|(target, value)| {
+        let results = discr.targets_with_values.iter().map(|(value, target)| {
             self.find_discriminant_switch_pairing(&discr, target.clone(), value.clone())
         });
 
@@ -256,7 +254,7 @@
             }
 
             // check that the value being matched on is the same.
-            if this_bb_discr_info.targets_with_values.iter().find(|x| x.1 == value).is_none() {
+            if this_bb_discr_info.targets_with_values.iter().find(|x| x.0 == value).is_none() {
                 trace!("NO: values being matched on are not the same");
                 return None;
             }
@@ -273,7 +271,7 @@
             //  ```
             // We check this by seeing that the value of the first discriminant is the only other discriminant value being used as a target in the second switch
             if !(this_bb_discr_info.targets_with_values.len() == 1
-                && this_bb_discr_info.targets_with_values[0].1 == value)
+                && this_bb_discr_info.targets_with_values[0].0 == value)
             {
                 trace!(
                     "NO: The second switch did not have only 1 target (besides otherwise) that had the same value as the value from the first switch that got us here"
@@ -299,18 +297,14 @@
         switch: &Terminator<'tcx>,
     ) -> Option<SwitchDiscriminantInfo<'tcx>> {
         match &switch.kind {
-            TerminatorKind::SwitchInt { discr, targets, values, .. } => {
+            TerminatorKind::SwitchInt { discr, targets, .. } => {
                 let discr_local = discr.place()?.as_local()?;
                 // the declaration of the discriminant read. Place of this read is being used in the switch
                 let discr_decl = &self.body.local_decls()[discr_local];
                 let discr_ty = discr_decl.ty;
                 // the otherwise target lies as the last element
-                let otherwise_bb = targets.get(values.len())?.clone();
-                let targets_with_values = targets
-                    .iter()
-                    .zip(values.iter())
-                    .map(|(t, v)| (t.clone(), v.clone()))
-                    .collect();
+                let otherwise_bb = targets.otherwise();
+                let targets_with_values = targets.iter().collect();
 
                 // find the place of the adt where the discriminant is being read from
                 // assume this is the last statement of the block
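
For context on the pass above: `EarlyOtherwiseBranch` looks for a switch on one discriminant whose targets immediately switch on a second discriminant with the same value and a shared otherwise block, and merges the two behind a single equality test. A hedged example of surface code that lowers to roughly that MIR shape (the function is illustrative, not taken from the test suite):

// Two nested `SwitchInt`s on the discriminants of `x` and `y`; both fall
// through to the same default arm unless the variants match, which is the
// pattern the pass tries to collapse into one comparison plus one switch.
fn merge(x: Option<u32>, y: Option<u32>) -> u32 {
    match (x, y) {
        (Some(a), Some(b)) => a + b,
        _ => 0,
    }
}

fn main() {
    assert_eq!(merge(Some(1), Some(2)), 3);
    assert_eq!(merge(Some(1), None), 0);
    assert_eq!(merge(None, Some(2)), 0);
}
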
diff --git a/compiler/rustc_mir/src/transform/elaborate_drops.rs b/compiler/rustc_mir/src/transform/elaborate_drops.rs
index a8b2ee5..3d435f6 100644
--- a/compiler/rustc_mir/src/transform/elaborate_drops.rs
+++ b/compiler/rustc_mir/src/transform/elaborate_drops.rs
@@ -5,12 +5,11 @@
 use crate::dataflow::MoveDataParamEnv;
 use crate::dataflow::{on_all_children_bits, on_all_drop_children_bits};
 use crate::dataflow::{Analysis, ResultsCursor};
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use crate::util::elaborate_drops::{elaborate_drop, DropFlagState, Unwind};
 use crate::util::elaborate_drops::{DropElaborator, DropFlagMode, DropStyle};
 use crate::util::patch::MirPatch;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_hir as hir;
 use rustc_index::bit_set::BitSet;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, TyCtxt};
@@ -21,11 +20,11 @@
 pub struct ElaborateDrops;
 
 impl<'tcx> MirPass<'tcx> for ElaborateDrops {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        debug!("elaborate_drops({:?} @ {:?})", src, body.span);
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        debug!("elaborate_drops({:?} @ {:?})", body.source, body.span);
 
-        let def_id = src.def_id();
-        let param_env = tcx.param_env_reveal_all_normalized(src.def_id());
+        let def_id = body.source.def_id();
+        let param_env = tcx.param_env_reveal_all_normalized(def_id);
         let move_data = match MoveData::gather_moves(body, tcx, param_env) {
             Ok(move_data) => move_data,
             Err((move_data, _)) => {
@@ -39,10 +38,10 @@
         let elaborate_patch = {
             let body = &*body;
             let env = MoveDataParamEnv { move_data, param_env };
-            let dead_unwinds = find_dead_unwinds(tcx, body, def_id, &env);
+            let dead_unwinds = find_dead_unwinds(tcx, body, &env);
 
             let inits = MaybeInitializedPlaces::new(tcx, body, &env)
-                .into_engine(tcx, body, def_id)
+                .into_engine(tcx, body)
                 .dead_unwinds(&dead_unwinds)
                 .pass_name("elaborate_drops")
                 .iterate_to_fixpoint()
@@ -50,7 +49,7 @@
 
             let uninits = MaybeUninitializedPlaces::new(tcx, body, &env)
                 .mark_inactive_variants_as_uninit()
-                .into_engine(tcx, body, def_id)
+                .into_engine(tcx, body)
                 .dead_unwinds(&dead_unwinds)
                 .pass_name("elaborate_drops")
                 .iterate_to_fixpoint()
@@ -76,7 +75,6 @@
 fn find_dead_unwinds<'tcx>(
     tcx: TyCtxt<'tcx>,
     body: &Body<'tcx>,
-    def_id: hir::def_id::DefId,
     env: &MoveDataParamEnv<'tcx>,
 ) -> BitSet<BasicBlock> {
     debug!("find_dead_unwinds({:?})", body.span);
@@ -84,7 +82,7 @@
     // reach cleanup blocks, which can't have unwind edges themselves.
     let mut dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
     let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env)
-        .into_engine(tcx, body, def_id)
+        .into_engine(tcx, body)
         .pass_name("find_dead_unwinds")
         .iterate_to_fixpoint()
         .into_results_cursor(body);
diff --git a/compiler/rustc_mir/src/transform/function_item_references.rs b/compiler/rustc_mir/src/transform/function_item_references.rs
new file mode 100644
index 0000000..d592580
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/function_item_references.rs
@@ -0,0 +1,223 @@
+use rustc_errors::Applicability;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{
+    self,
+    subst::{GenericArgKind, Subst, SubstsRef},
+    PredicateAtom, Ty, TyCtxt, TyS,
+};
+use rustc_session::lint::builtin::FUNCTION_ITEM_REFERENCES;
+use rustc_span::{symbol::sym, Span};
+use rustc_target::spec::abi::Abi;
+
+use crate::transform::MirPass;
+
+pub struct FunctionItemReferences;
+
+impl<'tcx> MirPass<'tcx> for FunctionItemReferences {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let mut checker = FunctionItemRefChecker { tcx, body };
+        checker.visit_body(&body);
+    }
+}
+
+struct FunctionItemRefChecker<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    body: &'a Body<'tcx>,
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for FunctionItemRefChecker<'a, 'tcx> {
+    /// Emits a lint for function reference arguments bound by `fmt::Pointer` or passed to
+    /// `transmute`. This only handles arguments in calls outside macro expansions to avoid double
+    /// counting function references formatted as pointers by macros.
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        if let TerminatorKind::Call {
+            func,
+            args,
+            destination: _,
+            cleanup: _,
+            from_hir_call: _,
+            fn_span: _,
+        } = &terminator.kind
+        {
+            let source_info = *self.body.source_info(location);
+            // Only handle function calls outside macros
+            if !source_info.span.from_expansion() {
+                let func_ty = func.ty(self.body, self.tcx);
+                if let ty::FnDef(def_id, substs_ref) = *func_ty.kind() {
+                    // Handle calls to `transmute`
+                    if self.tcx.is_diagnostic_item(sym::transmute, def_id) {
+                        let arg_ty = args[0].ty(self.body, self.tcx);
+                        for generic_inner_ty in arg_ty.walk() {
+                            if let GenericArgKind::Type(inner_ty) = generic_inner_ty.unpack() {
+                                if let Some((fn_id, fn_substs)) =
+                                    FunctionItemRefChecker::is_fn_ref(inner_ty)
+                                {
+                                    let span = self.nth_arg_span(&args, 0);
+                                    self.emit_lint(fn_id, fn_substs, source_info, span);
+                                }
+                            }
+                        }
+                    } else {
+                        self.check_bound_args(def_id, substs_ref, &args, source_info);
+                    }
+                }
+            }
+        }
+        self.super_terminator(terminator, location);
+    }
+
+    /// Emits a lint for function references formatted with `fmt::Pointer::fmt` by macros. These
+    /// cases are handled as operands instead of call terminators to avoid any dependence on
+    /// unstable, internal formatting details like whether `fmt` is called directly or not.
+    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+        let source_info = *self.body.source_info(location);
+        if source_info.span.from_expansion() {
+            let op_ty = operand.ty(self.body, self.tcx);
+            if let ty::FnDef(def_id, substs_ref) = *op_ty.kind() {
+                if self.tcx.is_diagnostic_item(sym::pointer_trait_fmt, def_id) {
+                    let param_ty = substs_ref.type_at(0);
+                    if let Some((fn_id, fn_substs)) = FunctionItemRefChecker::is_fn_ref(param_ty) {
+                        // The operand's ctxt wouldn't display the lint since it's inside a macro so
+                        // we have to use the callsite's ctxt.
+                        let callsite_ctxt = source_info.span.source_callsite().ctxt();
+                        let span = source_info.span.with_ctxt(callsite_ctxt);
+                        self.emit_lint(fn_id, fn_substs, source_info, span);
+                    }
+                }
+            }
+        }
+        self.super_operand(operand, location);
+    }
+}
+
+impl<'a, 'tcx> FunctionItemRefChecker<'a, 'tcx> {
+    /// Emits a lint for function reference arguments bound by `fmt::Pointer` in calls to the
+    /// function defined by `def_id` with the substitutions `substs_ref`.
+    fn check_bound_args(
+        &self,
+        def_id: DefId,
+        substs_ref: SubstsRef<'tcx>,
+        args: &Vec<Operand<'tcx>>,
+        source_info: SourceInfo,
+    ) {
+        let param_env = self.tcx.param_env(def_id);
+        let bounds = param_env.caller_bounds();
+        for bound in bounds {
+            if let Some(bound_ty) = self.is_pointer_trait(&bound.skip_binders()) {
+                // Get the argument types as they appear in the function signature.
+                let arg_defs = self.tcx.fn_sig(def_id).skip_binder().inputs();
+                for (arg_num, arg_def) in arg_defs.iter().enumerate() {
+                    // For all types reachable from the argument type in the fn sig
+                    for generic_inner_ty in arg_def.walk() {
+                        if let GenericArgKind::Type(inner_ty) = generic_inner_ty.unpack() {
+                            // If the inner type matches the type bound by `Pointer`
+                            if TyS::same_type(inner_ty, bound_ty) {
+                                // Do a substitution using the parameters from the callsite
+                                let subst_ty = inner_ty.subst(self.tcx, substs_ref);
+                                if let Some((fn_id, fn_substs)) =
+                                    FunctionItemRefChecker::is_fn_ref(subst_ty)
+                                {
+                                    let span = self.nth_arg_span(args, arg_num);
+                                    self.emit_lint(fn_id, fn_substs, source_info, span);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /// If the given predicate is the trait `fmt::Pointer`, returns the bound parameter type.
+    fn is_pointer_trait(&self, bound: &PredicateAtom<'tcx>) -> Option<Ty<'tcx>> {
+        if let ty::PredicateAtom::Trait(predicate, _) = bound {
+            if self.tcx.is_diagnostic_item(sym::pointer_trait, predicate.def_id()) {
+                Some(predicate.trait_ref.self_ty())
+            } else {
+                None
+            }
+        } else {
+            None
+        }
+    }
+
+    /// If a type is a reference or raw pointer to the anonymous type of a function definition,
+    /// returns that function's `DefId` and `SubstsRef`.
+    fn is_fn_ref(ty: Ty<'tcx>) -> Option<(DefId, SubstsRef<'tcx>)> {
+        let referent_ty = match ty.kind() {
+            ty::Ref(_, referent_ty, _) => Some(referent_ty),
+            ty::RawPtr(ty_and_mut) => Some(&ty_and_mut.ty),
+            _ => None,
+        };
+        referent_ty
+            .map(|ref_ty| {
+                if let ty::FnDef(def_id, substs_ref) = *ref_ty.kind() {
+                    Some((def_id, substs_ref))
+                } else {
+                    None
+                }
+            })
+            .unwrap_or(None)
+    }
+
+    fn nth_arg_span(&self, args: &Vec<Operand<'tcx>>, n: usize) -> Span {
+        match &args[n] {
+            Operand::Copy(place) | Operand::Move(place) => {
+                self.body.local_decls[place.local].source_info.span
+            }
+            Operand::Constant(constant) => constant.span,
+        }
+    }
+
+    fn emit_lint(
+        &self,
+        fn_id: DefId,
+        fn_substs: SubstsRef<'tcx>,
+        source_info: SourceInfo,
+        span: Span,
+    ) {
+        let lint_root = self.body.source_scopes[source_info.scope]
+            .local_data
+            .as_ref()
+            .assert_crate_local()
+            .lint_root;
+        let fn_sig = self.tcx.fn_sig(fn_id);
+        let unsafety = fn_sig.unsafety().prefix_str();
+        let abi = match fn_sig.abi() {
+            Abi::Rust => String::from(""),
+            other_abi => {
+                let mut s = String::from("extern \"");
+                s.push_str(other_abi.name());
+                s.push_str("\" ");
+                s
+            }
+        };
+        let ident = self.tcx.item_name(fn_id).to_ident_string();
+        let ty_params = fn_substs.types().map(|ty| format!("{}", ty));
+        let const_params = fn_substs.consts().map(|c| format!("{}", c));
+        let params = ty_params.chain(const_params).collect::<Vec<String>>().join(", ");
+        let num_args = fn_sig.inputs().map_bound(|inputs| inputs.len()).skip_binder();
+        let variadic = if fn_sig.c_variadic() { ", ..." } else { "" };
+        let ret = if fn_sig.output().skip_binder().is_unit() { "" } else { " -> _" };
+        self.tcx.struct_span_lint_hir(FUNCTION_ITEM_REFERENCES, lint_root, span, |lint| {
+            lint.build("taking a reference to a function item does not give a function pointer")
+                .span_suggestion(
+                    span,
+                    &format!("cast `{}` to obtain a function pointer", ident),
+                    format!(
+                        "{} as {}{}fn({}{}){}",
+                        if params.is_empty() { ident } else { format!("{}::<{}>", ident, params) },
+                        unsafety,
+                        abi,
+                        vec!["_"; num_args].join(", "),
+                        variadic,
+                        ret,
+                    ),
+                    Applicability::Unspecified,
+                )
+                .emit();
+        });
+    }
+}
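
To illustrate what the new `FUNCTION_ITEM_REFERENCES` lint is catching: a reference to a function item borrows a zero-sized value, so formatting it with `{:p}` (a `fmt::Pointer` bound) or transmuting it does not yield the function's address. A small hedged example of code the lint targets, together with the cast the suggestion builds (the function name is illustrative):

fn foo(x: usize) -> usize {
    x + 1
}

fn main() {
    // Printing `&foo` with `{:p}` would show the address of a temporary
    // reference to the zero-sized function item, not the function itself;
    // the lint suggests casting to a function pointer instead:
    let as_fn_ptr = foo as fn(usize) -> usize;
    println!("{:p}", as_fn_ptr);
}
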
diff --git a/compiler/rustc_mir/src/transform/generator.rs b/compiler/rustc_mir/src/transform/generator.rs
index 1fffcf8..039d475 100644
--- a/compiler/rustc_mir/src/transform/generator.rs
+++ b/compiler/rustc_mir/src/transform/generator.rs
@@ -55,13 +55,12 @@
 use crate::dataflow::{self, Analysis};
 use crate::transform::no_landing_pads::no_landing_pads;
 use crate::transform::simplify;
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use crate::util::dump_mir;
 use crate::util::expand_aggregate;
 use crate::util::storage;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
 use rustc_hir::lang_items::LangItem;
 use rustc_index::bit_set::{BitMatrix, BitSet};
 use rustc_index::vec::{Idx, IndexVec};
@@ -72,7 +71,6 @@
 use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
 use rustc_target::abi::VariantIdx;
 use rustc_target::spec::PanicStrategy;
-use std::borrow::Cow;
 use std::{iter, ops};
 
 pub struct StateTransform;
@@ -451,24 +449,22 @@
 fn locals_live_across_suspend_points(
     tcx: TyCtxt<'tcx>,
     body: &Body<'tcx>,
-    source: MirSource<'tcx>,
     always_live_locals: &storage::AlwaysLiveLocals,
     movable: bool,
 ) -> LivenessInfo {
-    let def_id = source.def_id();
     let body_ref: &Body<'_> = &body;
 
     // Calculate when MIR locals have live storage. This gives us an upper bound of their
     // lifetimes.
     let mut storage_live = MaybeStorageLive::new(always_live_locals.clone())
-        .into_engine(tcx, body_ref, def_id)
+        .into_engine(tcx, body_ref)
         .iterate_to_fixpoint()
         .into_results_cursor(body_ref);
 
     // Calculate the MIR locals which have been previously
     // borrowed (even if they are still active).
     let borrowed_locals_results = MaybeBorrowedLocals::all_borrows()
-        .into_engine(tcx, body_ref, def_id)
+        .into_engine(tcx, body_ref)
         .pass_name("generator")
         .iterate_to_fixpoint();
 
@@ -478,14 +474,14 @@
     // Calculate the MIR locals that we actually need to keep storage around
     // for.
     let requires_storage_results = MaybeRequiresStorage::new(body, &borrowed_locals_results)
-        .into_engine(tcx, body_ref, def_id)
+        .into_engine(tcx, body_ref)
         .iterate_to_fixpoint();
     let mut requires_storage_cursor =
         dataflow::ResultsCursor::new(body_ref, &requires_storage_results);
 
     // Calculate the liveness of MIR locals ignoring borrows.
     let mut liveness = MaybeLiveLocals
-        .into_engine(tcx, body_ref, def_id)
+        .into_engine(tcx, body_ref)
         .pass_name("generator")
         .iterate_to_fixpoint()
         .into_results_cursor(body_ref);
@@ -723,11 +719,11 @@
 fn sanitize_witness<'tcx>(
     tcx: TyCtxt<'tcx>,
     body: &Body<'tcx>,
-    did: DefId,
     witness: Ty<'tcx>,
     upvars: &Vec<Ty<'tcx>>,
     saved_locals: &GeneratorSavedLocals,
 ) {
+    let did = body.source.def_id();
     let allowed_upvars = tcx.erase_regions(upvars);
     let allowed = match witness.kind() {
         ty::GeneratorWitness(s) => tcx.erase_late_bound_regions(&s),
@@ -842,11 +838,12 @@
 ) {
     let default_block = insert_term_block(body, default);
     let (assign, discr) = transform.get_discr(body);
+    let switch_targets =
+        SwitchTargets::new(cases.iter().map(|(i, bb)| ((*i) as u128, *bb)), default_block);
     let switch = TerminatorKind::SwitchInt {
         discr: Operand::Move(discr),
         switch_ty: transform.discr_ty,
-        values: Cow::from(cases.iter().map(|&(i, _)| i as u128).collect::<Vec<_>>()),
-        targets: cases.iter().map(|&(_, d)| d).chain(iter::once(default_block)).collect(),
+        targets: switch_targets,
     };
 
     let source_info = SourceInfo::outermost(body.span);
@@ -866,7 +863,7 @@
     }
 }
 
-fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, body: &mut Body<'tcx>) {
+fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     use crate::shim::DropShimElaborator;
     use crate::util::elaborate_drops::{elaborate_drop, Unwind};
     use crate::util::patch::MirPatch;
@@ -875,6 +872,7 @@
     // this is ok because `open_drop` can only be reached within that own
     // generator's resume function.
 
+    let def_id = body.source.def_id();
     let param_env = tcx.param_env(def_id);
 
     let mut elaborator = DropShimElaborator { body, patch: MirPatch::new(body), tcx, param_env };
@@ -915,7 +913,6 @@
 fn create_generator_drop_shim<'tcx>(
     tcx: TyCtxt<'tcx>,
     transform: &TransformVisitor<'tcx>,
-    source: MirSource<'tcx>,
     gen_ty: Ty<'tcx>,
     body: &mut Body<'tcx>,
     drop_clean: BasicBlock,
@@ -968,7 +965,7 @@
     // unrelated code from the resume part of the function
     simplify::remove_dead_blocks(&mut body);
 
-    dump_mir(tcx, None, "generator_drop", &0, source, &body, |_, _| Ok(()));
+    dump_mir(tcx, None, "generator_drop", &0, &body, |_, _| Ok(()));
 
     body
 }
@@ -1070,7 +1067,6 @@
 fn create_generator_resume_function<'tcx>(
     tcx: TyCtxt<'tcx>,
     transform: TransformVisitor<'tcx>,
-    source: MirSource<'tcx>,
     body: &mut Body<'tcx>,
     can_return: bool,
 ) {
@@ -1142,7 +1138,7 @@
     // unrelated code from the drop part of the function
     simplify::remove_dead_blocks(body);
 
-    dump_mir(tcx, None, "generator_resume", &0, source, body, |_, _| Ok(()));
+    dump_mir(tcx, None, "generator_resume", &0, body, |_, _| Ok(()));
 }
 
 fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
@@ -1239,7 +1235,7 @@
 }
 
 impl<'tcx> MirPass<'tcx> for StateTransform {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let yield_ty = if let Some(yield_ty) = body.yield_ty {
             yield_ty
         } else {
@@ -1249,8 +1245,6 @@
 
         assert!(body.generator_drop.is_none());
 
-        let def_id = source.def_id();
-
         // The first argument is the generator type passed by value
         let gen_ty = body.local_decls.raw[1].ty;
 
@@ -1307,9 +1301,9 @@
         let always_live_locals = storage::AlwaysLiveLocals::new(&body);
 
         let liveness_info =
-            locals_live_across_suspend_points(tcx, body, source, &always_live_locals, movable);
+            locals_live_across_suspend_points(tcx, body, &always_live_locals, movable);
 
-        sanitize_witness(tcx, body, def_id, interior, &upvars, &liveness_info.saved_locals);
+        sanitize_witness(tcx, body, interior, &upvars, &liveness_info.saved_locals);
 
         if tcx.sess.opts.debugging_opts.validate_mir {
             let mut vis = EnsureGeneratorFieldAssignmentsNeverAlias {
@@ -1356,23 +1350,22 @@
         // This is expanded to a drop ladder in `elaborate_generator_drops`.
         let drop_clean = insert_clean_drop(body);
 
-        dump_mir(tcx, None, "generator_pre-elab", &0, source, body, |_, _| Ok(()));
+        dump_mir(tcx, None, "generator_pre-elab", &0, body, |_, _| Ok(()));
 
         // Expand `drop(generator_struct)` to a drop ladder which destroys upvars.
         // If any upvars are moved out of, drop elaboration will handle upvar destruction.
         // However we need to also elaborate the code generated by `insert_clean_drop`.
-        elaborate_generator_drops(tcx, def_id, body);
+        elaborate_generator_drops(tcx, body);
 
-        dump_mir(tcx, None, "generator_post-transform", &0, source, body, |_, _| Ok(()));
+        dump_mir(tcx, None, "generator_post-transform", &0, body, |_, _| Ok(()));
 
         // Create a copy of our MIR and use it to create the drop shim for the generator
-        let drop_shim =
-            create_generator_drop_shim(tcx, &transform, source, gen_ty, body, drop_clean);
+        let drop_shim = create_generator_drop_shim(tcx, &transform, gen_ty, body, drop_clean);
 
         body.generator_drop = Some(box drop_shim);
 
         // Create the Generator::resume function
-        create_generator_resume_function(tcx, transform, source, body, can_return);
+        create_generator_resume_function(tcx, transform, body, can_return);
     }
 }
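
Both this generator transform and the `EarlyOtherwiseBranch` change above now build `SwitchInt` terminators through `SwitchTargets`, which stores (value, target) pairs plus a mandatory otherwise block instead of the old parallel `values`/`targets` vectors. A minimal standalone sketch of that shape (an illustrative mock, not the rustc type):

// Mock of the new representation: `targets` is one entry longer than
// `values`, with the final entry acting as the `otherwise` block.
type BasicBlock = usize;

struct SwitchTargets {
    values: Vec<u128>,
    targets: Vec<BasicBlock>,
}

impl SwitchTargets {
    fn new(pairs: impl Iterator<Item = (u128, BasicBlock)>, otherwise: BasicBlock) -> Self {
        let (values, mut targets): (Vec<_>, Vec<_>) = pairs.unzip();
        targets.push(otherwise);
        SwitchTargets { values, targets }
    }

    fn otherwise(&self) -> BasicBlock {
        *self.targets.last().unwrap()
    }

    // Iteration yields (value, target) pairs, matching the flipped tuple
    // order used in `early_otherwise_branch.rs` above.
    fn iter(&self) -> impl Iterator<Item = (u128, BasicBlock)> + '_ {
        self.values.iter().copied().zip(self.targets.iter().copied())
    }
}

fn main() {
    let targets = SwitchTargets::new(vec![(0u128, 1usize), (1, 2)].into_iter(), 3);
    assert_eq!(targets.otherwise(), 3);
    assert_eq!(targets.iter().collect::<Vec<_>>(), vec![(0u128, 1usize), (1, 2)]);
}
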
 
diff --git a/compiler/rustc_mir/src/transform/inline.rs b/compiler/rustc_mir/src/transform/inline.rs
index 4e7cacc..7737672 100644
--- a/compiler/rustc_mir/src/transform/inline.rs
+++ b/compiler/rustc_mir/src/transform/inline.rs
@@ -1,23 +1,20 @@
 //! Inlining pass for MIR functions
 
 use rustc_attr as attr;
-use rustc_hir::def_id::DefId;
+use rustc_hir as hir;
 use rustc_index::bit_set::BitSet;
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::vec::Idx;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
-use rustc_middle::ty::subst::{Subst, SubstsRef};
 use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
+use rustc_span::{hygiene::ExpnKind, ExpnData, Span};
 use rustc_target::spec::abi::Abi;
 
 use super::simplify::{remove_dead_blocks, CfgSimplifier};
-use crate::transform::{MirPass, MirSource};
-use std::collections::VecDeque;
+use crate::transform::MirPass;
 use std::iter;
-
-const DEFAULT_THRESHOLD: usize = 50;
-const HINT_THRESHOLD: usize = 100;
+use std::ops::{Range, RangeFrom};
 
 const INSTR_COST: usize = 5;
 const CALL_PENALTY: usize = 25;
@@ -30,157 +27,136 @@
 
 #[derive(Copy, Clone, Debug)]
 struct CallSite<'tcx> {
-    callee: DefId,
-    substs: SubstsRef<'tcx>,
-    bb: BasicBlock,
-    location: SourceInfo,
+    callee: Instance<'tcx>,
+    block: BasicBlock,
+    target: Option<BasicBlock>,
+    source_info: SourceInfo,
 }
 
 impl<'tcx> MirPass<'tcx> for Inline {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
-            if tcx.sess.opts.debugging_opts.instrument_coverage {
-                // The current implementation of source code coverage injects code region counters
-                // into the MIR, and assumes a 1-to-1 correspondence between MIR and source-code-
-                // based function.
-                debug!("function inlining is disabled when compiling with `instrument_coverage`");
-            } else {
-                Inliner { tcx, source, codegen_fn_attrs: tcx.codegen_fn_attrs(source.def_id()) }
-                    .run_pass(body);
-            }
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        if tcx.sess.opts.debugging_opts.mir_opt_level < 2 {
+            return;
+        }
+
+        if tcx.sess.opts.debugging_opts.instrument_coverage {
+            // The current implementation of source code coverage injects code region counters
+            // into the MIR, and assumes a 1-to-1 correspondence between MIR and source-code-
+            // based function.
+            debug!("function inlining is disabled when compiling with `instrument_coverage`");
+            return;
+        }
+
+        if inline(tcx, body) {
+            debug!("running simplify cfg on {:?}", body.source);
+            CfgSimplifier::new(body).simplify();
+            remove_dead_blocks(body);
         }
     }
 }
 
+fn inline(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
+    let def_id = body.source.def_id();
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+
+    // Only do inlining into fn bodies.
+    if !tcx.hir().body_owner_kind(hir_id).is_fn_or_closure() {
+        return false;
+    }
+    if body.source.promoted.is_some() {
+        return false;
+    }
+
+    let mut this = Inliner {
+        tcx,
+        param_env: tcx.param_env_reveal_all_normalized(body.source.def_id()),
+        codegen_fn_attrs: tcx.codegen_fn_attrs(body.source.def_id()),
+        hir_id,
+        history: Vec::new(),
+        changed: false,
+    };
+    let blocks = BasicBlock::new(0)..body.basic_blocks().next_index();
+    this.process_blocks(body, blocks);
+    this.changed
+}
+
 struct Inliner<'tcx> {
     tcx: TyCtxt<'tcx>,
-    source: MirSource<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    /// Caller codegen attributes.
     codegen_fn_attrs: &'tcx CodegenFnAttrs,
+    /// Caller HirID.
+    hir_id: hir::HirId,
+    /// Stack of inlined instances.
+    history: Vec<Instance<'tcx>>,
+    /// Indicates that the caller body has been modified.
+    changed: bool,
 }
 
 impl Inliner<'tcx> {
-    fn run_pass(&self, caller_body: &mut Body<'tcx>) {
-        // Keep a queue of callsites to try inlining on. We take
-        // advantage of the fact that queries detect cycles here to
-        // allow us to try and fetch the fully optimized MIR of a
-        // call; if it succeeds, we can inline it and we know that
-        // they do not call us.  Otherwise, we just don't try to
-        // inline.
-        //
-        // We use a queue so that we inline "broadly" before we inline
-        // in depth. It is unclear if this is the best heuristic,
-        // really, but that's true of all the heuristics in this
-        // file. =)
+    fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) {
+        for bb in blocks {
+            let callsite = match self.get_valid_function_call(bb, &caller_body[bb], caller_body) {
+                None => continue,
+                Some(it) => it,
+            };
 
-        let mut callsites = VecDeque::new();
-
-        let param_env = self.tcx.param_env_reveal_all_normalized(self.source.def_id());
-
-        // Only do inlining into fn bodies.
-        let id = self.tcx.hir().local_def_id_to_hir_id(self.source.def_id().expect_local());
-        if self.tcx.hir().body_owner_kind(id).is_fn_or_closure() && self.source.promoted.is_none() {
-            for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() {
-                if let Some(callsite) =
-                    self.get_valid_function_call(bb, bb_data, caller_body, param_env)
-                {
-                    callsites.push_back(callsite);
-                }
+            if !self.is_mir_available(&callsite.callee, caller_body) {
+                debug!("MIR unavailable {}", callsite.callee);
+                continue;
             }
+
+            let callee_body = self.tcx.instance_mir(callsite.callee.def);
+            if !self.should_inline(callsite, callee_body) {
+                continue;
+            }
+
+            if !self.tcx.consider_optimizing(|| {
+                format!("Inline {:?} into {}", callee_body.span, callsite.callee)
+            }) {
+                return;
+            }
+
+            let callee_body = callsite.callee.subst_mir_and_normalize_erasing_regions(
+                self.tcx,
+                self.param_env,
+                callee_body,
+            );
+
+            let old_blocks = caller_body.basic_blocks().next_index();
+            self.inline_call(callsite, caller_body, callee_body);
+            let new_blocks = old_blocks..caller_body.basic_blocks().next_index();
+            self.changed = true;
+
+            self.history.push(callsite.callee);
+            self.process_blocks(caller_body, new_blocks);
+            self.history.pop();
+        }
+    }
+
+    fn is_mir_available(&self, callee: &Instance<'tcx>, caller_body: &Body<'tcx>) -> bool {
+        if let InstanceDef::Item(_) = callee.def {
+            if !self.tcx.is_mir_available(callee.def_id()) {
+                return false;
+            }
+        }
+
+        if let Some(callee_def_id) = callee.def_id().as_local() {
+            let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
+            // Avoid a cycle here by using `instance_mir` only if we have
+            // a lower `HirId` than the callee. This ensures that the callee will
+            // not inline us. This trick only works without incremental compilation.
+            // So don't do it if that is enabled. Also avoid inlining into generators,
+            // since their `optimized_mir` is used for layout computation, which can
+            // create a cycle, even when no attempt is made to inline the function
+            // in the other direction.
+            !self.tcx.dep_graph.is_fully_enabled()
+                && self.hir_id < callee_hir_id
+                && caller_body.generator_kind.is_none()
         } else {
-            return;
-        }
-
-        let mut local_change;
-        let mut changed = false;
-
-        loop {
-            local_change = false;
-            while let Some(callsite) = callsites.pop_front() {
-                debug!("checking whether to inline callsite {:?}", callsite);
-                if !self.tcx.is_mir_available(callsite.callee) {
-                    debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite);
-                    continue;
-                }
-
-                let callee_body = if let Some(callee_def_id) = callsite.callee.as_local() {
-                    let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
-                    let self_hir_id =
-                        self.tcx.hir().local_def_id_to_hir_id(self.source.def_id().expect_local());
-                    // Avoid a cycle here by only using `optimized_mir` only if we have
-                    // a lower `HirId` than the callee. This ensures that the callee will
-                    // not inline us. This trick only works without incremental compilation.
-                    // So don't do it if that is enabled. Also avoid inlining into generators,
-                    // since their `optimized_mir` is used for layout computation, which can
-                    // create a cycle, even when no attempt is made to inline the function
-                    // in the other direction.
-                    if !self.tcx.dep_graph.is_fully_enabled()
-                        && self_hir_id < callee_hir_id
-                        && caller_body.generator_kind.is_none()
-                    {
-                        self.tcx.optimized_mir(callsite.callee)
-                    } else {
-                        continue;
-                    }
-                } else {
-                    // This cannot result in a cycle since the callee MIR is from another crate
-                    // and is already optimized.
-                    self.tcx.optimized_mir(callsite.callee)
-                };
-
-                let callee_body = if self.consider_optimizing(callsite, callee_body) {
-                    self.tcx.subst_and_normalize_erasing_regions(
-                        &callsite.substs,
-                        param_env,
-                        callee_body,
-                    )
-                } else {
-                    continue;
-                };
-
-                // Copy only unevaluated constants from the callee_body into the caller_body.
-                // Although we are only pushing `ConstKind::Unevaluated` consts to
-                // `required_consts`, here we may not only have `ConstKind::Unevaluated`
-                // because we are calling `subst_and_normalize_erasing_regions`.
-                caller_body.required_consts.extend(
-                    callee_body.required_consts.iter().copied().filter(|&constant| {
-                        matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _))
-                    }),
-                );
-
-                let start = caller_body.basic_blocks().len();
-                debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
-                if !self.inline_call(callsite, caller_body, callee_body) {
-                    debug!("attempting to inline callsite {:?} - failure", callsite);
-                    continue;
-                }
-                debug!("attempting to inline callsite {:?} - success", callsite);
-
-                // Add callsites from inlined function
-                for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
-                    if let Some(new_callsite) =
-                        self.get_valid_function_call(bb, bb_data, caller_body, param_env)
-                    {
-                        // Don't inline the same function multiple times.
-                        if callsite.callee != new_callsite.callee {
-                            callsites.push_back(new_callsite);
-                        }
-                    }
-                }
-
-                local_change = true;
-                changed = true;
-            }
-
-            if !local_change {
-                break;
-            }
-        }
-
-        // Simplify if we inlined anything.
-        if changed {
-            debug!("running simplify cfg on {:?}", self.source);
-            CfgSimplifier::new(caller_body).simplify();
-            remove_dead_blocks(caller_body);
+            // This cannot result in a cycle since the callee MIR is from another crate
+            // and is already optimized.
+            true
         }
     }
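
The cycle-avoidance rule in `is_mir_available` above only lets a local caller inline a callee with a strictly greater `HirId`, so the "may inline" relation is acyclic and `instance_mir`/`optimized_mir` queries cannot end up waiting on each other; the trick is disabled under incremental compilation and when the caller is a generator. A hedged standalone sketch of that ordering argument (the ids and helper are hypothetical):

// Model of the ordering trick: inlining is only allowed "forwards" in id
// order, so "A inlines B" and "B inlines A" can never both hold.
fn may_inline(caller_id: u32, callee_id: u32, incremental: bool, caller_is_generator: bool) -> bool {
    !incremental && !caller_is_generator && caller_id < callee_id
}

fn main() {
    // The function with id 1 may pull in the body of id 2 ...
    assert!(may_inline(1, 2, false, false));
    // ... but id 2 can never pull id 1 back in, so no query cycle forms.
    assert!(!may_inline(2, 1, false, false));
    // Disabled entirely under incremental compilation and inside generators.
    assert!(!may_inline(1, 2, true, false));
    assert!(!may_inline(1, 2, false, true));
}
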
 
@@ -189,7 +165,6 @@
         bb: BasicBlock,
         bb_data: &BasicBlockData<'tcx>,
         caller_body: &Body<'tcx>,
-        param_env: ParamEnv<'tcx>,
     ) -> Option<CallSite<'tcx>> {
         // Don't inline calls that are in cleanup blocks.
         if bb_data.is_cleanup {
@@ -198,20 +173,25 @@
 
         // Only consider direct calls to functions
         let terminator = bb_data.terminator();
-        if let TerminatorKind::Call { func: ref op, .. } = terminator.kind {
+        if let TerminatorKind::Call { func: ref op, ref destination, .. } = terminator.kind {
             if let ty::FnDef(callee_def_id, substs) = *op.ty(caller_body, self.tcx).kind() {
-                let instance =
-                    Instance::resolve(self.tcx, param_env, callee_def_id, substs).ok().flatten()?;
+                // To resolve an instance, its substs have to be fully normalized, so
+                // we do this here.
+                let normalized_substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
+                let callee =
+                    Instance::resolve(self.tcx, self.param_env, callee_def_id, normalized_substs)
+                        .ok()
+                        .flatten()?;
 
-                if let InstanceDef::Virtual(..) = instance.def {
+                if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
                     return None;
                 }
 
                 return Some(CallSite {
-                    callee: instance.def_id(),
-                    substs: instance.substs,
-                    bb,
-                    location: terminator.source_info,
+                    callee,
+                    block: bb,
+                    target: destination.map(|(_, target)| target),
+                    source_info: terminator.source_info,
                 });
             }
         }
@@ -219,14 +199,6 @@
         None
     }
 
-    fn consider_optimizing(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
-        debug!("consider_optimizing({:?})", callsite);
-        self.should_inline(callsite, callee_body)
-            && self.tcx.consider_optimizing(|| {
-                format!("Inline {:?} into {:?}", callee_body.span, callsite)
-            })
-    }
-
     fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
         debug!("should_inline({:?})", callsite);
         let tcx = self.tcx;
@@ -237,12 +209,7 @@
             return false;
         }
 
-        let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee);
-
-        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::TRACK_CALLER) {
-            debug!("`#[track_caller]` present - not inlining");
-            return false;
-        }
+        let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee.def_id());
 
         let self_features = &self.codegen_fn_attrs.target_features;
         let callee_features = &codegen_fn_attrs.target_features;
@@ -276,14 +243,18 @@
         // Only inline local functions if they would be eligible for cross-crate
         // inlining. This is to ensure that the final crate doesn't have MIR that
         // references unexported symbols
-        if callsite.callee.is_local() {
-            if callsite.substs.non_erasable_generics().count() == 0 && !hinted {
+        if callsite.callee.def_id().is_local() {
+            if callsite.callee.substs.non_erasable_generics().count() == 0 && !hinted {
                 debug!("    callee is an exported function - not inlining");
                 return false;
             }
         }
 
-        let mut threshold = if hinted { HINT_THRESHOLD } else { DEFAULT_THRESHOLD };
+        let mut threshold = if hinted {
+            self.tcx.sess.opts.debugging_opts.inline_mir_hint_threshold
+        } else {
+            self.tcx.sess.opts.debugging_opts.inline_mir_threshold
+        };
 
         // Significantly lower the threshold for inlining cold functions
         if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
@@ -299,9 +270,6 @@
         debug!("    final inline threshold = {}", threshold);
 
         // FIXME: Give a bonus to functions with only a single caller
-
-        let param_env = tcx.param_env(self.source.def_id());
-
         let mut first_block = true;
         let mut cost = 0;
 
@@ -333,8 +301,8 @@
                     work_list.push(target);
                     // If the place doesn't actually need dropping, treat it like
                     // a regular goto.
-                    let ty = place.ty(callee_body, tcx).subst(tcx, callsite.substs).ty;
-                    if ty.needs_drop(tcx, param_env) {
+                    let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
+                    if ty.needs_drop(tcx, self.param_env) {
                         cost += CALL_PENALTY;
                         if let Some(unwind) = unwind {
                             cost += LANDINGPAD_PENALTY;
@@ -354,7 +322,18 @@
                 }
 
                 TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
-                    if let ty::FnDef(def_id, _) = *f.literal.ty.kind() {
+                    if let ty::FnDef(def_id, substs) =
+                        *callsite.callee.subst_mir(self.tcx, &f.literal.ty).kind()
+                    {
+                        let substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
+                        if let Ok(Some(instance)) =
+                            Instance::resolve(self.tcx, self.param_env, def_id, substs)
+                        {
+                            if callsite.callee == instance || self.history.contains(&instance) {
+                                debug!("`callee` is recursive - not inlining");
+                                return false;
+                            }
+                        }
                         // Don't give intrinsics the extra penalty for calls
                         let f = tcx.fn_sig(def_id);
                         if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
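
The recursion check above walks the call terminators of the candidate callee and refuses to inline when any of them resolves back to the candidate itself or to an instance already on the `history` stack built up by the recursive `process_blocks` walk, ruling out both direct and mutual recursion. A hedged standalone sketch of that predicate (names are illustrative):

// Model: `history` holds the chain of instances currently being inlined;
// a candidate is rejected if its body calls itself or anything on the chain.
fn rejects_recursion(candidate: &str, calls_in_candidate: &[&str], history: &[&str]) -> bool {
    calls_in_candidate.iter().any(|c| *c == candidate || history.contains(c))
}

fn main() {
    // While expanding `a`, a candidate `b` whose body calls `a` is rejected.
    assert!(rejects_recursion("b", &["a"], &["a"]));
    // Direct self-recursion is rejected even with an empty history.
    assert!(rejects_recursion("f", &["f"], &[]));
    // A candidate that only calls unrelated functions is still eligible.
    assert!(!rejects_recursion("b", &["c"], &["a"]));
}
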
@@ -395,11 +374,10 @@
         let ptr_size = tcx.data_layout.pointer_size.bytes();
 
         for v in callee_body.vars_and_temps_iter() {
-            let v = &callee_body.local_decls[v];
-            let ty = v.ty.subst(tcx, callsite.substs);
+            let ty = callsite.callee.subst_mir(self.tcx, &callee_body.local_decls[v].ty);
             // Cost of the var is the size in machine-words, if we know
             // it.
-            if let Some(size) = type_size_of(tcx, param_env, ty) {
+            if let Some(size) = type_size_of(tcx, self.param_env, ty) {
                 cost += (size / ptr_size) as usize;
             } else {
                 cost += UNKNOWN_SIZE_COST;
@@ -425,43 +403,10 @@
         callsite: CallSite<'tcx>,
         caller_body: &mut Body<'tcx>,
         mut callee_body: Body<'tcx>,
-    ) -> bool {
-        let terminator = caller_body[callsite.bb].terminator.take().unwrap();
+    ) {
+        let terminator = caller_body[callsite.block].terminator.take().unwrap();
         match terminator.kind {
-            // FIXME: Handle inlining of diverging calls
-            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
-                debug!("inlined {:?} into {:?}", callsite.callee, self.source);
-
-                let mut local_map = IndexVec::with_capacity(callee_body.local_decls.len());
-                let mut scope_map = IndexVec::with_capacity(callee_body.source_scopes.len());
-
-                for mut scope in callee_body.source_scopes.iter().cloned() {
-                    if scope.parent_scope.is_none() {
-                        scope.parent_scope = Some(callsite.location.scope);
-                        // FIXME(eddyb) is this really needed?
-                        // (also note that it's always overwritten below)
-                        scope.span = callee_body.span;
-                    }
-
-                    // FIXME(eddyb) this doesn't seem right at all.
-                    // The inlined source scopes should probably be annotated as
-                    // such, but also contain all of the original information.
-                    scope.span = callsite.location.span;
-
-                    let idx = caller_body.source_scopes.push(scope);
-                    scope_map.push(idx);
-                }
-
-                for loc in callee_body.vars_and_temps_iter() {
-                    let mut local = callee_body.local_decls[loc].clone();
-
-                    local.source_info.scope = scope_map[local.source_info.scope];
-                    local.source_info.span = callsite.location.span;
-
-                    let idx = caller_body.local_decls.push(local);
-                    local_map.push(idx);
-                }
-
+            TerminatorKind::Call { args, destination, cleanup, .. } => {
                 // If the call is something like `a[*i] = f(i)`, where
                 // `i : &mut usize`, then just duplicating the `a[*i]`
                 // Place could result in two different locations if `f`
@@ -478,73 +423,103 @@
                     false
                 }
 
-                let dest = if dest_needs_borrow(destination.0) {
-                    debug!("creating temp for return destination");
-                    let dest = Rvalue::Ref(
-                        self.tcx.lifetimes.re_erased,
-                        BorrowKind::Mut { allow_two_phase_borrow: false },
-                        destination.0,
-                    );
-
-                    let ty = dest.ty(caller_body, self.tcx);
-
-                    let temp = LocalDecl::new(ty, callsite.location.span);
-
-                    let tmp = caller_body.local_decls.push(temp);
-                    let tmp = Place::from(tmp);
-
-                    let stmt = Statement {
-                        source_info: callsite.location,
-                        kind: StatementKind::Assign(box (tmp, dest)),
-                    };
-                    caller_body[callsite.bb].statements.push(stmt);
-                    self.tcx.mk_place_deref(tmp)
+                let dest = if let Some((destination_place, _)) = destination {
+                    if dest_needs_borrow(destination_place) {
+                        trace!("creating temp for return destination");
+                        let dest = Rvalue::Ref(
+                            self.tcx.lifetimes.re_erased,
+                            BorrowKind::Mut { allow_two_phase_borrow: false },
+                            destination_place,
+                        );
+                        let dest_ty = dest.ty(caller_body, self.tcx);
+                        let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty));
+                        caller_body[callsite.block].statements.push(Statement {
+                            source_info: callsite.source_info,
+                            kind: StatementKind::Assign(box (temp, dest)),
+                        });
+                        self.tcx.mk_place_deref(temp)
+                    } else {
+                        destination_place
+                    }
                 } else {
-                    destination.0
+                    trace!("creating temp for return place");
+                    Place::from(self.new_call_temp(caller_body, &callsite, callee_body.return_ty()))
                 };
 
-                let return_block = destination.1;
-
                 // Copy the arguments if needed.
-                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, return_block);
+                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body);
 
-                let bb_len = caller_body.basic_blocks().len();
                 let mut integrator = Integrator {
-                    block_idx: bb_len,
                     args: &args,
-                    local_map,
-                    scope_map,
+                    new_locals: Local::new(caller_body.local_decls.len())..,
+                    new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
+                    new_blocks: BasicBlock::new(caller_body.basic_blocks().len())..,
                     destination: dest,
-                    return_block,
+                    return_block: callsite.target,
                     cleanup_block: cleanup,
                     in_cleanup_block: false,
                     tcx: self.tcx,
+                    callsite_span: callsite.source_info.span,
+                    body_span: callee_body.span,
                 };
 
-                for mut var_debug_info in callee_body.var_debug_info.drain(..) {
-                    integrator.visit_var_debug_info(&mut var_debug_info);
-                    caller_body.var_debug_info.push(var_debug_info);
+                // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
+                // (or existing ones, in a few special cases) in the caller.
+                integrator.visit_body(&mut callee_body);
+
+                for scope in &mut callee_body.source_scopes {
+                    // FIXME(eddyb) move this into a `fn visit_scope_data` in `Integrator`.
+                    if scope.parent_scope.is_none() {
+                        let callsite_scope = &caller_body.source_scopes[callsite.source_info.scope];
+
+                        // Attach the outermost callee scope as a child of the callsite
+                        // scope, via the `parent_scope` and `inlined_parent_scope` chains.
+                        scope.parent_scope = Some(callsite.source_info.scope);
+                        assert_eq!(scope.inlined_parent_scope, None);
+                        scope.inlined_parent_scope = if callsite_scope.inlined.is_some() {
+                            Some(callsite.source_info.scope)
+                        } else {
+                            callsite_scope.inlined_parent_scope
+                        };
+
+                        // Mark the outermost callee scope as an inlined one.
+                        assert_eq!(scope.inlined, None);
+                        scope.inlined = Some((callsite.callee, callsite.source_info.span));
+                    } else if scope.inlined_parent_scope.is_none() {
+                        // Make it easy to find the scope with `inlined` set above.
+                        scope.inlined_parent_scope =
+                            Some(integrator.map_scope(OUTERMOST_SOURCE_SCOPE));
+                    }
                 }
 
-                for (bb, mut block) in callee_body.basic_blocks_mut().drain_enumerated(..) {
-                    integrator.visit_basic_block_data(bb, &mut block);
-                    caller_body.basic_blocks_mut().push(block);
-                }
+                // Insert all of the (mapped) parts of the callee body into the caller.
+                caller_body.local_decls.extend(
+                    // FIXME(eddyb) make `Range<Local>` iterable so that we can use
+                    // `callee_body.local_decls.drain(callee_body.vars_and_temps())`
+                    callee_body
+                        .vars_and_temps_iter()
+                        .map(|local| callee_body.local_decls[local].clone()),
+                );
+                caller_body.source_scopes.extend(callee_body.source_scopes.drain(..));
+                caller_body.var_debug_info.extend(callee_body.var_debug_info.drain(..));
+                caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));
 
-                let terminator = Terminator {
-                    source_info: callsite.location,
-                    kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) },
-                };
+                caller_body[callsite.block].terminator = Some(Terminator {
+                    source_info: callsite.source_info,
+                    kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
+                });
 
-                caller_body[callsite.bb].terminator = Some(terminator);
-
-                true
+                // Copy only unevaluated constants from the callee_body into the caller_body.
+                // Although only `ConstKind::Unevaluated` consts are pushed to
+                // `required_consts`, the callee's entries may no longer all be
+                // `ConstKind::Unevaluated` here, because
+                // `subst_and_normalize_erasing_regions` has already been applied to them.
+                caller_body.required_consts.extend(
+                    callee_body.required_consts.iter().copied().filter(|&constant| {
+                        matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _))
+                    }),
+                );
             }
-            kind => {
-                caller_body[callsite.bb].terminator =
-                    Some(Terminator { source_info: terminator.source_info, kind });
-                false
-            }
+            kind => bug!("unexpected terminator kind {:?}", kind),
         }
     }
 
@@ -553,7 +528,6 @@
         args: Vec<Operand<'tcx>>,
         callsite: &CallSite<'tcx>,
         caller_body: &mut Body<'tcx>,
-        return_block: BasicBlock,
     ) -> Vec<Local> {
         let tcx = self.tcx;
 
@@ -580,20 +554,12 @@
         //     tmp2 = tuple_tmp.2
         //
         // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
-        if tcx.is_closure(callsite.callee) {
+        // FIXME(eddyb) make this check for `"rust-call"` ABI combined with
+        // `callee_body.spread_arg == None`, instead of special-casing closures.
+        if tcx.is_closure(callsite.callee.def_id()) {
             let mut args = args.into_iter();
-            let self_ = self.create_temp_if_necessary(
-                args.next().unwrap(),
-                callsite,
-                caller_body,
-                return_block,
-            );
-            let tuple = self.create_temp_if_necessary(
-                args.next().unwrap(),
-                callsite,
-                caller_body,
-                return_block,
-            );
+            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
+            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
             assert!(args.next().is_none());
 
             let tuple = Place::from(tuple);
@@ -613,13 +579,13 @@
                     Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty.expect_ty()));
 
                 // Spill to a local to make e.g., `tmp0`.
-                self.create_temp_if_necessary(tuple_field, callsite, caller_body, return_block)
+                self.create_temp_if_necessary(tuple_field, callsite, caller_body)
             });
 
             closure_ref_arg.chain(tuple_tmp_args).collect()
         } else {
             args.into_iter()
-                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body, return_block))
+                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
                 .collect()
         }
     }
@@ -631,43 +597,52 @@
         arg: Operand<'tcx>,
         callsite: &CallSite<'tcx>,
         caller_body: &mut Body<'tcx>,
-        return_block: BasicBlock,
     ) -> Local {
-        // FIXME: Analysis of the usage of the arguments to avoid
-        // unnecessary temporaries.
-
+        // Reuse the operand if it is a moved temporary.
         if let Operand::Move(place) = &arg {
             if let Some(local) = place.as_local() {
                 if caller_body.local_kind(local) == LocalKind::Temp {
-                    // Reuse the operand if it's a temporary already
                     return local;
                 }
             }
         }
 
-        debug!("creating temp for argument {:?}", arg);
-        // Otherwise, create a temporary for the arg
-        let arg = Rvalue::Use(arg);
-
-        let ty = arg.ty(caller_body, self.tcx);
-
-        let arg_tmp = LocalDecl::new(ty, callsite.location.span);
-        let arg_tmp = caller_body.local_decls.push(arg_tmp);
-
-        caller_body[callsite.bb].statements.push(Statement {
-            source_info: callsite.location,
-            kind: StatementKind::StorageLive(arg_tmp),
+        // Otherwise, create a temporary for the argument.
+        trace!("creating temp for argument {:?}", arg);
+        let arg_ty = arg.ty(caller_body, self.tcx);
+        let local = self.new_call_temp(caller_body, callsite, arg_ty);
+        caller_body[callsite.block].statements.push(Statement {
+            source_info: callsite.source_info,
+            kind: StatementKind::Assign(box (Place::from(local), Rvalue::Use(arg))),
         });
-        caller_body[callsite.bb].statements.push(Statement {
-            source_info: callsite.location,
-            kind: StatementKind::Assign(box (Place::from(arg_tmp), arg)),
-        });
-        caller_body[return_block].statements.insert(
-            0,
-            Statement { source_info: callsite.location, kind: StatementKind::StorageDead(arg_tmp) },
-        );
+        local
+    }
 
-        arg_tmp
+    /// Introduces a new temporary into the caller body that is live for the duration of the call.
+    fn new_call_temp(
+        &self,
+        caller_body: &mut Body<'tcx>,
+        callsite: &CallSite<'tcx>,
+        ty: Ty<'tcx>,
+    ) -> Local {
+        let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));
+
+        caller_body[callsite.block].statements.push(Statement {
+            source_info: callsite.source_info,
+            kind: StatementKind::StorageLive(local),
+        });
+
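+        // `StorageDead` can only be emitted when the call has a return block to
+        // put it in; for a diverging call the temporary simply stays live.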
+        if let Some(block) = callsite.target {
+            caller_body[block].statements.insert(
+                0,
+                Statement {
+                    source_info: callsite.source_info,
+                    kind: StatementKind::StorageDead(local),
+                },
+            );
+        }
+
+        local
     }
 }
 
@@ -687,35 +662,45 @@
  * stuff.
 */
 struct Integrator<'a, 'tcx> {
-    block_idx: usize,
     args: &'a [Local],
-    local_map: IndexVec<Local, Local>,
-    scope_map: IndexVec<SourceScope, SourceScope>,
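+    // Starting indices (in the caller body) at which the callee's locals, scopes
+    // and blocks are appended; see `map_local`, `map_scope` and `map_block`.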
+    new_locals: RangeFrom<Local>,
+    new_scopes: RangeFrom<SourceScope>,
+    new_blocks: RangeFrom<BasicBlock>,
     destination: Place<'tcx>,
-    return_block: BasicBlock,
+    return_block: Option<BasicBlock>,
     cleanup_block: Option<BasicBlock>,
     in_cleanup_block: bool,
     tcx: TyCtxt<'tcx>,
+    callsite_span: Span,
+    body_span: Span,
 }
 
 impl<'a, 'tcx> Integrator<'a, 'tcx> {
-    fn update_target(&self, tgt: BasicBlock) -> BasicBlock {
-        let new = BasicBlock::new(tgt.index() + self.block_idx);
-        debug!("updating target `{:?}`, new: `{:?}`", tgt, new);
+    fn map_local(&self, local: Local) -> Local {
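+        // Callee `_0` maps to the call destination, the callee's argument locals
+        // map to the caller locals collected in `self.args`, and every remaining
+        // local maps into the block of locals appended to the caller body.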
+        let new = if local == RETURN_PLACE {
+            self.destination.local
+        } else {
+            let idx = local.index() - 1;
+            if idx < self.args.len() {
+                self.args[idx]
+            } else {
+                Local::new(self.new_locals.start.index() + (idx - self.args.len()))
+            }
+        };
+        trace!("mapping local `{:?}` to `{:?}`", local, new);
         new
     }
 
-    fn make_integrate_local(&self, local: Local) -> Local {
-        if local == RETURN_PLACE {
-            return self.destination.local;
-        }
+    fn map_scope(&self, scope: SourceScope) -> SourceScope {
+        let new = SourceScope::new(self.new_scopes.start.index() + scope.index());
+        trace!("mapping scope `{:?}` to `{:?}`", scope, new);
+        new
+    }
 
-        let idx = local.index() - 1;
-        if idx < self.args.len() {
-            return self.args[idx];
-        }
-
-        self.local_map[Local::new(idx - self.args.len())]
+    fn map_block(&self, block: BasicBlock) -> BasicBlock {
+        let new = BasicBlock::new(self.new_blocks.start.index() + block.index());
+        trace!("mapping block `{:?}` to `{:?}`", block, new);
+        new
     }
 }
 
@@ -725,10 +710,28 @@
     }
 
     fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
-        *local = self.make_integrate_local(*local);
+        *local = self.map_local(*local);
+    }
+
+    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
+        *scope = self.map_scope(*scope);
+    }
+
+    fn visit_span(&mut self, span: &mut Span) {
+        // Make sure that all spans track the fact that they were inlined.
+        *span = self.callsite_span.fresh_expansion(ExpnData {
+            def_site: self.body_span,
+            ..ExpnData::default(ExpnKind::Inlined, *span, self.tcx.sess.edition(), None)
+        });
     }
 
     fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
+        for elem in place.projection {
+            // FIXME: Make sure that the return place is not used in an indexing
+            // projection, since it won't be rebased as it is supposed to be.
+            assert_ne!(ProjectionElem::Index(RETURN_PLACE), elem);
+        }
+
         // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
         let dest_proj_len = self.destination.projection.len();
         if place.local == RETURN_PLACE && dest_proj_len > 0 {
@@ -769,18 +772,18 @@
         match terminator.kind {
             TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
             TerminatorKind::Goto { ref mut target } => {
-                *target = self.update_target(*target);
+                *target = self.map_block(*target);
             }
             TerminatorKind::SwitchInt { ref mut targets, .. } => {
-                for tgt in targets {
-                    *tgt = self.update_target(*tgt);
+                for tgt in targets.all_targets_mut() {
+                    *tgt = self.map_block(*tgt);
                 }
             }
             TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
             | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
-                *target = self.update_target(*target);
+                *target = self.map_block(*target);
                 if let Some(tgt) = *unwind {
-                    *unwind = Some(self.update_target(tgt));
+                    *unwind = Some(self.map_block(tgt));
                 } else if !self.in_cleanup_block {
                     // Unless this drop is in a cleanup block, add an unwind edge to
                     // the original call's cleanup block
@@ -789,10 +792,10 @@
             }
             TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                 if let Some((_, ref mut tgt)) = *destination {
-                    *tgt = self.update_target(*tgt);
+                    *tgt = self.map_block(*tgt);
                 }
                 if let Some(tgt) = *cleanup {
-                    *cleanup = Some(self.update_target(tgt));
+                    *cleanup = Some(self.map_block(tgt));
                 } else if !self.in_cleanup_block {
                     // Unless this call is in a cleanup block, add an unwind edge to
                     // the original call's cleanup block
@@ -800,9 +803,9 @@
                 }
             }
             TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
-                *target = self.update_target(*target);
+                *target = self.map_block(*target);
                 if let Some(tgt) = *cleanup {
-                    *cleanup = Some(self.update_target(tgt));
+                    *cleanup = Some(self.map_block(tgt));
                 } else if !self.in_cleanup_block {
                     // Unless this assert is in a cleanup block, add an unwind edge to
                     // the original call's cleanup block
@@ -810,7 +813,11 @@
                 }
             }
             TerminatorKind::Return => {
-                terminator.kind = TerminatorKind::Goto { target: self.return_block };
+                terminator.kind = if let Some(tgt) = self.return_block {
+                    TerminatorKind::Goto { target: tgt }
+                } else {
+                    TerminatorKind::Unreachable
+                }
             }
             TerminatorKind::Resume => {
                 if let Some(tgt) = self.cleanup_block {
@@ -820,8 +827,8 @@
             TerminatorKind::Abort => {}
             TerminatorKind::Unreachable => {}
             TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
-                *real_target = self.update_target(*real_target);
-                *imaginary_target = self.update_target(*imaginary_target);
+                *real_target = self.map_block(*real_target);
+                *imaginary_target = self.map_block(*imaginary_target);
             }
             TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
             // see the ordering of passes in the optimized_mir query.
@@ -830,13 +837,9 @@
             }
             TerminatorKind::InlineAsm { ref mut destination, .. } => {
                 if let Some(ref mut tgt) = *destination {
-                    *tgt = self.update_target(*tgt);
+                    *tgt = self.map_block(*tgt);
                 }
             }
         }
     }
-
-    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
-        *scope = self.scope_map[*scope];
-    }
 }
diff --git a/compiler/rustc_mir/src/transform/instcombine.rs b/compiler/rustc_mir/src/transform/instcombine.rs
index ada2489..59b7db2 100644
--- a/compiler/rustc_mir/src/transform/instcombine.rs
+++ b/compiler/rustc_mir/src/transform/instcombine.rs
@@ -1,6 +1,6 @@
 //! Performs various peephole optimizations.
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_hir::Mutability;
 use rustc_index::vec::Idx;
@@ -19,7 +19,7 @@
 pub struct InstCombine;
 
 impl<'tcx> MirPass<'tcx> for InstCombine {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // First, find optimization opportunities. This is done in a pre-pass to keep the MIR
         // read-only so that we can do global analyses on the MIR in the process (e.g.
         // `Place::ty()`).
@@ -137,6 +137,8 @@
                 _ => None,
             }?;
 
+            let mut dead_locals_seen = vec![];
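+            // Locals whose `StorageDead` is encountered while scanning the preceding
+            // statements; if the borrowed place is one of them, the optimization is
+            // abandoned below.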
+
             let stmt_index = location.statement_index;
             // Look behind for a statement that assigns the local from an address-of operator.
             // 6 is chosen as a heuristic determined by seeing the number of times
@@ -160,6 +162,11 @@
                                 BorrowKind::Shared,
                                 place_taken_address_of,
                             ) => {
+                                // Make sure that the place has not been marked dead
+                                if dead_locals_seen.contains(&place_taken_address_of.local) {
+                                    return None;
+                                }
+
                                 self.optimizations
                                     .unneeded_deref
                                     .insert(location, *place_taken_address_of);
@@ -178,13 +185,19 @@
                     // Inline asm can do anything, so bail out of the optimization.
                     rustc_middle::mir::StatementKind::LlvmInlineAsm(_) => return None,
 
+                    // Remember `StorageDead`s, as the local being marked dead could be the
+                    // RHS place we are looking for, in which case we need to abort to avoid UB
+                    // from using an uninitialized place.
+                    rustc_middle::mir::StatementKind::StorageDead(dead) => {
+                        dead_locals_seen.push(*dead)
+                    }
+
                     // Check that `local_being_deref` is not being used in a mutating way which can cause misoptimization.
                     rustc_middle::mir::StatementKind::Assign(box (_, _))
                     | rustc_middle::mir::StatementKind::Coverage(_)
                     | rustc_middle::mir::StatementKind::Nop
                     | rustc_middle::mir::StatementKind::FakeRead(_, _)
                     | rustc_middle::mir::StatementKind::StorageLive(_)
-                    | rustc_middle::mir::StatementKind::StorageDead(_)
                     | rustc_middle::mir::StatementKind::Retag(_, _)
                     | rustc_middle::mir::StatementKind::AscribeUserType(_, _)
                     | rustc_middle::mir::StatementKind::SetDiscriminant { .. } => {
diff --git a/compiler/rustc_mir/src/transform/instrument_coverage.rs b/compiler/rustc_mir/src/transform/instrument_coverage.rs
deleted file mode 100644
index a5b30a2..0000000
--- a/compiler/rustc_mir/src/transform/instrument_coverage.rs
+++ /dev/null
@@ -1,491 +0,0 @@
-use crate::transform::{MirPass, MirSource};
-use crate::util::pretty;
-use crate::util::spanview::{
-    source_range_no_file, statement_kind_name, terminator_kind_name, write_spanview_document,
-    SpanViewable, TOOLTIP_INDENT,
-};
-
-use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_index::bit_set::BitSet;
-use rustc_middle::hir;
-use rustc_middle::ich::StableHashingContext;
-use rustc_middle::mir;
-use rustc_middle::mir::coverage::*;
-use rustc_middle::mir::visit::Visitor;
-use rustc_middle::mir::{
-    BasicBlock, BasicBlockData, Coverage, CoverageInfo, Location, Statement, StatementKind,
-    TerminatorKind,
-};
-use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::TyCtxt;
-use rustc_span::def_id::DefId;
-use rustc_span::{FileName, Pos, RealFileName, Span, Symbol};
-
-/// Inserts `StatementKind::Coverage` statements that either instrument the binary with injected
-/// counters, via intrinsic `llvm.instrprof.increment`, and/or inject metadata used during codegen
-/// to construct the coverage map.
-pub struct InstrumentCoverage;
-
-/// The `query` provider for `CoverageInfo`, requested by `codegen_coverage()` (to inject each
-/// counter) and `FunctionCoverage::new()` (to extract the coverage map metadata from the MIR).
-pub(crate) fn provide(providers: &mut Providers) {
-    providers.coverageinfo = |tcx, def_id| coverageinfo_from_mir(tcx, def_id);
-}
-
-struct CoverageVisitor {
-    info: CoverageInfo,
-}
-
-impl Visitor<'_> for CoverageVisitor {
-    fn visit_coverage(&mut self, coverage: &Coverage, _location: Location) {
-        match coverage.kind {
-            CoverageKind::Counter { id, .. } => {
-                let counter_id = u32::from(id);
-                self.info.num_counters = std::cmp::max(self.info.num_counters, counter_id + 1);
-            }
-            CoverageKind::Expression { id, .. } => {
-                let expression_index = u32::MAX - u32::from(id);
-                self.info.num_expressions =
-                    std::cmp::max(self.info.num_expressions, expression_index + 1);
-            }
-            _ => {}
-        }
-    }
-}
-
-fn coverageinfo_from_mir<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> CoverageInfo {
-    let mir_body = tcx.optimized_mir(def_id);
-
-    // The `num_counters` argument to `llvm.instrprof.increment` is the number of injected
-    // counters, with each counter having a counter ID from `0..num_counters-1`. MIR optimization
-    // may split and duplicate some BasicBlock sequences. Simply counting the calls may not
-    // work; but computing the num_counters by adding `1` to the highest counter_id (for a given
-    // instrumented function) is valid.
-    //
-    // `num_expressions` is the number of counter expressions added to the MIR body. Both
-    // `num_counters` and `num_expressions` are used to initialize new vectors, during backend
-    // code generate, to lookup counters and expressions by simple u32 indexes.
-    let mut coverage_visitor =
-        CoverageVisitor { info: CoverageInfo { num_counters: 0, num_expressions: 0 } };
-
-    coverage_visitor.visit_body(mir_body);
-    coverage_visitor.info
-}
-
-impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
-    fn run_pass(
-        &self,
-        tcx: TyCtxt<'tcx>,
-        mir_source: MirSource<'tcx>,
-        mir_body: &mut mir::Body<'tcx>,
-    ) {
-        // If the InstrumentCoverage pass is called on promoted MIRs, skip them.
-        // See: https://github.com/rust-lang/rust/pull/73011#discussion_r438317601
-        if mir_source.promoted.is_none() {
-            Instrumentor::new(&self.name(), tcx, mir_source, mir_body).inject_counters();
-        }
-    }
-}
-
-#[derive(Clone)]
-struct CoverageRegion {
-    pub span: Span,
-    pub blocks: Vec<BasicBlock>,
-}
-
-struct Instrumentor<'a, 'tcx> {
-    pass_name: &'a str,
-    tcx: TyCtxt<'tcx>,
-    mir_source: MirSource<'tcx>,
-    mir_body: &'a mut mir::Body<'tcx>,
-    hir_body: &'tcx rustc_hir::Body<'tcx>,
-    function_source_hash: Option<u64>,
-    num_counters: u32,
-    num_expressions: u32,
-}
-
-impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
-    fn new(
-        pass_name: &'a str,
-        tcx: TyCtxt<'tcx>,
-        mir_source: MirSource<'tcx>,
-        mir_body: &'a mut mir::Body<'tcx>,
-    ) -> Self {
-        let hir_body = hir_body(tcx, mir_source.def_id());
-        Self {
-            pass_name,
-            tcx,
-            mir_source,
-            mir_body,
-            hir_body,
-            function_source_hash: None,
-            num_counters: 0,
-            num_expressions: 0,
-        }
-    }
-
-    /// Counter IDs start from zero and go up.
-    fn next_counter(&mut self) -> CounterValueReference {
-        assert!(self.num_counters < u32::MAX - self.num_expressions);
-        let next = self.num_counters;
-        self.num_counters += 1;
-        CounterValueReference::from(next)
-    }
-
-    /// Expression IDs start from u32::MAX and go down because a CounterExpression can reference
-    /// (add or subtract counts) of both Counter regions and CounterExpression regions. The counter
-    /// expression operand IDs must be unique across both types.
-    fn next_expression(&mut self) -> InjectedExpressionIndex {
-        assert!(self.num_counters < u32::MAX - self.num_expressions);
-        let next = u32::MAX - self.num_expressions;
-        self.num_expressions += 1;
-        InjectedExpressionIndex::from(next)
-    }
-
-    fn function_source_hash(&mut self) -> u64 {
-        match self.function_source_hash {
-            Some(hash) => hash,
-            None => {
-                let hash = hash_mir_source(self.tcx, self.hir_body);
-                self.function_source_hash.replace(hash);
-                hash
-            }
-        }
-    }
-
-    fn inject_counters(&mut self) {
-        let tcx = self.tcx;
-        let def_id = self.mir_source.def_id();
-        let mir_body = &self.mir_body;
-        let body_span = self.hir_body.value.span;
-        debug!(
-            "instrumenting {:?}, span: {}",
-            def_id,
-            tcx.sess.source_map().span_to_string(body_span)
-        );
-
-        if !tcx.sess.opts.debugging_opts.experimental_coverage {
-            // Coverage at the function level should be accurate. This is the default implementation
-            // if `-Z experimental-coverage` is *NOT* enabled.
-            let block = rustc_middle::mir::START_BLOCK;
-            let counter = self.make_counter();
-            self.inject_statement(counter, body_span, block);
-            return;
-        }
-        // FIXME(richkadel): else if `-Z experimental-coverage` *IS* enabled: Efforts are still in
-        // progress to identify the correct code region spans and associated counters to generate
-        // accurate Rust coverage reports.
-
-        let block_span = |data: &BasicBlockData<'tcx>| {
-            // The default span will be the `Terminator` span; but until we have a smarter solution,
-            // the coverage region also incorporates at least the statements in this BasicBlock as
-            // well. Extend the span to encompass all, if possible.
-            // FIXME(richkadel): Assuming the terminator's span is already known to be contained in `body_span`.
-            let mut span = data.terminator().source_info.span;
-            // FIXME(richkadel): It's looking unlikely that we should compute a span from MIR
-            // spans, but if we do keep something like this logic, we will need a smarter way
-            // to combine `Statement`s and/or `Terminator`s with `Span`s from different
-            // files.
-            for statement_span in data.statements.iter().map(|statement| statement.source_info.span)
-            {
-                // Only combine Spans from the function's body_span.
-                if body_span.contains(statement_span) {
-                    span = span.to(statement_span);
-                }
-            }
-            span
-        };
-
-        // Traverse the CFG but ignore anything following an `unwind`
-        let cfg_without_unwind = ShortCircuitPreorder::new(mir_body, |term_kind| {
-            let mut successors = term_kind.successors();
-            match &term_kind {
-                // SwitchInt successors are never unwind, and all of them should be traversed
-                TerminatorKind::SwitchInt { .. } => successors,
-                // For all other kinds, return only the first successor, if any, and ignore unwinds
-                _ => successors.next().into_iter().chain(&[]),
-            }
-        });
-
-        let mut coverage_regions = Vec::with_capacity(cfg_without_unwind.size_hint().0);
-        for (bb, data) in cfg_without_unwind {
-            if !body_span.contains(data.terminator().source_info.span) {
-                continue;
-            }
-
-            // FIXME(richkadel): Regions will soon contain multiple blocks.
-            let mut blocks = Vec::new();
-            blocks.push(bb);
-            let span = block_span(data);
-            coverage_regions.push(CoverageRegion { span, blocks });
-        }
-
-        let span_viewables = if pretty::dump_enabled(tcx, self.pass_name, def_id) {
-            Some(self.span_viewables(&coverage_regions))
-        } else {
-            None
-        };
-
-        // Inject counters for the selected spans
-        for CoverageRegion { span, blocks } in coverage_regions {
-            debug!(
-                "Injecting counter at: {:?}:\n{}\n==========",
-                span,
-                tcx.sess.source_map().span_to_snippet(span).expect("Error getting source for span"),
-            );
-            let counter = self.make_counter();
-            self.inject_statement(counter, span, blocks[0]);
-        }
-
-        if let Some(span_viewables) = span_viewables {
-            let mut file =
-                pretty::create_dump_file(tcx, "html", None, self.pass_name, &0, self.mir_source)
-                    .expect("Unexpected error creating MIR spanview HTML file");
-            write_spanview_document(tcx, def_id, span_viewables, &mut file)
-                .expect("Unexpected IO error dumping coverage spans as HTML");
-        }
-
-        // FIXME(richkadel): Some regions will be counted by "counter expression". Counter
-        // expressions are supported, but are not yet generated. When they are, remove this `fake_use`
-        // block.
-        let fake_use = false;
-        if fake_use {
-            let add = false;
-            let fake_counter = CoverageKind::Counter {
-                function_source_hash: self.function_source_hash(),
-                id: CounterValueReference::from_u32(1),
-            };
-            let fake_expression = CoverageKind::Expression {
-                id: InjectedExpressionIndex::from(u32::MAX - 1),
-                lhs: ExpressionOperandId::from_u32(1),
-                op: Op::Add,
-                rhs: ExpressionOperandId::from_u32(2),
-            };
-
-            let lhs = fake_counter.as_operand_id();
-            let op = if add { Op::Add } else { Op::Subtract };
-            let rhs = fake_expression.as_operand_id();
-
-            let block = rustc_middle::mir::START_BLOCK;
-
-            let expression = self.make_expression(lhs, op, rhs);
-            self.inject_statement(expression, body_span, block);
-        }
-    }
-
-    fn make_counter(&mut self) -> CoverageKind {
-        CoverageKind::Counter {
-            function_source_hash: self.function_source_hash(),
-            id: self.next_counter(),
-        }
-    }
-
-    fn make_expression(
-        &mut self,
-        lhs: ExpressionOperandId,
-        op: Op,
-        rhs: ExpressionOperandId,
-    ) -> CoverageKind {
-        CoverageKind::Expression { id: self.next_expression(), lhs, op, rhs }
-    }
-
-    fn inject_statement(&mut self, coverage_kind: CoverageKind, span: Span, block: BasicBlock) {
-        let code_region = make_code_region(self.tcx, &span);
-        debug!("  injecting statement {:?} covering {:?}", coverage_kind, code_region);
-
-        let data = &mut self.mir_body[block];
-        let source_info = data.terminator().source_info;
-        let statement = Statement {
-            source_info,
-            kind: StatementKind::Coverage(box Coverage { kind: coverage_kind, code_region }),
-        };
-        data.statements.push(statement);
-    }
-
-    /// Converts the computed `CoverageRegion`s into `SpanViewable`s.
-    fn span_viewables(&self, coverage_regions: &Vec<CoverageRegion>) -> Vec<SpanViewable> {
-        let mut span_viewables = Vec::new();
-        for coverage_region in coverage_regions {
-            span_viewables.push(SpanViewable {
-                span: coverage_region.span,
-                id: format!("{}", coverage_region.blocks[0].index()),
-                tooltip: self.make_tooltip_text(coverage_region),
-            });
-        }
-        span_viewables
-    }
-
-    /// A custom tooltip renderer used in a spanview HTML+CSS document used for coverage analysis.
-    fn make_tooltip_text(&self, coverage_region: &CoverageRegion) -> String {
-        const INCLUDE_COVERAGE_STATEMENTS: bool = false;
-        let tcx = self.tcx;
-        let source_map = tcx.sess.source_map();
-        let mut text = Vec::new();
-        for (i, &bb) in coverage_region.blocks.iter().enumerate() {
-            if i > 0 {
-                text.push("\n".to_owned());
-            }
-            text.push(format!("{:?}: {}:", bb, &source_map.span_to_string(coverage_region.span)));
-            let data = &self.mir_body.basic_blocks()[bb];
-            for statement in &data.statements {
-                let statement_string = match statement.kind {
-                    StatementKind::Coverage(box ref coverage) => match coverage.kind {
-                        CoverageKind::Counter { id, .. } => {
-                            if !INCLUDE_COVERAGE_STATEMENTS {
-                                continue;
-                            }
-                            format!("increment counter #{}", id.index())
-                        }
-                        CoverageKind::Expression { id, lhs, op, rhs } => {
-                            if !INCLUDE_COVERAGE_STATEMENTS {
-                                continue;
-                            }
-                            format!(
-                                "expression #{} = {} {} {}",
-                                id.index(),
-                                lhs.index(),
-                                if op == Op::Add { "+" } else { "-" },
-                                rhs.index()
-                            )
-                        }
-                        CoverageKind::Unreachable => {
-                            if !INCLUDE_COVERAGE_STATEMENTS {
-                                continue;
-                            }
-                            String::from("unreachable")
-                        }
-                    },
-                    _ => format!("{:?}", statement),
-                };
-                let source_range = source_range_no_file(tcx, &statement.source_info.span);
-                text.push(format!(
-                    "\n{}{}: {}: {}",
-                    TOOLTIP_INDENT,
-                    source_range,
-                    statement_kind_name(statement),
-                    statement_string
-                ));
-            }
-            let term = data.terminator();
-            let source_range = source_range_no_file(tcx, &term.source_info.span);
-            text.push(format!(
-                "\n{}{}: {}: {:?}",
-                TOOLTIP_INDENT,
-                source_range,
-                terminator_kind_name(term),
-                term.kind
-            ));
-        }
-        text.join("")
-    }
-}
-
-/// Convert the Span into its file name, start line and column, and end line and column
-fn make_code_region<'tcx>(tcx: TyCtxt<'tcx>, span: &Span) -> CodeRegion {
-    let source_map = tcx.sess.source_map();
-    let start = source_map.lookup_char_pos(span.lo());
-    let end = if span.hi() == span.lo() {
-        start.clone()
-    } else {
-        let end = source_map.lookup_char_pos(span.hi());
-        debug_assert_eq!(
-            start.file.name,
-            end.file.name,
-            "Region start ({:?} -> {:?}) and end ({:?} -> {:?}) don't come from the same source file!",
-            span.lo(),
-            start,
-            span.hi(),
-            end
-        );
-        end
-    };
-    match &start.file.name {
-        FileName::Real(RealFileName::Named(path)) => CodeRegion {
-            file_name: Symbol::intern(&path.to_string_lossy()),
-            start_line: start.line as u32,
-            start_col: start.col.to_u32() + 1,
-            end_line: end.line as u32,
-            end_col: end.col.to_u32() + 1,
-        },
-        _ => bug!("start.file.name should be a RealFileName, but it was: {:?}", start.file.name),
-    }
-}
-
-fn hir_body<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx rustc_hir::Body<'tcx> {
-    let hir_node = tcx.hir().get_if_local(def_id).expect("expected DefId is local");
-    let fn_body_id = hir::map::associated_body(hir_node).expect("HIR node is a function with body");
-    tcx.hir().body(fn_body_id)
-}
-
-fn hash_mir_source<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &'tcx rustc_hir::Body<'tcx>) -> u64 {
-    let mut hcx = tcx.create_no_span_stable_hashing_context();
-    hash(&mut hcx, &hir_body.value).to_smaller_hash()
-}
-
-fn hash(
-    hcx: &mut StableHashingContext<'tcx>,
-    node: &impl HashStable<StableHashingContext<'tcx>>,
-) -> Fingerprint {
-    let mut stable_hasher = StableHasher::new();
-    node.hash_stable(hcx, &mut stable_hasher);
-    stable_hasher.finish()
-}
-
-pub struct ShortCircuitPreorder<
-    'a,
-    'tcx,
-    F: Fn(&'tcx TerminatorKind<'tcx>) -> mir::Successors<'tcx>,
-> {
-    body: &'a mir::Body<'tcx>,
-    visited: BitSet<BasicBlock>,
-    worklist: Vec<BasicBlock>,
-    filtered_successors: F,
-}
-
-impl<'a, 'tcx, F: Fn(&'tcx TerminatorKind<'tcx>) -> mir::Successors<'tcx>>
-    ShortCircuitPreorder<'a, 'tcx, F>
-{
-    pub fn new(
-        body: &'a mir::Body<'tcx>,
-        filtered_successors: F,
-    ) -> ShortCircuitPreorder<'a, 'tcx, F> {
-        let worklist = vec![mir::START_BLOCK];
-
-        ShortCircuitPreorder {
-            body,
-            visited: BitSet::new_empty(body.basic_blocks().len()),
-            worklist,
-            filtered_successors,
-        }
-    }
-}
-
-impl<'a: 'tcx, 'tcx, F: Fn(&'tcx TerminatorKind<'tcx>) -> mir::Successors<'tcx>> Iterator
-    for ShortCircuitPreorder<'a, 'tcx, F>
-{
-    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
-
-    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
-        while let Some(idx) = self.worklist.pop() {
-            if !self.visited.insert(idx) {
-                continue;
-            }
-
-            let data = &self.body[idx];
-
-            if let Some(ref term) = data.terminator {
-                self.worklist.extend((self.filtered_successors)(&term.kind));
-            }
-
-            return Some((idx, data));
-        }
-
-        None
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let size = self.body.basic_blocks().len() - self.visited.count();
-        (size, Some(size))
-    }
-}
diff --git a/compiler/rustc_mir/src/transform/match_branches.rs b/compiler/rustc_mir/src/transform/match_branches.rs
index 70ae547..82c0b92 100644
--- a/compiler/rustc_mir/src/transform/match_branches.rs
+++ b/compiler/rustc_mir/src/transform/match_branches.rs
@@ -1,4 +1,4 @@
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
 
@@ -37,33 +37,33 @@
 /// ```
 
 impl<'tcx> MirPass<'tcx> for MatchBranchSimplification {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        // FIXME: This optimization can result in unsoundness, because it introduces
-        // additional uses of a place holding the discriminant value without ensuring that
-        // it is valid to do so.
-        if !tcx.sess.opts.debugging_opts.unsound_mir_opts {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        if tcx.sess.opts.debugging_opts.mir_opt_level <= 1 {
             return;
         }
 
-        let param_env = tcx.param_env(src.def_id());
-        let bbs = body.basic_blocks_mut();
+        let param_env = tcx.param_env(body.source.def_id());
+        let (bbs, local_decls) = body.basic_blocks_and_local_decls_mut();
         'outer: for bb_idx in bbs.indices() {
             let (discr, val, switch_ty, first, second) = match bbs[bb_idx].terminator().kind {
                 TerminatorKind::SwitchInt {
-                    discr: Operand::Copy(ref place) | Operand::Move(ref place),
+                    discr: ref discr @ (Operand::Copy(_) | Operand::Move(_)),
                     switch_ty,
                     ref targets,
-                    ref values,
                     ..
-                } if targets.len() == 2 && values.len() == 1 && targets[0] != targets[1] => {
-                    (place, values[0], switch_ty, targets[0], targets[1])
+                } if targets.iter().len() == 1 => {
+                    let (value, target) = targets.iter().next().unwrap();
+                    if target == targets.otherwise() {
+                        continue;
+                    }
+                    (discr, value, switch_ty, target, targets.otherwise())
                 }
                 // Only optimize switch int statements
                 _ => continue,
             };
 
             // Check that destinations are identical, and if not, then don't optimize this block
-            if &bbs[first].terminator().kind != &bbs[second].terminator().kind {
+            if bbs[first].terminator().kind != bbs[second].terminator().kind {
                 continue;
             }
 
@@ -96,6 +96,10 @@
             // Take ownership of items now that we know we can optimize.
             let discr = discr.clone();
 
+            // Introduce a temporary for the discriminant value.
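+            // The rewritten statements read the discriminant through this temporary,
+            // so the original discriminant place is read only once, where the old
+            // `SwitchInt` was.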
+            let source_info = bbs[bb_idx].terminator().source_info;
+            let discr_local = local_decls.push(LocalDecl::new(switch_ty, source_info.span));
+
             // We already checked that first and second are different blocks,
             // and bb_idx has a different terminator from both of them.
             let (from, first, second) = bbs.pick3_mut(bb_idx, first, second);
@@ -124,7 +128,11 @@
                                 rustc_span::DUMMY_SP,
                             );
                             let op = if f_b { BinOp::Eq } else { BinOp::Ne };
-                            let rhs = Rvalue::BinaryOp(op, Operand::Copy(discr.clone()), const_cmp);
+                            let rhs = Rvalue::BinaryOp(
+                                op,
+                                Operand::Copy(Place::from(discr_local)),
+                                const_cmp,
+                            );
                             Statement {
                                 source_info: f.source_info,
                                 kind: StatementKind::Assign(box (*lhs, rhs)),
@@ -135,7 +143,16 @@
                     _ => unreachable!(),
                 }
             });
+
+            from.statements
+                .push(Statement { source_info, kind: StatementKind::StorageLive(discr_local) });
+            from.statements.push(Statement {
+                source_info,
+                kind: StatementKind::Assign(box (Place::from(discr_local), Rvalue::Use(discr))),
+            });
             from.statements.extend(new_stmts);
+            from.statements
+                .push(Statement { source_info, kind: StatementKind::StorageDead(discr_local) });
             from.terminator_mut().kind = first.terminator().kind.clone();
         }
     }
diff --git a/compiler/rustc_mir/src/transform/mod.rs b/compiler/rustc_mir/src/transform/mod.rs
index 6719381..e3fea2d 100644
--- a/compiler/rustc_mir/src/transform/mod.rs
+++ b/compiler/rustc_mir/src/transform/mod.rs
@@ -9,7 +9,7 @@
 use rustc_middle::mir::{traversal, Body, ConstQualifs, MirPhase, Promoted};
 use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::steal::Steal;
-use rustc_middle::ty::{self, InstanceDef, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{self, TyCtxt, TypeFoldable};
 use rustc_span::{Span, Symbol};
 use std::borrow::Cow;
 
@@ -22,16 +22,16 @@
 pub mod check_unsafety;
 pub mod cleanup_post_borrowck;
 pub mod const_prop;
-pub mod copy_prop;
+pub mod coverage;
 pub mod deaggregator;
 pub mod dest_prop;
 pub mod dump_mir;
 pub mod early_otherwise_branch;
 pub mod elaborate_drops;
+pub mod function_item_references;
 pub mod generator;
 pub mod inline;
 pub mod instcombine;
-pub mod instrument_coverage;
 pub mod match_branches;
 pub mod multiple_return_terminators;
 pub mod no_landing_pads;
@@ -49,6 +49,8 @@
 pub mod unreachable_prop;
 pub mod validate;
 
+pub use rustc_middle::mir::MirSource;
+
 pub(crate) fn provide(providers: &mut Providers) {
     self::check_unsafety::provide(providers);
     *providers = Providers {
@@ -83,7 +85,7 @@
         },
         ..*providers
     };
-    instrument_coverage::provide(providers);
+    coverage::query::provide(providers);
 }
 
 fn is_mir_available(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
@@ -132,37 +134,10 @@
     set
 }
 
-/// Where a specific `mir::Body` comes from.
-#[derive(Debug, Copy, Clone)]
-pub struct MirSource<'tcx> {
-    pub instance: InstanceDef<'tcx>,
-
-    /// If `Some`, this is a promoted rvalue within the parent function.
-    pub promoted: Option<Promoted>,
-}
-
-impl<'tcx> MirSource<'tcx> {
-    pub fn item(def_id: DefId) -> Self {
-        MirSource {
-            instance: InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
-            promoted: None,
-        }
-    }
-
-    pub fn with_opt_param(self) -> ty::WithOptConstParam<DefId> {
-        self.instance.with_opt_param()
-    }
-
-    #[inline]
-    pub fn def_id(&self) -> DefId {
-        self.instance.def_id()
-    }
-}
-
 /// Generates a default name for the pass based on the name of the
 /// type `T`.
 pub fn default_name<T: ?Sized>() -> Cow<'static, str> {
-    let name = ::std::any::type_name::<T>();
+    let name = std::any::type_name::<T>();
     if let Some(tail) = name.rfind(':') { Cow::from(&name[tail + 1..]) } else { Cow::from(name) }
 }
 
@@ -174,19 +149,16 @@
         default_name::<Self>()
     }
 
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>);
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>);
 }
 
 pub fn run_passes(
     tcx: TyCtxt<'tcx>,
     body: &mut Body<'tcx>,
-    instance: InstanceDef<'tcx>,
-    promoted: Option<Promoted>,
     mir_phase: MirPhase,
     passes: &[&[&dyn MirPass<'tcx>]],
 ) {
     let phase_index = mir_phase.phase_index();
-    let source = MirSource { instance, promoted };
     let validate = tcx.sess.opts.debugging_opts.validate_mir;
 
     if body.phase >= mir_phase {
@@ -195,7 +167,7 @@
 
     if validate {
         validate::Validator { when: format!("input to phase {:?}", mir_phase), mir_phase }
-            .run_pass(tcx, source, body);
+            .run_pass(tcx, body);
     }
 
     let mut index = 0;
@@ -205,13 +177,12 @@
                 tcx,
                 &format_args!("{:03}-{:03}", phase_index, index),
                 &pass.name(),
-                source,
                 body,
                 is_after,
             );
         };
         run_hooks(body, index, false);
-        pass.run_pass(tcx, source, body);
+        pass.run_pass(tcx, body);
         run_hooks(body, index, true);
 
         if validate {
@@ -219,7 +190,7 @@
                 when: format!("after {} in phase {:?}", pass.name(), mir_phase),
                 mir_phase,
             }
-            .run_pass(tcx, source, body);
+            .run_pass(tcx, body);
         }
 
         index += 1;
@@ -235,7 +206,7 @@
 
     if mir_phase == MirPhase::Optimization {
         validate::Validator { when: format!("end of phase {:?}", mir_phase), mir_phase }
-            .run_pass(tcx, source, body);
+            .run_pass(tcx, body);
     }
 }
 
@@ -258,13 +229,7 @@
         return Default::default();
     }
 
-    let ccx = check_consts::ConstCx {
-        body,
-        tcx,
-        def_id: def.did,
-        const_kind,
-        param_env: tcx.param_env(def.did),
-    };
+    let ccx = check_consts::ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def.did) };
 
     let mut validator = check_consts::validation::Validator::new(&ccx);
     validator.check_body();
@@ -292,26 +257,17 @@
 
     let mut body = tcx.mir_built(def).steal();
 
-    util::dump_mir(
-        tcx,
-        None,
-        "mir_map",
-        &0,
-        MirSource { instance: InstanceDef::Item(def.to_global()), promoted: None },
-        &body,
-        |_, _| Ok(()),
-    );
+    util::dump_mir(tcx, None, "mir_map", &0, &body, |_, _| Ok(()));
 
     run_passes(
         tcx,
         &mut body,
-        InstanceDef::Item(def.to_global()),
-        None,
         MirPhase::Const,
         &[&[
             // MIR-level lints.
             &check_packed_ref::CheckPackedRef,
             &check_const_item_mutation::CheckConstItemMutation,
+            &function_item_references::FunctionItemReferences,
             // What we need to do constant evaluation.
             &simplify::SimplifyCfg::new("initial"),
             &rustc_peek::SanityCheck,
@@ -332,11 +288,7 @@
     // this point, before we steal the mir-const result.
     // Also this means promotion can rely on all const checks having been done.
     let _ = tcx.mir_const_qualif_opt_const_arg(def);
-    let _ = if let Some(param_did) = def.const_param_did {
-        tcx.mir_abstract_const_of_const_arg((def.did, param_did))
-    } else {
-        tcx.mir_abstract_const(def.did.to_def_id())
-    };
+    let _ = tcx.mir_abstract_const_opt_const_arg(def.to_global());
     let mut body = tcx.mir_const(def).steal();
 
     let mut required_consts = Vec::new();
@@ -354,19 +306,12 @@
     ];
 
     let opt_coverage: &[&dyn MirPass<'tcx>] = if tcx.sess.opts.debugging_opts.instrument_coverage {
-        &[&instrument_coverage::InstrumentCoverage]
+        &[&coverage::InstrumentCoverage]
     } else {
         &[]
     };
 
-    run_passes(
-        tcx,
-        &mut body,
-        InstanceDef::Item(def.to_global()),
-        None,
-        MirPhase::ConstPromotion,
-        &[promote, opt_coverage],
-    );
+    run_passes(tcx, &mut body, MirPhase::ConstPromotion, &[promote, opt_coverage]);
 
     let promoted = promote_pass.promoted_fragments.into_inner();
     (tcx.alloc_steal_mir(body), tcx.alloc_steal_promoted(promoted))
@@ -391,19 +336,14 @@
     let (body, _) = tcx.mir_promoted(def);
     let mut body = body.steal();
 
-    run_post_borrowck_cleanup_passes(tcx, &mut body, def.did, None);
-    check_consts::post_drop_elaboration::check_live_drops(tcx, def.did, &body);
+    run_post_borrowck_cleanup_passes(tcx, &mut body);
+    check_consts::post_drop_elaboration::check_live_drops(tcx, &body);
     tcx.alloc_steal_mir(body)
 }
 
 /// After this series of passes, no lifetime analysis based on borrowing can be done.
-fn run_post_borrowck_cleanup_passes<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    body: &mut Body<'tcx>,
-    def_id: LocalDefId,
-    promoted: Option<Promoted>,
-) {
-    debug!("post_borrowck_cleanup({:?})", def_id);
+fn run_post_borrowck_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    debug!("post_borrowck_cleanup({:?})", body.source.def_id());
 
     let post_borrowck_cleanup: &[&dyn MirPass<'tcx>] = &[
         // Remove all things only needed by analysis
@@ -428,22 +368,10 @@
         &deaggregator::Deaggregator,
     ];
 
-    run_passes(
-        tcx,
-        body,
-        InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id())),
-        promoted,
-        MirPhase::DropLowering,
-        &[post_borrowck_cleanup],
-    );
+    run_passes(tcx, body, MirPhase::DropLowering, &[post_borrowck_cleanup]);
 }
 
-fn run_optimization_passes<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    body: &mut Body<'tcx>,
-    def_id: LocalDefId,
-    promoted: Option<Promoted>,
-) {
+fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let mir_opt_level = tcx.sess.opts.debugging_opts.mir_opt_level;
 
     // Lowering generator control-flow and variables has to happen before we do anything else
@@ -474,8 +402,7 @@
         &simplify_try::SimplifyArmIdentity,
         &simplify_try::SimplifyBranchSame,
         &dest_prop::DestinationPropagation,
-        &copy_prop::CopyPropagation,
-        &simplify_branches::SimplifyBranches::new("after-copy-prop"),
+        &simplify_branches::SimplifyBranches::new("final"),
         &remove_noop_landing_pads::RemoveNoopLandingPads,
         &simplify::SimplifyCfg::new("final"),
         &nrvo::RenameReturnPlace,
@@ -502,8 +429,6 @@
     run_passes(
         tcx,
         body,
-        InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id())),
-        promoted,
         MirPhase::GeneratorLowering,
         &[
             if mir_opt_level > 0 {
@@ -519,8 +444,6 @@
     run_passes(
         tcx,
         body,
-        InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id())),
-        promoted,
         MirPhase::Optimization,
         &[
             if mir_opt_level > 0 { optimizations } else { no_optimizations },
@@ -558,7 +481,7 @@
     }
 
     let mut body = tcx.mir_drops_elaborated_and_const_checked(def).steal();
-    run_optimization_passes(tcx, &mut body, def.did, None);
+    run_optimization_passes(tcx, &mut body);
 
     debug_assert!(!body.has_free_regions(), "Free regions in optimized MIR");
 
@@ -581,9 +504,9 @@
     let (_, promoted) = tcx.mir_promoted(def);
     let mut promoted = promoted.steal();
 
-    for (p, mut body) in promoted.iter_enumerated_mut() {
-        run_post_borrowck_cleanup_passes(tcx, &mut body, def.did, Some(p));
-        run_optimization_passes(tcx, &mut body, def.did, Some(p));
+    for body in &mut promoted {
+        run_post_borrowck_cleanup_passes(tcx, body);
+        run_optimization_passes(tcx, body);
     }
 
     debug_assert!(!promoted.has_free_regions(), "Free regions in promoted MIR");
diff --git a/compiler/rustc_mir/src/transform/multiple_return_terminators.rs b/compiler/rustc_mir/src/transform/multiple_return_terminators.rs
index 3c9c845..c37b54a 100644
--- a/compiler/rustc_mir/src/transform/multiple_return_terminators.rs
+++ b/compiler/rustc_mir/src/transform/multiple_return_terminators.rs
@@ -1,7 +1,7 @@
 //! This pass removes jumps to basic blocks containing only a return, and replaces them with a
 //! return instead.
 
-use crate::transform::{simplify, MirPass, MirSource};
+use crate::transform::{simplify, MirPass};
 use rustc_index::bit_set::BitSet;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
@@ -9,7 +9,7 @@
 pub struct MultipleReturnTerminators;
 
 impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         if tcx.sess.opts.debugging_opts.mir_opt_level < 3 {
             return;
         }
diff --git a/compiler/rustc_mir/src/transform/no_landing_pads.rs b/compiler/rustc_mir/src/transform/no_landing_pads.rs
index 1d83733..83954c9 100644
--- a/compiler/rustc_mir/src/transform/no_landing_pads.rs
+++ b/compiler/rustc_mir/src/transform/no_landing_pads.rs
@@ -1,7 +1,7 @@
 //! This pass removes the unwind branch of all the terminators when the no-landing-pads option is
 //! specified.
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use rustc_middle::mir::visit::MutVisitor;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
@@ -18,7 +18,7 @@
 }
 
 impl<'tcx> MirPass<'tcx> for NoLandingPads<'tcx> {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         no_landing_pads(tcx, body)
     }
 }
diff --git a/compiler/rustc_mir/src/transform/nrvo.rs b/compiler/rustc_mir/src/transform/nrvo.rs
index 1ffb5a8..45b906b 100644
--- a/compiler/rustc_mir/src/transform/nrvo.rs
+++ b/compiler/rustc_mir/src/transform/nrvo.rs
@@ -1,10 +1,12 @@
+//! See the docs for [`RenameReturnPlace`].
+
 use rustc_hir::Mutability;
 use rustc_index::bit_set::HybridBitSet;
 use rustc_middle::mir::visit::{MutVisitor, NonUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::{self, BasicBlock, Local, Location};
 use rustc_middle::ty::TyCtxt;
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 
 /// This pass looks for MIR that always copies the same local into the return place and eliminates
 /// the copy by renaming all uses of that local to `_0`.
@@ -31,28 +33,22 @@
 pub struct RenameReturnPlace;
 
 impl<'tcx> MirPass<'tcx> for RenameReturnPlace {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut mir::Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut mir::Body<'tcx>) {
         if tcx.sess.opts.debugging_opts.mir_opt_level == 0 {
             return;
         }
 
-        if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
-            // The `DestinationPropagation` pass runs at level 2, so this pass is redundant (and
-            // fails some asserts).
-            return;
-        }
-
         let returned_local = match local_eligible_for_nrvo(body) {
             Some(l) => l,
             None => {
-                debug!("`{:?}` was ineligible for NRVO", src.def_id());
+                debug!("`{:?}` was ineligible for NRVO", body.source.def_id());
                 return;
             }
         };
 
         debug!(
             "`{:?}` was eligible for NRVO, making {:?} the return place",
-            src.def_id(),
+            body.source.def_id(),
             returned_local
         );
 
diff --git a/compiler/rustc_mir/src/transform/promote_consts.rs b/compiler/rustc_mir/src/transform/promote_consts.rs
index 89f7531..927aae8 100644
--- a/compiler/rustc_mir/src/transform/promote_consts.rs
+++ b/compiler/rustc_mir/src/transform/promote_consts.rs
@@ -32,7 +32,7 @@
 
 use crate::const_eval::{is_const_fn, is_unstable_const_fn};
 use crate::transform::check_consts::{is_lang_panic_fn, qualifs, ConstCx};
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 
 /// A `MirPass` for promotion.
 ///
@@ -47,7 +47,7 @@
 }
 
 impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // There's not really any point in promoting errorful MIR.
         //
         // This does not include MIR that failed const-checking, which we still try to promote.
@@ -56,19 +56,17 @@
             return;
         }
 
-        if src.promoted.is_some() {
+        if body.source.promoted.is_some() {
             return;
         }
 
-        let def = src.with_opt_param().expect_local();
-
         let mut rpo = traversal::reverse_postorder(body);
-        let ccx = ConstCx::new(tcx, def.did, body);
+        let ccx = ConstCx::new(tcx, body);
         let (temps, all_candidates) = collect_temps_and_candidates(&ccx, &mut rpo);
 
         let promotable_candidates = validate_candidates(&ccx, &temps, &all_candidates);
 
-        let promoted = promote_candidates(def.to_global(), body, tcx, temps, promotable_candidates);
+        let promoted = promote_candidates(body, tcx, temps, promotable_candidates);
         self.promoted_fragments.set(promoted);
     }
 }
@@ -126,6 +124,15 @@
             Candidate::Argument { .. } | Candidate::InlineAsm { .. } => true,
         }
     }
+
+    fn source_info(&self, body: &Body<'_>) -> SourceInfo {
+        match self {
+            Candidate::Ref(location) | Candidate::Repeat(location) => *body.source_info(*location),
+            Candidate::Argument { bb, .. } | Candidate::InlineAsm { bb, .. } => {
+                *body.source_info(body.terminator_loc(*bb))
+            }
+        }
+    }
 }
 
 fn args_required_const(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Vec<usize>> {
@@ -294,17 +301,6 @@
 struct Unpromotable;
 
 impl<'tcx> Validator<'_, 'tcx> {
-    /// Determines if this code could be executed at runtime and thus is subject to codegen.
-    /// That means even unused constants need to be evaluated.
-    ///
-    /// `const_kind` should not be used in this file other than through this method!
-    fn maybe_runtime(&self) -> bool {
-        match self.const_kind {
-            None | Some(hir::ConstContext::ConstFn) => true,
-            Some(hir::ConstContext::Static(_) | hir::ConstContext::Const) => false,
-        }
-    }
-
     fn validate_candidate(&self, candidate: Candidate) -> Result<(), Unpromotable> {
         match candidate {
             Candidate::Ref(loc) => {
@@ -555,14 +551,12 @@
                     }
 
                     ProjectionElem::Field(..) => {
-                        if self.maybe_runtime() {
-                            let base_ty =
-                                Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
-                            if let Some(def) = base_ty.ty_adt_def() {
-                                // No promotion of union field accesses.
-                                if def.is_union() {
-                                    return Err(Unpromotable);
-                                }
+                        let base_ty =
+                            Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
+                        if let Some(def) = base_ty.ty_adt_def() {
+                            // No promotion of union field accesses.
+                            if def.is_union() {
+                                return Err(Unpromotable);
                             }
                         }
                     }
@@ -744,7 +738,14 @@
     ) -> Result<(), Unpromotable> {
         let fn_ty = callee.ty(self.body, self.tcx);
 
-        if !self.explicit && self.maybe_runtime() {
+        // When doing explicit promotion and inside const/static items, we promote all (eligible) function calls.
+        // Everywhere else, we require `#[rustc_promotable]` on the callee.
+        let promote_all_const_fn = self.explicit
+            || matches!(
+                self.const_kind,
+                Some(hir::ConstContext::Static(_) | hir::ConstContext::Const)
+            );
+        if !promote_all_const_fn {
             if let ty::FnDef(def_id, _) = *fn_ty.kind() {
                 // Never promote runtime `const fn` calls of
                 // functions without `#[rustc_promotable]`.
@@ -758,7 +759,7 @@
             ty::FnDef(def_id, _) => {
                 is_const_fn(self.tcx, def_id)
                     || is_unstable_const_fn(self.tcx, def_id).is_some()
-                    || is_lang_panic_fn(self.tcx, self.def_id.to_def_id())
+                    || is_lang_panic_fn(self.tcx, def_id)
             }
             _ => false,
         };
@@ -955,6 +956,7 @@
                             from_hir_call,
                             fn_span,
                         },
+                        source_info: SourceInfo::outermost(terminator.source_info.span),
                         ..terminator
                     };
                 }
@@ -970,10 +972,10 @@
 
     fn promote_candidate(
         mut self,
-        def: ty::WithOptConstParam<DefId>,
         candidate: Candidate,
         next_promoted_id: usize,
     ) -> Option<Body<'tcx>> {
+        let def = self.source.source.with_opt_param();
         let mut rvalue = {
             let promoted = &mut self.promoted;
             let promoted_id = Promoted::new(next_promoted_id);
@@ -1133,7 +1135,6 @@
 }
 
 pub fn promote_candidates<'tcx>(
-    def: ty::WithOptConstParam<DefId>,
     body: &mut Body<'tcx>,
     tcx: TyCtxt<'tcx>,
     mut temps: IndexVec<Local, TempState>,
@@ -1166,11 +1167,13 @@
         // Declare return place local so that `mir::Body::new` doesn't complain.
         let initial_locals = iter::once(LocalDecl::new(tcx.types.never, body.span)).collect();
 
-        let mut promoted = Body::new(
+        let mut scope = body.source_scopes[candidate.source_info(body).scope].clone();
+        scope.parent_scope = None;
+
+        let promoted = Body::new(
+            body.source, // `promoted` gets filled in below
             IndexVec::new(),
-            // FIXME: maybe try to filter this to avoid blowing up
-            // memory usage?
-            body.source_scopes.clone(),
+            IndexVec::from_elem_n(scope, 1),
             initial_locals,
             IndexVec::new(),
             0,
@@ -1178,7 +1181,6 @@
             body.span,
             body.generator_kind,
         );
-        promoted.ignore_interior_mut_in_const_validation = true;
 
         let promoter = Promoter {
             promoted,
@@ -1190,7 +1192,8 @@
         };
 
         //FIXME(oli-obk): having a `maybe_push()` method on `IndexVec` might be nice
-        if let Some(promoted) = promoter.promote_candidate(def, candidate, promotions.len()) {
+        if let Some(mut promoted) = promoter.promote_candidate(candidate, promotions.len()) {
+            promoted.source.promoted = Some(promotions.next_index());
             promotions.push(promoted);
         }
     }
@@ -1248,7 +1251,9 @@
     debug!(
         "should_suggest_const_in_array_repeat_expressions_flag: def_id={:?} \
             should_promote={:?} feature_flag={:?}",
-        validator.ccx.def_id, should_promote, feature_flag
+        validator.ccx.def_id(),
+        should_promote,
+        feature_flag
     );
     should_promote && !feature_flag
 }
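One detail worth calling out in the promotion hunks: instead of cloning the parent body's whole `source_scopes` table into each promoted fragment, the fragment now gets a single scope, cloned from the candidate's own scope with its `parent_scope` cleared. A standalone sketch of that "detach one scope and make it the root" step follows; the `Scope` type and its fields are invented for illustration and do not mirror rustc's `SourceScopeData` exactly.

    // Illustrative stand-in for a scope tree; not rustc's real data structure.
    #[derive(Clone, Debug, PartialEq)]
    struct Scope {
        span: (u32, u32),
        parent_scope: Option<usize>, // index of the parent scope, if any
    }

    /// Build the scope table for a promoted fragment: one scope, cloned from the
    /// candidate's scope in the parent body, with the parent link severed so it
    /// becomes the root of the new single-entry table.
    fn promoted_scopes(parent_scopes: &[Scope], candidate_scope: usize) -> Vec<Scope> {
        let mut scope = parent_scopes[candidate_scope].clone();
        scope.parent_scope = None;
        vec![scope]
    }

    fn main() {
        let parent = vec![
            Scope { span: (0, 100), parent_scope: None },
            Scope { span: (10, 20), parent_scope: Some(0) },
            Scope { span: (12, 18), parent_scope: Some(1) },
        ];
        let promoted = promoted_scopes(&parent, 2);
        assert_eq!(promoted, vec![Scope { span: (12, 18), parent_scope: None }]);
    }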
diff --git a/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs b/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs
index 4079f01..31e201c 100644
--- a/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs
@@ -1,4 +1,4 @@
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use crate::util::patch::MirPatch;
 use rustc_index::bit_set::BitSet;
 use rustc_middle::mir::*;
@@ -20,7 +20,7 @@
 }
 
 impl<'tcx> MirPass<'tcx> for RemoveNoopLandingPads {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         remove_noop_landing_pads(tcx, body);
     }
 }
@@ -43,7 +43,7 @@
                     // These are all nops in a landing pad
                 }
 
-                StatementKind::Assign(box (place, Rvalue::Use(_))) => {
+                StatementKind::Assign(box (place, Rvalue::Use(_) | Rvalue::Discriminant(_))) => {
                     if place.as_local().is_some() {
                         // Writing to a local (e.g., a drop flag) does not
                         // turn a landing pad to a non-nop
diff --git a/compiler/rustc_mir/src/transform/remove_unneeded_drops.rs b/compiler/rustc_mir/src/transform/remove_unneeded_drops.rs
index b9f2978..aaf3eca 100644
--- a/compiler/rustc_mir/src/transform/remove_unneeded_drops.rs
+++ b/compiler/rustc_mir/src/transform/remove_unneeded_drops.rs
@@ -1,23 +1,22 @@
 //! This pass replaces a drop of a type that does not need dropping, with a goto
 
-use crate::transform::{MirPass, MirSource};
-use rustc_hir::def_id::LocalDefId;
+use crate::transform::MirPass;
 use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::*;
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{ParamEnv, TyCtxt};
 
 use super::simplify::simplify_cfg;
 
 pub struct RemoveUnneededDrops;
 
 impl<'tcx> MirPass<'tcx> for RemoveUnneededDrops {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        trace!("Running RemoveUnneededDrops on {:?}", source);
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        trace!("Running RemoveUnneededDrops on {:?}", body.source);
         let mut opt_finder = RemoveUnneededDropsOptimizationFinder {
             tcx,
             body,
+            param_env: tcx.param_env(body.source.def_id()),
             optimizations: vec![],
-            def_id: source.def_id().expect_local(),
         };
         opt_finder.visit_body(body);
         let should_simplify = !opt_finder.optimizations.is_empty();
@@ -40,7 +39,7 @@
         match terminator.kind {
             TerminatorKind::Drop { place, target, .. } => {
                 let ty = place.ty(self.body, self.tcx);
-                let needs_drop = ty.ty.needs_drop(self.tcx, self.tcx.param_env(self.def_id));
+                let needs_drop = ty.ty.needs_drop(self.tcx, self.param_env);
                 if !needs_drop {
                     self.optimizations.push((location, target));
                 }
@@ -54,5 +53,5 @@
     tcx: TyCtxt<'tcx>,
     body: &'a Body<'tcx>,
     optimizations: Vec<(Location, BasicBlock)>,
-    def_id: LocalDefId,
+    param_env: ParamEnv<'tcx>,
 }
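The pass above replaces a `Drop` terminator with a plain goto when the dropped type has no destructor, and the refactor computes the `ParamEnv` once up front instead of re-querying it for every drop. A minimal standalone analogue of the "drop of a no-op type becomes a goto" rewrite is sketched below with toy terminator and type enums; none of these names are rustc's.

    // Toy IR, for illustration only.
    #[derive(Debug, PartialEq, Clone, Copy)]
    enum Ty {
        I32,    // no destructor
        String, // has a destructor
    }

    #[derive(Debug, PartialEq)]
    enum Terminator {
        Drop { ty: Ty, target: usize },
        Goto { target: usize },
        Return,
    }

    fn needs_drop(ty: Ty) -> bool {
        matches!(ty, Ty::String)
    }

    /// Replace drops of types that do not need dropping with gotos to the same target.
    fn remove_unneeded_drops(terminators: &mut [Terminator]) {
        for term in terminators {
            if let Terminator::Drop { ty, target } = *term {
                if !needs_drop(ty) {
                    *term = Terminator::Goto { target };
                }
            }
        }
    }

    fn main() {
        let mut blocks = vec![
            Terminator::Drop { ty: Ty::I32, target: 1 },
            Terminator::Drop { ty: Ty::String, target: 2 },
            Terminator::Return,
        ];
        remove_unneeded_drops(&mut blocks);
        assert_eq!(blocks[0], Terminator::Goto { target: 1 });
        assert_eq!(blocks[1], Terminator::Drop { ty: Ty::String, target: 2 });
    }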
diff --git a/compiler/rustc_mir/src/transform/rustc_peek.rs b/compiler/rustc_mir/src/transform/rustc_peek.rs
index 015af44..205f718d 100644
--- a/compiler/rustc_mir/src/transform/rustc_peek.rs
+++ b/compiler/rustc_mir/src/transform/rustc_peek.rs
@@ -5,8 +5,7 @@
 use rustc_span::Span;
 use rustc_target::spec::abi::Abi;
 
-use crate::transform::{MirPass, MirSource};
-use rustc_hir::def_id::DefId;
+use crate::transform::MirPass;
 use rustc_index::bit_set::BitSet;
 use rustc_middle::mir::{self, Body, Local, Location};
 use rustc_middle::ty::{self, Ty, TyCtxt};
@@ -23,9 +22,9 @@
 pub struct SanityCheck;
 
 impl<'tcx> MirPass<'tcx> for SanityCheck {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         use crate::dataflow::has_rustc_mir_with;
-        let def_id = src.def_id();
+        let def_id = body.source.def_id();
         if !tcx.has_attr(def_id, sym::rustc_mir) {
             debug!("skipping rustc_peek::SanityCheck on {}", tcx.def_path_str(def_id));
             return;
@@ -41,41 +40,40 @@
 
         if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_maybe_init).is_some() {
             let flow_inits = MaybeInitializedPlaces::new(tcx, body, &mdpe)
-                .into_engine(tcx, body, def_id)
+                .into_engine(tcx, body)
                 .iterate_to_fixpoint();
 
-            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_inits);
+            sanity_check_via_rustc_peek(tcx, body, &attributes, &flow_inits);
         }
 
         if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_maybe_uninit).is_some() {
             let flow_uninits = MaybeUninitializedPlaces::new(tcx, body, &mdpe)
-                .into_engine(tcx, body, def_id)
+                .into_engine(tcx, body)
                 .iterate_to_fixpoint();
 
-            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_uninits);
+            sanity_check_via_rustc_peek(tcx, body, &attributes, &flow_uninits);
         }
 
         if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_definite_init).is_some() {
             let flow_def_inits = DefinitelyInitializedPlaces::new(tcx, body, &mdpe)
-                .into_engine(tcx, body, def_id)
+                .into_engine(tcx, body)
                 .iterate_to_fixpoint();
 
-            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_def_inits);
+            sanity_check_via_rustc_peek(tcx, body, &attributes, &flow_def_inits);
         }
 
         if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_indirectly_mutable).is_some() {
             let flow_mut_borrowed = MaybeMutBorrowedLocals::mut_borrows_only(tcx, body, param_env)
-                .into_engine(tcx, body, def_id)
+                .into_engine(tcx, body)
                 .iterate_to_fixpoint();
 
-            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_mut_borrowed);
+            sanity_check_via_rustc_peek(tcx, body, &attributes, &flow_mut_borrowed);
         }
 
         if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_liveness).is_some() {
-            let flow_liveness =
-                MaybeLiveLocals.into_engine(tcx, body, def_id).iterate_to_fixpoint();
+            let flow_liveness = MaybeLiveLocals.into_engine(tcx, body).iterate_to_fixpoint();
 
-            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_liveness);
+            sanity_check_via_rustc_peek(tcx, body, &attributes, &flow_liveness);
         }
 
         if has_rustc_mir_with(sess, &attributes, sym::stop_after_dataflow).is_some() {
@@ -103,12 +101,12 @@
 pub fn sanity_check_via_rustc_peek<'tcx, A>(
     tcx: TyCtxt<'tcx>,
     body: &Body<'tcx>,
-    def_id: DefId,
     _attributes: &[ast::Attribute],
     results: &Results<'tcx, A>,
 ) where
     A: RustcPeekAt<'tcx>,
 {
+    let def_id = body.source.def_id();
     debug!("sanity_check_via_rustc_peek def_id: {:?}", def_id);
 
     let mut cursor = ResultsCursor::new(body, results);
diff --git a/compiler/rustc_mir/src/transform/simplify.rs b/compiler/rustc_mir/src/transform/simplify.rs
index 3fc8e6d..b7c9a3a 100644
--- a/compiler/rustc_mir/src/transform/simplify.rs
+++ b/compiler/rustc_mir/src/transform/simplify.rs
@@ -27,7 +27,7 @@
 //! naively generate still contains the `_a = ()` write in the unreachable block "after" the
 //! return.
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use rustc_index::bit_set::BitSet;
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
@@ -35,6 +35,7 @@
 use rustc_middle::ty::TyCtxt;
 use smallvec::SmallVec;
 use std::borrow::Cow;
+use std::convert::TryInto;
 
 pub struct SimplifyCfg {
     label: String,
@@ -59,7 +60,7 @@
         Cow::Borrowed(&self.label)
     }
 
-    fn run_pass(&self, _tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, body);
         simplify_cfg(body);
     }
@@ -318,36 +319,21 @@
 pub struct SimplifyLocals;
 
 impl<'tcx> MirPass<'tcx> for SimplifyLocals {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        trace!("running SimplifyLocals on {:?}", source);
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        trace!("running SimplifyLocals on {:?}", body.source);
 
         // First, we're going to get a count of *actual* uses for every `Local`.
-        // Take a look at `DeclMarker::visit_local()` to see exactly what is ignored.
-        let mut used_locals = {
-            let mut marker = DeclMarker::new(body);
-            marker.visit_body(&body);
-
-            marker.local_counts
-        };
-
-        let arg_count = body.arg_count;
+        let mut used_locals = UsedLocals::new(body);
 
         // Next, we're going to remove any `Local` with zero actual uses. When we remove those
         // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals`
         // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from
         // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a
         // fixedpoint where there are no more unused locals.
-        loop {
-            let mut remove_statements = RemoveStatements::new(&mut used_locals, arg_count, tcx);
-            remove_statements.visit_body(body);
-
-            if !remove_statements.modified {
-                break;
-            }
-        }
+        remove_unused_definitions(&mut used_locals, body);
 
         // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the `Local`s.
-        let map = make_local_map(&mut body.local_decls, used_locals, arg_count);
+        let map = make_local_map(&mut body.local_decls, &used_locals);
 
         // Only bother running the `LocalUpdater` if we actually found locals to remove.
         if map.iter().any(Option::is_none) {
@@ -363,14 +349,14 @@
 /// Construct the mapping while swapping out unused stuff out from the `vec`.
 fn make_local_map<V>(
     local_decls: &mut IndexVec<Local, V>,
-    used_locals: IndexVec<Local, usize>,
-    arg_count: usize,
+    used_locals: &UsedLocals,
 ) -> IndexVec<Local, Option<Local>> {
     let mut map: IndexVec<Local, Option<Local>> = IndexVec::from_elem(None, &*local_decls);
     let mut used = Local::new(0);
-    for (alive_index, count) in used_locals.iter_enumerated() {
-        // The `RETURN_PLACE` and arguments are always live.
-        if alive_index.as_usize() > arg_count && *count == 0 {
+
+    for alive_index in local_decls.indices() {
+        // `is_used` treats the `RETURN_PLACE` and arguments as used.
+        if !used_locals.is_used(alive_index) {
             continue;
         }
 
@@ -384,149 +370,130 @@
     map
 }
 
-struct DeclMarker<'a, 'tcx> {
-    pub local_counts: IndexVec<Local, usize>,
-    pub body: &'a Body<'tcx>,
+/// Keeps track of used & unused locals.
+struct UsedLocals {
+    increment: bool,
+    arg_count: u32,
+    use_count: IndexVec<Local, u32>,
 }
 
-impl<'a, 'tcx> DeclMarker<'a, 'tcx> {
-    pub fn new(body: &'a Body<'tcx>) -> Self {
-        Self { local_counts: IndexVec::from_elem(0, &body.local_decls), body }
+impl UsedLocals {
+    /// Determines which locals are used & unused in the given body.
+    fn new(body: &Body<'_>) -> Self {
+        let mut this = Self {
+            increment: true,
+            arg_count: body.arg_count.try_into().unwrap(),
+            use_count: IndexVec::from_elem(0, &body.local_decls),
+        };
+        this.visit_body(body);
+        this
+    }
+
+    /// Checks if local is used.
+    ///
+    /// Return place and arguments are always considered used.
+    fn is_used(&self, local: Local) -> bool {
+        trace!("is_used({:?}): use_count: {:?}", local, self.use_count[local]);
+        local.as_u32() <= self.arg_count || self.use_count[local] != 0
+    }
+
+    /// Updates the use counts to reflect the removal of the given statement.
+    fn statement_removed(&mut self, statement: &Statement<'tcx>) {
+        self.increment = false;
+
+        // The location of the statement is irrelevant.
+        let location = Location { block: START_BLOCK, statement_index: 0 };
+        self.visit_statement(statement, location);
+    }
+
+    /// Visits a left-hand side of an assignment.
+    fn visit_lhs(&mut self, place: &Place<'tcx>, location: Location) {
+        if place.is_indirect() {
+            // A use, not a definition.
+            self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
+        } else {
+            // A definition, although it still might use other locals for indexing.
+            self.super_projection(
+                place.local,
+                &place.projection,
+                PlaceContext::MutatingUse(MutatingUseContext::Projection),
+                location,
+            );
+        }
     }
 }
 
-impl<'a, 'tcx> Visitor<'tcx> for DeclMarker<'a, 'tcx> {
-    fn visit_local(&mut self, local: &Local, ctx: PlaceContext, location: Location) {
-        // Ignore storage markers altogether, they get removed along with their otherwise unused
-        // decls.
-        // FIXME: Extend this to all non-uses.
-        if ctx.is_storage_marker() {
-            return;
+impl Visitor<'_> for UsedLocals {
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match statement.kind {
+            StatementKind::LlvmInlineAsm(..)
+            | StatementKind::Retag(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::FakeRead(..)
+            | StatementKind::AscribeUserType(..) => {
+                self.super_statement(statement, location);
+            }
+
+            StatementKind::Nop => {}
+
+            StatementKind::StorageLive(_local) | StatementKind::StorageDead(_local) => {}
+
+            StatementKind::Assign(box (ref place, ref rvalue)) => {
+                self.visit_lhs(place, location);
+                self.visit_rvalue(rvalue, location);
+            }
+
+            StatementKind::SetDiscriminant { ref place, variant_index: _ } => {
+                self.visit_lhs(place, location);
+            }
         }
+    }
 
-        // Ignore stores of constants because `ConstProp` and `CopyProp` can remove uses of many
-        // of these locals. However, if the local is still needed, then it will be referenced in
-        // another place and we'll mark it as being used there.
-        if ctx == PlaceContext::MutatingUse(MutatingUseContext::Store)
-            || ctx == PlaceContext::MutatingUse(MutatingUseContext::Projection)
-        {
-            let block = &self.body.basic_blocks()[location.block];
-            if location.statement_index != block.statements.len() {
-                let stmt = &block.statements[location.statement_index];
+    fn visit_local(&mut self, local: &Local, _ctx: PlaceContext, _location: Location) {
+        if self.increment {
+            self.use_count[*local] += 1;
+        } else {
+            assert_ne!(self.use_count[*local], 0);
+            self.use_count[*local] -= 1;
+        }
+    }
+}
 
-                if let StatementKind::Assign(box (dest, rvalue)) = &stmt.kind {
-                    if !dest.is_indirect() && dest.local == *local {
-                        let can_skip = match rvalue {
-                            Rvalue::Use(_)
-                            | Rvalue::Discriminant(_)
-                            | Rvalue::BinaryOp(_, _, _)
-                            | Rvalue::CheckedBinaryOp(_, _, _)
-                            | Rvalue::Repeat(_, _)
-                            | Rvalue::AddressOf(_, _)
-                            | Rvalue::Len(_)
-                            | Rvalue::UnaryOp(_, _)
-                            | Rvalue::Aggregate(_, _) => true,
+/// Removes unused definitions. Updates the used locals to reflect the changes made.
+fn remove_unused_definitions<'a, 'tcx>(used_locals: &'a mut UsedLocals, body: &mut Body<'tcx>) {
+    // The use counts are updated as we remove the statements. A local might become unused
+    // during the retain operation, leading to a temporary inconsistency (storage statements or
+    // definitions referencing the local might remain). For correctness it is crucial that this
+    // computation reaches a fixed point.
 
-                            _ => false,
-                        };
+    let mut modified = true;
+    while modified {
+        modified = false;
 
-                        if can_skip {
-                            trace!("skipping store of {:?} to {:?}", rvalue, dest);
-                            return;
-                        }
+        for data in body.basic_blocks_mut() {
+            // Remove unnecessary StorageLive and StorageDead annotations.
+            data.statements.retain(|statement| {
+                let keep = match &statement.kind {
+                    StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+                        used_locals.is_used(*local)
                     }
+                    StatementKind::Assign(box (place, _)) => used_locals.is_used(place.local),
+
+                    StatementKind::SetDiscriminant { ref place, .. } => {
+                        used_locals.is_used(place.local)
+                    }
+                    _ => true,
+                };
+
+                if !keep {
+                    trace!("removing statement {:?}", statement);
+                    modified = true;
+                    used_locals.statement_removed(statement);
                 }
-            }
+
+                keep
+            });
         }
-
-        self.local_counts[*local] += 1;
-    }
-}
-
-struct StatementDeclMarker<'a, 'tcx> {
-    used_locals: &'a mut IndexVec<Local, usize>,
-    statement: &'a Statement<'tcx>,
-}
-
-impl<'a, 'tcx> StatementDeclMarker<'a, 'tcx> {
-    pub fn new(
-        used_locals: &'a mut IndexVec<Local, usize>,
-        statement: &'a Statement<'tcx>,
-    ) -> Self {
-        Self { used_locals, statement }
-    }
-}
-
-impl<'a, 'tcx> Visitor<'tcx> for StatementDeclMarker<'a, 'tcx> {
-    fn visit_local(&mut self, local: &Local, context: PlaceContext, _location: Location) {
-        // Skip the lvalue for assignments
-        if let StatementKind::Assign(box (p, _)) = self.statement.kind {
-            if p.local == *local && context.is_place_assignment() {
-                return;
-            }
-        }
-
-        let use_count = &mut self.used_locals[*local];
-        // If this is the local we're removing...
-        if *use_count != 0 {
-            *use_count -= 1;
-        }
-    }
-}
-
-struct RemoveStatements<'a, 'tcx> {
-    used_locals: &'a mut IndexVec<Local, usize>,
-    arg_count: usize,
-    tcx: TyCtxt<'tcx>,
-    modified: bool,
-}
-
-impl<'a, 'tcx> RemoveStatements<'a, 'tcx> {
-    fn new(
-        used_locals: &'a mut IndexVec<Local, usize>,
-        arg_count: usize,
-        tcx: TyCtxt<'tcx>,
-    ) -> Self {
-        Self { used_locals, arg_count, tcx, modified: false }
-    }
-
-    fn keep_local(&self, l: Local) -> bool {
-        trace!("keep_local({:?}): count: {:?}", l, self.used_locals[l]);
-        l.as_usize() <= self.arg_count || self.used_locals[l] != 0
-    }
-}
-
-impl<'a, 'tcx> MutVisitor<'tcx> for RemoveStatements<'a, 'tcx> {
-    fn tcx(&self) -> TyCtxt<'tcx> {
-        self.tcx
-    }
-
-    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
-        // Remove unnecessary StorageLive and StorageDead annotations.
-        let mut i = 0usize;
-        data.statements.retain(|stmt| {
-            let keep = match &stmt.kind {
-                StatementKind::StorageLive(l) | StatementKind::StorageDead(l) => {
-                    self.keep_local(*l)
-                }
-                StatementKind::Assign(box (place, _)) => self.keep_local(place.local),
-                _ => true,
-            };
-
-            if !keep {
-                trace!("removing statement {:?}", stmt);
-                self.modified = true;
-
-                let mut visitor = StatementDeclMarker::new(self.used_locals, stmt);
-                visitor.visit_statement(stmt, Location { block, statement_index: i });
-            }
-
-            i += 1;
-
-            keep
-        });
-
-        self.super_basic_block_data(block, data);
     }
 }
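The rewritten SimplifyLocals keeps a per-local use count, removes statements whose only effect is to define an unused local, decrements the counts for whatever those statements used, and repeats until nothing changes. Below is a compact standalone model of that fixed-point loop, using plain vectors instead of rustc's visitors and `IndexVec`; the statement shape is invented for illustration.

    // A statement "defines" one local and "uses" some others; purely illustrative.
    #[derive(Debug, Clone)]
    struct Stmt {
        defines: usize,
        uses: Vec<usize>,
    }

    /// Remove definitions of unused locals until a fixed point is reached.
    /// The first `arg_count` locals (plus the return place at index 0) always count as used.
    fn remove_unused_definitions(stmts: &mut Vec<Stmt>, num_locals: usize, arg_count: usize) {
        let mut use_count = vec![0u32; num_locals];
        for s in stmts.iter() {
            for &u in &s.uses {
                use_count[u] += 1;
            }
        }
        let is_used = |use_count: &[u32], l: usize| l <= arg_count || use_count[l] != 0;

        let mut modified = true;
        while modified {
            modified = false;
            stmts.retain(|s| {
                if is_used(&use_count, s.defines) {
                    return true;
                }
                // Removing the statement also removes its uses of other locals,
                // which may in turn make those locals unused on the next pass.
                for &u in &s.uses {
                    use_count[u] -= 1;
                }
                modified = true;
                false
            });
        }
    }

    fn main() {
        // _2 = discriminant(_1); _3 = use(_2); nothing uses _3, and _1 is an argument.
        let mut stmts = vec![
            Stmt { defines: 2, uses: vec![1] },
            Stmt { defines: 3, uses: vec![2] },
        ];
        remove_unused_definitions(&mut stmts, 4, 1);
        assert!(stmts.is_empty());
    }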
 
diff --git a/compiler/rustc_mir/src/transform/simplify_branches.rs b/compiler/rustc_mir/src/transform/simplify_branches.rs
index 4c30a09..a9a45e6 100644
--- a/compiler/rustc_mir/src/transform/simplify_branches.rs
+++ b/compiler/rustc_mir/src/transform/simplify_branches.rs
@@ -1,6 +1,6 @@
 //! A pass that simplifies branches when their condition is known.
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
 
@@ -21,25 +21,24 @@
         Cow::Borrowed(&self.label)
     }
 
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        let param_env = tcx.param_env(src.def_id());
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let param_env = tcx.param_env(body.source.def_id());
         for block in body.basic_blocks_mut() {
             let terminator = block.terminator_mut();
             terminator.kind = match terminator.kind {
                 TerminatorKind::SwitchInt {
                     discr: Operand::Constant(ref c),
                     switch_ty,
-                    ref values,
                     ref targets,
                     ..
                 } => {
                     let constant = c.literal.try_eval_bits(tcx, param_env, switch_ty);
                     if let Some(constant) = constant {
-                        let (otherwise, targets) = targets.split_last().unwrap();
-                        let mut ret = TerminatorKind::Goto { target: *otherwise };
-                        for (&v, t) in values.iter().zip(targets.iter()) {
+                        let otherwise = targets.otherwise();
+                        let mut ret = TerminatorKind::Goto { target: otherwise };
+                        for (v, t) in targets.iter() {
                             if v == constant {
-                                ret = TerminatorKind::Goto { target: *t };
+                                ret = TerminatorKind::Goto { target: t };
                                 break;
                             }
                         }
@@ -50,9 +49,10 @@
                 }
                 TerminatorKind::Assert {
                     target, cond: Operand::Constant(ref c), expected, ..
-                } if (c.literal.try_eval_bool(tcx, param_env) == Some(true)) == expected => {
-                    TerminatorKind::Goto { target }
-                }
+                } => match c.literal.try_eval_bool(tcx, param_env) {
+                    Some(v) if v == expected => TerminatorKind::Goto { target },
+                    _ => continue,
+                },
                 TerminatorKind::FalseEdge { real_target, .. } => {
                     TerminatorKind::Goto { target: real_target }
                 }
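The SimplifyBranches hunk switches from the old parallel `values`/`targets` vectors to the `SwitchTargets` iterator API: when the discriminant is a known constant, the switch collapses to a goto to the matching target, or to the `otherwise` target if no value matches. A self-contained sketch of that constant-folding step, with a simplified `SwitchTargets` stand-in, is below; the real rustc type has a different internal layout.

    // Simplified stand-in for rustc's SwitchTargets: N values map to the first N
    // targets, and the last target is the `otherwise` edge.
    struct SwitchTargets {
        values: Vec<u128>,
        targets: Vec<usize>, // values.len() + 1 entries
    }

    impl SwitchTargets {
        fn iter(&self) -> impl Iterator<Item = (u128, usize)> + '_ {
            self.values.iter().copied().zip(self.targets.iter().copied())
        }
        fn otherwise(&self) -> usize {
            *self.targets.last().unwrap()
        }
    }

    /// If the switched-on value is a known constant, pick the single target to jump to.
    fn fold_switch(targets: &SwitchTargets, constant: u128) -> usize {
        let mut ret = targets.otherwise();
        for (v, t) in targets.iter() {
            if v == constant {
                ret = t;
                break;
            }
        }
        ret
    }

    fn main() {
        let targets = SwitchTargets { values: vec![0, 1], targets: vec![10, 11, 12] };
        assert_eq!(fold_switch(&targets, 1), 11);
        assert_eq!(fold_switch(&targets, 7), 12);
    }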
diff --git a/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs b/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs
index 9b460c9..ea56080 100644
--- a/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs
+++ b/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs
@@ -1,8 +1,10 @@
-use super::{MirPass, MirSource};
+use std::iter;
+
+use super::MirPass;
 use rustc_middle::{
     mir::{
         interpret::Scalar, BasicBlock, BinOp, Body, Operand, Place, Rvalue, Statement,
-        StatementKind, TerminatorKind,
+        StatementKind, SwitchTargets, TerminatorKind,
     },
     ty::{Ty, TyCtxt},
 };
@@ -24,38 +26,44 @@
 pub struct SimplifyComparisonIntegral;
 
 impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
-    fn run_pass(&self, _: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        trace!("Running SimplifyComparisonIntegral on {:?}", source);
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        trace!("Running SimplifyComparisonIntegral on {:?}", body.source);
 
         let helper = OptimizationFinder { body };
         let opts = helper.find_optimizations();
         let mut storage_deads_to_insert = vec![];
         let mut storage_deads_to_remove: Vec<(usize, BasicBlock)> = vec![];
+        let param_env = tcx.param_env(body.source.def_id());
         for opt in opts {
             trace!("SUCCESS: Applying {:?}", opt);
             // replace terminator with a switchInt that switches on the integer directly
             let bbs = &mut body.basic_blocks_mut();
             let bb = &mut bbs[opt.bb_idx];
-            // We only use the bits for the untyped, not length checked `values` field. Thus we are
-            // not using any of the convenience wrappers here and directly access the bits.
             let new_value = match opt.branch_value_scalar {
-                Scalar::Raw { data, .. } => data,
+                Scalar::Int(int) => {
+                    let layout = tcx
+                        .layout_of(param_env.and(opt.branch_value_ty))
+                        .expect("if we have an evaluated constant we must know the layout");
+                    int.assert_bits(layout.size)
+                }
                 Scalar::Ptr(_) => continue,
             };
             const FALSE: u128 = 0;
-            let mut new_targets = opt.targets.clone();
-            let first_is_false_target = opt.values[0] == FALSE;
+
+            let mut new_targets = opt.targets;
+            let first_value = new_targets.iter().next().unwrap().0;
+            let first_is_false_target = first_value == FALSE;
             match opt.op {
                 BinOp::Eq => {
                     // if the assignment was Eq we want the true case to be first
                     if first_is_false_target {
-                        new_targets.swap(0, 1);
+                        new_targets.all_targets_mut().swap(0, 1);
                     }
                 }
                 BinOp::Ne => {
                     // if the assignment was Ne we want the false case to be first
                     if !first_is_false_target {
-                        new_targets.swap(0, 1);
+                        new_targets.all_targets_mut().swap(0, 1);
                     }
                 }
                 _ => unreachable!(),
@@ -96,7 +104,7 @@
                 }
                 storage_deads_to_remove.push((stmt_idx, opt.bb_idx));
                 // if we have StorageDeads to remove then make sure to insert them at the top of each target
-                for bb_idx in new_targets.iter() {
+                for bb_idx in new_targets.all_targets() {
                     storage_deads_to_insert.push((
                         *bb_idx,
                         Statement {
@@ -107,13 +115,18 @@
                 }
             }
 
-            let terminator = bb.terminator_mut();
+            let [bb_cond, bb_otherwise] = match new_targets.all_targets() {
+                [a, b] => [*a, *b],
+                e => bug!("expected 2 switch targets, got: {:?}", e),
+            };
 
+            let targets = SwitchTargets::new(iter::once((new_value, bb_cond)), bb_otherwise);
+
+            let terminator = bb.terminator_mut();
             terminator.kind = TerminatorKind::SwitchInt {
                 discr: Operand::Move(opt.to_switch_on),
                 switch_ty: opt.branch_value_ty,
-                values: vec![new_value].into(),
-                targets: new_targets,
+                targets,
             };
         }
 
@@ -138,15 +151,13 @@
             .iter_enumerated()
             .filter_map(|(bb_idx, bb)| {
                 // find switch
-                let (place_switched_on, values, targets, place_switched_on_moved) = match &bb
-                    .terminator()
-                    .kind
-                {
-                    rustc_middle::mir::TerminatorKind::SwitchInt {
-                        discr, values, targets, ..
-                    } => Some((discr.place()?, values, targets, discr.is_move())),
-                    _ => None,
-                }?;
+                let (place_switched_on, targets, place_switched_on_moved) =
+                    match &bb.terminator().kind {
+                        rustc_middle::mir::TerminatorKind::SwitchInt { discr, targets, .. } => {
+                            Some((discr.place()?, targets, discr.is_move()))
+                        }
+                        _ => None,
+                    }?;
 
                 // find the statement that assigns the place being switched on
                 bb.statements.iter().enumerate().rev().find_map(|(stmt_idx, stmt)| {
@@ -167,7 +178,6 @@
                                         branch_value_scalar,
                                         branch_value_ty,
                                         op: *op,
-                                        values: values.clone().into_owned(),
                                         targets: targets.clone(),
                                     })
                                 }
@@ -220,8 +230,6 @@
     branch_value_ty: Ty<'tcx>,
     /// Either Eq or Ne
     op: BinOp,
-    /// Current values used in the switch target. This needs to be replaced with the branch_value
-    values: Vec<u128>,
     /// Current targets used in the switch
-    targets: Vec<BasicBlock>,
+    targets: SwitchTargets,
 }
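The SimplifyComparisonIntegral change rebuilds the terminator with `SwitchTargets::new` and, depending on whether the original comparison was `Eq` or `Ne`, swaps the two targets so the branch taken when the discriminant equals the compared value comes first. The toy sketch below captures only that reordering decision, with bare `usize` block ids and invented helper names.

    #[derive(Clone, Copy)]
    enum BinOp { Eq, Ne }

    /// Given the targets of `switch(x == K)` / `switch(x != K)` -- `false_bb` for the
    /// comparison being false, `true_bb` for it being true -- return the targets of the
    /// rewritten `switch(x)`: first the block taken when `x == K`, then the otherwise block.
    fn rewrite_targets(op: BinOp, false_bb: usize, true_bb: usize) -> (usize, usize) {
        match op {
            // `x == K` is true exactly when `x` is `K`.
            BinOp::Eq => (true_bb, false_bb),
            // `x != K` is false exactly when `x` is `K`.
            BinOp::Ne => (false_bb, true_bb),
        }
    }

    fn main() {
        assert_eq!(rewrite_targets(BinOp::Eq, 3, 4), (4, 3));
        assert_eq!(rewrite_targets(BinOp::Ne, 3, 4), (3, 4));
    }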
diff --git a/compiler/rustc_mir/src/transform/simplify_try.rs b/compiler/rustc_mir/src/transform/simplify_try.rs
index 45fa3b7..27bb1de 100644
--- a/compiler/rustc_mir/src/transform/simplify_try.rs
+++ b/compiler/rustc_mir/src/transform/simplify_try.rs
@@ -9,7 +9,7 @@
 //!
 //! into just `x`.
 
-use crate::transform::{simplify, MirPass, MirSource};
+use crate::transform::{simplify, MirPass};
 use itertools::Itertools as _;
 use rustc_index::{bit_set::BitSet, vec::IndexVec};
 use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
@@ -367,13 +367,15 @@
 }
 
 impl<'tcx> MirPass<'tcx> for SimplifyArmIdentity {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // FIXME(77359): This optimization can result in unsoundness.
         if !tcx.sess.opts.debugging_opts.unsound_mir_opts {
             return;
         }
 
+        let source = body.source;
         trace!("running SimplifyArmIdentity on {:?}", source);
+
         let local_uses = LocalUseCounter::get_local_uses(body);
         let (basic_blocks, local_decls, debug_info) =
             body.basic_blocks_local_decls_mut_and_var_debug_info();
@@ -528,8 +530,8 @@
 pub struct SimplifyBranchSame;
 
 impl<'tcx> MirPass<'tcx> for SimplifyBranchSame {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        trace!("Running SimplifyBranchSame on {:?}", source);
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        trace!("Running SimplifyBranchSame on {:?}", body.source);
         let finder = SimplifyBranchSameOptimizationFinder { body, tcx };
         let opts = finder.find();
 
@@ -574,15 +576,13 @@
             .iter_enumerated()
             .filter_map(|(bb_idx, bb)| {
                 let (discr_switched_on, targets_and_values) = match &bb.terminator().kind {
-                    TerminatorKind::SwitchInt { targets, discr, values, .. } => {
-                        // if values.len() == targets.len() - 1, we need to include None where no value is present
-                        // such that the zip does not throw away targets. If no `otherwise` case is in targets, the zip will simply throw away the added None
-                        let values_extended = values.iter().map(|x|Some(*x)).chain(once(None));
-                        let targets_and_values:Vec<_> = targets.iter().zip(values_extended)
-                            .map(|(target, value)| SwitchTargetAndValue{target:*target, value})
+                    TerminatorKind::SwitchInt { targets, discr, .. } => {
+                        let targets_and_values: Vec<_> = targets.iter()
+                            .map(|(val, target)| SwitchTargetAndValue { target, value: Some(val) })
+                            .chain(once(SwitchTargetAndValue { target: targets.otherwise(), value: None }))
                             .collect();
-                        assert_eq!(targets.len(), targets_and_values.len());
-                        (discr, targets_and_values)},
+                        (discr, targets_and_values)
+                    },
                     _ => return None,
                 };
 
diff --git a/compiler/rustc_mir/src/transform/uninhabited_enum_branching.rs b/compiler/rustc_mir/src/transform/uninhabited_enum_branching.rs
index 4cca4d2..465832c 100644
--- a/compiler/rustc_mir/src/transform/uninhabited_enum_branching.rs
+++ b/compiler/rustc_mir/src/transform/uninhabited_enum_branching.rs
@@ -1,8 +1,10 @@
 //! A pass that eliminates branches on uninhabited enum variants.
 
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
+use rustc_data_structures::stable_set::FxHashSet;
 use rustc_middle::mir::{
-    BasicBlock, BasicBlockData, Body, Local, Operand, Rvalue, StatementKind, TerminatorKind,
+    BasicBlock, BasicBlockData, Body, Local, Operand, Rvalue, StatementKind, SwitchTargets,
+    TerminatorKind,
 };
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::{Ty, TyCtxt};
@@ -52,9 +54,13 @@
     layout: &TyAndLayout<'tcx>,
     ty: Ty<'tcx>,
     tcx: TyCtxt<'tcx>,
-) -> Vec<u128> {
+) -> FxHashSet<u128> {
     match &layout.variants {
-        Variants::Single { index } => vec![index.as_u32() as u128],
+        Variants::Single { index } => {
+            let mut res = FxHashSet::default();
+            res.insert(index.as_u32() as u128);
+            res
+        }
         Variants::Multiple { variants, .. } => variants
             .iter_enumerated()
             .filter_map(|(idx, layout)| {
@@ -66,12 +72,12 @@
 }
 
 impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        if source.promoted.is_some() {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        if body.source.promoted.is_some() {
             return;
         }
 
-        trace!("UninhabitedEnumBranching starting for {:?}", source);
+        trace!("UninhabitedEnumBranching starting for {:?}", body.source);
 
         let basic_block_count = body.basic_blocks().len();
 
@@ -86,7 +92,7 @@
                     continue;
                 };
 
-            let layout = tcx.layout_of(tcx.param_env(source.def_id()).and(discriminant_ty));
+            let layout = tcx.layout_of(tcx.param_env(body.source.def_id()).and(discriminant_ty));
 
             let allowed_variants = if let Ok(layout) = layout {
                 variant_discriminants(&layout, discriminant_ty, tcx)
@@ -96,21 +102,15 @@
 
             trace!("allowed_variants = {:?}", allowed_variants);
 
-            if let TerminatorKind::SwitchInt { values, targets, .. } =
+            if let TerminatorKind::SwitchInt { targets, .. } =
                 &mut body.basic_blocks_mut()[bb].terminator_mut().kind
             {
-                // take otherwise out early
-                let otherwise = targets.pop().unwrap();
-                assert_eq!(targets.len(), values.len());
-                let mut i = 0;
-                targets.retain(|_| {
-                    let keep = allowed_variants.contains(&values[i]);
-                    i += 1;
-                    keep
-                });
-                targets.push(otherwise);
+                let new_targets = SwitchTargets::new(
+                    targets.iter().filter(|(val, _)| allowed_variants.contains(val)),
+                    targets.otherwise(),
+                );
 
-                values.to_mut().retain(|var| allowed_variants.contains(var));
+                *targets = new_targets;
             } else {
                 unreachable!()
             }
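Here the set of inhabited variant discriminants becomes an `FxHashSet`, and the switch is rebuilt by filtering its `(value, target)` pairs against that set rather than by the old retain-with-index dance over parallel vectors. A standalone sketch of the filtering step, using only std's `HashSet` and plain tuples, follows.

    use std::collections::HashSet;

    /// Keep only the switch arms whose value is an allowed (inhabited) discriminant.
    /// Each arm is a `(value, target_block)` pair; `otherwise` is kept unconditionally.
    fn filter_switch_arms(
        arms: &[(u128, usize)],
        otherwise: usize,
        allowed: &HashSet<u128>,
    ) -> (Vec<(u128, usize)>, usize) {
        let kept = arms.iter().copied().filter(|(val, _)| allowed.contains(val)).collect();
        (kept, otherwise)
    }

    fn main() {
        let allowed: HashSet<u128> = [0, 2].into_iter().collect();
        let arms = [(0, 10), (1, 11), (2, 12)];
        let (kept, otherwise) = filter_switch_arms(&arms, 13, &allowed);
        let expected: Vec<(u128, usize)> = vec![(0, 10), (2, 12)];
        assert_eq!(kept, expected);
        assert_eq!(otherwise, 13);
    }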
diff --git a/compiler/rustc_mir/src/transform/unreachable_prop.rs b/compiler/rustc_mir/src/transform/unreachable_prop.rs
index fa362c6..f6d39da 100644
--- a/compiler/rustc_mir/src/transform/unreachable_prop.rs
+++ b/compiler/rustc_mir/src/transform/unreachable_prop.rs
@@ -3,16 +3,15 @@
 //! post-order traversal of the blocks.
 
 use crate::transform::simplify;
-use crate::transform::{MirPass, MirSource};
+use crate::transform::MirPass;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
-use std::borrow::Cow;
 
 pub struct UnreachablePropagation;
 
 impl MirPass<'_> for UnreachablePropagation {
-    fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
+    fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         if tcx.sess.opts.debugging_opts.mir_opt_level < 3 {
             // Enable only under -Zmir-opt-level=3 as in some cases (check the deeply-nested-opt
             // perf benchmark) LLVM may spend quite a lot of time optimizing the generated code.
@@ -69,14 +68,15 @@
 {
     let terminator = match *terminator_kind {
         TerminatorKind::Goto { target } if predicate(target) => TerminatorKind::Unreachable,
-        TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
-            let original_targets_len = targets.len();
-            let (otherwise, targets) = targets.split_last().unwrap();
-            let (mut values, mut targets): (Vec<_>, Vec<_>) =
-                values.iter().zip(targets.iter()).filter(|(_, &t)| !predicate(t)).unzip();
+        TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
+            let otherwise = targets.otherwise();
 
-            if !predicate(*otherwise) {
-                targets.push(*otherwise);
+            let original_targets_len = targets.iter().len() + 1;
+            let (mut values, mut targets): (Vec<_>, Vec<_>) =
+                targets.iter().filter(|(_, bb)| !predicate(*bb)).unzip();
+
+            if !predicate(otherwise) {
+                targets.push(otherwise);
             } else {
                 values.pop();
             }
@@ -91,8 +91,10 @@
                 TerminatorKind::SwitchInt {
                     discr: discr.clone(),
                     switch_ty,
-                    values: Cow::from(values),
-                    targets,
+                    targets: SwitchTargets::new(
+                        values.iter().copied().zip(targets.iter().copied()),
+                        *targets.last().unwrap(),
+                    ),
                 }
             } else {
                 return None;
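UnreachablePropagation now filters a switch's `(value, target)` pairs through a predicate ("does this target lead to an unreachable block?") and only rebuilds the terminator when something was actually removed. That filtering core is easy to model standalone, as in the hedged sketch below with toy block ids and no rustc types.

    use std::collections::HashSet;

    /// Drop switch arms whose target is known to be unreachable. Returns `None` when
    /// nothing changed, so the caller can leave the original terminator alone.
    fn prune_unreachable_arms(
        arms: &[(u128, usize)],
        otherwise: usize,
        unreachable: &HashSet<usize>,
    ) -> Option<(Vec<(u128, usize)>, Option<usize>)> {
        let kept: Vec<_> =
            arms.iter().copied().filter(|(_, target)| !unreachable.contains(target)).collect();
        let kept_otherwise = if unreachable.contains(&otherwise) { None } else { Some(otherwise) };

        if kept.len() == arms.len() && kept_otherwise.is_some() {
            None
        } else {
            Some((kept, kept_otherwise))
        }
    }

    fn main() {
        let unreachable: HashSet<usize> = [11].into_iter().collect();
        let pruned = prune_unreachable_arms(&[(0, 10), (1, 11)], 12, &unreachable);
        let expected: Option<(Vec<(u128, usize)>, Option<usize>)> = Some((vec![(0, 10)], Some(12)));
        assert_eq!(pruned, expected);
    }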
diff --git a/compiler/rustc_mir/src/transform/validate.rs b/compiler/rustc_mir/src/transform/validate.rs
index 94018a3..e1e6e71 100644
--- a/compiler/rustc_mir/src/transform/validate.rs
+++ b/compiler/rustc_mir/src/transform/validate.rs
@@ -4,15 +4,19 @@
 use crate::dataflow::{Analysis, ResultsCursor};
 use crate::util::storage::AlwaysLiveLocals;
 
-use super::{MirPass, MirSource};
+use super::MirPass;
+use rustc_index::bit_set::BitSet;
 use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::traversal;
 use rustc_middle::mir::visit::{PlaceContext, Visitor};
 use rustc_middle::mir::{
-    AggregateKind, BasicBlock, Body, BorrowKind, Local, Location, MirPhase, Operand, Rvalue,
-    Statement, StatementKind, Terminator, TerminatorKind, VarDebugInfo,
+    AggregateKind, BasicBlock, Body, BorrowKind, Local, Location, MirPhase, Operand, PlaceRef,
+    Rvalue, SourceScope, Statement, StatementKind, Terminator, TerminatorKind, VarDebugInfo,
 };
 use rustc_middle::ty::fold::BottomUpFolder;
 use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_target::abi::Size;
 
 #[derive(Copy, Clone, Debug)]
 enum EdgeKind {
@@ -32,19 +36,28 @@
 }
 
 impl<'tcx> MirPass<'tcx> for Validator {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
-        let def_id = source.def_id();
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let def_id = body.source.def_id();
         let param_env = tcx.param_env(def_id);
         let mir_phase = self.mir_phase;
 
         let always_live_locals = AlwaysLiveLocals::new(body);
         let storage_liveness = MaybeStorageLive::new(always_live_locals)
-            .into_engine(tcx, body, def_id)
+            .into_engine(tcx, body)
             .iterate_to_fixpoint()
             .into_results_cursor(body);
 
-        TypeChecker { when: &self.when, source, body, tcx, param_env, mir_phase, storage_liveness }
-            .visit_body(body);
+        TypeChecker {
+            when: &self.when,
+            body,
+            tcx,
+            param_env,
+            mir_phase,
+            reachable_blocks: traversal::reachable_as_bitset(body),
+            storage_liveness,
+            place_cache: Vec::new(),
+        }
+        .visit_body(body);
     }
 }
 
@@ -72,9 +85,12 @@
             param_env,
             ty.fold_with(&mut BottomUpFolder {
                 tcx,
-                // We just erase all late-bound lifetimes, but this is not fully correct (FIXME):
-                // lifetimes in invariant positions could matter (e.g. through associated types).
-                // We rely on the fact that layout was confirmed to be equal above.
+                // FIXME: We erase all late-bound lifetimes, but this is not fully correct.
+                // If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
+                // this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
+                // since one may have an `impl SomeTrait for fn(&32)` and
+                // `impl SomeTrait for fn(&'static u32)` at the same time which
+                // specify distinct values for Assoc. (See also #56105)
                 lt_op: |_| tcx.lifetimes.re_erased,
                 // Leave consts and types unchanged.
                 ct_op: |ct| ct,
@@ -87,12 +103,13 @@
 
 struct TypeChecker<'a, 'tcx> {
     when: &'a str,
-    source: MirSource<'tcx>,
     body: &'a Body<'tcx>,
     tcx: TyCtxt<'tcx>,
     param_env: ParamEnv<'tcx>,
     mir_phase: MirPhase,
+    reachable_blocks: BitSet<BasicBlock>,
     storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
+    place_cache: Vec<PlaceRef<'tcx>>,
 }
 
 impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
@@ -104,7 +121,7 @@
             span,
             &format!(
                 "broken MIR in {:?} ({}) at {:?}:\n{}",
-                self.source.instance,
+                self.body.source.instance,
                 self.when,
                 location,
                 msg.as_ref()
@@ -166,7 +183,7 @@
 
 impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
     fn visit_local(&mut self, local: &Local, context: PlaceContext, location: Location) {
-        if context.is_use() {
+        if self.reachable_blocks.contains(location.block) && context.is_use() {
             // Uses of locals must occur while the local's storage is allocated.
             self.storage_liveness.seek_after_primary_effect(location);
             let locals_with_storage = self.storage_liveness.get();
@@ -176,19 +193,23 @@
         }
     }
 
-    fn visit_var_debug_info(&mut self, _var_debug_info: &VarDebugInfo<'tcx>) {
+    fn visit_var_debug_info(&mut self, var_debug_info: &VarDebugInfo<'tcx>) {
         // Debuginfo can contain field projections, which count as a use of the base local. Skip
         // debuginfo so that we avoid the storage liveness assertion in that case.
+        self.visit_source_info(&var_debug_info.source_info);
     }
 
     fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
-        // `Operand::Copy` is only supposed to be used with `Copy` types.
-        if let Operand::Copy(place) = operand {
-            let ty = place.ty(&self.body.local_decls, self.tcx).ty;
-            let span = self.body.source_info(location).span;
+        // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
+        if self.tcx.sess.opts.debugging_opts.validate_mir {
+            // `Operand::Copy` is only supposed to be used with `Copy` types.
+            if let Operand::Copy(place) = operand {
+                let ty = place.ty(&self.body.local_decls, self.tcx).ty;
+                let span = self.body.source_info(location).span;
 
-            if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
-                self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+                if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
+                    self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+                }
             }
         }
 
@@ -274,6 +295,8 @@
             }
             _ => {}
         }
+
+        self.super_statement(statement, location);
     }
 
     fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
@@ -281,7 +304,7 @@
             TerminatorKind::Goto { target } => {
                 self.check_edge(location, *target, EdgeKind::Normal);
             }
-            TerminatorKind::SwitchInt { targets, values, switch_ty, discr } => {
+            TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
                 let ty = discr.ty(&self.body.local_decls, self.tcx);
                 if ty != *switch_ty {
                     self.fail(
@@ -292,19 +315,28 @@
                         ),
                     );
                 }
-                if targets.len() != values.len() + 1 {
-                    self.fail(
-                        location,
-                        format!(
-                            "encountered `SwitchInt` terminator with {} values, but {} targets (should be values+1)",
-                            values.len(),
-                            targets.len(),
-                        ),
-                    );
+
+                let target_width = self.tcx.sess.target.pointer_width;
+
+                let size = Size::from_bits(match switch_ty.kind() {
+                    ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
+                    ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
+                    ty::Char => 32,
+                    ty::Bool => 1,
+                    other => bug!("unhandled type: {:?}", other),
+                });
+
+                for (value, target) in targets.iter() {
+                    if Scalar::<()>::try_from_uint(value, size).is_none() {
+                        self.fail(
+                            location,
+                            format!("the value {:#x} is not a proper {:?}", value, switch_ty),
+                        )
+                    }
+
+                    self.check_edge(location, target, EdgeKind::Normal);
                 }
-                for target in targets {
-                    self.check_edge(location, *target, EdgeKind::Normal);
-                }
+                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
             }
             TerminatorKind::Drop { target, unwind, .. } => {
                 self.check_edge(location, *target, EdgeKind::Normal);
@@ -324,7 +356,7 @@
                     self.check_edge(location, *unwind, EdgeKind::Unwind);
                 }
             }
-            TerminatorKind::Call { func, destination, cleanup, .. } => {
+            TerminatorKind::Call { func, args, destination, cleanup, .. } => {
                 let func_ty = func.ty(&self.body.local_decls, self.tcx);
                 match func_ty.kind() {
                     ty::FnPtr(..) | ty::FnDef(..) => {}
@@ -339,6 +371,32 @@
                 if let Some(cleanup) = cleanup {
                     self.check_edge(location, *cleanup, EdgeKind::Unwind);
                 }
+
+                // The call destination place and Operand::Move place used as an argument might be
+                // passed by reference to the callee. Consequently, they must be non-overlapping.
+                // Currently this simply checks for duplicate places.
+                self.place_cache.clear();
+                if let Some((destination, _)) = destination {
+                    self.place_cache.push(destination.as_ref());
+                }
+                for arg in args {
+                    if let Operand::Move(place) = arg {
+                        self.place_cache.push(place.as_ref());
+                    }
+                }
+                let all_len = self.place_cache.len();
+                self.place_cache.sort_unstable();
+                self.place_cache.dedup();
+                let has_duplicates = all_len != self.place_cache.len();
+                if has_duplicates {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered overlapping memory in `Call` terminator: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
             }
             TerminatorKind::Assert { cond, target, cleanup, .. } => {
                 let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
@@ -387,5 +445,19 @@
             | TerminatorKind::Unreachable
             | TerminatorKind::GeneratorDrop => {}
         }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_source_scope(&mut self, scope: &SourceScope) {
+        if self.body.source_scopes.get(*scope).is_none() {
+            self.tcx.sess.diagnostic().delay_span_bug(
+                self.body.span,
+                &format!(
+                    "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+                    self.body.source.instance, self.when, scope,
+                ),
+            );
+        }
     }
 }
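
The SwitchInt validation added above rejects any switch value that does not fit in the discriminant type's bit width, by round-tripping it through `Scalar::try_from_uint` at the computed `Size`. A minimal standalone sketch of the underlying predicate (the helper name and the bare `u128`/bit-width types are ours; no rustc internals are assumed):

fn fits_in_bits(value: u128, bits: u64) -> bool {
    // A value is a proper `bits`-bit switch value iff it is already below 2^bits,
    // i.e. truncating it to that width would leave it unchanged.
    assert!(bits >= 1 && bits <= 128);
    if bits == 128 { true } else { value < (1u128 << bits) }
}

fn main() {
    assert!(fits_in_bits(1, 1)); // `bool` is switched on as a 1-bit value
    assert!(!fits_in_bits(2, 1)); // 2 is not a proper `bool` value
    assert!(fits_in_bits(0x10FFFF, 32)); // `char` is switched on as a 32-bit value
    assert!(!fits_in_bits(u128::MAX, 32));
}
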
diff --git a/compiler/rustc_mir/src/util/def_use.rs b/compiler/rustc_mir/src/util/def_use.rs
deleted file mode 100644
index b4448ea..0000000
--- a/compiler/rustc_mir/src/util/def_use.rs
+++ /dev/null
@@ -1,158 +0,0 @@
-//! Def-use analysis.
-
-use rustc_index::vec::IndexVec;
-use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
-use rustc_middle::mir::{Body, Local, Location, VarDebugInfo};
-use rustc_middle::ty::TyCtxt;
-use std::mem;
-
-pub struct DefUseAnalysis {
-    info: IndexVec<Local, Info>,
-}
-
-#[derive(Clone)]
-pub struct Info {
-    // FIXME(eddyb) use smallvec where possible.
-    pub defs_and_uses: Vec<Use>,
-    var_debug_info_indices: Vec<usize>,
-}
-
-#[derive(Clone)]
-pub struct Use {
-    pub context: PlaceContext,
-    pub location: Location,
-}
-
-impl DefUseAnalysis {
-    pub fn new(body: &Body<'_>) -> DefUseAnalysis {
-        DefUseAnalysis { info: IndexVec::from_elem_n(Info::new(), body.local_decls.len()) }
-    }
-
-    pub fn analyze(&mut self, body: &Body<'_>) {
-        self.clear();
-
-        let mut finder = DefUseFinder {
-            info: mem::take(&mut self.info),
-            var_debug_info_index: 0,
-            in_var_debug_info: false,
-        };
-        finder.visit_body(&body);
-        self.info = finder.info
-    }
-
-    fn clear(&mut self) {
-        for info in &mut self.info {
-            info.clear();
-        }
-    }
-
-    pub fn local_info(&self, local: Local) -> &Info {
-        &self.info[local]
-    }
-
-    fn mutate_defs_and_uses(
-        &self,
-        local: Local,
-        body: &mut Body<'tcx>,
-        new_local: Local,
-        tcx: TyCtxt<'tcx>,
-    ) {
-        let mut visitor = MutateUseVisitor::new(local, new_local, tcx);
-        let info = &self.info[local];
-        for place_use in &info.defs_and_uses {
-            visitor.visit_location(body, place_use.location)
-        }
-        // Update debuginfo as well, alongside defs/uses.
-        for &i in &info.var_debug_info_indices {
-            visitor.visit_var_debug_info(&mut body.var_debug_info[i]);
-        }
-    }
-
-    // FIXME(pcwalton): this should update the def-use chains.
-    pub fn replace_all_defs_and_uses_with(
-        &self,
-        local: Local,
-        body: &mut Body<'tcx>,
-        new_local: Local,
-        tcx: TyCtxt<'tcx>,
-    ) {
-        self.mutate_defs_and_uses(local, body, new_local, tcx)
-    }
-}
-
-struct DefUseFinder {
-    info: IndexVec<Local, Info>,
-    var_debug_info_index: usize,
-    in_var_debug_info: bool,
-}
-
-impl Visitor<'_> for DefUseFinder {
-    fn visit_local(&mut self, &local: &Local, context: PlaceContext, location: Location) {
-        let info = &mut self.info[local];
-        if self.in_var_debug_info {
-            info.var_debug_info_indices.push(self.var_debug_info_index);
-        } else {
-            info.defs_and_uses.push(Use { context, location });
-        }
-    }
-    fn visit_var_debug_info(&mut self, var_debug_info: &VarDebugInfo<'tcx>) {
-        assert!(!self.in_var_debug_info);
-        self.in_var_debug_info = true;
-        self.super_var_debug_info(var_debug_info);
-        self.in_var_debug_info = false;
-        self.var_debug_info_index += 1;
-    }
-}
-
-impl Info {
-    fn new() -> Info {
-        Info { defs_and_uses: vec![], var_debug_info_indices: vec![] }
-    }
-
-    fn clear(&mut self) {
-        self.defs_and_uses.clear();
-        self.var_debug_info_indices.clear();
-    }
-
-    pub fn def_count(&self) -> usize {
-        self.defs_and_uses.iter().filter(|place_use| place_use.context.is_mutating_use()).count()
-    }
-
-    pub fn def_count_not_including_drop(&self) -> usize {
-        self.defs_not_including_drop().count()
-    }
-
-    pub fn defs_not_including_drop(&self) -> impl Iterator<Item = &Use> {
-        self.defs_and_uses
-            .iter()
-            .filter(|place_use| place_use.context.is_mutating_use() && !place_use.context.is_drop())
-    }
-
-    pub fn use_count(&self) -> usize {
-        self.defs_and_uses.iter().filter(|place_use| place_use.context.is_nonmutating_use()).count()
-    }
-}
-
-struct MutateUseVisitor<'tcx> {
-    query: Local,
-    new_local: Local,
-    tcx: TyCtxt<'tcx>,
-}
-
-impl MutateUseVisitor<'tcx> {
-    fn new(query: Local, new_local: Local, tcx: TyCtxt<'tcx>) -> MutateUseVisitor<'tcx> {
-        MutateUseVisitor { query, new_local, tcx }
-    }
-}
-
-impl MutVisitor<'tcx> for MutateUseVisitor<'tcx> {
-    fn tcx(&self) -> TyCtxt<'tcx> {
-        self.tcx
-    }
-
-    fn visit_local(&mut self, local: &mut Local, _context: PlaceContext, _location: Location) {
-        if *local == self.query {
-            *local = self.new_local;
-        }
-    }
-}
diff --git a/compiler/rustc_mir/src/util/elaborate_drops.rs b/compiler/rustc_mir/src/util/elaborate_drops.rs
index bf0a6be..0e2d8e5 100644
--- a/compiler/rustc_mir/src/util/elaborate_drops.rs
+++ b/compiler/rustc_mir/src/util/elaborate_drops.rs
@@ -231,8 +231,6 @@
                     .patch_terminator(bb, TerminatorKind::Goto { target: self.succ });
             }
             DropStyle::Static => {
-                let loc = self.terminator_loc(bb);
-                self.elaborator.clear_drop_flag(loc, self.path, DropFlagMode::Deep);
                 self.elaborator.patch().patch_terminator(
                     bb,
                     TerminatorKind::Drop {
@@ -243,9 +241,7 @@
                 );
             }
             DropStyle::Conditional => {
-                let unwind = self.unwind; // FIXME(#43234)
-                let succ = self.succ;
-                let drop_bb = self.complete_drop(Some(DropFlagMode::Deep), succ, unwind);
+                let drop_bb = self.complete_drop(self.succ, self.unwind);
                 self.elaborator
                     .patch()
                     .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
@@ -317,7 +313,7 @@
                 // our own drop flag.
                 path: self.path,
             }
-            .complete_drop(None, succ, unwind)
+            .complete_drop(succ, unwind)
         }
     }
 
@@ -346,13 +342,7 @@
         // Clear the "master" drop flag at the end. This is needed
         // because the "master" drop protects the ADT's discriminant,
         // which is invalidated after the ADT is dropped.
-        let (succ, unwind) = (self.succ, self.unwind); // FIXME(#43234)
-        (
-            self.drop_flag_reset_block(DropFlagMode::Shallow, succ, unwind),
-            unwind.map(|unwind| {
-                self.drop_flag_reset_block(DropFlagMode::Shallow, unwind, Unwind::InCleanup)
-            }),
-        )
+        (self.drop_flag_reset_block(DropFlagMode::Shallow, self.succ, self.unwind), self.unwind)
     }
 
     /// Creates a full drop ladder, consisting of 2 connected half-drop-ladders
@@ -598,8 +588,10 @@
                 kind: TerminatorKind::SwitchInt {
                     discr: Operand::Move(discr),
                     switch_ty: discr_ty,
-                    values: From::from(values.to_owned()),
-                    targets: blocks,
+                    targets: SwitchTargets::new(
+                        values.iter().copied().zip(blocks.iter().copied()),
+                        *blocks.last().unwrap(),
+                    ),
                 },
             }),
             is_cleanup: unwind.is_cleanup(),
@@ -768,8 +760,6 @@
         let elem_size = Place::from(self.new_temp(tcx.types.usize));
         let len = Place::from(self.new_temp(tcx.types.usize));
 
-        static USIZE_SWITCH_ZERO: &[u128] = &[0];
-
         let base_block = BasicBlockData {
             statements: vec![
                 self.assign(elem_size, Rvalue::NullaryOp(NullOp::SizeOf, ety)),
@@ -781,11 +771,11 @@
                 kind: TerminatorKind::SwitchInt {
                     discr: move_(elem_size),
                     switch_ty: tcx.types.usize,
-                    values: From::from(USIZE_SWITCH_ZERO),
-                    targets: vec![
+                    targets: SwitchTargets::static_if(
+                        0,
                         self.drop_loop_pair(ety, false, len),
                         self.drop_loop_pair(ety, true, len),
-                    ],
+                    ),
                 },
             }),
         };
@@ -884,11 +874,7 @@
                     self.open_drop_for_adt(def, substs)
                 }
             }
-            ty::Dynamic(..) => {
-                let unwind = self.unwind; // FIXME(#43234)
-                let succ = self.succ;
-                self.complete_drop(Some(DropFlagMode::Deep), succ, unwind)
-            }
+            ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind),
             ty::Array(ety, size) => {
                 let size = size.try_eval_usize(self.tcx(), self.elaborator.param_env());
                 self.open_drop_for_array(ety, size)
@@ -899,20 +885,10 @@
         }
     }
 
-    fn complete_drop(
-        &mut self,
-        drop_mode: Option<DropFlagMode>,
-        succ: BasicBlock,
-        unwind: Unwind,
-    ) -> BasicBlock {
-        debug!("complete_drop({:?},{:?})", self, drop_mode);
+    fn complete_drop(&mut self, succ: BasicBlock, unwind: Unwind) -> BasicBlock {
+        debug!("complete_drop(succ={:?}, unwind={:?})", succ, unwind);
 
         let drop_block = self.drop_block(succ, unwind);
-        let drop_block = if let Some(mode) = drop_mode {
-            self.drop_flag_reset_block(mode, drop_block, unwind)
-        } else {
-            drop_block
-        };
 
         self.drop_flag_test_block(drop_block, succ, unwind)
     }
@@ -927,6 +903,11 @@
     ) -> BasicBlock {
         debug!("drop_flag_reset_block({:?},{:?})", self, mode);
 
+        if unwind.is_cleanup() {
+            // The drop flag isn't read again on the unwind path, so don't
+            // bother setting it.
+            return succ;
+        }
         let block = self.new_block(unwind, TerminatorKind::Goto { target: succ });
         let block_start = Location { block, statement_index: 0 };
         self.elaborator.clear_drop_flag(block_start, self.path, mode);
@@ -1044,11 +1025,6 @@
         self.elaborator.patch().new_temp(ty, self.source_info.span)
     }
 
-    fn terminator_loc(&mut self, bb: BasicBlock) -> Location {
-        let body = self.elaborator.body();
-        self.elaborator.patch().terminator_loc(body, bb)
-    }
-
     fn constant_usize(&self, val: u16) -> Operand<'tcx> {
         Operand::Constant(box Constant {
             span: self.source_info.span,
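
Both SwitchInt construction sites above switch from parallel `values`/`targets` vectors to `SwitchTargets::new` and `SwitchTargets::static_if`. A rough standalone mock of the shape this encodes (our own struct for illustration, not the rustc type), assuming one target per value plus a trailing `otherwise` block:

struct SwitchTargetsMock {
    values: Vec<u128>,
    targets: Vec<usize>, // one target per value, plus a trailing `otherwise` block
}

impl SwitchTargetsMock {
    fn new(pairs: impl Iterator<Item = (u128, usize)>, otherwise: usize) -> Self {
        let (values, mut targets): (Vec<_>, Vec<_>) = pairs.unzip();
        targets.push(otherwise);
        SwitchTargetsMock { values, targets }
    }

    fn static_if(value: u128, then_block: usize, else_block: usize) -> Self {
        Self::new(std::iter::once((value, then_block)), else_block)
    }

    fn otherwise(&self) -> usize {
        *self.targets.last().unwrap()
    }
}

fn main() {
    // Mirrors the `elem_size == 0` branch built in `open_drop_for_array` above.
    let t = SwitchTargetsMock::static_if(0, 1, 2);
    assert_eq!(t.values, vec![0u128]);
    assert_eq!(t.otherwise(), 2);
}
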
diff --git a/compiler/rustc_mir/src/util/generic_graphviz.rs b/compiler/rustc_mir/src/util/generic_graphviz.rs
new file mode 100644
index 0000000..8bd4a51
--- /dev/null
+++ b/compiler/rustc_mir/src/util/generic_graphviz.rs
@@ -0,0 +1,185 @@
+use rustc_data_structures::graph::{self, iterate};
+use rustc_graphviz as dot;
+use rustc_middle::ty::TyCtxt;
+use std::io::{self, Write};
+
+pub struct GraphvizWriter<
+    'a,
+    G: graph::DirectedGraph + graph::WithSuccessors + graph::WithStartNode + graph::WithNumNodes,
+    NodeContentFn: Fn(<G as rustc_data_structures::graph::DirectedGraph>::Node) -> Vec<String>,
+    EdgeLabelsFn: Fn(<G as rustc_data_structures::graph::DirectedGraph>::Node) -> Vec<String>,
+> {
+    graph: &'a G,
+    is_subgraph: bool,
+    graphviz_name: String,
+    graph_label: Option<String>,
+    node_content_fn: NodeContentFn,
+    edge_labels_fn: EdgeLabelsFn,
+}
+
+impl<
+    'a,
+    G: graph::DirectedGraph + graph::WithSuccessors + graph::WithStartNode + graph::WithNumNodes,
+    NodeContentFn: Fn(<G as rustc_data_structures::graph::DirectedGraph>::Node) -> Vec<String>,
+    EdgeLabelsFn: Fn(<G as rustc_data_structures::graph::DirectedGraph>::Node) -> Vec<String>,
+> GraphvizWriter<'a, G, NodeContentFn, EdgeLabelsFn>
+{
+    pub fn new(
+        graph: &'a G,
+        graphviz_name: &str,
+        node_content_fn: NodeContentFn,
+        edge_labels_fn: EdgeLabelsFn,
+    ) -> Self {
+        Self {
+            graph,
+            is_subgraph: false,
+            graphviz_name: graphviz_name.to_owned(),
+            graph_label: None,
+            node_content_fn,
+            edge_labels_fn,
+        }
+    }
+
+    pub fn new_subgraph(
+        graph: &'a G,
+        graphviz_name: &str,
+        node_content_fn: NodeContentFn,
+        edge_labels_fn: EdgeLabelsFn,
+    ) -> Self {
+        Self {
+            graph,
+            is_subgraph: true,
+            graphviz_name: graphviz_name.to_owned(),
+            graph_label: None,
+            node_content_fn,
+            edge_labels_fn,
+        }
+    }
+
+    pub fn set_graph_label(&mut self, graph_label: &str) {
+        self.graph_label = Some(graph_label.to_owned());
+    }
+
+    /// Write a graphviz DOT of the graph
+    pub fn write_graphviz<'tcx, W>(&self, tcx: TyCtxt<'tcx>, w: &mut W) -> io::Result<()>
+    where
+        W: Write,
+    {
+        let kind = if self.is_subgraph { "subgraph" } else { "digraph" };
+        let cluster = if self.is_subgraph { "cluster_" } else { "" }; // Print border around graph
+        // FIXME(richkadel): If/when migrating the MIR graphviz to this generic implementation,
+        // prepend "Mir_" to the graphviz_safe_def_name(def_id)
+        writeln!(w, "{} {}{} {{", kind, cluster, self.graphviz_name)?;
+
+        // Global graph properties
+        let font = format!(r#"fontname="{}""#, tcx.sess.opts.debugging_opts.graphviz_font);
+        let mut graph_attrs = vec![&font[..]];
+        let mut content_attrs = vec![&font[..]];
+
+        let dark_mode = tcx.sess.opts.debugging_opts.graphviz_dark_mode;
+        if dark_mode {
+            graph_attrs.push(r#"bgcolor="black""#);
+            graph_attrs.push(r#"fontcolor="white""#);
+            content_attrs.push(r#"color="white""#);
+            content_attrs.push(r#"fontcolor="white""#);
+        }
+
+        writeln!(w, r#"    graph [{}];"#, graph_attrs.join(" "))?;
+        let content_attrs_str = content_attrs.join(" ");
+        writeln!(w, r#"    node [{}];"#, content_attrs_str)?;
+        writeln!(w, r#"    edge [{}];"#, content_attrs_str)?;
+
+        // Graph label
+        if let Some(graph_label) = &self.graph_label {
+            self.write_graph_label(graph_label, w)?;
+        }
+
+        // Nodes
+        for node in iterate::post_order_from(self.graph, self.graph.start_node()) {
+            self.write_node(node, dark_mode, w)?;
+        }
+
+        // Edges
+        for source in iterate::post_order_from(self.graph, self.graph.start_node()) {
+            self.write_edges(source, w)?;
+        }
+        writeln!(w, "}}")
+    }
+
+    /// Write a graphviz DOT node for the given node.
+    pub fn write_node<W>(&self, node: G::Node, dark_mode: bool, w: &mut W) -> io::Result<()>
+    where
+        W: Write,
+    {
+        // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
+        write!(w, r#"    {} [shape="none", label=<"#, self.node(node))?;
+
+        write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
+
+        // FIXME(richkadel): Need generic way to know if node header should have a different color
+        // let (blk, bgcolor) = if data.is_cleanup {
+        //    (format!("{:?} (cleanup)", node), "lightblue")
+        // } else {
+        //     let color = if dark_mode { "dimgray" } else { "gray" };
+        //     (format!("{:?}", node), color)
+        // };
+        let color = if dark_mode { "dimgray" } else { "gray" };
+        let (blk, bgcolor) = (format!("{:?}", node), color);
+        write!(
+            w,
+            r#"<tr><td bgcolor="{bgcolor}" {attrs} colspan="{colspan}">{blk}</td></tr>"#,
+            attrs = r#"align="center""#,
+            colspan = 1,
+            blk = blk,
+            bgcolor = bgcolor
+        )?;
+
+        for section in (self.node_content_fn)(node) {
+            write!(
+                w,
+                r#"<tr><td align="left" balign="left">{}</td></tr>"#,
+                dot::escape_html(&section).replace("\n", "<br/>")
+            )?;
+        }
+
+        // Close the table
+        write!(w, "</table>")?;
+
+        // Close the node label and the node itself.
+        writeln!(w, ">];")
+    }
+
+    /// Write graphviz DOT edges with labels between the given node and all of its successors.
+    fn write_edges<W>(&self, source: G::Node, w: &mut W) -> io::Result<()>
+    where
+        W: Write,
+    {
+        let edge_labels = (self.edge_labels_fn)(source);
+        for (index, target) in self.graph.successors(source).enumerate() {
+            let src = self.node(source);
+            let trg = self.node(target);
+            let escaped_edge_label = if let Some(edge_label) = edge_labels.get(index) {
+                dot::escape_html(edge_label).replace("\n", r#"<br align="left"/>"#)
+            } else {
+                "".to_owned()
+            };
+            writeln!(w, r#"    {} -> {} [label=<{}>];"#, src, trg, escaped_edge_label)?;
+        }
+        Ok(())
+    }
+
+    /// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
+    /// will appear below the graph.
+    fn write_graph_label<W>(&self, label: &str, w: &mut W) -> io::Result<()>
+    where
+        W: Write,
+    {
+        let lines = label.split('\n').map(|s| dot::escape_html(s)).collect::<Vec<_>>();
+        let escaped_label = lines.join(r#"<br align="left"/>"#);
+        writeln!(w, r#"    label=<<br/><br/>{}<br align="left"/><br/><br/><br/>>;"#, escaped_label)
+    }
+
+    fn node(&self, node: G::Node) -> String {
+        format!("{:?}__{}", node, self.graphviz_name)
+    }
+}
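
The node writer above fits multi-line section content into DOT's pseudo-HTML labels by HTML-escaping each section and turning newlines into `<br/>`. A simplified standalone sketch of that step (`escape_html` here is our own reduced helper, standing in for the `dot::escape_html` call used above):

fn escape_html(s: &str) -> String {
    s.replace('&', "&amp;").replace('<', "&lt;").replace('>', "&gt;")
}

fn node_row(section: &str) -> String {
    format!(
        r#"<tr><td align="left" balign="left">{}</td></tr>"#,
        escape_html(section).replace('\n', "<br/>")
    )
}

fn main() {
    assert_eq!(
        node_row("x < y\nreturn"),
        r#"<tr><td align="left" balign="left">x &lt; y<br/>return</td></tr>"#
    );
}
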
diff --git a/compiler/rustc_mir/src/util/graphviz.rs b/compiler/rustc_mir/src/util/graphviz.rs
index 4511962..625f1a3 100644
--- a/compiler/rustc_mir/src/util/graphviz.rs
+++ b/compiler/rustc_mir/src/util/graphviz.rs
@@ -22,7 +22,7 @@
 
     for def_id in def_ids {
         let body = &tcx.optimized_mir(def_id);
-        write_mir_fn_graphviz(tcx, def_id, body, use_subgraphs, w)?;
+        write_mir_fn_graphviz(tcx, body, use_subgraphs, w)?;
     }
 
     if use_subgraphs {
@@ -41,7 +41,6 @@
 /// Write a graphviz DOT graph of the MIR.
 pub fn write_mir_fn_graphviz<'tcx, W>(
     tcx: TyCtxt<'tcx>,
-    def_id: DefId,
     body: &Body<'_>,
     subgraph: bool,
     w: &mut W,
@@ -49,6 +48,7 @@
 where
     W: Write,
 {
+    let def_id = body.source.def_id();
     let kind = if subgraph { "subgraph" } else { "digraph" };
     let cluster = if subgraph { "cluster_" } else { "" }; // Prints a border around MIR
     let def_name = graphviz_safe_def_name(def_id);
@@ -62,6 +62,7 @@
     let dark_mode = tcx.sess.opts.debugging_opts.graphviz_dark_mode;
     if dark_mode {
         graph_attrs.push(r#"bgcolor="black""#);
+        graph_attrs.push(r#"fontcolor="white""#);
         content_attrs.push(r#"color="white""#);
         content_attrs.push(r#"fontcolor="white""#);
     }
@@ -72,16 +73,16 @@
     writeln!(w, r#"    edge [{}];"#, content_attrs_str)?;
 
     // Graph label
-    write_graph_label(tcx, def_id, body, w)?;
+    write_graph_label(tcx, body, w)?;
 
     // Nodes
     for (block, _) in body.basic_blocks().iter_enumerated() {
-        write_node(def_id, block, body, dark_mode, w)?;
+        write_node(block, body, dark_mode, w)?;
     }
 
     // Edges
     for (source, _) in body.basic_blocks().iter_enumerated() {
-        write_edges(def_id, source, body, w)?;
+        write_edges(source, body, w)?;
     }
     writeln!(w, "}}")
 }
@@ -111,13 +112,20 @@
     write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
 
     // Basic block number at the top.
+    let (blk, bgcolor) = if data.is_cleanup {
+        let color = if dark_mode { "royalblue" } else { "lightblue" };
+        (format!("{} (cleanup)", block.index()), color)
+    } else {
+        let color = if dark_mode { "dimgray" } else { "gray" };
+        (format!("{}", block.index()), color)
+    };
     write!(
         w,
         r#"<tr><td bgcolor="{bgcolor}" {attrs} colspan="{colspan}">{blk}</td></tr>"#,
-        bgcolor = if dark_mode { "dimgray" } else { "gray" },
         attrs = r#"align="center""#,
         colspan = num_cols,
-        blk = block.index()
+        blk = blk,
+        bgcolor = bgcolor
     )?;
 
     init(w)?;
@@ -145,12 +153,12 @@
 
 /// Write a graphviz DOT node for the given basic block.
 fn write_node<W: Write>(
-    def_id: DefId,
     block: BasicBlock,
     body: &Body<'_>,
     dark_mode: bool,
     w: &mut W,
 ) -> io::Result<()> {
+    let def_id = body.source.def_id();
     // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
     write!(w, r#"    {} [shape="none", label=<"#, node(def_id, block))?;
     write_node_label(block, body, dark_mode, w, 1, |_| Ok(()), |_| Ok(()))?;
@@ -159,12 +167,8 @@
 }
 
 /// Write graphviz DOT edges with labels between the given basic block and all of its successors.
-fn write_edges<W: Write>(
-    def_id: DefId,
-    source: BasicBlock,
-    body: &Body<'_>,
-    w: &mut W,
-) -> io::Result<()> {
+fn write_edges<W: Write>(source: BasicBlock, body: &Body<'_>, w: &mut W) -> io::Result<()> {
+    let def_id = body.source.def_id();
     let terminator = body[source].terminator();
     let labels = terminator.kind.fmt_successor_labels();
 
@@ -182,10 +186,11 @@
 /// all the variables and temporaries.
 fn write_graph_label<'tcx, W: Write>(
     tcx: TyCtxt<'tcx>,
-    def_id: DefId,
     body: &Body<'_>,
     w: &mut W,
 ) -> io::Result<()> {
+    let def_id = body.source.def_id();
+
     write!(w, "    label=<fn {}(", dot::escape_html(&tcx.def_path_str(def_id)))?;
 
     // fn argument types.
diff --git a/compiler/rustc_mir/src/util/mod.rs b/compiler/rustc_mir/src/util/mod.rs
index 699f3bc..aaee0bc 100644
--- a/compiler/rustc_mir/src/util/mod.rs
+++ b/compiler/rustc_mir/src/util/mod.rs
@@ -1,6 +1,5 @@
 pub mod aggregate;
 pub mod borrowck_errors;
-pub mod def_use;
 pub mod elaborate_drops;
 pub mod patch;
 pub mod storage;
@@ -8,6 +7,7 @@
 mod alignment;
 pub mod collect_writes;
 mod find_self_call;
+pub(crate) mod generic_graphviz;
 mod graphviz;
 pub(crate) mod pretty;
 pub(crate) mod spanview;
diff --git a/compiler/rustc_mir/src/util/pretty.rs b/compiler/rustc_mir/src/util/pretty.rs
index 49c644a..8bee841 100644
--- a/compiler/rustc_mir/src/util/pretty.rs
+++ b/compiler/rustc_mir/src/util/pretty.rs
@@ -19,6 +19,7 @@
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, TyCtxt, TypeFoldable, TypeVisitor};
 use rustc_target::abi::Size;
+use std::ops::ControlFlow;
 
 const INDENT: &str = "    ";
 /// Alignment for lining up comments following MIR statements
@@ -75,17 +76,16 @@
     pass_num: Option<&dyn Display>,
     pass_name: &str,
     disambiguator: &dyn Display,
-    source: MirSource<'tcx>,
     body: &Body<'tcx>,
     extra_data: F,
 ) where
     F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
 {
-    if !dump_enabled(tcx, pass_name, source.def_id()) {
+    if !dump_enabled(tcx, pass_name, body.source.def_id()) {
         return;
     }
 
-    dump_matched_mir_node(tcx, pass_num, pass_name, disambiguator, source, body, extra_data);
+    dump_matched_mir_node(tcx, pass_num, pass_name, disambiguator, body, extra_data);
 }
 
 pub fn dump_enabled<'tcx>(tcx: TyCtxt<'tcx>, pass_name: &str, def_id: DefId) -> bool {
@@ -113,20 +113,20 @@
     pass_num: Option<&dyn Display>,
     pass_name: &str,
     disambiguator: &dyn Display,
-    source: MirSource<'tcx>,
     body: &Body<'tcx>,
     mut extra_data: F,
 ) where
     F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
 {
     let _: io::Result<()> = try {
-        let mut file = create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, source)?;
+        let mut file =
+            create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, body.source)?;
         let def_path = ty::print::with_forced_impl_filename_line(|| {
             // see notes on #41697 above
-            tcx.def_path_str(source.def_id())
+            tcx.def_path_str(body.source.def_id())
         });
         write!(file, "// MIR for `{}", def_path)?;
-        match source.promoted {
+        match body.source.promoted {
             None => write!(file, "`")?,
             Some(promoted) => write!(file, "::{:?}`", promoted)?,
         }
@@ -137,40 +137,39 @@
         writeln!(file)?;
         extra_data(PassWhere::BeforeCFG, &mut file)?;
         write_user_type_annotations(tcx, body, &mut file)?;
-        write_mir_fn(tcx, source, body, &mut extra_data, &mut file)?;
+        write_mir_fn(tcx, body, &mut extra_data, &mut file)?;
         extra_data(PassWhere::AfterCFG, &mut file)?;
     };
 
     if tcx.sess.opts.debugging_opts.dump_mir_graphviz {
         let _: io::Result<()> = try {
             let mut file =
-                create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, source)?;
-            write_mir_fn_graphviz(tcx, source.def_id(), body, false, &mut file)?;
+                create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, body.source)?;
+            write_mir_fn_graphviz(tcx, body, false, &mut file)?;
         };
     }
 
     if let Some(spanview) = tcx.sess.opts.debugging_opts.dump_mir_spanview {
         let _: io::Result<()> = try {
-            let mut file =
-                create_dump_file(tcx, "html", pass_num, pass_name, disambiguator, source)?;
-            if source.def_id().is_local() {
-                write_mir_fn_spanview(tcx, source.def_id(), body, spanview, &mut file)?;
+            let file_basename =
+                dump_file_basename(tcx, pass_num, pass_name, disambiguator, body.source);
+            let mut file = create_dump_file_with_basename(tcx, &file_basename, "html")?;
+            if body.source.def_id().is_local() {
+                write_mir_fn_spanview(tcx, body, spanview, &file_basename, &mut file)?;
             }
         };
     }
 }
 
-/// Returns the path to the filename where we should dump a given MIR.
-/// Also used by other bits of code (e.g., NLL inference) that dump
-/// graphviz data or other things.
-fn dump_path(
+/// Returns the file basename portion (without extension) of a filename path
+/// where we should dump MIR representation output files.
+fn dump_file_basename(
     tcx: TyCtxt<'_>,
-    extension: &str,
     pass_num: Option<&dyn Display>,
     pass_name: &str,
     disambiguator: &dyn Display,
     source: MirSource<'tcx>,
-) -> PathBuf {
+) -> String {
     let promotion_id = match source.promoted {
         Some(id) => format!("-{:?}", id),
         None => String::new(),
@@ -185,9 +184,6 @@
         }
     };
 
-    let mut file_path = PathBuf::new();
-    file_path.push(Path::new(&tcx.sess.opts.debugging_opts.dump_mir_dir));
-
     let crate_name = tcx.crate_name(source.def_id().krate);
     let item_name = tcx.def_path(source.def_id()).to_filename_friendly_no_crate();
     // All drop shims have the same DefId, so we have to add the type
@@ -207,23 +203,46 @@
         _ => String::new(),
     };
 
-    let file_name = format!(
-        "{}.{}{}{}{}.{}.{}.{}",
-        crate_name,
-        item_name,
-        shim_disambiguator,
-        promotion_id,
-        pass_num,
-        pass_name,
-        disambiguator,
-        extension,
-    );
+    format!(
+        "{}.{}{}{}{}.{}.{}",
+        crate_name, item_name, shim_disambiguator, promotion_id, pass_num, pass_name, disambiguator,
+    )
+}
+
+/// Returns the path to the filename where we should dump a given MIR.
+/// Also used by other bits of code (e.g., NLL inference) that dump
+/// graphviz data or other things.
+fn dump_path(tcx: TyCtxt<'_>, basename: &str, extension: &str) -> PathBuf {
+    let mut file_path = PathBuf::new();
+    file_path.push(Path::new(&tcx.sess.opts.debugging_opts.dump_mir_dir));
+
+    let file_name = format!("{}.{}", basename, extension,);
 
     file_path.push(&file_name);
 
     file_path
 }
 
+/// Attempts to open the MIR dump file with the given name and extension.
+fn create_dump_file_with_basename(
+    tcx: TyCtxt<'_>,
+    file_basename: &str,
+    extension: &str,
+) -> io::Result<io::BufWriter<fs::File>> {
+    let file_path = dump_path(tcx, file_basename, extension);
+    if let Some(parent) = file_path.parent() {
+        fs::create_dir_all(parent).map_err(|e| {
+            io::Error::new(
+                e.kind(),
+                format!("IO error creating MIR dump directory: {:?}; {}", parent, e),
+            )
+        })?;
+    }
+    Ok(io::BufWriter::new(fs::File::create(&file_path).map_err(|e| {
+        io::Error::new(e.kind(), format!("IO error creating MIR dump file: {:?}; {}", file_path, e))
+    })?))
+}
+
 /// Attempts to open a file where we should dump a given MIR or other
 /// bit of MIR-related data. Used by `mir-dump`, but also by other
 /// bits of code (e.g., NLL inference) that dump graphviz data or
@@ -236,11 +255,11 @@
     disambiguator: &dyn Display,
     source: MirSource<'tcx>,
 ) -> io::Result<io::BufWriter<fs::File>> {
-    let file_path = dump_path(tcx, extension, pass_num, pass_name, disambiguator, source);
-    if let Some(parent) = file_path.parent() {
-        fs::create_dir_all(parent)?;
-    }
-    Ok(io::BufWriter::new(fs::File::create(&file_path)?))
+    create_dump_file_with_basename(
+        tcx,
+        &dump_file_basename(tcx, pass_num, pass_name, disambiguator, source),
+        extension,
+    )
 }
 
 /// Write out a human-readable textual representation for the given MIR.
@@ -263,15 +282,11 @@
             writeln!(w)?;
         }
 
-        write_mir_fn(tcx, MirSource::item(def_id), body, &mut |_, _| Ok(()), w)?;
+        write_mir_fn(tcx, body, &mut |_, _| Ok(()), w)?;
 
-        for (i, body) in tcx.promoted_mir(def_id).iter_enumerated() {
+        for body in tcx.promoted_mir(def_id) {
             writeln!(w)?;
-            let src = MirSource {
-                instance: ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
-                promoted: Some(i),
-            };
-            write_mir_fn(tcx, src, body, &mut |_, _| Ok(()), w)?;
+            write_mir_fn(tcx, body, &mut |_, _| Ok(()), w)?;
         }
     }
     Ok(())
@@ -280,7 +295,6 @@
 /// Write out a human-readable textual representation for the given function.
 pub fn write_mir_fn<'tcx, F>(
     tcx: TyCtxt<'tcx>,
-    src: MirSource<'tcx>,
     body: &Body<'tcx>,
     extra_data: &mut F,
     w: &mut dyn Write,
@@ -288,7 +302,7 @@
 where
     F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
 {
-    write_mir_intro(tcx, src, body, w)?;
+    write_mir_intro(tcx, body, w)?;
     for block in body.basic_blocks().indices() {
         extra_data(PassWhere::BeforeBlock(block), w)?;
         write_basic_block(tcx, block, body, extra_data, w)?;
@@ -535,8 +549,36 @@
     };
 
     for &child in children {
-        assert_eq!(body.source_scopes[child].parent_scope, Some(parent));
-        writeln!(w, "{0:1$}scope {2} {{", "", indent, child.index())?;
+        let child_data = &body.source_scopes[child];
+        assert_eq!(child_data.parent_scope, Some(parent));
+
+        let (special, span) = if let Some((callee, callsite_span)) = child_data.inlined {
+            (
+                format!(
+                    " (inlined {}{})",
+                    if callee.def.requires_caller_location(tcx) { "#[track_caller] " } else { "" },
+                    callee
+                ),
+                Some(callsite_span),
+            )
+        } else {
+            (String::new(), None)
+        };
+
+        let indented_header = format!("{0:1$}scope {2}{3} {{", "", indent, child.index(), special);
+
+        if let Some(span) = span {
+            writeln!(
+                w,
+                "{0:1$} // at {2}",
+                indented_header,
+                ALIGN,
+                tcx.sess.source_map().span_to_string(span),
+            )?;
+        } else {
+            writeln!(w, "{}", indented_header)?;
+        }
+
         write_scope_tree(tcx, body, scope_tree, w, child, depth + 1)?;
         writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?;
     }
@@ -548,11 +590,10 @@
 /// local variables (both user-defined bindings and compiler temporaries).
 pub fn write_mir_intro<'tcx>(
     tcx: TyCtxt<'tcx>,
-    src: MirSource<'tcx>,
     body: &Body<'_>,
     w: &mut dyn Write,
 ) -> io::Result<()> {
-    write_mir_sig(tcx, src, body, w)?;
+    write_mir_sig(tcx, body, w)?;
     writeln!(w, "{{")?;
 
     // construct a scope tree and write it out
@@ -589,7 +630,7 @@
             ConstValue::Scalar(interpret::Scalar::Ptr(ptr)) => {
                 Either::Left(Either::Left(std::iter::once(ptr.alloc_id)))
             }
-            ConstValue::Scalar(interpret::Scalar::Raw { .. }) => {
+            ConstValue::Scalar(interpret::Scalar::Int { .. }) => {
                 Either::Left(Either::Right(std::iter::empty()))
             }
             ConstValue::ByRef { alloc, .. } | ConstValue::Slice { data: alloc, .. } => {
@@ -599,7 +640,7 @@
     }
     struct CollectAllocIds(BTreeSet<AllocId>);
     impl<'tcx> TypeVisitor<'tcx> for CollectAllocIds {
-        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> {
             if let ty::ConstKind::Value(val) = c.val {
                 self.0.extend(alloc_ids_from_const(val));
             }
@@ -659,7 +700,8 @@
 /// Dumps the size, metadata, and content of an allocation to the given writer.
 /// The expectation is that the caller first prints other relevant metadata, so the exact
 /// format of this function is (*without* leading or trailing newline):
-/// ```
+///
+/// ```text
 /// size: {}, align: {}) {
 ///     <bytes>
 /// }
@@ -850,25 +892,21 @@
     Ok(())
 }
 
-fn write_mir_sig(
-    tcx: TyCtxt<'_>,
-    src: MirSource<'tcx>,
-    body: &Body<'_>,
-    w: &mut dyn Write,
-) -> io::Result<()> {
+fn write_mir_sig(tcx: TyCtxt<'_>, body: &Body<'_>, w: &mut dyn Write) -> io::Result<()> {
     use rustc_hir::def::DefKind;
 
-    trace!("write_mir_sig: {:?}", src.instance);
-    let kind = tcx.def_kind(src.def_id());
+    trace!("write_mir_sig: {:?}", body.source.instance);
+    let def_id = body.source.def_id();
+    let kind = tcx.def_kind(def_id);
     let is_function = match kind {
         DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(..) => true,
-        _ => tcx.is_closure(src.def_id()),
+        _ => tcx.is_closure(def_id),
     };
-    match (kind, src.promoted) {
+    match (kind, body.source.promoted) {
         (_, Some(i)) => write!(w, "{:?} in ", i)?,
         (DefKind::Const | DefKind::AssocConst, _) => write!(w, "const ")?,
         (DefKind::Static, _) => {
-            write!(w, "static {}", if tcx.is_mutable_static(src.def_id()) { "mut " } else { "" })?
+            write!(w, "static {}", if tcx.is_mutable_static(def_id) { "mut " } else { "" })?
         }
         (_, _) if is_function => write!(w, "fn ")?,
         (DefKind::AnonConst, _) => {} // things like anon const, not an item
@@ -877,10 +915,10 @@
 
     ty::print::with_forced_impl_filename_line(|| {
         // see notes on #41697 elsewhere
-        write!(w, "{}", tcx.def_path_str(src.def_id()))
+        write!(w, "{}", tcx.def_path_str(def_id))
     })?;
 
-    if src.promoted.is_none() && is_function {
+    if body.source.promoted.is_none() && is_function {
         write!(w, "(")?;
 
         // fn argument types.
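
The refactor above splits MIR dump naming into a shared basename (so the `.mir`, `.dot`, and `.html` outputs of one pass agree on their stem) and a per-format extension appended in `dump_path`. A simplified standalone sketch of the two-step naming, omitting the shim and promotion disambiguators and using made-up pass values:

use std::path::PathBuf;

fn dump_file_basename(
    crate_name: &str,
    item_name: &str,
    pass_num: &str,
    pass_name: &str,
    disambiguator: &str,
) -> String {
    format!("{}.{}.{}.{}.{}", crate_name, item_name, pass_num, pass_name, disambiguator)
}

fn dump_path(dump_dir: &str, basename: &str, extension: &str) -> PathBuf {
    let mut file_path = PathBuf::from(dump_dir);
    file_path.push(format!("{}.{}", basename, extension));
    file_path
}

fn main() {
    let basename = dump_file_basename("my_crate", "my_fn", "002-000", "SimplifyCfg-final", "after");
    assert_eq!(
        dump_path("mir_dump", &basename, "dot"),
        PathBuf::from("mir_dump/my_crate.my_fn.002-000.SimplifyCfg-final.after.dot")
    );
}
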
diff --git a/compiler/rustc_mir/src/util/spanview.rs b/compiler/rustc_mir/src/util/spanview.rs
index fe33fff..d3ef8c6 100644
--- a/compiler/rustc_mir/src/util/spanview.rs
+++ b/compiler/rustc_mir/src/util/spanview.rs
@@ -16,9 +16,13 @@
 const NEW_LINE_SPAN: &str = "</span>\n<span class=\"line\">";
 const HEADER: &str = r#"<!DOCTYPE html>
 <html>
-<head>
-    <title>coverage_of_if_else - Code Regions</title>
-    <style>
+<head>"#;
+const START_BODY: &str = r#"</head>
+<body>"#;
+const FOOTER: &str = r#"</body>
+</html>"#;
+
+const STYLE_SECTION: &str = r#"<style>
     .line {
         counter-increment: line;
     }
@@ -72,16 +76,12 @@
         /* requires hover over a span ONLY on its first line */
         display: inline-block;
     }
-    </style>
-</head>
-<body>"#;
-
-const FOOTER: &str = r#"
-</body>
-</html>"#;
+</style>"#;
 
 /// Metadata to highlight the span of a MIR BasicBlock, Statement, or Terminator.
+#[derive(Clone, Debug)]
 pub struct SpanViewable {
+    pub bb: BasicBlock,
     pub span: Span,
     pub id: String,
     pub tooltip: String,
@@ -90,14 +90,15 @@
 /// Write a spanview HTML+CSS file to analyze MIR element spans.
 pub fn write_mir_fn_spanview<'tcx, W>(
     tcx: TyCtxt<'tcx>,
-    def_id: DefId,
     body: &Body<'tcx>,
     spanview: MirSpanview,
+    title: &str,
     w: &mut W,
 ) -> io::Result<()>
 where
     W: Write,
 {
+    let def_id = body.source.def_id();
     let body_span = hir_body(tcx, def_id).value.span;
     let mut span_viewables = Vec::new();
     for (bb, data) in body.basic_blocks().iter_enumerated() {
@@ -126,16 +127,17 @@
             }
         }
     }
-    write_spanview_document(tcx, def_id, span_viewables, w)?;
+    write_document(tcx, def_id, span_viewables, title, w)?;
     Ok(())
 }
 
 /// Generate a spanview HTML+CSS document for the given local function `def_id`, and a pre-generated
 /// list of `SpanViewable`s.
-pub fn write_spanview_document<'tcx, W>(
+pub fn write_document<'tcx, W>(
     tcx: TyCtxt<'tcx>,
     def_id: DefId,
     mut span_viewables: Vec<SpanViewable>,
+    title: &str,
     w: &mut W,
 ) -> io::Result<()>
 where
@@ -153,6 +155,9 @@
         source_map.span_to_snippet(fn_span).expect("function should have printable source")
     );
     writeln!(w, "{}", HEADER)?;
+    writeln!(w, "<title>{}</title>", title)?;
+    writeln!(w, "{}", STYLE_SECTION)?;
+    writeln!(w, "{}", START_BODY)?;
     write!(
         w,
         r#"<div class="code" style="counter-reset: line {}"><span class="line">{}"#,
@@ -182,6 +187,7 @@
             end_pos.to_usize(),
             ordered_viewables.len()
         );
+        let curr_id = &ordered_viewables[0].id;
         let (next_from_pos, next_ordered_viewables) = write_next_viewable_with_overlaps(
             tcx,
             from_pos,
@@ -204,13 +210,17 @@
         from_pos = next_from_pos;
         if next_ordered_viewables.len() != ordered_viewables.len() {
             ordered_viewables = next_ordered_viewables;
-            alt = !alt;
+            if let Some(next_ordered_viewable) = ordered_viewables.first() {
+                if &next_ordered_viewable.id != curr_id {
+                    alt = !alt;
+                }
+            }
         }
     }
     if from_pos < end_pos {
         write_coverage_gap(tcx, from_pos, end_pos, w)?;
     }
-    write!(w, r#"</span></div>"#)?;
+    writeln!(w, r#"</span></div>"#)?;
     writeln!(w, "{}", FOOTER)?;
     Ok(())
 }
@@ -273,7 +283,7 @@
     }
     let id = format!("{}[{}]", bb.index(), i);
     let tooltip = tooltip(tcx, &id, span, vec![statement.clone()], &None);
-    Some(SpanViewable { span, id, tooltip })
+    Some(SpanViewable { bb, span, id, tooltip })
 }
 
 fn terminator_span_viewable<'tcx>(
@@ -289,7 +299,7 @@
     }
     let id = format!("{}:{}", bb.index(), terminator_kind_name(term));
     let tooltip = tooltip(tcx, &id, span, vec![], &data.terminator);
-    Some(SpanViewable { span, id, tooltip })
+    Some(SpanViewable { bb, span, id, tooltip })
 }
 
 fn block_span_viewable<'tcx>(
@@ -304,7 +314,7 @@
     }
     let id = format!("{}", bb.index());
     let tooltip = tooltip(tcx, &id, span, data.statements.clone(), &data.terminator);
-    Some(SpanViewable { span, id, tooltip })
+    Some(SpanViewable { bb, span, id, tooltip })
 }
 
 fn compute_block_span<'tcx>(data: &BasicBlockData<'tcx>, body_span: Span) -> Span {
@@ -456,6 +466,7 @@
             remaining_viewables.len()
         );
         // Write the overlaps (and the overlaps' overlaps, if any) up to `to_pos`.
+        let curr_id = &remaining_viewables[0].id;
         let (next_from_pos, next_remaining_viewables) = write_next_viewable_with_overlaps(
             tcx,
             from_pos,
@@ -480,7 +491,11 @@
         from_pos = next_from_pos;
         if next_remaining_viewables.len() != remaining_viewables.len() {
             remaining_viewables = next_remaining_viewables;
-            subalt = !subalt;
+            if let Some(next_ordered_viewable) = remaining_viewables.first() {
+                if &next_ordered_viewable.id != curr_id {
+                    subalt = !subalt;
+                }
+            }
         }
     }
     if from_pos <= viewable.span.hi() {
@@ -649,8 +664,12 @@
         tcx.hir().local_def_id_to_hir_id(def_id.as_local().expect("expected DefId is local"));
     let fn_decl_span = tcx.hir().span(hir_id);
     let body_span = hir_body(tcx, def_id).value.span;
-    debug_assert_eq!(fn_decl_span.ctxt(), body_span.ctxt());
-    fn_decl_span.to(body_span)
+    if fn_decl_span.ctxt() == body_span.ctxt() {
+        fn_decl_span.to(body_span)
+    } else {
+        // This probably occurs for functions defined via macros
+        body_span
+    }
 }
 
 fn hir_body<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx rustc_hir::Body<'tcx> {
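
With the HEADER / STYLE_SECTION / START_BODY / FOOTER split above, the per-function `<title>` and the `<style>` block are now emitted inside `<head>` before the body is opened. A standalone sketch of the assembly order, with abbreviated stand-ins for the constants rather than the real strings:

fn assemble(title: &str, body: &str) -> String {
    const HEADER: &str = "<!DOCTYPE html>\n<html>\n<head>";
    const STYLE_SECTION: &str = "<style>\n    /* .line / .code span rules elided */\n</style>";
    const START_BODY: &str = "</head>\n<body>";
    const FOOTER: &str = "</body>\n</html>";
    format!(
        "{}\n<title>{}</title>\n{}\n{}\n{}\n{}\n",
        HEADER, title, STYLE_SECTION, START_BODY, body, FOOTER
    )
}

fn main() {
    let doc = assemble("my_crate.my_fn.mir_map.0", r#"<div class="code">...</div>"#);
    // Both the title and the style sheet land before <body> is opened.
    assert!(doc.find("<title>").unwrap() < doc.find("<body>").unwrap());
    assert!(doc.find("<style>").unwrap() < doc.find("<body>").unwrap());
}
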
diff --git a/compiler/rustc_mir_build/src/build/block.rs b/compiler/rustc_mir_build/src/build/block.rs
index beaf12b..d5f72e6 100644
--- a/compiler/rustc_mir_build/src/build/block.rs
+++ b/compiler/rustc_mir_build/src/build/block.rs
@@ -28,14 +28,16 @@
         self.in_opt_scope(opt_destruction_scope.map(|de| (de, source_info)), move |this| {
             this.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| {
                 if targeted_by_break {
-                    // This is a `break`-able block
-                    let exit_block = this.cfg.start_new_block();
-                    let block_exit =
-                        this.in_breakable_scope(None, exit_block, destination, |this| {
-                            this.ast_block_stmts(destination, block, span, stmts, expr, safety_mode)
-                        });
-                    this.cfg.goto(unpack!(block_exit), source_info, exit_block);
-                    exit_block.unit()
+                    this.in_breakable_scope(None, destination, span, |this| {
+                        Some(this.ast_block_stmts(
+                            destination,
+                            block,
+                            span,
+                            stmts,
+                            expr,
+                            safety_mode,
+                        ))
+                    })
                 } else {
                     this.ast_block_stmts(destination, block, span, stmts, expr, safety_mode)
                 }
diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
index 244a70f..3a36ad5 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_constant.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
@@ -33,6 +33,7 @@
                 Constant { span, user_ty, literal }
             }
             ExprKind::StaticRef { literal, .. } => Constant { span, user_ty: None, literal },
+            ExprKind::ConstBlock { value } => Constant { span, user_ty: None, literal: value },
             _ => span_bug!(span, "expression is not a valid constant {:?}", kind),
         }
     }
diff --git a/compiler/rustc_mir_build/src/build/expr/as_operand.rs b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
index aac93f3..cf075ab 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_operand.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
@@ -165,7 +165,7 @@
 
         let tcx = this.hir.tcx();
 
-        if tcx.features().unsized_locals {
+        if tcx.features().unsized_fn_params {
             let ty = expr.ty;
             let span = expr.span;
             let param_env = this.hir.param_env;
diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs
index 39dbb6d..b94346f 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_place.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs
@@ -254,6 +254,7 @@
             | ExprKind::Continue { .. }
             | ExprKind::Return { .. }
             | ExprKind::Literal { .. }
+            | ExprKind::ConstBlock { .. }
             | ExprKind::StaticRef { .. }
             | ExprKind::InlineAsm { .. }
             | ExprKind::LlvmInlineAsm { .. }
@@ -261,10 +262,7 @@
             | ExprKind::ThreadLocalRef(_)
             | ExprKind::Call { .. } => {
                 // these are not places, so we need to make a temporary.
-                debug_assert!(match Category::of(&expr.kind) {
-                    Some(Category::Place) => false,
-                    _ => true,
-                });
+                debug_assert!(!matches!(Category::of(&expr.kind), Some(Category::Place)));
                 let temp =
                     unpack!(block = this.as_temp(block, expr.temp_lifetime, expr, mutability));
                 block.and(PlaceBuilder::from(temp))
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index 9c5fddc..2853bf8 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -234,6 +234,7 @@
             }
             ExprKind::Yield { .. }
             | ExprKind::Literal { .. }
+            | ExprKind::ConstBlock { .. }
             | ExprKind::StaticRef { .. }
             | ExprKind::Block { .. }
             | ExprKind::Match { .. }
@@ -259,10 +260,7 @@
             | ExprKind::ValueTypeAscription { .. } => {
                 // these do not have corresponding `Rvalue` variants,
                 // so make an operand and then return that
-                debug_assert!(match Category::of(&expr.kind) {
-                    Some(Category::Rvalue(RvalueFunc::AsRvalue)) => false,
-                    _ => true,
-                });
+                debug_assert!(!matches!(Category::of(&expr.kind), Some(Category::Rvalue(RvalueFunc::AsRvalue))));
                 let operand = unpack!(block = this.as_operand(block, scope, expr));
                 block.and(Rvalue::Use(operand))
             }
diff --git a/compiler/rustc_mir_build/src/build/expr/category.rs b/compiler/rustc_mir_build/src/build/expr/category.rs
index 9cabd18..ac5cf18 100644
--- a/compiler/rustc_mir_build/src/build/expr/category.rs
+++ b/compiler/rustc_mir_build/src/build/expr/category.rs
@@ -68,7 +68,9 @@
             | ExprKind::ThreadLocalRef(_)
             | ExprKind::LlvmInlineAsm { .. } => Some(Category::Rvalue(RvalueFunc::AsRvalue)),
 
-            ExprKind::Literal { .. } | ExprKind::StaticRef { .. } => Some(Category::Constant),
+            ExprKind::ConstBlock { .. } | ExprKind::Literal { .. } | ExprKind::StaticRef { .. } => {
+                Some(Category::Constant)
+            }
 
             ExprKind::Loop { .. }
             | ExprKind::Block { .. }
diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs
index 319fae5..9dc596a 100644
--- a/compiler/rustc_mir_build/src/build/expr/into.rs
+++ b/compiler/rustc_mir_build/src/build/expr/into.rs
@@ -58,10 +58,7 @@
             }
             ExprKind::NeverToAny { source } => {
                 let source = this.hir.mirror(source);
-                let is_call = match source.kind {
-                    ExprKind::Call { .. } | ExprKind::InlineAsm { .. } => true,
-                    _ => false,
-                };
+                let is_call = matches!(source.kind, ExprKind::Call { .. } | ExprKind::InlineAsm { .. });
 
                 // (#66975) Source could be a const of type `!`, so has to
                 // exist in the generated MIR.
@@ -140,23 +137,19 @@
                 // body, even when the exact code in the body cannot unwind
 
                 let loop_block = this.cfg.start_new_block();
-                let exit_block = this.cfg.start_new_block();
 
                 // Start the loop.
                 this.cfg.goto(block, source_info, loop_block);
 
-                this.in_breakable_scope(Some(loop_block), exit_block, destination, move |this| {
+                this.in_breakable_scope(Some(loop_block), destination, expr_span, move |this| {
                     // conduct the test, if necessary
                     let body_block = this.cfg.start_new_block();
-                    let diverge_cleanup = this.diverge_cleanup();
                     this.cfg.terminate(
                         loop_block,
                         source_info,
-                        TerminatorKind::FalseUnwind {
-                            real_target: body_block,
-                            unwind: Some(diverge_cleanup),
-                        },
+                        TerminatorKind::FalseUnwind { real_target: body_block, unwind: None },
                     );
+                    this.diverge_from(loop_block);
 
                     // The “return” value of the loop body must always be a unit. We therefore
                     // introduce a unit temporary as the destination for the loop body.
@@ -164,8 +157,10 @@
                     // Execute the body, branching back to the test.
                     let body_block_end = unpack!(this.into(tmp, body_block, body));
                     this.cfg.goto(body_block_end, source_info, loop_block);
-                });
-                exit_block.unit()
+
+                    // Loops are only exited by `break` expressions.
+                    None
+                })
             }
             ExprKind::Call { ty, fun, args, from_hir_call, fn_span } => {
                 let intrinsic = match *ty.kind() {
@@ -206,7 +201,6 @@
                         .collect();
 
                     let success = this.cfg.start_new_block();
-                    let cleanup = this.diverge_cleanup();
 
                     this.record_operands_moved(&args);
 
@@ -218,7 +212,7 @@
                         TerminatorKind::Call {
                             func: fun,
                             args,
-                            cleanup: Some(cleanup),
+                            cleanup: None,
                             // FIXME(varkor): replace this with an uninhabitedness-based check.
                             // This requires getting access to the current module to call
                             // `tcx.is_ty_uninhabited_from`, which is currently tricky to do.
@@ -231,6 +225,7 @@
                             fn_span,
                         },
                     );
+                    this.diverge_from(block);
                     success.unit()
                 }
             }
@@ -437,12 +432,12 @@
                 let scope = this.local_scope();
                 let value = unpack!(block = this.as_operand(block, scope, value));
                 let resume = this.cfg.start_new_block();
-                let cleanup = this.generator_drop_cleanup();
                 this.cfg.terminate(
                     block,
                     source_info,
-                    TerminatorKind::Yield { value, resume, resume_arg: destination, drop: cleanup },
+                    TerminatorKind::Yield { value, resume, resume_arg: destination, drop: None },
                 );
+                this.generator_drop_cleanup(block);
                 resume.unit()
             }
 
@@ -456,6 +451,7 @@
             | ExprKind::Array { .. }
             | ExprKind::Tuple { .. }
             | ExprKind::Closure { .. }
+            | ExprKind::ConstBlock { .. }
             | ExprKind::Literal { .. }
             | ExprKind::ThreadLocalRef(_)
             | ExprKind::StaticRef { .. } => {
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
index a9b8a61..3ee1524 100644
--- a/compiler/rustc_mir_build/src/build/matches/mod.rs
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -228,12 +228,10 @@
         outer_source_info: SourceInfo,
         fake_borrow_temps: Vec<(Place<'tcx>, Local)>,
     ) -> BlockAnd<()> {
-        let match_scope = self.scopes.topmost();
-
         let arm_end_blocks: Vec<_> = arm_candidates
             .into_iter()
             .map(|(arm, candidate)| {
-                debug!("lowering arm {:?}\ncanidate = {:?}", arm, candidate);
+                debug!("lowering arm {:?}\ncandidate = {:?}", arm, candidate);
 
                 let arm_source_info = self.source_info(arm.span);
                 let arm_scope = (arm.scope, arm_source_info);
@@ -250,7 +248,7 @@
                     let arm_block = this.bind_pattern(
                         outer_source_info,
                         candidate,
-                        arm.guard.as_ref().map(|g| (g, match_scope)),
+                        arm.guard.as_ref(),
                         &fake_borrow_temps,
                         scrutinee_span,
                         Some(arm.scope),
@@ -287,7 +285,7 @@
         &mut self,
         outer_source_info: SourceInfo,
         candidate: Candidate<'_, 'tcx>,
-        guard: Option<(&Guard<'tcx>, region::Scope)>,
+        guard: Option<&Guard<'tcx>>,
         fake_borrow_temps: &Vec<(Place<'tcx>, Local)>,
         scrutinee_span: Span,
         arm_scope: Option<region::Scope>,
@@ -1592,7 +1590,7 @@
         &mut self,
         candidate: Candidate<'pat, 'tcx>,
         parent_bindings: &[(Vec<Binding<'tcx>>, Vec<Ascription<'tcx>>)],
-        guard: Option<(&Guard<'tcx>, region::Scope)>,
+        guard: Option<&Guard<'tcx>>,
         fake_borrows: &Vec<(Place<'tcx>, Local)>,
         scrutinee_span: Span,
         schedule_drops: bool,
@@ -1704,7 +1702,7 @@
         //      the reference that we create for the arm.
         //    * So we eagerly create the reference for the arm and then take a
         //      reference to that.
-        if let Some((guard, region_scope)) = guard {
+        if let Some(guard) = guard {
             let tcx = self.hir.tcx();
             let bindings = parent_bindings
                 .iter()
@@ -1748,12 +1746,7 @@
                 unreachable
             });
             let outside_scope = self.cfg.start_new_block();
-            self.exit_scope(
-                source_info.span,
-                region_scope,
-                otherwise_post_guard_block,
-                outside_scope,
-            );
+            self.exit_top_scope(otherwise_post_guard_block, outside_scope, source_info);
             self.false_edges(
                 outside_scope,
                 otherwise_block,
diff --git a/compiler/rustc_mir_build/src/build/matches/simplify.rs b/compiler/rustc_mir_build/src/build/matches/simplify.rs
index a28a181..705266d 100644
--- a/compiler/rustc_mir_build/src/build/matches/simplify.rs
+++ b/compiler/rustc_mir_build/src/build/matches/simplify.rs
@@ -17,7 +17,6 @@
 use crate::thir::{self, *};
 use rustc_attr::{SignedInt, UnsignedInt};
 use rustc_hir::RangeEnd;
-use rustc_middle::mir::interpret::truncate;
 use rustc_middle::mir::Place;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::IntegerExt;
@@ -28,8 +27,9 @@
 impl<'a, 'tcx> Builder<'a, 'tcx> {
     /// Simplify a candidate so that all match pairs require a test.
     ///
-    /// This method will also split a candidate where the only match-pair is an
-    /// or-pattern into multiple candidates. This is so that
+    /// This method will also split a candidate, in which the only
+    /// match-pair is an or-pattern, into multiple candidates.
+    /// This is so that
     ///
     /// match x {
     ///     0 | 1 => { ... },
@@ -43,12 +43,36 @@
         candidate: &mut Candidate<'pat, 'tcx>,
     ) -> bool {
         // repeatedly simplify match pairs until fixed point is reached
+        debug!(?candidate, "simplify_candidate");
+
+        // `existing_bindings` and `new_bindings` exist to keep the bindings in the correct order.
+        // Reversing the binding order for bindings after `@` would also change the binding order
+        // in places where it should not change, for example `let (Some(a), Some(b)) = (x, y)`
+        //
+        // To avoid this, the bindings are accumulated in the following manner:
+        // * the bindings for one iteration of the following loop occur in order (i.e. left to
+        // right)
+        // * the bindings from the previous iterations of the loop are appended to the bindings
+        // from the current iteration (in the implementation this is done by mem::swap and extend)
+        // * after all iterations, these new bindings are then appended to the bindings that were
+        // preexisting (i.e. `candidate.bindings` when the function was called).
+        //
+        // example:
+        // candidate.bindings = [1, 2, 3]
+        // binding in iter 1: [4, 5]
+        // binding in iter 2: [6, 7]
+        //
+        // final binding: [1, 2, 3, 6, 7, 4, 5]
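+        //
+        // A minimal, self-contained sketch of the same bookkeeping (an editorial
+        // illustration, not part of the imported source; plain `Vec<u32>`s stand
+        // in for the binding lists):
+        //
+        //     let mut existing: Vec<u32> = vec![1, 2, 3];
+        //     let mut new: Vec<u32> = Vec::new();
+        //     for mut current in vec![vec![4, 5], vec![6, 7]] {
+        //         // this iteration's bindings first, then everything gathered so far
+        //         current.extend_from_slice(&new);
+        //         new = current;
+        //     }
+        //     existing.extend_from_slice(&new);
+        //     assert_eq!(existing, [1, 2, 3, 6, 7, 4, 5]);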
+        let mut existing_bindings = mem::take(&mut candidate.bindings);
+        let mut new_bindings = Vec::new();
         loop {
             let match_pairs = mem::take(&mut candidate.match_pairs);
 
             if let [MatchPair { pattern: Pat { kind: box PatKind::Or { pats }, .. }, place }] =
                 *match_pairs
             {
+                existing_bindings.extend_from_slice(&new_bindings);
+                mem::swap(&mut candidate.bindings, &mut existing_bindings);
                 candidate.subcandidates = self.create_or_subcandidates(candidate, place, pats);
                 return true;
             }
@@ -64,13 +88,33 @@
                     }
                 }
             }
+            // Avoid issue #69971: the binding order should be right to left if there are more
+            // bindings after `@`, to please the borrow checker.
+            // Example:
+            // struct NonCopyStruct {
+            //     copy_field: u32,
+            // }
+            //
+            // fn foo1(x: NonCopyStruct) {
+            //     let y @ NonCopyStruct { copy_field: z } = x;
+            //     // the above should turn into
+            //     let z = x.copy_field;
+            //     let y = x;
+            // }
+            candidate.bindings.extend_from_slice(&new_bindings);
+            mem::swap(&mut candidate.bindings, &mut new_bindings);
+            candidate.bindings.clear();
+
             if !changed {
+                existing_bindings.extend_from_slice(&new_bindings);
+                mem::swap(&mut candidate.bindings, &mut existing_bindings);
                 // Move or-patterns to the end, because they can result in us
                 // creating additional candidates, so we want to test them as
                 // late as possible.
                 candidate
                     .match_pairs
                     .sort_by_key(|pair| matches!(*pair.pattern.kind, PatKind::Or { .. }));
+                debug!(simplified = ?candidate, "simplify_candidate");
                 return false; // if we were not able to simplify any, done.
             }
         }
@@ -160,13 +204,13 @@
                     }
                     ty::Int(ity) => {
                         let size = Integer::from_attr(&tcx, SignedInt(ity)).size();
-                        let max = truncate(u128::MAX, size);
+                        let max = size.truncate(u128::MAX);
                         let bias = 1u128 << (size.bits() - 1);
                         (Some((0, max, size)), bias)
                     }
                     ty::Uint(uty) => {
                         let size = Integer::from_attr(&tcx, UnsignedInt(uty)).size();
-                        let max = truncate(u128::MAX, size);
+                        let max = size.truncate(u128::MAX);
                         (Some((0, max, size)), 0)
                     }
                     _ => (None, 0),
diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs
index d81c3b6..7bea822 100644
--- a/compiler/rustc_mir_build/src/build/matches/test.rs
+++ b/compiler/rustc_mir_build/src/build/matches/test.rs
@@ -167,48 +167,42 @@
                 let target_blocks = make_target_blocks(self);
                 // Variants is a BitVec of indexes into adt_def.variants.
                 let num_enum_variants = adt_def.variants.len();
-                let used_variants = variants.count();
                 debug_assert_eq!(target_blocks.len(), num_enum_variants + 1);
                 let otherwise_block = *target_blocks.last().unwrap();
-                let mut targets = Vec::with_capacity(used_variants + 1);
-                let mut values = Vec::with_capacity(used_variants);
                 let tcx = self.hir.tcx();
-                for (idx, discr) in adt_def.discriminants(tcx) {
-                    if variants.contains(idx) {
-                        debug_assert_ne!(
-                            target_blocks[idx.index()],
-                            otherwise_block,
-                            "no canididates for tested discriminant: {:?}",
-                            discr,
-                        );
-                        values.push(discr.val);
-                        targets.push(target_blocks[idx.index()]);
-                    } else {
-                        debug_assert_eq!(
-                            target_blocks[idx.index()],
-                            otherwise_block,
-                            "found canididates for untested discriminant: {:?}",
-                            discr,
-                        );
-                    }
-                }
-                targets.push(otherwise_block);
-                debug!(
-                    "num_enum_variants: {}, tested variants: {:?}, variants: {:?}",
-                    num_enum_variants, values, variants
+                let switch_targets = SwitchTargets::new(
+                    adt_def.discriminants(tcx).filter_map(|(idx, discr)| {
+                        if variants.contains(idx) {
+                            debug_assert_ne!(
+                                target_blocks[idx.index()],
+                                otherwise_block,
+                                "no candidates for tested discriminant: {:?}",
+                                discr,
+                            );
+                            Some((discr.val, target_blocks[idx.index()]))
+                        } else {
+                            debug_assert_eq!(
+                                target_blocks[idx.index()],
+                                otherwise_block,
+                                "found candidates for untested discriminant: {:?}",
+                                discr,
+                            );
+                            None
+                        }
+                    }),
+                    otherwise_block,
                 );
+                debug!("num_enum_variants: {}, variants: {:?}", num_enum_variants, variants);
                 let discr_ty = adt_def.repr.discr_type().to_ty(tcx);
                 let discr = self.temp(discr_ty, test.span);
                 self.cfg.push_assign(block, source_info, discr, Rvalue::Discriminant(place));
-                assert_eq!(values.len() + 1, targets.len());
                 self.cfg.terminate(
                     block,
                     source_info,
                     TerminatorKind::SwitchInt {
                         discr: Operand::Move(discr),
                         switch_ty: discr_ty,
-                        values: From::from(values),
-                        targets,
+                        targets: switch_targets,
                     },
                 );
             }
@@ -230,11 +224,15 @@
                 } else {
                     // The switch may be inexhaustive so we have a catch all block
                     debug_assert_eq!(options.len() + 1, target_blocks.len());
+                    let otherwise_block = *target_blocks.last().unwrap();
+                    let switch_targets = SwitchTargets::new(
+                        options.values().copied().zip(target_blocks),
+                        otherwise_block,
+                    );
                     TerminatorKind::SwitchInt {
                         discr: Operand::Copy(place),
                         switch_ty,
-                        values: options.values().copied().collect(),
-                        targets: target_blocks,
+                        targets: switch_targets,
                     }
                 };
                 self.cfg.terminate(block, source_info, terminator);
@@ -252,15 +250,13 @@
                         place,
                         ty,
                     );
+                } else if let [success, fail] = *make_target_blocks(self) {
+                    assert_eq!(value.ty, ty);
+                    let expect = self.literal_operand(test.span, value);
+                    let val = Operand::Copy(place);
+                    self.compare(block, success, fail, source_info, BinOp::Eq, expect, val);
                 } else {
-                    if let [success, fail] = *make_target_blocks(self) {
-                        assert_eq!(value.ty, ty);
-                        let expect = self.literal_operand(test.span, value);
-                        let val = Operand::Copy(place);
-                        self.compare(block, success, fail, source_info, BinOp::Eq, expect, val);
-                    } else {
-                        bug!("`TestKind::Eq` should have two target blocks");
-                    }
+                    bug!("`TestKind::Eq` should have two target blocks");
                 }
             }
 
@@ -418,7 +414,6 @@
         let bool_ty = self.hir.bool_ty();
         let eq_result = self.temp(bool_ty, source_info.span);
         let eq_block = self.cfg.start_new_block();
-        let cleanup = self.diverge_cleanup();
         self.cfg.terminate(
             block,
             source_info,
@@ -436,11 +431,12 @@
                 }),
                 args: vec![val, expect],
                 destination: Some((eq_result, eq_block)),
-                cleanup: Some(cleanup),
+                cleanup: None,
                 from_hir_call: false,
                 fn_span: source_info.span,
             },
         );
+        self.diverge_from(block);
 
         if let [success_block, fail_block] = *make_target_blocks(self) {
             // check the result
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
index aa96ae8..f9995f4 100644
--- a/compiler/rustc_mir_build/src/build/mod.rs
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -29,7 +29,13 @@
         return tcx.mir_built(def);
     }
 
-    tcx.alloc_steal_mir(mir_build(tcx, def))
+    let mut body = mir_build(tcx, def);
+    if def.const_param_did.is_some() {
+        assert!(matches!(body.source.instance, ty::InstanceDef::Item(_)));
+        body.source = MirSource::from_instance(ty::InstanceDef::Item(def.to_global()));
+    }
+
+    tcx.alloc_steal_mir(body)
 }
 
 /// Construct the MIR for a given `DefId`.
@@ -199,7 +205,7 @@
             build::construct_const(cx, body_id, return_ty, return_ty_span)
         };
 
-        lints::check(tcx, &body, def.did);
+        lints::check(tcx, &body);
 
         // The borrow checker will replace all the regions here with its own
         // inference variables. There's no point having non-erased regions here.
@@ -296,6 +302,7 @@
     hir: Cx<'a, 'tcx>,
     cfg: CFG<'tcx>,
 
+    def_id: DefId,
     fn_span: Span,
     arg_count: usize,
     generator_kind: Option<GeneratorKind>,
@@ -327,7 +334,7 @@
 
     /// The vector of all scopes that we have created thus far;
     /// we track this for debuginfo later.
-    source_scopes: IndexVec<SourceScope, SourceScopeData>,
+    source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
     source_scope: SourceScope,
 
     /// The guard-context: each time we build the guard expression for
@@ -344,14 +351,6 @@
     unit_temp: Option<Place<'tcx>>,
 
     var_debug_info: Vec<VarDebugInfo<'tcx>>,
-
-    /// Cached block with the `RESUME` terminator; this is created
-    /// when first set of cleanups are built.
-    cached_resume_block: Option<BasicBlock>,
-    /// Cached block with the `RETURN` terminator.
-    cached_return_block: Option<BasicBlock>,
-    /// Cached block with the `UNREACHABLE` terminator.
-    cached_unreachable_block: Option<BasicBlock>,
 }
 
 impl<'a, 'tcx> Builder<'a, 'tcx> {
@@ -597,6 +596,7 @@
 
     let mut builder = Builder::new(
         hir,
+        fn_def_id.to_def_id(),
         span_with_body,
         arguments.len(),
         safety,
@@ -609,50 +609,30 @@
         region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::CallSite };
     let arg_scope =
         region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::Arguments };
-    let mut block = START_BLOCK;
     let source_info = builder.source_info(span);
     let call_site_s = (call_site_scope, source_info);
-    unpack!(
-        block = builder.in_scope(call_site_s, LintLevel::Inherited, |builder| {
-            if should_abort_on_panic(tcx, fn_def_id, abi) {
-                builder.schedule_abort();
-            }
-
-            let arg_scope_s = (arg_scope, source_info);
-            // `return_block` is called when we evaluate a `return` expression, so
-            // we just use `START_BLOCK` here.
-            unpack!(
-                block = builder.in_breakable_scope(
-                    None,
-                    START_BLOCK,
-                    Place::return_place(),
-                    |builder| {
-                        builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| {
-                            builder.args_and_body(
-                                block,
-                                fn_def_id.to_def_id(),
-                                &arguments,
-                                arg_scope,
-                                &body.value,
-                            )
-                        })
-                    },
-                )
-            );
-            // Attribute epilogue to function's closing brace
-            let fn_end = span_with_body.shrink_to_hi();
-            let source_info = builder.source_info(fn_end);
-            let return_block = builder.return_block();
-            builder.cfg.goto(block, source_info, return_block);
-            builder.cfg.terminate(return_block, source_info, TerminatorKind::Return);
-            // Attribute any unreachable codepaths to the function's closing brace
-            if let Some(unreachable_block) = builder.cached_unreachable_block {
-                builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable);
-            }
-            return_block.unit()
-        })
-    );
-    assert_eq!(block, builder.return_block());
+    unpack!(builder.in_scope(call_site_s, LintLevel::Inherited, |builder| {
+        let arg_scope_s = (arg_scope, source_info);
+        // Attribute epilogue to function's closing brace
+        let fn_end = span_with_body.shrink_to_hi();
+        let return_block =
+            unpack!(builder.in_breakable_scope(None, Place::return_place(), fn_end, |builder| {
+                Some(builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| {
+                    builder.args_and_body(
+                        START_BLOCK,
+                        fn_def_id.to_def_id(),
+                        &arguments,
+                        arg_scope,
+                        &body.value,
+                    )
+                }))
+            }));
+        let source_info = builder.source_info(fn_end);
+        builder.cfg.terminate(return_block, source_info, TerminatorKind::Return);
+        let should_abort = should_abort_on_panic(tcx, fn_def_id, abi);
+        builder.build_drop_trees(should_abort);
+        return_block.unit()
+    }));
 
     let spread_arg = if abi == Abi::RustCall {
         // RustCall pseudo-ABI untuples the last argument.
@@ -675,8 +655,9 @@
 ) -> Body<'tcx> {
     let tcx = hir.tcx();
     let owner_id = tcx.hir().body_owner(body_id);
+    let def_id = tcx.hir().local_def_id(owner_id);
     let span = tcx.hir().span(owner_id);
-    let mut builder = Builder::new(hir, span, 0, Safety::Safe, const_ty, const_ty_span, None);
+    let mut builder =
+        Builder::new(hir, def_id.to_def_id(), span, 0, Safety::Safe, const_ty, const_ty_span, None);
 
     let mut block = START_BLOCK;
     let ast_expr = &tcx.hir().body(body_id).value;
@@ -686,14 +667,7 @@
     let source_info = builder.source_info(span);
     builder.cfg.terminate(block, source_info, TerminatorKind::Return);
 
-    // Constants can't `return` so a return block should not be created.
-    assert_eq!(builder.cached_return_block, None);
-
-    // Constants may be match expressions in which case an unreachable block may
-    // be created, so terminate it properly.
-    if let Some(unreachable_block) = builder.cached_unreachable_block {
-        builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable);
-    }
+    builder.build_drop_trees(false);
 
     builder.finish()
 }
@@ -705,6 +679,7 @@
 fn construct_error<'a, 'tcx>(hir: Cx<'a, 'tcx>, body_id: hir::BodyId) -> Body<'tcx> {
     let tcx = hir.tcx();
     let owner_id = tcx.hir().body_owner(body_id);
+    let def_id = tcx.hir().local_def_id(owner_id);
     let span = tcx.hir().span(owner_id);
     let ty = tcx.ty_error();
     let num_params = match hir.body_owner_kind {
@@ -722,7 +697,7 @@
         hir::BodyOwnerKind::Const => 0,
         hir::BodyOwnerKind::Static(_) => 0,
     };
-    let mut builder = Builder::new(hir, span, num_params, Safety::Safe, ty, span, None);
+    let mut builder =
+        Builder::new(hir, def_id.to_def_id(), span, num_params, Safety::Safe, ty, span, None);
     let source_info = builder.source_info(span);
     // Some MIR passes will expect the number of parameters to match the
     // function declaration.
@@ -740,6 +715,7 @@
 impl<'a, 'tcx> Builder<'a, 'tcx> {
     fn new(
         hir: Cx<'a, 'tcx>,
+        def_id: DefId,
         span: Span,
         arg_count: usize,
         safety: Safety,
@@ -750,11 +726,12 @@
         let lint_level = LintLevel::Explicit(hir.root_lint_level);
         let mut builder = Builder {
             hir,
+            def_id,
             cfg: CFG { basic_blocks: IndexVec::new() },
             fn_span: span,
             arg_count,
             generator_kind,
-            scopes: Default::default(),
+            scopes: scope::Scopes::new(),
             block_context: BlockContext::new(),
             source_scopes: IndexVec::new(),
             source_scope: OUTERMOST_SOURCE_SCOPE,
@@ -767,9 +744,6 @@
             var_indices: Default::default(),
             unit_temp: None,
             var_debug_info: vec![],
-            cached_resume_block: None,
-            cached_return_block: None,
-            cached_unreachable_block: None,
         };
 
         assert_eq!(builder.cfg.start_new_block(), START_BLOCK);
@@ -790,6 +764,7 @@
         }
 
         Body::new(
+            MirSource::item(self.def_id),
             self.cfg.basic_blocks,
             self.source_scopes,
             self.local_decls,
@@ -1003,17 +978,6 @@
             }
         }
     }
-
-    fn return_block(&mut self) -> BasicBlock {
-        match self.cached_return_block {
-            Some(rb) => rb,
-            None => {
-                let rb = self.cfg.start_new_block();
-                self.cached_return_block = Some(rb);
-                rb
-            }
-        }
-    }
 }
 
 ///////////////////////////////////////////////////////////////////////////
diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs
index 2a03bb7..e91227d 100644
--- a/compiler/rustc_mir_build/src/build/scope.rs
+++ b/compiler/rustc_mir_build/src/build/scope.rs
@@ -6,30 +6,31 @@
 
 ### SEME Regions
 
-When pushing a new scope, we record the current point in the graph (a
+When pushing a new [Scope], we record the current point in the graph (a
 basic block); this marks the entry to the scope. We then generate more
 stuff in the control-flow graph. Whenever the scope is exited, either
 via a `break` or `return` or just by fallthrough, that marks an exit
 from the scope. Each lexical scope thus corresponds to a single-entry,
 multiple-exit (SEME) region in the control-flow graph.
 
-For now, we keep a mapping from each `region::Scope` to its
-corresponding SEME region for later reference (see caveat in next
-paragraph). This is because region scopes are tied to
-them. Eventually, when we shift to non-lexical lifetimes, there should
-be no need to remember this mapping.
+For now, we record the `region::Scope` to each SEME region for later reference
+(see caveat in next paragraph). This is because destruction scopes are tied to
+them. This may change in the future so that MIR lowering determines its own
+destruction scopes.
 
 ### Not so SEME Regions
 
 In the course of building matches, it sometimes happens that certain code
 (namely guards) gets executed multiple times. This means that the lexical
 scope may in fact correspond to multiple, disjoint SEME regions. So in fact our
-mapping is from one scope to a vector of SEME regions.
+mapping is from one scope to a vector of SEME regions. Since the SEME regions
+are disjoint, the mapping is still one-to-one for the set of SEME regions that
+we're currently in.
 
-Also in matches, the scopes assigned to arms are not even SEME regions! Each
-arm has a single region with one entry for each pattern. We manually
+Also in matches, the scopes assigned to arms are not always even SEME regions!
+Each arm has a single region with one entry for each pattern. We manually
 manipulate the scheduled drops in this scope to avoid dropping things multiple
-times, although drop elaboration would clean this up for value drops.
+times.
 
 ### Drops
 
@@ -60,25 +61,23 @@
 
 There are numerous "normal" ways to early exit a scope: `break`,
 `continue`, `return` (panics are handled separately). Whenever an
-early exit occurs, the method `exit_scope` is called. It is given the
+early exit occurs, the method `break_scope` is called. It is given the
 current point in execution where the early exit occurs, as well as the
 scope you want to branch to (note that all early exits go to some
-other enclosing scope). `exit_scope` will record this exit point and
-also add all drops.
+other enclosing scope). `break_scope` will record the set of drops currently
+scheduled in a [DropTree]. Later, before `in_breakable_scope` exits, the drops
+will be added to the CFG.
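+
+For example (an illustrative sketch rather than a verbatim quote of the
+expression-lowering code), a `break` with a value ends up going through a call
+along the lines of
+
+    this.break_scope(block, Some(value), BreakableTarget::Break(scope), source_info)
+
+which records the drops for the scopes being left in the enclosing breakable
+scope's `DropTree` and hands back a fresh, unterminated block for the
+(unreachable) code that follows the `break`.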
 
-Panics are handled in a similar fashion, except that a panic always
-returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
-`panic(p)` with the current point `p`. Or else you can call
-`diverge_cleanup`, which will produce a block that you can branch to
-which does the appropriate cleanup and then diverges. `panic(p)`
-simply calls `diverge_cleanup()` and adds an edge from `p` to the
-result.
+Panics are handled in a similar fashion, except that the drops are added to the
+MIR once the rest of the function has finished being lowered. If a terminator
+can panic, call `diverge_from(block)`, where `block` is the block containing
+that terminator.
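+
+For example (again a sketch, mirroring the call lowering earlier in this patch
+rather than quoting it), a call that may unwind is now emitted as
+
+    this.cfg.terminate(block, source_info, TerminatorKind::Call {
+        func, args, destination, cleanup: None, from_hir_call, fn_span,
+    });
+    // Record the unwind edge; the cleanup blocks themselves are only built
+    // once the whole body has been lowered.
+    this.diverge_from(block);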
 
-### Loop scopes
+### Breakable scopes
 
 In addition to the normal scope stack, we track a loop scope stack
-that contains only loops. It tracks where a `break` and `continue`
-should go to.
+that contains only loops and breakable blocks. It tracks where a `break`,
+`continue` or `return` should go to.
 
 */
 
@@ -86,12 +85,24 @@
 use crate::thir::{Expr, ExprRef, LintLevel};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir as hir;
-use rustc_hir::GeneratorKind;
+use rustc_index::vec::IndexVec;
 use rustc_middle::middle::region;
 use rustc_middle::mir::*;
 use rustc_span::{Span, DUMMY_SP};
-use std::collections::hash_map::Entry;
-use std::mem;
+
+#[derive(Debug)]
+pub struct Scopes<'tcx> {
+    scopes: Vec<Scope>,
+    /// The current set of breakable scopes. See module comment for more details.
+    breakable_scopes: Vec<BreakableScope<'tcx>>,
+
+    /// Drops that need to be done on unwind paths. See the comment on
+    /// [DropTree] for more details.
+    unwind_drops: DropTree,
+
+    /// Drops that need to be done on paths to the `GeneratorDrop` terminator.
+    generator_drops: DropTree,
+}
 
 #[derive(Debug)]
 struct Scope {
@@ -112,73 +123,45 @@
 
     moved_locals: Vec<Local>,
 
-    /// The cache for drop chain on “normal” exit into a particular BasicBlock.
-    cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,
+    /// The drop index that will drop everything in and below this scope on an
+    /// unwind path.
+    cached_unwind_block: Option<DropIdx>,
 
-    /// The cache for drop chain on "generator drop" exit.
-    cached_generator_drop: Option<BasicBlock>,
-
-    /// The cache for drop chain on "unwind" exit.
-    cached_unwind: CachedBlock,
+    /// The drop index that will drop everything in and below this scope on a
+    /// generator drop path.
+    cached_generator_drop_block: Option<DropIdx>,
 }
 
-#[derive(Debug, Default)]
-crate struct Scopes<'tcx> {
-    scopes: Vec<Scope>,
-    /// The current set of breakable scopes. See module comment for more details.
-    breakable_scopes: Vec<BreakableScope<'tcx>>,
-}
-
-#[derive(Debug)]
+#[derive(Clone, Copy, Debug)]
 struct DropData {
-    /// span where drop obligation was incurred (typically where place was declared)
-    span: Span,
+    /// The `Span` where drop obligation was incurred (typically where place was
+    /// declared)
+    source_info: SourceInfo,
 
     /// local to drop
     local: Local,
 
     /// Whether this is a value Drop or a StorageDead.
     kind: DropKind,
-
-    /// The cached blocks for unwinds.
-    cached_block: CachedBlock,
 }
 
-#[derive(Debug, Default, Clone, Copy)]
-struct CachedBlock {
-    /// The cached block for the cleanups-on-diverge path. This block
-    /// contains code to run the current drop and all the preceding
-    /// drops (i.e., those having lower index in Drop’s Scope drop
-    /// array)
-    unwind: Option<BasicBlock>,
-
-    /// The cached block for unwinds during cleanups-on-generator-drop path
-    ///
-    /// This is split from the standard unwind path here to prevent drop
-    /// elaboration from creating drop flags that would have to be captured
-    /// by the generator. I'm not sure how important this optimization is,
-    /// but it is here.
-    generator_drop: Option<BasicBlock>,
-}
-
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 pub(crate) enum DropKind {
     Value,
     Storage,
 }
 
-#[derive(Clone, Debug)]
+#[derive(Debug)]
 struct BreakableScope<'tcx> {
     /// Region scope of the loop
     region_scope: region::Scope,
-    /// Where the body of the loop begins. `None` if block
-    continue_block: Option<BasicBlock>,
-    /// Block to branch into when the loop or block terminates (either by being
-    /// `break`-en out from, or by having its condition to become false)
-    break_block: BasicBlock,
     /// The destination of the loop/block expression itself (i.e., where to put
-    /// the result of a `break` expression)
+    /// the result of a `break` or `return` expression)
     break_destination: Place<'tcx>,
+    /// Drops that happen on the `break`/`return` path.
+    break_drops: DropTree,
+    /// Drops that happen on the `continue` path.
+    continue_drops: Option<DropTree>,
 }
 
 /// The target of an expression that breaks out of a scope
@@ -189,61 +172,33 @@
     Return,
 }
 
-impl CachedBlock {
-    fn invalidate(&mut self) {
-        *self = CachedBlock::default();
-    }
+rustc_index::newtype_index! {
+    struct DropIdx { .. }
+}
 
-    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
-        if generator_drop { self.generator_drop } else { self.unwind }
-    }
+const ROOT_NODE: DropIdx = DropIdx::from_u32(0);
 
-    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
-        if generator_drop { &mut self.generator_drop } else { &mut self.unwind }
-    }
+/// A tree of drops that we have deferred lowering. It's used for:
+///
+/// * Drops on unwind paths
+/// * Drops on generator drop paths (when a suspended generator is dropped)
+/// * Drops on return and loop exit paths
+///
+/// Once no more nodes could be added to the tree, we lower it to MIR in one go
+/// in `build_mir`.
+#[derive(Debug)]
+struct DropTree {
+    /// Drops in the tree.
+    drops: IndexVec<DropIdx, (DropData, DropIdx)>,
+    /// Map for finding the inverse of the `next_drop` relation:
+    ///
+    /// `previous_drops[(drops[i].1, drops[i].0.local, drops[i].0.kind)] == i`
+    previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>,
+    /// Edges into the `DropTree` that need to be added once it's lowered.
+    entry_points: Vec<(DropIdx, BasicBlock)>,
 }
 
 impl Scope {
-    /// Invalidates all the cached blocks in the scope.
-    ///
-    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
-    /// larger extent of code.
-    ///
-    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
-    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
-    /// top-of-scope (as opposed to dependent scopes).
-    fn invalidate_cache(
-        &mut self,
-        storage_only: bool,
-        generator_kind: Option<GeneratorKind>,
-        this_scope_only: bool,
-    ) {
-        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
-        // with lots of `try!`?
-
-        // cached exits drop storage and refer to the top-of-scope
-        self.cached_exits.clear();
-
-        // the current generator drop and unwind refer to top-of-scope
-        self.cached_generator_drop = None;
-
-        let ignore_unwinds = storage_only && generator_kind.is_none();
-        if !ignore_unwinds {
-            self.cached_unwind.invalidate();
-        }
-
-        if !ignore_unwinds && !this_scope_only {
-            for drop_data in &mut self.drops {
-                drop_data.cached_block.invalidate();
-            }
-        }
-    }
-
-    /// Given a span and this scope's source scope, make a SourceInfo.
-    fn source_info(&self, span: Span) -> SourceInfo {
-        SourceInfo { span, scope: self.source_scope }
-    }
-
     /// Whether there's anything to do for the cleanup path, that is,
     /// when unwinding through this scope. This includes destructors,
     /// but not StorageDead statements, which don't get emitted at all
@@ -261,11 +216,187 @@
             DropKind::Storage => false,
         })
     }
+
+    fn invalidate_cache(&mut self) {
+        self.cached_unwind_block = None;
+        self.cached_generator_drop_block = None;
+    }
+}
+
+/// A trait that determines how [DropTree] creates its blocks and
+/// links to any entry nodes.
+trait DropTreeBuilder<'tcx> {
+    /// Create a new block for the tree. This should call either
+    /// `cfg.start_new_block()` or `cfg.start_new_cleanup_block()`.
+    fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock;
+
+    /// Links a block outside the drop tree, `from`, to the block `to` inside
+    /// the drop tree.
+    fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock);
+}
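+
+// Editorial note: implementors of this trait are defined outside the hunks
+// shown here; the following is only an illustrative shape of such an
+// implementor (names hypothetical, not a verbatim copy):
+//
+//     struct ExitScopes;
+//     impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes {
+//         fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+//             cfg.start_new_block()
+//         }
+//         fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+//             // e.g. point `from`'s placeholder terminator at `to` with a Goto
+//         }
+//     }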
+
+impl DropTree {
+    fn new() -> Self {
+        // The root node of the tree doesn't represent a drop, but instead
+        // represents the block in the tree that should be jumped to once all
+        // of the required drops have been performed.
+        let fake_source_info = SourceInfo::outermost(DUMMY_SP);
+        let fake_data =
+            DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
+        let drop_idx = DropIdx::MAX;
+        let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1);
+        Self { drops, entry_points: Vec::new(), previous_drops: FxHashMap::default() }
+    }
+
+    fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx {
+        let drops = &mut self.drops;
+        *self
+            .previous_drops
+            .entry((next, drop.local, drop.kind))
+            .or_insert_with(|| drops.push((drop, next)))
+    }
+
+    fn add_entry(&mut self, from: BasicBlock, to: DropIdx) {
+        debug_assert!(to < self.drops.next_index());
+        self.entry_points.push((to, from));
+    }
+
+    /// Builds the MIR for a given drop tree.
+    ///
+    /// `blocks` should have the same length as `self.drops`, and may have its
+    /// first value set to some already existing block.
+    fn build_mir<'tcx, T: DropTreeBuilder<'tcx>>(
+        &mut self,
+        cfg: &mut CFG<'tcx>,
+        blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
+    ) {
+        debug!("DropTree::build_mir(drops = {:#?})", self);
+        assert_eq!(blocks.len(), self.drops.len());
+
+        self.assign_blocks::<T>(cfg, blocks);
+        self.link_blocks(cfg, blocks)
+    }
+
+    /// Assign blocks for all of the drops in the drop tree that need them.
+    fn assign_blocks<'tcx, T: DropTreeBuilder<'tcx>>(
+        &mut self,
+        cfg: &mut CFG<'tcx>,
+        blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
+    ) {
+        // StorageDead statements can share blocks with each other and also with
+        // a Drop terminator. We iterate through the drops to find which drops
+        // need their own block.
+        #[derive(Clone, Copy)]
+        enum Block {
+            // This drop is unreachable
+            None,
+            // This drop is only reachable through the `StorageDead` with the
+            // specified index.
+            Shares(DropIdx),
+            // This drop has more than one way of being reached, or it is
+            // branched to from outside the tree, or its predecessor is a
+            // `Value` drop.
+            Own,
+        }
+
+        let mut needs_block = IndexVec::from_elem(Block::None, &self.drops);
+        if blocks[ROOT_NODE].is_some() {
+            // In some cases (such as drops for `continue`) the root node
+            // already has a block. In this case, make sure that we don't
+            // override it.
+            needs_block[ROOT_NODE] = Block::Own;
+        }
+
+        // Sort so that we only need to check the last value.
+        let entry_points = &mut self.entry_points;
+        entry_points.sort();
+
+        for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
+            if entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
+                let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
+                needs_block[drop_idx] = Block::Own;
+                while entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
+                    let entry_block = entry_points.pop().unwrap().1;
+                    T::add_entry(cfg, entry_block, block);
+                }
+            }
+            match needs_block[drop_idx] {
+                Block::None => continue,
+                Block::Own => {
+                    blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
+                }
+                Block::Shares(pred) => {
+                    blocks[drop_idx] = blocks[pred];
+                }
+            }
+            if let DropKind::Value = drop_data.0.kind {
+                needs_block[drop_data.1] = Block::Own;
+            } else if drop_idx != ROOT_NODE {
+                match &mut needs_block[drop_data.1] {
+                    pred @ Block::None => *pred = Block::Shares(drop_idx),
+                    pred @ Block::Shares(_) => *pred = Block::Own,
+                    Block::Own => (),
+                }
+            }
+        }
+
+        debug!("assign_blocks: blocks = {:#?}", blocks);
+        assert!(entry_points.is_empty());
+    }
+
+    fn link_blocks<'tcx>(
+        &self,
+        cfg: &mut CFG<'tcx>,
+        blocks: &IndexVec<DropIdx, Option<BasicBlock>>,
+    ) {
+        for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
+            let block = if let Some(block) = blocks[drop_idx] {
+                block
+            } else {
+                continue;
+            };
+            match drop_data.0.kind {
+                DropKind::Value => {
+                    let terminator = TerminatorKind::Drop {
+                        target: blocks[drop_data.1].unwrap(),
+                        // The caller will handle this if needed.
+                        unwind: None,
+                        place: drop_data.0.local.into(),
+                    };
+                    cfg.terminate(block, drop_data.0.source_info, terminator);
+                }
+                // Root nodes don't correspond to a drop.
+                DropKind::Storage if drop_idx == ROOT_NODE => {}
+                DropKind::Storage => {
+                    let stmt = Statement {
+                        source_info: drop_data.0.source_info,
+                        kind: StatementKind::StorageDead(drop_data.0.local),
+                    };
+                    cfg.push(block, stmt);
+                    let target = blocks[drop_data.1].unwrap();
+                    if target != block {
+                        // Diagnostics don't use this `Span` but debuginfo
+                        // might. Since we don't want breakpoints to be placed
+                        // here, especially when this is on an unwind path, we
+                        // use `DUMMY_SP`.
+                        let source_info = SourceInfo { span: DUMMY_SP, ..drop_data.0.source_info };
+                        let terminator = TerminatorKind::Goto { target };
+                        cfg.terminate(block, source_info, terminator);
+                    }
+                }
+            }
+        }
+    }
 }
 
 impl<'tcx> Scopes<'tcx> {
-    fn len(&self) -> usize {
-        self.scopes.len()
+    pub(crate) fn new() -> Self {
+        Self {
+            scopes: Vec::new(),
+            breakable_scopes: Vec::new(),
+            unwind_drops: DropTree::new(),
+            generator_drops: DropTree::new(),
+        }
     }
 
     fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) {
@@ -276,94 +407,29 @@
             region_scope_span: region_scope.1.span,
             drops: vec![],
             moved_locals: vec![],
-            cached_generator_drop: None,
-            cached_exits: Default::default(),
-            cached_unwind: CachedBlock::default(),
+            cached_unwind_block: None,
+            cached_generator_drop_block: None,
         });
     }
 
-    fn pop_scope(
-        &mut self,
-        region_scope: (region::Scope, SourceInfo),
-    ) -> (Scope, Option<BasicBlock>) {
+    fn pop_scope(&mut self, region_scope: (region::Scope, SourceInfo)) -> Scope {
         let scope = self.scopes.pop().unwrap();
         assert_eq!(scope.region_scope, region_scope.0);
-        let unwind_to =
-            self.scopes.last().and_then(|next_scope| next_scope.cached_unwind.get(false));
-        (scope, unwind_to)
+        scope
     }
 
-    fn may_panic(&self, scope_count: usize) -> bool {
-        let len = self.len();
-        self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup())
-    }
-
-    /// Finds the breakable scope for a given label. This is used for
-    /// resolving `return`, `break` and `continue`.
-    fn find_breakable_scope(
-        &self,
-        span: Span,
-        target: BreakableTarget,
-    ) -> (BasicBlock, region::Scope, Option<Place<'tcx>>) {
-        let get_scope = |scope: region::Scope| {
-            // find the loop-scope by its `region::Scope`.
-            self.breakable_scopes
-                .iter()
-                .rfind(|breakable_scope| breakable_scope.region_scope == scope)
-                .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
-        };
-        match target {
-            BreakableTarget::Return => {
-                let scope = &self.breakable_scopes[0];
-                if scope.break_destination != Place::return_place() {
-                    span_bug!(span, "`return` in item with no return scope");
-                }
-                (scope.break_block, scope.region_scope, Some(scope.break_destination))
-            }
-            BreakableTarget::Break(scope) => {
-                let scope = get_scope(scope);
-                (scope.break_block, scope.region_scope, Some(scope.break_destination))
-            }
-            BreakableTarget::Continue(scope) => {
-                let scope = get_scope(scope);
-                let continue_block = scope
-                    .continue_block
-                    .unwrap_or_else(|| span_bug!(span, "missing `continue` block"));
-                (continue_block, scope.region_scope, None)
-            }
-        }
-    }
-
-    fn num_scopes_above(&self, region_scope: region::Scope, span: Span) -> usize {
-        let scope_count = self
-            .scopes
+    fn scope_index(&self, region_scope: region::Scope, span: Span) -> usize {
+        self.scopes
             .iter()
-            .rev()
-            .position(|scope| scope.region_scope == region_scope)
-            .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope));
-        let len = self.len();
-        assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
-        scope_count
-    }
-
-    fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Scope> + '_ {
-        self.scopes.iter_mut().rev()
-    }
-
-    fn top_scopes(&mut self, count: usize) -> impl DoubleEndedIterator<Item = &mut Scope> + '_ {
-        let len = self.len();
-        self.scopes[len - count..].iter_mut()
+            .rposition(|scope| scope.region_scope == region_scope)
+            .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope))
     }
 
     /// Returns the topmost active scope, which is known to be alive until
     /// the next scope expression.
-    pub(super) fn topmost(&self) -> region::Scope {
+    fn topmost(&self) -> region::Scope {
         self.scopes.last().expect("topmost_scope: no scopes present").region_scope
     }
-
-    fn source_info(&self, index: usize, span: Span) -> SourceInfo {
-        self.scopes[self.len() - index].source_info(span)
-    }
 }
 
 impl<'a, 'tcx> Builder<'a, 'tcx> {
@@ -371,28 +437,48 @@
     // ==========================
     //  Start a breakable scope, which tracks where `continue`, `break` and
     //  `return` should branch to.
-    crate fn in_breakable_scope<F, R>(
+    crate fn in_breakable_scope<F>(
         &mut self,
         loop_block: Option<BasicBlock>,
-        break_block: BasicBlock,
         break_destination: Place<'tcx>,
+        span: Span,
         f: F,
-    ) -> R
+    ) -> BlockAnd<()>
     where
-        F: FnOnce(&mut Builder<'a, 'tcx>) -> R,
+        F: FnOnce(&mut Builder<'a, 'tcx>) -> Option<BlockAnd<()>>,
     {
         let region_scope = self.scopes.topmost();
         let scope = BreakableScope {
             region_scope,
-            continue_block: loop_block,
-            break_block,
             break_destination,
+            break_drops: DropTree::new(),
+            continue_drops: loop_block.map(|_| DropTree::new()),
         };
         self.scopes.breakable_scopes.push(scope);
-        let res = f(self);
+        let normal_exit_block = f(self);
         let breakable_scope = self.scopes.breakable_scopes.pop().unwrap();
         assert!(breakable_scope.region_scope == region_scope);
-        res
+        let break_block = self.build_exit_tree(breakable_scope.break_drops, None);
+        if let Some(drops) = breakable_scope.continue_drops {
+            self.build_exit_tree(drops, loop_block);
+        }
+        match (normal_exit_block, break_block) {
+            (Some(block), None) | (None, Some(block)) => block,
+            (None, None) => self.cfg.start_new_block().unit(),
+            (Some(normal_block), Some(exit_block)) => {
+                let target = self.cfg.start_new_block();
+                let source_info = self.source_info(span);
+                self.cfg.terminate(
+                    unpack!(normal_block),
+                    source_info,
+                    TerminatorKind::Goto { target },
+                );
+                self.cfg.terminate(
+                    unpack!(exit_block),
+                    source_info,
+                    TerminatorKind::Goto { target },
+                );
+                target.unit()
+            }
+        }
     }
 
     crate fn in_opt_scope<F, R>(
@@ -476,46 +562,51 @@
         mut block: BasicBlock,
     ) -> BlockAnd<()> {
         debug!("pop_scope({:?}, {:?})", region_scope, block);
-        // If we are emitting a `drop` statement, we need to have the cached
-        // diverge cleanup pads ready in case that drop panics.
-        if self.scopes.may_panic(1) {
-            self.diverge_cleanup();
-        }
-        let (scope, unwind_to) = self.scopes.pop_scope(region_scope);
-        let unwind_to = unwind_to.unwrap_or_else(|| self.resume_block());
 
-        unpack!(
-            block = build_scope_drops(
-                &mut self.cfg,
-                self.generator_kind,
-                &scope,
-                block,
-                unwind_to,
-                self.arg_count,
-                false, // not generator
-                false, // not unwind path
-            )
-        );
+        block = self.leave_top_scope(block);
+
+        self.scopes.pop_scope(region_scope);
 
         block.unit()
     }
 
+    /// Sets up the drops for breaking from `block` to `target`.
     crate fn break_scope(
         &mut self,
         mut block: BasicBlock,
         value: Option<ExprRef<'tcx>>,
-        scope: BreakableTarget,
+        target: BreakableTarget,
         source_info: SourceInfo,
     ) -> BlockAnd<()> {
-        let (mut target_block, region_scope, destination) =
-            self.scopes.find_breakable_scope(source_info.span, scope);
-        if let BreakableTarget::Return = scope {
-            // We call this now, rather than when we start lowering the
-            // function so that the return block doesn't precede the entire
-            // rest of the CFG. Some passes and LLVM prefer blocks to be in
-            // approximately CFG order.
-            target_block = self.return_block();
-        }
+        let span = source_info.span;
+
+        let get_scope_index = |scope: region::Scope| {
+            // find the loop-scope by its `region::Scope`.
+            self.scopes
+                .breakable_scopes
+                .iter()
+                .rposition(|breakable_scope| breakable_scope.region_scope == scope)
+                .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
+        };
+        let (break_index, destination) = match target {
+            BreakableTarget::Return => {
+                let scope = &self.scopes.breakable_scopes[0];
+                if scope.break_destination != Place::return_place() {
+                    span_bug!(span, "`return` in item with no return scope");
+                }
+                (0, Some(scope.break_destination))
+            }
+            BreakableTarget::Break(scope) => {
+                let break_index = get_scope_index(scope);
+                let scope = &self.scopes.breakable_scopes[break_index];
+                (break_index, Some(scope.break_destination))
+            }
+            BreakableTarget::Continue(scope) => {
+                let break_index = get_scope_index(scope);
+                (break_index, None)
+            }
+        };
+
         if let Some(destination) = destination {
             if let Some(value) = value {
                 debug!("stmt_expr Break val block_context.push(SubExpr)");
@@ -528,131 +619,57 @@
         } else {
             assert!(value.is_none(), "`return` and `break` should have a destination");
         }
-        self.exit_scope(source_info.span, region_scope, block, target_block);
+
+        let region_scope = self.scopes.breakable_scopes[break_index].region_scope;
+        let scope_index = self.scopes.scope_index(region_scope, span);
+        let drops = if destination.is_some() {
+            &mut self.scopes.breakable_scopes[break_index].break_drops
+        } else {
+            self.scopes.breakable_scopes[break_index].continue_drops.as_mut().unwrap()
+        };
+        let mut drop_idx = ROOT_NODE;
+        for scope in &self.scopes.scopes[scope_index + 1..] {
+            for drop in &scope.drops {
+                drop_idx = drops.add_drop(*drop, drop_idx);
+            }
+        }
+        drops.add_entry(block, drop_idx);
+
+        // `build_drop_tree` doesn't have access to our source_info, so we
+        // create a dummy terminator now. `TerminatorKind::Resume` is used
+        // because MIR type checking will panic if it hasn't been overwritten.
+        self.cfg.terminate(block, source_info, TerminatorKind::Resume);
+
         self.cfg.start_new_block().unit()
     }
 
-    /// Branch out of `block` to `target`, exiting all scopes up to
-    /// and including `region_scope`. This will insert whatever drops are
-    /// needed. See module comment for details.
-    crate fn exit_scope(
+    crate fn exit_top_scope(
         &mut self,
-        span: Span,
-        region_scope: region::Scope,
         mut block: BasicBlock,
         target: BasicBlock,
+        source_info: SourceInfo,
     ) {
-        debug!(
-            "exit_scope(region_scope={:?}, block={:?}, target={:?})",
-            region_scope, block, target
-        );
-        let scope_count = self.scopes.num_scopes_above(region_scope, span);
-
-        // If we are emitting a `drop` statement, we need to have the cached
-        // diverge cleanup pads ready in case that drop panics.
-        let may_panic = self.scopes.may_panic(scope_count);
-        if may_panic {
-            self.diverge_cleanup();
-        }
-
-        let mut scopes = self.scopes.top_scopes(scope_count + 1).rev();
-        let mut scope = scopes.next().unwrap();
-        for next_scope in scopes {
-            if scope.drops.is_empty() {
-                scope = next_scope;
-                continue;
-            }
-            let source_info = scope.source_info(span);
-            block = match scope.cached_exits.entry((target, region_scope)) {
-                Entry::Occupied(e) => {
-                    self.cfg.goto(block, source_info, *e.get());
-                    return;
-                }
-                Entry::Vacant(v) => {
-                    let b = self.cfg.start_new_block();
-                    self.cfg.goto(block, source_info, b);
-                    v.insert(b);
-                    b
-                }
-            };
-
-            let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| {
-                debug_assert!(!may_panic, "cached block not present?");
-                START_BLOCK
-            });
-
-            unpack!(
-                block = build_scope_drops(
-                    &mut self.cfg,
-                    self.generator_kind,
-                    scope,
-                    block,
-                    unwind_to,
-                    self.arg_count,
-                    false, // not generator
-                    false, // not unwind path
-                )
-            );
-
-            scope = next_scope;
-        }
-
-        self.cfg.goto(block, self.scopes.source_info(scope_count, span), target);
+        block = self.leave_top_scope(block);
+        self.cfg.terminate(block, source_info, TerminatorKind::Goto { target });
     }
 
-    /// Creates a path that performs all required cleanup for dropping a generator.
-    ///
-    /// This path terminates in GeneratorDrop. Returns the start of the path.
-    /// None indicates there’s no cleanup to do at this point.
-    crate fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
-        // Fill in the cache for unwinds
-        self.diverge_cleanup_gen(true);
+    fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
+        // If we are emitting a `drop` statement, we need to have the cached
+        // diverge cleanup pads ready in case that drop panics.
+        let needs_cleanup = self.scopes.scopes.last().map_or(false, |scope| scope.needs_cleanup());
+        let is_generator = self.generator_kind.is_some();
+        let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
 
-        let src_info = self.scopes.source_info(self.scopes.len(), self.fn_span);
-        let resume_block = self.resume_block();
-        let mut scopes = self.scopes.iter_mut().peekable();
-        let mut block = self.cfg.start_new_block();
-        let result = block;
-
-        while let Some(scope) = scopes.next() {
-            block = if let Some(b) = scope.cached_generator_drop {
-                self.cfg.goto(block, src_info, b);
-                return Some(result);
-            } else {
-                let b = self.cfg.start_new_block();
-                scope.cached_generator_drop = Some(b);
-                self.cfg.goto(block, src_info, b);
-                b
-            };
-
-            let unwind_to = scopes
-                .peek()
-                .as_ref()
-                .map(|scope| {
-                    scope
-                        .cached_unwind
-                        .get(true)
-                        .unwrap_or_else(|| span_bug!(src_info.span, "cached block not present?"))
-                })
-                .unwrap_or(resume_block);
-
-            unpack!(
-                block = build_scope_drops(
-                    &mut self.cfg,
-                    self.generator_kind,
-                    scope,
-                    block,
-                    unwind_to,
-                    self.arg_count,
-                    true, // is generator
-                    true, // is cached path
-                )
-            );
-        }
-
-        self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
-
-        Some(result)
+        let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
+        unpack!(build_scope_drops(
+            &mut self.cfg,
+            &mut self.scopes.unwind_drops,
+            scope,
+            block,
+            unwind_to,
+            is_generator && needs_cleanup,
+            self.arg_count,
+        ))
     }
 
     /// Creates a new source scope, nested in the current one.
@@ -684,6 +701,8 @@
         self.source_scopes.push(SourceScopeData {
             span,
             parent_scope: Some(parent),
+            inlined: None,
+            inlined_parent_scope: None,
             local_data: ClearCrossCrate::Set(scope_local_data),
         })
     }
@@ -728,15 +747,6 @@
         }
     }
 
-    // Schedule an abort block - this is used for some ABIs that cannot unwind
-    crate fn schedule_abort(&mut self) -> BasicBlock {
-        let source_info = self.scopes.source_info(self.scopes.len(), self.fn_span);
-        let abortblk = self.cfg.start_new_cleanup_block();
-        self.cfg.terminate(abortblk, source_info, TerminatorKind::Abort);
-        self.cached_resume_block = Some(abortblk);
-        abortblk
-    }
-
     // Scheduling drops
     // ================
     crate fn schedule_drop_storage_and_value(
@@ -749,11 +759,10 @@
         self.schedule_drop(span, region_scope, local, DropKind::Value);
     }
 
-    /// Indicates that `place` should be dropped on exit from
-    /// `region_scope`.
+    /// Indicates that `place` should be dropped on exit from `region_scope`.
     ///
-    /// When called with `DropKind::Storage`, `place` should be a local
-    /// with an index higher than the current `self.arg_count`.
+    /// When called with `DropKind::Storage`, `place` shouldn't be the return
+    /// place or a function parameter.
     crate fn schedule_drop(
         &mut self,
         span: Span,
@@ -781,70 +790,74 @@
             }
         };
 
-        for scope in self.scopes.iter_mut() {
-            let this_scope = scope.region_scope == region_scope;
-            // When building drops, we try to cache chains of drops in such a way so these drops
-            // could be reused by the drops which would branch into the cached (already built)
-            // blocks.  This, however, means that whenever we add a drop into a scope which already
-            // had some blocks built (and thus, cached) for it, we must invalidate all caches which
-            // might branch into the scope which had a drop just added to it. This is necessary,
-            // because otherwise some other code might use the cache to branch into already built
-            // chain of drops, essentially ignoring the newly added drop.
-            //
-            // For example consider there’s two scopes with a drop in each. These are built and
-            // thus the caches are filled:
-            //
-            // +--------------------------------------------------------+
-            // | +---------------------------------+                    |
-            // | | +--------+     +-------------+  |  +---------------+ |
-            // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
-            // | | +--------+     +-------------+  |  +---------------+ |
-            // | +------------|outer_scope cache|--+                    |
-            // +------------------------------|middle_scope cache|------+
-            //
-            // Now, a new, inner-most scope is added along with a new drop into both inner-most and
-            // outer-most scopes:
-            //
-            // +------------------------------------------------------------+
-            // | +----------------------------------+                       |
-            // | | +--------+      +-------------+  |   +---------------+   | +-------------+
-            // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
-            // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
-            // | |             +-+ +-------------+  |                       |
-            // | +---|invalid outer_scope cache|----+                       |
-            // +----=----------------|invalid middle_scope cache|-----------+
-            //
-            // If, when adding `drop(new)` we do not invalidate the cached blocks for both
-            // outer_scope and middle_scope, then, when building drops for the inner (right-most)
-            // scope, the old, cached blocks, without `drop(new)` will get used, producing the
-            // wrong results.
-            //
-            // The cache and its invalidation for unwind branch is somewhat special. The cache is
-            // per-drop, rather than per scope, which has a several different implications. Adding
-            // a new drop into a scope will not invalidate cached blocks of the prior drops in the
-            // scope. That is true, because none of the already existing drops will have an edge
-            // into a block with the newly added drop.
-            //
-            // Note that this code iterates scopes from the inner-most to the outer-most,
-            // invalidating caches of each scope visited. This way bare minimum of the
-            // caches gets invalidated. i.e., if a new drop is added into the middle scope, the
-            // cache of outer scope stays intact.
-            scope.invalidate_cache(!needs_drop, self.generator_kind, this_scope);
-            if this_scope {
+        // When building drops, we try to cache chains of drops to reduce the
+        // number of `DropTree::add_drop` calls. This, however, means that
+        // whenever we add a drop into a scope which already had some entries
+        // in the drop tree built (and thus, cached) for it, we must invalidate
+        // all caches which might branch into the scope which had a drop just
+        // added to it. This is necessary, because otherwise some other code
+        // might use the cache to branch into an already-built chain of drops,
+        // essentially ignoring the newly added drop.
+        //
+        // For example, consider two scopes with a drop in each. These
+        // are built and thus the caches are filled:
+        //
+        // +--------------------------------------------------------+
+        // | +---------------------------------+                    |
+        // | | +--------+     +-------------+  |  +---------------+ |
+        // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
+        // | | +--------+     +-------------+  |  +---------------+ |
+        // | +------------|outer_scope cache|--+                    |
+        // +------------------------------|middle_scope cache|------+
+        //
+        // Now, a new, inner-most scope is added along with a new drop into
+        // both inner-most and outer-most scopes:
+        //
+        // +------------------------------------------------------------+
+        // | +----------------------------------+                       |
+        // | | +--------+      +-------------+  |   +---------------+   | +-------------+
+        // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
+        // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
+        // | |             +-+ +-------------+  |                       |
+        // | +---|invalid outer_scope cache|----+                       |
+        // +----=----------------|invalid middle_scope cache|-----------+
+        //
+        // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
+        // outer_scope and middle_scope, then, when building drops for the inner (right-most)
+        // scope, the old, cached blocks, without `drop(new)` will get used, producing the
+        // wrong results.
+        //
+        // Note that this code iterates scopes from the inner-most to the outer-most,
+        // invalidating the caches of each scope visited. This way only the bare minimum
+        // of caches gets invalidated, i.e., if a new drop is added into the middle scope,
+        // the cache of the outer scope stays intact.
+        //
+        // Since we only cache drops for the unwind path and the generator drop
+        // path, we only need to invalidate the cache for drops that happen on
+        // the unwind or generator drop paths. This means that for
+        // non-generators we don't need to invalidate caches for `DropKind::Storage`.
+        let invalidate_caches = needs_drop || self.generator_kind.is_some();
+        for scope in self.scopes.scopes.iter_mut().rev() {
+            if invalidate_caches {
+                scope.invalidate_cache();
+            }
+
+            if scope.region_scope == region_scope {
                 let region_scope_span =
                     region_scope.span(self.hir.tcx(), &self.hir.region_scope_tree);
                 // Attribute scope exit drops to scope's closing brace.
                 let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span);
 
                 scope.drops.push(DropData {
-                    span: scope_end,
+                    source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
                     local,
                     kind: drop_kind,
-                    cached_block: CachedBlock::default(),
                 });
+
                 return;
             }
         }
+
         span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local);
     }
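The invalidation rule described in the comment above can be seen in a tiny model. The sketch below uses assumed, illustrative names (`BlockId`, `ToyScope`, `ToyScopes`; none of this is rustc's API): scheduling a drop into an outer scope wipes the cached chains of that scope and of every scope nested inside it, because those chains may branch into the scope that just gained a drop.

```rust
// Toy model of the invalidation rule above; not rustc's types.
#[derive(Clone, Copy, Debug, PartialEq)]
struct BlockId(usize);

#[derive(Default)]
struct ToyScope {
    drops: Vec<&'static str>,
    // A previously built exit/unwind chain for this scope, if any.
    cached_exit: Option<BlockId>,
}

struct ToyScopes {
    // Index 0 is the outermost scope, the last element the innermost.
    scopes: Vec<ToyScope>,
}

impl ToyScopes {
    /// Schedule a drop in the scope at `depth`, invalidating the cached
    /// chains of that scope and of every scope nested inside it, because
    /// those chains may branch into the scope that just gained a drop.
    fn schedule_drop(&mut self, depth: usize, local: &'static str) {
        // Walk inner-most to outer-most, as the comment above describes.
        for scope in self.scopes.iter_mut().skip(depth).rev() {
            scope.cached_exit = None;
        }
        self.scopes[depth].drops.push(local);
    }
}

fn main() {
    let mut s = ToyScopes { scopes: vec![ToyScope::default(), ToyScope::default()] };
    s.scopes[1].cached_exit = Some(BlockId(7)); // pretend this chain was already built
    s.schedule_drop(0, "new_outer_value");
    assert_eq!(s.scopes[1].cached_exit, None); // the inner cache had to be thrown away
}
```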
 
@@ -893,8 +906,9 @@
 
             Some(local_scope) => self
                 .scopes
+                .scopes
                 .iter_mut()
-                .find(|scope| scope.region_scope == local_scope)
+                .rfind(|scope| scope.region_scope == local_scope)
                 .unwrap_or_else(|| bug!("scope {:?} not found in scope list!", local_scope)),
         };
 
@@ -944,13 +958,16 @@
                     // Manually drop the condition on both branches.
                     let top_scope = self.scopes.scopes.last_mut().unwrap();
                     let top_drop_data = top_scope.drops.pop().unwrap();
+                    if self.generator_kind.is_some() {
+                        top_scope.invalidate_cache();
+                    }
 
                     match top_drop_data.kind {
                         DropKind::Value { .. } => {
                             bug!("Drop scheduled on top of condition variable")
                         }
                         DropKind::Storage => {
-                            let source_info = top_scope.source_info(top_drop_data.span);
+                            let source_info = top_drop_data.source_info;
                             let local = top_drop_data.local;
                             assert_eq!(local, cond_temp, "Drop scheduled on top of condition");
                             self.cfg.push(
@@ -963,8 +980,6 @@
                             );
                         }
                     }
-
-                    top_scope.invalidate_cache(true, self.generator_kind, true);
                 } else {
                     bug!("Expected as_local_operand to produce a temporary");
                 }
@@ -974,62 +989,86 @@
         (true_block, false_block)
     }
 
-    /// Creates a path that performs all required cleanup for unwinding.
+    /// Returns the [DropIdx] for the innermost drop if the function unwound at
+    /// this point. The `DropIdx` will be created if it doesn't already exist.
+    fn diverge_cleanup(&mut self) -> DropIdx {
+        let is_generator = self.generator_kind.is_some();
+        let (uncached_scope, mut cached_drop) = self
+            .scopes
+            .scopes
+            .iter()
+            .enumerate()
+            .rev()
+            .find_map(|(scope_idx, scope)| {
+                scope.cached_unwind_block.map(|cached_block| (scope_idx + 1, cached_block))
+            })
+            .unwrap_or((0, ROOT_NODE));
+
+        for scope in &mut self.scopes.scopes[uncached_scope..] {
+            for drop in &scope.drops {
+                if is_generator || drop.kind == DropKind::Value {
+                    cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
+                }
+            }
+            scope.cached_unwind_block = Some(cached_drop);
+        }
+
+        cached_drop
+    }
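For readers new to this change's central data structure: a drop tree stores each drop together with the index of the drop that runs after it, so chains that share a suffix of drops share nodes, and `diverge_cleanup` above only appends the drops of scopes that are not yet cached. Below is a minimal sketch under assumed names (`ToyDropTree`, `ToyDrop`); rustc's real `DropTree` also tracks drop kinds and is lowered to MIR blocks later.

```rust
// A minimal sketch of the drop-tree idea; illustrative, not rustc's exact API.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct DropIdx(usize);

/// The root node means "nothing left to drop" (resume / return / etc.).
const ROOT_NODE: DropIdx = DropIdx(0);

#[derive(Clone, Copy, Debug)]
struct ToyDrop {
    local: &'static str,
}

struct ToyDropTree {
    /// `drops[i] = (drop, next)`: perform `drop`, then continue with `next`.
    drops: Vec<(Option<ToyDrop>, DropIdx)>,
    /// Blocks of the normal CFG that enter the tree at a given node.
    entry_points: Vec<(DropIdx, usize /* BasicBlock */)>,
}

impl ToyDropTree {
    fn new() -> Self {
        ToyDropTree { drops: vec![(None, ROOT_NODE)], entry_points: vec![] }
    }

    /// Add a drop that runs before `next`, returning its index. Chains that
    /// share a suffix of drops simply share the `next` index.
    fn add_drop(&mut self, drop: ToyDrop, next: DropIdx) -> DropIdx {
        self.drops.push((Some(drop), next));
        DropIdx(self.drops.len() - 1)
    }

    fn add_entry(&mut self, block: usize, to: DropIdx) {
        self.entry_points.push((to, block));
    }
}

fn main() {
    let mut tree = ToyDropTree::new();
    // Two scopes: the inner chain reuses the outer drop's node.
    let outer = tree.add_drop(ToyDrop { local: "a" }, ROOT_NODE);
    let inner = tree.add_drop(ToyDrop { local: "b" }, outer);
    tree.add_entry(3, inner); // block 3 unwinds through `b`, then `a`
    tree.add_entry(5, outer); // block 5 unwinds through `a` only
    assert_eq!(tree.drops[inner.0].1, outer);
}
```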
+
+    /// Prepares to create a path that performs all required cleanup for a
+    /// terminator that can unwind at the given basic block.
     ///
-    /// This path terminates in Resume. Returns the start of the path.
-    /// See module comment for more details.
-    crate fn diverge_cleanup(&mut self) -> BasicBlock {
-        self.diverge_cleanup_gen(false)
+    /// This path terminates in Resume. The path isn't created until after all
+    /// of the non-unwind paths in this item have been lowered.
+    crate fn diverge_from(&mut self, start: BasicBlock) {
+        debug_assert!(
+            matches!(
+                self.cfg.block_data(start).terminator().kind,
+                TerminatorKind::Assert { .. }
+                | TerminatorKind::Call { .. }
+                | TerminatorKind::DropAndReplace { .. }
+                | TerminatorKind::FalseUnwind { .. }
+            ),
+            "diverge_from called on block with terminator that cannot unwind."
+        );
+
+        let next_drop = self.diverge_cleanup();
+        self.scopes.unwind_drops.add_entry(start, next_drop);
     }
 
-    fn resume_block(&mut self) -> BasicBlock {
-        if let Some(target) = self.cached_resume_block {
-            target
-        } else {
-            let resumeblk = self.cfg.start_new_cleanup_block();
-            self.cfg.terminate(
-                resumeblk,
-                SourceInfo::outermost(self.fn_span),
-                TerminatorKind::Resume,
-            );
-            self.cached_resume_block = Some(resumeblk);
-            resumeblk
-        }
-    }
+    /// Sets up a path that performs all required cleanup for dropping a
+    /// generator, starting from the given block that ends in
+    /// [TerminatorKind::Yield].
+    ///
+    /// This path terminates in GeneratorDrop.
+    crate fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) {
+        debug_assert!(
+            matches!(
+                self.cfg.block_data(yield_block).terminator().kind,
+                TerminatorKind::Yield { .. }
+            ),
+            "generator_drop_cleanup called on block with non-yield terminator."
+        );
+        let (uncached_scope, mut cached_drop) = self
+            .scopes
+            .scopes
+            .iter()
+            .enumerate()
+            .rev()
+            .find_map(|(scope_idx, scope)| {
+                scope.cached_generator_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
+            })
+            .unwrap_or((0, ROOT_NODE));
 
-    fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
-        // Build up the drops in **reverse** order. The end result will
-        // look like:
-        //
-        //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
-        //
-        // However, we build this in **reverse order**. That is, we
-        // process scopes[0], then scopes[1], etc, pointing each one at
-        // the result generates from the one before. Along the way, we
-        // store caches. If everything is cached, we'll just walk right
-        // to left reading the cached results but never created anything.
-
-        // Find the last cached block
-        debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes);
-        let cached_cleanup = self.scopes.iter_mut().enumerate().find_map(|(idx, ref scope)| {
-            let cached_block = scope.cached_unwind.get(generator_drop)?;
-            Some((cached_block, idx))
-        });
-        let (mut target, first_uncached) =
-            cached_cleanup.unwrap_or_else(|| (self.resume_block(), self.scopes.len()));
-
-        for scope in self.scopes.top_scopes(first_uncached) {
-            target = build_diverge_scope(
-                &mut self.cfg,
-                scope.region_scope_span,
-                scope,
-                target,
-                generator_drop,
-                self.generator_kind,
-            );
+        for scope in &mut self.scopes.scopes[uncached_scope..] {
+            for drop in &scope.drops {
+                cached_drop = self.scopes.generator_drops.add_drop(*drop, cached_drop);
+            }
+            scope.cached_generator_drop_block = Some(cached_drop);
         }
 
-        target
+        self.scopes.generator_drops.add_entry(yield_block, cached_drop);
     }
 
     /// Utility function for *non*-scope code to build their own drops
@@ -1042,21 +1081,18 @@
     ) -> BlockAnd<()> {
         let source_info = self.source_info(span);
         let next_target = self.cfg.start_new_block();
-        let diverge_target = self.diverge_cleanup();
+
         self.cfg.terminate(
             block,
             source_info,
-            TerminatorKind::DropAndReplace {
-                place,
-                value,
-                target: next_target,
-                unwind: Some(diverge_target),
-            },
+            TerminatorKind::DropAndReplace { place, value, target: next_target, unwind: None },
         );
+        self.diverge_from(block);
+
         next_target.unit()
     }
 
-    /// Creates an Assert terminator and return the success block.
+    /// Creates an `Assert` terminator and returns the success block.
     /// If the boolean condition operand is not the expected value,
     /// a runtime panic will be caused with the given message.
     crate fn assert(
@@ -1068,51 +1104,41 @@
         span: Span,
     ) -> BasicBlock {
         let source_info = self.source_info(span);
-
         let success_block = self.cfg.start_new_block();
-        let cleanup = self.diverge_cleanup();
 
         self.cfg.terminate(
             block,
             source_info,
-            TerminatorKind::Assert {
-                cond,
-                expected,
-                msg,
-                target: success_block,
-                cleanup: Some(cleanup),
-            },
+            TerminatorKind::Assert { cond, expected, msg, target: success_block, cleanup: None },
         );
+        self.diverge_from(block);
 
         success_block
     }
 
-    // `match` arm scopes
-    // ==================
     /// Unschedules any drops in the top scope.
     ///
     /// This is only needed for `match` arm scopes, because they have one
     /// entrance per pattern, but only one exit.
-    pub(crate) fn clear_top_scope(&mut self, region_scope: region::Scope) {
+    crate fn clear_top_scope(&mut self, region_scope: region::Scope) {
         let top_scope = self.scopes.scopes.last_mut().unwrap();
 
         assert_eq!(top_scope.region_scope, region_scope);
 
         top_scope.drops.clear();
-        top_scope.invalidate_cache(false, self.generator_kind, true);
+        top_scope.invalidate_cache();
     }
 }
 
-/// Builds drops for pop_scope and exit_scope.
+/// Builds drops for `pop_scope` and `leave_top_scope`.
 fn build_scope_drops<'tcx>(
     cfg: &mut CFG<'tcx>,
-    generator_kind: Option<GeneratorKind>,
+    unwind_drops: &mut DropTree,
     scope: &Scope,
     mut block: BasicBlock,
-    last_unwind_to: BasicBlock,
+    mut unwind_to: DropIdx,
+    storage_dead_on_unwind: bool,
     arg_count: usize,
-    generator_drop: bool,
-    is_cached_path: bool,
 ) -> BlockAnd<()> {
     debug!("build_scope_drops({:?} -> {:?})", block, scope);
 
@@ -1135,37 +1161,43 @@
     // drops for the unwind path should have already been generated by
     // `diverge_cleanup_gen`.
 
-    for drop_idx in (0..scope.drops.len()).rev() {
-        let drop_data = &scope.drops[drop_idx];
-        let source_info = scope.source_info(drop_data.span);
+    for drop_data in scope.drops.iter().rev() {
+        let source_info = drop_data.source_info;
         let local = drop_data.local;
 
         match drop_data.kind {
             DropKind::Value => {
+                // `unwind_to` should drop the value that we're about to
+                // schedule. If dropping this value panics, then we continue
+                // with the *next* value on the unwind path.
+                debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
+                debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
+                unwind_to = unwind_drops.drops[unwind_to].1;
+
                 // If the operand has been moved, and we are not on an unwind
                 // path, then don't generate the drop. (We only take this into
                 // account for non-unwind paths so as not to disturb the
                 // caching mechanism.)
-                if !is_cached_path && scope.moved_locals.iter().any(|&o| o == local) {
+                if scope.moved_locals.iter().any(|&o| o == local) {
                     continue;
                 }
 
-                let unwind_to = get_unwind_to(scope, generator_kind, drop_idx, generator_drop)
-                    .unwrap_or(last_unwind_to);
+                unwind_drops.add_entry(block, unwind_to);
 
                 let next = cfg.start_new_block();
                 cfg.terminate(
                     block,
                     source_info,
-                    TerminatorKind::Drop {
-                        place: local.into(),
-                        target: next,
-                        unwind: Some(unwind_to),
-                    },
+                    TerminatorKind::Drop { place: local.into(), target: next, unwind: None },
                 );
                 block = next;
             }
             DropKind::Storage => {
+                if storage_dead_on_unwind {
+                    debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
+                    debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
+                    unwind_to = unwind_drops.drops[unwind_to].1;
+                }
                 // Only temps and vars need their storage dead.
                 assert!(local.index() > arg_count);
                 cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) });
@@ -1175,139 +1207,189 @@
     block.unit()
 }
 
-fn get_unwind_to(
-    scope: &Scope,
-    generator_kind: Option<GeneratorKind>,
-    unwind_from: usize,
-    generator_drop: bool,
-) -> Option<BasicBlock> {
-    for drop_idx in (0..unwind_from).rev() {
-        let drop_data = &scope.drops[drop_idx];
-        match (generator_kind, &drop_data.kind) {
-            (Some(_), DropKind::Storage) => {
-                return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| {
-                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
-                }));
+impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
+    /// Build a drop tree for a breakable scope.
+    ///
+    /// If `continue_block` is `Some`, then the tree is for `continue` inside a
+    /// loop. Otherwise this is for `break` or `return`.
+    fn build_exit_tree(
+        &mut self,
+        mut drops: DropTree,
+        continue_block: Option<BasicBlock>,
+    ) -> Option<BlockAnd<()>> {
+        let mut blocks = IndexVec::from_elem(None, &drops.drops);
+        blocks[ROOT_NODE] = continue_block;
+
+        drops.build_mir::<ExitScopes>(&mut self.cfg, &mut blocks);
+
+        // Link the exit drop tree to the unwind drop tree.
+        if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) {
+            let unwind_target = self.diverge_cleanup();
+            let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1);
+            for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
+                match drop_data.0.kind {
+                    DropKind::Storage => {
+                        if self.generator_kind.is_some() {
+                            let unwind_drop = self
+                                .scopes
+                                .unwind_drops
+                                .add_drop(drop_data.0, unwind_indices[drop_data.1]);
+                            unwind_indices.push(unwind_drop);
+                        } else {
+                            unwind_indices.push(unwind_indices[drop_data.1]);
+                        }
+                    }
+                    DropKind::Value => {
+                        let unwind_drop = self
+                            .scopes
+                            .unwind_drops
+                            .add_drop(drop_data.0, unwind_indices[drop_data.1]);
+                        self.scopes
+                            .unwind_drops
+                            .add_entry(blocks[drop_idx].unwrap(), unwind_indices[drop_data.1]);
+                        unwind_indices.push(unwind_drop);
+                    }
+                }
             }
-            (None, DropKind::Value) => {
-                return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| {
-                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
-                }));
-            }
-            _ => (),
+        }
+        blocks[ROOT_NODE].map(BasicBlock::unit)
+    }
+
+    /// Build the unwind and generator drop trees.
+    crate fn build_drop_trees(&mut self, should_abort: bool) {
+        if self.generator_kind.is_some() {
+            self.build_generator_drop_trees(should_abort);
+        } else {
+            Self::build_unwind_tree(
+                &mut self.cfg,
+                &mut self.scopes.unwind_drops,
+                self.fn_span,
+                should_abort,
+                &mut None,
+            );
         }
     }
-    None
-}
 
-fn build_diverge_scope<'tcx>(
-    cfg: &mut CFG<'tcx>,
-    span: Span,
-    scope: &mut Scope,
-    mut target: BasicBlock,
-    generator_drop: bool,
-    generator_kind: Option<GeneratorKind>,
-) -> BasicBlock {
-    // Build up the drops in **reverse** order. The end result will
-    // look like:
-    //
-    //    [drops[n]] -...-> [drops[0]] -> [target]
-    //
-    // The code in this function reads from right to left. At each
-    // point, we check for cached blocks representing the
-    // remainder. If everything is cached, we'll just walk right to
-    // left reading the cached results but never create anything.
+    fn build_generator_drop_trees(&mut self, should_abort: bool) {
+        // Build the drop tree for dropping the generator while it's suspended.
+        let drops = &mut self.scopes.generator_drops;
+        let cfg = &mut self.cfg;
+        let fn_span = self.fn_span;
+        let mut blocks = IndexVec::from_elem(None, &drops.drops);
+        drops.build_mir::<GeneratorDrop>(cfg, &mut blocks);
+        if let Some(root_block) = blocks[ROOT_NODE] {
+            cfg.terminate(
+                root_block,
+                SourceInfo::outermost(fn_span),
+                TerminatorKind::GeneratorDrop,
+            );
+        }
 
-    let source_scope = scope.source_scope;
-    let source_info = |span| SourceInfo { span, scope: source_scope };
+        // Build the drop tree for unwinding in the normal control flow paths.
+        let resume_block = &mut None;
+        let unwind_drops = &mut self.scopes.unwind_drops;
+        Self::build_unwind_tree(cfg, unwind_drops, fn_span, should_abort, resume_block);
 
-    // We keep track of StorageDead statements to prepend to our current block
-    // and store them here, in reverse order.
-    let mut storage_deads = vec![];
-
-    let mut target_built_by_us = false;
-
-    // Build up the drops. Here we iterate the vector in
-    // *forward* order, so that we generate drops[0] first (right to
-    // left in diagram above).
-    debug!("build_diverge_scope({:?})", scope.drops);
-    for (j, drop_data) in scope.drops.iter_mut().enumerate() {
-        debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
-        // Only full value drops are emitted in the diverging path,
-        // not StorageDead, except in the case of generators.
+        // Build the drop tree for unwinding when dropping a suspended
+        // generator.
         //
-        // Note: This may not actually be what we desire (are we
-        // "freeing" stack storage as we unwind, or merely observing a
-        // frozen stack)? In particular, the intent may have been to
-        // match the behavior of clang, but on inspection eddyb says
-        // this is not what clang does.
-        match drop_data.kind {
-            DropKind::Storage if generator_kind.is_some() => {
-                storage_deads.push(Statement {
-                    source_info: source_info(drop_data.span),
-                    kind: StatementKind::StorageDead(drop_data.local),
-                });
-                if !target_built_by_us {
-                    // We cannot add statements to an existing block, so we create a new
-                    // block for our StorageDead statements.
-                    let block = cfg.start_new_cleanup_block();
-                    let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
-                    cfg.goto(block, source_info, target);
-                    target = block;
-                    target_built_by_us = true;
-                }
-                *drop_data.cached_block.ref_mut(generator_drop) = Some(target);
+        // This is a separate tree from the standard unwind paths, to prevent
+        // drop elaboration from creating drop flags that would have to be
+        // captured by the generator. I'm not sure how important this
+        // optimization is, but it is here.
+        for (drop_idx, drop_data) in drops.drops.iter_enumerated() {
+            if let DropKind::Value = drop_data.0.kind {
+                debug_assert!(drop_data.1 < drops.drops.next_index());
+                drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap()));
             }
-            DropKind::Storage => {}
-            DropKind::Value => {
-                let cached_block = drop_data.cached_block.ref_mut(generator_drop);
-                target = if let Some(cached_block) = *cached_block {
-                    storage_deads.clear();
-                    target_built_by_us = false;
-                    cached_block
-                } else {
-                    push_storage_deads(cfg, target, &mut storage_deads);
-                    let block = cfg.start_new_cleanup_block();
-                    cfg.terminate(
-                        block,
-                        source_info(drop_data.span),
-                        TerminatorKind::Drop {
-                            place: drop_data.local.into(),
-                            target,
-                            unwind: None,
-                        },
-                    );
-                    *cached_block = Some(block);
-                    target_built_by_us = true;
-                    block
-                };
-            }
-        };
+        }
+        Self::build_unwind_tree(cfg, drops, fn_span, should_abort, resume_block);
     }
-    push_storage_deads(cfg, target, &mut storage_deads);
-    *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
 
-    assert!(storage_deads.is_empty());
-    debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
+    fn build_unwind_tree(
+        cfg: &mut CFG<'tcx>,
+        drops: &mut DropTree,
+        fn_span: Span,
+        should_abort: bool,
+        resume_block: &mut Option<BasicBlock>,
+    ) {
+        let mut blocks = IndexVec::from_elem(None, &drops.drops);
+        blocks[ROOT_NODE] = *resume_block;
+        drops.build_mir::<Unwind>(cfg, &mut blocks);
+        if let (None, Some(resume)) = (*resume_block, blocks[ROOT_NODE]) {
+            // `TerminatorKind::Abort` is used for `#[unwind(aborts)]`
+            // functions.
+            let terminator =
+                if should_abort { TerminatorKind::Abort } else { TerminatorKind::Resume };
 
-    target
+            cfg.terminate(resume, SourceInfo::outermost(fn_span), terminator);
+
+            *resume_block = blocks[ROOT_NODE];
+        }
+    }
 }
 
-fn push_storage_deads<'tcx>(
-    cfg: &mut CFG<'tcx>,
-    target: BasicBlock,
-    storage_deads: &mut Vec<Statement<'tcx>>,
-) {
-    if storage_deads.is_empty() {
-        return;
+// DropTreeBuilder implementations.
+
+struct ExitScopes;
+
+impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes {
+    fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+        cfg.start_new_block()
     }
-    let statements = &mut cfg.block_data_mut(target).statements;
-    storage_deads.reverse();
-    debug!(
-        "push_storage_deads({:?}), storage_deads={:?}, statements={:?}",
-        target, storage_deads, statements
-    );
-    storage_deads.append(statements);
-    mem::swap(statements, storage_deads);
-    assert!(storage_deads.is_empty());
+    fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+        cfg.block_data_mut(from).terminator_mut().kind = TerminatorKind::Goto { target: to };
+    }
+}
+
+struct GeneratorDrop;
+
+impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop {
+    fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+        cfg.start_new_block()
+    }
+    fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+        let term = cfg.block_data_mut(from).terminator_mut();
+        if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
+            *drop = Some(to);
+        } else {
+            span_bug!(
+                term.source_info.span,
+                "cannot enter generator drop tree from {:?}",
+                term.kind
+            )
+        }
+    }
+}
+
+struct Unwind;
+
+impl<'tcx> DropTreeBuilder<'tcx> for Unwind {
+    fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+        cfg.start_new_cleanup_block()
+    }
+    fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+        let term = &mut cfg.block_data_mut(from).terminator_mut();
+        match &mut term.kind {
+            TerminatorKind::Drop { unwind, .. }
+            | TerminatorKind::DropAndReplace { unwind, .. }
+            | TerminatorKind::FalseUnwind { unwind, .. }
+            | TerminatorKind::Call { cleanup: unwind, .. }
+            | TerminatorKind::Assert { cleanup: unwind, .. } => {
+                *unwind = Some(to);
+            }
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::InlineAsm { .. } => {
+                span_bug!(term.source_info.span, "cannot unwind from {:?}", term.kind)
+            }
+        }
+    }
 }
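The three `DropTreeBuilder` impls above differ only in how an edge from the normal CFG enters the lowered drop tree: a plain goto, the `drop` edge of a `Yield`, or the `unwind`/`cleanup` edge of a panicking terminator. A toy illustration of that pattern, with made-up types standing in for the CFG:

```rust
// Toy illustration of the `DropTreeBuilder` pattern; the `Toy*` names and the
// string-based CFG are invented for the example.
trait ToyDropTreeBuilder {
    fn make_block(cfg: &mut Vec<String>) -> usize {
        cfg.push(String::from("empty block"));
        cfg.len() - 1
    }
    /// How an existing block `from` gets an edge into the drop tree at `to`.
    fn add_entry(cfg: &mut Vec<String>, from: usize, to: usize);
}

struct ToyExitScopes;
impl ToyDropTreeBuilder for ToyExitScopes {
    fn add_entry(cfg: &mut Vec<String>, from: usize, to: usize) {
        // Exit paths simply jump into the tree.
        cfg[from] = format!("goto -> bb{}", to);
    }
}

struct ToyUnwind;
impl ToyDropTreeBuilder for ToyUnwind {
    fn add_entry(cfg: &mut Vec<String>, from: usize, to: usize) {
        // Unwind paths fill in the `unwind`/`cleanup` edge of a panicking terminator.
        cfg[from] = format!("unwind -> bb{}", to);
    }
}

fn main() {
    let mut cfg = vec![String::from("call"), String::from("break")];
    let cleanup = <ToyUnwind as ToyDropTreeBuilder>::make_block(&mut cfg);
    <ToyUnwind as ToyDropTreeBuilder>::add_entry(&mut cfg, 0, cleanup);
    <ToyExitScopes as ToyDropTreeBuilder>::add_entry(&mut cfg, 1, cleanup);
    assert_eq!(cfg[0], "unwind -> bb2");
    assert_eq!(cfg[1], "goto -> bb2");
}
```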
diff --git a/compiler/rustc_mir_build/src/lib.rs b/compiler/rustc_mir_build/src/lib.rs
index 714041a..0866892 100644
--- a/compiler/rustc_mir_build/src/lib.rs
+++ b/compiler/rustc_mir_build/src/lib.rs
@@ -9,6 +9,7 @@
 #![feature(control_flow_enum)]
 #![feature(crate_visibility_modifier)]
 #![feature(bool_to_option)]
+#![feature(once_cell)]
 #![feature(or_patterns)]
 #![recursion_limit = "256"]
 
diff --git a/compiler/rustc_mir_build/src/lints.rs b/compiler/rustc_mir_build/src/lints.rs
index 45a89c4..576b537 100644
--- a/compiler/rustc_mir_build/src/lints.rs
+++ b/compiler/rustc_mir_build/src/lints.rs
@@ -1,7 +1,6 @@
 use rustc_data_structures::graph::iterate::{
     NodeStatus, TriColorDepthFirstSearch, TriColorVisitor,
 };
-use rustc_hir::def_id::LocalDefId;
 use rustc_hir::intravisit::FnKind;
 use rustc_middle::hir::map::blocks::FnLikeNode;
 use rustc_middle::mir::{BasicBlock, Body, Operand, TerminatorKind};
@@ -11,7 +10,8 @@
 use rustc_span::Span;
 use std::ops::ControlFlow;
 
-crate fn check<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, def_id: LocalDefId) {
+crate fn check<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+    let def_id = body.source.def_id().expect_local();
     let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
 
     if let Some(fn_like_node) = FnLikeNode::from_node(tcx.hir().get(hir_id)) {
@@ -31,7 +31,7 @@
             _ => &[],
         };
 
-        let mut vis = Search { tcx, body, def_id, reachable_recursive_calls: vec![], trait_substs };
+        let mut vis = Search { tcx, body, reachable_recursive_calls: vec![], trait_substs };
         if let Some(NonRecursive) = TriColorDepthFirstSearch::new(&body).run_from_start(&mut vis) {
             return;
         }
@@ -58,7 +58,6 @@
 struct Search<'mir, 'tcx> {
     tcx: TyCtxt<'tcx>,
     body: &'mir Body<'tcx>,
-    def_id: LocalDefId,
     trait_substs: &'tcx [GenericArg<'tcx>],
 
     reachable_recursive_calls: Vec<Span>,
@@ -67,17 +66,20 @@
 impl<'mir, 'tcx> Search<'mir, 'tcx> {
     /// Returns `true` if `func` refers to the function we are searching in.
     fn is_recursive_call(&self, func: &Operand<'tcx>) -> bool {
-        let Search { tcx, body, def_id, trait_substs, .. } = *self;
-        let param_env = tcx.param_env(def_id);
+        let Search { tcx, body, trait_substs, .. } = *self;
+        let caller = body.source.def_id();
+        let param_env = tcx.param_env(caller);
 
         let func_ty = func.ty(body, tcx);
-        if let ty::FnDef(fn_def_id, substs) = *func_ty.kind() {
-            let (call_fn_id, call_substs) =
-                if let Ok(Some(instance)) = Instance::resolve(tcx, param_env, fn_def_id, substs) {
-                    (instance.def_id(), instance.substs)
-                } else {
-                    (fn_def_id, substs)
-                };
+        if let ty::FnDef(callee, substs) = *func_ty.kind() {
+            let normalized_substs = tcx.normalize_erasing_regions(param_env, substs);
+            let (callee, call_substs) = if let Ok(Some(instance)) =
+                Instance::resolve(tcx, param_env, callee, normalized_substs)
+            {
+                (instance.def_id(), instance.substs)
+            } else {
+                (callee, normalized_substs)
+            };
 
             // FIXME(#57965): Make this work across function boundaries
 
@@ -85,8 +87,7 @@
             // calling into an entirely different method (for example, a call from the default
             // method in the trait to `<A as Trait<B>>::method`, where `A` and/or `B` are
             // specific types).
-            return call_fn_id == def_id.to_def_id()
-                && &call_substs[..trait_substs.len()] == trait_substs;
+            return callee == caller && &call_substs[..trait_substs.len()] == trait_substs;
         }
 
         false
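For context, the code being refactored here powers the `unconditional_recursion` lint. A user-facing example (hypothetical crate code, not part of the compiler) that the `Search` visitor flags, because every path through the body reaches a call that resolves back to the caller:

```rust
// Hypothetical user code that this analysis reports.
fn countdown(n: u32) -> u32 {
    // warning: function cannot return without recursing
    countdown(n - 1)
}

fn main() {
    // Not actually called (it would recurse forever); referenced only so the
    // example compiles without dead-code noise.
    let _ = countdown as fn(u32) -> u32;
}
```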
diff --git a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs
index a7bb286..dfe8231 100644
--- a/compiler/rustc_mir_build/src/thir/constant.rs
+++ b/compiler/rustc_mir_build/src/thir/constant.rs
@@ -1,6 +1,6 @@
 use rustc_ast as ast;
 use rustc_middle::mir::interpret::{
-    truncate, Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
+    Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
 };
 use rustc_middle::ty::{self, ParamEnv, TyCtxt};
 use rustc_span::symbol::Symbol;
@@ -16,7 +16,7 @@
         let param_ty = ParamEnv::reveal_all().and(ty);
         let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.size;
         trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits());
-        let result = truncate(n, width);
+        let result = width.truncate(n);
         trace!("trunc result: {}", result);
         Ok(ConstValue::Scalar(Scalar::from_uint(result, width)))
     };
@@ -31,7 +31,7 @@
         (ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _))
             if matches!(inner_ty.kind(), ty::Slice(_)) =>
         {
-            let allocation = Allocation::from_byte_aligned_bytes(data as &Vec<u8>);
+            let allocation = Allocation::from_byte_aligned_bytes(data as &[u8]);
             let allocation = tcx.intern_const_alloc(allocation);
             ConstValue::Slice { data: allocation, start: 0, end: data.len() }
         }
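The change from `truncate(n, width)` to `width.truncate(n)` only moves the helper onto `Size`; the operation itself is ordinary bit truncation. A standalone sketch of the semantics (plain arithmetic with an illustrative helper, not the compiler's `Size` type):

```rust
// Keep only the low `bits` bits of `n`, i.e. `n mod 2^bits`.
fn truncate(n: u128, bits: u32) -> u128 {
    if bits >= 128 { n } else { n & ((1u128 << bits) - 1) }
}

fn main() {
    assert_eq!(truncate(300, 8), 44); // 300 mod 2^8
    assert_eq!(truncate(u128::MAX, 16), 0xFFFF); // only the low 16 bits survive
}
```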
diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs
index 13e6947..6ed7ed5 100644
--- a/compiler/rustc_mir_build/src/thir/cx/expr.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs
@@ -316,16 +316,14 @@
         hir::ExprKind::Unary(hir::UnOp::UnNeg, ref arg) => {
             if cx.typeck_results().is_method_call(expr) {
                 overloaded_operator(cx, expr, vec![arg.to_ref()])
-            } else {
-                if let hir::ExprKind::Lit(ref lit) = arg.kind {
-                    ExprKind::Literal {
-                        literal: cx.const_eval_literal(&lit.node, expr_ty, lit.span, true),
-                        user_ty: None,
-                        const_id: None,
-                    }
-                } else {
-                    ExprKind::Unary { op: UnOp::Neg, arg: arg.to_ref() }
+            } else if let hir::ExprKind::Lit(ref lit) = arg.kind {
+                ExprKind::Literal {
+                    literal: cx.const_eval_literal(&lit.node, expr_ty, lit.span, true),
+                    user_ty: None,
+                    const_id: None,
                 }
+            } else {
+                ExprKind::Unary { op: UnOp::Neg, arg: arg.to_ref() }
             }
         }
 
@@ -511,6 +509,12 @@
             inputs: asm.inputs_exprs.to_ref(),
         },
 
+        hir::ExprKind::ConstBlock(ref anon_const) => {
+            let anon_const_def_id = cx.tcx.hir().local_def_id(anon_const.hir_id);
+            let value = ty::Const::from_anon_const(cx.tcx, anon_const_def_id);
+
+            ExprKind::ConstBlock { value }
+        }
         // Now comes the rote stuff:
         hir::ExprKind::Repeat(ref v, ref count) => {
             let count_def_id = cx.tcx.hir().local_def_id(count.hir_id);
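The new `ExprKind::ConstBlock` arm above lowers inline const blocks. At the time of this import the syntax was feature-gated; a user-facing sketch (requires `#![feature(inline_const)]` on a nightly toolchain of that era, stabilized in later releases):

```rust
// Hypothetical user code exercising inline const blocks.
#![feature(inline_const)]

fn main() {
    // The block is evaluated at compile time and behaves like a named constant.
    let x = const { 1 + 2 };
    assert_eq!(x, 3);
}
```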
diff --git a/compiler/rustc_mir_build/src/thir/mod.rs b/compiler/rustc_mir_build/src/thir/mod.rs
index 4d57fd5..f2a2ef0 100644
--- a/compiler/rustc_mir_build/src/thir/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/mod.rs
@@ -232,6 +232,9 @@
     Return {
         value: Option<ExprRef<'tcx>>,
     },
+    ConstBlock {
+        value: &'tcx Const<'tcx>,
+    },
     Repeat {
         value: ExprRef<'tcx>,
         count: &'tcx Const<'tcx>,
diff --git a/compiler/rustc_mir_build/src/thir/pattern/_match.rs b/compiler/rustc_mir_build/src/thir/pattern/_match.rs
index 04de9a7..5e7e81e 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/_match.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/_match.rs
@@ -1,5 +1,11 @@
-//! Note: most of the tests relevant to this file can be found (at the time of writing) in
-//! src/tests/ui/pattern/usefulness.
+//! Note: tests specific to this file can be found in:
+//!     - ui/pattern/usefulness
+//!     - ui/or-patterns
+//!     - ui/consts/const_in_pattern
+//!     - ui/rfc-2008-non-exhaustive
+//!     - probably many others
+//! I (Nadrieril) prefer to put new tests in `ui/pattern/usefulness` unless there's a specific
+//! reason not to, for example if they depend on a particular feature like or_patterns.
 //!
 //! This file includes the logic for exhaustiveness and usefulness checking for
 //! pattern-matching. Specifically, given a list of patterns for a type, we can
@@ -8,7 +14,7 @@
 //! (b) each pattern is necessary (usefulness)
 //!
 //! The algorithm implemented here is a modified version of the one described in:
-//! http://moscova.inria.fr/~maranget/papers/warn/index.html
+//! <http://moscova.inria.fr/~maranget/papers/warn/index.html>
 //! However, to save future implementors from reading the original paper, we
 //! summarise the algorithm here to hopefully save time and be a little clearer
 //! (without being so rigorous).
@@ -78,20 +84,26 @@
 //! new pattern `p`.
 //!
 //! For example, say we have the following:
+//!
 //! ```
-//!     // x: (Option<bool>, Result<()>)
-//!     match x {
-//!         (Some(true), _) => {}
-//!         (None, Err(())) => {}
-//!         (None, Err(_)) => {}
-//!     }
+//! // x: (Option<bool>, Result<()>)
+//! match x {
+//!     (Some(true), _) => {}
+//!     (None, Err(())) => {}
+//!     (None, Err(_)) => {}
+//! }
 //! ```
+//!
 //! Here, the matrix `P` starts as:
+//!
+//! ```
 //! [
 //!     [(Some(true), _)],
 //!     [(None, Err(()))],
 //!     [(None, Err(_))],
 //! ]
+//! ```
+//!
 //! We can tell it's not exhaustive, because `U(P, _)` is true (we're not covering
 //! `[(Some(false), _)]`, for instance). In addition, row 3 is not useful, because
 //! all the values it covers are already covered by row 2.
@@ -131,8 +143,8 @@
 //!                 S(c, (r_1, p_2, .., p_n))
 //!                 S(c, (r_2, p_2, .., p_n))
 //!
-//! 2. We can pop a wildcard off the top of the stack. This is called `D(p)`, where `p` is
-//!    a pattern-stack.
+//! 2. We can pop a wildcard off the top of the stack. This is called `S(_, p)`, where `p` is
+//!    a pattern-stack. Note: the paper calls this `D(p)`.
 //!    This is used when we know there are missing constructor cases, but there might be
 //!    existing wildcard patterns, so to check the usefulness of the matrix, we have to check
 //!    all its *other* components.
@@ -144,8 +156,8 @@
 //!                 p_2, .., p_n
 //!         2.3. `p_1 = r_1 | r_2`. We expand the OR-pattern and then recurse on each resulting
 //!           stack.
-//!                 D((r_1, p_2, .., p_n))
-//!                 D((r_2, p_2, .., p_n))
+//!                 S(_, (r_1, p_2, .., p_n))
+//!                 S(_, (r_2, p_2, .., p_n))
 //!
 //! Note that the OR-patterns are not always used directly in Rust, but are used to derive the
 //! exhaustive integer matching rules, so they're written here for posterity.
@@ -175,13 +187,16 @@
 //! we ignore all the patterns in the first column of `P` that involve other constructors.
 //! This is where `S(c, P)` comes in:
 //! `U(P, p) := U(S(c, P), S(c, p))`
-//! This special case is handled in `is_useful_specialized`.
 //!
 //! For example, if `P` is:
+//!
+//! ```
 //! [
-//! [Some(true), _],
-//! [None, 0],
+//!     [Some(true), _],
+//!     [None, 0],
 //! ]
+//! ```
+//!
 //! and `p` is [Some(false), 0], then we don't care about row 2 since we know `p` only
 //! matches values that row 2 doesn't. For row 1 however, we need to dig into the
 //! arguments of `Some` to know whether some new value is covered. So we compute
@@ -194,14 +209,18 @@
 //! before.
 //! That's almost correct, but only works if there were no wildcards in those first
 //! components. So we need to check that `p` is useful with respect to the rows that
-//! start with a wildcard, if there are any. This is where `D` comes in:
-//! `U(P, p) := U(D(P), D(p))`
+//! start with a wildcard, if there are any. This is where `S(_, x)` comes in:
+//! `U(P, p) := U(S(_, P), S(_, p))`
 //!
 //! For example, if `P` is:
+//!
+//! ```
 //! [
 //!     [_, true, _],
 //!     [None, false, 1],
 //! ]
+//! ```
+//!
 //! and `p` is [_, false, _], the `Some` constructor doesn't appear in `P`. So if we
 //! only had row 2, we'd know that `p` is useful. However row 1 starts with a
 //! wildcard, so we need to check whether `U([[true, _]], [false, 1])`.
@@ -215,10 +234,14 @@
 //! `U(P, p) := ∃(k ϵ constructors) U(S(k, P), S(k, p))`
 //!
 //! For example, if `P` is:
+//!
+//! ```
 //! [
 //!     [Some(true), _],
 //!     [None, false],
 //! ]
+//! ```
+//!
 //! and `p` is [_, false], both `None` and `Some` constructors appear in the first
 //! components of `P`. We will therefore try popping both constructors in turn: we
 //! compute `U([[true, _]], [_, false])` for the `Some` constructor, and `U([[false]],
@@ -266,7 +289,7 @@
 //!       disjunction over every range. This is a bit more tricky to deal with: essentially we need
 //!       to form equivalence classes of subranges of the constructor range for which the behaviour
 //!       of the matrix `P` and new pattern `p` are the same. This is described in more
-//!       detail in `split_grouped_constructors`.
+//!       detail in `Constructor::split`.
 //!     + If some constructors are missing from the matrix, it turns out we don't need to do
 //!       anything special (because we know none of the integers are actually wildcards: i.e., we
 //!       can't span wildcards using ranges).
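The recurrence sketched in this module doc is compact enough to implement for a toy pattern language. The following standalone sketch (illustrative only, not the compiler's algorithm: booleans only, so constructors take no arguments and the complete constructor set is just `true`/`false`) implements `U`, `S(c, p)` and `S(_, p)` as described above:

```rust
#[derive(Clone, Copy)]
enum Pat {
    Wild,
    Bool(bool),
}

type Row = Vec<Pat>;

/// `S(c, row)`: specialize one row to the constructor `c`.
fn specialize(c: bool, row: &[Pat]) -> Option<Row> {
    match row[0] {
        Pat::Wild => Some(row[1..].to_vec()),
        Pat::Bool(b) if b == c => Some(row[1..].to_vec()),
        Pat::Bool(_) => None,
    }
}

/// `S(_, row)` (the paper's `D`): keep only rows whose head is a wildcard.
fn specialize_wild(row: &[Pat]) -> Option<Row> {
    match row[0] {
        Pat::Wild => Some(row[1..].to_vec()),
        Pat::Bool(_) => None,
    }
}

/// `U(matrix, row)`: is `row` useful with respect to `matrix`?
fn useful(matrix: &[Row], row: &[Pat]) -> bool {
    if row.is_empty() {
        // A width-0 row is useful iff the matrix has no rows.
        return matrix.is_empty();
    }
    let specialized = |c: bool| {
        let m: Vec<Row> = matrix.iter().filter_map(|r| specialize(c, r)).collect();
        useful(&m, &specialize(c, row).unwrap())
    };
    match row[0] {
        // Head is a constructor: recurse on the specialized matrix.
        Pat::Bool(c) => specialized(c),
        Pat::Wild => {
            let heads: Vec<bool> = matrix
                .iter()
                .filter_map(|r| if let Pat::Bool(b) = r[0] { Some(b) } else { None })
                .collect();
            if heads.contains(&true) && heads.contains(&false) {
                // All constructors appear in the first column: try each one.
                [true, false].iter().any(|&c| specialized(c))
            } else {
                // Some constructor is missing: only the wildcard rows matter.
                let m: Vec<Row> = matrix.iter().filter_map(|r| specialize_wild(r)).collect();
                useful(&m, &row[1..])
            }
        }
    }
}

fn main() {
    // match (x, y) { (true, _) => .., (false, false) => .. }
    let matrix = vec![
        vec![Pat::Bool(true), Pat::Wild],
        vec![Pat::Bool(false), Pat::Bool(false)],
    ];
    // Not exhaustive: a wildcard row is still useful, and `(false, true)` is a witness.
    assert!(useful(&matrix, &[Pat::Wild, Pat::Wild]));
    assert!(useful(&matrix, &[Pat::Bool(false), Pat::Bool(true)]));
    // `(true, false)` is already covered by the first row, hence not useful.
    assert!(!useful(&matrix, &[Pat::Bool(true), Pat::Bool(false)]));
}
```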
@@ -276,7 +299,8 @@
 use self::WitnessPreference::*;
 
 use rustc_data_structures::captures::Captures;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sync::OnceCell;
 use rustc_index::vec::Idx;
 
 use super::{compare_const_vals, PatternFoldable, PatternFolder};
@@ -284,10 +308,9 @@
 
 use rustc_arena::TypedArena;
 use rustc_attr::{SignedInt, UnsignedInt};
-use rustc_errors::ErrorReported;
 use rustc_hir::def_id::DefId;
 use rustc_hir::{HirId, RangeEnd};
-use rustc_middle::mir::interpret::{truncate, AllocId, ConstValue, Pointer, Scalar};
+use rustc_middle::mir::interpret::ConstValue;
 use rustc_middle::mir::Field;
 use rustc_middle::ty::layout::IntegerExt;
 use rustc_middle::ty::{self, Const, Ty, TyCtxt};
@@ -296,110 +319,37 @@
 use rustc_target::abi::{Integer, Size, VariantIdx};
 
 use smallvec::{smallvec, SmallVec};
-use std::borrow::Cow;
 use std::cmp::{self, max, min, Ordering};
-use std::convert::TryInto;
 use std::fmt;
 use std::iter::{FromIterator, IntoIterator};
 use std::ops::RangeInclusive;
 
-crate fn expand_pattern<'a, 'tcx>(cx: &MatchCheckCtxt<'a, 'tcx>, pat: Pat<'tcx>) -> Pat<'tcx> {
-    LiteralExpander { tcx: cx.tcx, param_env: cx.param_env }.fold_pattern(&pat)
+crate fn expand_pattern<'tcx>(pat: Pat<'tcx>) -> Pat<'tcx> {
+    LiteralExpander.fold_pattern(&pat)
 }
 
-struct LiteralExpander<'tcx> {
-    tcx: TyCtxt<'tcx>,
-    param_env: ty::ParamEnv<'tcx>,
-}
+struct LiteralExpander;
 
-impl<'tcx> LiteralExpander<'tcx> {
-    /// Derefs `val` and potentially unsizes the value if `crty` is an array and `rty` a slice.
-    ///
-    /// `crty` and `rty` can differ because you can use array constants in the presence of slice
-    /// patterns. So the pattern may end up being a slice, but the constant is an array. We convert
-    /// the array to a slice in that case.
-    fn fold_const_value_deref(
-        &mut self,
-        val: ConstValue<'tcx>,
-        // the pattern's pointee type
-        rty: Ty<'tcx>,
-        // the constant's pointee type
-        crty: Ty<'tcx>,
-    ) -> ConstValue<'tcx> {
-        debug!("fold_const_value_deref {:?} {:?} {:?}", val, rty, crty);
-        match (val, &crty.kind(), &rty.kind()) {
-            // the easy case, deref a reference
-            (ConstValue::Scalar(p), x, y) if x == y => {
-                match p {
-                    Scalar::Ptr(p) => {
-                        let alloc = self.tcx.global_alloc(p.alloc_id).unwrap_memory();
-                        ConstValue::ByRef { alloc, offset: p.offset }
-                    }
-                    Scalar::Raw { .. } => {
-                        let layout = self.tcx.layout_of(self.param_env.and(rty)).unwrap();
-                        if layout.is_zst() {
-                            // Deref of a reference to a ZST is a nop.
-                            ConstValue::Scalar(Scalar::zst())
-                        } else {
-                            // FIXME(oli-obk): this is reachable for `const FOO: &&&u32 = &&&42;`
-                            bug!("cannot deref {:#?}, {} -> {}", val, crty, rty);
-                        }
-                    }
-                }
-            }
-            // unsize array to slice if pattern is array but match value or other patterns are slice
-            (ConstValue::Scalar(Scalar::Ptr(p)), ty::Array(t, n), ty::Slice(u)) => {
-                assert_eq!(t, u);
-                ConstValue::Slice {
-                    data: self.tcx.global_alloc(p.alloc_id).unwrap_memory(),
-                    start: p.offset.bytes().try_into().unwrap(),
-                    end: n.eval_usize(self.tcx, ty::ParamEnv::empty()).try_into().unwrap(),
-                }
-            }
-            // fat pointers stay the same
-            (ConstValue::Slice { .. }, _, _)
-            | (_, ty::Slice(_), ty::Slice(_))
-            | (_, ty::Str, ty::Str) => val,
-            // FIXME(oli-obk): this is reachable for `const FOO: &&&u32 = &&&42;` being used
-            _ => bug!("cannot deref {:#?}, {} -> {}", val, crty, rty),
-        }
-    }
-}
-
-impl<'tcx> PatternFolder<'tcx> for LiteralExpander<'tcx> {
+impl<'tcx> PatternFolder<'tcx> for LiteralExpander {
     fn fold_pattern(&mut self, pat: &Pat<'tcx>) -> Pat<'tcx> {
         debug!("fold_pattern {:?} {:?} {:?}", pat, pat.ty.kind(), pat.kind);
-        match (pat.ty.kind(), &*pat.kind) {
-            (&ty::Ref(_, rty, _), &PatKind::Constant { value: Const { val, ty: const_ty } })
-                if const_ty.is_ref() =>
-            {
-                let crty =
-                    if let ty::Ref(_, crty, _) = const_ty.kind() { crty } else { unreachable!() };
-                if let ty::ConstKind::Value(val) = val {
-                    Pat {
-                        ty: pat.ty,
-                        span: pat.span,
-                        kind: box PatKind::Deref {
-                            subpattern: Pat {
-                                ty: rty,
-                                span: pat.span,
-                                kind: box PatKind::Constant {
-                                    value: Const::from_value(
-                                        self.tcx,
-                                        self.fold_const_value_deref(*val, rty, crty),
-                                        rty,
-                                    ),
-                                },
-                            },
-                        },
-                    }
-                } else {
-                    bug!("cannot deref {:#?}, {} -> {}", val, crty, rty)
+        match (pat.ty.kind(), pat.kind.as_ref()) {
+            (_, PatKind::Binding { subpattern: Some(s), .. }) => s.fold_with(self),
+            (_, PatKind::AscribeUserType { subpattern: s, .. }) => s.fold_with(self),
+            (ty::Ref(_, t, _), PatKind::Constant { .. }) if t.is_str() => {
+                // Treat string literal patterns as deref patterns to a `str` constant, i.e.
+                // `&CONST`. This expands them like other const patterns. This could have been done
+                // in `const_to_pat`, but that causes issues with the rest of the matching code.
+                let mut new_pat = pat.super_fold_with(self);
+                // Make a fake const pattern of type `str` (instead of `&str`). Note that the
+                // carried constant value still knows it is of type `&str`.
+                new_pat.ty = t;
+                Pat {
+                    kind: Box::new(PatKind::Deref { subpattern: new_pat }),
+                    span: pat.span,
+                    ty: pat.ty,
                 }
             }
-
-            (_, &PatKind::Binding { subpattern: Some(ref s), .. }) => s.fold_with(self),
-            (_, &PatKind::AscribeUserType { subpattern: ref s, .. }) => s.fold_with(self),
             _ => pat.super_fold_with(self),
         }
     }
@@ -407,49 +357,46 @@
 
 impl<'tcx> Pat<'tcx> {
     pub(super) fn is_wildcard(&self) -> bool {
-        match *self.kind {
-            PatKind::Binding { subpattern: None, .. } | PatKind::Wild => true,
-            _ => false,
-        }
+        matches!(*self.kind, PatKind::Binding { subpattern: None, .. } | PatKind::Wild)
     }
 }
 
 /// A row of a matrix. Rows of len 1 are very common, which is why `SmallVec[_; 2]`
 /// works well.
-#[derive(Debug, Clone, PartialEq)]
-crate struct PatStack<'p, 'tcx>(SmallVec<[&'p Pat<'tcx>; 2]>);
+#[derive(Debug, Clone)]
+crate struct PatStack<'p, 'tcx> {
+    pats: SmallVec<[&'p Pat<'tcx>; 2]>,
+    /// Cache for the constructor of the head
+    head_ctor: OnceCell<Constructor<'tcx>>,
+}
 
 impl<'p, 'tcx> PatStack<'p, 'tcx> {
     crate fn from_pattern(pat: &'p Pat<'tcx>) -> Self {
-        PatStack(smallvec![pat])
+        Self::from_vec(smallvec![pat])
     }
 
     fn from_vec(vec: SmallVec<[&'p Pat<'tcx>; 2]>) -> Self {
-        PatStack(vec)
-    }
-
-    fn from_slice(s: &[&'p Pat<'tcx>]) -> Self {
-        PatStack(SmallVec::from_slice(s))
+        PatStack { pats: vec, head_ctor: OnceCell::new() }
     }
 
     fn is_empty(&self) -> bool {
-        self.0.is_empty()
+        self.pats.is_empty()
     }
 
     fn len(&self) -> usize {
-        self.0.len()
+        self.pats.len()
     }
 
     fn head(&self) -> &'p Pat<'tcx> {
-        self.0[0]
+        self.pats[0]
     }
 
-    fn to_tail(&self) -> Self {
-        PatStack::from_slice(&self.0[1..])
+    fn head_ctor<'a>(&'a self, cx: &MatchCheckCtxt<'p, 'tcx>) -> &'a Constructor<'tcx> {
+        self.head_ctor.get_or_init(|| pat_constructor(cx, self.head()))
     }
 
     fn iter(&self) -> impl Iterator<Item = &Pat<'tcx>> {
-        self.0.iter().copied()
+        self.pats.iter().copied()
     }
 
     // If the first pattern is an or-pattern, expand this pattern. Otherwise, return `None`.
@@ -461,7 +408,7 @@
                 pats.iter()
                     .map(|pat| {
                         let mut new_patstack = PatStack::from_pattern(pat);
-                        new_patstack.0.extend_from_slice(&self.0[1..]);
+                        new_patstack.pats.extend_from_slice(&self.pats[1..]);
                         new_patstack
                     })
                     .collect(),
@@ -471,27 +418,29 @@
         }
     }
 
-    /// This computes `D(self)`. See top of the file for explanations.
-    fn specialize_wildcard(&self) -> Option<Self> {
-        if self.head().is_wildcard() { Some(self.to_tail()) } else { None }
-    }
-
-    /// This computes `S(constructor, self)`. See top of the file for explanations.
-    fn specialize_constructor(
-        &self,
-        cx: &mut MatchCheckCtxt<'p, 'tcx>,
-        constructor: &Constructor<'tcx>,
-        ctor_wild_subpatterns: &Fields<'p, 'tcx>,
-    ) -> Option<PatStack<'p, 'tcx>> {
-        let new_fields =
-            specialize_one_pattern(cx, self.head(), constructor, ctor_wild_subpatterns)?;
-        Some(new_fields.push_on_patstack(&self.0[1..]))
+    /// This computes `S(self.head_ctor(), self)`. See top of the file for explanations.
+    ///
+    /// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
+    /// fields filled with wild patterns.
+    ///
+    /// This is roughly the inverse of `Constructor::apply`.
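+    ///
+    /// For example (an illustrative sketch, not part of the original comment): popping the head
+    /// of the row `[Some(1), x]` with the wildcard fields `[_]` built for the `Some` constructor
+    /// gives the row `[1, x]`.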
+    fn pop_head_constructor(&self, ctor_wild_subpatterns: &Fields<'p, 'tcx>) -> PatStack<'p, 'tcx> {
+        // We pop the head pattern and push the new fields extracted from the arguments of
+        // `self.head()`.
+        let new_fields = ctor_wild_subpatterns.replace_with_pattern_arguments(self.head());
+        new_fields.push_on_patstack(&self.pats[1..])
     }
 }
 
 impl<'p, 'tcx> Default for PatStack<'p, 'tcx> {
     fn default() -> Self {
-        PatStack(smallvec![])
+        Self::from_vec(smallvec![])
+    }
+}
+
+impl<'p, 'tcx> PartialEq for PatStack<'p, 'tcx> {
+    fn eq(&self, other: &Self) -> bool {
+        self.pats == other.pats
     }
 }
 
@@ -500,40 +449,19 @@
     where
         T: IntoIterator<Item = &'p Pat<'tcx>>,
     {
-        PatStack(iter.into_iter().collect())
+        Self::from_vec(iter.into_iter().collect())
     }
 }
 
-/// Depending on the match patterns, the specialization process might be able to use a fast path.
-/// Tracks whether we can use the fast path and the lookup table needed in those cases.
-#[derive(Clone, Debug, PartialEq)]
-enum SpecializationCache {
-    /// Patterns consist of only enum variants.
-    /// Variant patterns does not intersect with each other (in contrast to range patterns),
-    /// so it is possible to precompute the result of `Matrix::specialize_constructor` at a
-    /// lower computational complexity.
-    /// `lookup` is responsible for holding the precomputed result of
-    /// `Matrix::specialize_constructor`, while `wilds` is used for two purposes: the first one is
-    /// the precomputed result of `Matrix::specialize_wildcard`, and the second is to be used as a
-    /// fallback for `Matrix::specialize_constructor` when it tries to apply a constructor that
-    /// has not been seen in the `Matrix`. See `update_cache` for further explanations.
-    Variants { lookup: FxHashMap<DefId, SmallVec<[usize; 1]>>, wilds: SmallVec<[usize; 1]> },
-    /// Does not belong to the cases above, use the slow path.
-    Incompatible,
-}
-
 /// A 2D matrix.
 #[derive(Clone, PartialEq)]
 crate struct Matrix<'p, 'tcx> {
     patterns: Vec<PatStack<'p, 'tcx>>,
-    cache: SpecializationCache,
 }
 
 impl<'p, 'tcx> Matrix<'p, 'tcx> {
     crate fn empty() -> Self {
-        // Use `SpecializationCache::Incompatible` as a placeholder; we will initialize it on the
-        // first call to `push`. See the first half of `update_cache`.
-        Matrix { patterns: vec![], cache: SpecializationCache::Incompatible }
+        Matrix { patterns: vec![] }
     }
 
     /// Pushes a new row to the matrix. If the row starts with an or-pattern, this expands it.
@@ -546,70 +474,6 @@
             }
         } else {
             self.patterns.push(row);
-            self.update_cache(self.patterns.len() - 1);
-        }
-    }
-
-    fn update_cache(&mut self, idx: usize) {
-        let row = &self.patterns[idx];
-        // We don't know which kind of cache could be used until we see the first row; therefore an
-        // empty `Matrix` is initialized with `SpecializationCache::Empty`, then the cache is
-        // assigned the appropriate variant below on the first call to `push`.
-        if self.patterns.is_empty() {
-            self.cache = if row.is_empty() {
-                SpecializationCache::Incompatible
-            } else {
-                match *row.head().kind {
-                    PatKind::Variant { .. } => SpecializationCache::Variants {
-                        lookup: FxHashMap::default(),
-                        wilds: SmallVec::new(),
-                    },
-                    // Note: If the first pattern is a wildcard, then all patterns after that is not
-                    // useful. The check is simple enough so we treat it as the same as unsupported
-                    // patterns.
-                    _ => SpecializationCache::Incompatible,
-                }
-            };
-        }
-        // Update the cache.
-        match &mut self.cache {
-            SpecializationCache::Variants { ref mut lookup, ref mut wilds } => {
-                let head = row.head();
-                match *head.kind {
-                    _ if head.is_wildcard() => {
-                        // Per rule 1.3 in the top-level comments, a wildcard pattern is included in
-                        // the result of `specialize_constructor` for *any* `Constructor`.
-                        // We push the wildcard pattern to the precomputed result for constructors
-                        // that we have seen before; results for constructors we have not yet seen
-                        // defaults to `wilds`, which is updated right below.
-                        for (_, v) in lookup.iter_mut() {
-                            v.push(idx);
-                        }
-                        // Per rule 2.1 and 2.2 in the top-level comments, only wildcard patterns
-                        // are included in the result of `specialize_wildcard`.
-                        // What we do here is to track the wildcards we have seen; so in addition to
-                        // acting as the precomputed result of `specialize_wildcard`, `wilds` also
-                        // serves as the default value of `specialize_constructor` for constructors
-                        // that are not in `lookup`.
-                        wilds.push(idx);
-                    }
-                    PatKind::Variant { adt_def, variant_index, .. } => {
-                        // Handle the cases of rule 1.1 and 1.2 in the top-level comments.
-                        // A variant pattern can only be included in the results of
-                        // `specialize_constructor` for a particular constructor, therefore we are
-                        // using a HashMap to track that.
-                        lookup
-                            .entry(adt_def.variants[variant_index].def_id)
-                            // Default to `wilds` for absent keys. See above for an explanation.
-                            .or_insert_with(|| wilds.clone())
-                            .push(idx);
-                    }
-                    _ => {
-                        self.cache = SpecializationCache::Incompatible;
-                    }
-                }
-            }
-            SpecializationCache::Incompatible => {}
         }
     }
 
@@ -618,78 +482,26 @@
         self.patterns.iter().map(|r| r.head())
     }
 
-    /// This computes `D(self)`. See top of the file for explanations.
-    fn specialize_wildcard(&self) -> Self {
-        match &self.cache {
-            SpecializationCache::Variants { wilds, .. } => {
-                let result =
-                    wilds.iter().filter_map(|&i| self.patterns[i].specialize_wildcard()).collect();
-                // When debug assertions are enabled, check the results against the "slow path"
-                // result.
-                debug_assert_eq!(
-                    result,
-                    Self {
-                        patterns: self.patterns.clone(),
-                        cache: SpecializationCache::Incompatible
-                    }
-                    .specialize_wildcard()
-                );
-                result
-            }
-            SpecializationCache::Incompatible => {
-                self.patterns.iter().filter_map(|r| r.specialize_wildcard()).collect()
-            }
-        }
+    /// Iterate over the first constructor of each row.
+    fn head_ctors<'a>(
+        &'a self,
+        cx: &'a MatchCheckCtxt<'p, 'tcx>,
+    ) -> impl Iterator<Item = &'a Constructor<'tcx>> + Captures<'a> + Captures<'p> {
+        self.patterns.iter().map(move |r| r.head_ctor(cx))
     }
 
     /// This computes `S(constructor, self)`. See top of the file for explanations.
     fn specialize_constructor(
         &self,
-        cx: &mut MatchCheckCtxt<'p, 'tcx>,
-        constructor: &Constructor<'tcx>,
+        pcx: PatCtxt<'_, 'p, 'tcx>,
+        ctor: &Constructor<'tcx>,
         ctor_wild_subpatterns: &Fields<'p, 'tcx>,
     ) -> Matrix<'p, 'tcx> {
-        match &self.cache {
-            SpecializationCache::Variants { lookup, wilds } => {
-                let result: Self = if let Constructor::Variant(id) = constructor {
-                    lookup
-                        .get(id)
-                        // Default to `wilds` for absent keys. See `update_cache` for an explanation.
-                        .unwrap_or(&wilds)
-                        .iter()
-                        .filter_map(|&i| {
-                            self.patterns[i].specialize_constructor(
-                                cx,
-                                constructor,
-                                ctor_wild_subpatterns,
-                            )
-                        })
-                        .collect()
-                } else {
-                    unreachable!()
-                };
-                // When debug assertions are enabled, check the results against the "slow path"
-                // result.
-                debug_assert_eq!(
-                    result,
-                    Matrix {
-                        patterns: self.patterns.clone(),
-                        cache: SpecializationCache::Incompatible
-                    }
-                    .specialize_constructor(
-                        cx,
-                        constructor,
-                        ctor_wild_subpatterns
-                    )
-                );
-                result
-            }
-            SpecializationCache::Incompatible => self
-                .patterns
-                .iter()
-                .filter_map(|r| r.specialize_constructor(cx, constructor, ctor_wild_subpatterns))
-                .collect(),
-        }
+        self.patterns
+            .iter()
+            .filter(|r| ctor.is_covered_by(pcx, r.head_ctor(pcx.cx)))
+            .map(|r| r.pop_head_constructor(ctor_wild_subpatterns))
+            .collect()
     }
 }
 
@@ -707,6 +519,7 @@
 /// +++++++++++++++++++++++++++++
 /// + _     + [_, _, tail @ ..] +
 /// +++++++++++++++++++++++++++++
+/// ```
 impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "\n")?;
@@ -812,46 +625,6 @@
             VarLen(prefix, suffix) => prefix + suffix <= other_len,
         }
     }
-
-    /// Returns a collection of slices that spans the values covered by `self`, subtracted by the
-    /// values covered by `other`: i.e., `self \ other` (in set notation).
-    fn subtract(self, other: Self) -> SmallVec<[Self; 1]> {
-        // Remember, `VarLen(i, j)` covers the union of `FixedLen` from `i + j` to infinity.
-        // Naming: we remove the "neg" constructors from the "pos" ones.
-        match self {
-            FixedLen(pos_len) => {
-                if other.covers_length(pos_len) {
-                    smallvec![]
-                } else {
-                    smallvec![self]
-                }
-            }
-            VarLen(pos_prefix, pos_suffix) => {
-                let pos_len = pos_prefix + pos_suffix;
-                match other {
-                    FixedLen(neg_len) => {
-                        if neg_len < pos_len {
-                            smallvec![self]
-                        } else {
-                            (pos_len..neg_len)
-                                .map(FixedLen)
-                                // We know that `neg_len + 1 >= pos_len >= pos_suffix`.
-                                .chain(Some(VarLen(neg_len + 1 - pos_suffix, pos_suffix)))
-                                .collect()
-                        }
-                    }
-                    VarLen(neg_prefix, neg_suffix) => {
-                        let neg_len = neg_prefix + neg_suffix;
-                        if neg_len <= pos_len {
-                            smallvec![]
-                        } else {
-                            (pos_len..neg_len).map(FixedLen).collect()
-                        }
-                    }
-                }
-            }
-        }
-    }
 }
 
 /// A constructor for array and slice patterns.
@@ -864,33 +637,142 @@
 }
 
 impl Slice {
-    /// Returns what patterns this constructor covers: either fixed-length patterns or
-    /// variable-length patterns.
-    fn pattern_kind(self) -> SliceKind {
-        match self {
-            Slice { array_len: Some(len), kind: VarLen(prefix, suffix) }
-                if prefix + suffix == len =>
-            {
-                FixedLen(len)
-            }
-            _ => self.kind,
-        }
-    }
-
-    /// Returns what values this constructor covers: either values of only one given length, or
-    /// values of length above a given length.
-    /// This is different from `pattern_kind()` because in some cases the pattern only takes into
-    /// account a subset of the entries of the array, but still only captures values of a given
-    /// length.
-    fn value_kind(self) -> SliceKind {
-        match self {
-            Slice { array_len: Some(len), kind: VarLen(_, _) } => FixedLen(len),
-            _ => self.kind,
-        }
+    fn new(array_len: Option<u64>, kind: SliceKind) -> Self {
+        let kind = match (array_len, kind) {
+            // If the middle `..` is empty, we effectively have a fixed-length pattern.
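+            // For example (illustrative): `[x, .., y]` used against `[T; 2]` leaves no room for
+            // the `..`, so it is normalized to `FixedLen(2)`.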
+            (Some(len), VarLen(prefix, suffix)) if prefix + suffix >= len => FixedLen(len),
+            _ => kind,
+        };
+        Slice { array_len, kind }
     }
 
     fn arity(self) -> u64 {
-        self.pattern_kind().arity()
+        self.kind.arity()
+    }
+
+    /// The exhaustiveness-checking paper does not include any details on
+    /// checking variable-length slice patterns. However, they may be
+    /// matched by an infinite collection of fixed-length array patterns.
+    ///
+    /// Checking the infinite set directly would take an infinite amount
+    /// of time. However, it turns out that for each finite set of
+    /// patterns `P`, all sufficiently large array lengths are equivalent:
+    ///
+    /// Each slice `s` with a "sufficiently-large" length `l ≥ L` that applies
+    /// to exactly the subset `Pₜ` of `P` can be transformed to a slice
+    /// `sₘ` for each sufficiently-large length `m` that applies to exactly
+    /// the same subset of `P`.
+    ///
+    /// Because of that, each witness for reachability-checking of one
+    /// of the sufficiently-large lengths can be transformed to an
+    /// equally-valid witness of any other length, so we only have
+    /// to check slices of the "minimal sufficiently-large length"
+    /// and less.
+    ///
+    /// Note that the fact that there is a *single* `sₘ` for each `m`
+    /// not depending on the specific pattern in `P` is important: if
+    /// you look at the pair of patterns
+    ///     `[true, ..]`
+    ///     `[.., false]`
+    /// then any slice of length ≥1 that matches one of these two
+    /// patterns can be trivially turned into a slice of any
+    /// other length ≥1 that matches them, and vice versa;
+    /// but the slice of length 2 `[false, true]` that matches neither
+    /// of these patterns can't be turned into a slice of length 1 that
+    /// matches neither of these patterns, so we have to consider
+    /// slices of length 2 there.
+    ///
+    /// Now, to see that such a length exists and to find it, observe that slice
+    /// patterns are either "fixed-length" patterns (`[_, _, _]`) or
+    /// "variable-length" patterns (`[_, .., _]`).
+    ///
+    /// For fixed-length patterns, all slices with lengths *longer* than
+    /// the pattern's length have the same outcome (of not matching), so
+    /// as long as `L` is greater than the pattern's length we can pick
+    /// any `sₘ` from that length and get the same result.
+    ///
+    /// For variable-length patterns, the situation is more complicated,
+    /// because as seen above the precise value of `sₘ` matters.
+    ///
+    /// However, for each variable-length pattern `p` with a prefix of length
+    /// `plₚ` and suffix of length `slₚ`, only the first `plₚ` and the last
+    /// `slₚ` elements are examined.
+    ///
+    /// Therefore, as long as `L` is positive (to avoid concerns about empty
+    /// types), all elements after the maximum prefix length and before
+    /// the maximum suffix length are not examined by any variable-length
+    /// pattern, and therefore can be added/removed without affecting
+    /// them - creating equivalent patterns from any sufficiently-large
+    /// length.
+    ///
+    /// Of course, if fixed-length patterns exist, we must be sure
+    /// that our length is large enough to miss them all, so
+    /// we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`
+    ///
+    /// For example, with the above pair of patterns, all elements
+    /// but the first and last can be added/removed, so any
+    /// witness of length ≥2 (say, `[false, false, true]`) can be
+    /// turned into a witness of any other length ≥2.
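+    ///
+    /// As a worked example (illustrative, not part of the original comment): with the column
+    /// patterns `[_, _, _]` (fixed length 3) and `[x, .., y]` (prefix 1, suffix 1), the formula
+    /// gives `L = max(3 + 1, 1 + 1) = 4`, so checking slices of length at most 4 suffices.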
+    fn split<'p, 'tcx>(self, pcx: PatCtxt<'_, 'p, 'tcx>) -> SmallVec<[Constructor<'tcx>; 1]> {
+        let (self_prefix, self_suffix) = match self.kind {
+            VarLen(self_prefix, self_suffix) => (self_prefix, self_suffix),
+            _ => return smallvec![Slice(self)],
+        };
+
+        let head_ctors = pcx.matrix.head_ctors(pcx.cx).filter(|c| !c.is_wildcard());
+
+        let mut max_prefix_len = self_prefix;
+        let mut max_suffix_len = self_suffix;
+        let mut max_fixed_len = 0;
+
+        for ctor in head_ctors {
+            if let Slice(slice) = ctor {
+                match slice.kind {
+                    FixedLen(len) => {
+                        max_fixed_len = cmp::max(max_fixed_len, len);
+                    }
+                    VarLen(prefix, suffix) => {
+                        max_prefix_len = cmp::max(max_prefix_len, prefix);
+                        max_suffix_len = cmp::max(max_suffix_len, suffix);
+                    }
+                }
+            } else {
+                bug!("unexpected ctor for slice type: {:?}", ctor);
+            }
+        }
+
+        // For diagnostics, we keep the prefix and suffix lengths separate, so in the case
+        // where `max_fixed_len + 1` is the largest, we adapt `max_prefix_len` accordingly,
+        // so that `L = max_prefix_len + max_suffix_len`.
+        if max_fixed_len + 1 >= max_prefix_len + max_suffix_len {
+            // The subtraction can't overflow thanks to the above check.
+            // The new `max_prefix_len` is also guaranteed to be at least as large as its
+            // previous value.
+            max_prefix_len = max_fixed_len + 1 - max_suffix_len;
+        }
+
+        let final_slice = VarLen(max_prefix_len, max_suffix_len);
+        let final_slice = Slice::new(self.array_len, final_slice);
+        match self.array_len {
+            Some(_) => smallvec![Slice(final_slice)],
+            None => {
+                // `self` originally covered the range `(self.arity()..infinity)`. We split that
+                // range into two: lengths smaller than `final_slice.arity()` are treated
+                // independently as fixed-length slices, and lengths above are captured by
+                // `final_slice`.
+                let smaller_lengths = (self.arity()..final_slice.arity()).map(FixedLen);
+                smaller_lengths
+                    .map(|kind| Slice::new(self.array_len, kind))
+                    .chain(Some(final_slice))
+                    .map(Slice)
+                    .collect()
+            }
+        }
+    }
+
+    /// See `Constructor::is_covered_by`
+    fn is_covered_by(self, other: Self) -> bool {
+        other.kind.covers_length(self.arity())
     }
 }
 
@@ -898,7 +780,7 @@
 /// the constructor. See also `Fields`.
 ///
 /// `pat_constructor` retrieves the constructor corresponding to a pattern.
-/// `specialize_one_pattern` returns the list of fields corresponding to a pattern, given a
+/// `specialize_constructor` returns the list of fields corresponding to a pattern, given a
 /// constructor. `Constructor::apply` reconstructs the pattern from a pair of `Constructor` and
 /// `Fields`.
 #[derive(Clone, Debug, PartialEq)]
@@ -908,135 +790,204 @@
     Single,
     /// Enum variants.
     Variant(DefId),
-    /// Literal values.
-    ConstantValue(&'tcx ty::Const<'tcx>),
     /// Ranges of integer literal values (`2`, `2..=5` or `2..5`).
     IntRange(IntRange<'tcx>),
     /// Ranges of floating-point literal values (`2.0..=5.2`).
     FloatRange(&'tcx ty::Const<'tcx>, &'tcx ty::Const<'tcx>, RangeEnd),
+    /// String literals. Strings are not quite the same as `&[u8]` so we treat them separately.
+    Str(&'tcx ty::Const<'tcx>),
     /// Array and slice patterns.
     Slice(Slice),
-    /// Fake extra constructor for enums that aren't allowed to be matched exhaustively.
+    /// Constants that must not be matched structurally. They are treated as black
+    /// boxes for the purposes of exhaustiveness: we must not inspect them, and they
+    /// don't count towards making a match exhaustive.
+    Opaque,
+    /// Fake extra constructor for enums that aren't allowed to be matched exhaustively. Also used
+    /// for those types for which we cannot list constructors explicitly, like `f64` and `str`.
     NonExhaustive,
+    /// Wildcard pattern.
+    Wildcard,
 }
 
 impl<'tcx> Constructor<'tcx> {
-    fn is_slice(&self) -> bool {
+    fn is_wildcard(&self) -> bool {
+        matches!(self, Wildcard)
+    }
+
+    fn as_int_range(&self) -> Option<&IntRange<'tcx>> {
         match self {
-            Slice(_) => true,
-            _ => false,
+            IntRange(range) => Some(range),
+            _ => None,
         }
     }
 
-    fn variant_index_for_adt<'a>(
-        &self,
-        cx: &MatchCheckCtxt<'a, 'tcx>,
-        adt: &'tcx ty::AdtDef,
-    ) -> VariantIdx {
+    fn as_slice(&self) -> Option<Slice> {
+        match self {
+            Slice(slice) => Some(*slice),
+            _ => None,
+        }
+    }
+
+    fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> VariantIdx {
         match *self {
             Variant(id) => adt.variant_index_with_id(id),
             Single => {
                 assert!(!adt.is_enum());
                 VariantIdx::new(0)
             }
-            ConstantValue(c) => cx
-                .tcx
-                .destructure_const(cx.param_env.and(c))
-                .variant
-                .expect("destructed const of adt without variant id"),
             _ => bug!("bad constructor {:?} for adt {:?}", self, adt),
         }
     }
 
-    // Returns the set of constructors covered by `self` but not by
-    // anything in `other_ctors`.
-    fn subtract_ctors(&self, other_ctors: &Vec<Constructor<'tcx>>) -> Vec<Constructor<'tcx>> {
-        if other_ctors.is_empty() {
-            return vec![self.clone()];
-        }
+    /// Some constructors (namely `Wildcard`, `IntRange` and `Slice`) stand for a whole set of
+    /// underlying constructors (like variants, integers or fixed-sized slices). When specializing
+    /// for these constructors, we want to be specializing for the underlying constructors.
+    /// Naively, we would simply return the list of constructors they correspond to. We instead are
+    /// more clever: if there are constructors that we know will behave the same wrt the current
+    /// matrix, we keep them grouped. For example, all slices of a sufficiently large length
+    /// will either be all useful or all non-useful with a given matrix.
+    ///
+    /// See the branches for details on how the splitting is done.
+    ///
+    /// This function may discard some irrelevant constructors if this preserves behavior and
+    /// diagnostics. E.g., for the `_` case, we ignore the constructors already present in the
+    /// matrix, unless all of them are.
+    ///
+    /// `hir_id` is `None` when we're evaluating the wildcard pattern. In that case we do not want
+    /// to lint for overlapping ranges.
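+    ///
+    /// For instance (an illustrative sketch, not part of the original comment): splitting the
+    /// full `u8` range `0..=255` against a matrix whose only head constructor is the range
+    /// `0..=10` yields the groups `0..=10` and `11..=255`, since all values in `11..=255`
+    /// behave the same wrt that matrix.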
+    fn split<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>, hir_id: Option<HirId>) -> SmallVec<[Self; 1]> {
+        debug!("Constructor::split({:#?}, {:#?})", self, pcx.matrix);
 
         match self {
-            // Those constructors can only match themselves.
-            Single | Variant(_) | ConstantValue(..) | FloatRange(..) => {
-                if other_ctors.iter().any(|c| c == self) { vec![] } else { vec![self.clone()] }
+            Wildcard => Constructor::split_wildcard(pcx),
+            // Fast-track if the range is trivial. In particular, we don't do the overlapping
+            // ranges check.
+            IntRange(ctor_range)
+                if ctor_range.treat_exhaustively(pcx.cx.tcx) && !ctor_range.is_singleton() =>
+            {
+                ctor_range.split(pcx, hir_id)
             }
-            &Slice(slice) => {
-                let mut other_slices = other_ctors
-                    .iter()
-                    .filter_map(|c: &Constructor<'_>| match c {
-                        Slice(slice) => Some(*slice),
-                        // FIXME(oli-obk): implement `deref` for `ConstValue`
-                        ConstantValue(..) => None,
-                        _ => bug!("bad slice pattern constructor {:?}", c),
-                    })
-                    .map(Slice::value_kind);
+            Slice(slice @ Slice { kind: VarLen(..), .. }) => slice.split(pcx),
+            // Any other constructor can be used unchanged.
+            _ => smallvec![self.clone()],
+        }
+    }
 
-                match slice.value_kind() {
-                    FixedLen(self_len) => {
-                        if other_slices.any(|other_slice| other_slice.covers_length(self_len)) {
-                            vec![]
-                        } else {
-                            vec![Slice(slice)]
-                        }
+    /// For wildcards, there are two groups of constructors: those actually present in the
+    /// matrix (`head_ctors`), and those not present (`missing_ctors`).
+    /// Two constructors that are not in the matrix will either both be caught (by a wildcard), or
+    /// both not be caught. Therefore we can keep the missing constructors grouped together.
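+    ///
+    /// For example (illustrative): when matching on `Option<bool>`, if the matrix only contains
+    /// `None` then `Some(_)` is missing, so the wildcard is kept and stands for it; if the
+    /// matrix contains both `None` and `Some(_)`, nothing is missing and the wildcard is split
+    /// into the two variant constructors.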
+    fn split_wildcard<'p>(pcx: PatCtxt<'_, 'p, 'tcx>) -> SmallVec<[Self; 1]> {
+        // Missing constructors are those that are not matched by any non-wildcard patterns in the
+        // current column. We only fully construct them on-demand, because they're rarely used and
+        // can be big.
+        let missing_ctors = MissingConstructors::new(pcx);
+        if missing_ctors.is_empty(pcx) {
+            // All the constructors are present in the matrix, so we just go through them all.
+            // We must also split them first.
+            missing_ctors.all_ctors
+        } else {
+            // Some constructors are missing, thus we can specialize with the wildcard constructor,
+            // which will stand for those constructors that are missing, and behaves like any of
+            // them.
+            smallvec![Wildcard]
+        }
+    }
+
+    /// Returns whether `self` is covered by `other`, i.e. whether `self` is a subset of `other`.
+    /// For the simple cases, this is simply checking for equality. For the "grouped" constructors,
+    /// this checks for inclusion.
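+    ///
+    /// For example (illustrative): the range `0..=5` is covered by `0..=10`, and the
+    /// fixed-length slice `[_, _]` is covered by the variable-length slice `[_, ..]`.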
+    fn is_covered_by<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>, other: &Self) -> bool {
+        // This must be kept in sync with `is_covered_by_any`.
+        match (self, other) {
+            // Wildcards cover anything
+            (_, Wildcard) => true,
+            // Wildcards are only covered by wildcards
+            (Wildcard, _) => false,
+
+            (Single, Single) => true,
+            (Variant(self_id), Variant(other_id)) => self_id == other_id,
+
+            (IntRange(self_range), IntRange(other_range)) => {
+                self_range.is_covered_by(pcx, other_range)
+            }
+            (
+                FloatRange(self_from, self_to, self_end),
+                FloatRange(other_from, other_to, other_end),
+            ) => {
+                match (
+                    compare_const_vals(pcx.cx.tcx, self_to, other_to, pcx.cx.param_env, pcx.ty),
+                    compare_const_vals(pcx.cx.tcx, self_from, other_from, pcx.cx.param_env, pcx.ty),
+                ) {
+                    (Some(to), Some(from)) => {
+                        (from == Ordering::Greater || from == Ordering::Equal)
+                            && (to == Ordering::Less
+                                || (other_end == self_end && to == Ordering::Equal))
                     }
-                    kind @ VarLen(..) => {
-                        let mut remaining_slices = vec![kind];
-
-                        // For each used slice, subtract from the current set of slices.
-                        for other_slice in other_slices {
-                            remaining_slices = remaining_slices
-                                .into_iter()
-                                .flat_map(|remaining_slice| remaining_slice.subtract(other_slice))
-                                .collect();
-
-                            // If the constructors that have been considered so far already cover
-                            // the entire range of `self`, no need to look at more constructors.
-                            if remaining_slices.is_empty() {
-                                break;
-                            }
-                        }
-
-                        remaining_slices
-                            .into_iter()
-                            .map(|kind| Slice { array_len: slice.array_len, kind })
-                            .map(Slice)
-                            .collect()
-                    }
+                    _ => false,
                 }
             }
-            IntRange(self_range) => {
-                let mut remaining_ranges = vec![self_range.clone()];
-                for other_ctor in other_ctors {
-                    if let IntRange(other_range) = other_ctor {
-                        if other_range == self_range {
-                            // If the `self` range appears directly in a `match` arm, we can
-                            // eliminate it straight away.
-                            remaining_ranges = vec![];
-                        } else {
-                            // Otherwise explicitly compute the remaining ranges.
-                            remaining_ranges = other_range.subtract_from(remaining_ranges);
-                        }
-
-                        // If the ranges that have been considered so far already cover the entire
-                        // range of values, we can return early.
-                        if remaining_ranges.is_empty() {
-                            break;
-                        }
-                    }
+            (Str(self_val), Str(other_val)) => {
+                // FIXME: there's probably a more direct way of comparing for equality
+                match compare_const_vals(pcx.cx.tcx, self_val, other_val, pcx.cx.param_env, pcx.ty)
+                {
+                    Some(comparison) => comparison == Ordering::Equal,
+                    None => false,
                 }
-
-                // Convert the ranges back into constructors.
-                remaining_ranges.into_iter().map(IntRange).collect()
             }
+            (Slice(self_slice), Slice(other_slice)) => self_slice.is_covered_by(*other_slice),
+
+            // We are trying to inspect an opaque constant. Thus we skip the row.
+            (Opaque, _) | (_, Opaque) => false,
+            // Only a wildcard pattern can match the special extra constructor.
+            (NonExhaustive, _) => false,
+
+            _ => span_bug!(
+                pcx.span,
+                "trying to compare incompatible constructors {:?} and {:?}",
+                self,
+                other
+            ),
+        }
+    }
+
+    /// Faster version of `is_covered_by` when applied to many constructors. `used_ctors` is
+    /// assumed to be built from `matrix.head_ctors()` with wildcards filtered out, and `self` is
+    /// assumed to have been split from a wildcard.
+    fn is_covered_by_any<'p>(
+        &self,
+        pcx: PatCtxt<'_, 'p, 'tcx>,
+        used_ctors: &[Constructor<'tcx>],
+    ) -> bool {
+        if used_ctors.is_empty() {
+            return false;
+        }
+
+        // This must be kept in sync with `is_covered_by`.
+        match self {
+            // If `self` is `Single`, `used_ctors` cannot contain anything other than `Single`s.
+            Single => !used_ctors.is_empty(),
+            Variant(_) => used_ctors.iter().any(|c| c == self),
+            IntRange(range) => used_ctors
+                .iter()
+                .filter_map(|c| c.as_int_range())
+                .any(|other| range.is_covered_by(pcx, other)),
+            Slice(slice) => used_ctors
+                .iter()
+                .filter_map(|c| c.as_slice())
+                .any(|other| slice.is_covered_by(other)),
             // This constructor is never covered by anything else
-            NonExhaustive => vec![NonExhaustive],
+            NonExhaustive => false,
+            Str(..) | FloatRange(..) | Opaque | Wildcard => {
+                bug!("found unexpected ctor in all_ctors: {:?}", self)
+            }
         }
     }
 
     /// Apply a constructor to a list of patterns, yielding a new pattern. `pats`
     /// must have as many elements as this constructor's arity.
     ///
-    /// This is roughly the inverse of `specialize_one_pattern`.
+    /// This is roughly the inverse of `specialize_constructor`.
     ///
     /// Examples:
     /// `self`: `Constructor::Single`
@@ -1048,28 +999,23 @@
     /// `ty`: `Option<bool>`
     /// `pats`: `[false]`
     /// returns `Some(false)`
-    fn apply<'p>(
-        &self,
-        cx: &MatchCheckCtxt<'p, 'tcx>,
-        ty: Ty<'tcx>,
-        fields: Fields<'p, 'tcx>,
-    ) -> Pat<'tcx> {
+    fn apply<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>, fields: Fields<'p, 'tcx>) -> Pat<'tcx> {
         let mut subpatterns = fields.all_patterns();
 
         let pat = match self {
-            Single | Variant(_) => match ty.kind() {
+            Single | Variant(_) => match pcx.ty.kind() {
                 ty::Adt(..) | ty::Tuple(..) => {
                     let subpatterns = subpatterns
                         .enumerate()
                         .map(|(i, p)| FieldPat { field: Field::new(i), pattern: p })
                         .collect();
 
-                    if let ty::Adt(adt, substs) = ty.kind() {
+                    if let ty::Adt(adt, substs) = pcx.ty.kind() {
                         if adt.is_enum() {
                             PatKind::Variant {
                                 adt_def: adt,
                                 substs,
-                                variant_index: self.variant_index_for_adt(cx, adt),
+                                variant_index: self.variant_index_for_adt(adt),
                                 subpatterns,
                             }
                         } else {
@@ -1079,11 +1025,15 @@
                         PatKind::Leaf { subpatterns }
                     }
                 }
+                // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should
+                // be careful to reconstruct the correct constant pattern here. However a string
+                // literal pattern will never be reported as a non-exhaustiveness witness, so we
+                // can ignore this issue.
                 ty::Ref(..) => PatKind::Deref { subpattern: subpatterns.next().unwrap() },
-                ty::Slice(_) | ty::Array(..) => bug!("bad slice pattern {:?} {:?}", self, ty),
+                ty::Slice(_) | ty::Array(..) => bug!("bad slice pattern {:?} {:?}", self, pcx.ty),
                 _ => PatKind::Wild,
             },
-            Slice(slice) => match slice.pattern_kind() {
+            Slice(slice) => match slice.kind {
                 FixedLen(_) => {
                     PatKind::Slice { prefix: subpatterns.collect(), slice: None, suffix: vec![] }
                 }
@@ -1104,22 +1054,21 @@
                     } else {
                         subpatterns.collect()
                     };
-                    let wild = Pat::wildcard_from_ty(ty);
+                    let wild = Pat::wildcard_from_ty(pcx.ty);
                     PatKind::Slice { prefix, slice: Some(wild), suffix }
                 }
             },
-            &ConstantValue(value) => PatKind::Constant { value },
+            &Str(value) => PatKind::Constant { value },
             &FloatRange(lo, hi, end) => PatKind::Range(PatRange { lo, hi, end }),
-            IntRange(range) => return range.to_pat(cx.tcx),
+            IntRange(range) => return range.to_pat(pcx.cx.tcx),
             NonExhaustive => PatKind::Wild,
+            Opaque => bug!("we should not try to apply an opaque constructor"),
+            Wildcard => bug!(
+                "trying to apply a wildcard constructor; this should have been done in `apply_constructors`"
+            ),
         };
 
-        Pat { ty, span: DUMMY_SP, kind: Box::new(pat) }
-    }
-
-    /// Like `apply`, but where all the subpatterns are wildcards `_`.
-    fn apply_wildcards<'a>(&self, cx: &MatchCheckCtxt<'a, 'tcx>, ty: Ty<'tcx>) -> Pat<'tcx> {
-        self.apply(cx, ty, Fields::wildcards(cx, self, ty))
+        Pat { ty: pcx.ty, span: DUMMY_SP, kind: Box::new(pat) }
     }
 }
 
@@ -1186,12 +1135,6 @@
         Fields::Slice(std::slice::from_ref(pat))
     }
 
-    /// Construct a new `Fields` from the given patterns. You must be sure those patterns can't
-    /// contain fields that need to be filtered out. When in doubt, prefer `replace_fields`.
-    fn from_slice_unfiltered(pats: &'p [Pat<'tcx>]) -> Self {
-        Fields::Slice(pats)
-    }
-
     /// Convenience; internal use.
     fn wildcards_from_tys(
         cx: &MatchCheckCtxt<'p, 'tcx>,
@@ -1203,11 +1146,9 @@
     }
 
     /// Creates a new list of wildcard fields for a given constructor.
-    fn wildcards(
-        cx: &MatchCheckCtxt<'p, 'tcx>,
-        constructor: &Constructor<'tcx>,
-        ty: Ty<'tcx>,
-    ) -> Self {
+    fn wildcards(pcx: PatCtxt<'_, 'p, 'tcx>, constructor: &Constructor<'tcx>) -> Self {
+        let ty = pcx.ty;
+        let cx = pcx.cx;
         let wildcard_from_ty = |ty| &*cx.pattern_arena.alloc(Pat::wildcard_from_ty(ty));
 
         let ret = match constructor {
@@ -1221,7 +1162,7 @@
                         // Use T as the sub pattern type of Box<T>.
                         Fields::from_single_pattern(wildcard_from_ty(substs.type_at(0)))
                     } else {
-                        let variant = &adt.variants[constructor.variant_index_for_adt(cx, adt)];
+                        let variant = &adt.variants[constructor.variant_index_for_adt(adt)];
                         // Whether we must not match the fields of this variant exhaustively.
                         let is_non_exhaustive =
                             variant.is_field_list_non_exhaustive() && !adt.did.is_local();
@@ -1260,7 +1201,7 @@
                         }
                     }
                 }
-                _ => Fields::empty(),
+                _ => bug!("Unexpected type for `Single` constructor: {:?}", ty),
             },
             Slice(slice) => match *ty.kind() {
                 ty::Slice(ty) | ty::Array(ty, _) => {
@@ -1269,7 +1210,9 @@
                 }
                 _ => bug!("bad slice pattern {:?} {:?}", constructor, ty),
             },
-            ConstantValue(..) | FloatRange(..) | IntRange(..) | NonExhaustive => Fields::empty(),
+            Str(..) | FloatRange(..) | IntRange(..) | NonExhaustive | Opaque | Wildcard => {
+                Fields::empty()
+            }
         };
         debug!("Fields::wildcards({:?}, {:?}) = {:#?}", constructor, ty, ret);
         ret
@@ -1367,6 +1310,45 @@
         }
     }
 
+    /// Replaces contained fields with the arguments of the given pattern. Only use on a pattern
+    /// that is compatible with the constructor used to build `self`.
+    /// This is meant to be used on the result of `Fields::wildcards()`. The idea is that
+    /// `wildcards` constructs a list of fields where all entries are wildcards, and the pattern
+    /// provided to this function fills some of the fields with non-wildcards.
+    /// In the following example `Fields::wildcards` would return `[_, _, _, _]`. If we call
+    /// `replace_with_pattern_arguments` on it with the pattern, the result will be `[Some(0), _,
+    /// _, _]`.
+    /// ```rust
+    /// let x: [Option<u8>; 4] = foo();
+    /// match x {
+    ///     [Some(0), ..] => {}
+    /// }
+    /// ```
+    /// This is guaranteed to preserve the number of patterns in `self`.
+    fn replace_with_pattern_arguments(&self, pat: &'p Pat<'tcx>) -> Self {
+        match pat.kind.as_ref() {
+            PatKind::Deref { subpattern } => {
+                assert_eq!(self.len(), 1);
+                Fields::from_single_pattern(subpattern)
+            }
+            PatKind::Leaf { subpatterns } | PatKind::Variant { subpatterns, .. } => {
+                self.replace_with_fieldpats(subpatterns)
+            }
+            PatKind::Array { prefix, suffix, .. } | PatKind::Slice { prefix, suffix, .. } => {
+                // Number of subpatterns for the constructor
+                let ctor_arity = self.len();
+
+                // Replace the prefix and the suffix with the given patterns, leaving wildcards in
+                // the middle if there was a subslice pattern `..`.
+                let prefix = prefix.iter().enumerate();
+                let suffix =
+                    suffix.iter().enumerate().map(|(i, p)| (ctor_arity - suffix.len() + i, p));
+                self.replace_fields_indexed(prefix.chain(suffix))
+            }
+            _ => self.clone(),
+        }
+    }
+
     fn push_on_patstack(self, stack: &[&'p Pat<'tcx>]) -> PatStack<'p, 'tcx> {
         let pats: SmallVec<_> = match self {
             Fields::Slice(pats) => pats.iter().chain(stack.iter().copied()).collect(),
@@ -1385,8 +1367,9 @@
 
 #[derive(Clone, Debug)]
 crate enum Usefulness<'tcx> {
-    /// Carries a list of unreachable subpatterns. Used only in the presence of or-patterns.
-    Useful(Vec<Span>),
+    /// Carries, for each column in the matrix, a set of sub-branches that have been found to be
+    /// unreachable. Used only in the presence of or-patterns, otherwise it stays empty.
+    Useful(Vec<FxHashSet<Span>>),
     /// Carries a list of witnesses of non-exhaustiveness.
     UsefulWithWitness(Vec<Witness<'tcx>>),
     NotUseful,
@@ -1401,60 +1384,21 @@
     }
 
     fn is_useful(&self) -> bool {
-        match *self {
-            NotUseful => false,
-            _ => true,
-        }
+        !matches!(*self, NotUseful)
     }
 
     fn apply_constructor<'p>(
         self,
-        cx: &MatchCheckCtxt<'p, 'tcx>,
+        pcx: PatCtxt<'_, 'p, 'tcx>,
         ctor: &Constructor<'tcx>,
-        ty: Ty<'tcx>,
         ctor_wild_subpatterns: &Fields<'p, 'tcx>,
-    ) -> Self {
-        match self {
-            UsefulWithWitness(witnesses) => UsefulWithWitness(
-                witnesses
-                    .into_iter()
-                    .map(|witness| witness.apply_constructor(cx, &ctor, ty, ctor_wild_subpatterns))
-                    .collect(),
-            ),
-            x => x,
-        }
-    }
-
-    fn apply_wildcard(self, ty: Ty<'tcx>) -> Self {
-        match self {
-            UsefulWithWitness(witnesses) => {
-                let wild = Pat::wildcard_from_ty(ty);
-                UsefulWithWitness(
-                    witnesses
-                        .into_iter()
-                        .map(|mut witness| {
-                            witness.0.push(wild.clone());
-                            witness
-                        })
-                        .collect(),
-                )
-            }
-            x => x,
-        }
-    }
-
-    fn apply_missing_ctors(
-        self,
-        cx: &MatchCheckCtxt<'_, 'tcx>,
-        ty: Ty<'tcx>,
-        missing_ctors: &MissingConstructors<'tcx>,
+        is_top_level: bool,
     ) -> Self {
         match self {
             UsefulWithWitness(witnesses) => {
-                let new_patterns: Vec<_> =
-                    missing_ctors.iter().map(|ctor| ctor.apply_wildcards(cx, ty)).collect();
-                // Add the new patterns to each witness
-                UsefulWithWitness(
+                let new_witnesses = if ctor.is_wildcard() {
+                    let missing_ctors = MissingConstructors::new(pcx);
+                    let new_patterns = missing_ctors.report_patterns(pcx, is_top_level);
                     witnesses
                         .into_iter()
                         .flat_map(|witness| {
@@ -1464,8 +1408,31 @@
                                 witness
                             })
                         })
-                        .collect(),
-                )
+                        .collect()
+                } else {
+                    witnesses
+                        .into_iter()
+                        .map(|witness| witness.apply_constructor(pcx, &ctor, ctor_wild_subpatterns))
+                        .collect()
+                };
+                UsefulWithWitness(new_witnesses)
+            }
+            Useful(mut unreachables) => {
+                if !unreachables.is_empty() {
+                    // When we apply a constructor, there are `arity` columns of the matrix that
+                    // corresponded to its arguments. All the unreachables found in these columns
+                    // will, after `apply`, come from the first column. So we take the union of all
+                    // the corresponding sets and put them in the first column.
+                    // Note that `arity` may be 0, in which case we just push a new empty set.
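+                    // For example (illustrative): with arity 2 and column sets `[{a}, {b}, {c}]`,
+                    // the last two sets are drained and merged, leaving `[{a}, {b, c}]`.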
+                    let len = unreachables.len();
+                    let arity = ctor_wild_subpatterns.len();
+                    let mut unioned = FxHashSet::default();
+                    for set in unreachables.drain((len - arity)..) {
+                        unioned.extend(set)
+                    }
+                    unreachables.push(unioned);
+                }
+                Useful(unreachables)
             }
             x => x,
         }
@@ -1478,9 +1445,14 @@
     LeaveOutWitness,
 }
 
-#[derive(Copy, Clone, Debug)]
-struct PatCtxt<'tcx> {
+#[derive(Copy, Clone)]
+struct PatCtxt<'a, 'p, 'tcx> {
+    cx: &'a MatchCheckCtxt<'p, 'tcx>,
+    /// Current state of the matrix.
+    matrix: &'a Matrix<'p, 'tcx>,
+    /// Type of the current column under investigation.
     ty: Ty<'tcx>,
+    /// Span of the current pattern under investigation.
     span: Span,
 }
 
@@ -1496,6 +1468,7 @@
 /// multiple patterns.
 ///
 /// For example, if we are constructing a witness for the match against
+///
 /// ```
 /// struct Pair(Option<(u32, u32)>, bool);
 ///
@@ -1540,17 +1513,16 @@
     /// pats: [(false, "foo"), 42]  => X { a: (false, "foo"), b: 42 }
     fn apply_constructor<'p>(
         mut self,
-        cx: &MatchCheckCtxt<'p, 'tcx>,
+        pcx: PatCtxt<'_, 'p, 'tcx>,
         ctor: &Constructor<'tcx>,
-        ty: Ty<'tcx>,
         ctor_wild_subpatterns: &Fields<'p, 'tcx>,
     ) -> Self {
         let pat = {
             let len = self.0.len();
             let arity = ctor_wild_subpatterns.len();
             let pats = self.0.drain((len - arity)..).rev();
-            let fields = ctor_wild_subpatterns.replace_fields(cx, pats);
-            ctor.apply(cx, ty, fields)
+            let fields = ctor_wild_subpatterns.replace_fields(pcx.cx, pats);
+            ctor.apply(pcx, fields)
         };
 
         self.0.push(pat);
@@ -1568,11 +1540,9 @@
 /// `Option<!>`, we do not include `Some(_)` in the returned list of constructors.
 /// Invariant: this returns an empty `Vec` if and only if the type is uninhabited (as determined by
 /// `cx.is_uninhabited()`).
-fn all_constructors<'a, 'tcx>(
-    cx: &mut MatchCheckCtxt<'a, 'tcx>,
-    pcx: PatCtxt<'tcx>,
-) -> Vec<Constructor<'tcx>> {
+fn all_constructors<'p, 'tcx>(pcx: PatCtxt<'_, 'p, 'tcx>) -> Vec<Constructor<'tcx>> {
     debug!("all_constructors({:?})", pcx.ty);
+    let cx = pcx.cx;
     let make_range = |start, end| {
         IntRange(
             // `unwrap()` is ok because we know the type is an integer.
@@ -1580,25 +1550,50 @@
                 .unwrap(),
         )
     };
-    match *pcx.ty.kind() {
-        ty::Bool => {
-            [true, false].iter().map(|&b| ConstantValue(ty::Const::from_bool(cx.tcx, b))).collect()
-        }
-        ty::Array(ref sub_ty, len) if len.try_eval_usize(cx.tcx, cx.param_env).is_some() => {
+    match pcx.ty.kind() {
+        ty::Bool => vec![make_range(0, 1)],
+        ty::Array(sub_ty, len) if len.try_eval_usize(cx.tcx, cx.param_env).is_some() => {
             let len = len.eval_usize(cx.tcx, cx.param_env);
             if len != 0 && cx.is_uninhabited(sub_ty) {
                 vec![]
             } else {
-                vec![Slice(Slice { array_len: Some(len), kind: VarLen(0, 0) })]
+                vec![Slice(Slice::new(Some(len), VarLen(0, 0)))]
             }
         }
         // Treat arrays of a constant but unknown length like slices.
-        ty::Array(ref sub_ty, _) | ty::Slice(ref sub_ty) => {
+        ty::Array(sub_ty, _) | ty::Slice(sub_ty) => {
             let kind = if cx.is_uninhabited(sub_ty) { FixedLen(0) } else { VarLen(0, 0) };
-            vec![Slice(Slice { array_len: None, kind })]
+            vec![Slice(Slice::new(None, kind))]
         }
         ty::Adt(def, substs) if def.is_enum() => {
-            let ctors: Vec<_> = if cx.tcx.features().exhaustive_patterns {
+            // If the enum is declared as `#[non_exhaustive]`, we treat it as if it had an
+            // additional "unknown" constructor.
+            // There is no point in enumerating all possible variants, because the user can't
+            // actually match against them all themselves. So we always return only the fictitious
+            // constructor.
+            // E.g., in an example like:
+            //
+            // ```
+            //     let err: io::ErrorKind = ...;
+            //     match err {
+            //         io::ErrorKind::NotFound => {},
+            //     }
+            // ```
+            //
+            // we don't want to show every possible IO error, but instead have only `_` as the
+            // witness.
+            let is_declared_nonexhaustive = cx.is_foreign_non_exhaustive_enum(pcx.ty);
+
+            // If `exhaustive_patterns` is disabled and our scrutinee is an empty enum, we treat it
+            // as though it had an "unknown" constructor to avoid exposing its emptiness. Note that
+            // an empty match will still be considered exhaustive because that case is handled
+            // separately in `check_match`.
+            let is_secretly_empty =
+                def.variants.is_empty() && !cx.tcx.features().exhaustive_patterns;
+
+            if is_secretly_empty || is_declared_nonexhaustive {
+                vec![NonExhaustive]
+            } else if cx.tcx.features().exhaustive_patterns {
                 // If `exhaustive_patterns` is enabled, we exclude variants known to be
                 // uninhabited.
                 def.variants
@@ -1611,32 +1606,7 @@
                     .collect()
             } else {
                 def.variants.iter().map(|v| Variant(v.def_id)).collect()
-            };
-
-            // If the enum is declared as `#[non_exhaustive]`, we treat it as if it had an
-            // additional "unknown" constructor.
-            // There is no point in enumerating all possible variants, because the user can't
-            // actually match against them all themselves. So we always return only the fictitious
-            // constructor.
-            // E.g., in an example like:
-            // ```
-            //     let err: io::ErrorKind = ...;
-            //     match err {
-            //         io::ErrorKind::NotFound => {},
-            //     }
-            // ```
-            // we don't want to show every possible IO error, but instead have only `_` as the
-            // witness.
-            let is_declared_nonexhaustive = cx.is_foreign_non_exhaustive_enum(pcx.ty);
-
-            // If `exhaustive_patterns` is disabled and our scrutinee is an empty enum, we treat it
-            // as though it had an "unknown" constructor to avoid exposing its emptyness. Note that
-            // an empty match will still be considered exhaustive because that case is handled
-            // separately in `check_match`.
-            let is_secretly_empty =
-                def.variants.is_empty() && !cx.tcx.features().exhaustive_patterns;
-
-            if is_secretly_empty || is_declared_nonexhaustive { vec![NonExhaustive] } else { ctors }
+            }
         }
         ty::Char => {
             vec![
@@ -1654,24 +1624,21 @@
             // `#[non_exhaustive]` enums by returning a special unmatcheable constructor.
             vec![NonExhaustive]
         }
-        ty::Int(ity) => {
+        &ty::Int(ity) => {
             let bits = Integer::from_attr(&cx.tcx, SignedInt(ity)).size().bits() as u128;
             let min = 1u128 << (bits - 1);
             let max = min - 1;
             vec![make_range(min, max)]
         }
-        ty::Uint(uty) => {
+        &ty::Uint(uty) => {
             let size = Integer::from_attr(&cx.tcx, UnsignedInt(uty)).size();
-            let max = truncate(u128::MAX, size);
+            let max = size.truncate(u128::MAX);
             vec![make_range(0, max)]
         }
-        _ => {
-            if cx.is_uninhabited(pcx.ty) {
-                vec![]
-            } else {
-                vec![Single]
-            }
-        }
+        _ if cx.is_uninhabited(pcx.ty) => vec![],
+        ty::Adt(..) | ty::Tuple(..) | ty::Ref(..) => vec![Single],
+        // This type is one for which we cannot list constructors, like `str` or `f64`.
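+        // As a consequence (our note), a match on such a scrutinee can only be made
+        // exhaustive by a wildcard-like arm: no pattern the user can write, other than
+        // `_` or a binding, covers the `NonExhaustive` constructor.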
+        _ => vec![NonExhaustive],
     }
 }
 
@@ -1695,10 +1662,7 @@
 impl<'tcx> IntRange<'tcx> {
     #[inline]
     fn is_integral(ty: Ty<'_>) -> bool {
-        match ty.kind() {
-            ty::Char | ty::Int(_) | ty::Uint(_) => true,
-            _ => false,
-        }
+        matches!(ty.kind(), ty::Char | ty::Int(_) | ty::Uint(_) | ty::Bool)
     }
 
     fn is_singleton(&self) -> bool {
@@ -1718,6 +1682,7 @@
     #[inline]
     fn integral_size_and_signed_bias(tcx: TyCtxt<'tcx>, ty: Ty<'_>) -> Option<(Size, u128)> {
         match *ty.kind() {
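+            // Treating `bool` as a 1-byte integer with no signed bias lets `true` and
+            // `false` patterns reuse the `IntRange` machinery (the type spans `0..=1`).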
+            ty::Bool => Some((Size::from_bytes(1), 0)),
             ty::Char => Some((Size::from_bytes(4), 0)),
             ty::Int(ity) => {
                 let size = Integer::from_attr(&tcx, SignedInt(ity)).size();
@@ -1782,40 +1747,6 @@
         }
     }
 
-    fn from_pat(
-        tcx: TyCtxt<'tcx>,
-        param_env: ty::ParamEnv<'tcx>,
-        pat: &Pat<'tcx>,
-    ) -> Option<IntRange<'tcx>> {
-        // This MUST be kept in sync with `pat_constructor`.
-        match *pat.kind {
-            PatKind::AscribeUserType { .. } => bug!(), // Handled by `expand_pattern`
-            PatKind::Or { .. } => bug!("Or-pattern should have been expanded earlier on."),
-
-            PatKind::Binding { .. }
-            | PatKind::Wild
-            | PatKind::Leaf { .. }
-            | PatKind::Deref { .. }
-            | PatKind::Variant { .. }
-            | PatKind::Array { .. }
-            | PatKind::Slice { .. } => None,
-
-            PatKind::Constant { value } => Self::from_const(tcx, param_env, value, pat.span),
-
-            PatKind::Range(PatRange { lo, hi, end }) => {
-                let ty = lo.ty;
-                Self::from_range(
-                    tcx,
-                    lo.eval_bits(tcx, param_env, lo.ty),
-                    hi.eval_bits(tcx, param_env, hi.ty),
-                    ty,
-                    &end,
-                    pat.span,
-                )
-            }
-        }
-    }
-
     // The return value of `signed_bias` should be XORed with an endpoint to encode/decode it.
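+    // For instance (our illustration): the bias for `i8` is `1 << 7`, so the bit
+    // pattern of `-128` (`0x80`) encodes to `0x00` and that of `127` (`0x7f`) encodes
+    // to `0xff`, which preserves the signed ordering in unsigned space.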
     fn signed_bias(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> u128 {
         match *ty.kind() {
@@ -1827,35 +1758,6 @@
         }
     }
 
-    /// Returns a collection of ranges that spans the values covered by `ranges`, subtracted
-    /// by the values covered by `self`: i.e., `ranges \ self` (in set notation).
-    fn subtract_from(&self, ranges: Vec<IntRange<'tcx>>) -> Vec<IntRange<'tcx>> {
-        let mut remaining_ranges = vec![];
-        let ty = self.ty;
-        let span = self.span;
-        let (lo, hi) = self.boundaries();
-        for subrange in ranges {
-            let (subrange_lo, subrange_hi) = subrange.range.into_inner();
-            if lo > subrange_hi || subrange_lo > hi {
-                // The pattern doesn't intersect with the subrange at all,
-                // so the subrange remains untouched.
-                remaining_ranges.push(IntRange { range: subrange_lo..=subrange_hi, ty, span });
-            } else {
-                if lo > subrange_lo {
-                    // The pattern intersects an upper section of the
-                    // subrange, so a lower section will remain.
-                    remaining_ranges.push(IntRange { range: subrange_lo..=(lo - 1), ty, span });
-                }
-                if hi < subrange_hi {
-                    // The pattern intersects a lower section of the
-                    // subrange, so an upper section will remain.
-                    remaining_ranges.push(IntRange { range: (hi + 1)..=subrange_hi, ty, span });
-                }
-            }
-        }
-        remaining_ranges
-    }
-
     fn is_subrange(&self, other: &Self) -> bool {
         other.range.start() <= self.range.start() && self.range.end() <= other.range.end()
     }
@@ -1913,6 +1815,162 @@
         // This is a brand new pattern, so we don't reuse `self.span`.
         Pat { ty: self.ty, span: DUMMY_SP, kind: Box::new(kind) }
     }
+
+    /// For exhaustive integer matching, some constructors are grouped within other constructors
+    /// (namely integer typed values are grouped within ranges). However, when specialising these
+    /// constructors, we want to be specialising for the underlying constructors (the integers), not
+    /// the groups (the ranges). Thus we need to split the groups up. Splitting them up naïvely would
+    /// mean creating a separate constructor for every single value in the range, which is clearly
+    /// impractical. However, observe that for some ranges of integers, the specialisation will be
+    /// identical across all values in that range (i.e., there are equivalence classes of ranges of
+    /// constructors based on their `U(S(c, P), S(c, p))` outcome). These classes are grouped by
+    /// the patterns that apply to them (in the matrix `P`). We can split the range whenever the
+    /// patterns that apply to that range (specifically: the patterns that *intersect* with that range)
+    /// change.
+    /// Our solution, therefore, is to split the range constructor into subranges at every single point
+    /// the group of intersecting patterns changes (using the method described below).
+    /// And voilà! We're testing precisely those ranges that we need to, without any exhaustive matching
+    /// on actual integers. The nice thing about this is that the number of subranges is linear in the
+    /// number of rows in the matrix (i.e., the number of cases in the `match` statement), so we don't
+    /// need to be worried about matching over gargantuan ranges.
+    ///
+    /// Essentially, given the first column of a matrix representing ranges, looking like the following:
+    ///
+    /// |------|  |----------| |-------|    ||
+    ///    |-------| |-------|            |----| ||
+    ///       |---------|
+    ///
+    /// We split the ranges up into equivalence classes so the ranges are no longer overlapping:
+    ///
+    /// |--|--|||-||||--||---|||-------|  |-|||| ||
+    ///
+    /// The logic for determining how to split the ranges is fairly straightforward: we calculate
+    /// boundaries for each interval range, sort them, then create constructors for each new interval
+    /// between every pair of boundary points. (This essentially sums up to performing the intuitive
+    /// merging operation depicted above.)
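+    ///
+    /// A small worked example (numbers ours, for illustration): if `self` is `0..=10`
+    /// and the only row pattern is `5..=20`, the intersection is `5..=10`, so the
+    /// collected borders are `JustBefore(0)`, `JustBefore(5)` and `JustBefore(11)`
+    /// (the latter twice); walking adjacent pairs of borders then yields the
+    /// subranges `0..=4` and `5..=10`.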
+    fn split<'p>(
+        &self,
+        pcx: PatCtxt<'_, 'p, 'tcx>,
+        hir_id: Option<HirId>,
+    ) -> SmallVec<[Constructor<'tcx>; 1]> {
+        let ty = pcx.ty;
+
+        /// Represents a border between 2 integers. Because the intervals spanning borders
+        /// must be able to cover every integer, we need to be able to represent
+        /// 2^128 + 1 such borders.
+        #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
+        enum Border {
+            JustBefore(u128),
+            AfterMax,
+        }
+
+        // A function for extracting the borders of an integer interval.
+        fn range_borders(r: IntRange<'_>) -> impl Iterator<Item = Border> {
+            let (lo, hi) = r.range.into_inner();
+            let from = Border::JustBefore(lo);
+            let to = match hi.checked_add(1) {
+                Some(m) => Border::JustBefore(m),
+                None => Border::AfterMax,
+            };
+            vec![from, to].into_iter()
+        }
+
+        // Collect the span and range of all the intersecting ranges to lint on likely
+        // incorrect range patterns. (#63987)
+        let mut overlaps = vec![];
+        let row_len = pcx.matrix.patterns.get(0).map(|r| r.len()).unwrap_or(0);
+        // `borders` is the set of borders between equivalence classes: each equivalence
+        // class lies between 2 borders.
+        let row_borders = pcx
+            .matrix
+            .head_ctors(pcx.cx)
+            .filter_map(|ctor| ctor.as_int_range())
+            .filter_map(|range| {
+                let intersection = self.intersection(pcx.cx.tcx, &range);
+                let should_lint = self.suspicious_intersection(&range);
+                if let (Some(range), 1, true) = (&intersection, row_len, should_lint) {
+                    // FIXME: for now, only check for overlapping ranges on simple range
+                    // patterns. Otherwise with the current logic the following is detected
+                    // as overlapping:
+                    //   match (10u8, true) {
+                    //    (0 ..= 125, false) => {}
+                    //    (126 ..= 255, false) => {}
+                    //    (0 ..= 255, true) => {}
+                    //  }
+                    overlaps.push(range.clone());
+                }
+                intersection
+            })
+            .flat_map(range_borders);
+        let self_borders = range_borders(self.clone());
+        let mut borders: Vec<_> = row_borders.chain(self_borders).collect();
+        borders.sort_unstable();
+
+        self.lint_overlapping_patterns(pcx.cx.tcx, hir_id, ty, overlaps);
+
+        // We're going to iterate through every adjacent pair of borders, making sure that
+        // each represents an interval of nonnegative length, and convert each such
+        // interval into a constructor.
+        borders
+            .array_windows()
+            .filter_map(|&pair| match pair {
+                [Border::JustBefore(n), Border::JustBefore(m)] => {
+                    if n < m {
+                        Some(n..=(m - 1))
+                    } else {
+                        None
+                    }
+                }
+                [Border::JustBefore(n), Border::AfterMax] => Some(n..=u128::MAX),
+                [Border::AfterMax, _] => None,
+            })
+            .map(|range| IntRange { range, ty, span: pcx.span })
+            .map(IntRange)
+            .collect()
+    }
+
+    fn lint_overlapping_patterns(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        hir_id: Option<HirId>,
+        ty: Ty<'tcx>,
+        overlaps: Vec<IntRange<'tcx>>,
+    ) {
+        if let (true, Some(hir_id)) = (!overlaps.is_empty(), hir_id) {
+            tcx.struct_span_lint_hir(
+                lint::builtin::OVERLAPPING_PATTERNS,
+                hir_id,
+                self.span,
+                |lint| {
+                    let mut err = lint.build("multiple patterns covering the same range");
+                    err.span_label(self.span, "overlapping patterns");
+                    for int_range in overlaps {
+                        // Use the real type for user display of the ranges:
+                        err.span_label(
+                            int_range.span,
+                            &format!(
+                                "this range overlaps on `{}`",
+                                IntRange { range: int_range.range, ty, span: DUMMY_SP }.to_pat(tcx),
+                            ),
+                        );
+                    }
+                    err.emit();
+                },
+            );
+        }
+    }
+
+    /// See `Constructor::is_covered_by`
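+    /// (Illustration ours: after splitting, a subrange such as `5..=10` either lies
+    /// entirely inside a row range like `5..=20` or, like `0..=4`, does not intersect
+    /// it at all.)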
+    fn is_covered_by<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>, other: &Self) -> bool {
+        if self.intersection(pcx.cx.tcx, other).is_some() {
+            // Constructor splitting should ensure that all intersections we encounter are actually
+            // inclusions.
+            assert!(self.is_subrange(other));
+            true
+        } else {
+            false
+        }
+    }
 }
 
 /// Ignore spans when comparing, they don't carry semantic information as they are only for lints.
@@ -1923,43 +1981,90 @@
 }
 
 // A struct to compute a set of constructors equivalent to `all_ctors \ used_ctors`.
+#[derive(Debug)]
 struct MissingConstructors<'tcx> {
-    all_ctors: Vec<Constructor<'tcx>>,
+    all_ctors: SmallVec<[Constructor<'tcx>; 1]>,
     used_ctors: Vec<Constructor<'tcx>>,
 }
 
 impl<'tcx> MissingConstructors<'tcx> {
-    fn new(all_ctors: Vec<Constructor<'tcx>>, used_ctors: Vec<Constructor<'tcx>>) -> Self {
+    fn new<'p>(pcx: PatCtxt<'_, 'p, 'tcx>) -> Self {
+        let used_ctors: Vec<Constructor<'_>> =
+            pcx.matrix.head_ctors(pcx.cx).cloned().filter(|c| !c.is_wildcard()).collect();
+        // Since `all_ctors` never contains wildcards, this won't recurse further.
+        let all_ctors =
+            all_constructors(pcx).into_iter().flat_map(|ctor| ctor.split(pcx, None)).collect();
+
         MissingConstructors { all_ctors, used_ctors }
     }
 
-    fn into_inner(self) -> (Vec<Constructor<'tcx>>, Vec<Constructor<'tcx>>) {
-        (self.all_ctors, self.used_ctors)
-    }
-
-    fn is_empty(&self) -> bool {
-        self.iter().next().is_none()
-    }
-    /// Whether this contains all the constructors for the given type or only a
-    /// subset.
-    fn all_ctors_are_missing(&self) -> bool {
-        self.used_ctors.is_empty()
+    fn is_empty<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>) -> bool {
+        self.iter(pcx).next().is_none()
     }
 
     /// Iterate over all_ctors \ used_ctors
-    fn iter<'a>(&'a self) -> impl Iterator<Item = Constructor<'tcx>> + Captures<'a> {
-        self.all_ctors.iter().flat_map(move |req_ctor| req_ctor.subtract_ctors(&self.used_ctors))
+    fn iter<'a, 'p>(
+        &'a self,
+        pcx: PatCtxt<'a, 'p, 'tcx>,
+    ) -> impl Iterator<Item = &'a Constructor<'tcx>> + Captures<'p> {
+        self.all_ctors.iter().filter(move |ctor| !ctor.is_covered_by_any(pcx, &self.used_ctors))
+    }
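+
+    // Illustrative sketch of `iter` (example type ours): for
+    // `enum Direction { N, S, E, W }` with `used_ctors` containing only the
+    // constructor for `N`, the missing constructors are those for `S`, `E` and `W`.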
+
+    /// List the patterns corresponding to the missing constructors. In some cases, instead of
+    /// listing all constructors of a given type, we prefer to simply report a wildcard.
+    fn report_patterns<'p>(
+        &self,
+        pcx: PatCtxt<'_, 'p, 'tcx>,
+        is_top_level: bool,
+    ) -> SmallVec<[Pat<'tcx>; 1]> {
+        // There are 2 ways we can report a witness here.
+        // Commonly, we can report all the "free"
+        // constructors as witnesses, e.g., if we have:
+        //
+        // ```
+        //     enum Direction { N, S, E, W }
+        //     let Direction::N = ...;
+        // ```
+        //
+        // we can report 3 witnesses: `S`, `E`, and `W`.
+        //
+        // However, there is a case where we don't want
+        // to do this and instead report a single `_` witness:
+        // if the user didn't actually specify a constructor
+        // in this arm, e.g., in
+        //
+        // ```
+        //     let x: (Direction, Direction, bool) = ...;
+        //     let (_, _, false) = x;
+        // ```
+        //
+        // we don't want to show all 16 possible witnesses
+        // `(<direction-1>, <direction-2>, true)` - we are
+        // satisfied with `(_, _, true)`. In this case,
+        // `used_ctors` is empty.
+        // The exception is: if we are at the top-level, for example in an empty match, we
+        // sometimes prefer reporting the list of constructors instead of just `_`.
+        let report_when_all_missing = is_top_level && !IntRange::is_integral(pcx.ty);
+        if self.used_ctors.is_empty() && !report_when_all_missing {
+            // All constructors are unused. Report only a wildcard
+            // rather than each individual constructor.
+            smallvec![Pat::wildcard_from_ty(pcx.ty)]
+        } else {
+            // Construct for each missing constructor a "wild" version of this
+            // constructor, that matches everything that can be built with
+            // it. For example, if `ctor` is a `Constructor::Variant` for
+            // `Option::Some`, we get the pattern `Some(_)`.
+            self.iter(pcx)
+                .map(|missing_ctor| {
+                    let fields = Fields::wildcards(pcx, &missing_ctor);
+                    missing_ctor.apply(pcx, fields)
+                })
+                .collect()
+        }
     }
 }
 
-impl<'tcx> fmt::Debug for MissingConstructors<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let ctors: Vec<_> = self.iter().collect();
-        write!(f, "{:?}", ctors)
-    }
-}
-
-/// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html.
+/// Algorithm from <http://moscova.inria.fr/~maranget/papers/warn/index.html>.
 /// The algorithm from the paper has been modified to correctly handle empty
 /// types. The changes are:
 ///   (0) We don't exit early if the pattern matrix has zero rows. We just
@@ -1982,7 +2087,7 @@
 /// has one it must not be inserted into the matrix. This shouldn't be
 /// relied on for soundness.
 crate fn is_useful<'p, 'tcx>(
-    cx: &mut MatchCheckCtxt<'p, 'tcx>,
+    cx: &MatchCheckCtxt<'p, 'tcx>,
     matrix: &Matrix<'p, 'tcx>,
     v: &PatStack<'p, 'tcx>,
     witness_preference: WitnessPreference,
@@ -2010,261 +2115,162 @@
 
     // If the first pattern is an or-pattern, expand it.
     if let Some(vs) = v.expand_or_pat() {
-        // We need to push the already-seen patterns into the matrix in order to detect redundant
-        // branches like `Some(_) | Some(0)`. We also keep track of the unreachable subpatterns.
-        let mut matrix = matrix.clone();
-        // `Vec` of all the unreachable branches of the current or-pattern.
-        let mut unreachable_branches = Vec::new();
-        // Subpatterns that are unreachable from all branches. E.g. in the following case, the last
-        // `true` is unreachable only from one branch, so it is overall reachable.
+        // We expand the or-pattern, trying each of its branches in turn and keeping careful track
+        // of possible unreachable sub-branches.
+        //
+        // If two branches have detected some unreachable sub-branches, we need to be careful. If
+        // they were detected in columns that are not the current one, we want to keep only the
+        // sub-branches that were unreachable in _all_ branches. E.g., in the following, the last
+        // `true` is unreachable in the second branch of the first or-pattern, but not otherwise.
+        // Therefore we don't want to lint that it is unreachable.
+        //
         // ```
         // match (true, true) {
         //     (true, true) => {}
         //     (false | true, false | true) => {}
         // }
         // ```
-        let mut unreachable_subpats = FxHashSet::default();
-        // Whether any branch at all is useful.
+        // If however the sub-branches come from the current column, they come from the inside of
+        // the current or-pattern, and we want to keep them all. Eg. in the following, we _do_ want
+        // to lint that the last `false` is unreachable.
+        // ```
+        // match None {
+        //     Some(false) => {}
+        //     None | Some(true | false) => {}
+        // }
+        // ```
+
+        let mut matrix = matrix.clone();
+        // We keep track of sub-branches separately depending on whether they come from this column
+        // or from others.
+        let mut unreachables_this_column: FxHashSet<Span> = FxHashSet::default();
+        let mut unreachables_other_columns: Vec<FxHashSet<Span>> = Vec::default();
+        // Whether at least one branch is reachable.
         let mut any_is_useful = false;
 
         for v in vs {
             let res = is_useful(cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false);
             match res {
-                Useful(pats) => {
-                    if !any_is_useful {
-                        any_is_useful = true;
-                        // Initialize with the first set of unreachable subpatterns encountered.
-                        unreachable_subpats = pats.into_iter().collect();
-                    } else {
-                        // Keep the patterns unreachable from both this and previous branches.
-                        unreachable_subpats =
-                            pats.into_iter().filter(|p| unreachable_subpats.contains(p)).collect();
+                Useful(unreachables) => {
+                    if let Some((this_column, other_columns)) = unreachables.split_last() {
+                        // We keep the union of unreachables found in the first column.
+                        unreachables_this_column.extend(this_column);
+                        // We keep the intersection of unreachables found in other columns.
+                        if unreachables_other_columns.is_empty() {
+                            unreachables_other_columns = other_columns.to_vec();
+                        } else {
+                            unreachables_other_columns = unreachables_other_columns
+                                .into_iter()
+                                .zip(other_columns)
+                                .map(|(x, y)| x.intersection(&y).copied().collect())
+                                .collect();
+                        }
                     }
+                    any_is_useful = true;
                 }
-                NotUseful => unreachable_branches.push(v.head().span),
-                UsefulWithWitness(_) => {
-                    bug!("Encountered or-pat in `v` during exhaustiveness checking")
+                NotUseful => {
+                    unreachables_this_column.insert(v.head().span);
                 }
+                UsefulWithWitness(_) => bug!(
+                    "encountered or-pat in the expansion of `_` during exhaustiveness checking"
+                ),
             }
-            // If pattern has a guard don't add it to the matrix
+
+            // If pattern has a guard don't add it to the matrix.
             if !is_under_guard {
+                // We push the already-seen patterns into the matrix in order to detect redundant
+                // branches like `Some(_) | Some(0)`.
                 matrix.push(v);
             }
         }
-        if any_is_useful {
-            // Collect all the unreachable patterns.
-            unreachable_branches.extend(unreachable_subpats);
-            return Useful(unreachable_branches);
+
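+        // The `Useful` payload carries one set of unreachable sub-pattern spans per
+        // column of `v`; the set for the current column is pushed last.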
+        return if any_is_useful {
+            let mut unreachables = if unreachables_other_columns.is_empty() {
+                let n_columns = v.len();
+                (0..n_columns - 1).map(|_| FxHashSet::default()).collect()
+            } else {
+                unreachables_other_columns
+            };
+            unreachables.push(unreachables_this_column);
+            Useful(unreachables)
         } else {
-            return NotUseful;
-        }
+            NotUseful
+        };
     }
 
     // FIXME(Nadrieril): Hack to work around type normalization issues (see #72476).
     let ty = matrix.heads().next().map(|r| r.ty).unwrap_or(v.head().ty);
-    let pcx = PatCtxt { ty, span: v.head().span };
+    let pcx = PatCtxt { cx, matrix, ty, span: v.head().span };
 
-    debug!("is_useful_expand_first_col: pcx={:#?}, expanding {:#?}", pcx, v.head());
+    debug!("is_useful_expand_first_col: ty={:#?}, expanding {:#?}", pcx.ty, v.head());
 
-    let ret = if let Some(constructor) = pat_constructor(cx.tcx, cx.param_env, v.head()) {
-        debug!("is_useful - expanding constructor: {:#?}", constructor);
-        split_grouped_constructors(
-            cx.tcx,
-            cx.param_env,
-            pcx,
-            vec![constructor],
-            matrix,
-            pcx.span,
-            Some(hir_id),
-        )
+    let ret = v
+        .head_ctor(cx)
+        .split(pcx, Some(hir_id))
         .into_iter()
-        .map(|c| {
-            is_useful_specialized(
-                cx,
-                matrix,
-                v,
-                c,
-                pcx.ty,
-                witness_preference,
-                hir_id,
-                is_under_guard,
-            )
+        .map(|ctor| {
+            // We cache the result of `Fields::wildcards` because it is used a lot.
+            let ctor_wild_subpatterns = Fields::wildcards(pcx, &ctor);
+            let matrix = pcx.matrix.specialize_constructor(pcx, &ctor, &ctor_wild_subpatterns);
+            let v = v.pop_head_constructor(&ctor_wild_subpatterns);
+            let usefulness =
+                is_useful(pcx.cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false);
+            usefulness.apply_constructor(pcx, &ctor, &ctor_wild_subpatterns, is_top_level)
         })
         .find(|result| result.is_useful())
-        .unwrap_or(NotUseful)
-    } else {
-        debug!("is_useful - expanding wildcard");
-
-        let used_ctors: Vec<Constructor<'_>> =
-            matrix.heads().filter_map(|p| pat_constructor(cx.tcx, cx.param_env, p)).collect();
-        debug!("is_useful_used_ctors = {:#?}", used_ctors);
-        // `all_ctors` are all the constructors for the given type, which
-        // should all be represented (or caught with the wild pattern `_`).
-        let all_ctors = all_constructors(cx, pcx);
-        debug!("is_useful_all_ctors = {:#?}", all_ctors);
-
-        // `missing_ctors` is the set of constructors from the same type as the
-        // first column of `matrix` that are matched only by wildcard patterns
-        // from the first column.
-        //
-        // Therefore, if there is some pattern that is unmatched by `matrix`,
-        // it will still be unmatched if the first constructor is replaced by
-        // any of the constructors in `missing_ctors`
-
-        // Missing constructors are those that are not matched by any non-wildcard patterns in the
-        // current column. We only fully construct them on-demand, because they're rarely used and
-        // can be big.
-        let missing_ctors = MissingConstructors::new(all_ctors, used_ctors);
-
-        debug!("is_useful_missing_ctors.empty()={:#?}", missing_ctors.is_empty(),);
-
-        if missing_ctors.is_empty() {
-            let (all_ctors, _) = missing_ctors.into_inner();
-            split_grouped_constructors(cx.tcx, cx.param_env, pcx, all_ctors, matrix, DUMMY_SP, None)
-                .into_iter()
-                .map(|c| {
-                    is_useful_specialized(
-                        cx,
-                        matrix,
-                        v,
-                        c,
-                        pcx.ty,
-                        witness_preference,
-                        hir_id,
-                        is_under_guard,
-                    )
-                })
-                .find(|result| result.is_useful())
-                .unwrap_or(NotUseful)
-        } else {
-            let matrix = matrix.specialize_wildcard();
-            let v = v.to_tail();
-            let usefulness =
-                is_useful(cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false);
-
-            // In this case, there's at least one "free"
-            // constructor that is only matched against by
-            // wildcard patterns.
-            //
-            // There are 2 ways we can report a witness here.
-            // Commonly, we can report all the "free"
-            // constructors as witnesses, e.g., if we have:
-            //
-            // ```
-            //     enum Direction { N, S, E, W }
-            //     let Direction::N = ...;
-            // ```
-            //
-            // we can report 3 witnesses: `S`, `E`, and `W`.
-            //
-            // However, there is a case where we don't want
-            // to do this and instead report a single `_` witness:
-            // if the user didn't actually specify a constructor
-            // in this arm, e.g., in
-            // ```
-            //     let x: (Direction, Direction, bool) = ...;
-            //     let (_, _, false) = x;
-            // ```
-            // we don't want to show all 16 possible witnesses
-            // `(<direction-1>, <direction-2>, true)` - we are
-            // satisfied with `(_, _, true)`. In this case,
-            // `used_ctors` is empty.
-            // The exception is: if we are at the top-level, for example in an empty match, we
-            // sometimes prefer reporting the list of constructors instead of just `_`.
-            let report_ctors_rather_than_wildcard = is_top_level && !IntRange::is_integral(pcx.ty);
-            if missing_ctors.all_ctors_are_missing() && !report_ctors_rather_than_wildcard {
-                // All constructors are unused. Add a wild pattern
-                // rather than each individual constructor.
-                usefulness.apply_wildcard(pcx.ty)
-            } else {
-                // Construct for each missing constructor a "wild" version of this
-                // constructor, that matches everything that can be built with
-                // it. For example, if `ctor` is a `Constructor::Variant` for
-                // `Option::Some`, we get the pattern `Some(_)`.
-                usefulness.apply_missing_ctors(cx, pcx.ty, &missing_ctors)
-            }
-        }
-    };
+        .unwrap_or(NotUseful);
     debug!("is_useful::returns({:#?}, {:#?}) = {:?}", matrix, v, ret);
     ret
 }
 
-/// A shorthand for the `U(S(c, P), S(c, q))` operation from the paper. I.e., `is_useful` applied
-/// to the specialised version of both the pattern matrix `P` and the new pattern `q`.
-fn is_useful_specialized<'p, 'tcx>(
-    cx: &mut MatchCheckCtxt<'p, 'tcx>,
-    matrix: &Matrix<'p, 'tcx>,
-    v: &PatStack<'p, 'tcx>,
-    ctor: Constructor<'tcx>,
-    ty: Ty<'tcx>,
-    witness_preference: WitnessPreference,
-    hir_id: HirId,
-    is_under_guard: bool,
-) -> Usefulness<'tcx> {
-    debug!("is_useful_specialized({:#?}, {:#?}, {:?})", v, ctor, ty);
-
-    // We cache the result of `Fields::wildcards` because it is used a lot.
-    let ctor_wild_subpatterns = Fields::wildcards(cx, &ctor, ty);
-    let matrix = matrix.specialize_constructor(cx, &ctor, &ctor_wild_subpatterns);
-    v.specialize_constructor(cx, &ctor, &ctor_wild_subpatterns)
-        .map(|v| is_useful(cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false))
-        .map(|u| u.apply_constructor(cx, &ctor, ty, &ctor_wild_subpatterns))
-        .unwrap_or(NotUseful)
-}
-
 /// Determines the constructor that the given pattern can be specialized to.
 /// A catch-all (a wildcard or a binding) is mapped to the `Wildcard` constructor.
-fn pat_constructor<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    param_env: ty::ParamEnv<'tcx>,
-    pat: &Pat<'tcx>,
-) -> Option<Constructor<'tcx>> {
-    // This MUST be kept in sync with `IntRange::from_pat`.
-    match *pat.kind {
+fn pat_constructor<'p, 'tcx>(
+    cx: &MatchCheckCtxt<'p, 'tcx>,
+    pat: &'p Pat<'tcx>,
+) -> Constructor<'tcx> {
+    match pat.kind.as_ref() {
         PatKind::AscribeUserType { .. } => bug!(), // Handled by `expand_pattern`
-        PatKind::Binding { .. } | PatKind::Wild => None,
-        PatKind::Leaf { .. } | PatKind::Deref { .. } => Some(Single),
-        PatKind::Variant { adt_def, variant_index, .. } => {
-            Some(Variant(adt_def.variants[variant_index].def_id))
+        PatKind::Binding { .. } | PatKind::Wild => Wildcard,
+        PatKind::Leaf { .. } | PatKind::Deref { .. } => Single,
+        &PatKind::Variant { adt_def, variant_index, .. } => {
+            Variant(adt_def.variants[variant_index].def_id)
         }
         PatKind::Constant { value } => {
-            if let Some(int_range) = IntRange::from_const(tcx, param_env, value, pat.span) {
-                Some(IntRange(int_range))
+            if let Some(int_range) = IntRange::from_const(cx.tcx, cx.param_env, value, pat.span) {
+                IntRange(int_range)
             } else {
-                match (value.val, &value.ty.kind()) {
-                    (_, ty::Array(_, n)) => {
-                        let len = n.eval_usize(tcx, param_env);
-                        Some(Slice(Slice { array_len: Some(len), kind: FixedLen(len) }))
-                    }
-                    (ty::ConstKind::Value(ConstValue::Slice { start, end, .. }), ty::Slice(_)) => {
-                        let len = (end - start) as u64;
-                        Some(Slice(Slice { array_len: None, kind: FixedLen(len) }))
-                    }
-                    // FIXME(oli-obk): implement `deref` for `ConstValue`
-                    // (ty::ConstKind::Value(ConstValue::ByRef { .. }), ty::Slice(_)) => { ... }
-                    _ => Some(ConstantValue(value)),
+                match pat.ty.kind() {
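+                    // A float constant such as `1.5` is handled as the degenerate
+                    // range `1.5..=1.5` (our note).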
+                    ty::Float(_) => FloatRange(value, value, RangeEnd::Included),
+                    // In `expand_pattern`, we convert string literals to `&CONST` patterns with
+                    // `CONST` a pattern of type `str`. In truth this contains a constant of type
+                    // `&str`.
+                    ty::Str => Str(value),
+                    // All constants that can be structurally matched have already been expanded
+                    // into the corresponding `Pat`s by `const_to_pat`. Constants that remain are
+                    // opaque.
+                    _ => Opaque,
                 }
             }
         }
-        PatKind::Range(PatRange { lo, hi, end }) => {
+        &PatKind::Range(PatRange { lo, hi, end }) => {
             let ty = lo.ty;
             if let Some(int_range) = IntRange::from_range(
-                tcx,
-                lo.eval_bits(tcx, param_env, lo.ty),
-                hi.eval_bits(tcx, param_env, hi.ty),
+                cx.tcx,
+                lo.eval_bits(cx.tcx, cx.param_env, lo.ty),
+                hi.eval_bits(cx.tcx, cx.param_env, hi.ty),
                 ty,
                 &end,
                 pat.span,
             ) {
-                Some(IntRange(int_range))
+                IntRange(int_range)
             } else {
-                Some(FloatRange(lo, hi, end))
+                FloatRange(lo, hi, end)
             }
         }
-        PatKind::Array { ref prefix, ref slice, ref suffix }
-        | PatKind::Slice { ref prefix, ref slice, ref suffix } => {
+        PatKind::Array { prefix, slice, suffix } | PatKind::Slice { prefix, slice, suffix } => {
             let array_len = match pat.ty.kind() {
-                ty::Array(_, length) => Some(length.eval_usize(tcx, param_env)),
+                ty::Array(_, length) => Some(length.eval_usize(cx.tcx, cx.param_env)),
                 ty::Slice(_) => None,
                 _ => span_bug!(pat.span, "bad ty {:?} for slice pattern", pat.ty),
             };
@@ -2272,584 +2278,8 @@
             let suffix = suffix.len() as u64;
             let kind =
                 if slice.is_some() { VarLen(prefix, suffix) } else { FixedLen(prefix + suffix) };
-            Some(Slice(Slice { array_len, kind }))
+            Slice(Slice::new(array_len, kind))
         }
         PatKind::Or { .. } => bug!("Or-pattern should have been expanded earlier on."),
     }
 }
-
-// checks whether a constant is equal to a user-written slice pattern. Only supports byte slices,
-// meaning all other types will compare unequal and thus equal patterns often do not cause the
-// second pattern to lint about unreachable match arms.
-fn slice_pat_covered_by_const<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    _span: Span,
-    const_val: &'tcx ty::Const<'tcx>,
-    prefix: &[Pat<'tcx>],
-    slice: &Option<Pat<'tcx>>,
-    suffix: &[Pat<'tcx>],
-    param_env: ty::ParamEnv<'tcx>,
-) -> Result<bool, ErrorReported> {
-    let const_val_val = if let ty::ConstKind::Value(val) = const_val.val {
-        val
-    } else {
-        bug!(
-            "slice_pat_covered_by_const: {:#?}, {:#?}, {:#?}, {:#?}",
-            const_val,
-            prefix,
-            slice,
-            suffix,
-        )
-    };
-
-    let data: &[u8] = match (const_val_val, &const_val.ty.kind()) {
-        (ConstValue::ByRef { offset, alloc, .. }, ty::Array(t, n)) => {
-            assert_eq!(*t, tcx.types.u8);
-            let n = n.eval_usize(tcx, param_env);
-            let ptr = Pointer::new(AllocId(0), offset);
-            alloc.get_bytes(&tcx, ptr, Size::from_bytes(n)).unwrap()
-        }
-        (ConstValue::Slice { data, start, end }, ty::Slice(t)) => {
-            assert_eq!(*t, tcx.types.u8);
-            let ptr = Pointer::new(AllocId(0), Size::from_bytes(start));
-            data.get_bytes(&tcx, ptr, Size::from_bytes(end - start)).unwrap()
-        }
-        // FIXME(oli-obk): create a way to extract fat pointers from ByRef
-        (_, ty::Slice(_)) => return Ok(false),
-        _ => bug!(
-            "slice_pat_covered_by_const: {:#?}, {:#?}, {:#?}, {:#?}",
-            const_val,
-            prefix,
-            slice,
-            suffix,
-        ),
-    };
-
-    let pat_len = prefix.len() + suffix.len();
-    if data.len() < pat_len || (slice.is_none() && data.len() > pat_len) {
-        return Ok(false);
-    }
-
-    for (ch, pat) in data[..prefix.len()]
-        .iter()
-        .zip(prefix)
-        .chain(data[data.len() - suffix.len()..].iter().zip(suffix))
-    {
-        if let box PatKind::Constant { value } = pat.kind {
-            let b = value.eval_bits(tcx, param_env, pat.ty);
-            assert_eq!(b as u8 as u128, b);
-            if b as u8 != *ch {
-                return Ok(false);
-            }
-        }
-    }
-
-    Ok(true)
-}
-
-/// For exhaustive integer matching, some constructors are grouped within other constructors
-/// (namely integer typed values are grouped within ranges). However, when specialising these
-/// constructors, we want to be specialising for the underlying constructors (the integers), not
-/// the groups (the ranges). Thus we need to split the groups up. Splitting them up naïvely would
-/// mean creating a separate constructor for every single value in the range, which is clearly
-/// impractical. However, observe that for some ranges of integers, the specialisation will be
-/// identical across all values in that range (i.e., there are equivalence classes of ranges of
-/// constructors based on their `is_useful_specialized` outcome). These classes are grouped by
-/// the patterns that apply to them (in the matrix `P`). We can split the range whenever the
-/// patterns that apply to that range (specifically: the patterns that *intersect* with that range)
-/// change.
-/// Our solution, therefore, is to split the range constructor into subranges at every single point
-/// the group of intersecting patterns changes (using the method described below).
-/// And voilà! We're testing precisely those ranges that we need to, without any exhaustive matching
-/// on actual integers. The nice thing about this is that the number of subranges is linear in the
-/// number of rows in the matrix (i.e., the number of cases in the `match` statement), so we don't
-/// need to be worried about matching over gargantuan ranges.
-///
-/// Essentially, given the first column of a matrix representing ranges, looking like the following:
-///
-/// |------|  |----------| |-------|    ||
-///    |-------| |-------|            |----| ||
-///       |---------|
-///
-/// We split the ranges up into equivalence classes so the ranges are no longer overlapping:
-///
-/// |--|--|||-||||--||---|||-------|  |-|||| ||
-///
-/// The logic for determining how to split the ranges is fairly straightforward: we calculate
-/// boundaries for each interval range, sort them, then create constructors for each new interval
-/// between every pair of boundary points. (This essentially sums up to performing the intuitive
-/// merging operation depicted above.)
-///
-/// `hir_id` is `None` when we're evaluating the wildcard pattern, do not lint for overlapping in
-/// ranges that case.
-///
-/// This also splits variable-length slices into fixed-length slices.
-fn split_grouped_constructors<'p, 'tcx>(
-    tcx: TyCtxt<'tcx>,
-    param_env: ty::ParamEnv<'tcx>,
-    pcx: PatCtxt<'tcx>,
-    ctors: Vec<Constructor<'tcx>>,
-    matrix: &Matrix<'p, 'tcx>,
-    span: Span,
-    hir_id: Option<HirId>,
-) -> Vec<Constructor<'tcx>> {
-    let ty = pcx.ty;
-    let mut split_ctors = Vec::with_capacity(ctors.len());
-    debug!("split_grouped_constructors({:#?}, {:#?})", matrix, ctors);
-
-    for ctor in ctors.into_iter() {
-        match ctor {
-            IntRange(ctor_range) if ctor_range.treat_exhaustively(tcx) => {
-                // Fast-track if the range is trivial. In particular, don't do the overlapping
-                // ranges check.
-                if ctor_range.is_singleton() {
-                    split_ctors.push(IntRange(ctor_range));
-                    continue;
-                }
-
-                /// Represents a border between 2 integers. Because the intervals spanning borders
-                /// must be able to cover every integer, we need to be able to represent
-                /// 2^128 + 1 such borders.
-                #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
-                enum Border {
-                    JustBefore(u128),
-                    AfterMax,
-                }
-
-                // A function for extracting the borders of an integer interval.
-                fn range_borders(r: IntRange<'_>) -> impl Iterator<Item = Border> {
-                    let (lo, hi) = r.range.into_inner();
-                    let from = Border::JustBefore(lo);
-                    let to = match hi.checked_add(1) {
-                        Some(m) => Border::JustBefore(m),
-                        None => Border::AfterMax,
-                    };
-                    vec![from, to].into_iter()
-                }
-
-                // Collect the span and range of all the intersecting ranges to lint on likely
-                // incorrect range patterns. (#63987)
-                let mut overlaps = vec![];
-                // `borders` is the set of borders between equivalence classes: each equivalence
-                // class lies between 2 borders.
-                let row_borders = matrix
-                    .patterns
-                    .iter()
-                    .flat_map(|row| {
-                        IntRange::from_pat(tcx, param_env, row.head()).map(|r| (r, row.len()))
-                    })
-                    .flat_map(|(range, row_len)| {
-                        let intersection = ctor_range.intersection(tcx, &range);
-                        let should_lint = ctor_range.suspicious_intersection(&range);
-                        if let (Some(range), 1, true) = (&intersection, row_len, should_lint) {
-                            // FIXME: for now, only check for overlapping ranges on simple range
-                            // patterns. Otherwise with the current logic the following is detected
-                            // as overlapping:
-                            //   match (10u8, true) {
-                            //    (0 ..= 125, false) => {}
-                            //    (126 ..= 255, false) => {}
-                            //    (0 ..= 255, true) => {}
-                            //  }
-                            overlaps.push(range.clone());
-                        }
-                        intersection
-                    })
-                    .flat_map(range_borders);
-                let ctor_borders = range_borders(ctor_range.clone());
-                let mut borders: Vec<_> = row_borders.chain(ctor_borders).collect();
-                borders.sort_unstable();
-
-                lint_overlapping_patterns(tcx, hir_id, ctor_range, ty, overlaps);
-
-                // We're going to iterate through every adjacent pair of borders, making sure that
-                // each represents an interval of nonnegative length, and convert each such
-                // interval into a constructor.
-                split_ctors.extend(
-                    borders
-                        .array_windows()
-                        .filter_map(|&pair| match pair {
-                            [Border::JustBefore(n), Border::JustBefore(m)] => {
-                                if n < m {
-                                    Some(IntRange { range: n..=(m - 1), ty, span })
-                                } else {
-                                    None
-                                }
-                            }
-                            [Border::JustBefore(n), Border::AfterMax] => {
-                                Some(IntRange { range: n..=u128::MAX, ty, span })
-                            }
-                            [Border::AfterMax, _] => None,
-                        })
-                        .map(IntRange),
-                );
-            }
-            Slice(Slice { array_len, kind: VarLen(self_prefix, self_suffix) }) => {
-                // The exhaustiveness-checking paper does not include any details on
-                // checking variable-length slice patterns. However, they are matched
-                // by an infinite collection of fixed-length array patterns.
-                //
-                // Checking the infinite set directly would take an infinite amount
-                // of time. However, it turns out that for each finite set of
-                // patterns `P`, all sufficiently large array lengths are equivalent:
-                //
-                // Each slice `s` with a "sufficiently-large" length `l ≥ L` that applies
-                // to exactly the subset `Pₜ` of `P` can be transformed to a slice
-                // `sₘ` for each sufficiently-large length `m` that applies to exactly
-                // the same subset of `P`.
-                //
-                // Because of that, each witness for reachability-checking from one
-                // of the sufficiently-large lengths can be transformed to an
-                // equally-valid witness from any other length, so we only have
-                // to check slice lengths from the "minimal sufficiently-large length"
-                // and below.
-                //
-                // Note that the fact that there is a *single* `sₘ` for each `m`
-                // not depending on the specific pattern in `P` is important: if
-                // you look at the pair of patterns
-                //     `[true, ..]`
-                //     `[.., false]`
-                // Then any slice of length ≥1 that matches one of these two
-                // patterns can be trivially turned to a slice of any
-                // other length ≥1 that matches them and vice-versa - for
-                // but the slice from length 2 `[false, true]` that matches neither
-                // of these patterns can't be turned to a slice from length 1 that
-                // matches neither of these patterns, so we have to consider
-                // slices from length 2 there.
-                //
-                // Now, to see that that length exists and find it, observe that slice
-                // patterns are either "fixed-length" patterns (`[_, _, _]`) or
-                // "variable-length" patterns (`[_, .., _]`).
-                //
-                // For fixed-length patterns, all slices with lengths *longer* than
-                // the pattern's length have the same outcome (of not matching), so
-                // as long as `L` is greater than the pattern's length we can pick
-                // any `sā‚˜` from that length and get the same result.
-                //
-                // For variable-length patterns, the situation is more complicated,
-                // because as seen above the precise value of `sā‚˜` matters.
-                //
-                // However, for each variable-length pattern `p` with a prefix of length
-                // `plₚ` and suffix of length `slₚ`, only the first `plₚ` and the last
-                // `slₚ` elements are examined.
-                //
-                // Therefore, as long as `L` is positive (to avoid concerns about empty
-                // types), all elements after the maximum prefix length and before
-                // the maximum suffix length are not examined by any variable-length
-                // pattern, and therefore can be added/removed without affecting
-                // them - creating equivalent patterns from any sufficiently-large
-                // length.
-                //
-                // Of course, if fixed-length patterns exist, we must be sure
-                // that our length is large enough to miss them all, so
-                // we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`
-                //
-                // for example, with the above pair of patterns, all elements
-                // but the first and last can be added/removed, so any
-                // witness of length ≥2 (say, `[false, false, true]`) can be
-                // turned to a witness from any other length ≥2.
-
-                let mut max_prefix_len = self_prefix;
-                let mut max_suffix_len = self_suffix;
-                let mut max_fixed_len = 0;
-
-                let head_ctors =
-                    matrix.heads().filter_map(|pat| pat_constructor(tcx, param_env, pat));
-                for ctor in head_ctors {
-                    if let Slice(slice) = ctor {
-                        match slice.pattern_kind() {
-                            FixedLen(len) => {
-                                max_fixed_len = cmp::max(max_fixed_len, len);
-                            }
-                            VarLen(prefix, suffix) => {
-                                max_prefix_len = cmp::max(max_prefix_len, prefix);
-                                max_suffix_len = cmp::max(max_suffix_len, suffix);
-                            }
-                        }
-                    }
-                }
-
-                // For diagnostics, we keep the prefix and suffix lengths separate, so in the case
-                // where `max_fixed_len + 1` is the largest, we adapt `max_prefix_len` accordingly,
-                // so that `L = max_prefix_len + max_suffix_len`.
-                if max_fixed_len + 1 >= max_prefix_len + max_suffix_len {
-                    // The subtraction can't overflow thanks to the above check.
-                    // The new `max_prefix_len` is also guaranteed to be larger than its previous
-                    // value.
-                    max_prefix_len = max_fixed_len + 1 - max_suffix_len;
-                }
-
-                match array_len {
-                    Some(len) => {
-                        let kind = if max_prefix_len + max_suffix_len < len {
-                            VarLen(max_prefix_len, max_suffix_len)
-                        } else {
-                            FixedLen(len)
-                        };
-                        split_ctors.push(Slice(Slice { array_len, kind }));
-                    }
-                    None => {
-                        // `ctor` originally covered the range `(self_prefix +
-                        // self_suffix..infinity)`. We now split it into two: lengths smaller than
-                        // `max_prefix_len + max_suffix_len` are treated independently as
-                        // fixed-lengths slices, and lengths above are captured by a final VarLen
-                        // constructor.
-                        split_ctors.extend(
-                            (self_prefix + self_suffix..max_prefix_len + max_suffix_len)
-                                .map(|len| Slice(Slice { array_len, kind: FixedLen(len) })),
-                        );
-                        split_ctors.push(Slice(Slice {
-                            array_len,
-                            kind: VarLen(max_prefix_len, max_suffix_len),
-                        }));
-                    }
-                }
-            }
-            // Any other constructor can be used unchanged.
-            _ => split_ctors.push(ctor),
-        }
-    }
-
-    debug!("split_grouped_constructors(..)={:#?}", split_ctors);
-    split_ctors
-}
-
-fn lint_overlapping_patterns<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    hir_id: Option<HirId>,
-    ctor_range: IntRange<'tcx>,
-    ty: Ty<'tcx>,
-    overlaps: Vec<IntRange<'tcx>>,
-) {
-    if let (true, Some(hir_id)) = (!overlaps.is_empty(), hir_id) {
-        tcx.struct_span_lint_hir(
-            lint::builtin::OVERLAPPING_PATTERNS,
-            hir_id,
-            ctor_range.span,
-            |lint| {
-                let mut err = lint.build("multiple patterns covering the same range");
-                err.span_label(ctor_range.span, "overlapping patterns");
-                for int_range in overlaps {
-                    // Use the real type for user display of the ranges:
-                    err.span_label(
-                        int_range.span,
-                        &format!(
-                            "this range overlaps on `{}`",
-                            IntRange { range: int_range.range, ty, span: DUMMY_SP }.to_pat(tcx),
-                        ),
-                    );
-                }
-                err.emit();
-            },
-        );
-    }
-}
-
-fn constructor_covered_by_range<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    param_env: ty::ParamEnv<'tcx>,
-    ctor: &Constructor<'tcx>,
-    pat: &Pat<'tcx>,
-) -> Option<()> {
-    if let Single = ctor {
-        return Some(());
-    }
-
-    let (pat_from, pat_to, pat_end, ty) = match *pat.kind {
-        PatKind::Constant { value } => (value, value, RangeEnd::Included, value.ty),
-        PatKind::Range(PatRange { lo, hi, end }) => (lo, hi, end, lo.ty),
-        _ => bug!("`constructor_covered_by_range` called with {:?}", pat),
-    };
-    let (ctor_from, ctor_to, ctor_end) = match *ctor {
-        ConstantValue(value) => (value, value, RangeEnd::Included),
-        FloatRange(from, to, ctor_end) => (from, to, ctor_end),
-        _ => bug!("`constructor_covered_by_range` called with {:?}", ctor),
-    };
-    trace!("constructor_covered_by_range {:#?}, {:#?}, {:#?}, {}", ctor, pat_from, pat_to, ty);
-
-    let to = compare_const_vals(tcx, ctor_to, pat_to, param_env, ty)?;
-    let from = compare_const_vals(tcx, ctor_from, pat_from, param_env, ty)?;
-    let intersects = (from == Ordering::Greater || from == Ordering::Equal)
-        && (to == Ordering::Less || (pat_end == ctor_end && to == Ordering::Equal));
-    if intersects { Some(()) } else { None }
-}
-
-/// This is the main specialization step. It expands the pattern
-/// into `arity` patterns based on the constructor. For most patterns, the step is trivial,
-/// for instance tuple patterns are flattened and box patterns expand into their inner pattern.
-/// Returns `None` if the pattern does not have the given constructor.
-///
-/// OTOH, slice patterns with a subslice pattern (tail @ ..) can be expanded into multiple
-/// different patterns.
-/// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
-/// fields filled with wild patterns.
-///
-/// This is roughly the inverse of `Constructor::apply`.
-fn specialize_one_pattern<'p, 'tcx>(
-    cx: &mut MatchCheckCtxt<'p, 'tcx>,
-    pat: &'p Pat<'tcx>,
-    constructor: &Constructor<'tcx>,
-    ctor_wild_subpatterns: &Fields<'p, 'tcx>,
-) -> Option<Fields<'p, 'tcx>> {
-    if let NonExhaustive = constructor {
-        // Only a wildcard pattern can match the special extra constructor
-        if !pat.is_wildcard() {
-            return None;
-        }
-        return Some(Fields::empty());
-    }
-
-    let result = match *pat.kind {
-        PatKind::AscribeUserType { .. } => bug!(), // Handled by `expand_pattern`
-
-        PatKind::Binding { .. } | PatKind::Wild => Some(ctor_wild_subpatterns.clone()),
-
-        PatKind::Variant { adt_def, variant_index, ref subpatterns, .. } => {
-            let variant = &adt_def.variants[variant_index];
-            if constructor != &Variant(variant.def_id) {
-                return None;
-            }
-            Some(ctor_wild_subpatterns.replace_with_fieldpats(subpatterns))
-        }
-
-        PatKind::Leaf { ref subpatterns } => {
-            Some(ctor_wild_subpatterns.replace_with_fieldpats(subpatterns))
-        }
-
-        PatKind::Deref { ref subpattern } => Some(Fields::from_single_pattern(subpattern)),
-
-        PatKind::Constant { value } if constructor.is_slice() => {
-            // We extract an `Option` for the pointer because slices of zero
-            // elements don't necessarily point to memory, they are usually
-            // just integers. The only time they should be pointing to memory
-            // is when they are subslices of nonzero slices.
-            let (alloc, offset, n, ty) = match value.ty.kind() {
-                ty::Array(t, n) => {
-                    let n = n.eval_usize(cx.tcx, cx.param_env);
-                    // Shortcut for `n == 0` where no matter what `alloc` and `offset` we produce,
-                    // the result would be exactly what we early return here.
-                    if n == 0 {
-                        if ctor_wild_subpatterns.len() as u64 != n {
-                            return None;
-                        }
-                        return Some(Fields::empty());
-                    }
-                    match value.val {
-                        ty::ConstKind::Value(ConstValue::ByRef { offset, alloc, .. }) => {
-                            (Cow::Borrowed(alloc), offset, n, t)
-                        }
-                        _ => span_bug!(pat.span, "array pattern is {:?}", value,),
-                    }
-                }
-                ty::Slice(t) => {
-                    match value.val {
-                        ty::ConstKind::Value(ConstValue::Slice { data, start, end }) => {
-                            let offset = Size::from_bytes(start);
-                            let n = (end - start) as u64;
-                            (Cow::Borrowed(data), offset, n, t)
-                        }
-                        ty::ConstKind::Value(ConstValue::ByRef { .. }) => {
-                            // FIXME(oli-obk): implement `deref` for `ConstValue`
-                            return None;
-                        }
-                        _ => span_bug!(
-                            pat.span,
-                            "slice pattern constant must be scalar pair but is {:?}",
-                            value,
-                        ),
-                    }
-                }
-                _ => span_bug!(
-                    pat.span,
-                    "unexpected const-val {:?} with ctor {:?}",
-                    value,
-                    constructor,
-                ),
-            };
-            if ctor_wild_subpatterns.len() as u64 != n {
-                return None;
-            }
-
-            // Convert a constant slice/array pattern to a list of patterns.
-            let layout = cx.tcx.layout_of(cx.param_env.and(ty)).ok()?;
-            let ptr = Pointer::new(AllocId(0), offset);
-            let pats = cx.pattern_arena.alloc_from_iter((0..n).filter_map(|i| {
-                let ptr = ptr.offset(layout.size * i, &cx.tcx).ok()?;
-                let scalar = alloc.read_scalar(&cx.tcx, ptr, layout.size).ok()?;
-                let scalar = scalar.check_init().ok()?;
-                let value = ty::Const::from_scalar(cx.tcx, scalar, ty);
-                let pattern = Pat { ty, span: pat.span, kind: box PatKind::Constant { value } };
-                Some(pattern)
-            }));
-            // Ensure none of the dereferences failed.
-            if pats.len() as u64 != n {
-                return None;
-            }
-            Some(Fields::from_slice_unfiltered(pats))
-        }
-
-        PatKind::Constant { .. } | PatKind::Range { .. } => {
-            // If the constructor is a:
-            // - Single value: add a row if the pattern contains the constructor.
-            // - Range: add a row if the constructor intersects the pattern.
-            if let IntRange(ctor) = constructor {
-                let pat = IntRange::from_pat(cx.tcx, cx.param_env, pat)?;
-                ctor.intersection(cx.tcx, &pat)?;
-                // Constructor splitting should ensure that all intersections we encounter
-                // are actually inclusions.
-                assert!(ctor.is_subrange(&pat));
-            } else {
-                // Fallback for non-ranges and ranges that involve
-                // floating-point numbers, which are not conveniently handled
-                // by `IntRange`. For these cases, the constructor may not be a
-                // range so intersection actually devolves into being covered
-                // by the pattern.
-                constructor_covered_by_range(cx.tcx, cx.param_env, constructor, pat)?;
-            }
-            Some(Fields::empty())
-        }
-
-        PatKind::Array { ref prefix, ref slice, ref suffix }
-        | PatKind::Slice { ref prefix, ref slice, ref suffix } => match *constructor {
-            Slice(_) => {
-                // Number of subpatterns for this pattern
-                let pat_len = prefix.len() + suffix.len();
-                // Number of subpatterns for this constructor
-                let arity = ctor_wild_subpatterns.len();
-
-                if (slice.is_none() && arity != pat_len) || pat_len > arity {
-                    return None;
-                }
-
-                // Replace the prefix and the suffix with the given patterns, leaving wildcards in
-                // the middle if there was a subslice pattern `..`.
-                let prefix = prefix.iter().enumerate();
-                let suffix = suffix.iter().enumerate().map(|(i, p)| (arity - suffix.len() + i, p));
-                Some(ctor_wild_subpatterns.replace_fields_indexed(prefix.chain(suffix)))
-            }
-            ConstantValue(cv) => {
-                match slice_pat_covered_by_const(
-                    cx.tcx,
-                    pat.span,
-                    cv,
-                    prefix,
-                    slice,
-                    suffix,
-                    cx.param_env,
-                ) {
-                    Ok(true) => Some(Fields::empty()),
-                    Ok(false) => None,
-                    Err(ErrorReported) => None,
-                }
-            }
-            _ => span_bug!(pat.span, "unexpected ctor {:?} for slice pat", constructor),
-        },
-
-        PatKind::Or { .. } => bug!("Or-pattern should have been expanded earlier on."),
-    };
-    debug!(
-        "specialize({:#?}, {:#?}, {:#?}) = {:#?}",
-        pat, constructor, ctor_wild_subpatterns, result
-    );
-
-    result
-}
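
As a rough mental model of the specialization step documented above (illustrative types only, not the compiler's API): specializing a pattern against a constructor either fails, or replaces the pattern with one sub-pattern per field of that constructor.

    // Illustrative-only sketch; `Pat` and `specialize` are made-up stand-ins.
    #[derive(Clone, Debug, PartialEq)]
    enum Pat {
        Wild,
        Int(i64),
        Tuple(Vec<Pat>),
    }

    fn specialize(pat: &Pat, arity: usize) -> Option<Vec<Pat>> {
        match pat {
            // A wildcard matches any constructor: expand into `arity` wildcards.
            Pat::Wild => Some(vec![Pat::Wild; arity]),
            // A tuple pattern is flattened into its fields.
            Pat::Tuple(fields) if fields.len() == arity => Some(fields.clone()),
            // Anything else does not have this constructor.
            _ => None,
        }
    }

    fn main() {
        let pat = Pat::Tuple(vec![Pat::Int(1), Pat::Wild]);
        assert_eq!(specialize(&pat, 2), Some(vec![Pat::Int(1), Pat::Wild]));
        assert_eq!(specialize(&Pat::Wild, 2), Some(vec![Pat::Wild, Pat::Wild]));
        assert_eq!(specialize(&Pat::Int(1), 2), None);
    }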
diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
index 047bf7d..14ed93f 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
@@ -69,15 +69,16 @@
             hir::LocalSource::ForLoopDesugar => ("`for` loop binding", None),
             hir::LocalSource::AsyncFn => ("async fn binding", None),
             hir::LocalSource::AwaitDesugar => ("`await` future binding", None),
+            hir::LocalSource::AssignDesugar(_) => ("destructuring assignment binding", None),
         };
         self.check_irrefutable(&loc.pat, msg, sp);
-        self.check_patterns(false, &loc.pat);
+        self.check_patterns(&loc.pat);
     }
 
     fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
         intravisit::walk_param(self, param);
         self.check_irrefutable(&param.pat, "function argument", None);
-        self.check_patterns(false, &param.pat);
+        self.check_patterns(&param.pat);
     }
 }
 
@@ -96,14 +97,14 @@
                 }
                 PatternError::FloatBug => {
                     // FIXME(#31407) this is only necessary because float parsing is buggy
-                    ::rustc_middle::mir::interpret::struct_error(
+                    rustc_middle::mir::interpret::struct_error(
                         self.tcx.at(pat_span),
                         "could not evaluate float literal (see issue #31407)",
                     )
                     .emit();
                 }
                 PatternError::NonConstPath(span) => {
-                    ::rustc_middle::mir::interpret::struct_error(
+                    rustc_middle::mir::interpret::struct_error(
                         self.tcx.at(span),
                         "runtime values cannot be referenced in patterns",
                     )
@@ -119,10 +120,7 @@
 }
 
 impl<'tcx> MatchVisitor<'_, 'tcx> {
-    fn check_patterns(&mut self, has_guard: bool, pat: &Pat<'_>) {
-        if !self.tcx.features().move_ref_pattern {
-            check_legality_of_move_bindings(self, has_guard, pat);
-        }
+    fn check_patterns(&mut self, pat: &Pat<'_>) {
         pat.walk_always(|pat| check_borrow_conflicts_in_at_patterns(self, pat));
         if !self.tcx.features().bindings_after_at {
             check_legality_of_bindings_in_at_patterns(self, pat);
@@ -140,7 +138,7 @@
         patcx.include_lint_checks();
         let pattern = patcx.lower_pattern(pat);
         let pattern_ty = pattern.ty;
-        let pattern: &_ = cx.pattern_arena.alloc(expand_pattern(cx, pattern));
+        let pattern: &_ = cx.pattern_arena.alloc(expand_pattern(pattern));
         if !patcx.errors.is_empty() {
             *have_errors = true;
             patcx.report_inlining_errors(pat.span);
@@ -165,7 +163,7 @@
     ) {
         for arm in arms {
             // Check the arm for some things unrelated to exhaustiveness.
-            self.check_patterns(arm.guard.is_some(), &arm.pat);
+            self.check_patterns(&arm.pat);
         }
 
         let mut cx = self.new_cx(scrut.hir_id);
@@ -392,8 +390,11 @@
                     hir::MatchSource::AwaitDesugar | hir::MatchSource::TryDesugar => {}
                 }
             }
-            Useful(unreachable_subpatterns) => {
-                for span in unreachable_subpatterns {
+            Useful(unreachables) => {
+                let mut unreachables: Vec<_> = unreachables.into_iter().flatten().collect();
+                // Emit lints in the order in which they occur in the file.
+                unreachables.sort_unstable();
+                for span in unreachables {
                     unreachable_pattern(cx.tcx, span, id, None);
                 }
             }
@@ -601,65 +602,6 @@
     !cx.typeck_results.node_type(hir_id).is_copy_modulo_regions(cx.tcx.at(span), cx.param_env)
 }
 
-/// Check the legality of by-move bindings.
-fn check_legality_of_move_bindings(cx: &mut MatchVisitor<'_, '_>, has_guard: bool, pat: &Pat<'_>) {
-    let sess = cx.tcx.sess;
-    let typeck_results = cx.typeck_results;
-
-    // Find all by-ref spans.
-    let mut by_ref_spans = Vec::new();
-    pat.each_binding(|_, hir_id, span, _| {
-        if let Some(ty::BindByReference(_)) =
-            typeck_results.extract_binding_mode(sess, hir_id, span)
-        {
-            by_ref_spans.push(span);
-        }
-    });
-
-    // Find bad by-move spans:
-    let by_move_spans = &mut Vec::new();
-    let mut check_move = |p: &Pat<'_>, sub: Option<&Pat<'_>>| {
-        // Check legality of moving out of the enum.
-        //
-        // `x @ Foo(..)` is legal, but `x @ Foo(y)` isn't.
-        if sub.map_or(false, |p| p.contains_bindings()) {
-            struct_span_err!(sess, p.span, E0007, "cannot bind by-move with sub-bindings")
-                .span_label(p.span, "binds an already bound by-move value by moving it")
-                .emit();
-        } else if !has_guard && !by_ref_spans.is_empty() {
-            by_move_spans.push(p.span);
-        }
-    };
-    pat.walk_always(|p| {
-        if let hir::PatKind::Binding(.., sub) = &p.kind {
-            if let Some(ty::BindByValue(_)) =
-                typeck_results.extract_binding_mode(sess, p.hir_id, p.span)
-            {
-                if is_binding_by_move(cx, p.hir_id, p.span) {
-                    check_move(p, sub.as_deref());
-                }
-            }
-        }
-    });
-
-    // Found some bad by-move spans, error!
-    if !by_move_spans.is_empty() {
-        let mut err = feature_err(
-            &sess.parse_sess,
-            sym::move_ref_pattern,
-            by_move_spans.clone(),
-            "binding by-move and by-ref in the same pattern is unstable",
-        );
-        for span in by_ref_spans.iter() {
-            err.span_label(*span, "by-ref pattern here");
-        }
-        for span in by_move_spans.iter() {
-            err.span_label(*span, "by-move pattern here");
-        }
-        err.emit();
-    }
-}
-
 /// Check that there are no borrow or move conflicts in `binding @ subpat` patterns.
 ///
 /// For example, this would reject:
diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
index a203b3a..32fc0f0 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
@@ -18,6 +18,7 @@
     /// Converts an evaluated constant to a pattern (if possible).
     /// This means aggregate values (like structs and enums) are converted
     /// to a pattern that matches the value (as if you'd compared via structural equality).
+    #[instrument(skip(self))]
     pub(super) fn const_to_pat(
         &self,
         cv: &'tcx ty::Const<'tcx>,
@@ -25,15 +26,12 @@
         span: Span,
         mir_structural_match_violation: bool,
     ) -> Pat<'tcx> {
-        debug!("const_to_pat: cv={:#?} id={:?}", cv, id);
-        debug!("const_to_pat: cv.ty={:?} span={:?}", cv.ty, span);
-
         let pat = self.tcx.infer_ctxt().enter(|infcx| {
             let mut convert = ConstToPat::new(self, id, span, infcx);
             convert.to_pat(cv, mir_structural_match_violation)
         });
 
-        debug!("const_to_pat: pat={:?}", pat);
+        debug!(?pat);
         pat
     }
 }
@@ -61,6 +59,8 @@
     infcx: InferCtxt<'a, 'tcx>,
 
     include_lint_checks: bool,
+
+    treat_byte_string_as_slice: bool,
 }
 
 mod fallback_to_const_ref {
@@ -88,6 +88,7 @@
         span: Span,
         infcx: InferCtxt<'a, 'tcx>,
     ) -> Self {
+        trace!(?pat_ctxt.typeck_results.hir_owner);
         ConstToPat {
             id,
             span,
@@ -97,6 +98,10 @@
             saw_const_match_error: Cell::new(false),
             saw_const_match_lint: Cell::new(false),
             behind_reference: Cell::new(false),
+            treat_byte_string_as_slice: pat_ctxt
+                .typeck_results
+                .treat_byte_string_as_slice
+                .contains(&id.local_id),
         }
     }
 
@@ -153,6 +158,7 @@
         cv: &'tcx ty::Const<'tcx>,
         mir_structural_match_violation: bool,
     ) -> Pat<'tcx> {
+        trace!(self.treat_byte_string_as_slice);
         // This method is just a wrapper handling a validity check; the heavy lifting is
         // performed by the recursive `recur` method, which is not meant to be
         // invoked except by this method.
@@ -384,18 +390,42 @@
                     }
                     PatKind::Wild
                 }
-                // `&str` and `&[u8]` are represented as `ConstValue::Slice`, let's keep using this
+                // `&str` is represented as `ConstValue::Slice`, let's keep using this
                 // optimization for now.
                 ty::Str => PatKind::Constant { value: cv },
-                ty::Slice(elem_ty) if elem_ty == tcx.types.u8 => PatKind::Constant { value: cv },
                 // `b"foo"` produces a `&[u8; 3]`, but you can't use constants of array type when
                 // matching against references, you can only use byte string literals.
-                // FIXME: clean this up, likely by permitting array patterns when matching on slices
-                ty::Array(elem_ty, _) if elem_ty == tcx.types.u8 => PatKind::Constant { value: cv },
+                // The typechecker has a special case for byte string literals, by treating them
+                // as slices. This means we turn `&[T; N]` constants into slice patterns, which
+                // has no negative effects on pattern matching, even if we're actually matching on
+                // arrays.
+                ty::Array(..) if !self.treat_byte_string_as_slice => {
+                    let old = self.behind_reference.replace(true);
+                    let array = tcx.deref_const(self.param_env.and(cv));
+                    let val = PatKind::Deref {
+                        subpattern: Pat {
+                            kind: Box::new(PatKind::Array {
+                                prefix: tcx
+                                    .destructure_const(param_env.and(array))
+                                    .fields
+                                    .iter()
+                                    .map(|val| self.recur(val, false))
+                                    .collect::<Result<_, _>>()?,
+                                slice: None,
+                                suffix: vec![],
+                            }),
+                            span,
+                            ty: pointee_ty,
+                        },
+                    };
+                    self.behind_reference.set(old);
+                    val
+                }
+                ty::Array(elem_ty, _) |
                 // Cannot merge this with the catch all branch below, because the `const_deref`
-                // changes the type from slice to array, and slice patterns behave differently from
-                // array patterns.
-                ty::Slice(..) => {
+                // changes the type from slice to array; we need to keep the original type in the
+                // pattern.
+                ty::Slice(elem_ty) => {
                     let old = self.behind_reference.replace(true);
                     let array = tcx.deref_const(self.param_env.and(cv));
                     let val = PatKind::Deref {
@@ -411,7 +441,7 @@
                                 suffix: vec![],
                             }),
                             span,
-                            ty: pointee_ty,
+                            ty: tcx.mk_slice(elem_ty),
                         },
                     };
                     self.behind_reference.set(old);
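
At the source level, the `treat_byte_string_as_slice` case above corresponds to code like the following (a plain illustration, independent of the compiler internals): a byte string literal of type `&[u8; N]` used as a pattern for a `&[u8]` scrutinee and lowered as a slice pattern.

    fn is_elf(header: &[u8]) -> bool {
        match header {
            // The literal has type `&[u8; 4]`, but the typechecker accepts it
            // against a `&[u8]` scrutinee and it is matched as a slice.
            b"\x7fELF" => true,
            _ => false,
        }
    }

    fn main() {
        assert!(is_elf(b"\x7fELF"));
        assert!(!is_elf(b"not an ELF header"));
    }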
diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
index 718ed78..db0ecd7 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
@@ -15,7 +15,7 @@
 use rustc_hir::pat_util::EnumerateAndAdjustIterator;
 use rustc_hir::RangeEnd;
 use rustc_index::vec::Idx;
-use rustc_middle::mir::interpret::{get_slice_bytes, sign_extend, ConstValue};
+use rustc_middle::mir::interpret::{get_slice_bytes, ConstValue};
 use rustc_middle::mir::interpret::{ErrorHandled, LitToConstError, LitToConstInput};
 use rustc_middle::mir::UserTypeProjection;
 use rustc_middle::mir::{BorrowKind, Field, Mutability};
@@ -158,6 +158,13 @@
         subpattern: Pat<'tcx>,
     },
 
+    /// One of the following:
+    /// * `&str`, which will be handled as a string pattern and thus exhaustiveness
+    ///   checking will detect if you use the same string twice in different patterns.
+    /// * integer, bool, char or float, which will be handled by exhaustiveness to cover exactly
+    ///   its own value, similar to `&str`, but these values are much simpler.
+    /// * Opaque constants that must not be matched structurally, i.e. anything that does not
+    ///   derive `PartialEq` and `Eq`.
     Constant {
         value: &'tcx ty::Const<'tcx>,
     },
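
The `&str` bullet above is what makes usefulness checking catch a duplicated string arm. A stand-alone illustration in ordinary Rust (unrelated to the compiler's own types):

    fn greeting(lang: &str) -> &'static str {
        match lang {
            "en" => "hello",
            "fr" => "bonjour",
            // Usefulness checking sees that `"en"` is already covered above and
            // flags this arm with the `unreachable_patterns` lint.
            "en" => "hi",
            _ => "?",
        }
    }

    fn main() {
        assert_eq!(greeting("fr"), "bonjour");
    }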
@@ -216,10 +223,7 @@
                     BindingMode::ByValue => mutability == Mutability::Mut,
                     BindingMode::ByRef(bk) => {
                         write!(f, "ref ")?;
-                        match bk {
-                            BorrowKind::Mut { .. } => true,
-                            _ => false,
-                        }
+                        matches!(bk, BorrowKind::Mut { .. })
                     }
                 };
                 if is_mut {
@@ -856,6 +860,11 @@
             *self.lower_path(qpath, expr.hir_id, expr.span).kind
         } else {
             let (lit, neg) = match expr.kind {
+                hir::ExprKind::ConstBlock(ref anon_const) => {
+                    let anon_const_def_id = self.tcx.hir().local_def_id(anon_const.hir_id);
+                    let value = ty::Const::from_anon_const(self.tcx, anon_const_def_id);
+                    return *self.const_to_pat(value, expr.hir_id, expr.span, false).kind;
+                }
                 hir::ExprKind::Lit(ref lit) => (lit, false),
                 hir::ExprKind::Unary(hir::UnOp::UnNeg, ref expr) => {
                     let lit = match expr.kind {
@@ -1060,21 +1069,21 @@
         use rustc_apfloat::Float;
         return match *ty.kind() {
             ty::Float(ast::FloatTy::F32) => {
-                let l = ::rustc_apfloat::ieee::Single::from_bits(a);
-                let r = ::rustc_apfloat::ieee::Single::from_bits(b);
+                let l = rustc_apfloat::ieee::Single::from_bits(a);
+                let r = rustc_apfloat::ieee::Single::from_bits(b);
                 l.partial_cmp(&r)
             }
             ty::Float(ast::FloatTy::F64) => {
-                let l = ::rustc_apfloat::ieee::Double::from_bits(a);
-                let r = ::rustc_apfloat::ieee::Double::from_bits(b);
+                let l = rustc_apfloat::ieee::Double::from_bits(a);
+                let r = rustc_apfloat::ieee::Double::from_bits(b);
                 l.partial_cmp(&r)
             }
             ty::Int(ity) => {
                 use rustc_attr::SignedInt;
                 use rustc_middle::ty::layout::IntegerExt;
                 let size = rustc_target::abi::Integer::from_attr(&tcx, SignedInt(ity)).size();
-                let a = sign_extend(a, size);
-                let b = sign_extend(b, size);
+                let a = size.sign_extend(a);
+                let b = size.sign_extend(b);
                 Some((a as i128).cmp(&(b as i128)))
             }
             _ => Some(a.cmp(&b)),
diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs
index 32b1249..0dfacd7 100644
--- a/compiler/rustc_parse/src/lexer/mod.rs
+++ b/compiler/rustc_parse/src/lexer/mod.rs
@@ -511,7 +511,7 @@
     }
 
     /// Note: It was decided not to add a test case, because it would be too big.
-    /// https://github.com/rust-lang/rust/pull/50296#issuecomment-392135180
+    /// <https://github.com/rust-lang/rust/pull/50296#issuecomment-392135180>
     fn report_too_many_hashes(&self, start: BytePos, found: usize) -> ! {
         self.fatal_span_(
             start,
diff --git a/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
index 6f249f4..47d317f 100644
--- a/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
+++ b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
@@ -181,10 +181,9 @@
 
             if suggestion_len > 0 {
                 suggestion.push('}');
-                let lo = char_span.lo();
-                let hi = lo + BytePos(suggestion_len as u32);
+                let hi = char_span.lo() + BytePos(suggestion_len as u32);
                 diag.span_suggestion(
-                    span.with_lo(lo).with_hi(hi),
+                    span.with_hi(hi),
                     "format of unicode escape sequences uses braces",
                     suggestion,
                     Applicability::MaybeIncorrect,
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index b68d36c..f125a12 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -7,8 +7,8 @@
 #![feature(or_patterns)]
 
 use rustc_ast as ast;
-use rustc_ast::token::{self, Nonterminal, Token, TokenKind};
-use rustc_ast::tokenstream::{self, TokenStream, TokenTree};
+use rustc_ast::token::{self, DelimToken, Nonterminal, Token, TokenKind};
+use rustc_ast::tokenstream::{self, LazyTokenStream, TokenStream, TokenTree};
 use rustc_ast_pretty::pprust;
 use rustc_data_structures::sync::Lrc;
 use rustc_errors::{Diagnostic, FatalError, Level, PResult};
@@ -22,7 +22,7 @@
 
 use tracing::{debug, info};
 
-pub const MACRO_ARGUMENTS: Option<&'static str> = Some("macro arguments");
+pub const MACRO_ARGUMENTS: Option<&str> = Some("macro arguments");
 
 #[macro_use]
 pub mod parser;
@@ -114,16 +114,6 @@
     source_file_to_parser(sess, file_to_source_file(sess, path, sp))
 }
 
-/// Creates a new parser, returning buffered diagnostics if the file doesn't exist,
-/// or from lexing the initial token stream.
-pub fn maybe_new_parser_from_file<'a>(
-    sess: &'a ParseSess,
-    path: &Path,
-) -> Result<Parser<'a>, Vec<Diagnostic>> {
-    let file = try_file_to_source_file(sess, path, None).map_err(|db| vec![db])?;
-    maybe_source_file_to_parser(sess, file)
-}
-
 /// Given a `source_file` and config, returns a parser.
 fn source_file_to_parser(sess: &ParseSess, source_file: Lrc<SourceFile>) -> Parser<'_> {
     panictry_buffer!(&sess.span_diagnostic, maybe_source_file_to_parser(sess, source_file))
@@ -146,12 +136,6 @@
     Ok(parser)
 }
 
-// Must preserve old name for now, because `quote!` from the *existing*
-// compiler expands into it.
-pub fn new_parser_from_tts(sess: &ParseSess, tts: Vec<TokenTree>) -> Parser<'_> {
-    stream_to_parser(sess, tts.into_iter().collect(), crate::MACRO_ARGUMENTS)
-}
-
 // Base abstractions
 
 /// Given a session and a path and an optional span (for error reporting),
@@ -264,40 +248,46 @@
     // As a result, some AST nodes are annotated with the token stream they
     // came from. Here we attempt to extract these lossless token streams
     // before we fall back to the stringification.
+
+    let convert_tokens =
+        |tokens: &Option<LazyTokenStream>| tokens.as_ref().map(|t| t.create_token_stream());
+
     let tokens = match *nt {
-        Nonterminal::NtItem(ref item) => {
-            prepend_attrs(sess, &item.attrs, item.tokens.as_ref(), span)
-        }
-        Nonterminal::NtBlock(ref block) => block.tokens.clone(),
+        Nonterminal::NtItem(ref item) => prepend_attrs(&item.attrs, item.tokens.as_ref()),
+        Nonterminal::NtBlock(ref block) => convert_tokens(&block.tokens),
         Nonterminal::NtStmt(ref stmt) => {
             // FIXME: We currently only collect tokens for `:stmt`
             // matchers in `macro_rules!` macros. When we start collecting
             // tokens for attributes on statements, we will need to prepend
             // attributes here
-            stmt.tokens.clone()
+            convert_tokens(&stmt.tokens)
         }
-        Nonterminal::NtPat(ref pat) => pat.tokens.clone(),
-        Nonterminal::NtTy(ref ty) => ty.tokens.clone(),
+        Nonterminal::NtPat(ref pat) => convert_tokens(&pat.tokens),
+        Nonterminal::NtTy(ref ty) => convert_tokens(&ty.tokens),
         Nonterminal::NtIdent(ident, is_raw) => {
             Some(tokenstream::TokenTree::token(token::Ident(ident.name, is_raw), ident.span).into())
         }
         Nonterminal::NtLifetime(ident) => {
             Some(tokenstream::TokenTree::token(token::Lifetime(ident.name), ident.span).into())
         }
-        Nonterminal::NtMeta(ref attr) => attr.tokens.clone(),
-        Nonterminal::NtPath(ref path) => path.tokens.clone(),
-        Nonterminal::NtVis(ref vis) => vis.tokens.clone(),
+        Nonterminal::NtMeta(ref attr) => convert_tokens(&attr.tokens),
+        Nonterminal::NtPath(ref path) => convert_tokens(&path.tokens),
+        Nonterminal::NtVis(ref vis) => convert_tokens(&vis.tokens),
         Nonterminal::NtTT(ref tt) => Some(tt.clone().into()),
         Nonterminal::NtExpr(ref expr) | Nonterminal::NtLiteral(ref expr) => {
             if expr.tokens.is_none() {
                 debug!("missing tokens for expr {:?}", expr);
             }
-            prepend_attrs(sess, &expr.attrs, expr.tokens.as_ref(), span)
+            prepend_attrs(&expr.attrs, expr.tokens.as_ref())
         }
     };
 
     // FIXME(#43081): Avoid this pretty-print + reparse hack
-    let source = pprust::nonterminal_to_string(nt);
+    // Pretty-print the AST struct without inserting any parentheses
+    // beyond those explicitly written by the user (e.g. `ExpnKind::Paren`).
+    // The resulting stream may have incorrect precedence, but it's only
+    // ever used for a comparison against the captured token stream.
+    let source = pprust::nonterminal_to_string_no_extra_parens(nt);
     let filename = FileName::macro_expansion_source_code(&source);
     let reparsed_tokens = parse_stream_from_source_str(filename, source, sess, Some(span));
 
@@ -325,15 +315,43 @@
     // modifications, including adding/removing typically non-semantic
     // tokens such as extra braces and commas, don't happen.
     if let Some(tokens) = tokens {
-        if tokenstream_probably_equal_for_proc_macro(&tokens, &reparsed_tokens, sess) {
+        // Compare with a non-relaxed delim match to start.
+        if tokenstream_probably_equal_for_proc_macro(&tokens, &reparsed_tokens, sess, false) {
             return tokens;
         }
+
+        // The check failed. This time, we pretty-print the AST struct with parentheses
+        // inserted to preserve precedence. This may cause `None`-delimiters in the captured
+        // token stream to match up with inserted parentheses in the reparsed stream.
+        let source_with_parens = pprust::nonterminal_to_string(nt);
+        let filename_with_parens = FileName::macro_expansion_source_code(&source_with_parens);
+        let reparsed_tokens_with_parens = parse_stream_from_source_str(
+            filename_with_parens,
+            source_with_parens,
+            sess,
+            Some(span),
+        );
+
+        // Compare with a relaxed delim match - we want inserted parentheses in the
+        // reparsed stream to match `None`-delimiters in the original stream.
+        if tokenstream_probably_equal_for_proc_macro(
+            &tokens,
+            &reparsed_tokens_with_parens,
+            sess,
+            true,
+        ) {
+            return tokens;
+        }
+
         info!(
             "cached tokens found, but they're not \"probably equal\", \
                 going with stringified version"
         );
-        info!("cached tokens: {:?}", tokens);
-        info!("reparsed tokens: {:?}", reparsed_tokens);
+        info!("cached   tokens: {}", pprust::tts_to_string(&tokens));
+        info!("reparsed tokens: {}", pprust::tts_to_string(&reparsed_tokens_with_parens));
+
+        info!("cached   tokens debug: {:?}", tokens);
+        info!("reparsed tokens debug: {:?}", reparsed_tokens_with_parens);
     }
     reparsed_tokens
 }
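
The relaxed-delimiter comparison above exists because macro-captured nonterminals carry invisible (`None`-delimited) groups that the pretty-printer renders as real parentheses. The effect is easiest to see from the user side (a stand-alone illustration, not compiler code):

    macro_rules! double {
        ($e:expr) => {
            $e * 2
        };
    }

    fn main() {
        // `$e` is substituted as an invisible (`None`-delimited) group around `1 + 1`,
        // so the expansion behaves like `(1 + 1) * 2`. Pretty-printing the expanded AST
        // writes real parentheses, which is what the relaxed comparison has to accept.
        assert_eq!(double!(1 + 1), 4);
    }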
@@ -347,6 +365,7 @@
     tokens: &TokenStream,
     reparsed_tokens: &TokenStream,
     sess: &ParseSess,
+    relaxed_delim_match: bool,
 ) -> bool {
     // When checking for `probably_eq`, we ignore certain tokens that aren't
     // preserved in the AST. Because they are not preserved, the pretty
@@ -472,7 +491,9 @@
     let tokens = tokens.trees().flat_map(|t| expand_token(t, sess));
     let reparsed_tokens = reparsed_tokens.trees().flat_map(|t| expand_token(t, sess));
 
-    tokens.eq_by(reparsed_tokens, |t, rt| tokentree_probably_equal_for_proc_macro(&t, &rt, sess))
+    tokens.eq_by(reparsed_tokens, |t, rt| {
+        tokentree_probably_equal_for_proc_macro(&t, &rt, sess, relaxed_delim_match)
+    })
 }
 
 // See comments in `Nonterminal::to_tokenstream` for why we care about
@@ -484,6 +505,7 @@
     token: &TokenTree,
     reparsed_token: &TokenTree,
     sess: &ParseSess,
+    relaxed_delim_match: bool,
 ) -> bool {
     match (token, reparsed_token) {
         (TokenTree::Token(token), TokenTree::Token(reparsed_token)) => {
@@ -492,9 +514,33 @@
         (
             TokenTree::Delimited(_, delim, tokens),
             TokenTree::Delimited(_, reparsed_delim, reparsed_tokens),
-        ) => {
-            delim == reparsed_delim
-                && tokenstream_probably_equal_for_proc_macro(tokens, reparsed_tokens, sess)
+        ) if delim == reparsed_delim => tokenstream_probably_equal_for_proc_macro(
+            tokens,
+            reparsed_tokens,
+            sess,
+            relaxed_delim_match,
+        ),
+        (TokenTree::Delimited(_, DelimToken::NoDelim, tokens), reparsed_token) => {
+            if relaxed_delim_match {
+                if let TokenTree::Delimited(_, DelimToken::Paren, reparsed_tokens) = reparsed_token
+                {
+                    if tokenstream_probably_equal_for_proc_macro(
+                        tokens,
+                        reparsed_tokens,
+                        sess,
+                        relaxed_delim_match,
+                    ) {
+                        return true;
+                    }
+                }
+            }
+            tokens.len() == 1
+                && tokentree_probably_equal_for_proc_macro(
+                    &tokens.trees().next().unwrap(),
+                    reparsed_token,
+                    sess,
+                    relaxed_delim_match,
+                )
         }
         _ => false,
     }
@@ -556,64 +602,22 @@
 }
 
 fn prepend_attrs(
-    sess: &ParseSess,
     attrs: &[ast::Attribute],
-    tokens: Option<&tokenstream::TokenStream>,
-    span: rustc_span::Span,
+    tokens: Option<&tokenstream::LazyTokenStream>,
 ) -> Option<tokenstream::TokenStream> {
-    let tokens = tokens?;
+    let tokens = tokens?.create_token_stream();
     if attrs.is_empty() {
-        return Some(tokens.clone());
+        return Some(tokens);
     }
     let mut builder = tokenstream::TokenStreamBuilder::new();
     for attr in attrs {
-        assert_eq!(
-            attr.style,
-            ast::AttrStyle::Outer,
-            "inner attributes should prevent cached tokens from existing"
-        );
-
-        let source = pprust::attribute_to_string(attr);
-        let macro_filename = FileName::macro_expansion_source_code(&source);
-
-        let item = match attr.kind {
-            ast::AttrKind::Normal(ref item) => item,
-            ast::AttrKind::DocComment(..) => {
-                let stream = parse_stream_from_source_str(macro_filename, source, sess, Some(span));
-                builder.push(stream);
-                continue;
-            }
-        };
-
-        // synthesize # [ $path $tokens ] manually here
-        let mut brackets = tokenstream::TokenStreamBuilder::new();
-
-        // For simple paths, push the identifier directly
-        if item.path.segments.len() == 1 && item.path.segments[0].args.is_none() {
-            let ident = item.path.segments[0].ident;
-            let token = token::Ident(ident.name, ident.as_str().starts_with("r#"));
-            brackets.push(tokenstream::TokenTree::token(token, ident.span));
-
-        // ... and for more complicated paths, fall back to a reparse hack that
-        // should eventually be removed.
-        } else {
-            let stream = parse_stream_from_source_str(macro_filename, source, sess, Some(span));
-            brackets.push(stream);
+        // FIXME: Correctly handle tokens for inner attributes.
+        // For now, we fall back to reparsing the original AST node
+        if attr.style == ast::AttrStyle::Inner {
+            return None;
         }
-
-        brackets.push(item.args.outer_tokens());
-
-        // The span we list here for `#` and for `[ ... ]` are both wrong in
-        // that it encompasses more than each token, but it hopefully is "good
-        // enough" for now at least.
-        builder.push(tokenstream::TokenTree::token(token::Pound, attr.span));
-        let delim_span = tokenstream::DelimSpan::from_single(attr.span);
-        builder.push(tokenstream::TokenTree::Delimited(
-            delim_span,
-            token::DelimToken::Bracket,
-            brackets.build(),
-        ));
+        builder.push(attr.tokens());
     }
-    builder.push(tokens.clone());
+    builder.push(tokens);
     Some(builder.build())
 }
diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs
index 98f9409..3738fba 100644
--- a/compiler/rustc_parse/src/parser/attr.rs
+++ b/compiler/rustc_parse/src/parser/attr.rs
@@ -4,7 +4,7 @@
 use rustc_ast::token::{self, Nonterminal};
 use rustc_ast_pretty::pprust;
 use rustc_errors::{error_code, PResult};
-use rustc_span::Span;
+use rustc_span::{sym, Span};
 
 use tracing::debug;
 
@@ -30,7 +30,7 @@
         let mut just_parsed_doc_comment = false;
         loop {
             debug!("parse_outer_attributes: self.token={:?}", self.token);
-            if self.check(&token::Pound) {
+            let attr = if self.check(&token::Pound) {
                 let inner_error_reason = if just_parsed_doc_comment {
                     "an inner attribute is not permitted following an outer doc comment"
                 } else if !attrs.is_empty() {
@@ -43,12 +43,10 @@
                     saw_doc_comment: just_parsed_doc_comment,
                     prev_attr_sp: attrs.last().map(|a| a.span),
                 };
-                let attr = self.parse_attribute_with_inner_parse_policy(inner_parse_policy)?;
-                attrs.push(attr);
                 just_parsed_doc_comment = false;
+                Some(self.parse_attribute(inner_parse_policy)?)
             } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
-                let attr = attr::mk_doc_comment(comment_kind, attr_style, data, self.token.span);
-                if attr.style != ast::AttrStyle::Outer {
+                if attr_style != ast::AttrStyle::Outer {
                     self.sess
                         .span_diagnostic
                         .struct_span_err_with_code(
@@ -58,13 +56,19 @@
                         )
                         .note(
                             "inner doc comments like this (starting with \
-                             `//!` or `/*!`) can only appear before items",
+                         `//!` or `/*!`) can only appear before items",
                         )
                         .emit();
                 }
-                attrs.push(attr);
                 self.bump();
                 just_parsed_doc_comment = true;
+                Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span))
+            } else {
+                None
+            };
+
+            if let Some(attr) = attr {
+                attrs.push(attr);
             } else {
                 break;
             }
@@ -73,49 +77,43 @@
     }
 
     /// Matches `attribute = # ! [ meta_item ]`.
-    ///
-    /// If `permit_inner` is `true`, then a leading `!` indicates an inner
-    /// attribute.
-    pub fn parse_attribute(&mut self, permit_inner: bool) -> PResult<'a, ast::Attribute> {
-        debug!("parse_attribute: permit_inner={:?} self.token={:?}", permit_inner, self.token);
-        let inner_parse_policy =
-            if permit_inner { InnerAttrPolicy::Permitted } else { DEFAULT_INNER_ATTR_FORBIDDEN };
-        self.parse_attribute_with_inner_parse_policy(inner_parse_policy)
-    }
-
-    /// The same as `parse_attribute`, except it takes in an `InnerAttrPolicy`
-    /// that prescribes how to handle inner attributes.
-    fn parse_attribute_with_inner_parse_policy(
+    /// `inner_parse_policy` prescribes how to handle inner attributes.
+    fn parse_attribute(
         &mut self,
         inner_parse_policy: InnerAttrPolicy<'_>,
     ) -> PResult<'a, ast::Attribute> {
         debug!(
-            "parse_attribute_with_inner_parse_policy: inner_parse_policy={:?} self.token={:?}",
+            "parse_attribute: inner_parse_policy={:?} self.token={:?}",
             inner_parse_policy, self.token
         );
         let lo = self.token.span;
-        let (span, item, style) = if self.eat(&token::Pound) {
-            let style =
-                if self.eat(&token::Not) { ast::AttrStyle::Inner } else { ast::AttrStyle::Outer };
+        let ((item, style, span), tokens) = self.collect_tokens(|this| {
+            if this.eat(&token::Pound) {
+                let style = if this.eat(&token::Not) {
+                    ast::AttrStyle::Inner
+                } else {
+                    ast::AttrStyle::Outer
+                };
 
-            self.expect(&token::OpenDelim(token::Bracket))?;
-            let item = self.parse_attr_item()?;
-            self.expect(&token::CloseDelim(token::Bracket))?;
-            let attr_sp = lo.to(self.prev_token.span);
+                this.expect(&token::OpenDelim(token::Bracket))?;
+                let item = this.parse_attr_item(false)?;
+                this.expect(&token::CloseDelim(token::Bracket))?;
+                let attr_sp = lo.to(this.prev_token.span);
 
-            // Emit error if inner attribute is encountered and forbidden.
-            if style == ast::AttrStyle::Inner {
-                self.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy);
+                // Emit error if inner attribute is encountered and forbidden.
+                if style == ast::AttrStyle::Inner {
+                    this.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy);
+                }
+
+                Ok((item, style, attr_sp))
+            } else {
+                let token_str = pprust::token_to_string(&this.token);
+                let msg = &format!("expected `#`, found `{}`", token_str);
+                Err(this.struct_span_err(this.token.span, msg))
             }
+        })?;
 
-            (attr_sp, item, style)
-        } else {
-            let token_str = pprust::token_to_string(&self.token);
-            let msg = &format!("expected `#`, found `{}`", token_str);
-            return Err(self.struct_span_err(self.token.span, msg));
-        };
-
-        Ok(attr::mk_attr_from_item(style, item, span))
+        Ok(attr::mk_attr_from_item(item, tokens, style, span))
     }
 
     pub(super) fn error_on_forbidden_inner_attr(&self, attr_sp: Span, policy: InnerAttrPolicy<'_>) {
@@ -148,7 +146,7 @@
     ///     PATH
     ///     PATH `=` UNSUFFIXED_LIT
     /// The delimiters or `=` are still put into the resulting token stream.
-    pub fn parse_attr_item(&mut self) -> PResult<'a, ast::AttrItem> {
+    pub fn parse_attr_item(&mut self, capture_tokens: bool) -> PResult<'a, ast::AttrItem> {
         let item = match self.token.kind {
             token::Interpolated(ref nt) => match **nt {
                 Nonterminal::NtMeta(ref item) => Some(item.clone().into_inner()),
@@ -160,9 +158,18 @@
             self.bump();
             item
         } else {
-            let path = self.parse_path(PathStyle::Mod)?;
-            let args = self.parse_attr_args()?;
-            ast::AttrItem { path, args, tokens: None }
+            let do_parse = |this: &mut Self| {
+                let path = this.parse_path(PathStyle::Mod)?;
+                let args = this.parse_attr_args()?;
+                Ok(ast::AttrItem { path, args, tokens: None })
+            };
+            if capture_tokens {
+                let (mut item, tokens) = self.collect_tokens(do_parse)?;
+                item.tokens = tokens;
+                item
+            } else {
+                do_parse(self)?
+            }
         })
     }
 
@@ -175,20 +182,21 @@
         let mut attrs: Vec<ast::Attribute> = vec![];
         loop {
             // Only try to parse if it is an inner attribute (has `!`).
-            if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) {
-                let attr = self.parse_attribute(true)?;
-                assert_eq!(attr.style, ast::AttrStyle::Inner);
-                attrs.push(attr);
+            let attr = if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) {
+                Some(self.parse_attribute(InnerAttrPolicy::Permitted)?)
             } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
-                // We need to get the position of this token before we bump.
-                let attr = attr::mk_doc_comment(comment_kind, attr_style, data, self.token.span);
-                if attr.style == ast::AttrStyle::Inner {
-                    attrs.push(attr);
+                if attr_style == ast::AttrStyle::Inner {
                     self.bump();
+                    Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span))
                 } else {
-                    break;
+                    None
                 }
             } else {
+                None
+            };
+            if let Some(attr) = attr {
+                attrs.push(attr);
+            } else {
                 break;
             }
         }
@@ -220,7 +228,7 @@
         let mut expanded_attrs = Vec::with_capacity(1);
         while self.token.kind != token::Eof {
             let lo = self.token.span;
-            let item = self.parse_attr_item()?;
+            let item = self.parse_attr_item(true)?;
             expanded_attrs.push((item, lo.to(self.prev_token.span)));
             if !self.eat(&token::Comma) {
                 break;
@@ -302,3 +310,16 @@
         Err(self.struct_span_err(self.token.span, &msg))
     }
 }
+
+pub fn maybe_needs_tokens(attrs: &[ast::Attribute]) -> bool {
+    attrs.iter().any(|attr| {
+        if let Some(ident) = attr.ident() {
+            ident.name == sym::derive
+            // This might apply a custom attribute/derive
+            || ident.name == sym::cfg_attr
+            || !rustc_feature::is_builtin_attr_name(ident.name)
+        } else {
+            true
+        }
+    })
+}
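
A rough standalone restatement of the heuristic above (the function below is illustrative only, not the compiler's API): token capture is needed for `derive`, `cfg_attr`, and any non-builtin attribute, since those may end up expanding code.

    fn attr_may_expand_code(name: &str, is_builtin: bool) -> bool {
        name == "derive" || name == "cfg_attr" || !is_builtin
    }

    fn main() {
        assert!(attr_may_expand_code("derive", true));   // may invoke custom derives
        assert!(attr_may_expand_code("cfg_attr", true)); // may expand to such an attribute
        assert!(attr_may_expand_code("serde", false));   // unknown: possibly a proc-macro attribute
        assert!(!attr_may_expand_code("inline", true));  // builtin, never expands code
    }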
diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs
index 9ab13db..cd3b8db 100644
--- a/compiler/rustc_parse/src/parser/diagnostics.rs
+++ b/compiler/rustc_parse/src/parser/diagnostics.rs
@@ -1,12 +1,14 @@
 use super::ty::AllowPlus;
-use super::{BlockMode, Parser, PathStyle, SemiColonMode, SeqSep, TokenExpectType, TokenType};
+use super::TokenType;
+use super::{BlockMode, Parser, PathStyle, Restrictions, SemiColonMode, SeqSep, TokenExpectType};
 
 use rustc_ast::ptr::P;
 use rustc_ast::token::{self, Lit, LitKind, TokenKind};
 use rustc_ast::util::parser::AssocOp;
 use rustc_ast::{
-    self as ast, AngleBracketedArgs, AttrVec, BinOpKind, BindingMode, BlockCheckMode, Expr,
-    ExprKind, Item, ItemKind, Mutability, Param, Pat, PatKind, PathSegment, QSelf, Ty, TyKind,
+    self as ast, AngleBracketedArg, AngleBracketedArgs, AnonConst, AttrVec, BinOpKind, BindingMode,
+    Block, BlockCheckMode, Expr, ExprKind, GenericArg, Item, ItemKind, Mutability, Param, Pat,
+    PatKind, Path, PathSegment, QSelf, Ty, TyKind,
 };
 use rustc_ast_pretty::pprust;
 use rustc_data_structures::fx::FxHashSet;
@@ -18,7 +20,8 @@
 
 use tracing::{debug, trace};
 
-const TURBOFISH: &str = "use `::<...>` instead of `<...>` to specify type arguments";
+const TURBOFISH_SUGGESTION_STR: &str =
+    "use `::<...>` instead of `<...>` to specify type or const arguments";
 
 /// Creates a placeholder argument.
 pub(super) fn dummy_arg(ident: Ident) -> Param {
@@ -119,6 +122,28 @@
     No,
 }
 
+#[derive(Clone, Copy)]
+pub enum AttemptLocalParseRecovery {
+    Yes,
+    No,
+}
+
+impl AttemptLocalParseRecovery {
+    pub fn yes(&self) -> bool {
+        match self {
+            AttemptLocalParseRecovery::Yes => true,
+            AttemptLocalParseRecovery::No => false,
+        }
+    }
+
+    pub fn no(&self) -> bool {
+        match self {
+            AttemptLocalParseRecovery::Yes => false,
+            AttemptLocalParseRecovery::No => true,
+        }
+    }
+}
+
 impl<'a> Parser<'a> {
     pub(super) fn span_fatal_err<S: Into<MultiSpan>>(
         &self,
@@ -321,6 +346,66 @@
         }
     }
 
+    pub fn maybe_suggest_struct_literal(
+        &mut self,
+        lo: Span,
+        s: BlockCheckMode,
+    ) -> Option<PResult<'a, P<Block>>> {
+        if self.token.is_ident() && self.look_ahead(1, |t| t == &token::Colon) {
+            // We might be having a struct literal where people forgot to include the path:
+            // fn foo() -> Foo {
+            //     field: value,
+            // }
+            let mut snapshot = self.clone();
+            let path =
+                Path { segments: vec![], span: self.prev_token.span.shrink_to_lo(), tokens: None };
+            let struct_expr = snapshot.parse_struct_expr(path, AttrVec::new(), false);
+            let block_tail = self.parse_block_tail(lo, s, AttemptLocalParseRecovery::No);
+            return Some(match (struct_expr, block_tail) {
+                (Ok(expr), Err(mut err)) => {
+                    // We have encountered the following:
+                    // fn foo() -> Foo {
+                    //     field: value,
+                    // }
+                    // Suggest:
+                    // fn foo() -> Foo { Path {
+                    //     field: value,
+                    // } }
+                    err.delay_as_bug();
+                    self.struct_span_err(expr.span, "struct literal body without path")
+                        .multipart_suggestion(
+                            "you might have forgotten to add the struct literal inside the block",
+                            vec![
+                                (expr.span.shrink_to_lo(), "{ SomeStruct ".to_string()),
+                                (expr.span.shrink_to_hi(), " }".to_string()),
+                            ],
+                            Applicability::MaybeIncorrect,
+                        )
+                        .emit();
+                    *self = snapshot;
+                    Ok(self.mk_block(
+                        vec![self.mk_stmt_err(expr.span)],
+                        s,
+                        lo.to(self.prev_token.span),
+                    ))
+                }
+                (Err(mut err), Ok(tail)) => {
+                    // We have a block tail that contains a somehow valid type ascription expr.
+                    err.cancel();
+                    Ok(tail)
+                }
+                (Err(mut snapshot_err), Err(err)) => {
+                    // We don't know what went wrong, emit the normal error.
+                    snapshot_err.cancel();
+                    self.consume_block(token::Brace, ConsumeClosingDelim::Yes);
+                    Err(err)
+                }
+                (Ok(_), Ok(tail)) => Ok(tail),
+            });
+        }
+        None
+    }
+
     pub fn maybe_annotate_with_ascription(
         &mut self,
         err: &mut DiagnosticBuilder<'_>,
@@ -575,7 +660,7 @@
                                 Ok(_) => {
                                     e.span_suggestion_verbose(
                                         binop.span.shrink_to_lo(),
-                                        "use `::<...>` instead of `<...>` to specify type arguments",
+                                        TURBOFISH_SUGGESTION_STR,
                                         "::".to_string(),
                                         Applicability::MaybeIncorrect,
                                     );
@@ -730,7 +815,7 @@
                 let suggest = |err: &mut DiagnosticBuilder<'_>| {
                     err.span_suggestion_verbose(
                         op.span.shrink_to_lo(),
-                        TURBOFISH,
+                        TURBOFISH_SUGGESTION_STR,
                         "::".to_string(),
                         Applicability::MaybeIncorrect,
                     );
@@ -804,7 +889,7 @@
                         {
                             // All we know is that this is `foo < bar >` and *nothing* else. Try to
                             // be helpful, but don't attempt to recover.
-                            err.help(TURBOFISH);
+                            err.help(TURBOFISH_SUGGESTION_STR);
                             err.help("or use `(...)` if you meant to specify fn arguments");
                         }
 
@@ -1124,7 +1209,13 @@
             self.recover_await_prefix(await_sp)?
         };
         let sp = self.error_on_incorrect_await(lo, hi, &expr, is_question);
-        let expr = self.mk_expr(lo.to(sp), ExprKind::Await(expr), attrs);
+        let kind = match expr.kind {
+            // Avoid knock-down errors as we don't know whether to interpret this as `foo().await?`
+            // or `foo()?.await` (the very reason we went with postfix syntax šŸ˜…).
+            ExprKind::Try(_) => ExprKind::Err,
+            _ => ExprKind::Await(expr),
+        };
+        let expr = self.mk_expr(lo.to(sp), kind, attrs);
         self.maybe_recover_from_bad_qpath(expr, true)
     }
 
@@ -1268,11 +1359,7 @@
         (self.token == token::Lt && // `foo:<bar`, likely a typoed turbofish.
             self.look_ahead(1, |t| t.is_ident() && !t.is_reserved_ident()))
             || self.token.is_ident() &&
-            match node {
-                // `foo::` → `foo:` or `foo.bar::` → `foo.bar:`
-                ast::ExprKind::Path(..) | ast::ExprKind::Field(..) => true,
-                _ => false,
-            } &&
+            matches!(node, ast::ExprKind::Path(..) | ast::ExprKind::Field(..)) &&
             !self.token.is_reserved_ident() &&           // v `foo:bar(baz)`
             self.look_ahead(1, |t| t == &token::OpenDelim(token::Paren))
             || self.look_ahead(1, |t| t == &token::OpenDelim(token::Brace)) // `foo:bar {`
@@ -1467,14 +1554,6 @@
         }
     }
 
-    pub(super) fn expected_semi_or_open_brace<T>(&mut self) -> PResult<'a, T> {
-        let token_str = super::token_descr(&self.token);
-        let msg = &format!("expected `;` or `{{`, found {}", token_str);
-        let mut err = self.struct_span_err(self.token.span, msg);
-        err.span_label(self.token.span, "expected `;` or `{`");
-        Err(err)
-    }
-
     pub(super) fn eat_incorrect_doc_comment_for_param_type(&mut self) {
         if let token::DocComment(..) = self.token.kind {
             self.struct_span_err(
@@ -1691,4 +1770,142 @@
             }
         }
     }
+
+    /// Handle encountering a symbol in a generic argument list that is not a `,` or `>`. In this
+    /// case, we emit an error and try to suggest enclosing a const argument in braces if it looks
+    /// like the user has forgotten them.
+    pub fn handle_ambiguous_unbraced_const_arg(
+        &mut self,
+        args: &mut Vec<AngleBracketedArg>,
+    ) -> PResult<'a, bool> {
+        // If we haven't encountered a closing `>`, then the argument is malformed.
+        // It's likely that the user has written a const expression without enclosing it
+        // in braces, so we try to recover here.
+        let arg = args.pop().unwrap();
+        // FIXME: for some reason using `unexpected` or `expected_one_of_not_found` has
+        // adverse side-effects to subsequent errors and seems to advance the parser.
+        // We are causing this error here exclusively in case a `const` expression
+        // could be recovered from the current parser state, even if followed by more
+        // arguments after a comma.
+        let mut err = self.struct_span_err(
+            self.token.span,
+            &format!("expected one of `,` or `>`, found {}", super::token_descr(&self.token)),
+        );
+        err.span_label(self.token.span, "expected one of `,` or `>`");
+        match self.recover_const_arg(arg.span(), err) {
+            Ok(arg) => {
+                args.push(AngleBracketedArg::Arg(arg));
+                if self.eat(&token::Comma) {
+                    return Ok(true); // Continue
+                }
+            }
+            Err(mut err) => {
+                args.push(arg);
+                // We will emit a more generic error later.
+                err.delay_as_bug();
+            }
+        }
+        return Ok(false); // Don't continue.
+    }
+
+    /// Handle a generic const argument that had not been enclosed in braces, and suggest enclosing
+    /// it in braces. In this situation, unlike in `handle_ambiguous_unbraced_const_arg`, this is
+    /// almost certainly a const argument, so we always offer a suggestion.
+    pub fn handle_unambiguous_unbraced_const_arg(&mut self) -> PResult<'a, P<Expr>> {
+        let start = self.token.span;
+        let expr = self.parse_expr_res(Restrictions::CONST_EXPR, None).map_err(|mut err| {
+            err.span_label(
+                start.shrink_to_lo(),
+                "while parsing a const generic argument starting here",
+            );
+            err
+        })?;
+        if !self.expr_is_valid_const_arg(&expr) {
+            self.struct_span_err(
+                expr.span,
+                "expressions must be enclosed in braces to be used as const generic \
+                    arguments",
+            )
+            .multipart_suggestion(
+                "enclose the `const` expression in braces",
+                vec![
+                    (expr.span.shrink_to_lo(), "{ ".to_string()),
+                    (expr.span.shrink_to_hi(), " }".to_string()),
+                ],
+                Applicability::MachineApplicable,
+            )
+            .emit();
+        }
+        Ok(expr)
+    }
+
+    /// Try to recover from possible generic const argument without `{` and `}`.
+    ///
+    /// When encountering code like `foo::< bar + 3 >` or `foo::< bar - baz >` we suggest
+    /// `foo::<{ bar + 3 }>` and `foo::<{ bar - baz }>`, respectively. We only provide a suggestion
+    /// if we think that the resulting expression would be well formed.
+    pub fn recover_const_arg(
+        &mut self,
+        start: Span,
+        mut err: DiagnosticBuilder<'a>,
+    ) -> PResult<'a, GenericArg> {
+        let is_op = AssocOp::from_token(&self.token)
+            .and_then(|op| {
+                if let AssocOp::Greater
+                | AssocOp::Less
+                | AssocOp::ShiftRight
+                | AssocOp::GreaterEqual
+                // Don't recover from `foo::<bar = baz>`, because this could be an attempt to
+                // assign a value to a defaulted generic parameter.
+                | AssocOp::Assign
+                | AssocOp::AssignOp(_) = op
+                {
+                    None
+                } else {
+                    Some(op)
+                }
+            })
+            .is_some();
+        // This will be true when a trait object type `Foo +` or a path which was a `const fn` with
+        // type params has been parsed.
+        let was_op =
+            matches!(self.prev_token.kind, token::BinOp(token::Plus | token::Shr) | token::Gt);
+        if !is_op && !was_op {
+            // We perform these checks and early return to avoid taking a snapshot unnecessarily.
+            return Err(err);
+        }
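+        // Take a snapshot so we can roll the parser back below if the speculative
+        // const-expression parse does not yield a recoverable argument.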
+        let snapshot = self.clone();
+        if is_op {
+            self.bump();
+        }
+        match self.parse_expr_res(Restrictions::CONST_EXPR, None) {
+            Ok(expr) => {
+                if token::Comma == self.token.kind || self.token.kind.should_end_const_arg() {
+                    // Avoid the following output by checking that we consumed a full const arg:
+                    // help: expressions must be enclosed in braces to be used as const generic
+                    //       arguments
+                    //    |
+                    // LL |     let sr: Vec<{ (u32, _, _) = vec![] };
+                    //    |                 ^                      ^
+                    err.multipart_suggestion(
+                        "expressions must be enclosed in braces to be used as const generic \
+                         arguments",
+                        vec![
+                            (start.shrink_to_lo(), "{ ".to_string()),
+                            (expr.span.shrink_to_hi(), " }".to_string()),
+                        ],
+                        Applicability::MaybeIncorrect,
+                    );
+                    let value = self.mk_expr_err(start.to(expr.span));
+                    err.emit();
+                    return Ok(GenericArg::Const(AnonConst { id: ast::DUMMY_NODE_ID, value }));
+                }
+            }
+            Err(mut err) => {
+                err.cancel();
+            }
+        }
+        *self = snapshot;
+        Err(err)
+    }
 }
diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs
index a11ad6e..188bf22 100644
--- a/compiler/rustc_parse/src/parser/expr.rs
+++ b/compiler/rustc_parse/src/parser/expr.rs
@@ -6,6 +6,7 @@
 
 use rustc_ast::ptr::P;
 use rustc_ast::token::{self, Token, TokenKind};
+use rustc_ast::tokenstream::Spacing;
 use rustc_ast::util::classify;
 use rustc_ast::util::literal::LitError;
 use rustc_ast::util::parser::{prec_let_scrutinee_needs_par, AssocOp, Fixity};
@@ -18,7 +19,6 @@
 use rustc_span::symbol::{kw, sym, Ident, Symbol};
 use rustc_span::{BytePos, Pos};
 use std::mem;
-use tracing::debug;
 
 /// Possibly accepts an `token::Interpolated` expression (a pre-parsed expression
 /// dropped into the token stream, which happens while parsing the result of
@@ -246,11 +246,7 @@
                 this.parse_assoc_expr_with(prec + prec_adjustment, LhsExpr::NotYetParsed)
             })?;
 
-            // Make sure that the span of the parent node is larger than the span of lhs and rhs,
-            // including the attributes.
-            let lhs_span =
-                lhs.attrs.iter().find(|a| a.style == AttrStyle::Outer).map_or(lhs_span, |a| a.span);
-            let span = lhs_span.to(rhs.span);
+            let span = self.mk_expr_sp(&lhs, lhs_span, rhs.span);
             lhs = match op {
                 AssocOp::Add
                 | AssocOp::Subtract
@@ -363,6 +359,18 @@
     /// Also performs recovery for `and` / `or` which are mistaken for `&&` and `||` respectively.
     fn check_assoc_op(&self) -> Option<Spanned<AssocOp>> {
         let (op, span) = match (AssocOp::from_token(&self.token), self.token.ident()) {
+            // When parsing const expressions, stop parsing when encountering `>`.
+            (
+                Some(
+                    AssocOp::ShiftRight
+                    | AssocOp::Greater
+                    | AssocOp::GreaterEqual
+                    | AssocOp::AssignOp(token::BinOpToken::Shr),
+                ),
+                _,
+            ) if self.restrictions.contains(Restrictions::CONST_EXPR) => {
+                return None;
+            }
             (Some(op), _) => (op, self.token.span),
             (None, Some((Ident { name: sym::and, span }, false))) => {
                 self.error_bad_logical_op("and", "&&", "conjunction");
@@ -411,7 +419,7 @@
             None
         };
         let rhs_span = rhs.as_ref().map_or(cur_op_span, |x| x.span);
-        let span = lhs.span.to(rhs_span);
+        let span = self.mk_expr_sp(&lhs, lhs.span, rhs_span);
         let limits =
             if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed };
         Ok(self.mk_expr(span, self.mk_range(Some(lhs), rhs, limits)?, AttrVec::new()))
@@ -463,7 +471,7 @@
     /// Parses a prefix-unary-operator expr.
     fn parse_prefix_expr(&mut self, attrs: Option<AttrVec>) -> PResult<'a, P<Expr>> {
         let attrs = self.parse_or_use_outer_attributes(attrs)?;
-        self.maybe_collect_tokens(!attrs.is_empty(), |this| {
+        self.maybe_collect_tokens(super::attr::maybe_needs_tokens(&attrs), |this| {
             let lo = this.token.span;
             // Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr()
             let (hi, ex) = match this.token.uninterpolate().kind {
@@ -571,7 +579,11 @@
         expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind,
     ) -> PResult<'a, P<Expr>> {
         let mk_expr = |this: &mut Self, rhs: P<Ty>| {
-            this.mk_expr(lhs_span.to(rhs.span), expr_kind(lhs, rhs), AttrVec::new())
+            this.mk_expr(
+                this.mk_expr_sp(&lhs, lhs_span, rhs.span),
+                expr_kind(lhs, rhs),
+                AttrVec::new(),
+            )
         };
 
         // Save the state of the parser before parsing type normally, in case there is a
@@ -819,7 +831,7 @@
         self.struct_span_err(self.token.span, &format!("unexpected token: `{}`", actual)).emit();
     }
 
-    // We need and identifier or integer, but the next token is a float.
+    // We need an identifier or integer, but the next token is a float.
     // Break the float into components to extract the identifier or integer.
     // FIXME: With current `TokenCursor` it's hard to break tokens into more than 2
     // parts unless those parts are processed immediately. `TokenCursor` should either
@@ -884,7 +896,7 @@
                 assert!(suffix.is_none());
                 let symbol = Symbol::intern(&i);
                 self.token = Token::new(token::Ident(symbol, false), ident_span);
-                let next_token = Token::new(token::Dot, dot_span);
+                let next_token = (Token::new(token::Dot, dot_span), self.token_spacing);
                 self.parse_tuple_field_access_expr(lo, base, symbol, None, Some(next_token))
             }
             // 1.2 | 1.2e3
@@ -902,12 +914,14 @@
                 };
                 let symbol1 = Symbol::intern(&i1);
                 self.token = Token::new(token::Ident(symbol1, false), ident1_span);
-                let next_token1 = Token::new(token::Dot, dot_span);
+                // This needs to be `Spacing::Alone` to prevent regressions.
+                // See issue #76399 and PR #76285 for more details
+                let next_token1 = (Token::new(token::Dot, dot_span), Spacing::Alone);
                 let base1 =
                     self.parse_tuple_field_access_expr(lo, base, symbol1, None, Some(next_token1));
                 let symbol2 = Symbol::intern(&i2);
                 let next_token2 = Token::new(token::Ident(symbol2, false), ident2_span);
-                self.bump_with(next_token2); // `.`
+                self.bump_with((next_token2, self.token_spacing)); // `.`
                 self.parse_tuple_field_access_expr(lo, base1, symbol2, suffix, None)
             }
             // 1e+ | 1e- (recovered)
@@ -930,7 +944,7 @@
         base: P<Expr>,
         field: Symbol,
         suffix: Option<Symbol>,
-        next_token: Option<Token>,
+        next_token: Option<(Token, Spacing)>,
     ) -> P<Expr> {
         match next_token {
             Some(next_token) => self.bump_with(next_token),
@@ -1060,6 +1074,8 @@
             })
         } else if self.eat_keyword(kw::Unsafe) {
             self.parse_block_expr(None, lo, BlockCheckMode::Unsafe(ast::UserProvided), attrs)
+        } else if self.check_inline_const(0) {
+            self.parse_const_block(lo.to(self.token.span))
         } else if self.is_do_catch_block() {
             self.recover_do_catch(attrs)
         } else if self.is_try_block() {
@@ -1107,13 +1123,12 @@
 
     fn maybe_collect_tokens(
         &mut self,
-        has_outer_attrs: bool,
+        needs_tokens: bool,
         f: impl FnOnce(&mut Self) -> PResult<'a, P<Expr>>,
     ) -> PResult<'a, P<Expr>> {
-        if has_outer_attrs {
+        if needs_tokens {
             let (mut expr, tokens) = self.collect_tokens(f)?;
-            debug!("maybe_collect_tokens: Collected tokens for {:?} (tokens {:?}", expr, tokens);
-            expr.tokens = Some(tokens);
+            expr.tokens = tokens;
             Ok(expr)
         } else {
             f(self)
@@ -1712,7 +1727,7 @@
         let lo = self.prev_token.span;
         let pat = self.parse_top_pat(GateOr::No)?;
         self.expect(&token::Eq)?;
-        let expr = self.with_res(Restrictions::NO_STRUCT_LITERAL, |this| {
+        let expr = self.with_res(self.restrictions | Restrictions::NO_STRUCT_LITERAL, |this| {
             this.parse_assoc_expr_with(1 + prec_let_scrutinee_needs_par(), None.into())
         })?;
         let span = lo.to(expr.span);
@@ -2041,9 +2056,12 @@
     ) -> Option<PResult<'a, P<Expr>>> {
         let struct_allowed = !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL);
         if struct_allowed || self.is_certainly_not_a_block() {
-            // This is a struct literal, but we don't can't accept them here.
-            let expr = self.parse_struct_expr(path.clone(), attrs.clone());
+            if let Err(err) = self.expect(&token::OpenDelim(token::Brace)) {
+                return Some(Err(err));
+            }
+            let expr = self.parse_struct_expr(path.clone(), attrs.clone(), true);
             if let (Ok(expr), false) = (&expr, struct_allowed) {
+                // This is a struct literal, but we can't accept them here.
                 self.error_struct_lit_not_allowed_here(path.span, expr.span);
             }
             return Some(expr);
@@ -2061,14 +2079,15 @@
             .emit();
     }
 
+    /// Precondition: already parsed the '{'.
     pub(super) fn parse_struct_expr(
         &mut self,
         pth: ast::Path,
         mut attrs: AttrVec,
+        recover: bool,
     ) -> PResult<'a, P<Expr>> {
-        self.bump();
         let mut fields = Vec::new();
-        let mut base = None;
+        let mut base = ast::StructRest::None;
         let mut recover_async = false;
 
         attrs.extend(self.parse_inner_attributes()?);
@@ -2083,12 +2102,19 @@
         while self.token != token::CloseDelim(token::Brace) {
             if self.eat(&token::DotDot) {
                 let exp_span = self.prev_token.span;
+                // We permit `.. }` on the left-hand side of a destructuring assignment.
+                if self.check(&token::CloseDelim(token::Brace)) {
+                    self.sess.gated_spans.gate(sym::destructuring_assignment, self.prev_token.span);
+                    base = ast::StructRest::Rest(self.prev_token.span.shrink_to_hi());
+                    break;
+                }
                 match self.parse_expr() {
-                    Ok(e) => base = Some(e),
-                    Err(mut e) => {
+                    Ok(e) => base = ast::StructRest::Base(e),
+                    Err(mut e) if recover => {
                         e.emit();
                         self.recover_stmt();
                     }
+                    Err(e) => return Err(e),
                 }
                 self.recover_struct_comma_after_dotdot(exp_span);
                 break;
@@ -2140,6 +2166,9 @@
                             );
                         }
                     }
+                    if !recover {
+                        return Err(e);
+                    }
                     e.emit();
                     self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
                     self.eat(&token::Comma);
@@ -2316,4 +2345,14 @@
     pub(super) fn mk_expr_err(&self, span: Span) -> P<Expr> {
         self.mk_expr(span, ExprKind::Err, AttrVec::new())
     }
+
+    /// Create expression span ensuring the span of the parent node
+    /// is larger than the span of lhs and rhs, including the attributes.
+    fn mk_expr_sp(&self, lhs: &P<Expr>, lhs_span: Span, rhs_span: Span) -> Span {
+        lhs.attrs
+            .iter()
+            .find(|a| a.style == AttrStyle::Outer)
+            .map_or(lhs_span, |a| a.span)
+            .to(rhs_span)
+    }
 }
diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs
index 26ca998..5954b37 100644
--- a/compiler/rustc_parse/src/parser/item.rs
+++ b/compiler/rustc_parse/src/parser/item.rs
@@ -7,7 +7,7 @@
 use rustc_ast::ptr::P;
 use rustc_ast::token::{self, TokenKind};
 use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree};
-use rustc_ast::{self as ast, AttrStyle, AttrVec, Attribute, DUMMY_NODE_ID};
+use rustc_ast::{self as ast, AttrVec, Attribute, DUMMY_NODE_ID};
 use rustc_ast::{AssocItem, AssocItemKind, ForeignItemKind, Item, ItemKind, Mod};
 use rustc_ast::{Async, Const, Defaultness, IsAuto, Mutability, Unsafe, UseTree, UseTreeKind};
 use rustc_ast::{BindingMode, Block, FnDecl, FnSig, Param, SelfKind};
@@ -116,44 +116,30 @@
             Some(item.into_inner())
         });
 
+        let needs_tokens = super::attr::maybe_needs_tokens(&attrs);
+
         let mut unclosed_delims = vec![];
-        let has_attrs = !attrs.is_empty();
         let parse_item = |this: &mut Self| {
             let item = this.parse_item_common_(attrs, mac_allowed, attrs_allowed, req_name);
             unclosed_delims.append(&mut this.unclosed_delims);
             item
         };
 
-        let (mut item, tokens) = if has_attrs {
+        let (mut item, tokens) = if needs_tokens {
             let (item, tokens) = self.collect_tokens(parse_item)?;
-            (item, Some(tokens))
+            (item, tokens)
         } else {
             (parse_item(self)?, None)
         };
-
-        self.unclosed_delims.append(&mut unclosed_delims);
-
-        // Once we've parsed an item and recorded the tokens we got while
-        // parsing we may want to store `tokens` into the item we're about to
-        // return. Note, though, that we specifically didn't capture tokens
-        // related to outer attributes. The `tokens` field here may later be
-        // used with procedural macros to convert this item back into a token
-        // stream, but during expansion we may be removing attributes as we go
-        // along.
-        //
-        // If we've got inner attributes then the `tokens` we've got above holds
-        // these inner attributes. If an inner attribute is expanded we won't
-        // actually remove it from the token stream, so we'll just keep yielding
-        // it (bad!). To work around this case for now we just avoid recording
-        // `tokens` if we detect any inner attributes. This should help keep
-        // expansion correct, but we should fix this bug one day!
-        if let Some(tokens) = tokens {
-            if let Some(item) = &mut item {
-                if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) {
-                    item.tokens = Some(tokens);
-                }
+        if let Some(item) = &mut item {
+            // If we captured tokens during parsing (due to encountering an `NtItem`),
+            // use those instead
+            if item.tokens.is_none() {
+                item.tokens = tokens;
             }
         }
+
+        self.unclosed_delims.append(&mut unclosed_delims);
         Ok(item)
     }
 
@@ -375,21 +361,19 @@
                     format!(" {} ", kw),
                     Applicability::MachineApplicable,
                 );
+            } else if let Ok(snippet) = self.span_to_snippet(ident_sp) {
+                err.span_suggestion(
+                    full_sp,
+                    "if you meant to call a macro, try",
+                    format!("{}!", snippet),
+                    // this is the `ambiguous` conditional branch
+                    Applicability::MaybeIncorrect,
+                );
             } else {
-                if let Ok(snippet) = self.span_to_snippet(ident_sp) {
-                    err.span_suggestion(
-                        full_sp,
-                        "if you meant to call a macro, try",
-                        format!("{}!", snippet),
-                        // this is the `ambiguous` conditional branch
-                        Applicability::MaybeIncorrect,
-                    );
-                } else {
-                    err.help(
-                        "if you meant to call a macro, remove the `pub` \
-                                  and add a trailing `!` after the identifier",
-                    );
-                }
+                err.help(
+                    "if you meant to call a macro, remove the `pub` \
+                              and add a trailing `!` after the identifier",
+                );
             }
             Err(err)
         } else if self.look_ahead(1, |t| *t == token::Lt) {
@@ -981,10 +965,7 @@
                 if token.is_keyword(kw::Move) {
                     return true;
                 }
-                match token.kind {
-                    token::BinOp(token::Or) | token::OrOr => true,
-                    _ => false,
-                }
+                matches!(token.kind, token::BinOp(token::Or) | token::OrOr)
             })
         } else {
             false
@@ -1537,7 +1518,7 @@
         generics.where_clause = self.parse_where_clause()?; // `where T: Ord`
 
         let mut sig_hi = self.prev_token.span;
-        let body = self.parse_fn_body(attrs, &mut sig_hi)?; // `;` or `{ ... }`.
+        let body = self.parse_fn_body(attrs, &ident, &mut sig_hi)?; // `;` or `{ ... }`.
         let fn_sig_span = sig_lo.to(sig_hi);
         Ok((ident, FnSig { header, decl, span: fn_sig_span }, generics, body))
     }
@@ -1548,12 +1529,12 @@
     fn parse_fn_body(
         &mut self,
         attrs: &mut Vec<Attribute>,
+        ident: &Ident,
         sig_hi: &mut Span,
     ) -> PResult<'a, Option<P<Block>>> {
-        let (inner_attrs, body) = if self.check(&token::Semi) {
+        let (inner_attrs, body) = if self.eat(&token::Semi) {
             // Include the trailing semicolon in the span of the signature
-            *sig_hi = self.token.span;
-            self.bump(); // `;`
+            *sig_hi = self.prev_token.span;
             (Vec::new(), None)
         } else if self.check(&token::OpenDelim(token::Brace)) || self.token.is_whole_block() {
             self.parse_inner_attrs_and_block().map(|(attrs, body)| (attrs, Some(body)))?
@@ -1573,7 +1554,21 @@
                 .emit();
             (Vec::new(), Some(self.mk_block_err(span)))
         } else {
-            return self.expected_semi_or_open_brace();
+            if let Err(mut err) =
+                self.expected_one_of_not_found(&[], &[token::Semi, token::OpenDelim(token::Brace)])
+            {
+                if self.token.kind == token::CloseDelim(token::Brace) {
+                    // The enclosing `mod`, `trait` or `impl` is being closed, so keep the `fn` in
+                    // the AST for typechecking.
+                    err.span_label(ident.span, "while parsing this `fn`");
+                    err.emit();
+                    (Vec::new(), None)
+                } else {
+                    return Err(err);
+                }
+            } else {
+                unreachable!()
+            }
         };
         attrs.extend(inner_attrs);
         Ok(body)
@@ -1744,7 +1739,7 @@
             }
         };
 
-        let span = lo.to(self.token.span);
+        let span = lo.until(self.token.span);
 
         Ok(Param {
             attrs: attrs.into(),
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
index 7340c57..da1c54e 100644
--- a/compiler/rustc_parse/src/parser/mod.rs
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -10,17 +10,21 @@
 mod ty;
 
 use crate::lexer::UnmatchedBrace;
+pub use diagnostics::AttemptLocalParseRecovery;
 use diagnostics::Error;
 pub use path::PathStyle;
 
 use rustc_ast::ptr::P;
 use rustc_ast::token::{self, DelimToken, Token, TokenKind};
-use rustc_ast::tokenstream::{self, DelimSpan, TokenStream, TokenTree, TreeAndSpacing};
+use rustc_ast::tokenstream::{self, DelimSpan, LazyTokenStream, Spacing};
+use rustc_ast::tokenstream::{CreateTokenStream, TokenStream, TokenTree};
 use rustc_ast::DUMMY_NODE_ID;
-use rustc_ast::{self as ast, AttrStyle, AttrVec, Const, CrateSugar, Extern, Unsafe};
-use rustc_ast::{Async, MacArgs, MacDelimiter, Mutability, StrLit, Visibility, VisibilityKind};
+use rustc_ast::{self as ast, AnonConst, AttrStyle, AttrVec, Const, CrateSugar, Extern, Unsafe};
+use rustc_ast::{Async, Expr, ExprKind, MacArgs, MacDelimiter, Mutability, StrLit};
+use rustc_ast::{Visibility, VisibilityKind};
 use rustc_ast_pretty::pprust;
-use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, FatalError, PResult};
+use rustc_errors::PResult;
+use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, FatalError};
 use rustc_session::parse::ParseSess;
 use rustc_span::source_map::{Span, DUMMY_SP};
 use rustc_span::symbol::{kw, sym, Ident, Symbol};
@@ -32,6 +36,7 @@
     struct Restrictions: u8 {
         const STMT_EXPR         = 1 << 0;
         const NO_STRUCT_LITERAL = 1 << 1;
+        const CONST_EXPR        = 1 << 2;
     }
 }
 
@@ -83,10 +88,14 @@
     pub sess: &'a ParseSess,
     /// The current token.
     pub token: Token,
+    /// The spacing for the current token
+    pub token_spacing: Spacing,
     /// The previous token.
     pub prev_token: Token,
     restrictions: Restrictions,
     expected_tokens: Vec<TokenType>,
+    // Important: This must only be advanced from `next_tok`
+    // to ensure that `token_cursor.num_next_calls` is updated properly
     token_cursor: TokenCursor,
     desugar_doc_comments: bool,
     /// This field is used to keep track of how many left angle brackets we have seen. This is
@@ -118,8 +127,10 @@
 struct TokenCursor {
     frame: TokenCursorFrame,
     stack: Vec<TokenCursorFrame>,
-    cur_token: Option<TreeAndSpacing>,
-    collecting: Option<Collecting>,
+    desugar_doc_comments: bool,
+    // Counts the number of calls to `next` or `next_desugared`,
+    // depending on whether `desugar_doc_comments` is set.
+    num_next_calls: usize,
 }
 
 #[derive(Clone)]
@@ -131,40 +142,22 @@
     close_delim: bool,
 }
 
-/// Used to track additional state needed by `collect_tokens`
-#[derive(Clone, Debug)]
-struct Collecting {
-    /// Holds the current tokens captured during the most
-    /// recent call to `collect_tokens`
-    buf: Vec<TreeAndSpacing>,
-    /// The depth of the `TokenCursor` stack at the time
-    /// collection was started. When we encounter a `TokenTree::Delimited`,
-    /// we want to record the `TokenTree::Delimited` itself,
-    /// but *not* any of the inner tokens while we are inside
-    /// the new frame (this would cause us to record duplicate tokens).
-    ///
-    /// This `depth` fields tracks stack depth we are recording tokens.
-    /// Only tokens encountered at this depth will be recorded. See
-    /// `TokenCursor::next` for more details.
-    depth: usize,
-}
-
 impl TokenCursorFrame {
-    fn new(span: DelimSpan, delim: DelimToken, tts: &TokenStream) -> Self {
+    fn new(span: DelimSpan, delim: DelimToken, tts: TokenStream) -> Self {
         TokenCursorFrame {
             delim,
             span,
             open_delim: delim == token::NoDelim,
-            tree_cursor: tts.clone().into_trees(),
+            tree_cursor: tts.into_trees(),
             close_delim: delim == token::NoDelim,
         }
     }
 }
 
 impl TokenCursor {
-    fn next(&mut self) -> Token {
+    fn next(&mut self) -> (Token, Spacing) {
         loop {
-            let tree = if !self.frame.open_delim {
+            let (tree, spacing) = if !self.frame.open_delim {
                 self.frame.open_delim = true;
                 TokenTree::open_tt(self.frame.span, self.frame.delim).into()
             } else if let Some(tree) = self.frame.tree_cursor.next_with_spacing() {
@@ -176,40 +169,24 @@
                 self.frame = frame;
                 continue;
             } else {
-                return Token::new(token::Eof, DUMMY_SP);
+                (TokenTree::Token(Token::new(token::Eof, DUMMY_SP)), Spacing::Alone)
             };
 
-            // Don't set an open delimiter as our current token - we want
-            // to leave it as the full `TokenTree::Delimited` from the previous
-            // iteration of this loop
-            if !matches!(tree.0, TokenTree::Token(Token { kind: TokenKind::OpenDelim(_), .. })) {
-                self.cur_token = Some(tree.clone());
-            }
-
-            if let Some(collecting) = &mut self.collecting {
-                if collecting.depth == self.stack.len() {
-                    debug!(
-                        "TokenCursor::next():  collected {:?} at depth {:?}",
-                        tree,
-                        self.stack.len()
-                    );
-                    collecting.buf.push(tree.clone())
+            match tree {
+                TokenTree::Token(token) => {
+                    return (token, spacing);
                 }
-            }
-
-            match tree.0 {
-                TokenTree::Token(token) => return token,
                 TokenTree::Delimited(sp, delim, tts) => {
-                    let frame = TokenCursorFrame::new(sp, delim, &tts);
+                    let frame = TokenCursorFrame::new(sp, delim, tts);
                     self.stack.push(mem::replace(&mut self.frame, frame));
                 }
             }
         }
     }
 
-    fn next_desugared(&mut self) -> Token {
+    fn next_desugared(&mut self) -> (Token, Spacing) {
         let (data, attr_style, sp) = match self.next() {
-            Token { kind: token::DocComment(_, attr_style, data), span } => {
+            (Token { kind: token::DocComment(_, attr_style, data), span }, _) => {
                 (data, attr_style, span)
             }
             tok => return tok,
@@ -247,7 +224,7 @@
             TokenCursorFrame::new(
                 delim_span,
                 token::NoDelim,
-                &if attr_style == AttrStyle::Inner {
+                if attr_style == AttrStyle::Inner {
                     [TokenTree::token(token::Pound, sp), TokenTree::token(token::Not, sp), body]
                         .iter()
                         .cloned()
@@ -349,14 +326,15 @@
         let mut parser = Parser {
             sess,
             token: Token::dummy(),
+            token_spacing: Spacing::Alone,
             prev_token: Token::dummy(),
             restrictions: Restrictions::empty(),
             expected_tokens: Vec::new(),
             token_cursor: TokenCursor {
-                frame: TokenCursorFrame::new(DelimSpan::dummy(), token::NoDelim, &tokens),
+                frame: TokenCursorFrame::new(DelimSpan::dummy(), token::NoDelim, tokens),
                 stack: Vec::new(),
-                cur_token: None,
-                collecting: None,
+                num_next_calls: 0,
+                desugar_doc_comments,
             },
             desugar_doc_comments,
             unmatched_angle_bracket_count: 0,
@@ -373,20 +351,21 @@
         parser
     }
 
-    fn next_tok(&mut self, fallback_span: Span) -> Token {
-        let mut next = if self.desugar_doc_comments {
+    fn next_tok(&mut self, fallback_span: Span) -> (Token, Spacing) {
+        let (mut next, spacing) = if self.desugar_doc_comments {
             self.token_cursor.next_desugared()
         } else {
             self.token_cursor.next()
         };
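+        // Keep the cursor's `num_next_calls` in sync; `collect_tokens` relies on this
+        // count to replay exactly the tokens consumed since its snapshot.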
+        self.token_cursor.num_next_calls += 1;
         if next.span.is_dummy() {
             // Tweak the location for better diagnostics, but keep syntactic context intact.
             next.span = fallback_span.with_ctxt(next.span.ctxt());
         }
-        next
+        (next, spacing)
     }
 
-    crate fn unexpected<T>(&mut self) -> PResult<'a, T> {
+    pub fn unexpected<T>(&mut self) -> PResult<'a, T> {
         match self.expect_one_of(&[], &[]) {
             Err(e) => Err(e),
             // We can get `Ok(true)` from `recover_closing_delimiter`
@@ -544,6 +523,15 @@
         self.check_or_expected(self.token.can_begin_const_arg(), TokenType::Const)
     }
 
+    fn check_inline_const(&self, dist: usize) -> bool {
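+        // `const` followed by `{ ... }` (or an interpolated block) marks the start of an
+        // inline const expression rather than a `const` item.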
+        self.is_keyword_ahead(dist, &[kw::Const])
+            && self.look_ahead(dist + 1, |t| match t.kind {
+                token::Interpolated(ref nt) => matches!(**nt, token::NtBlock(..)),
+                token::OpenDelim(DelimToken::Brace) => true,
+                _ => false,
+            })
+    }
+
     /// Checks to see if the next token is either `+` or `+=`.
     /// Otherwise returns `false`.
     fn check_plus(&mut self) -> bool {
@@ -566,7 +554,9 @@
                 let first_span = self.sess.source_map().start_point(self.token.span);
                 let second_span = self.token.span.with_lo(first_span.hi());
                 self.token = Token::new(first, first_span);
-                self.bump_with(Token::new(second, second_span));
+                // Use the spacing of the glued token as the spacing
+                // of the unglued second token.
+                self.bump_with((Token::new(second, second_span), self.token_spacing));
                 true
             }
             _ => {
@@ -798,7 +788,7 @@
     }
 
     /// Advance the parser by one token using provided token as the next one.
-    fn bump_with(&mut self, next_token: Token) {
+    fn bump_with(&mut self, (next_token, next_spacing): (Token, Spacing)) {
         // Bumping after EOF is a bad sign, usually an infinite loop.
         if self.prev_token.kind == TokenKind::Eof {
             let msg = "attempted to bump the parser past EOF (may be stuck in a loop)";
@@ -807,6 +797,7 @@
 
         // Update the current and previous tokens.
         self.prev_token = mem::replace(&mut self.token, next_token);
+        self.token_spacing = next_spacing;
 
         // Diagnostics.
         self.expected_tokens.clear();
@@ -826,15 +817,15 @@
         }
 
         let frame = &self.token_cursor.frame;
-        looker(&match frame.tree_cursor.look_ahead(dist - 1) {
+        match frame.tree_cursor.look_ahead(dist - 1) {
             Some(tree) => match tree {
-                TokenTree::Token(token) => token,
+                TokenTree::Token(token) => looker(token),
                 TokenTree::Delimited(dspan, delim, _) => {
-                    Token::new(token::OpenDelim(delim), dspan.open)
+                    looker(&Token::new(token::OpenDelim(*delim), dspan.open))
                 }
             },
-            None => Token::new(token::CloseDelim(frame.delim), frame.span.close),
-        })
+            None => looker(&Token::new(token::CloseDelim(frame.delim), frame.span.close)),
+        }
     }
 
     /// Returns whether any of the given keywords are `dist` tokens ahead of the current one.
@@ -863,13 +854,28 @@
 
     /// Parses constness: `const` or nothing.
     fn parse_constness(&mut self) -> Const {
-        if self.eat_keyword(kw::Const) {
+        // Avoid parsing `const { ... }` blocks as `const` items.
+        if self.look_ahead(1, |t| t != &token::OpenDelim(DelimToken::Brace))
+            && self.eat_keyword(kw::Const)
+        {
             Const::Yes(self.prev_token.uninterpolated_span())
         } else {
             Const::No
         }
     }
 
+    /// Parses inline const expressions.
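+    /// The `const { ... }` syntax is feature-gated (`inline_const`) and is represented as an
+    /// `ExprKind::ConstBlock` wrapping an anonymous constant.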
+    fn parse_const_block(&mut self, span: Span) -> PResult<'a, P<Expr>> {
+        self.sess.gated_spans.gate(sym::inline_const, span);
+        self.eat_keyword(kw::Const);
+        let blk = self.parse_block()?;
+        let anon_const = AnonConst {
+            id: DUMMY_NODE_ID,
+            value: self.mk_expr(blk.span, ExprKind::Block(blk, None), AttrVec::new()),
+        };
+        Ok(self.mk_expr(span, ExprKind::ConstBlock(anon_const), AttrVec::new()))
+    }
+
     /// Parses mutability (`mut` or nothing).
     fn parse_mutability(&mut self) -> Mutability {
         if self.eat_keyword(kw::Mut) { Mutability::Mut } else { Mutability::Not }
@@ -962,13 +968,27 @@
     pub(crate) fn parse_token_tree(&mut self) -> TokenTree {
         match self.token.kind {
             token::OpenDelim(..) => {
-                let frame = mem::replace(
-                    &mut self.token_cursor.frame,
-                    self.token_cursor.stack.pop().unwrap(),
-                );
-                self.token = Token::new(TokenKind::CloseDelim(frame.delim), frame.span.close);
+                let depth = self.token_cursor.stack.len();
+
+                // We keep advancing the token cursor until we hit
+                // the matching `CloseDelim` token.
+                while !(depth == self.token_cursor.stack.len()
+                    && matches!(self.token.kind, token::CloseDelim(_)))
+                {
+                    // Advance one token at a time, so `TokenCursor::next()`
+                    // can capture these tokens if necessary.
+                    self.bump();
+                }
+                // We are still inside the frame corresponding
+                // to the delimited stream we captured, so grab
+                // the tokens from this frame.
+                let frame = &self.token_cursor.frame;
+                let stream = frame.tree_cursor.stream.clone();
+                let span = frame.span;
+                let delim = frame.delim;
+                // Consume close delimiter
                 self.bump();
-                TokenTree::Delimited(frame.span, frame.delim, frame.tree_cursor.stream)
+                TokenTree::Delimited(span, delim, stream)
             }
             token::CloseDelim(_) | token::Eof => unreachable!(),
             _ => {
@@ -1159,8 +1179,9 @@
 
     /// Records all tokens consumed by the provided callback,
     /// including the current token. These tokens are collected
-    /// into a `TokenStream`, and returned along with the result
-    /// of the callback.
+    /// into a `LazyTokenStream`, and returned along with the result
+    /// of the callback. The returned `LazyTokenStream` will be `None`
+    /// if no tokens were captured.
     ///
     /// Note: If your callback consumes an opening delimiter
     /// (including the case where you call `collect_tokens`
@@ -1176,79 +1197,61 @@
     pub fn collect_tokens<R>(
         &mut self,
         f: impl FnOnce(&mut Self) -> PResult<'a, R>,
-    ) -> PResult<'a, (R, TokenStream)> {
-        // Record all tokens we parse when parsing this item.
-        let tokens: Vec<TreeAndSpacing> = self.token_cursor.cur_token.clone().into_iter().collect();
-        debug!("collect_tokens: starting with {:?}", tokens);
+    ) -> PResult<'a, (R, Option<LazyTokenStream>)> {
+        let start_token = (self.token.clone(), self.token_spacing);
+        let cursor_snapshot = self.token_cursor.clone();
 
-        // We need special handling for the case where `collect_tokens` is called
-        // on an opening delimeter (e.g. '('). At this point, we have already pushed
-        // a new frame - however, we want to record the original `TokenTree::Delimited`,
-        // for consistency with the case where we start recording one token earlier.
-        // See `TokenCursor::next` to see how `cur_token` is set up.
-        let prev_depth =
-            if matches!(self.token_cursor.cur_token, Some((TokenTree::Delimited(..), _))) {
-                if self.token_cursor.stack.is_empty() {
-                    // There is nothing below us in the stack that
-                    // the function could consume, so the only thing it can legally
-                    // capture is the entire contents of the current frame.
-                    return Ok((f(self)?, TokenStream::new(tokens)));
-                }
-                // We have already recorded the full `TokenTree::Delimited` when we created
-                // our `tokens` vector at the start of this function. We are now inside
-                // a new frame corresponding to the `TokenTree::Delimited` we already recoreded.
-                // We don't want to record any of the tokens inside this frame, since they
-                // will be duplicates of the tokens nested inside the `TokenTree::Delimited`.
-                // Therefore, we set our recording depth to the *previous* frame. This allows
-                // us to record a sequence like: `(foo).bar()`: the `(foo)` will be recored
-                // as our initial `cur_token`, while the `.bar()` will be recored after we
-                // pop the `(foo)` frame.
-                self.token_cursor.stack.len() - 1
-            } else {
-                self.token_cursor.stack.len()
-            };
-        let prev_collecting =
-            self.token_cursor.collecting.replace(Collecting { buf: tokens, depth: prev_depth });
+        let ret = f(self)?;
 
-        let ret = f(self);
-
-        let mut collected_tokens = if let Some(collecting) = self.token_cursor.collecting.take() {
-            collecting.buf
-        } else {
-            let msg = "our vector went away?";
-            debug!("collect_tokens: {}", msg);
-            self.sess.span_diagnostic.delay_span_bug(self.token.span, &msg);
-            // This can happen due to a bad interaction of two unrelated recovery mechanisms
-            // with mismatched delimiters *and* recovery lookahead on the likely typo
-            // `pub ident(` (#62895, different but similar to the case above).
-            return Ok((ret?, TokenStream::default()));
-        };
-
-        debug!("collect_tokens: got raw tokens {:?}", collected_tokens);
-
-        // If we're not at EOF our current token wasn't actually consumed by
-        // `f`, but it'll still be in our list that we pulled out. In that case
-        // put it back.
-        let extra_token = if self.token != token::Eof { collected_tokens.pop() } else { None };
-
-        if let Some(mut collecting) = prev_collecting {
-            // If we were previously collecting at the same depth,
-            // then the previous call to `collect_tokens` needs to see
-            // the tokens we just recorded.
-            //
-            // If we were previously recording at an lower `depth`,
-            // then the previous `collect_tokens` call already recorded
-            // this entire frame in the form of a `TokenTree::Delimited`,
-            // so there is nothing else for us to do.
-            if collecting.depth == prev_depth {
-                collecting.buf.extend(collected_tokens.iter().cloned());
-                collecting.buf.extend(extra_token);
-                debug!("collect_tokens: updating previous buf to {:?}", collecting);
-            }
-            self.token_cursor.collecting = Some(collecting)
+        // We didn't capture any tokens
+        let num_calls = self.token_cursor.num_next_calls - cursor_snapshot.num_next_calls;
+        if num_calls == 0 {
+            return Ok((ret, None));
         }
 
-        Ok((ret?, TokenStream::new(collected_tokens)))
+        // Produces a `TokenStream` on-demand. Using `cursor_snapshot`
+        // and `num_calls`, we can reconstruct the `TokenStream` seen
+        // by the callback. This allows us to avoid producing a `TokenStream`
+        // if it is never needed - for example, a captured `macro_rules!`
+        // argument that is never passed to a proc macro.
+        //
+        // This also makes `Parser` very cheap to clone, since
+        // there is no intermediate collection buffer to clone.
+        struct LazyTokenStreamImpl {
+            start_token: (Token, Spacing),
+            cursor_snapshot: TokenCursor,
+            num_calls: usize,
+            desugar_doc_comments: bool,
+        }
+        impl CreateTokenStream for LazyTokenStreamImpl {
+            fn create_token_stream(&self) -> TokenStream {
+                // The token produced by the final call to `next` or `next_desugared`
+                // was not actually consumed by the callback. The combination
+                // of chaining the initial token and using `take` produces the desired
+                // result - we produce an empty `TokenStream` if no calls were made,
+                // and omit the final token otherwise.
+                let mut cursor_snapshot = self.cursor_snapshot.clone();
+                let tokens = std::iter::once(self.start_token.clone())
+                    .chain((0..self.num_calls).map(|_| {
+                        if self.desugar_doc_comments {
+                            cursor_snapshot.next_desugared()
+                        } else {
+                            cursor_snapshot.next()
+                        }
+                    }))
+                    .take(self.num_calls);
+
+                make_token_stream(tokens)
+            }
+        }
+
+        let lazy_impl = LazyTokenStreamImpl {
+            start_token,
+            cursor_snapshot,
+            num_calls,
+            desugar_doc_comments: self.desugar_doc_comments,
+        };
+        Ok((ret, Some(LazyTokenStream::new(lazy_impl))))
     }
 
     /// `::{` or `::*`
@@ -1297,3 +1300,41 @@
         }
     }
 }
+
+/// Converts a flattened iterator of tokens (including open and close delimiter tokens)
+/// into a `TokenStream`, creating a `TokenTree::Delimited` for each matching pair
+/// of open and close delims.
+fn make_token_stream(tokens: impl Iterator<Item = (Token, Spacing)>) -> TokenStream {
+    #[derive(Debug)]
+    struct FrameData {
+        open: Span,
+        inner: Vec<(TokenTree, Spacing)>,
+    }
+    let mut stack = vec![FrameData { open: DUMMY_SP, inner: vec![] }];
+    for (token, spacing) in tokens {
+        match token {
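+            // An open delimiter begins a new frame; remember its span so the matching
+            // close delimiter can build a `DelimSpan`.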
+            Token { kind: TokenKind::OpenDelim(_), span } => {
+                stack.push(FrameData { open: span, inner: vec![] });
+            }
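+            // A close delimiter ends the current frame: wrap the collected tokens in a
+            // `TokenTree::Delimited` and push it into the parent frame.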
+            Token { kind: TokenKind::CloseDelim(delim), span } => {
+                let frame_data = stack.pop().expect("Token stack was empty!");
+                let dspan = DelimSpan::from_pair(frame_data.open, span);
+                let stream = TokenStream::new(frame_data.inner);
+                let delimited = TokenTree::Delimited(dspan, delim, stream);
+                stack
+                    .last_mut()
+                    .unwrap_or_else(|| panic!("Bottom token frame is missing for tokens!"))
+                    .inner
+                    .push((delimited, Spacing::Alone));
+            }
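+            // Every other token is pushed into the current frame unchanged.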
+            token => stack
+                .last_mut()
+                .expect("Bottom token frame is missing!")
+                .inner
+                .push((TokenTree::Token(token), spacing)),
+        }
+    }
+    let final_buf = stack.pop().expect("Missing final buf!");
+    assert!(stack.is_empty(), "Stack should be empty: final_buf={:?} stack={:?}", final_buf, stack);
+    TokenStream::new(final_buf.inner)
+}
diff --git a/compiler/rustc_parse/src/parser/nonterminal.rs b/compiler/rustc_parse/src/parser/nonterminal.rs
index 15660fd..38f04da 100644
--- a/compiler/rustc_parse/src/parser/nonterminal.rs
+++ b/compiler/rustc_parse/src/parser/nonterminal.rs
@@ -27,6 +27,8 @@
                 token.can_begin_expr()
                 // This exception is here for backwards compatibility.
                 && !token.is_keyword(kw::Let)
+                // This exception is here for backwards compatibility.
+                && !token.is_keyword(kw::Const)
             }
             NonterminalKind::Ty => token.can_begin_type(),
             NonterminalKind::Ident => get_macro_ident(token).is_some(),
@@ -38,16 +40,13 @@
             },
             NonterminalKind::Block => match token.kind {
                 token::OpenDelim(token::Brace) => true,
-                token::Interpolated(ref nt) => match **nt {
-                    token::NtItem(_)
+                token::Interpolated(ref nt) => !matches!(**nt, token::NtItem(_)
                     | token::NtPat(_)
                     | token::NtTy(_)
                     | token::NtIdent(..)
                     | token::NtMeta(_)
                     | token::NtPath(_)
-                    | token::NtVis(_) => false, // none of these may start with '{'.
-                    _ => true,
-                },
+                    | token::NtVis(_)),
                 _ => false,
             },
             NonterminalKind::Path | NonterminalKind::Meta => match token.kind {
@@ -76,17 +75,14 @@
             },
             NonterminalKind::Lifetime => match token.kind {
                 token::Lifetime(_) => true,
-                token::Interpolated(ref nt) => match **nt {
-                    token::NtLifetime(_) | token::NtTT(_) => true,
-                    _ => false,
-                },
+                token::Interpolated(ref nt) => {
+                    matches!(**nt, token::NtLifetime(_) | token::NtTT(_))
+                }
                 _ => false,
             },
-            NonterminalKind::TT | NonterminalKind::Item | NonterminalKind::Stmt => match token.kind
-            {
-                token::CloseDelim(_) => false,
-                _ => true,
-            },
+            NonterminalKind::TT | NonterminalKind::Item | NonterminalKind::Stmt => {
+                !matches!(token.kind, token::CloseDelim(_))
+            }
         }
     }
 
@@ -103,7 +99,7 @@
                     // If we captured tokens during parsing (due to outer attributes),
                     // use those.
                     if item.tokens.is_none() {
-                        item.tokens = Some(tokens);
+                        item.tokens = tokens;
                     }
                     token::NtItem(item)
                 }
@@ -115,7 +111,7 @@
                 let (mut block, tokens) = self.collect_tokens(|this| this.parse_block())?;
                 // We have eaten an NtBlock, which could already have tokens
                 if block.tokens.is_none() {
-                    block.tokens = Some(tokens);
+                    block.tokens = tokens;
                 }
                 token::NtBlock(block)
             }
@@ -124,7 +120,7 @@
                 match stmt {
                     Some(mut s) => {
                         if s.tokens.is_none() {
-                            s.tokens = Some(tokens);
+                            s.tokens = tokens;
                         }
                         token::NtStmt(s)
                     }
@@ -137,7 +133,7 @@
                 let (mut pat, tokens) = self.collect_tokens(|this| this.parse_pat(None))?;
                 // We have eaten an NtPat, which could already have tokens
                 if pat.tokens.is_none() {
-                    pat.tokens = Some(tokens);
+                    pat.tokens = tokens;
                 }
                 token::NtPat(pat)
             }
@@ -146,7 +142,7 @@
                 // If we captured tokens during parsing (due to outer attributes),
                 // use those.
                 if expr.tokens.is_none() {
-                    expr.tokens = Some(tokens);
+                    expr.tokens = tokens;
                 }
                 token::NtExpr(expr)
             }
@@ -155,7 +151,7 @@
                     self.collect_tokens(|this| this.parse_literal_maybe_minus())?;
                 // We have eaten a nonterminal, which could already have tokens
                 if lit.tokens.is_none() {
-                    lit.tokens = Some(tokens);
+                    lit.tokens = tokens;
                 }
                 token::NtLiteral(lit)
             }
@@ -163,7 +159,7 @@
                 let (mut ty, tokens) = self.collect_tokens(|this| this.parse_ty())?;
                 // We have eaten an NtTy, which could already have tokens
                 if ty.tokens.is_none() {
-                    ty.tokens = Some(tokens);
+                    ty.tokens = tokens;
                 }
                 token::NtTy(ty)
             }
@@ -183,15 +179,15 @@
                     self.collect_tokens(|this| this.parse_path(PathStyle::Type))?;
                 // We have eaten an NtPath, which could already have tokens
                 if path.tokens.is_none() {
-                    path.tokens = Some(tokens);
+                    path.tokens = tokens;
                 }
                 token::NtPath(path)
             }
             NonterminalKind::Meta => {
-                let (mut attr, tokens) = self.collect_tokens(|this| this.parse_attr_item())?;
+                let (mut attr, tokens) = self.collect_tokens(|this| this.parse_attr_item(false))?;
                 // We may have eaten a nonterminal, which could already have tokens
                 if attr.tokens.is_none() {
-                    attr.tokens = Some(tokens);
+                    attr.tokens = tokens;
                 }
                 token::NtMeta(P(attr))
             }
@@ -201,7 +197,7 @@
                     self.collect_tokens(|this| this.parse_visibility(FollowedByType::Yes))?;
                 // We may have eaten an `NtVis`, which could already have tokens
                 if vis.tokens.is_none() {
-                    vis.tokens = Some(tokens);
+                    vis.tokens = tokens;
                 }
                 token::NtVis(vis)
             }
diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs
index 5aced9d..ee9a6dc 100644
--- a/compiler/rustc_parse/src/parser/pat.rs
+++ b/compiler/rustc_parse/src/parser/pat.rs
@@ -1,6 +1,6 @@
 use super::{Parser, PathStyle};
 use crate::{maybe_recover_from_interpolated_ty_qpath, maybe_whole};
-use rustc_ast::mut_visit::{noop_visit_mac, noop_visit_pat, MutVisitor};
+use rustc_ast::mut_visit::{noop_visit_pat, MutVisitor};
 use rustc_ast::ptr::P;
 use rustc_ast::token;
 use rustc_ast::{self as ast, AttrVec, Attribute, FieldPat, MacCall, Pat, PatKind, RangeEnd};
@@ -149,8 +149,10 @@
     /// Note that there are more tokens such as `@` for which we know that the `|`
     /// is an illegal parse. However, the user's intent is less clear in that case.
     fn recover_trailing_vert(&mut self, lo: Option<Span>) -> bool {
-        let is_end_ahead = self.look_ahead(1, |token| match &token.uninterpolate().kind {
-            token::FatArrow // e.g. `a | => 0,`.
+        let is_end_ahead = self.look_ahead(1, |token| {
+            matches!(
+                &token.uninterpolate().kind,
+                token::FatArrow // e.g. `a | => 0,`.
             | token::Ident(kw::If, false) // e.g. `a | if expr`.
             | token::Eq // e.g. `let a | = 0`.
             | token::Semi // e.g. `let a |;`.
@@ -158,8 +160,8 @@
             | token::Comma // e.g. `let (a |,)`.
             | token::CloseDelim(token::Bracket) // e.g. `let [a | ]`.
             | token::CloseDelim(token::Paren) // e.g. `let (a | )`.
-            | token::CloseDelim(token::Brace) => true, // e.g. `let A { f: a | }`.
-            _ => false,
+            | token::CloseDelim(token::Brace) // e.g. `let A { f: a | }`.
+            )
         });
         match (is_end_ahead, &self.token.kind) {
             (true, token::BinOp(token::Or) | token::OrOr) => {
@@ -313,6 +315,15 @@
             let pat = self.parse_pat_with_range_pat(false, None)?;
             self.sess.gated_spans.gate(sym::box_patterns, lo.to(self.prev_token.span));
             PatKind::Box(pat)
+        } else if self.check_inline_const(0) {
+            // Parse `const pat`
+            let const_expr = self.parse_const_block(lo.to(self.token.span))?;
+
+            if let Some(re) = self.parse_range_end() {
+                self.parse_pat_range_begin_with(const_expr, re)?
+            } else {
+                PatKind::Lit(const_expr)
+            }
         } else if self.can_be_ident_pat() {
             // Parse `ident @ pat`
             // This can give false positives and parse nullary enums,
@@ -559,10 +570,6 @@
     fn make_all_value_bindings_mutable(pat: &mut P<Pat>) -> bool {
         struct AddMut(bool);
         impl MutVisitor for AddMut {
-            fn visit_mac(&mut self, mac: &mut MacCall) {
-                noop_visit_mac(mac, self);
-            }
-
             fn visit_pat(&mut self, pat: &mut P<Pat>) {
                 if let PatKind::Ident(BindingMode::ByValue(m @ Mutability::Not), ..) = &mut pat.kind
                 {
@@ -714,16 +721,19 @@
 
     /// Is the token `dist` away from the current one suitable as the start of a range pattern's end?
     fn is_pat_range_end_start(&self, dist: usize) -> bool {
-        self.look_ahead(dist, |t| {
-            t.is_path_start() // e.g. `MY_CONST`;
+        self.check_inline_const(dist)
+            || self.look_ahead(dist, |t| {
+                t.is_path_start() // e.g. `MY_CONST`;
                 || t.kind == token::Dot // e.g. `.5` for recovery;
                 || t.can_begin_literal_maybe_minus() // e.g. `42`.
                 || t.is_whole_expr()
-        })
+            })
     }
 
     fn parse_pat_range_end(&mut self) -> PResult<'a, P<Expr>> {
-        if self.check_path() {
+        if self.check_inline_const(0) {
+            self.parse_const_block(self.token.span)
+        } else if self.check_path() {
             let lo = self.token.span;
             let (qself, path) = if self.eat_lt() {
                 // Parse a qualified path
@@ -754,14 +764,12 @@
         && !self.token.is_path_segment_keyword() // Avoid e.g. `Self` as it is a path.
         // Avoid `in`. Due to recovery in the list parser this messes with `for ( $pat in $expr )`.
         && !self.token.is_keyword(kw::In)
-        && self.look_ahead(1, |t| match t.kind { // Try to do something more complex?
-            token::OpenDelim(token::Paren) // A tuple struct pattern.
+        // Try to do something more complex?
+        && self.look_ahead(1, |t| !matches!(t.kind, token::OpenDelim(token::Paren) // A tuple struct pattern.
             | token::OpenDelim(token::Brace) // A struct pattern.
             | token::DotDotDot | token::DotDotEq | token::DotDot // A range pattern.
             | token::ModSep // A tuple / struct variant pattern.
-            | token::Not => false, // A macro expanding to a pattern.
-            _ => true,
-        })
+            | token::Not)) // A macro expanding to a pattern.
     }
 
     /// Parses `ident` or `ident @ pat`.
diff --git a/compiler/rustc_parse/src/parser/path.rs b/compiler/rustc_parse/src/parser/path.rs
index 66ce015..79e7374 100644
--- a/compiler/rustc_parse/src/parser/path.rs
+++ b/compiler/rustc_parse/src/parser/path.rs
@@ -187,12 +187,14 @@
     pub(super) fn parse_path_segment(&mut self, style: PathStyle) -> PResult<'a, PathSegment> {
         let ident = self.parse_path_segment_ident()?;
 
-        let is_args_start = |token: &Token| match token.kind {
-            token::Lt
-            | token::BinOp(token::Shl)
-            | token::OpenDelim(token::Paren)
-            | token::LArrow => true,
-            _ => false,
+        let is_args_start = |token: &Token| {
+            matches!(
+                token.kind,
+                token::Lt
+                    | token::BinOp(token::Shl)
+                    | token::OpenDelim(token::Paren)
+                    | token::LArrow
+            )
         };
         let check_args_start = |this: &mut Self| {
             this.expected_tokens.extend_from_slice(&[
@@ -397,6 +399,13 @@
         while let Some(arg) = self.parse_angle_arg()? {
             args.push(arg);
             if !self.eat(&token::Comma) {
+                if !self.token.kind.should_end_const_arg() {
+                    if self.handle_ambiguous_unbraced_const_arg(&mut args)? {
+                        // We've managed to (partially) recover, so continue trying to parse
+                        // arguments.
+                        continue;
+                    }
+                }
                 break;
             }
         }
@@ -476,41 +485,50 @@
         Ok(self.mk_ty(span, ast::TyKind::Err))
     }
 
+    /// We do not permit arbitrary expressions as const arguments. They must be one of:
+    /// - An expression surrounded in `{}`.
+    /// - A literal.
+    /// - A numeric literal prefixed by `-`.
+    pub(super) fn expr_is_valid_const_arg(&self, expr: &P<rustc_ast::Expr>) -> bool {
+        match &expr.kind {
+            ast::ExprKind::Block(_, _) | ast::ExprKind::Lit(_) => true,
+            ast::ExprKind::Unary(ast::UnOp::Neg, expr) => match &expr.kind {
+                ast::ExprKind::Lit(_) => true,
+                _ => false,
+            },
+            _ => false,
+        }
+    }
+
     /// Parse a generic argument in a path segment.
     /// This does not include constraints, e.g., `Item = u8`, which is handled in `parse_angle_arg`.
     fn parse_generic_arg(&mut self) -> PResult<'a, Option<GenericArg>> {
+        let start = self.token.span;
         let arg = if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
             // Parse lifetime argument.
             GenericArg::Lifetime(self.expect_lifetime())
         } else if self.check_const_arg() {
             // Parse const argument.
-            let expr = if let token::OpenDelim(token::Brace) = self.token.kind {
+            let value = if let token::OpenDelim(token::Brace) = self.token.kind {
                 self.parse_block_expr(
                     None,
                     self.token.span,
                     BlockCheckMode::Default,
                     ast::AttrVec::new(),
                 )?
-            } else if self.token.is_ident() {
-                // FIXME(const_generics): to distinguish between idents for types and consts,
-                // we should introduce a GenericArg::Ident in the AST and distinguish when
-                // lowering to the HIR. For now, idents for const args are not permitted.
-                if self.token.is_bool_lit() {
-                    self.parse_literal_maybe_minus()?
-                } else {
-                    let span = self.token.span;
-                    let msg = "identifiers may currently not be used for const generics";
-                    self.struct_span_err(span, msg).emit();
-                    let block = self.mk_block_err(span);
-                    self.mk_expr(span, ast::ExprKind::Block(block, None), ast::AttrVec::new())
-                }
             } else {
-                self.parse_literal_maybe_minus()?
+                self.handle_unambiguous_unbraced_const_arg()?
             };
-            GenericArg::Const(AnonConst { id: ast::DUMMY_NODE_ID, value: expr })
+            GenericArg::Const(AnonConst { id: ast::DUMMY_NODE_ID, value })
         } else if self.check_type() {
             // Parse type argument.
-            GenericArg::Type(self.parse_ty()?)
+            match self.parse_ty() {
+                Ok(ty) => GenericArg::Type(ty),
+                Err(err) => {
+                    // Try to recover from possible `const` arg without braces.
+                    return self.recover_const_arg(start, err).map(Some);
+                }
+            }
         } else {
             return Ok(None);
         };
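`expr_is_valid_const_arg` above spells out which const arguments may appear without braces: a literal, or a numeric literal prefixed by `-`; anything else has to be wrapped in `{ ... }`, and the new `recover_const_arg` path exists to steer users toward that. A small sketch with illustrative const-generic types (const generics were still behind `min_const_generics` when this was imported; the sketch as written builds on later stable toolchains):

    struct Buf<const N: usize>([u8; N]);
    struct Offset<const I: i64>;

    type A = Buf<16>;          // plain literal: accepted unbraced
    type B = Offset<-1>;       // `-` plus a numeric literal: accepted unbraced
    type C = Buf<{ 8 * 2 }>;   // any other expression must be braced
    // type D = Buf<8 * 2>;    // unbraced expression: rejected; recovery points toward braces

    fn main() {}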
diff --git a/compiler/rustc_parse/src/parser/stmt.rs b/compiler/rustc_parse/src/parser/stmt.rs
index fd1c6b2..131ff1a 100644
--- a/compiler/rustc_parse/src/parser/stmt.rs
+++ b/compiler/rustc_parse/src/parser/stmt.rs
@@ -1,5 +1,5 @@
 use super::attr::DEFAULT_INNER_ATTR_FORBIDDEN;
-use super::diagnostics::Error;
+use super::diagnostics::{AttemptLocalParseRecovery, Error};
 use super::expr::LhsExpr;
 use super::pat::GateOr;
 use super::path::PathStyle;
@@ -79,8 +79,8 @@
             return self.parse_stmt_mac(lo, attrs.into(), path);
         }
 
-        let expr = if self.check(&token::OpenDelim(token::Brace)) {
-            self.parse_struct_expr(path, AttrVec::new())?
+        let expr = if self.eat(&token::OpenDelim(token::Brace)) {
+            self.parse_struct_expr(path, AttrVec::new(), true)?
         } else {
             let hi = self.prev_token.span;
             self.mk_expr(lo.to(hi), ExprKind::Path(None, path), AttrVec::new())
@@ -321,25 +321,37 @@
             return self.error_block_no_opening_brace();
         }
 
-        Ok((self.parse_inner_attributes()?, self.parse_block_tail(lo, blk_mode)?))
+        let attrs = self.parse_inner_attributes()?;
+        let tail = if let Some(tail) = self.maybe_suggest_struct_literal(lo, blk_mode) {
+            tail?
+        } else {
+            self.parse_block_tail(lo, blk_mode, AttemptLocalParseRecovery::Yes)?
+        };
+        Ok((attrs, tail))
     }
 
     /// Parses the rest of a block expression or function body.
     /// Precondition: already parsed the '{'.
-    fn parse_block_tail(&mut self, lo: Span, s: BlockCheckMode) -> PResult<'a, P<Block>> {
+    crate fn parse_block_tail(
+        &mut self,
+        lo: Span,
+        s: BlockCheckMode,
+        recover: AttemptLocalParseRecovery,
+    ) -> PResult<'a, P<Block>> {
         let mut stmts = vec![];
         while !self.eat(&token::CloseDelim(token::Brace)) {
             if self.token == token::Eof {
                 break;
             }
-            let stmt = match self.parse_full_stmt() {
-                Err(mut err) => {
+            let stmt = match self.parse_full_stmt(recover) {
+                Err(mut err) if recover.yes() => {
                     self.maybe_annotate_with_ascription(&mut err, false);
                     err.emit();
                     self.recover_stmt_(SemiColonMode::Ignore, BlockMode::Ignore);
                     Some(self.mk_stmt_err(self.token.span))
                 }
                 Ok(stmt) => stmt,
+                Err(err) => return Err(err),
             };
             if let Some(stmt) = stmt {
                 stmts.push(stmt);
@@ -352,7 +364,10 @@
     }
 
     /// Parses a statement, including the trailing semicolon.
-    pub fn parse_full_stmt(&mut self) -> PResult<'a, Option<Stmt>> {
+    pub fn parse_full_stmt(
+        &mut self,
+        recover: AttemptLocalParseRecovery,
+    ) -> PResult<'a, Option<Stmt>> {
         // Skip looking for a trailing semicolon when we have an interpolated statement.
         maybe_whole!(self, NtStmt, |x| Some(x));
 
@@ -391,6 +406,9 @@
                     if let Err(mut e) =
                         self.check_mistyped_turbofish_with_multiple_type_params(e, expr)
                     {
+                        if recover.no() {
+                            return Err(e);
+                        }
                         e.emit();
                         self.recover_stmt();
                     }
@@ -432,7 +450,7 @@
         Stmt { id: DUMMY_NODE_ID, kind, span, tokens: None }
     }
 
-    fn mk_stmt_err(&self, span: Span) -> Stmt {
+    pub(super) fn mk_stmt_err(&self, span: Span) -> Stmt {
         self.mk_stmt(span, StmtKind::Expr(self.mk_expr_err(span)))
     }
 
diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs
index d42a786..7a6ebca 100644
--- a/compiler/rustc_parse/src/parser/ty.rs
+++ b/compiler/rustc_parse/src/parser/ty.rs
@@ -265,7 +265,19 @@
     /// Parses an array (`[TYPE; EXPR]`) or slice (`[TYPE]`) type.
     /// The opening `[` bracket is already eaten.
     fn parse_array_or_slice_ty(&mut self) -> PResult<'a, TyKind> {
-        let elt_ty = self.parse_ty()?;
+        let elt_ty = match self.parse_ty() {
+            Ok(ty) => ty,
+            Err(mut err)
+                if self.look_ahead(1, |t| t.kind == token::CloseDelim(token::Bracket))
+                    | self.look_ahead(1, |t| t.kind == token::Semi) =>
+            {
+                // Recover from `[LIT; EXPR]` and `[LIT]`
+                self.bump();
+                err.emit();
+                self.mk_ty(self.prev_token.span, TyKind::Err)
+            }
+            Err(err) => return Err(err),
+        };
         let ty = if self.eat(&token::Semi) {
             TyKind::Array(elt_ty, self.parse_anon_const_expr()?)
         } else {
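The ty.rs hunk above recovers when a literal appears where an array or slice element type was expected (`[LIT]` or `[LIT; EXPR]`), emitting one error and substituting `TyKind::Err` instead of cascading. A sketch of the shape it targets; the invalid line is kept as a comment so the snippet still builds:

    // Invalid: a literal in element-type position.
    //     fn head(xs: [5]) -> u8 { xs[0] }    // now one targeted error, parsing continues
    // Valid spelling of the same idea:
    fn head(xs: [u8; 5]) -> u8 {
        xs[0]
    }

    fn main() {
        assert_eq!(head([1, 2, 3, 4, 5]), 1);
    }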
diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs
index b52216c..7679582 100644
--- a/compiler/rustc_passes/src/check_attr.rs
+++ b/compiler/rustc_passes/src/check_attr.rs
@@ -13,12 +13,14 @@
 use rustc_hir as hir;
 use rustc_hir::def_id::LocalDefId;
 use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
-use rustc_hir::{self, FnSig, ForeignItem, ForeignItemKind, HirId, Item, ItemKind, TraitItem};
+use rustc_hir::{
+    self, FnSig, ForeignItem, ForeignItemKind, HirId, Item, ItemKind, TraitItem, CRATE_HIR_ID,
+};
 use rustc_hir::{MethodKind, Target};
 use rustc_session::lint::builtin::{CONFLICTING_REPR_HINTS, UNUSED_ATTRIBUTES};
 use rustc_session::parse::feature_err;
-use rustc_span::symbol::sym;
-use rustc_span::Span;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{Span, DUMMY_SP};
 
 pub(crate) fn target_from_impl_item<'tcx>(
     tcx: TyCtxt<'tcx>,
@@ -83,6 +85,10 @@
                 self.check_export_name(&attr, span, target)
             } else if self.tcx.sess.check_name(attr, sym::rustc_args_required_const) {
                 self.check_rustc_args_required_const(&attr, span, target, item)
+            } else if self.tcx.sess.check_name(attr, sym::allow_internal_unstable) {
+                self.check_allow_internal_unstable(&attr, span, target, &attrs)
+            } else if self.tcx.sess.check_name(attr, sym::rustc_allow_const_fn_unstable) {
+                self.check_rustc_allow_const_fn_unstable(hir_id, &attr, span, target)
             } else {
                 // lint-only checks
                 if self.tcx.sess.check_name(attr, sym::cold) {
@@ -102,7 +108,7 @@
             return;
         }
 
-        if matches!(target, Target::Fn | Target::Method(_) | Target::ForeignFn) {
+        if matches!(target, Target::Closure | Target::Fn | Target::Method(_) | Target::ForeignFn) {
             self.tcx.ensure().codegen_fn_attrs(self.tcx.hir().local_def_id(hir_id));
         }
 
@@ -193,7 +199,7 @@
     /// Checks if the `#[non_exhaustive]` attribute on an `item` is valid. Returns `true` if valid.
     fn check_non_exhaustive(&self, attr: &Attribute, span: &Span, target: Target) -> bool {
         match target {
-            Target::Struct | Target::Enum => true,
+            Target::Struct | Target::Enum | Target::Variant => true,
             _ => {
                 struct_span_err!(
                     self.tcx.sess,
@@ -285,8 +291,9 @@
                             self.doc_alias_str_error(meta);
                             return false;
                         }
-                        if let Some(c) =
-                            doc_alias.chars().find(|&c| c == '"' || c == '\'' || c.is_whitespace())
+                        if let Some(c) = doc_alias
+                            .chars()
+                            .find(|&c| c == '"' || c == '\'' || (c.is_whitespace() && c != ' '))
                         {
                             self.tcx
                                 .sess
@@ -300,6 +307,16 @@
                                 .emit();
                             return false;
                         }
+                        if doc_alias.starts_with(' ') || doc_alias.ends_with(' ') {
+                            self.tcx
+                                .sess
+                                .struct_span_err(
+                                    meta.span(),
+                                    "`#[doc(alias = \"...\")]` cannot start or end with ' '",
+                                )
+                                .emit();
+                            return false;
+                        }
                         if let Some(err) = match target {
                             Target::Impl => Some("implementation block"),
                             Target::ForeignMod => Some("extern block"),
@@ -333,6 +350,17 @@
                                 .emit();
                             return false;
                         }
+                        if CRATE_HIR_ID == hir_id {
+                            self.tcx
+                                .sess
+                                .struct_span_err(
+                                    meta.span(),
+                                    "`#![doc(alias = \"...\")]` isn't allowed as a crate \
+                                     level attribute",
+                                )
+                                .emit();
+                            return false;
+                        }
                     }
                 }
             }
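Taken together, the doc-alias checks above allow interior spaces in an alias but still reject quote characters, any other whitespace, leading or trailing spaces, and crate-level use. A short sketch with illustrative alias strings; the rejected forms stay commented out so the snippet compiles:

    /// Accepted: interior spaces are fine after this change.
    #[doc(alias = "memory map")]
    pub fn mmap() {}

    // Rejected by the checks above:
    // #[doc(alias = " mmap")]     // cannot start or end with ' '
    // #[doc(alias = "m\tmap")]    // whitespace other than ' '
    // #![doc(alias = "...")]      // not allowed as a crate-level attribute

    fn main() {}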
@@ -563,6 +591,9 @@
 
         for hint in &hints {
             let (article, allowed_targets) = match hint.name_or_empty() {
+                _ if !matches!(target, Target::Struct | Target::Enum | Target::Union) => {
+                    ("a", "struct, enum, or union")
+                }
                 name @ sym::C | name @ sym::align => {
                     is_c |= name == sym::C;
                     match target {
@@ -628,12 +659,16 @@
                 }
                 _ => continue,
             };
-            self.emit_repr_error(
+
+            struct_span_err!(
+                self.tcx.sess,
                 hint.span(),
-                *span,
-                &format!("attribute should be applied to {}", allowed_targets),
-                &format!("not {} {}", article, allowed_targets),
+                E0517,
+                "{}",
+                &format!("attribute should be applied to {} {}", article, allowed_targets)
             )
+            .span_label(*span, &format!("not {} {}", article, allowed_targets))
+            .emit();
         }
 
         // Just point at all repr hints if there are any incompatibilities.
@@ -679,56 +714,6 @@
         }
     }
 
-    fn emit_repr_error(
-        &self,
-        hint_span: Span,
-        label_span: Span,
-        hint_message: &str,
-        label_message: &str,
-    ) {
-        struct_span_err!(self.tcx.sess, hint_span, E0517, "{}", hint_message)
-            .span_label(label_span, label_message)
-            .emit();
-    }
-
-    fn check_stmt_attributes(&self, stmt: &hir::Stmt<'_>) {
-        // When checking statements ignore expressions, they will be checked later
-        if let hir::StmtKind::Local(ref l) = stmt.kind {
-            self.check_attributes(l.hir_id, &l.attrs, &stmt.span, Target::Statement, None);
-            for attr in l.attrs.iter() {
-                if self.tcx.sess.check_name(attr, sym::repr) {
-                    self.emit_repr_error(
-                        attr.span,
-                        stmt.span,
-                        "attribute should not be applied to a statement",
-                        "not a struct, enum, or union",
-                    );
-                }
-            }
-        }
-    }
-
-    fn check_expr_attributes(&self, expr: &hir::Expr<'_>) {
-        let target = match expr.kind {
-            hir::ExprKind::Closure(..) => Target::Closure,
-            _ => Target::Expression,
-        };
-        self.check_attributes(expr.hir_id, &expr.attrs, &expr.span, target, None);
-        for attr in expr.attrs.iter() {
-            if self.tcx.sess.check_name(attr, sym::repr) {
-                self.emit_repr_error(
-                    attr.span,
-                    expr.span,
-                    "attribute should not be applied to an expression",
-                    "not defining a struct, enum, or union",
-                );
-            }
-        }
-        if target == Target::Closure {
-            self.tcx.ensure().codegen_fn_attrs(self.tcx.hir().local_def_id(expr.hir_id));
-        }
-    }
-
     fn check_used(&self, attrs: &'hir [Attribute], target: Target) {
         for attr in attrs {
             if self.tcx.sess.check_name(attr, sym::used) && target != Target::Static {
@@ -738,6 +723,55 @@
             }
         }
     }
+
+    /// Outputs an error for `#[allow_internal_unstable]` which can only be applied to macros.
+    /// (Allows proc_macro functions)
+    fn check_allow_internal_unstable(
+        &self,
+        attr: &Attribute,
+        span: &Span,
+        target: Target,
+        attrs: &[Attribute],
+    ) -> bool {
+        debug!("Checking target: {:?}", target);
+        if target == Target::Fn {
+            for attr in attrs {
+                if self.tcx.sess.is_proc_macro_attr(attr) {
+                    debug!("Is proc macro attr");
+                    return true;
+                }
+            }
+            debug!("Is not proc macro attr");
+        }
+        self.tcx
+            .sess
+            .struct_span_err(attr.span, "attribute should be applied to a macro")
+            .span_label(*span, "not a macro")
+            .emit();
+        false
+    }
+
+    /// Outputs an error for `#[rustc_allow_const_fn_unstable]` which can only be applied to
+    /// `const fn`s.

+    fn check_rustc_allow_const_fn_unstable(
+        &self,
+        hir_id: HirId,
+        attr: &Attribute,
+        span: &Span,
+        target: Target,
+    ) -> bool {
+        if let Target::Fn | Target::Method(_) = target {
+            if self.tcx.is_const_fn_raw(self.tcx.hir().local_def_id(hir_id)) {
+                return true;
+            }
+        }
+        self.tcx
+            .sess
+            .struct_span_err(attr.span, "attribute should be applied to `const fn`")
+            .span_label(*span, "not a `const fn`")
+            .emit();
+        false
+    }
 }
 
 impl Visitor<'tcx> for CheckAttrVisitor<'tcx> {
@@ -784,14 +818,32 @@
     }
 
     fn visit_stmt(&mut self, stmt: &'tcx hir::Stmt<'tcx>) {
-        self.check_stmt_attributes(stmt);
+        // When checking statements ignore expressions, they will be checked later.
+        if let hir::StmtKind::Local(ref l) = stmt.kind {
+            self.check_attributes(l.hir_id, &l.attrs, &stmt.span, Target::Statement, None);
+        }
         intravisit::walk_stmt(self, stmt)
     }
 
     fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
-        self.check_expr_attributes(expr);
+        let target = match expr.kind {
+            hir::ExprKind::Closure(..) => Target::Closure,
+            _ => Target::Expression,
+        };
+
+        self.check_attributes(expr.hir_id, &expr.attrs, &expr.span, target, None);
         intravisit::walk_expr(self, expr)
     }
+
+    fn visit_variant(
+        &mut self,
+        variant: &'tcx hir::Variant<'tcx>,
+        generics: &'tcx hir::Generics<'tcx>,
+        item_id: HirId,
+    ) {
+        self.check_attributes(variant.id, variant.attrs, &variant.span, Target::Variant, None);
+        intravisit::walk_variant(self, variant, generics, item_id)
+    }
 }
 
 fn is_c_like_enum(item: &Item<'_>) -> bool {
@@ -808,9 +860,46 @@
     }
 }
 
+fn check_invalid_crate_level_attr(tcx: TyCtxt<'_>, attrs: &[Attribute]) {
+    const ATTRS_TO_CHECK: &[Symbol] = &[
+        sym::macro_export,
+        sym::repr,
+        sym::path,
+        sym::automatically_derived,
+        sym::start,
+        sym::main,
+    ];
+
+    for attr in attrs {
+        for attr_to_check in ATTRS_TO_CHECK {
+            if tcx.sess.check_name(attr, *attr_to_check) {
+                tcx.sess
+                    .struct_span_err(
+                        attr.span,
+                        &format!(
+                            "`{}` attribute cannot be used at crate level",
+                            attr_to_check.to_ident_string()
+                        ),
+                    )
+                    .emit();
+            }
+        }
+    }
+}
+
 fn check_mod_attrs(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
     tcx.hir()
         .visit_item_likes_in_module(module_def_id, &mut CheckAttrVisitor { tcx }.as_deep_visitor());
+    if module_def_id.is_top_level_module() {
+        CheckAttrVisitor { tcx }.check_attributes(
+            CRATE_HIR_ID,
+            tcx.hir().krate_attrs(),
+            &DUMMY_SP,
+            Target::Mod,
+            None,
+        );
+        check_invalid_crate_level_attr(tcx, tcx.hir().krate_attrs());
+    }
 }
 
 pub(crate) fn provide(providers: &mut Providers) {
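Two of the check_attr.rs changes above are visible from ordinary code: the new `visit_variant` override routes enum-variant attributes through the checker, with `Target::Variant` accepted for `#[non_exhaustive]`, and `check_invalid_crate_level_attr` rejects attributes such as `#![repr(..)]`, `#![main]`, or `#![macro_export]` at the crate root. A sketch of the variant form (names illustrative):

    #[non_exhaustive]
    pub enum Error {
        #[non_exhaustive]
        Io { code: i32 },   // variant-level #[non_exhaustive] passes the new check
        Other,
    }

    fn main() {}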
diff --git a/compiler/rustc_passes/src/check_const.rs b/compiler/rustc_passes/src/check_const.rs
index dd0bcbf..b24c62b 100644
--- a/compiler/rustc_passes/src/check_const.rs
+++ b/compiler/rustc_passes/src/check_const.rs
@@ -87,7 +87,7 @@
 
         let is_feature_allowed = |feature_gate| {
             // All features require that the corresponding gate be enabled,
-            // even if the function has `#[allow_internal_unstable(the_gate)]`.
+            // even if the function has `#[rustc_allow_const_fn_unstable(the_gate)]`.
             if !tcx.features().enabled(feature_gate) {
                 return false;
             }
@@ -105,8 +105,8 @@
             }
 
             // However, we cannot allow stable `const fn`s to use unstable features without an explicit
-            // opt-in via `allow_internal_unstable`.
-            attr::allow_internal_unstable(&tcx.sess, &tcx.get_attrs(def_id))
+            // opt-in via `rustc_allow_const_fn_unstable`.
+            attr::rustc_allow_const_fn_unstable(&tcx.sess, &tcx.get_attrs(def_id))
                 .map_or(false, |mut features| features.any(|name| name == feature_gate))
         };
 
diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs
index 98ded41..f567dd8 100644
--- a/compiler/rustc_passes/src/dead.rs
+++ b/compiler/rustc_passes/src/dead.rs
@@ -458,8 +458,8 @@
         .map
         .iter()
         .filter_map(
-            |(&id, level)| {
-                if level >= &privacy::AccessLevel::Reachable { Some(id) } else { None }
+            |(&id, &level)| {
+                if level >= privacy::AccessLevel::Reachable { Some(id) } else { None }
             },
         )
         .chain(
@@ -547,7 +547,7 @@
         let def_id = self.tcx.hir().local_def_id(id);
         let inherent_impls = self.tcx.inherent_impls(def_id);
         for &impl_did in inherent_impls.iter() {
-            for &item_did in &self.tcx.associated_item_def_ids(impl_did)[..] {
+            for item_did in self.tcx.associated_item_def_ids(impl_did) {
                 if let Some(did) = item_did.as_local() {
                     let item_hir_id = self.tcx.hir().local_def_id_to_hir_id(did);
                     if self.live_symbols.contains(&item_hir_id) {
diff --git a/compiler/rustc_passes/src/diagnostic_items.rs b/compiler/rustc_passes/src/diagnostic_items.rs
index 9459293..0f4aa72 100644
--- a/compiler/rustc_passes/src/diagnostic_items.rs
+++ b/compiler/rustc_passes/src/diagnostic_items.rs
@@ -102,7 +102,7 @@
     tcx.hir().krate().visit_all_item_likes(&mut collector);
     // FIXME(visit_all_item_likes): Foreign items are not visited
     // here, so we have to manually look at them for now.
-    for foreign_module in tcx.foreign_modules(LOCAL_CRATE) {
+    for (_, foreign_module) in tcx.foreign_modules(LOCAL_CRATE).iter() {
         for &foreign_item in foreign_module.foreign_items.iter() {
             match tcx.hir().get(tcx.hir().local_def_id_to_hir_id(foreign_item.expect_local())) {
                 hir::Node::ForeignItem(item) => {
diff --git a/compiler/rustc_passes/src/entry.rs b/compiler/rustc_passes/src/entry.rs
index 8aa6e79..e87adb37 100644
--- a/compiler/rustc_passes/src/entry.rs
+++ b/compiler/rustc_passes/src/entry.rs
@@ -78,29 +78,38 @@
 // Beware, this is duplicated in `librustc_builtin_macros/test_harness.rs`
 // (with `ast::Item`), so make sure to keep them in sync.
 fn entry_point_type(sess: &Session, item: &Item<'_>, at_root: bool) -> EntryPointType {
-    match item.kind {
-        ItemKind::Fn(..) => {
-            if sess.contains_name(&item.attrs, sym::start) {
-                EntryPointType::Start
-            } else if sess.contains_name(&item.attrs, sym::main) {
-                EntryPointType::MainAttr
-            } else if item.ident.name == sym::main {
-                if at_root {
-                    // This is a top-level function so can be `main`.
-                    EntryPointType::MainNamed
-                } else {
-                    EntryPointType::OtherMain
-                }
-            } else {
-                EntryPointType::None
-            }
+    if sess.contains_name(&item.attrs, sym::start) {
+        EntryPointType::Start
+    } else if sess.contains_name(&item.attrs, sym::main) {
+        EntryPointType::MainAttr
+    } else if item.ident.name == sym::main {
+        if at_root {
+            // This is a top-level function so can be `main`.
+            EntryPointType::MainNamed
+        } else {
+            EntryPointType::OtherMain
         }
-        _ => EntryPointType::None,
+    } else {
+        EntryPointType::None
     }
 }
 
+fn throw_attr_err(sess: &Session, span: Span, attr: &str) {
+    sess.struct_span_err(span, &format!("`{}` attribute can only be used on functions", attr))
+        .emit();
+}
+
 fn find_item(item: &Item<'_>, ctxt: &mut EntryContext<'_, '_>, at_root: bool) {
     match entry_point_type(&ctxt.session, item, at_root) {
+        EntryPointType::None => (),
+        _ if !matches!(item.kind, ItemKind::Fn(..)) => {
+            if let Some(attr) = ctxt.session.find_by_name(item.attrs, sym::start) {
+                throw_attr_err(&ctxt.session, attr.span, "start");
+            }
+            if let Some(attr) = ctxt.session.find_by_name(item.attrs, sym::main) {
+                throw_attr_err(&ctxt.session, attr.span, "main");
+            }
+        }
         EntryPointType::MainNamed => {
             if ctxt.main_fn.is_none() {
                 ctxt.main_fn = Some((item.hir_id, item.span));
@@ -137,7 +146,6 @@
                     .emit();
             }
         }
-        EntryPointType::None => (),
     }
 }
 
diff --git a/compiler/rustc_passes/src/hir_id_validator.rs b/compiler/rustc_passes/src/hir_id_validator.rs
index 24695f5..6d1a5fc 100644
--- a/compiler/rustc_passes/src/hir_id_validator.rs
+++ b/compiler/rustc_passes/src/hir_id_validator.rs
@@ -163,4 +163,17 @@
         // we are currently in. So for those it's correct that they have a
         // different owner.
     }
+
+    fn visit_generic_param(&mut self, param: &'hir hir::GenericParam<'hir>) {
+        if let hir::GenericParamKind::Type {
+            synthetic: Some(hir::SyntheticTyParamKind::ImplTrait),
+            ..
+        } = param.kind
+        {
+            // Synthetic impl trait parameters are owned by the node of the desugared type.
+            // This means it is correct for them to have a different owner.
+        } else {
+            intravisit::walk_generic_param(self, param);
+        }
+    }
 }
diff --git a/compiler/rustc_passes/src/hir_stats.rs b/compiler/rustc_passes/src/hir_stats.rs
index 9537321..1d02c9a 100644
--- a/compiler/rustc_passes/src/hir_stats.rs
+++ b/compiler/rustc_passes/src/hir_stats.rs
@@ -336,8 +336,9 @@
         ast_visit::walk_lifetime(self, lifetime)
     }
 
-    fn visit_mac(&mut self, mac: &'v ast::MacCall) {
+    fn visit_mac_call(&mut self, mac: &'v ast::MacCall) {
         self.record("MacCall", Id::None, mac);
+        ast_visit::walk_mac(self, mac)
     }
 
     fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v ast::PathSegment) {
diff --git a/compiler/rustc_passes/src/intrinsicck.rs b/compiler/rustc_passes/src/intrinsicck.rs
index 79f1c2b..956be92 100644
--- a/compiler/rustc_passes/src/intrinsicck.rs
+++ b/compiler/rustc_passes/src/intrinsicck.rs
@@ -143,7 +143,7 @@
     ) -> Option<InlineAsmType> {
         // Check the type against the allowed types for inline asm.
         let ty = self.typeck_results.expr_ty_adjusted(expr);
-        let asm_ty_isize = match self.tcx.sess.target.ptr_width {
+        let asm_ty_isize = match self.tcx.sess.target.pointer_width {
             16 => InlineAsmType::I16,
             32 => InlineAsmType::I32,
             64 => InlineAsmType::I64,
@@ -184,7 +184,7 @@
                         Some(InlineAsmType::VecI128(fields.len() as u64))
                     }
                     ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => {
-                        Some(match self.tcx.sess.target.ptr_width {
+                        Some(match self.tcx.sess.target.pointer_width {
                             16 => InlineAsmType::VecI16(fields.len() as u64),
                             32 => InlineAsmType::VecI32(fields.len() as u64),
                             64 => InlineAsmType::VecI64(fields.len() as u64),
diff --git a/compiler/rustc_passes/src/liveness.rs b/compiler/rustc_passes/src/liveness.rs
index e8b97d7..7288015 100644
--- a/compiler/rustc_passes/src/liveness.rs
+++ b/compiler/rustc_passes/src/liveness.rs
@@ -432,6 +432,7 @@
             | hir::ExprKind::Break(..)
             | hir::ExprKind::Continue(_)
             | hir::ExprKind::Lit(_)
+            | hir::ExprKind::ConstBlock(..)
             | hir::ExprKind::Ret(..)
             | hir::ExprKind::Block(..)
             | hir::ExprKind::Assign(..)
@@ -1173,7 +1174,7 @@
                             }
                         }
                         hir::InlineAsmOperand::InOut { expr, .. } => {
-                            succ = self.write_place(expr, succ, ACC_READ | ACC_WRITE);
+                            succ = self.write_place(expr, succ, ACC_READ | ACC_WRITE | ACC_USE);
                         }
                         hir::InlineAsmOperand::SplitInOut { out_expr, .. } => {
                             if let Some(expr) = out_expr {
@@ -1232,6 +1233,7 @@
             }
 
             hir::ExprKind::Lit(..)
+            | hir::ExprKind::ConstBlock(..)
             | hir::ExprKind::Err
             | hir::ExprKind::Path(hir::QPath::TypeRelative(..))
             | hir::ExprKind::Path(hir::QPath::LangItem(..)) => succ,
@@ -1478,6 +1480,7 @@
         | hir::ExprKind::Break(..)
         | hir::ExprKind::Continue(..)
         | hir::ExprKind::Lit(_)
+        | hir::ExprKind::ConstBlock(..)
         | hir::ExprKind::Block(..)
         | hir::ExprKind::AddrOf(..)
         | hir::ExprKind::Struct(..)
diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs
index 1378b0d..04b5c65 100644
--- a/compiler/rustc_passes/src/stability.rs
+++ b/compiler/rustc_passes/src/stability.rs
@@ -13,15 +13,13 @@
 use rustc_middle::hir::map::Map;
 use rustc_middle::middle::privacy::AccessLevels;
 use rustc_middle::middle::stability::{DeprecationEntry, Index};
-use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{self, query::Providers, TyCtxt};
 use rustc_session::lint;
-use rustc_session::lint::builtin::INEFFECTIVE_UNSTABLE_TRAIT_IMPL;
+use rustc_session::lint::builtin::{INEFFECTIVE_UNSTABLE_TRAIT_IMPL, USELESS_DEPRECATED};
 use rustc_session::parse::feature_err;
 use rustc_session::Session;
 use rustc_span::symbol::{sym, Symbol};
-use rustc_span::Span;
-use rustc_trait_selection::traits::misc::can_type_implement_copy;
+use rustc_span::{Span, DUMMY_SP};
 
 use std::cmp::Ordering;
 use std::mem::replace;
@@ -33,6 +31,8 @@
     Required,
     // Annotation is useless, reject it
     Prohibited,
+    // Deprecation annotation is useless, reject it. (Stability attribute is still required.)
+    DeprecationProhibited,
     // Annotation itself is useless, but it can be propagated to children
     Container,
 }
@@ -85,14 +85,22 @@
             did_error = self.forbid_staged_api_attrs(hir_id, attrs, inherit_deprecation.clone());
         }
 
-        let depr =
-            if did_error { None } else { attr::find_deprecation(&self.tcx.sess, attrs, item_sp) };
+        let depr = if did_error { None } else { attr::find_deprecation(&self.tcx.sess, attrs) };
         let mut is_deprecated = false;
-        if let Some(depr) = &depr {
+        if let Some((depr, span)) = &depr {
             is_deprecated = true;
 
-            if kind == AnnotationKind::Prohibited {
-                self.tcx.sess.span_err(item_sp, "This deprecation annotation is useless");
+            if kind == AnnotationKind::Prohibited || kind == AnnotationKind::DeprecationProhibited {
+                self.tcx.struct_span_lint_hir(USELESS_DEPRECATED, hir_id, *span, |lint| {
+                    lint.build("this `#[deprecated]` annotation has no effect")
+                        .span_suggestion_short(
+                            *span,
+                            "remove the unnecessary deprecation attribute",
+                            String::new(),
+                            rustc_errors::Applicability::MachineApplicable,
+                        )
+                        .emit()
+                });
             }
 
             // `Deprecation` is just two pointers, no need to intern it
@@ -116,7 +124,7 @@
             }
         } else {
             self.recurse_with_stability_attrs(
-                depr.map(|d| DeprecationEntry::local(d, hir_id)),
+                depr.map(|(d, _)| DeprecationEntry::local(d, hir_id)),
                 None,
                 None,
                 visit_children,
@@ -141,11 +149,11 @@
             }
         }
 
-        if depr.as_ref().map_or(false, |d| d.is_since_rustc_version) {
+        if let Some((rustc_attr::Deprecation { is_since_rustc_version: true, .. }, span)) = &depr {
             if stab.is_none() {
                 struct_span_err!(
                     self.tcx.sess,
-                    item_sp,
+                    *span,
                     E0549,
                     "rustc_deprecated attribute must be paired with \
                     either stable or unstable attribute"
@@ -168,7 +176,7 @@
             // Check if deprecated_since < stable_since. If it is,
             // this is *almost surely* an accident.
             if let (&Some(dep_since), &attr::Stable { since: stab_since }) =
-                (&depr.as_ref().and_then(|d| d.since), &stab.level)
+                (&depr.as_ref().and_then(|(d, _)| d.since), &stab.level)
             {
                 // Explicit version of iter::order::lt to handle parse errors properly
                 for (dep_v, stab_v) in
@@ -214,7 +222,7 @@
         }
 
         self.recurse_with_stability_attrs(
-            depr.map(|d| DeprecationEntry::local(d, hir_id)),
+            depr.map(|(d, _)| DeprecationEntry::local(d, hir_id)),
             stab,
             const_stab,
             visit_children,
@@ -324,6 +332,7 @@
             }
             hir::ItemKind::Impl { of_trait: Some(_), .. } => {
                 self.in_trait_impl = true;
+                kind = AnnotationKind::DeprecationProhibited;
             }
             hir::ItemKind::Struct(ref sd, _) => {
                 if let Some(ctor_hir_id) = sd.ctor_hir_id() {
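`AnnotationKind::DeprecationProhibited` is set for trait impls: a `#[deprecated]` there has no effect on users of the trait, so instead of the old hard error it now fires the `useless_deprecated` lint with a machine-applicable removal suggestion. A sketch; the offending attribute is left commented out so the snippet builds:

    pub struct Wrapper(pub i32);

    // Uncommenting the attribute below triggers the lint path added above:
    // "this `#[deprecated]` annotation has no effect".
    // #[deprecated = "has no effect here"]
    impl Default for Wrapper {
        fn default() -> Self {
            Wrapper(0)
        }
    }

    fn main() {
        assert_eq!(Wrapper::default().0, 0);
    }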
@@ -711,27 +720,35 @@
             // so semi-randomly perform it here in stability.rs
             hir::ItemKind::Union(..) if !self.tcx.features().untagged_unions => {
                 let def_id = self.tcx.hir().local_def_id(item.hir_id);
-                let adt_def = self.tcx.adt_def(def_id);
                 let ty = self.tcx.type_of(def_id);
+                let (adt_def, substs) = match ty.kind() {
+                    ty::Adt(adt_def, substs) => (adt_def, substs),
+                    _ => bug!(),
+                };
 
-                if adt_def.has_dtor(self.tcx) {
-                    feature_err(
-                        &self.tcx.sess.parse_sess,
-                        sym::untagged_unions,
-                        item.span,
-                        "unions with `Drop` implementations are unstable",
-                    )
-                    .emit();
-                } else {
-                    let param_env = self.tcx.param_env(def_id);
-                    if can_type_implement_copy(self.tcx, param_env, ty).is_err() {
-                        feature_err(
-                            &self.tcx.sess.parse_sess,
-                            sym::untagged_unions,
-                            item.span,
-                            "unions with non-`Copy` fields are unstable",
-                        )
-                        .emit();
+                // Non-`Copy` fields are unstable, except for `ManuallyDrop`.
+                let param_env = self.tcx.param_env(def_id);
+                for field in &adt_def.non_enum_variant().fields {
+                    let field_ty = field.ty(self.tcx, substs);
+                    if !field_ty.ty_adt_def().map_or(false, |adt_def| adt_def.is_manually_drop())
+                        && !field_ty.is_copy_modulo_regions(self.tcx.at(DUMMY_SP), param_env)
+                    {
+                        if field_ty.needs_drop(self.tcx, param_env) {
+                            // Avoid duplicate error: This will error later anyway because fields
+                            // that need drop are not allowed.
+                            self.tcx.sess.delay_span_bug(
+                                item.span,
+                                "union should have been rejected due to potentially dropping field",
+                            );
+                        } else {
+                            feature_err(
+                                &self.tcx.sess.parse_sess,
+                                sym::untagged_unions,
+                                self.tcx.def_span(field.did),
+                                "unions with non-`Copy` fields other than `ManuallyDrop<T>` are unstable",
+                            )
+                            .emit();
+                        }
                     }
                 }
             }
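The rewritten union check above goes field by field: `Copy` fields and `ManuallyDrop<_>` fields pass, anything else still needs the `untagged_unions` gate, and the diagnostic now points at the offending field rather than the whole item. A sketch of what is accepted after this change:

    use std::mem::ManuallyDrop;

    union Slot {
        raw: u64,                       // Copy field: always allowed
        boxed: ManuallyDrop<Box<u64>>,  // non-Copy but ManuallyDrop: allowed by this change
        // bad: String,                 // non-Copy, non-ManuallyDrop: still feature-gated
    }

    fn main() {
        let s = Slot { raw: 7 };
        unsafe { assert_eq!(s.raw, 7) };
    }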
diff --git a/compiler/rustc_passes/src/weak_lang_items.rs b/compiler/rustc_passes/src/weak_lang_items.rs
index 6bc2110..4273d60 100644
--- a/compiler/rustc_passes/src/weak_lang_items.rs
+++ b/compiler/rustc_passes/src/weak_lang_items.rs
@@ -26,7 +26,7 @@
     if items.eh_personality().is_none() {
         items.missing.push(LangItem::EhPersonality);
     }
-    if tcx.sess.target.target.options.is_like_emscripten && items.eh_catch_typeinfo().is_none() {
+    if tcx.sess.target.is_like_emscripten && items.eh_catch_typeinfo().is_none() {
         items.missing.push(LangItem::EhCatchTypeinfo);
     }
 
@@ -64,7 +64,10 @@
             if item == LangItem::PanicImpl {
                 tcx.sess.err("`#[panic_handler]` function required, but not found");
             } else if item == LangItem::Oom {
-                tcx.sess.err("`#[alloc_error_handler]` function required, but not found");
+                if !tcx.features().default_alloc_error_handler {
+                    tcx.sess.err("`#[alloc_error_handler]` function required, but not found.");
+                    tcx.sess.note_without_error("Use `#![feature(default_alloc_error_handler)]` for a default error handler.");
+                }
             } else {
                 tcx.sess.err(&format!("language item required, but not found: `{}`", name));
             }
diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs
index 8d1b826..75d7543 100644
--- a/compiler/rustc_privacy/src/lib.rs
+++ b/compiler/rustc_privacy/src/lib.rs
@@ -1,6 +1,9 @@
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![feature(in_band_lifetimes)]
 #![feature(nll)]
+#![feature(or_patterns)]
+#![feature(control_flow_enum)]
+#![feature(try_blocks)]
 #![recursion_limit = "256"]
 
 use rustc_attr as attr;
@@ -14,16 +17,18 @@
 use rustc_middle::bug;
 use rustc_middle::hir::map::Map;
 use rustc_middle::middle::privacy::{AccessLevel, AccessLevels};
+use rustc_middle::span_bug;
 use rustc_middle::ty::fold::TypeVisitor;
 use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::subst::InternalSubsts;
 use rustc_middle::ty::{self, GenericParamDefKind, TraitRef, Ty, TyCtxt, TypeFoldable};
 use rustc_session::lint;
 use rustc_span::hygiene::Transparency;
-use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::symbol::{kw, Ident};
 use rustc_span::Span;
 
 use std::marker::PhantomData;
+use std::ops::ControlFlow;
 use std::{cmp, fmt, mem};
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -33,7 +38,7 @@
 /// Implemented to visit all `DefId`s in a type.
 /// Visiting `DefId`s is useful because visibilities and reachabilities are attached to them.
 /// The idea is to visit "all components of a type", as documented in
-/// https://github.com/rust-lang/rfcs/blob/master/text/2145-type-privacy.md#how-to-determine-visibility-of-a-type.
+/// <https://github.com/rust-lang/rfcs/blob/master/text/2145-type-privacy.md#how-to-determine-visibility-of-a-type>.
 /// The default type visitor (`TypeVisitor`) does most of the job, but it has some shortcomings.
 /// First, it doesn't have overridable `fn visit_trait_ref`, so we have to catch trait `DefId`s
 /// manually. Second, it doesn't visit some type components like signatures of fn types, or traits
@@ -46,7 +51,12 @@
     fn skip_assoc_tys(&self) -> bool {
         false
     }
-    fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool;
+    fn visit_def_id(
+        &mut self,
+        def_id: DefId,
+        kind: &str,
+        descr: &dyn fmt::Display,
+    ) -> ControlFlow<()>;
 
     /// Not overridden, but used to actually visit types and traits.
     fn skeleton(&mut self) -> DefIdVisitorSkeleton<'_, 'tcx, Self> {
@@ -56,13 +66,13 @@
             dummy: Default::default(),
         }
     }
-    fn visit(&mut self, ty_fragment: impl TypeFoldable<'tcx>) -> bool {
+    fn visit(&mut self, ty_fragment: impl TypeFoldable<'tcx>) -> ControlFlow<()> {
         ty_fragment.visit_with(&mut self.skeleton())
     }
-    fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> bool {
+    fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow<()> {
         self.skeleton().visit_trait(trait_ref)
     }
-    fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> bool {
+    fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> ControlFlow<()> {
         self.skeleton().visit_predicates(predicates)
     }
 }
@@ -77,25 +87,25 @@
 where
     V: DefIdVisitor<'tcx> + ?Sized,
 {
-    fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> bool {
+    fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow<()> {
         let TraitRef { def_id, substs } = trait_ref;
-        self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref.print_only_trait_path())
-            || (!self.def_id_visitor.shallow() && substs.visit_with(self))
+        self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref.print_only_trait_path())?;
+        if self.def_id_visitor.shallow() { ControlFlow::CONTINUE } else { substs.visit_with(self) }
     }
 
-    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool {
+    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<()> {
         match predicate.skip_binders() {
             ty::PredicateAtom::Trait(ty::TraitPredicate { trait_ref }, _) => {
                 self.visit_trait(trait_ref)
             }
             ty::PredicateAtom::Projection(ty::ProjectionPredicate { projection_ty, ty }) => {
-                ty.visit_with(self)
-                    || self.visit_trait(projection_ty.trait_ref(self.def_id_visitor.tcx()))
+                ty.visit_with(self)?;
+                self.visit_trait(projection_ty.trait_ref(self.def_id_visitor.tcx()))
             }
             ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(ty, _region)) => {
                 ty.visit_with(self)
             }
-            ty::PredicateAtom::RegionOutlives(..) => false,
+            ty::PredicateAtom::RegionOutlives(..) => ControlFlow::CONTINUE,
             ty::PredicateAtom::ConstEvaluatable(..)
                 if self.def_id_visitor.tcx().features().const_evaluatable_checked =>
             {
@@ -103,20 +113,15 @@
                 // private function we may have to do something here...
                 //
                 // For now, let's just pretend that everything is fine.
-                false
+                ControlFlow::CONTINUE
             }
             _ => bug!("unexpected predicate: {:?}", predicate),
         }
     }
 
-    fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> bool {
+    fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> ControlFlow<()> {
         let ty::GenericPredicates { parent: _, predicates } = predicates;
-        for &(predicate, _span) in predicates {
-            if self.visit_predicate(predicate) {
-                return true;
-            }
-        }
-        false
+        predicates.iter().try_for_each(|&(predicate, _span)| self.visit_predicate(predicate))
     }
 }
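From here on, the privacy visitor's `bool` results (`true` meaning "stop") are replaced with `std::ops::ControlFlow<()>`, so early exits compose via `?` and `Iterator::try_for_each`, as in `visit_predicates` just above, rather than hand-rolled `if ... { return true; }` chains. A tiny self-contained sketch of the same pattern outside the compiler; note the `ControlFlow::CONTINUE` constant used in the diff is a nightly convenience, so the sketch spells out `Continue(())` and `Break(())`:

    use std::ops::ControlFlow;

    // Visit one id; break as soon as an "interesting" (here: odd) id is seen.
    fn visit_id(id: u32) -> ControlFlow<()> {
        if id % 2 == 1 { ControlFlow::Break(()) } else { ControlFlow::Continue(()) }
    }

    fn visit_all(ids: &[u32]) -> ControlFlow<()> {
        // `?` propagates a Break upward, like the `visit_def_id(..)?` calls in this file.
        visit_id(ids[0])?;
        ids[1..].iter().try_for_each(|&id| visit_id(id))
    }

    fn main() {
        assert_eq!(visit_all(&[0, 2, 4]), ControlFlow::Continue(()));
        assert_eq!(visit_all(&[0, 3, 4]), ControlFlow::Break(()));
    }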
 
@@ -124,7 +129,7 @@
 where
     V: DefIdVisitor<'tcx> + ?Sized,
 {
-    fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
         let tcx = self.def_id_visitor.tcx();
         // InternalSubsts are not visited here because they are visited below in `super_visit_with`.
         match *ty.kind() {
@@ -133,19 +138,15 @@
             | ty::FnDef(def_id, ..)
             | ty::Closure(def_id, ..)
             | ty::Generator(def_id, ..) => {
-                if self.def_id_visitor.visit_def_id(def_id, "type", &ty) {
-                    return true;
-                }
+                self.def_id_visitor.visit_def_id(def_id, "type", &ty)?;
                 if self.def_id_visitor.shallow() {
-                    return false;
+                    return ControlFlow::CONTINUE;
                 }
                 // Default type visitor doesn't visit signatures of fn types.
                 // Something like `fn() -> Priv {my_func}` is considered a private type even if
                 // `my_func` is public, so we need to visit signatures.
                 if let ty::FnDef(..) = ty.kind() {
-                    if tcx.fn_sig(def_id).visit_with(self) {
-                        return true;
-                    }
+                    tcx.fn_sig(def_id).visit_with(self)?;
                 }
                 // Inherent static methods don't have self type in substs.
                 // Something like `fn() {my_method}` type of the method
@@ -153,9 +154,7 @@
                 // so we need to visit the self type additionally.
                 if let Some(assoc_item) = tcx.opt_associated_item(def_id) {
                     if let ty::ImplContainer(impl_def_id) = assoc_item.container {
-                        if tcx.type_of(impl_def_id).visit_with(self) {
-                            return true;
-                        }
+                        tcx.type_of(impl_def_id).visit_with(self)?;
                     }
                 }
             }
@@ -166,7 +165,7 @@
                     // as visible/reachable even if both `Type` and `Trait` are private.
                     // Ideally, associated types should be substituted in the same way as
                     // free type aliases, but this isn't done yet.
-                    return false;
+                    return ControlFlow::CONTINUE;
                 }
                 // This will also visit substs if necessary, so we don't need to recurse.
                 return self.visit_trait(proj.trait_ref(tcx));
@@ -183,9 +182,7 @@
                         }
                     };
                     let ty::ExistentialTraitRef { def_id, substs: _ } = trait_ref;
-                    if self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref) {
-                        return true;
-                    }
+                    self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref)?;
                 }
             }
             ty::Opaque(def_id, ..) => {
@@ -194,13 +191,14 @@
                     // The intent is to treat `impl Trait1 + Trait2` identically to
                     // `dyn Trait1 + Trait2`. Therefore we ignore def-id of the opaque type itself
                     // (it either has no visibility, or its visibility is insignificant, like
-                    // visibilities of type aliases) and recurse into predicates instead to go
+                    // visibilities of type aliases) and recurse into bounds instead to go
                     // through the trait list (default type visitor doesn't visit those traits).
                     // All traits in the list are considered the "primary" part of the type
                     // and are visited by shallow visitors.
-                    if self.visit_predicates(tcx.predicates_of(def_id)) {
-                        return true;
-                    }
+                    self.visit_predicates(ty::GenericPredicates {
+                        parent: None,
+                        predicates: tcx.explicit_item_bounds(def_id),
+                    })?;
                 }
             }
             // These types don't have their own def-ids (but may have subcomponents
@@ -226,125 +224,10 @@
             }
         }
 
-        !self.def_id_visitor.shallow() && ty.super_visit_with(self)
-    }
-}
-
-fn def_id_visibility<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    def_id: DefId,
-) -> (ty::Visibility, Span, &'static str) {
-    match def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id)) {
-        Some(hir_id) => {
-            let vis = match tcx.hir().get(hir_id) {
-                Node::Item(item) => &item.vis,
-                Node::ForeignItem(foreign_item) => &foreign_item.vis,
-                Node::MacroDef(macro_def) => {
-                    if tcx.sess.contains_name(&macro_def.attrs, sym::macro_export) {
-                        return (ty::Visibility::Public, macro_def.span, "public");
-                    } else {
-                        &macro_def.vis
-                    }
-                }
-                Node::TraitItem(..) | Node::Variant(..) => {
-                    return def_id_visibility(tcx, tcx.hir().get_parent_did(hir_id).to_def_id());
-                }
-                Node::ImplItem(impl_item) => {
-                    match tcx.hir().get(tcx.hir().get_parent_item(hir_id)) {
-                        Node::Item(item) => match &item.kind {
-                            hir::ItemKind::Impl { of_trait: None, .. } => &impl_item.vis,
-                            hir::ItemKind::Impl { of_trait: Some(trait_ref), .. } => {
-                                return def_id_visibility(tcx, trait_ref.path.res.def_id());
-                            }
-                            kind => bug!("unexpected item kind: {:?}", kind),
-                        },
-                        node => bug!("unexpected node kind: {:?}", node),
-                    }
-                }
-                Node::Ctor(vdata) => {
-                    let parent_hir_id = tcx.hir().get_parent_node(hir_id);
-                    match tcx.hir().get(parent_hir_id) {
-                        Node::Variant(..) => {
-                            let parent_did = tcx.hir().local_def_id(parent_hir_id);
-                            let (mut ctor_vis, mut span, mut descr) =
-                                def_id_visibility(tcx, parent_did.to_def_id());
-
-                            let adt_def = tcx.adt_def(tcx.hir().get_parent_did(hir_id).to_def_id());
-                            let ctor_did = tcx.hir().local_def_id(vdata.ctor_hir_id().unwrap());
-                            let variant = adt_def.variant_with_ctor_id(ctor_did.to_def_id());
-
-                            if variant.is_field_list_non_exhaustive()
-                                && ctor_vis == ty::Visibility::Public
-                            {
-                                ctor_vis =
-                                    ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX));
-                                let attrs = tcx.get_attrs(variant.def_id);
-                                span = tcx
-                                    .sess
-                                    .find_by_name(&attrs, sym::non_exhaustive)
-                                    .unwrap()
-                                    .span;
-                                descr = "crate-visible";
-                            }
-
-                            return (ctor_vis, span, descr);
-                        }
-                        Node::Item(..) => {
-                            let item = match tcx.hir().get(parent_hir_id) {
-                                Node::Item(item) => item,
-                                node => bug!("unexpected node kind: {:?}", node),
-                            };
-                            let (mut ctor_vis, mut span, mut descr) = (
-                                ty::Visibility::from_hir(&item.vis, parent_hir_id, tcx),
-                                item.vis.span,
-                                item.vis.node.descr(),
-                            );
-                            for field in vdata.fields() {
-                                let field_vis = ty::Visibility::from_hir(&field.vis, hir_id, tcx);
-                                if ctor_vis.is_at_least(field_vis, tcx) {
-                                    ctor_vis = field_vis;
-                                    span = field.vis.span;
-                                    descr = field.vis.node.descr();
-                                }
-                            }
-
-                            // If the structure is marked as non_exhaustive then lower the
-                            // visibility to within the crate.
-                            if ctor_vis == ty::Visibility::Public {
-                                let adt_def =
-                                    tcx.adt_def(tcx.hir().get_parent_did(hir_id).to_def_id());
-                                if adt_def.non_enum_variant().is_field_list_non_exhaustive() {
-                                    ctor_vis =
-                                        ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX));
-                                    span = tcx
-                                        .sess
-                                        .find_by_name(&item.attrs, sym::non_exhaustive)
-                                        .unwrap()
-                                        .span;
-                                    descr = "crate-visible";
-                                }
-                            }
-
-                            return (ctor_vis, span, descr);
-                        }
-                        node => bug!("unexpected node kind: {:?}", node),
-                    }
-                }
-                Node::Expr(expr) => {
-                    return (
-                        ty::Visibility::Restricted(tcx.parent_module(expr.hir_id).to_def_id()),
-                        expr.span,
-                        "private",
-                    );
-                }
-                node => bug!("unexpected node kind: {:?}", node),
-            };
-            (ty::Visibility::from_hir(vis, hir_id, tcx), vis.span, vis.node.descr())
-        }
-        None => {
-            let vis = tcx.visibility(def_id);
-            let descr = if vis == ty::Visibility::Public { "public" } else { "private" };
-            (vis, tcx.def_span(def_id), descr)
+        if self.def_id_visitor.shallow() {
+            ControlFlow::CONTINUE
+        } else {
+            ty.super_visit_with(self)
         }
     }
 }
@@ -395,9 +278,14 @@
     fn skip_assoc_tys(&self) -> bool {
         true
     }
-    fn visit_def_id(&mut self, def_id: DefId, _kind: &str, _descr: &dyn fmt::Display) -> bool {
+    fn visit_def_id(
+        &mut self,
+        def_id: DefId,
+        _kind: &str,
+        _descr: &dyn fmt::Display,
+    ) -> ControlFlow<()> {
         self.min = VL::new_min(self, def_id);
-        false
+        ControlFlow::CONTINUE
     }
 }
 
@@ -421,7 +309,7 @@
 impl VisibilityLike for ty::Visibility {
     const MAX: Self = ty::Visibility::Public;
     fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self {
-        min(def_id_visibility(find.tcx, def_id).0, find.min, find.tcx)
+        min(find.tcx.visibility(def_id), find.min, find.tcx)
     }
 }
 impl VisibilityLike for Option<AccessLevel> {
@@ -531,17 +419,16 @@
             let hir_id = item_id.id;
             let item_def_id = self.tcx.hir().local_def_id(hir_id);
             let def_kind = self.tcx.def_kind(item_def_id);
-            let item = self.tcx.hir().expect_item(hir_id);
-            let vis = ty::Visibility::from_hir(&item.vis, hir_id, self.tcx);
+            let vis = self.tcx.visibility(item_def_id);
             self.update_macro_reachable_def(hir_id, def_kind, vis, defining_mod);
         }
         if let Some(exports) = self.tcx.module_exports(module_def_id) {
             for export in exports {
                 if export.vis.is_accessible_from(defining_mod, self.tcx) {
                     if let Res::Def(def_kind, def_id) = export.res {
-                        let vis = def_id_visibility(self.tcx, def_id).0;
                         if let Some(def_id) = def_id.as_local() {
                             let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+                            let vis = self.tcx.visibility(def_id.to_def_id());
                             self.update_macro_reachable_def(hir_id, def_kind, vis, defining_mod);
                         }
                     }
@@ -593,7 +480,7 @@
                     {
                         for field in struct_def.fields() {
                             let field_vis =
-                                ty::Visibility::from_hir(&field.vis, field.hir_id, self.tcx);
+                                self.tcx.visibility(self.tcx.hir().local_def_id(field.hir_id));
                             if field_vis.is_accessible_from(module, self.tcx) {
                                 self.reach(field.hir_id, level).ty();
                             }
@@ -1010,17 +897,21 @@
     fn tcx(&self) -> TyCtxt<'tcx> {
         self.ev.tcx
     }
-    fn visit_def_id(&mut self, def_id: DefId, _kind: &str, _descr: &dyn fmt::Display) -> bool {
+    fn visit_def_id(
+        &mut self,
+        def_id: DefId,
+        _kind: &str,
+        _descr: &dyn fmt::Display,
+    ) -> ControlFlow<()> {
         if let Some(def_id) = def_id.as_local() {
-            let hir_id = self.ev.tcx.hir().local_def_id_to_hir_id(def_id);
-            if let ((ty::Visibility::Public, ..), _)
-            | (_, Some(AccessLevel::ReachableFromImplTrait)) =
-                (def_id_visibility(self.tcx(), def_id.to_def_id()), self.access_level)
+            if let (ty::Visibility::Public, _) | (_, Some(AccessLevel::ReachableFromImplTrait)) =
+                (self.tcx().visibility(def_id.to_def_id()), self.access_level)
             {
+                let hir_id = self.ev.tcx.hir().local_def_id_to_hir_id(def_id);
                 self.ev.update(hir_id, self.access_level);
             }
         }
-        false
+        ControlFlow::CONTINUE
     }
 }
 
@@ -1181,26 +1072,21 @@
     }
 
     fn item_is_accessible(&self, did: DefId) -> bool {
-        def_id_visibility(self.tcx, did)
-            .0
-            .is_accessible_from(self.current_item.to_def_id(), self.tcx)
+        self.tcx.visibility(did).is_accessible_from(self.current_item.to_def_id(), self.tcx)
     }
 
     // Take node-id of an expression or pattern and check its type for privacy.
     fn check_expr_pat_type(&mut self, id: hir::HirId, span: Span) -> bool {
         self.span = span;
         let typeck_results = self.typeck_results();
-        if self.visit(typeck_results.node_type(id)) || self.visit(typeck_results.node_substs(id)) {
-            return true;
-        }
-        if let Some(adjustments) = typeck_results.adjustments().get(id) {
-            for adjustment in adjustments {
-                if self.visit(adjustment.target) {
-                    return true;
-                }
+        let result: ControlFlow<()> = try {
+            self.visit(typeck_results.node_type(id))?;
+            self.visit(typeck_results.node_substs(id))?;
+            if let Some(adjustments) = typeck_results.adjustments().get(id) {
+                adjustments.iter().try_for_each(|adjustment| self.visit(adjustment.target))?;
             }
-        }
-        false
+        };
+        result.is_break()
     }
 
     fn check_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
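
The hunks above replace bool-returning visitor callbacks with `std::ops::ControlFlow`, so a traversal can short-circuit via `?` and `try_for_each` instead of hand-written boolean plumbing. A minimal standalone sketch of the same idiom (the `Visitor` type and `visit` helper here are hypothetical stand-ins, not the compiler's types):

```rust
use std::ops::ControlFlow;

// Hypothetical stand-in for a privacy visitor: `visit` reports whether the
// current item is "private", and the traversal stops at the first `Break`.
struct Visitor {
    seen: usize,
}

impl Visitor {
    fn visit(&mut self, private: bool) -> ControlFlow<()> {
        if private {
            ControlFlow::Break(())
        } else {
            self.seen += 1;
            ControlFlow::Continue(())
        }
    }
}

// Mirrors the rewritten `check_expr_pat_type`: `try_for_each` short-circuits
// on the first `Break`, and the caller only asks whether a break happened.
fn check_all(v: &mut Visitor, items: &[bool]) -> ControlFlow<()> {
    items.iter().try_for_each(|&private| v.visit(private))
}

fn main() {
    let mut v = Visitor { seen: 0 };
    assert!(check_all(&mut v, &[false, true, false]).is_break());
    assert_eq!(v.seen, 1); // stopped before reaching the last item
}
```
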
@@ -1242,14 +1128,14 @@
         self.span = hir_ty.span;
         if let Some(typeck_results) = self.maybe_typeck_results {
             // Types in bodies.
-            if self.visit(typeck_results.node_type(hir_ty.hir_id)) {
+            if self.visit(typeck_results.node_type(hir_ty.hir_id)).is_break() {
                 return;
             }
         } else {
             // Types in signatures.
             // FIXME: This is very inefficient. Ideally each HIR type should be converted
             // into a semantic type only once and the result should be cached somehow.
-            if self.visit(rustc_typeck::hir_ty_to_ty(self.tcx, hir_ty)) {
+            if self.visit(rustc_typeck::hir_ty_to_ty(self.tcx, hir_ty)).is_break() {
                 return;
             }
         }
@@ -1271,15 +1157,17 @@
             );
 
             for (trait_predicate, _, _) in bounds.trait_bounds {
-                if self.visit_trait(trait_predicate.skip_binder()) {
+                if self.visit_trait(trait_predicate.skip_binder()).is_break() {
                     return;
                 }
             }
 
             for (poly_predicate, _) in bounds.projection_bounds {
                 let tcx = self.tcx;
-                if self.visit(poly_predicate.skip_binder().ty)
-                    || self.visit_trait(poly_predicate.skip_binder().projection_ty.trait_ref(tcx))
+                if self.visit(poly_predicate.skip_binder().ty).is_break()
+                    || self
+                        .visit_trait(poly_predicate.skip_binder().projection_ty.trait_ref(tcx))
+                        .is_break()
                 {
                     return;
                 }
@@ -1306,7 +1194,7 @@
                 // Method calls have to be checked specially.
                 self.span = span;
                 if let Some(def_id) = self.typeck_results().type_dependent_def_id(expr.hir_id) {
-                    if self.visit(self.tcx.type_of(def_id)) {
+                    if self.visit(self.tcx.type_of(def_id)).is_break() {
                         return;
                     }
                 } else {
@@ -1337,9 +1225,11 @@
                 .maybe_typeck_results
                 .and_then(|typeck_results| typeck_results.type_dependent_def(id)),
         };
-        let def = def.filter(|(kind, _)| match kind {
-            DefKind::AssocFn | DefKind::AssocConst | DefKind::AssocTy | DefKind::Static => true,
-            _ => false,
+        let def = def.filter(|(kind, _)| {
+            matches!(
+                kind,
+                DefKind::AssocFn | DefKind::AssocConst | DefKind::AssocTy | DefKind::Static
+            )
         });
         if let Some((kind, def_id)) = def {
             let is_local_static =
@@ -1404,8 +1294,17 @@
     fn tcx(&self) -> TyCtxt<'tcx> {
         self.tcx
     }
-    fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
-        self.check_def_id(def_id, kind, descr)
+    fn visit_def_id(
+        &mut self,
+        def_id: DefId,
+        kind: &str,
+        descr: &dyn fmt::Display,
+    ) -> ControlFlow<()> {
+        if self.check_def_id(def_id, kind, descr) {
+            ControlFlow::BREAK
+        } else {
+            ControlFlow::CONTINUE
+        }
     }
 }
 
@@ -1800,6 +1699,14 @@
         self
     }
 
+    fn bounds(&mut self) -> &mut Self {
+        self.visit_predicates(ty::GenericPredicates {
+            parent: None,
+            predicates: self.tcx.explicit_item_bounds(self.item_def_id),
+        });
+        self
+    }
+
     fn ty(&mut self) -> &mut Self {
         self.visit(self.tcx.type_of(self.item_def_id));
         self
@@ -1829,8 +1736,21 @@
             None => return false,
         };
 
-        let (vis, vis_span, vis_descr) = def_id_visibility(self.tcx, def_id);
+        let vis = self.tcx.visibility(def_id);
         if !vis.is_at_least(self.required_visibility, self.tcx) {
+            let vis_descr = match vis {
+                ty::Visibility::Public => "public",
+                ty::Visibility::Invisible => "private",
+                ty::Visibility::Restricted(vis_def_id) => {
+                    if vis_def_id == self.tcx.parent_module(hir_id).to_def_id() {
+                        "private"
+                    } else if vis_def_id.is_top_level_module() {
+                        "crate-private"
+                    } else {
+                        "restricted"
+                    }
+                }
+            };
             let make_msg = || format!("{} {} `{}` in public interface", vis_descr, kind, descr);
             if self.has_pub_restricted || self.has_old_errors || self.in_assoc_ty {
                 let mut err = if kind == "trait" {
@@ -1838,6 +1758,8 @@
                 } else {
                     struct_span_err!(self.tcx.sess, self.span, E0446, "{}", make_msg())
                 };
+                let vis_span =
+                    self.tcx.sess.source_map().guess_head_span(self.tcx.def_span(def_id));
                 err.span_label(self.span, format!("can't leak {} {}", vis_descr, kind));
                 err.span_label(vis_span, format!("`{}` declared as {}", descr, vis_descr));
                 err.emit();
@@ -1872,8 +1794,17 @@
     fn tcx(&self) -> TyCtxt<'tcx> {
         self.tcx
     }
-    fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
-        self.check_def_id(def_id, kind, descr)
+    fn visit_def_id(
+        &mut self,
+        def_id: DefId,
+        kind: &str,
+        descr: &dyn fmt::Display,
+    ) -> ControlFlow<()> {
+        if self.check_def_id(def_id, kind, descr) {
+            ControlFlow::BREAK
+        } else {
+            ControlFlow::CONTINUE
+        }
     }
 }
 
@@ -1954,7 +1885,7 @@
 
     fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
         let tcx = self.tcx;
-        let item_visibility = ty::Visibility::from_hir(&item.vis, item.hir_id, tcx);
+        let item_visibility = tcx.visibility(tcx.hir().local_def_id(item.hir_id).to_def_id());
 
         match item.kind {
             // Crates are always public.
@@ -1975,7 +1906,7 @@
             hir::ItemKind::OpaqueTy(..) => {
                 // `ty()` for opaque types is the underlying type,
                 // it's not a part of interface, so we skip it.
-                self.check(item.hir_id, item_visibility).generics().predicates();
+                self.check(item.hir_id, item_visibility).generics().bounds();
             }
             hir::ItemKind::Trait(.., trait_item_refs) => {
                 self.check(item.hir_id, item_visibility).generics().predicates();
@@ -1987,6 +1918,10 @@
                         trait_item_ref.defaultness,
                         item_visibility,
                     );
+
+                    if let AssocItemKind::Type = trait_item_ref.kind {
+                        self.check(trait_item_ref.id.hir_id, item_visibility).bounds();
+                    }
                 }
             }
             hir::ItemKind::TraitAlias(..) => {
@@ -2004,7 +1939,7 @@
             // Subitems of foreign modules have their own publicity.
             hir::ItemKind::ForeignMod(ref foreign_mod) => {
                 for foreign_item in foreign_mod.items {
-                    let vis = ty::Visibility::from_hir(&foreign_item.vis, item.hir_id, tcx);
+                    let vis = tcx.visibility(tcx.hir().local_def_id(foreign_item.hir_id));
                     self.check(foreign_item.hir_id, vis).generics().predicates().ty();
                 }
             }
@@ -2013,7 +1948,7 @@
                 self.check(item.hir_id, item_visibility).generics().predicates();
 
                 for field in struct_def.fields() {
-                    let field_visibility = ty::Visibility::from_hir(&field.vis, item.hir_id, tcx);
+                    let field_visibility = tcx.visibility(tcx.hir().local_def_id(field.hir_id));
                     self.check(field.hir_id, min(item_visibility, field_visibility, tcx)).ty();
                 }
             }
@@ -2025,10 +1960,9 @@
                 let impl_vis = ty::Visibility::of_impl(item.hir_id, tcx, &Default::default());
                 self.check(item.hir_id, impl_vis).generics().predicates();
                 for impl_item_ref in items {
-                    let impl_item = tcx.hir().impl_item(impl_item_ref.id);
                     let impl_item_vis = if of_trait.is_none() {
                         min(
-                            ty::Visibility::from_hir(&impl_item.vis, item.hir_id, tcx),
+                            tcx.visibility(tcx.hir().local_def_id(impl_item_ref.id.hir_id)),
                             impl_vis,
                             tcx,
                         )
@@ -2049,6 +1983,7 @@
 
 pub fn provide(providers: &mut Providers) {
     *providers = Providers {
+        visibility,
         privacy_access_levels,
         check_private_in_public,
         check_mod_privacy,
@@ -2056,6 +1991,55 @@
     };
 }
 
+fn visibility(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Visibility {
+    let def_id = def_id.expect_local();
+    match tcx.visibilities.get(&def_id) {
+        Some(vis) => *vis,
+        None => {
+            let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+            match tcx.hir().get(hir_id) {
+                // Unique types created for closures participate in type privacy checking.
+                // They have visibilities inherited from the module they are defined in.
+                Node::Expr(hir::Expr { kind: hir::ExprKind::Closure(..), .. }) => {
+                    ty::Visibility::Restricted(tcx.parent_module(hir_id).to_def_id())
+                }
+                // - AST lowering may clone `use` items and the clones don't
+                //   get their entries in the resolver's visibility table.
+                //   AST lowering also creates opaque type items with inherited visibilities.
+                //   Visibility on them should have no effect, but to avoid the visibility
+                //   query failing on some items, we provide it for opaque types as well.
+                Node::Item(hir::Item {
+                    vis,
+                    kind: hir::ItemKind::Use(..) | hir::ItemKind::OpaqueTy(..),
+                    ..
+                }) => ty::Visibility::from_hir(vis, hir_id, tcx),
+                // Visibilities of trait impl items are inherited from their traits
+                // and are not filled in resolve.
+                Node::ImplItem(impl_item) => {
+                    match tcx.hir().get(tcx.hir().get_parent_item(hir_id)) {
+                        Node::Item(hir::Item {
+                            kind: hir::ItemKind::Impl { of_trait: Some(tr), .. },
+                            ..
+                        }) => tr.path.res.opt_def_id().map_or_else(
+                            || {
+                                tcx.sess.delay_span_bug(tr.path.span, "trait without a def-id");
+                                ty::Visibility::Public
+                            },
+                            |def_id| tcx.visibility(def_id),
+                        ),
+                        _ => span_bug!(impl_item.span, "the parent is not a trait impl"),
+                    }
+                }
+                _ => span_bug!(
+                    tcx.def_span(def_id),
+                    "visibility table unexpectedly missing a def-id: {:?}",
+                    def_id,
+                ),
+            }
+        }
+    }
+}
+
 fn check_mod_privacy(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
     // Check privacy of names not checked in previous compilation stages.
     let mut visitor = NamePrivacyVisitor { tcx, maybe_typeck_results: None, current_item: None };
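
`ty::Visibility::Restricted(def_id)` in the code above means "visible within that module and everything nested inside it"; that is the rule `is_accessible_from` applies and what the `crate-private`/`restricted` descriptions reflect. A minimal sketch of that rule over a toy module tree (the `ModId`/`parents` representation is hypothetical, not the compiler's):

```rust
use std::collections::HashMap;

// Toy module tree: each module records its parent; 0 is the crate root.
type ModId = u32;
const CRATE_ROOT: ModId = 0;

#[derive(Clone, Copy)]
enum Visibility {
    Public,
    Restricted(ModId), // visible in this module and everything nested in it
}

// Is `module` equal to `ancestor`, or nested somewhere below it?
fn is_descendant(parents: &HashMap<ModId, ModId>, mut module: ModId, ancestor: ModId) -> bool {
    loop {
        if module == ancestor {
            return true;
        }
        if module == CRATE_ROOT {
            return false;
        }
        module = parents[&module];
    }
}

fn is_accessible_from(vis: Visibility, module: ModId, parents: &HashMap<ModId, ModId>) -> bool {
    match vis {
        Visibility::Public => true,
        Visibility::Restricted(scope) => is_descendant(parents, module, scope),
    }
}

fn main() {
    // crate root (0) -> a (1) -> a::b (2), and crate root -> c (3)
    let parents: HashMap<ModId, ModId> = HashMap::from([(1, 0), (2, 1), (3, 0)]);
    let public = Visibility::Public;
    let crate_private = Visibility::Restricted(CRATE_ROOT);
    let pub_in_a = Visibility::Restricted(1);
    assert!(is_accessible_from(public, 3, &parents));
    assert!(is_accessible_from(crate_private, 3, &parents)); // crate-visible everywhere
    assert!(is_accessible_from(pub_in_a, 2, &parents)); // a::b can see pub(in a) items
    assert!(!is_accessible_from(pub_in_a, 3, &parents)); // c cannot
}
```
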
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
index e302784..7808a28 100644
--- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -165,10 +165,6 @@
         cgu_name.hash(&mut hasher);
         WorkProductId { hash: hasher.finish() }
     }
-
-    pub fn from_fingerprint(fingerprint: Fingerprint) -> WorkProductId {
-        WorkProductId { hash: fingerprint }
-    }
 }
 
 impl<HCX> HashStable<HCX> for WorkProductId {
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index d70306b..d9b687c 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -292,10 +292,8 @@
                 );
 
                 data.colors.insert(prev_index, color);
-            } else {
-                if print_status {
-                    eprintln!("[task::new] {:?}", key);
-                }
+            } else if print_status {
+                eprintln!("[task::new] {:?}", key);
             }
 
             (result, dep_node_index)
@@ -402,11 +400,6 @@
         self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
     }
 
-    #[inline]
-    pub fn prev_dep_node_index_of(&self, dep_node: &DepNode<K>) -> SerializedDepNodeIndex {
-        self.data.as_ref().unwrap().previous.node_to_index(dep_node)
-    }
-
     /// Checks whether a previous work product exists for `v` and, if
     /// so, return the path that leads to it. Used to skip doing work.
     pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
diff --git a/compiler/rustc_query_system/src/dep_graph/query.rs b/compiler/rustc_query_system/src/dep_graph/query.rs
index fb313d2..a27b716 100644
--- a/compiler/rustc_query_system/src/dep_graph/query.rs
+++ b/compiler/rustc_query_system/src/dep_graph/query.rs
@@ -1,7 +1,5 @@
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::graph::implementation::{
-    Direction, Graph, NodeIndex, INCOMING, OUTGOING,
-};
+use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
 
 use super::{DepKind, DepNode};
 
@@ -52,23 +50,8 @@
         }
     }
 
-    /// All nodes reachable from `node`. In other words, things that
-    /// will have to be recomputed if `node` changes.
-    pub fn transitive_successors(&self, node: &DepNode<K>) -> Vec<&DepNode<K>> {
-        self.reachable_nodes(node, OUTGOING)
-    }
-
     /// All nodes that can reach `node`.
     pub fn transitive_predecessors(&self, node: &DepNode<K>) -> Vec<&DepNode<K>> {
         self.reachable_nodes(node, INCOMING)
     }
-
-    /// Just the outgoing edges from `node`.
-    pub fn immediate_successors(&self, node: &DepNode<K>) -> Vec<&DepNode<K>> {
-        if let Some(&index) = self.indices.get(&node) {
-            self.graph.successor_nodes(index).map(|s| self.graph.node_data(s)).collect()
-        } else {
-            vec![]
-        }
-    }
 }
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 1839e1a..7bc6ae1 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -1,12 +1,12 @@
 use crate::dep_graph::DepNodeIndex;
 use crate::query::plumbing::{QueryLookup, QueryState};
-use crate::query::QueryContext;
 
 use rustc_arena::TypedArena;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::sync::WorkerLocal;
 use std::default::Default;
+use std::fmt::Debug;
 use std::hash::Hash;
 use std::marker::PhantomData;
 
@@ -24,16 +24,16 @@
 }
 
 pub trait QueryCache: QueryStorage {
-    type Key: Hash;
+    type Key: Hash + Eq + Clone + Debug;
     type Sharded: Default;
 
     /// Checks if the query is already computed and in the cache.
     /// It returns the shard index and a lock guard to the shard,
     /// which will be used if the query is not in the cache and we need
     /// to compute it.
-    fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
+    fn lookup<D, Q, R, OnHit, OnMiss>(
         &self,
-        state: &QueryState<CTX, Self>,
+        state: &QueryState<D, Q, Self>,
         key: Self::Key,
         // `on_hit` can be called while holding a lock to the query state shard.
         on_hit: OnHit,
@@ -41,7 +41,7 @@
     ) -> R
     where
         OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R,
-        OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R;
+        OnMiss: FnOnce(Self::Key, QueryLookup<'_, D, Q, Self::Key, Self::Sharded>) -> R;
 
     fn complete(
         &self,
@@ -86,21 +86,25 @@
     }
 }
 
-impl<K: Eq + Hash, V: Clone> QueryCache for DefaultCache<K, V> {
+impl<K, V> QueryCache for DefaultCache<K, V>
+where
+    K: Eq + Hash + Clone + Debug,
+    V: Clone,
+{
     type Key = K;
     type Sharded = FxHashMap<K, (V, DepNodeIndex)>;
 
     #[inline(always)]
-    fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
+    fn lookup<D, Q, R, OnHit, OnMiss>(
         &self,
-        state: &QueryState<CTX, Self>,
+        state: &QueryState<D, Q, Self>,
         key: K,
         on_hit: OnHit,
         on_miss: OnMiss,
     ) -> R
     where
         OnHit: FnOnce(&V, DepNodeIndex) -> R,
-        OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R,
+        OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
     {
         let mut lookup = state.get_lookup(&key);
         let lock = &mut *lookup.lock;
@@ -164,21 +168,24 @@
     }
 }
 
-impl<'tcx, K: Eq + Hash, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V> {
+impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
+where
+    K: Eq + Hash + Clone + Debug,
+{
     type Key = K;
     type Sharded = FxHashMap<K, &'tcx (V, DepNodeIndex)>;
 
     #[inline(always)]
-    fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
+    fn lookup<D, Q, R, OnHit, OnMiss>(
         &self,
-        state: &QueryState<CTX, Self>,
+        state: &QueryState<D, Q, Self>,
         key: K,
         on_hit: OnHit,
         on_miss: OnMiss,
     ) -> R
     where
         OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
-        OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R,
+        OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
     {
         let mut lookup = state.get_lookup(&key);
         let lock = &mut *lookup.lock;
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 5490565..0f0684b 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -5,18 +5,14 @@
 use crate::query::caches::QueryCache;
 use crate::query::plumbing::CycleError;
 use crate::query::{QueryContext, QueryState};
-use rustc_data_structures::profiling::ProfileCategory;
 
 use rustc_data_structures::fingerprint::Fingerprint;
 use std::borrow::Cow;
 use std::fmt::Debug;
 use std::hash::Hash;
 
-// The parameter `CTX` is required in librustc_middle:
-// implementations may need to access the `'tcx` lifetime in `CTX = TyCtxt<'tcx>`.
-pub trait QueryConfig<CTX> {
+pub trait QueryConfig {
     const NAME: &'static str;
-    const CATEGORY: ProfileCategory;
 
     type Key: Eq + Hash + Clone + Debug;
     type Value;
@@ -70,7 +66,7 @@
     }
 }
 
-pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> {
+pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
     const ANON: bool;
     const EVAL_ALWAYS: bool;
     const DEP_KIND: CTX::DepKind;
@@ -78,7 +74,7 @@
     type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX, Self::Cache>;
+    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, CTX::Query, Self::Cache>;
 
     fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode<CTX::DepKind>
     where
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 190312b..c1d3210 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,16 +1,16 @@
-use crate::dep_graph::{DepContext, DepKind};
 use crate::query::plumbing::CycleError;
-use crate::query::QueryContext;
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_span::Span;
 
 use std::convert::TryFrom;
+use std::hash::Hash;
 use std::marker::PhantomData;
 use std::num::NonZeroU32;
 
 #[cfg(parallel_compiler)]
 use {
+    super::QueryContext,
     parking_lot::{Condvar, Mutex},
     rustc_data_structures::fx::FxHashSet,
     rustc_data_structures::stable_hasher::{HashStable, StableHasher},
@@ -31,7 +31,7 @@
     pub query: Q,
 }
 
-type QueryMap<CTX> = FxHashMap<QueryJobId<<CTX as DepContext>::DepKind>, QueryJobInfo<CTX>>;
+pub(crate) type QueryMap<D, Q> = FxHashMap<QueryJobId<D>, QueryJobInfo<D, Q>>;
 
 /// A value uniquely identifying an active query job within a shard in the query cache.
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
@@ -39,71 +39,75 @@
 
 /// A value uniquely identifying an active query job.
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
-pub struct QueryJobId<K> {
+pub struct QueryJobId<D> {
     /// Which job within a shard is this
     pub job: QueryShardJobId,
 
     /// In which shard is this job
     pub shard: u16,
 
-    /// What kind of query this job is
-    pub kind: K,
+    /// What kind of query this job is.
+    pub kind: D,
 }
 
-impl<K: DepKind> QueryJobId<K> {
-    pub fn new(job: QueryShardJobId, shard: usize, kind: K) -> Self {
+impl<D> QueryJobId<D>
+where
+    D: Copy + Clone + Eq + Hash,
+{
+    pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
         QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
     }
 
-    fn query<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> CTX::Query {
+    fn query<Q: Clone>(self, map: &QueryMap<D, Q>) -> Q {
         map.get(&self).unwrap().info.query.clone()
     }
 
     #[cfg(parallel_compiler)]
-    fn span<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Span {
+    fn span<Q: Clone>(self, map: &QueryMap<D, Q>) -> Span {
         map.get(&self).unwrap().job.span
     }
 
     #[cfg(parallel_compiler)]
-    fn parent<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Option<QueryJobId<K>> {
+    fn parent<Q: Clone>(self, map: &QueryMap<D, Q>) -> Option<QueryJobId<D>> {
         map.get(&self).unwrap().job.parent
     }
 
     #[cfg(parallel_compiler)]
-    fn latch<'a, CTX: QueryContext<DepKind = K>>(
-        self,
-        map: &'a QueryMap<CTX>,
-    ) -> Option<&'a QueryLatch<CTX>> {
+    fn latch<'a, Q: Clone>(self, map: &'a QueryMap<D, Q>) -> Option<&'a QueryLatch<D, Q>> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
 }
 
-pub struct QueryJobInfo<CTX: QueryContext> {
-    pub info: QueryInfo<CTX::Query>,
-    pub job: QueryJob<CTX>,
+pub struct QueryJobInfo<D, Q> {
+    pub info: QueryInfo<Q>,
+    pub job: QueryJob<D, Q>,
 }
 
 /// Represents an active query job.
 #[derive(Clone)]
-pub struct QueryJob<CTX: QueryContext> {
+pub struct QueryJob<D, Q> {
     pub id: QueryShardJobId,
 
     /// The span corresponding to the reason for which this query was required.
     pub span: Span,
 
     /// The parent query job which created this job and is implicitly waiting on it.
-    pub parent: Option<QueryJobId<CTX::DepKind>>,
+    pub parent: Option<QueryJobId<D>>,
 
     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
-    latch: Option<QueryLatch<CTX>>,
+    latch: Option<QueryLatch<D, Q>>,
 
-    dummy: PhantomData<QueryLatch<CTX>>,
+    dummy: PhantomData<QueryLatch<D, Q>>,
 }
 
-impl<CTX: QueryContext> QueryJob<CTX> {
+impl<D, Q> QueryJob<D, Q>
+where
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
+{
     /// Creates a new query job.
-    pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<CTX::DepKind>>) -> Self {
+    pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
         QueryJob {
             id,
             span,
@@ -115,7 +119,7 @@
     }
 
     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self, _id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> {
+    pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D, Q> {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
@@ -123,7 +127,7 @@
     }
 
     #[cfg(not(parallel_compiler))]
-    pub(super) fn latch(&mut self, id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> {
+    pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D, Q> {
         QueryLatch { id, dummy: PhantomData }
     }
 
@@ -143,19 +147,26 @@
 
 #[cfg(not(parallel_compiler))]
 #[derive(Clone)]
-pub(super) struct QueryLatch<CTX: QueryContext> {
-    id: QueryJobId<CTX::DepKind>,
-    dummy: PhantomData<CTX>,
+pub(super) struct QueryLatch<D, Q> {
+    id: QueryJobId<D>,
+    dummy: PhantomData<Q>,
 }
 
 #[cfg(not(parallel_compiler))]
-impl<CTX: QueryContext> QueryLatch<CTX> {
-    pub(super) fn find_cycle_in_stack(&self, tcx: CTX, span: Span) -> CycleError<CTX::Query> {
-        let query_map = tcx.try_collect_active_jobs().unwrap();
-
-        // Get the current executing query (waiter) and find the waitee amongst its parents
-        let mut current_job = tcx.current_query_job();
+impl<D, Q> QueryLatch<D, Q>
+where
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
+{
+    pub(super) fn find_cycle_in_stack(
+        &self,
+        query_map: QueryMap<D, Q>,
+        current_job: &Option<QueryJobId<D>>,
+        span: Span,
+    ) -> CycleError<Q> {
+        // Find the waitee amongst `current_job`'s parents
         let mut cycle = Vec::new();
+        let mut current_job = Option::clone(current_job);
 
         while let Some(job) = current_job {
             let info = query_map.get(&job).unwrap();
@@ -186,15 +197,15 @@
 }
 
 #[cfg(parallel_compiler)]
-struct QueryWaiter<CTX: QueryContext> {
-    query: Option<QueryJobId<CTX::DepKind>>,
+struct QueryWaiter<D, Q> {
+    query: Option<QueryJobId<D>>,
     condvar: Condvar,
     span: Span,
-    cycle: Lock<Option<CycleError<CTX::Query>>>,
+    cycle: Lock<Option<CycleError<Q>>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<CTX: QueryContext> QueryWaiter<CTX> {
+impl<D, Q> QueryWaiter<D, Q> {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
         self.condvar.notify_one();
@@ -202,19 +213,19 @@
 }
 
 #[cfg(parallel_compiler)]
-struct QueryLatchInfo<CTX: QueryContext> {
+struct QueryLatchInfo<D, Q> {
     complete: bool,
-    waiters: Vec<Lrc<QueryWaiter<CTX>>>,
+    waiters: Vec<Lrc<QueryWaiter<D, Q>>>,
 }
 
 #[cfg(parallel_compiler)]
 #[derive(Clone)]
-pub(super) struct QueryLatch<CTX: QueryContext> {
-    info: Lrc<Mutex<QueryLatchInfo<CTX>>>,
+pub(super) struct QueryLatch<D, Q> {
+    info: Lrc<Mutex<QueryLatchInfo<D, Q>>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<CTX: QueryContext> QueryLatch<CTX> {
+impl<D: Eq + Hash, Q: Clone> QueryLatch<D, Q> {
     fn new() -> Self {
         QueryLatch {
             info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -223,10 +234,13 @@
 }
 
 #[cfg(parallel_compiler)]
-impl<CTX: QueryContext> QueryLatch<CTX> {
+impl<D, Q> QueryLatch<D, Q> {
     /// Awaits for the query job to complete.
-    pub(super) fn wait_on(&self, tcx: CTX, span: Span) -> Result<(), CycleError<CTX::Query>> {
-        let query = tcx.current_query_job();
+    pub(super) fn wait_on(
+        &self,
+        query: Option<QueryJobId<D>>,
+        span: Span,
+    ) -> Result<(), CycleError<Q>> {
         let waiter =
             Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
@@ -239,12 +253,9 @@
             Some(cycle) => Err(cycle),
         }
     }
-}
 
-#[cfg(parallel_compiler)]
-impl<CTX: QueryContext> QueryLatch<CTX> {
     /// Awaits the caller on this latch by blocking the current thread.
-    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<CTX>>) {
+    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D, Q>>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -278,7 +289,7 @@
 
     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<CTX>> {
+    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D, Q>> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -288,7 +299,7 @@
 
 /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
 #[cfg(parallel_compiler)]
-type Waiter<K> = (QueryJobId<K>, usize);
+type Waiter<D> = (QueryJobId<D>, usize);
 
 /// Visits all the non-resumable and resumable waiters of a query.
 /// Only waiters in a query are visited.
@@ -300,13 +311,15 @@
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
 #[cfg(parallel_compiler)]
-fn visit_waiters<CTX: QueryContext, F>(
-    query_map: &QueryMap<CTX>,
-    query: QueryJobId<CTX::DepKind>,
+fn visit_waiters<D, Q, F>(
+    query_map: &QueryMap<D, Q>,
+    query: QueryJobId<D>,
     mut visit: F,
-) -> Option<Option<Waiter<CTX::DepKind>>>
+) -> Option<Option<Waiter<D>>>
 where
-    F: FnMut(Span, QueryJobId<CTX::DepKind>) -> Option<Option<Waiter<CTX::DepKind>>>,
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
+    F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
 {
     // Visit the parent query which is a non-resumable waiter since it's on the same stack
     if let Some(parent) = query.parent(query_map) {
@@ -335,13 +348,17 @@
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
 #[cfg(parallel_compiler)]
-fn cycle_check<CTX: QueryContext>(
-    query_map: &QueryMap<CTX>,
-    query: QueryJobId<CTX::DepKind>,
+fn cycle_check<D, Q>(
+    query_map: &QueryMap<D, Q>,
+    query: QueryJobId<D>,
     span: Span,
-    stack: &mut Vec<(Span, QueryJobId<CTX::DepKind>)>,
-    visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>,
-) -> Option<Option<Waiter<CTX::DepKind>>> {
+    stack: &mut Vec<(Span, QueryJobId<D>)>,
+    visited: &mut FxHashSet<QueryJobId<D>>,
+) -> Option<Option<Waiter<D>>>
+where
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
+{
     if !visited.insert(query) {
         return if let Some(p) = stack.iter().position(|q| q.1 == query) {
             // We detected a query cycle, fix up the initial span and return Some
@@ -376,11 +393,15 @@
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
 #[cfg(parallel_compiler)]
-fn connected_to_root<CTX: QueryContext>(
-    query_map: &QueryMap<CTX>,
-    query: QueryJobId<CTX::DepKind>,
-    visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>,
-) -> bool {
+fn connected_to_root<D, Q>(
+    query_map: &QueryMap<D, Q>,
+    query: QueryJobId<D>,
+    visited: &mut FxHashSet<QueryJobId<D>>,
+) -> bool
+where
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
+{
     // We already visited this or we're deliberately ignoring it
     if !visited.insert(query) {
         return false;
@@ -399,7 +420,12 @@
 
 // Deterministically pick a query from a list
 #[cfg(parallel_compiler)]
-fn pick_query<'a, CTX, T, F>(query_map: &QueryMap<CTX>, tcx: CTX, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, CTX, T, F>(
+    query_map: &QueryMap<CTX::DepKind, CTX::Query>,
+    tcx: CTX,
+    queries: &'a [T],
+    f: F,
+) -> &'a T
 where
     CTX: QueryContext,
     F: Fn(&T) -> (Span, QueryJobId<CTX::DepKind>),
@@ -429,9 +455,9 @@
 /// the function returns false.
 #[cfg(parallel_compiler)]
 fn remove_cycle<CTX: QueryContext>(
-    query_map: &QueryMap<CTX>,
+    query_map: &QueryMap<CTX::DepKind, CTX::Query>,
     jobs: &mut Vec<QueryJobId<CTX::DepKind>>,
-    wakelist: &mut Vec<Lrc<QueryWaiter<CTX>>>,
+    wakelist: &mut Vec<Lrc<QueryWaiter<CTX::DepKind, CTX::Query>>>,
     tcx: CTX,
 ) -> bool {
     let mut visited = FxHashSet::default();
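
`find_cycle_in_stack` above now takes the collected `QueryMap` and the current job explicitly: it walks the waiting job's parent chain, and the jobs gathered before reaching the one being waited on form the cycle that gets reported. A simplified, runnable sketch of that walk (the `JobId`/`JobInfo` names are stand-ins, not the real query-job types):

```rust
use std::collections::HashMap;

type JobId = u32;

// Each active job records a human-readable name and the job waiting on it.
struct JobInfo {
    name: &'static str,
    parent: Option<JobId>,
}

// Walk the parent chain of `current`, collecting jobs until we hit `waitee`.
// The collected names (oldest first) are the query cycle shown to the user.
fn find_cycle_in_stack(
    jobs: &HashMap<JobId, JobInfo>,
    waitee: JobId,
    current: Option<JobId>,
) -> Option<Vec<&'static str>> {
    let mut cycle = Vec::new();
    let mut current = current;
    while let Some(id) = current {
        let info = jobs.get(&id)?;
        cycle.push(info.name);
        if id == waitee {
            cycle.reverse();
            return Some(cycle);
        }
        current = info.parent;
    }
    None
}

fn main() {
    // type_of(A) started mir_built(A), and mir_built(A) now wants type_of(A) again.
    let jobs = HashMap::from([
        (1, JobInfo { name: "type_of(A)", parent: None }),
        (2, JobInfo { name: "mir_built(A)", parent: Some(1) }),
    ]);
    let cycle = find_cycle_in_stack(&jobs, 1, Some(2)).unwrap();
    assert_eq!(cycle, ["type_of(A)", "mir_built(A)"]);
}
```
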
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 4909772..da45565 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -15,8 +15,8 @@
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
 
 use crate::dep_graph::{DepContext, DepGraph};
+use crate::query::job::QueryMap;
 
-use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::stable_hasher::HashStable;
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
@@ -38,9 +38,7 @@
     /// Get the query information from the TLS context.
     fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
 
-    fn try_collect_active_jobs(
-        &self,
-    ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>>;
+    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;
 
     /// Executes a job by changing the `ImplicitCtxt` to point to the
     /// new query job while it executes. It returns the diagnostics
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index ae042cc..426f5bb 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -7,7 +7,7 @@
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
 use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
-use crate::query::QueryContext;
+use crate::query::{QueryContext, QueryMap};
 
 #[cfg(not(parallel_compiler))]
 use rustc_data_structures::cold_path;
@@ -20,8 +20,6 @@
 use rustc_span::source_map::DUMMY_SP;
 use rustc_span::Span;
 use std::collections::hash_map::Entry;
-use std::convert::TryFrom;
-use std::fmt::Debug;
 use std::hash::{Hash, Hasher};
 use std::mem;
 use std::num::NonZeroU32;
@@ -29,33 +27,33 @@
 #[cfg(debug_assertions)]
 use std::sync::atomic::{AtomicUsize, Ordering};
 
-pub(super) struct QueryStateShard<CTX: QueryContext, K, C> {
+pub(super) struct QueryStateShard<D, Q, K, C> {
     pub(super) cache: C,
-    active: FxHashMap<K, QueryResult<CTX>>,
+    active: FxHashMap<K, QueryResult<D, Q>>,
 
     /// Used to generate unique ids for active jobs.
     jobs: u32,
 }
 
-impl<CTX: QueryContext, K, C: Default> Default for QueryStateShard<CTX, K, C> {
-    fn default() -> QueryStateShard<CTX, K, C> {
+impl<D, Q, K, C: Default> Default for QueryStateShard<D, Q, K, C> {
+    fn default() -> QueryStateShard<D, Q, K, C> {
         QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
     }
 }
 
-pub struct QueryState<CTX: QueryContext, C: QueryCache> {
+pub struct QueryState<D, Q, C: QueryCache> {
     cache: C,
-    shards: Sharded<QueryStateShard<CTX, C::Key, C::Sharded>>,
+    shards: Sharded<QueryStateShard<D, Q, C::Key, C::Sharded>>,
     #[cfg(debug_assertions)]
     pub cache_hits: AtomicUsize,
 }
 
-impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
+impl<D, Q, C: QueryCache> QueryState<D, Q, C> {
     #[inline]
     pub(super) fn get_lookup<'tcx>(
         &'tcx self,
         key: &C::Key,
-    ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> {
+    ) -> QueryLookup<'tcx, D, Q, C::Key, C::Sharded> {
         // We compute the key's hash once and then use it for both the
         // shard lookup and the hashmap lookup. This relies on the fact
         // that both of them use `FxHasher`.
@@ -70,16 +68,21 @@
 }
 
 /// Indicates the state of a query for a given key in a query map.
-enum QueryResult<CTX: QueryContext> {
+enum QueryResult<D, Q> {
     /// An already executing query. The query job can be used to await for its completion.
-    Started(QueryJob<CTX>),
+    Started(QueryJob<D, Q>),
 
     /// The query panicked. Queries trying to wait on this will raise a fatal error which will
     /// silently panic.
     Poisoned,
 }
 
-impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
+impl<D, Q, C> QueryState<D, Q, C>
+where
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
+    C: QueryCache,
+{
     #[inline(always)]
     pub fn iter_results<R>(
         &self,
@@ -98,13 +101,10 @@
 
     pub fn try_collect_active_jobs(
         &self,
-        kind: CTX::DepKind,
-        make_query: fn(C::Key) -> CTX::Query,
-        jobs: &mut FxHashMap<QueryJobId<CTX::DepKind>, QueryJobInfo<CTX>>,
-    ) -> Option<()>
-    where
-        C::Key: Clone,
-    {
+        kind: D,
+        make_query: fn(C::Key) -> Q,
+        jobs: &mut QueryMap<D, Q>,
+    ) -> Option<()> {
         // We use try_lock_shards here since we are called from the
         // deadlock handler, and this shouldn't be locked.
         let shards = self.shards.try_lock_shards()?;
@@ -112,8 +112,7 @@
         jobs.extend(shards.flat_map(|(shard_id, shard)| {
             shard.active.iter().filter_map(move |(k, v)| {
                 if let QueryResult::Started(ref job) = *v {
-                    let id =
-                        QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
+                    let id = QueryJobId::new(job.id, shard_id, kind);
                     let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
                     Some((id, QueryJobInfo { info, job: job.clone() }))
                 } else {
@@ -126,8 +125,8 @@
     }
 }
 
-impl<CTX: QueryContext, C: QueryCache> Default for QueryState<CTX, C> {
-    fn default() -> QueryState<CTX, C> {
+impl<D, Q, C: QueryCache> Default for QueryState<D, Q, C> {
+    fn default() -> QueryState<D, Q, C> {
         QueryState {
             cache: C::default(),
             shards: Default::default(),
@@ -138,28 +137,30 @@
 }
 
 /// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
-pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> {
+pub struct QueryLookup<'tcx, D, Q, K, C> {
     pub(super) key_hash: u64,
     shard: usize,
-    pub(super) lock: LockGuard<'tcx, QueryStateShard<CTX, K, C>>,
+    pub(super) lock: LockGuard<'tcx, QueryStateShard<D, Q, K, C>>,
 }
 
 /// A type representing the responsibility to execute the job in the `job` field.
 /// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, CTX: QueryContext, C>
+struct JobOwner<'tcx, D, Q, C>
 where
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
     C: QueryCache,
-    C::Key: Eq + Hash + Clone + Debug,
 {
-    state: &'tcx QueryState<CTX, C>,
+    state: &'tcx QueryState<D, Q, C>,
     key: C::Key,
-    id: QueryJobId<CTX::DepKind>,
+    id: QueryJobId<D>,
 }
 
-impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
+impl<'tcx, D, Q, C> JobOwner<'tcx, D, Q, C>
 where
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
     C: QueryCache,
-    C::Key: Eq + Hash + Clone + Debug,
 {
     /// Either gets a `JobOwner` corresponding the query, allowing us to
     /// start executing the query, or returns with the result of the query.
@@ -170,14 +171,14 @@
     /// This function is inlined because that results in a noticeable speed-up
     /// for some compile-time benchmarks.
     #[inline(always)]
-    fn try_start<'a, 'b>(
+    fn try_start<'a, 'b, CTX>(
         tcx: CTX,
-        state: &'b QueryState<CTX, C>,
+        state: &'b QueryState<CTX::DepKind, CTX::Query, C>,
         span: Span,
         key: &C::Key,
-        mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>,
+        mut lookup: QueryLookup<'a, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
         query: &QueryVtable<CTX, C::Key, C::Value>,
-    ) -> TryGetJob<'b, CTX, C>
+    ) -> TryGetJob<'b, CTX::DepKind, CTX::Query, C>
     where
         CTX: QueryContext,
     {
@@ -229,7 +230,12 @@
         // so we just return the error.
         #[cfg(not(parallel_compiler))]
         return TryGetJob::Cycle(cold_path(|| {
-            let value = query.handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span));
+            let error: CycleError<CTX::Query> = latch.find_cycle_in_stack(
+                tcx.try_collect_active_jobs().unwrap(),
+                &tcx.current_query_job(),
+                span,
+            );
+            let value = query.handle_cycle_error(tcx, error);
             state.cache.store_nocache(value)
         }));
 
@@ -237,7 +243,7 @@
         // thread.
         #[cfg(parallel_compiler)]
         {
-            let result = latch.wait_on(tcx, span);
+            let result = latch.wait_on(tcx.current_query_job(), span);
 
             if let Err(cycle) = result {
                 let value = query.handle_cycle_error(tcx, cycle);
@@ -297,9 +303,11 @@
     (result, diagnostics.into_inner())
 }
 
-impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C>
+impl<'tcx, D, Q, C> Drop for JobOwner<'tcx, D, Q, C>
 where
-    C::Key: Eq + Hash + Clone + Debug,
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
+    C: QueryCache,
 {
     #[inline(never)]
     #[cold]
@@ -330,12 +338,14 @@
 }
 
 /// The result of `try_start`.
-enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache>
+enum TryGetJob<'tcx, D, Q, C>
 where
-    C::Key: Eq + Hash + Clone + Debug,
+    D: Copy + Clone + Eq + Hash,
+    Q: Clone,
+    C: QueryCache,
 {
     /// The query is not yet started. Contains a guard to the cache eventually used to start it.
-    NotYetStarted(JobOwner<'tcx, CTX, C>),
+    NotYetStarted(JobOwner<'tcx, D, Q, C>),
 
     /// The query was already completed.
     /// Returns the result of the query and its dep-node index
@@ -354,7 +364,7 @@
 #[inline(always)]
 fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
     tcx: CTX,
-    state: &QueryState<CTX, C>,
+    state: &QueryState<CTX::DepKind, CTX::Query, C>,
     key: C::Key,
     // `on_hit` can be called while holding a lock to the query cache
     on_hit: OnHit,
@@ -364,7 +374,7 @@
     C: QueryCache,
     CTX: QueryContext,
     OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
-    OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
+    OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>) -> R,
 {
     state.cache.lookup(
         state,
@@ -386,19 +396,20 @@
 #[inline(always)]
 fn try_execute_query<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX, C>,
+    state: &QueryState<CTX::DepKind, CTX::Query, C>,
     span: Span,
     key: C::Key,
-    lookup: QueryLookup<'_, CTX, C::Key, C::Sharded>,
+    lookup: QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) -> C::Stored
 where
     C: QueryCache,
-    C::Key: Eq + Clone + Debug + crate::dep_graph::DepNodeParams<CTX>,
-    C::Stored: Clone,
+    C::Key: crate::dep_graph::DepNodeParams<CTX>,
     CTX: QueryContext,
 {
-    let job = match JobOwner::try_start(tcx, state, span, &key, lookup, query) {
+    let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+        tcx, state, span, &key, lookup, query,
+    ) {
         TryGetJob::NotYetStarted(job) => job,
         TryGetJob::Cycle(result) => return result,
         #[cfg(parallel_compiler)]
@@ -559,14 +570,12 @@
 fn force_query_with_job<C, CTX>(
     tcx: CTX,
     key: C::Key,
-    job: JobOwner<'_, CTX, C>,
+    job: JobOwner<'_, CTX::DepKind, CTX::Query, C>,
     dep_node: DepNode<CTX::DepKind>,
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) -> (C::Stored, DepNodeIndex)
 where
     C: QueryCache,
-    C::Key: Eq + Clone + Debug,
-    C::Stored: Clone,
     CTX: QueryContext,
 {
     // If the following assertion triggers, it can have two reasons:
@@ -603,10 +612,8 @@
 
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
-    if unlikely!(!diagnostics.is_empty()) {
-        if dep_node.kind != DepKind::NULL {
-            tcx.store_diagnostics(dep_node_index, diagnostics);
-        }
+    if unlikely!(!diagnostics.is_empty()) && dep_node.kind != DepKind::NULL {
+        tcx.store_diagnostics(dep_node_index, diagnostics);
     }
 
     let result = job.complete(result, dep_node_index);
@@ -617,7 +624,7 @@
 #[inline(never)]
 fn get_query_impl<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX, C>,
+    state: &QueryState<CTX::DepKind, CTX::Query, C>,
     span: Span,
     key: C::Key,
     query: &QueryVtable<CTX, C::Key, C::Value>,
@@ -625,8 +632,7 @@
 where
     CTX: QueryContext,
     C: QueryCache,
-    C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>,
-    C::Stored: Clone,
+    C::Key: crate::dep_graph::DepNodeParams<CTX>,
 {
     try_get_cached(
         tcx,
@@ -650,12 +656,12 @@
 #[inline(never)]
 fn ensure_query_impl<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX, C>,
+    state: &QueryState<CTX::DepKind, CTX::Query, C>,
     key: C::Key,
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) where
     C: QueryCache,
-    C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX>,
     CTX: QueryContext,
 {
     if query.eval_always {
@@ -687,14 +693,14 @@
 #[inline(never)]
 fn force_query_impl<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX, C>,
+    state: &QueryState<CTX::DepKind, CTX::Query, C>,
     key: C::Key,
     span: Span,
     dep_node: DepNode<CTX::DepKind>,
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) where
     C: QueryCache,
-    C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX>,
     CTX: QueryContext,
 {
     // We may be concurrently trying both execute and force a query.
@@ -708,7 +714,9 @@
             // Cache hit, do nothing
         },
         |key, lookup| {
-            let job = match JobOwner::try_start(tcx, state, span, &key, lookup, query) {
+            let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+                tcx, state, span, &key, lookup, query,
+            ) {
                 TryGetJob::NotYetStarted(job) => job,
                 TryGetJob::Cycle(_) => return,
                 #[cfg(parallel_compiler)]
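
As the comments in `get_lookup` note, `QueryState` keeps its cache and active-job maps in a `Sharded` structure: the key is hashed once, the hash selects a shard, and that shard's own map is then consulted, so each shard can be locked independently. A toy sketch of that layout using only the standard library (`DefaultHasher` stands in for `FxHasher`, and unlike the real `Sharded` this sketch lets the inner `HashMap` rehash the key):

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

const SHARDS: usize = 4;

// Toy sharded cache: hash the key once, use the hash to pick a shard,
// then store or look up the entry in that shard's map.
struct ShardedCache<K, V> {
    shards: Vec<HashMap<K, V>>,
}

impl<K: Hash + Eq, V> ShardedCache<K, V> {
    fn new() -> Self {
        ShardedCache { shards: (0..SHARDS).map(|_| HashMap::new()).collect() }
    }

    fn shard_for(key: &K) -> usize {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        (hasher.finish() as usize) % SHARDS
    }

    fn insert(&mut self, key: K, value: V) {
        let shard = Self::shard_for(&key);
        self.shards[shard].insert(key, value);
    }

    fn get(&self, key: &K) -> Option<&V> {
        self.shards[Self::shard_for(key)].get(key)
    }
}

fn main() {
    let mut cache = ShardedCache::new();
    cache.insert("type_of(A)", 42);
    assert_eq!(cache.get(&"type_of(A)"), Some(&42));
    assert_eq!(cache.get(&"mir_built(A)"), None);
}
```
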
diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs
index a48d002..34145c3 100644
--- a/compiler/rustc_resolve/src/build_reduced_graph.rs
+++ b/compiler/rustc_resolve/src/build_reduced_graph.rs
@@ -7,7 +7,7 @@
 
 use crate::def_collector::collect_definitions;
 use crate::imports::{Import, ImportKind};
-use crate::macros::{MacroRulesBinding, MacroRulesScope};
+use crate::macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef};
 use crate::Namespace::{self, MacroNS, TypeNS, ValueNS};
 use crate::{CrateLint, Determinacy, PathResult, ResolutionError, VisResolutionError};
 use crate::{
@@ -15,7 +15,6 @@
 };
 use crate::{Module, ModuleData, ModuleKind, NameBinding, NameBindingKind, Segment, ToNameBinding};
 
-use rustc_ast::token::{self, Token};
 use rustc_ast::visit::{self, AssocCtxt, Visitor};
 use rustc_ast::{self as ast, Block, ForeignItem, ForeignItemKind, Item, ItemKind, NodeId};
 use rustc_ast::{AssocItem, AssocItemKind, MetaItemKind, StmtKind};
@@ -95,6 +94,27 @@
         }
     }
 
+    /// Walks up the tree of definitions starting at `def_id`,
+    /// stopping at the first `DefKind::Mod` encountered.
+    fn nearest_mod_parent(&mut self, def_id: DefId) -> Module<'a> {
+        let def_key = self.cstore().def_key(def_id);
+
+        let mut parent_id = DefId {
+            krate: def_id.krate,
+            index: def_key.parent.expect("failed to get parent for module"),
+        };
+        // The immediate parent may not be a module
+        // (e.g. `const _: () =  { #[path = "foo.rs"] mod foo; };`)
+        // Walk up the tree until we hit a module or the crate root.
+        while parent_id.index != CRATE_DEF_INDEX
+            && self.cstore().def_kind(parent_id) != DefKind::Mod
+        {
+            let parent_def_key = self.cstore().def_key(parent_id);
+            parent_id.index = parent_def_key.parent.expect("failed to get parent for module");
+        }
+        self.get_module(parent_id)
+    }
+
     crate fn get_module(&mut self, def_id: DefId) -> Module<'a> {
         // If this is a local module, it will be in `module_map`, no need to recalculate it.
         if let Some(def_id) = def_id.as_local() {
@@ -116,11 +136,8 @@
                 .data
                 .get_opt_name()
                 .expect("given a DefId that wasn't a module");
-            // This unwrap is safe since we know this isn't the root
-            let parent = Some(self.get_module(DefId {
-                index: def_key.parent.expect("failed to get parent for module"),
-                ..def_id
-            }));
+
+            let parent = Some(self.nearest_mod_parent(def_id));
             (name, parent)
         };
 
@@ -145,8 +162,24 @@
         if let Some(id) = def_id.as_local() {
             self.local_macro_def_scopes[&id]
         } else {
-            let module_def_id = ty::DefIdTree::parent(&*self, def_id).unwrap();
-            self.get_module(module_def_id)
+            // This is not entirely correct - a `macro_rules!` macro may occur
+            // inside a 'block' module:
+            //
+            // ```rust
+            // const _: () = {
+            // #[macro_export]
+            // macro_rules! my_macro {
+            //     () => {};
+            // }
+            // };
+            // ```
+            // We don't record this information for external crates, so
+            // the module we compute here will be the closest 'mod' item
+            // (not necessarily the actual parent of the `macro_rules!`
+            // macro). `macro_rules!` macros can't use def-site hygiene,
+            // so this hopefully won't be a problem.
+            //
+            // See https://github.com/rust-lang/rust/pull/77984#issuecomment-712445508
+            self.nearest_mod_parent(def_id)
         }
     }
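
As the comments above explain, `nearest_mod_parent` and `macro_def_scope` can only recover the closest enclosing `mod` item for external definitions, because intermediate "block" parents (such as an anonymous `const`) are not recorded for other crates. A simplified sketch of that upward walk over a hypothetical def tree (the `DefKind`/`Def` types here are toy stand-ins):

```rust
use std::collections::HashMap;

// Toy def tree: every definition records its kind and its parent.
#[derive(Clone, Copy, PartialEq)]
enum DefKind {
    Mod,
    Const,
    Macro,
}

type DefId = u32;
const CRATE_ROOT: DefId = 0;

struct Def {
    kind: DefKind,
    parent: DefId,
}

// Walk up from `def_id` until the first enclosing `DefKind::Mod` (or the
// crate root), mirroring the def-key walk in `nearest_mod_parent`.
fn nearest_mod_parent(defs: &HashMap<DefId, Def>, def_id: DefId) -> DefId {
    let mut parent = defs[&def_id].parent;
    while parent != CRATE_ROOT && defs[&parent].kind != DefKind::Mod {
        parent = defs[&parent].parent;
    }
    parent
}

fn main() {
    // crate root (0) -> mod m (1) -> const _ (2) -> macro_rules! my_macro (3)
    let defs = HashMap::from([
        (1, Def { kind: DefKind::Mod, parent: CRATE_ROOT }),
        (2, Def { kind: DefKind::Const, parent: 1 }),
        (3, Def { kind: DefKind::Macro, parent: 2 }),
    ]);
    // The macro's immediate parent is the anonymous const, but the nearest
    // enclosing module is `m`.
    assert_eq!(nearest_mod_parent(&defs, 3), 1);
}
```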
 
@@ -176,7 +209,7 @@
         &mut self,
         fragment: &AstFragment,
         parent_scope: ParentScope<'a>,
-    ) -> MacroRulesScope<'a> {
+    ) -> MacroRulesScopeRef<'a> {
         collect_definitions(self, fragment, parent_scope.expansion);
         let mut visitor = BuildReducedGraphVisitor { r: self, parent_scope };
         fragment.visit_with(&mut visitor);
@@ -187,7 +220,8 @@
         let def_id = module.def_id().expect("unpopulated module without a def-id");
         for child in self.cstore().item_children_untracked(def_id, self.session) {
             let child = child.map_id(|_| panic!("unexpected id"));
-            BuildReducedGraphVisitor { r: self, parent_scope: ParentScope::module(module) }
+            let parent_scope = ParentScope::module(module, self);
+            BuildReducedGraphVisitor { r: self, parent_scope }
                 .build_reduced_graph_for_external_crate_res(child);
         }
     }
@@ -310,10 +344,10 @@
 
     fn block_needs_anonymous_module(&mut self, block: &Block) -> bool {
         // If any statements are items, we need to create an anonymous module
-        block.stmts.iter().any(|statement| match statement.kind {
-            StmtKind::Item(_) | StmtKind::MacCall(_) => true,
-            _ => false,
-        })
+        block
+            .stmts
+            .iter()
+            .any(|statement| matches!(statement.kind, StmtKind::Item(_) | StmtKind::MacCall(_)))
     }
 
     // Add an import to the current module.
@@ -613,12 +647,21 @@
 
     /// Constructs the reduced graph for one item.
     fn build_reduced_graph_for_item(&mut self, item: &'b Item) {
+        if matches!(item.kind, ItemKind::Mod(..)) && item.ident.name == kw::Invalid {
+            // Fake crate root item from expand.
+            return;
+        }
+
         let parent_scope = &self.parent_scope;
         let parent = parent_scope.module;
         let expansion = parent_scope.expansion;
         let ident = item.ident;
         let sp = item.span;
         let vis = self.resolve_visibility(&item.vis);
+        let local_def_id = self.r.local_def_id(item.id);
+        let def_id = local_def_id.to_def_id();
+
+        self.r.visibilities.insert(local_def_id, vis);
 
         match item.kind {
             ItemKind::Use(ref use_tree) => {
@@ -651,10 +694,12 @@
                 } else if orig_name == Some(kw::SelfLower) {
                     self.r.graph_root
                 } else {
-                    let def_id = self.r.local_def_id(item.id);
-                    let crate_id =
-                        self.r.crate_loader.process_extern_crate(item, &self.r.definitions, def_id);
-                    self.r.extern_crate_map.insert(def_id, crate_id);
+                    let crate_id = self.r.crate_loader.process_extern_crate(
+                        item,
+                        &self.r.definitions,
+                        local_def_id,
+                    );
+                    self.r.extern_crate_map.insert(local_def_id, crate_id);
                     self.r.get_module(DefId { krate: crate_id, index: CRATE_DEF_INDEX })
                 };
 
@@ -705,25 +750,16 @@
                 self.r.define(parent, ident, TypeNS, imported_binding);
             }
 
-            ItemKind::Mod(..) if ident.name == kw::Invalid => {} // Crate root
-
             ItemKind::Mod(..) => {
-                let def_id = self.r.local_def_id(item.id);
-                let module_kind = ModuleKind::Def(DefKind::Mod, def_id.to_def_id(), ident.name);
+                let module_kind = ModuleKind::Def(DefKind::Mod, def_id, ident.name);
                 let module = self.r.arenas.alloc_module(ModuleData {
                     no_implicit_prelude: parent.no_implicit_prelude || {
                         self.r.session.contains_name(&item.attrs, sym::no_implicit_prelude)
                     },
-                    ..ModuleData::new(
-                        Some(parent),
-                        module_kind,
-                        def_id.to_def_id(),
-                        expansion,
-                        item.span,
-                    )
+                    ..ModuleData::new(Some(parent), module_kind, def_id, expansion, item.span)
                 });
                 self.r.define(parent, ident, TypeNS, (module, vis, sp, expansion));
-                self.r.module_map.insert(def_id, module);
+                self.r.module_map.insert(local_def_id, module);
 
                 // Descend into the module.
                 self.parent_scope.module = module;
@@ -731,15 +767,15 @@
 
             // These items live in the value namespace.
             ItemKind::Static(..) => {
-                let res = Res::Def(DefKind::Static, self.r.local_def_id(item.id).to_def_id());
+                let res = Res::Def(DefKind::Static, def_id);
                 self.r.define(parent, ident, ValueNS, (res, vis, sp, expansion));
             }
             ItemKind::Const(..) => {
-                let res = Res::Def(DefKind::Const, self.r.local_def_id(item.id).to_def_id());
+                let res = Res::Def(DefKind::Const, def_id);
                 self.r.define(parent, ident, ValueNS, (res, vis, sp, expansion));
             }
             ItemKind::Fn(..) => {
-                let res = Res::Def(DefKind::Fn, self.r.local_def_id(item.id).to_def_id());
+                let res = Res::Def(DefKind::Fn, def_id);
                 self.r.define(parent, ident, ValueNS, (res, vis, sp, expansion));
 
                 // Functions introducing procedural macros reserve a slot
@@ -749,13 +785,11 @@
 
             // These items live in the type namespace.
             ItemKind::TyAlias(..) => {
-                let res = Res::Def(DefKind::TyAlias, self.r.local_def_id(item.id).to_def_id());
+                let res = Res::Def(DefKind::TyAlias, def_id);
                 self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
             }
 
             ItemKind::Enum(_, _) => {
-                let def_id = self.r.local_def_id(item.id).to_def_id();
-                self.r.variant_vis.insert(def_id, vis);
                 let module_kind = ModuleKind::Def(DefKind::Enum, def_id, ident.name);
                 let module = self.r.new_module(
                     parent,
@@ -769,14 +803,13 @@
             }
 
             ItemKind::TraitAlias(..) => {
-                let res = Res::Def(DefKind::TraitAlias, self.r.local_def_id(item.id).to_def_id());
+                let res = Res::Def(DefKind::TraitAlias, def_id);
                 self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
             }
 
             // These items live in both the type and value namespaces.
             ItemKind::Struct(ref vdata, _) => {
                 // Define a name in the type namespace.
-                let def_id = self.r.local_def_id(item.id).to_def_id();
                 let res = Res::Def(DefKind::Struct, def_id);
                 self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
 
@@ -810,17 +843,19 @@
                         }
                         ret_fields.push(field_vis);
                     }
+                    let ctor_def_id = self.r.local_def_id(ctor_node_id);
                     let ctor_res = Res::Def(
                         DefKind::Ctor(CtorOf::Struct, CtorKind::from_ast(vdata)),
-                        self.r.local_def_id(ctor_node_id).to_def_id(),
+                        ctor_def_id.to_def_id(),
                     );
                     self.r.define(parent, ident, ValueNS, (ctor_res, ctor_vis, sp, expansion));
+                    self.r.visibilities.insert(ctor_def_id, ctor_vis);
+
                     self.r.struct_constructors.insert(def_id, (ctor_res, ctor_vis, ret_fields));
                 }
             }
 
             ItemKind::Union(ref vdata, _) => {
-                let def_id = self.r.local_def_id(item.id).to_def_id();
                 let res = Res::Def(DefKind::Union, def_id);
                 self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
 
@@ -829,8 +864,6 @@
             }
 
             ItemKind::Trait(..) => {
-                let def_id = self.r.local_def_id(item.id).to_def_id();
-
                 // Add all the items within to a new module.
                 let module_kind = ModuleKind::Def(DefKind::Trait, def_id, ident.name);
                 let module = self.r.new_module(
@@ -845,6 +878,9 @@
             }
 
             // These items do not add names to modules.
+            ItemKind::Impl { of_trait: Some(..), .. } => {
+                self.r.trait_impl_items.insert(local_def_id);
+            }
             ItemKind::Impl { .. } | ItemKind::ForeignMod(..) | ItemKind::GlobalAsm(..) => {}
 
             ItemKind::MacroDef(..) | ItemKind::MacCall(_) => unreachable!(),
@@ -853,22 +889,20 @@
 
     /// Constructs the reduced graph for one foreign item.
     fn build_reduced_graph_for_foreign_item(&mut self, item: &ForeignItem) {
-        let (res, ns) = match item.kind {
-            ForeignItemKind::Fn(..) => {
-                (Res::Def(DefKind::Fn, self.r.local_def_id(item.id).to_def_id()), ValueNS)
-            }
-            ForeignItemKind::Static(..) => {
-                (Res::Def(DefKind::Static, self.r.local_def_id(item.id).to_def_id()), ValueNS)
-            }
-            ForeignItemKind::TyAlias(..) => {
-                (Res::Def(DefKind::ForeignTy, self.r.local_def_id(item.id).to_def_id()), TypeNS)
-            }
+        let local_def_id = self.r.local_def_id(item.id);
+        let def_id = local_def_id.to_def_id();
+        let (def_kind, ns) = match item.kind {
+            ForeignItemKind::Fn(..) => (DefKind::Fn, ValueNS),
+            ForeignItemKind::Static(..) => (DefKind::Static, ValueNS),
+            ForeignItemKind::TyAlias(..) => (DefKind::ForeignTy, TypeNS),
             ForeignItemKind::MacCall(_) => unreachable!(),
         };
         let parent = self.parent_scope.module;
         let expansion = self.parent_scope.expansion;
         let vis = self.resolve_visibility(&item.vis);
+        let res = Res::Def(def_kind, def_id);
         self.r.define(parent, item.ident, ns, (res, vis, item.span, expansion));
+        self.r.visibilities.insert(local_def_id, vis);
     }
 
     fn build_reduced_graph_for_block(&mut self, block: &Block) {
@@ -1121,7 +1155,7 @@
         false
     }
 
-    fn visit_invoc(&mut self, id: NodeId) -> MacroRulesScope<'a> {
+    fn visit_invoc(&mut self, id: NodeId) -> MacroRulesScopeRef<'a> {
         let invoc_id = id.placeholder_to_expn_id();
 
         self.parent_scope.module.unexpanded_invocations.borrow_mut().insert(invoc_id);
@@ -1129,7 +1163,9 @@
         let old_parent_scope = self.r.invocation_parent_scopes.insert(invoc_id, self.parent_scope);
         assert!(old_parent_scope.is_none(), "invocation data is reset for an invocation");
 
-        MacroRulesScope::Invocation(invoc_id)
+        let scope = self.r.arenas.alloc_macro_rules_scope(MacroRulesScope::Invocation(invoc_id));
+        self.r.invocation_macro_rules_scopes.entry(invoc_id).or_default().insert(scope);
+        scope
     }
 
     fn proc_macro_stub(&self, item: &ast::Item) -> Option<(MacroKind, Ident, Span)> {
@@ -1163,7 +1199,7 @@
         }
     }
 
-    fn define_macro(&mut self, item: &ast::Item) -> MacroRulesScope<'a> {
+    fn define_macro(&mut self, item: &ast::Item) -> MacroRulesScopeRef<'a> {
         let parent_scope = self.parent_scope;
         let expansion = parent_scope.expansion;
         let def_id = self.r.local_def_id(item.id);
@@ -1205,11 +1241,14 @@
                 self.r.check_reserved_macro_name(ident, res);
                 self.insert_unused_macro(ident, def_id, item.id, span);
             }
-            MacroRulesScope::Binding(self.r.arenas.alloc_macro_rules_binding(MacroRulesBinding {
-                parent_macro_rules_scope: parent_scope.macro_rules,
-                binding,
-                ident,
-            }))
+            self.r.visibilities.insert(def_id, vis);
+            self.r.arenas.alloc_macro_rules_scope(MacroRulesScope::Binding(
+                self.r.arenas.alloc_macro_rules_binding(MacroRulesBinding {
+                    parent_macro_rules_scope: parent_scope.macro_rules,
+                    binding,
+                    ident,
+                }),
+            ))
         } else {
             let module = parent_scope.module;
             let vis = match item.kind {
@@ -1224,6 +1263,7 @@
                 self.insert_unused_macro(ident, def_id, item.id, span);
             }
             self.r.define(module, ident, MacroNS, (res, vis, span, expansion));
+            self.r.visibilities.insert(def_id, vis);
             self.parent_scope.macro_rules
         }
     }
@@ -1297,50 +1337,68 @@
     }
 
     fn visit_assoc_item(&mut self, item: &'b AssocItem, ctxt: AssocCtxt) {
-        let parent = self.parent_scope.module;
-
         if let AssocItemKind::MacCall(_) = item.kind {
             self.visit_invoc(item.id);
             return;
         }
 
-        if let AssocCtxt::Impl = ctxt {
-            self.resolve_visibility(&item.vis);
-            visit::walk_assoc_item(self, item, ctxt);
-            return;
-        }
+        let local_def_id = self.r.local_def_id(item.id);
+        let def_id = local_def_id.to_def_id();
+        let vis = match ctxt {
+            AssocCtxt::Trait => {
+                let (def_kind, ns) = match item.kind {
+                    AssocItemKind::Const(..) => (DefKind::AssocConst, ValueNS),
+                    AssocItemKind::Fn(_, ref sig, _, _) => {
+                        if sig.decl.has_self() {
+                            self.r.has_self.insert(def_id);
+                        }
+                        (DefKind::AssocFn, ValueNS)
+                    }
+                    AssocItemKind::TyAlias(..) => (DefKind::AssocTy, TypeNS),
+                    AssocItemKind::MacCall(_) => bug!(), // handled above
+                };
 
-        // Add the item to the trait info.
-        let item_def_id = self.r.local_def_id(item.id).to_def_id();
-        let (res, ns) = match item.kind {
-            AssocItemKind::Const(..) => (Res::Def(DefKind::AssocConst, item_def_id), ValueNS),
-            AssocItemKind::Fn(_, ref sig, _, _) => {
-                if sig.decl.has_self() {
-                    self.r.has_self.insert(item_def_id);
-                }
-                (Res::Def(DefKind::AssocFn, item_def_id), ValueNS)
+                let parent = self.parent_scope.module;
+                let expansion = self.parent_scope.expansion;
+                let res = Res::Def(def_kind, def_id);
+                // Trait item visibility is inherited from its trait when not specified explicitly.
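+                // (Illustrative: a bare `fn f();` inside `pub trait Tr { ... }` is
+                // recorded here with the trait's `pub` visibility.)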
+                let vis = match &item.vis.kind {
+                    ast::VisibilityKind::Inherited => {
+                        self.r.visibilities[&parent.def_id().unwrap().expect_local()]
+                    }
+                    _ => self.resolve_visibility(&item.vis),
+                };
+                // FIXME: For historical reasons the binding visibility is set to public,
+                // use the actual visibility here instead, as is already done for enum variants.
+                let vis_hack = ty::Visibility::Public;
+                self.r.define(parent, item.ident, ns, (res, vis_hack, item.span, expansion));
+                Some(vis)
             }
-            AssocItemKind::TyAlias(..) => (Res::Def(DefKind::AssocTy, item_def_id), TypeNS),
-            AssocItemKind::MacCall(_) => bug!(), // handled above
+            AssocCtxt::Impl => {
+                // Trait impl item visibility is inherited from its trait when not specified
+                // explicitly. In that case we cannot determine it here in early resolve,
+                // so we leave a hole in the visibility table to be filled later.
+                // Inherent impl item visibility is never inherited from other items.
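+                //
+                // Illustrative example of the "hole" case: given `trait Tr { fn f(); }`
+                // and `impl Tr for S { fn f() {} }`, the visibility of `S::f` is not
+                // recorded here; it is derived from `Tr::f` later.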
+                if matches!(item.vis.kind, ast::VisibilityKind::Inherited)
+                    && self
+                        .r
+                        .trait_impl_items
+                        .contains(&ty::DefIdTree::parent(&*self.r, def_id).unwrap().expect_local())
+                {
+                    None
+                } else {
+                    Some(self.resolve_visibility(&item.vis))
+                }
+            }
         };
 
-        let vis = ty::Visibility::Public;
-        let expansion = self.parent_scope.expansion;
-        self.r.define(parent, item.ident, ns, (res, vis, item.span, expansion));
+        if let Some(vis) = vis {
+            self.r.visibilities.insert(local_def_id, vis);
+        }
 
         visit::walk_assoc_item(self, item, ctxt);
     }
 
-    fn visit_token(&mut self, t: Token) {
-        if let token::Interpolated(nt) = t.kind {
-            if let token::NtExpr(ref expr) = *nt {
-                if let ast::ExprKind::MacCall(..) = expr.kind {
-                    self.visit_invoc(expr.id);
-                }
-            }
-        }
-    }
-
     fn visit_attribute(&mut self, attr: &'b ast::Attribute) {
         if !attr.is_doc_comment() && attr::is_builtin_attr(attr) {
             self.r
@@ -1394,7 +1452,8 @@
         if sf.is_placeholder {
             self.visit_invoc(sf.id);
         } else {
-            self.resolve_visibility(&sf.vis);
+            let vis = self.resolve_visibility(&sf.vis);
+            self.r.visibilities.insert(self.r.local_def_id(sf.id), vis);
             visit::walk_struct_field(self, sf);
         }
     }
@@ -1408,22 +1467,30 @@
         }
 
         let parent = self.parent_scope.module;
-        let vis = self.r.variant_vis[&parent.def_id().expect("enum without def-id")];
+        let vis = match variant.vis.kind {
+            // Variant visibility is inherited from its enum when not specified explicitly.
+            ast::VisibilityKind::Inherited => {
+                self.r.visibilities[&parent.def_id().unwrap().expect_local()]
+            }
+            _ => self.resolve_visibility(&variant.vis),
+        };
         let expn_id = self.parent_scope.expansion;
         let ident = variant.ident;
 
         // Define a name in the type namespace.
-        let def_id = self.r.local_def_id(variant.id).to_def_id();
-        let res = Res::Def(DefKind::Variant, def_id);
+        let def_id = self.r.local_def_id(variant.id);
+        let res = Res::Def(DefKind::Variant, def_id.to_def_id());
         self.r.define(parent, ident, TypeNS, (res, vis, variant.span, expn_id));
+        self.r.visibilities.insert(def_id, vis);
 
-        // If the variant is marked as non_exhaustive then lower the visibility to within the
-        // crate.
-        let mut ctor_vis = vis;
-        let has_non_exhaustive = self.r.session.contains_name(&variant.attrs, sym::non_exhaustive);
-        if has_non_exhaustive && vis == ty::Visibility::Public {
-            ctor_vis = ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX));
-        }
+        // If the variant is marked as non_exhaustive then lower the visibility to within the crate.
+        let ctor_vis = if vis == ty::Visibility::Public
+            && self.r.session.contains_name(&variant.attrs, sym::non_exhaustive)
+        {
+            ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX))
+        } else {
+            vis
+        };
 
         // Define a constructor name in the value namespace.
         // Braced variants, unlike structs, generate unusable names in
@@ -1431,12 +1498,15 @@
         // It's ok to use the variant's id as a ctor id since an
         // error will be reported on any use of such resolution anyway.
         let ctor_node_id = variant.data.ctor_id().unwrap_or(variant.id);
-        let ctor_def_id = self.r.local_def_id(ctor_node_id).to_def_id();
+        let ctor_def_id = self.r.local_def_id(ctor_node_id);
         let ctor_kind = CtorKind::from_ast(&variant.data);
-        let ctor_res = Res::Def(DefKind::Ctor(CtorOf::Variant, ctor_kind), ctor_def_id);
+        let ctor_res = Res::Def(DefKind::Ctor(CtorOf::Variant, ctor_kind), ctor_def_id.to_def_id());
         self.r.define(parent, ident, ValueNS, (ctor_res, ctor_vis, variant.span, expn_id));
+        if ctor_def_id != def_id {
+            self.r.visibilities.insert(ctor_def_id, ctor_vis);
+        }
         // Record field names for error reporting.
-        self.insert_field_names_local(ctor_def_id, &variant.data);
+        self.insert_field_names_local(ctor_def_id.to_def_id(), &variant.data);
 
         visit::walk_variant(self, variant);
     }
diff --git a/compiler/rustc_resolve/src/def_collector.rs b/compiler/rustc_resolve/src/def_collector.rs
index 5d5088d..69773ba 100644
--- a/compiler/rustc_resolve/src/def_collector.rs
+++ b/compiler/rustc_resolve/src/def_collector.rs
@@ -1,5 +1,4 @@
 use crate::Resolver;
-use rustc_ast::token::{self, Token};
 use rustc_ast::visit::{self, FnKind};
 use rustc_ast::walk_list;
 use rustc_ast::*;
@@ -76,6 +75,7 @@
         let def_data = match &i.kind {
             ItemKind::Impl { .. } => DefPathData::Impl,
             ItemKind::Mod(..) if i.ident.name == kw::Invalid => {
+                // Fake crate root item from expand.
                 return visit::walk_item(self, i);
             }
             ItemKind::Mod(..)
@@ -239,13 +239,13 @@
 
     fn visit_ty(&mut self, ty: &'a Ty) {
         match ty.kind {
-            TyKind::MacCall(..) => return self.visit_macro_invoc(ty.id),
+            TyKind::MacCall(..) => self.visit_macro_invoc(ty.id),
             TyKind::ImplTrait(node_id, _) => {
-                self.create_def(node_id, DefPathData::ImplTrait, ty.span);
+                let parent_def = self.create_def(node_id, DefPathData::ImplTrait, ty.span);
+                self.with_parent(parent_def, |this| visit::walk_ty(this, ty));
             }
-            _ => {}
+            _ => visit::walk_ty(self, ty),
         }
-        visit::walk_ty(self, ty);
     }
 
     fn visit_stmt(&mut self, stmt: &'a Stmt) {
@@ -255,16 +255,6 @@
         }
     }
 
-    fn visit_token(&mut self, t: Token) {
-        if let token::Interpolated(nt) = t.kind {
-            if let token::NtExpr(ref expr) = *nt {
-                if let ExprKind::MacCall(..) = expr.kind {
-                    self.visit_macro_invoc(expr.id);
-                }
-            }
-        }
-    }
-
     fn visit_arm(&mut self, arm: &'a Arm) {
         if arm.is_placeholder { self.visit_macro_invoc(arm.id) } else { visit::walk_arm(self, arm) }
     }
diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs
index 612bc3e..acd88af 100644
--- a/compiler/rustc_resolve/src/diagnostics.rs
+++ b/compiler/rustc_resolve/src/diagnostics.rs
@@ -469,24 +469,17 @@
             ResolutionError::ParamInNonTrivialAnonConst { name, is_type } => {
                 let mut err = self.session.struct_span_err(
                     span,
-                    "generic parameters must not be used inside of non trivial constant values",
+                    "generic parameters may not be used in const operations",
                 );
-                err.span_label(
-                    span,
-                    &format!(
-                        "non-trivial anonymous constants must not depend on the parameter `{}`",
-                        name
-                    ),
-                );
+                err.span_label(span, &format!("cannot perform const operation using `{}`", name));
 
                 if is_type {
-                    err.note("type parameters are currently not permitted in anonymous constants");
+                    err.note("type parameters may not be used in const expressions");
                 } else {
-                    err.help(
-                        &format!("it is currently only allowed to use either `{0}` or `{{ {0} }}` as generic constants",
-                                 name
-                        )
-                    );
+                    err.help(&format!(
+                        "const parameters may only be used as standalone arguments, i.e. `{}`",
+                        name
+                    ));
                 }
 
                 err
@@ -637,7 +630,7 @@
                     }
                 }
                 Scope::MacroRules(macro_rules_scope) => {
-                    if let MacroRulesScope::Binding(macro_rules_binding) = macro_rules_scope {
+                    if let MacroRulesScope::Binding(macro_rules_binding) = macro_rules_scope.get() {
                         let res = macro_rules_binding.binding.res();
                         if filter_fn(res) {
                             suggestions
@@ -929,6 +922,12 @@
         );
         self.add_typo_suggestion(err, suggestion, ident.span);
 
+        let import_suggestions =
+            self.lookup_import_candidates(ident, Namespace::MacroNS, parent_scope, |res| {
+                matches!(res, Res::Def(DefKind::Macro(MacroKind::Bang), _))
+            });
+        show_candidates(err, None, &import_suggestions, false, true);
+
         if macro_kind == MacroKind::Derive && (ident.name == sym::Send || ident.name == sym::Sync) {
             let msg = format!("unsafe traits like `{}` should be implemented explicitly", ident);
             err.span_note(ident.span, &msg);
@@ -1006,11 +1005,9 @@
     fn binding_description(&self, b: &NameBinding<'_>, ident: Ident, from_prelude: bool) -> String {
         let res = b.res();
         if b.span.is_dummy() {
-            let add_built_in = match b.res() {
-                // These already contain the "built-in" prefix or look bad with it.
-                Res::NonMacroAttr(..) | Res::PrimTy(..) | Res::ToolMod => false,
-                _ => true,
-            };
+            // These already contain the "built-in" prefix or look bad with it.
+            let add_built_in =
+                !matches!(b.res(), Res::NonMacroAttr(..) | Res::PrimTy(..) | Res::ToolMod);
             let (built_in, from) = if from_prelude {
                 ("", " from prelude")
             } else if b.is_extern_crate()
@@ -1024,17 +1021,11 @@
                 ("", "")
             };
 
-            let article = if built_in.is_empty() { res.article() } else { "a" };
-            format!(
-                "{a}{built_in} {thing}{from}",
-                a = article,
-                thing = res.descr(),
-                built_in = built_in,
-                from = from
-            )
+            let a = if built_in.is_empty() { res.article() } else { "a" };
+            format!("{a}{built_in} {thing}{from}", thing = res.descr())
         } else {
             let introduced = if b.is_import() { "imported" } else { "defined" };
-            format!("the {thing} {introduced} here", thing = res.descr(), introduced = introduced)
+            format!("the {thing} {introduced} here", thing = res.descr())
         }
     }
 
@@ -1052,19 +1043,13 @@
             ident.span,
             E0659,
             "`{ident}` is ambiguous ({why})",
-            ident = ident,
             why = kind.descr()
         );
         err.span_label(ident.span, "ambiguous name");
 
         let mut could_refer_to = |b: &NameBinding<'_>, misc: AmbiguityErrorMisc, also: &str| {
             let what = self.binding_description(b, ident, misc == AmbiguityErrorMisc::FromPrelude);
-            let note_msg = format!(
-                "`{ident}` could{also} refer to {what}",
-                ident = ident,
-                also = also,
-                what = what
-            );
+            let note_msg = format!("`{ident}` could{also} refer to {what}");
 
             let thing = b.res().descr();
             let mut help_msgs = Vec::new();
@@ -1074,30 +1059,18 @@
                     || kind == AmbiguityKind::GlobVsOuter && swapped != also.is_empty())
             {
                 help_msgs.push(format!(
-                    "consider adding an explicit import of \
-                     `{ident}` to disambiguate",
-                    ident = ident
+                    "consider adding an explicit import of `{ident}` to disambiguate"
                 ))
             }
             if b.is_extern_crate() && ident.span.rust_2018() {
-                help_msgs.push(format!(
-                    "use `::{ident}` to refer to this {thing} unambiguously",
-                    ident = ident,
-                    thing = thing,
-                ))
+                help_msgs.push(format!("use `::{ident}` to refer to this {thing} unambiguously"))
             }
             if misc == AmbiguityErrorMisc::SuggestCrate {
-                help_msgs.push(format!(
-                    "use `crate::{ident}` to refer to this {thing} unambiguously",
-                    ident = ident,
-                    thing = thing,
-                ))
+                help_msgs
+                    .push(format!("use `crate::{ident}` to refer to this {thing} unambiguously"))
             } else if misc == AmbiguityErrorMisc::SuggestSelf {
-                help_msgs.push(format!(
-                    "use `self::{ident}` to refer to this {thing} unambiguously",
-                    ident = ident,
-                    thing = thing,
-                ))
+                help_msgs
+                    .push(format!("use `self::{ident}` to refer to this {thing} unambiguously"))
             }
 
             err.span_note(b.span, &note_msg);
@@ -1170,12 +1143,10 @@
             };
 
             let first = ptr::eq(binding, first_binding);
-            let descr = get_descr(binding);
             let msg = format!(
                 "{and_refers_to}the {item} `{name}`{which} is defined here{dots}",
                 and_refers_to = if first { "" } else { "...and refers to " },
-                item = descr,
-                name = name,
+                item = get_descr(binding),
                 which = if first { "" } else { " which" },
                 dots = if next_binding.is_some() { "..." } else { "" },
             );
@@ -1606,10 +1577,7 @@
         if *c == ':' {
             num_colons += 1;
         }
-        match c {
-            ':' if num_colons == 2 => false,
-            _ => true,
-        }
+        !matches!(c, ':' if num_colons == 2)
     });
     // Find everything after the second colon.. `foo::{baz, makro};`
     let from_second_colon = use_span.with_lo(until_second_colon.hi() + BytePos(1));
diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs
index bf8a2f2..026cf8b 100644
--- a/compiler/rustc_resolve/src/imports.rs
+++ b/compiler/rustc_resolve/src/imports.rs
@@ -114,10 +114,7 @@
 
 impl<'a> Import<'a> {
     pub fn is_glob(&self) -> bool {
-        match self.kind {
-            ImportKind::Glob { .. } => true,
-            _ => false,
-        }
+        matches!(self.kind, ImportKind::Glob { .. })
     }
 
     pub fn is_nested(&self) -> bool {
@@ -906,12 +903,10 @@
                     if !ModuleOrUniformRoot::same_def(module, initial_module) && no_ambiguity {
                         span_bug!(import.span, "inconsistent resolution for an import");
                     }
-                } else {
-                    if self.r.privacy_errors.is_empty() {
-                        let msg = "cannot determine resolution for the import";
-                        let msg_note = "import resolution is stuck, try simplifying other imports";
-                        self.r.session.struct_span_err(import.span, msg).note(msg_note).emit();
-                    }
+                } else if self.r.privacy_errors.is_empty() {
+                    let msg = "cannot determine resolution for the import";
+                    let msg_note = "import resolution is stuck, try simplifying other imports";
+                    self.r.session.struct_span_err(import.span, msg).note(msg_note).emit();
                 }
 
                 module
@@ -1053,19 +1048,14 @@
                             if res != initial_res && this.ambiguity_errors.is_empty() {
                                 span_bug!(import.span, "inconsistent resolution for an import");
                             }
-                        } else {
-                            if res != Res::Err
-                                && this.ambiguity_errors.is_empty()
-                                && this.privacy_errors.is_empty()
-                            {
-                                let msg = "cannot determine resolution for the import";
-                                let msg_note =
-                                    "import resolution is stuck, try simplifying other imports";
-                                this.session
-                                    .struct_span_err(import.span, msg)
-                                    .note(msg_note)
-                                    .emit();
-                            }
+                        } else if res != Res::Err
+                            && this.ambiguity_errors.is_empty()
+                            && this.privacy_errors.is_empty()
+                        {
+                            let msg = "cannot determine resolution for the import";
+                            let msg_note =
+                                "import resolution is stuck, try simplifying other imports";
+                            this.session.struct_span_err(import.span, msg).note(msg_note).emit();
                         }
                     }
                     Err(..) => {
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index 2c01934..f156caf 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -57,6 +57,12 @@
     FnParam,
 }
 
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum IsRepeatExpr {
+    No,
+    Yes,
+}
+
 impl PatternSource {
     fn descr(self) -> &'static str {
         match self {
@@ -251,16 +257,12 @@
     }
 
     fn is_call(self) -> bool {
-        match self {
-            PathSource::Expr(Some(&Expr { kind: ExprKind::Call(..), .. })) => true,
-            _ => false,
-        }
+        matches!(self, PathSource::Expr(Some(&Expr { kind: ExprKind::Call(..), .. })))
     }
 
     crate fn is_expected(self, res: Res) -> bool {
         match self {
-            PathSource::Type => match res {
-                Res::Def(
+            PathSource::Type => matches!(res, Res::Def(
                     DefKind::Struct
                     | DefKind::Union
                     | DefKind::Enum
@@ -274,19 +276,12 @@
                     _,
                 )
                 | Res::PrimTy(..)
-                | Res::SelfTy(..) => true,
-                _ => false,
-            },
-            PathSource::Trait(AliasPossibility::No) => match res {
-                Res::Def(DefKind::Trait, _) => true,
-                _ => false,
-            },
-            PathSource::Trait(AliasPossibility::Maybe) => match res {
-                Res::Def(DefKind::Trait | DefKind::TraitAlias, _) => true,
-                _ => false,
-            },
-            PathSource::Expr(..) => match res {
-                Res::Def(
+                | Res::SelfTy(..)),
+            PathSource::Trait(AliasPossibility::No) => matches!(res, Res::Def(DefKind::Trait, _)),
+            PathSource::Trait(AliasPossibility::Maybe) => {
+                matches!(res, Res::Def(DefKind::Trait | DefKind::TraitAlias, _))
+            }
+            PathSource::Expr(..) => matches!(res, Res::Def(
                     DefKind::Ctor(_, CtorKind::Const | CtorKind::Fn)
                     | DefKind::Const
                     | DefKind::Static
@@ -297,23 +292,14 @@
                     _,
                 )
                 | Res::Local(..)
-                | Res::SelfCtor(..) => true,
-                _ => false,
-            },
-            PathSource::Pat => match res {
-                Res::Def(
+                | Res::SelfCtor(..)),
+            PathSource::Pat => matches!(res, Res::Def(
                     DefKind::Ctor(_, CtorKind::Const) | DefKind::Const | DefKind::AssocConst,
                     _,
                 )
-                | Res::SelfCtor(..) => true,
-                _ => false,
-            },
-            PathSource::TupleStruct(..) => match res {
-                Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) | Res::SelfCtor(..) => true,
-                _ => false,
-            },
-            PathSource::Struct => match res {
-                Res::Def(
+                | Res::SelfCtor(..)),
+            PathSource::TupleStruct(..) => res.expected_in_tuple_struct_pat(),
+            PathSource::Struct => matches!(res, Res::Def(
                     DefKind::Struct
                     | DefKind::Union
                     | DefKind::Variant
@@ -321,9 +307,7 @@
                     | DefKind::AssocTy,
                     _,
                 )
-                | Res::SelfTy(..) => true,
-                _ => false,
-            },
+                | Res::SelfTy(..)),
             PathSource::TraitItem(ns) => match res {
                 Res::Def(DefKind::AssocConst | DefKind::AssocFn, _) if ns == ValueNS => true,
                 Res::Def(DefKind::AssocTy, _) if ns == TypeNS => true,
@@ -353,8 +337,8 @@
 
 #[derive(Default)]
 struct DiagnosticMetadata<'ast> {
-    /// The current trait's associated types' ident, used for diagnostic suggestions.
-    current_trait_assoc_types: Vec<Ident>,
+    /// The current trait's associated items' ident, used for diagnostic suggestions.
+    current_trait_assoc_items: Option<&'ast [P<AssocItem>]>,
 
     /// The current self type if inside an impl (used for better errors).
     current_self_type: Option<Ty>,
@@ -369,7 +353,7 @@
     /// param.
     currently_processing_generics: bool,
 
-    /// The current enclosing function (used for better errors).
+    /// The current enclosing (non-closure) function (used for better errors).
     current_function: Option<(FnKind<'ast>, Span)>,
 
     /// A list of labels as of yet unused. Labels will be removed from this map when
@@ -384,6 +368,13 @@
 
     /// Used to detect possible `if let` written without `let` and to provide structured suggestion.
     in_if_condition: Option<&'ast Expr>,
+
+    /// If we are currently in a trait object definition. Used to point at the bounds when
+    /// encountering a struct or enum.
+    current_trait_object: Option<&'ast [ast::GenericBound]>,
+
+    /// Given `where <T as Bar>::Baz: String`, suggest `where T: Bar<Baz = String>`.
+    current_where_predicate: Option<&'ast WherePredicate>,
 }
 
 struct LateResolutionVisitor<'a, 'b, 'ast> {
@@ -430,10 +421,8 @@
         self.resolve_block(block);
     }
     fn visit_anon_const(&mut self, constant: &'ast AnonConst) {
-        debug!("visit_anon_const {:?}", constant);
-        self.with_constant_rib(constant.value.is_potential_trivial_const_param(), |this| {
-            visit::walk_anon_const(this, constant);
-        });
+        // We deal with repeat expressions explicitly in `resolve_expr`.
+        self.resolve_anon_const(constant, IsRepeatExpr::No);
     }
     fn visit_expr(&mut self, expr: &'ast Expr) {
         self.resolve_expr(expr, None);
@@ -453,6 +442,7 @@
         self.diagnostic_metadata.current_let_binding = original;
     }
     fn visit_ty(&mut self, ty: &'ast Ty) {
+        let prev = self.diagnostic_metadata.current_trait_object;
         match ty.kind {
             TyKind::Path(ref qself, ref path) => {
                 self.smart_resolve_path(ty.id, qself.as_ref(), path, PathSource::Type);
@@ -464,9 +454,13 @@
                     .map_or(Res::Err, |d| d.res());
                 self.r.record_partial_res(ty.id, PartialRes::new(res));
             }
+            TyKind::TraitObject(ref bounds, ..) => {
+                self.diagnostic_metadata.current_trait_object = Some(&bounds[..]);
+            }
             _ => (),
         }
         visit::walk_ty(self, ty);
+        self.diagnostic_metadata.current_trait_object = prev;
     }
     fn visit_poly_trait_ref(&mut self, tref: &'ast PolyTraitRef, m: &'ast TraitBoundModifier) {
         self.smart_resolve_path(
@@ -503,8 +497,10 @@
             FnKind::Fn(FnCtxt::Assoc(_), ..) => NormalRibKind,
             FnKind::Closure(..) => ClosureOrAsyncRibKind,
         };
-        let previous_value =
-            replace(&mut self.diagnostic_metadata.current_function, Some((fn_kind, sp)));
+        let previous_value = self.diagnostic_metadata.current_function;
+        if matches!(fn_kind, FnKind::Fn(..)) {
+            self.diagnostic_metadata.current_function = Some((fn_kind, sp));
+        }
         debug!("(resolving function) entering function");
         let declaration = fn_kind.decl();
 
@@ -633,7 +629,11 @@
                         if !check_ns(TypeNS) && check_ns(ValueNS) {
                             // This must be equivalent to `visit_anon_const`, but we cannot call it
                             // directly due to visitor lifetimes so we have to copy-paste some code.
-                            self.with_constant_rib(true, |this| {
+                            //
+                            // Note that we might not be inside of a repeat expression here,
+                            // but considering that `IsRepeatExpr` is only relevant for
+                            // non-trivial constants this doesn't matter.
+                            self.with_constant_rib(IsRepeatExpr::No, true, |this| {
                                 this.smart_resolve_path(
                                     ty.id,
                                     qself.as_ref(),
@@ -660,6 +660,14 @@
         }
         self.diagnostic_metadata.currently_processing_generics = prev;
     }
+
+    fn visit_where_predicate(&mut self, p: &'ast WherePredicate) {
+        debug!("visit_where_predicate {:?}", p);
+        let previous_value =
+            replace(&mut self.diagnostic_metadata.current_where_predicate, Some(p));
+        visit::walk_where_predicate(self, p);
+        self.diagnostic_metadata.current_where_predicate = previous_value;
+    }
 }
 
 impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
@@ -667,7 +675,7 @@
         // During late resolution we only track the module component of the parent scope,
         // although it may be useful to track other components as well for diagnostics.
         let graph_root = resolver.graph_root;
-        let parent_scope = ParentScope::module(graph_root);
+        let parent_scope = ParentScope::module(graph_root, resolver);
         let start_rib_kind = ModuleRibKind(graph_root);
         LateResolutionVisitor {
             r: resolver,
@@ -958,9 +966,11 @@
                                             //
                                             // Type parameters can already be used and as associated consts are
                                             // not used as part of the type system, this is far less surprising.
-                                            this.with_constant_rib(true, |this| {
-                                                this.visit_expr(expr)
-                                            });
+                                            this.with_constant_rib(
+                                                IsRepeatExpr::No,
+                                                true,
+                                                |this| this.visit_expr(expr),
+                                            );
                                         }
                                     }
                                     AssocItemKind::Fn(_, _, generics, _) => {
@@ -1001,7 +1011,9 @@
                 self.with_item_rib(HasGenericParams::No, |this| {
                     this.visit_ty(ty);
                     if let Some(expr) = expr {
-                        this.with_constant_rib(expr.is_potential_trivial_const_param(), |this| {
+                        // We already forbid generic params because of the above item rib,
+                        // so it doesn't matter whether this is a trivial constant.
+                        this.with_constant_rib(IsRepeatExpr::No, true, |this| {
                             this.visit_expr(expr)
                         });
                     }
@@ -1100,12 +1112,29 @@
         self.with_rib(ValueNS, kind, |this| this.with_rib(TypeNS, kind, f))
     }
 
-    fn with_constant_rib(&mut self, trivial: bool, f: impl FnOnce(&mut Self)) {
-        debug!("with_constant_rib");
-        self.with_rib(ValueNS, ConstantItemRibKind(trivial), |this| {
-            this.with_rib(TypeNS, ConstantItemRibKind(trivial), |this| {
-                this.with_label_rib(ConstantItemRibKind(trivial), f);
-            })
+    // HACK(min_const_generics,const_evaluatable_unchecked): We
+    // want to keep allowing `[0; std::mem::size_of::<*mut T>()]`
+    // with a future compat lint for now. We do this by adding an
+    // additional special case for repeat expressions.
+    //
+    // Note that we intentionally still forbid `[0; N + 1]` during
+    // name resolution so that we don't extend the future
+    // compat lint to new cases.
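+    //
+    // As a rough illustration of the rib flags below: for a repeat expression
+    // the type namespace rib is treated as "trivial" even when the body is not,
+    // so type parameters still resolve in e.g. `size_of::<*mut T>()`, while the
+    // value namespace keeps its usual restrictions.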
+    fn with_constant_rib(
+        &mut self,
+        is_repeat: IsRepeatExpr,
+        is_trivial: bool,
+        f: impl FnOnce(&mut Self),
+    ) {
+        debug!("with_constant_rib: is_repeat={:?} is_trivial={}", is_repeat, is_trivial);
+        self.with_rib(ValueNS, ConstantItemRibKind(is_trivial), |this| {
+            this.with_rib(
+                TypeNS,
+                ConstantItemRibKind(is_repeat == IsRepeatExpr::Yes || is_trivial),
+                |this| {
+                    this.with_label_rib(ConstantItemRibKind(is_trivial), f);
+                },
+            )
         });
     }
 
@@ -1126,26 +1155,18 @@
         result
     }
 
-    /// When evaluating a `trait` use its associated types' idents for suggestionsa in E0412.
+    /// When evaluating a `trait` use its associated types' idents for suggestions in E0412.
     fn with_trait_items<T>(
         &mut self,
-        trait_items: &Vec<P<AssocItem>>,
+        trait_items: &'ast Vec<P<AssocItem>>,
         f: impl FnOnce(&mut Self) -> T,
     ) -> T {
-        let trait_assoc_types = replace(
-            &mut self.diagnostic_metadata.current_trait_assoc_types,
-            trait_items
-                .iter()
-                .filter_map(|item| match &item.kind {
-                    AssocItemKind::TyAlias(_, _, bounds, _) if bounds.is_empty() => {
-                        Some(item.ident)
-                    }
-                    _ => None,
-                })
-                .collect(),
+        let trait_assoc_items = replace(
+            &mut self.diagnostic_metadata.current_trait_assoc_items,
+            Some(&trait_items[..]),
         );
         let result = f(self);
-        self.diagnostic_metadata.current_trait_assoc_types = trait_assoc_types;
+        self.diagnostic_metadata.current_trait_assoc_items = trait_assoc_items;
         result
     }
 
@@ -1250,9 +1271,17 @@
                                             //
                                             // Type parameters can already be used and as associated consts are
                                             // not used as part of the type system, this is far less surprising.
-                                            this.with_constant_rib(true, |this| {
-                                                visit::walk_assoc_item(this, item, AssocCtxt::Impl)
-                                            });
+                                            this.with_constant_rib(
+                                                IsRepeatExpr::No,
+                                                true,
+                                                |this| {
+                                                    visit::walk_assoc_item(
+                                                        this,
+                                                        item,
+                                                        AssocCtxt::Impl,
+                                                    )
+                                                },
+                                            );
                                         }
                                         AssocItemKind::Fn(_, _, generics, _) => {
                                             // We also need a new scope for the impl item type parameters.
@@ -1391,10 +1420,7 @@
     }
 
     fn is_base_res_local(&self, nid: NodeId) -> bool {
-        match self.r.partial_res_map.get(&nid).map(|res| res.base_res()) {
-            Some(Res::Local(..)) => true,
-            _ => false,
-        }
+        matches!(self.r.partial_res_map.get(&nid).map(|res| res.base_res()), Some(Res::Local(..)))
     }
 
     /// Checks that all of the arms in an or-pattern have exactly the
@@ -2177,6 +2203,17 @@
         debug!("(resolving block) leaving block");
     }
 
+    fn resolve_anon_const(&mut self, constant: &'ast AnonConst, is_repeat: IsRepeatExpr) {
+        debug!("resolve_anon_const {:?} is_repeat: {:?}", constant, is_repeat);
+        self.with_constant_rib(
+            is_repeat,
+            constant.value.is_potential_trivial_const_param(),
+            |this| {
+                visit::walk_anon_const(this, constant);
+            },
+        );
+    }
+
     fn resolve_expr(&mut self, expr: &'ast Expr, parent: Option<&'ast Expr>) {
         // First, record candidate traits for this expression if it could
         // result in the invocation of a method call.
@@ -2300,6 +2337,10 @@
             ExprKind::Async(..) | ExprKind::Closure(..) => {
                 self.with_label_rib(ClosureOrAsyncRibKind, |this| visit::walk_expr(this, expr));
             }
+            ExprKind::Repeat(ref elem, ref ct) => {
+                self.visit_expr(elem);
+                self.resolve_anon_const(ct, IsRepeatExpr::Yes);
+            }
             _ => {
                 visit::walk_expr(self, expr);
             }
diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs
index 521ea7a..00e6d5c 100644
--- a/compiler/rustc_resolve/src/late/diagnostics.rs
+++ b/compiler/rustc_resolve/src/late/diagnostics.rs
@@ -1,6 +1,6 @@
 use crate::diagnostics::{ImportSuggestion, LabelSuggestion, TypoSuggestion};
 use crate::late::lifetimes::{ElisionFailureInfo, LifetimeContext};
-use crate::late::{LateResolutionVisitor, RibKind};
+use crate::late::{AliasPossibility, LateResolutionVisitor, RibKind};
 use crate::path_names_to_string;
 use crate::{CrateLint, Module, ModuleKind, ModuleOrUniformRoot};
 use crate::{PathResult, PathSource, Segment};
@@ -8,18 +8,19 @@
 use rustc_ast::util::lev_distance::find_best_match_for_name;
 use rustc_ast::visit::FnKind;
 use rustc_ast::{self as ast, Expr, ExprKind, Item, ItemKind, NodeId, Path, Ty, TyKind};
+use rustc_ast_pretty::pprust::path_segment_to_string;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder};
 use rustc_hir as hir;
 use rustc_hir::def::Namespace::{self, *};
-use rustc_hir::def::{self, CtorKind, DefKind};
+use rustc_hir::def::{self, CtorKind, CtorOf, DefKind};
 use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
 use rustc_hir::PrimTy;
 use rustc_session::config::nightly_options;
 use rustc_session::parse::feature_err;
 use rustc_span::hygiene::MacroKind;
 use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{BytePos, Span, DUMMY_SP};
+use rustc_span::{BytePos, MultiSpan, Span, DUMMY_SP};
 
 use tracing::debug;
 
@@ -29,7 +30,21 @@
 enum AssocSuggestion {
     Field,
     MethodWithSelf,
-    AssocItem,
+    AssocFn,
+    AssocType,
+    AssocConst,
+}
+
+impl AssocSuggestion {
+    fn action(&self) -> &'static str {
+        match self {
+            AssocSuggestion::Field => "use the available field",
+            AssocSuggestion::MethodWithSelf => "call the method with the fully-qualified path",
+            AssocSuggestion::AssocFn => "call the associated function",
+            AssocSuggestion::AssocConst => "use the associated `const`",
+            AssocSuggestion::AssocType => "use the associated type",
+        }
+    }
 }
 
 crate enum MissingLifetimeSpot<'tcx> {
@@ -385,15 +400,18 @@
                     AssocSuggestion::MethodWithSelf if self_is_available => {
                         err.span_suggestion(
                             span,
-                            "try",
+                            "you might have meant to call the method",
                             format!("self.{}", path_str),
                             Applicability::MachineApplicable,
                         );
                     }
-                    AssocSuggestion::MethodWithSelf | AssocSuggestion::AssocItem => {
+                    AssocSuggestion::MethodWithSelf
+                    | AssocSuggestion::AssocFn
+                    | AssocSuggestion::AssocConst
+                    | AssocSuggestion::AssocType => {
                         err.span_suggestion(
                             span,
-                            "try",
+                            &format!("you might have meant to {}", candidate.action()),
                             format!("Self::{}", path_str),
                             Applicability::MachineApplicable,
                         );
@@ -439,27 +457,213 @@
             }
         }
 
-        if !self.type_ascription_suggestion(&mut err, base_span)
-            && !self.r.add_typo_suggestion(&mut err, typo_sugg, ident_span)
-        {
-            // Fallback label.
-            err.span_label(base_span, fallback_label);
+        if !self.type_ascription_suggestion(&mut err, base_span) {
+            let mut fallback = false;
+            if let (
+                PathSource::Trait(AliasPossibility::Maybe),
+                Some(Res::Def(DefKind::Struct | DefKind::Enum | DefKind::Union, _)),
+            ) = (source, res)
+            {
+                if let Some(bounds @ [_, .., _]) = self.diagnostic_metadata.current_trait_object {
+                    fallback = true;
+                    let spans: Vec<Span> = bounds
+                        .iter()
+                        .map(|bound| bound.span())
+                        .filter(|&sp| sp != base_span)
+                        .collect();
 
-            match self.diagnostic_metadata.current_let_binding {
-                Some((pat_sp, Some(ty_sp), None)) if ty_sp.contains(base_span) && could_be_expr => {
-                    err.span_suggestion_short(
-                        pat_sp.between(ty_sp),
-                        "use `=` if you meant to assign",
-                        " = ".to_string(),
-                        Applicability::MaybeIncorrect,
+                    let start_span = bounds.iter().map(|bound| bound.span()).next().unwrap();
+                    // `end_span` is the end of the poly trait ref (Foo + 'baz + Bar><)
+                    let end_span = bounds.iter().map(|bound| bound.span()).last().unwrap();
+                    // `last_bound_span` is the last bound of the poly trait ref (Foo + >'baz< + Bar)
+                    let last_bound_span = spans.last().cloned().unwrap();
+                    let mut multi_span: MultiSpan = spans.clone().into();
+                    for sp in spans {
+                        let msg = if sp == last_bound_span {
+                            format!(
+                                "...because of {} bound{}",
+                                if bounds.len() <= 2 { "this" } else { "these" },
+                                if bounds.len() <= 2 { "" } else { "s" },
+                            )
+                        } else {
+                            String::new()
+                        };
+                        multi_span.push_span_label(sp, msg);
+                    }
+                    multi_span.push_span_label(
+                        base_span,
+                        "expected this type to be a trait...".to_string(),
                     );
+                    err.span_help(
+                        multi_span,
+                        "`+` is used to constrain a \"trait object\" type with lifetimes or \
+                         auto-traits; structs and enums can't be bound in that way",
+                    );
+                    if bounds.iter().all(|bound| match bound {
+                        ast::GenericBound::Outlives(_) => true,
+                        ast::GenericBound::Trait(tr, _) => tr.span == base_span,
+                    }) {
+                        let mut sugg = vec![];
+                        if base_span != start_span {
+                            sugg.push((start_span.until(base_span), String::new()));
+                        }
+                        if base_span != end_span {
+                            sugg.push((base_span.shrink_to_hi().to(end_span), String::new()));
+                        }
+
+                        err.multipart_suggestion(
+                            "if you meant to use a type and not a trait here, remove the bounds",
+                            sugg,
+                            Applicability::MaybeIncorrect,
+                        );
+                    }
                 }
-                _ => {}
+            }
+
+            fallback |= self.restrict_assoc_type_in_where_clause(span, &mut err);
+
+            if !self.r.add_typo_suggestion(&mut err, typo_sugg, ident_span) {
+                fallback = true;
+                match self.diagnostic_metadata.current_let_binding {
+                    Some((pat_sp, Some(ty_sp), None))
+                        if ty_sp.contains(base_span) && could_be_expr =>
+                    {
+                        err.span_suggestion_short(
+                            pat_sp.between(ty_sp),
+                            "use `=` if you meant to assign",
+                            " = ".to_string(),
+                            Applicability::MaybeIncorrect,
+                        );
+                    }
+                    _ => {}
+                }
+            }
+            if fallback {
+                // Fallback label.
+                err.span_label(base_span, fallback_label);
             }
         }
         (err, candidates)
     }
 
+    /// Given `where <T as Bar>::Baz: String`, suggest `where T: Bar<Baz = String>`.
+    fn restrict_assoc_type_in_where_clause(
+        &mut self,
+        span: Span,
+        err: &mut DiagnosticBuilder<'_>,
+    ) -> bool {
+        // Detect that we are actually in a `where` predicate.
+        let (bounded_ty, bounds, where_span) =
+            if let Some(ast::WherePredicate::BoundPredicate(ast::WhereBoundPredicate {
+                bounded_ty,
+                bound_generic_params,
+                bounds,
+                span,
+            })) = self.diagnostic_metadata.current_where_predicate
+            {
+                if !bound_generic_params.is_empty() {
+                    return false;
+                }
+                (bounded_ty, bounds, span)
+            } else {
+                return false;
+            };
+
+        // Confirm that the target is an associated type.
+        let (ty, position, path) = if let ast::TyKind::Path(
+            Some(ast::QSelf { ty, position, .. }),
+            path,
+        ) = &bounded_ty.kind
+        {
+            // use this to verify that ident is a type param.
+            let partial_res = if let Ok(Some(partial_res)) = self.resolve_qpath_anywhere(
+                bounded_ty.id,
+                None,
+                &Segment::from_path(path),
+                Namespace::TypeNS,
+                span,
+                true,
+                CrateLint::No,
+            ) {
+                partial_res
+            } else {
+                return false;
+            };
+            if !(matches!(
+                partial_res.base_res(),
+                hir::def::Res::Def(hir::def::DefKind::AssocTy, _)
+            ) && partial_res.unresolved_segments() == 0)
+            {
+                return false;
+            }
+            (ty, position, path)
+        } else {
+            return false;
+        };
+
+        if let ast::TyKind::Path(None, type_param_path) = &ty.peel_refs().kind {
+            // Confirm that the `SelfTy` is a type parameter.
+            let partial_res = if let Ok(Some(partial_res)) = self.resolve_qpath_anywhere(
+                bounded_ty.id,
+                None,
+                &Segment::from_path(type_param_path),
+                Namespace::TypeNS,
+                span,
+                true,
+                CrateLint::No,
+            ) {
+                partial_res
+            } else {
+                return false;
+            };
+            if !(matches!(
+                partial_res.base_res(),
+                hir::def::Res::Def(hir::def::DefKind::TyParam, _)
+            ) && partial_res.unresolved_segments() == 0)
+            {
+                return false;
+            }
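+            // Only suggest when the type parameter path is a single argument-free segment
+            // and there is exactly one plain trait bound.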
+            if let (
+                [ast::PathSegment { ident: constrain_ident, args: None, .. }],
+                [ast::GenericBound::Trait(poly_trait_ref, ast::TraitBoundModifier::None)],
+            ) = (&type_param_path.segments[..], &bounds[..])
+            {
+                if let [ast::PathSegment { ident, args: None, .. }] =
+                    &poly_trait_ref.trait_ref.path.segments[..]
+                {
+                    if ident.span == span {
+                        err.span_suggestion_verbose(
+                            *where_span,
+                            &format!("constrain the associated type to `{}`", ident),
+                            format!(
+                                "{}: {}<{} = {}>",
+                                self.r
+                                    .session
+                                    .source_map()
+                                    .span_to_snippet(ty.span) // Account for `<&'a T as Foo>::Bar`.
+                                    .unwrap_or_else(|_| constrain_ident.to_string()),
+                                path.segments[..*position]
+                                    .iter()
+                                    .map(|segment| path_segment_to_string(segment))
+                                    .collect::<Vec<_>>()
+                                    .join("::"),
+                                path.segments[*position..]
+                                    .iter()
+                                    .map(|segment| path_segment_to_string(segment))
+                                    .collect::<Vec<_>>()
+                                    .join("::"),
+                                ident,
+                            ),
+                            Applicability::MaybeIncorrect,
+                        );
+                    }
+                    return true;
+                }
+            }
+        }
+        false
+    }
+
     /// Check if the source is a call expression and the first argument is `self`. If true,
     /// return the span of the whole call and the span for all arguments except the first one (`self`).
     fn call_has_self_arg(&self, source: PathSource<'_>) -> Option<(Span, Option<Span>)> {
@@ -515,10 +719,7 @@
                 _ => break,
             }
         }
-        let followed_by_brace = match sm.span_to_snippet(sp) {
-            Ok(ref snippet) if snippet == "{" => true,
-            _ => false,
-        };
+        let followed_by_brace = matches!(sm.span_to_snippet(sp), Ok(ref snippet) if snippet == "{");
         // In case this could be a struct literal that needs to be surrounded
         // by parentheses, find the appropriate span.
         let mut i = 0;
@@ -664,7 +865,7 @@
                     err.span_suggestion(
                         span,
                         &format!("use struct {} syntax instead", descr),
-                        format!("{} {{{pad}{}{pad}}}", path_str, fields, pad = pad),
+                        format!("{path_str} {{{pad}{fields}{pad}}}"),
                         applicability,
                     );
                 }
@@ -726,74 +927,75 @@
                     // We already suggested changing `:` into `::` during parsing.
                     return false;
                 }
-                if let Some(variants) = self.collect_enum_variants(def_id) {
-                    if !variants.is_empty() {
-                        let msg = if variants.len() == 1 {
-                            "try using the enum's variant"
-                        } else {
-                            "try using one of the enum's variants"
-                        };
 
-                        err.span_suggestions(
-                            span,
-                            msg,
-                            variants.iter().map(path_names_to_string),
-                            Applicability::MaybeIncorrect,
-                        );
-                    }
-                } else {
-                    err.note("you might have meant to use one of the enum's variants");
-                }
+                self.suggest_using_enum_variant(err, source, def_id, span);
             }
             (Res::Def(DefKind::Struct, def_id), _) if ns == ValueNS => {
-                if let Some((ctor_def, ctor_vis, fields)) =
-                    self.r.struct_constructors.get(&def_id).cloned()
-                {
-                    let accessible_ctor =
-                        self.r.is_accessible_from(ctor_vis, self.parent_scope.module);
-                    if is_expected(ctor_def) && !accessible_ctor {
-                        let mut better_diag = false;
-                        if let PathSource::TupleStruct(_, pattern_spans) = source {
-                            if pattern_spans.len() > 0 && fields.len() == pattern_spans.len() {
-                                let non_visible_spans: Vec<Span> = fields
-                                    .iter()
-                                    .zip(pattern_spans.iter())
-                                    .filter_map(|(vis, span)| {
-                                        match self
-                                            .r
-                                            .is_accessible_from(*vis, self.parent_scope.module)
-                                        {
-                                            true => None,
-                                            false => Some(*span),
-                                        }
-                                    })
-                                    .collect();
-                                // Extra check to be sure
-                                if non_visible_spans.len() > 0 {
-                                    let mut m: rustc_span::MultiSpan =
-                                        non_visible_spans.clone().into();
-                                    non_visible_spans.into_iter().for_each(|s| {
-                                        m.push_span_label(s, "private field".to_string())
-                                    });
-                                    err.span_note(
-                                        m,
-                                        "constructor is not visible here due to private fields",
-                                    );
-                                    better_diag = true;
-                                }
-                            }
-                        }
+                let (ctor_def, ctor_vis, fields) =
+                    if let Some(struct_ctor) = self.r.struct_constructors.get(&def_id).cloned() {
+                        struct_ctor
+                    } else {
+                        bad_struct_syntax_suggestion(def_id);
+                        return true;
+                    };
 
-                        if !better_diag {
-                            err.span_label(
-                                span,
-                                "constructor is not visible here due to private fields".to_string(),
-                            );
-                        }
-                    }
-                } else {
-                    bad_struct_syntax_suggestion(def_id);
+                let is_accessible = self.r.is_accessible_from(ctor_vis, self.parent_scope.module);
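+                // Only continue to the private-fields diagnostics when the constructor is
+                // the expected kind but is not accessible from here.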
+                if !is_expected(ctor_def) || is_accessible {
+                    return true;
                 }
+
+                let field_spans = match source {
+                    // e.g. `if let Enum::TupleVariant(field1, field2) = _`
+                    PathSource::TupleStruct(_, pattern_spans) => {
+                        err.set_primary_message(
+                            "cannot match against a tuple struct which contains private fields",
+                        );
+
+                        // Use spans of the tuple struct pattern.
+                        Some(Vec::from(pattern_spans))
+                    }
+                    // e.g. `let _ = Enum::TupleVariant(field1, field2);`
+                    _ if source.is_call() => {
+                        err.set_primary_message(
+                            "cannot initialize a tuple struct which contains private fields",
+                        );
+
+                        // Use spans of the tuple struct definition.
+                        self.r
+                            .field_names
+                            .get(&def_id)
+                            .map(|fields| fields.iter().map(|f| f.span).collect::<Vec<_>>())
+                    }
+                    _ => None,
+                };
+
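+                // Point at individual private fields only when there is exactly one span per field.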
+                if let Some(spans) =
+                    field_spans.filter(|spans| spans.len() > 0 && fields.len() == spans.len())
+                {
+                    let non_visible_spans: Vec<Span> = fields
+                        .iter()
+                        .zip(spans.iter())
+                        .filter(|(vis, _)| {
+                            !self.r.is_accessible_from(**vis, self.parent_scope.module)
+                        })
+                        .map(|(_, span)| *span)
+                        .collect();
+
+                    if non_visible_spans.len() > 0 {
+                        let mut m: rustc_span::MultiSpan = non_visible_spans.clone().into();
+                        non_visible_spans
+                            .into_iter()
+                            .for_each(|s| m.push_span_label(s, "private field".to_string()));
+                        err.span_note(m, "constructor is not visible here due to private fields");
+                    }
+
+                    return true;
+                }
+
+                err.span_label(
+                    span,
+                    "constructor is not visible here due to private fields".to_string(),
+                );
             }
             (
                 Res::Def(
@@ -877,9 +1079,19 @@
             }
         }
 
-        for assoc_type_ident in &self.diagnostic_metadata.current_trait_assoc_types {
-            if *assoc_type_ident == ident {
-                return Some(AssocSuggestion::AssocItem);
+        if let Some(items) = self.diagnostic_metadata.current_trait_assoc_items {
+            for assoc_item in &items[..] {
+                if assoc_item.ident == ident {
+                    return Some(match &assoc_item.kind {
+                        ast::AssocItemKind::Const(..) => AssocSuggestion::AssocConst,
+                        ast::AssocItemKind::Fn(_, sig, ..) if sig.decl.has_self() => {
+                            AssocSuggestion::MethodWithSelf
+                        }
+                        ast::AssocItemKind::Fn(..) => AssocSuggestion::AssocFn,
+                        ast::AssocItemKind::TyAlias(..) => AssocSuggestion::AssocType,
+                        ast::AssocItemKind::MacCall(_) => continue,
+                    });
+                }
             }
         }
 
@@ -895,11 +1107,20 @@
             ) {
                 let res = binding.res();
                 if filter_fn(res) {
-                    return Some(if self.r.has_self.contains(&res.def_id()) {
-                        AssocSuggestion::MethodWithSelf
+                    if self.r.has_self.contains(&res.def_id()) {
+                        return Some(AssocSuggestion::MethodWithSelf);
                     } else {
-                        AssocSuggestion::AssocItem
-                    });
+                        match res {
+                            Res::Def(DefKind::AssocFn, _) => return Some(AssocSuggestion::AssocFn),
+                            Res::Def(DefKind::AssocConst, _) => {
+                                return Some(AssocSuggestion::AssocConst);
+                            }
+                            Res::Def(DefKind::AssocTy, _) => {
+                                return Some(AssocSuggestion::AssocType);
+                            }
+                            _ => {}
+                        }
+                    }
                 }
             }
         }
@@ -1126,20 +1347,165 @@
         result
     }
 
-    fn collect_enum_variants(&mut self, def_id: DefId) -> Option<Vec<Path>> {
+    fn collect_enum_ctors(&mut self, def_id: DefId) -> Option<Vec<(Path, DefId, CtorKind)>> {
         self.find_module(def_id).map(|(enum_module, enum_import_suggestion)| {
             let mut variants = Vec::new();
             enum_module.for_each_child(self.r, |_, ident, _, name_binding| {
-                if let Res::Def(DefKind::Variant, _) = name_binding.res() {
+                if let Res::Def(DefKind::Ctor(CtorOf::Variant, kind), def_id) = name_binding.res() {
                     let mut segms = enum_import_suggestion.path.segments.clone();
                     segms.push(ast::PathSegment::from_ident(ident));
-                    variants.push(Path { span: name_binding.span, segments: segms, tokens: None });
+                    let path = Path { span: name_binding.span, segments: segms, tokens: None };
+                    variants.push((path, def_id, kind));
                 }
             });
             variants
         })
     }
 
+    /// Adds a suggestion for using an enum's variant when an enum is used instead.
+    fn suggest_using_enum_variant(
+        &mut self,
+        err: &mut DiagnosticBuilder<'a>,
+        source: PathSource<'_>,
+        def_id: DefId,
+        span: Span,
+    ) {
+        let variants = match self.collect_enum_ctors(def_id) {
+            Some(variants) => variants,
+            None => {
+                err.note("you might have meant to use one of the enum's variants");
+                return;
+            }
+        };
+
+        let suggest_only_tuple_variants =
+            matches!(source, PathSource::TupleStruct(..)) || source.is_call();
+        if suggest_only_tuple_variants {
+            // Suggest only tuple variants regardless of whether they have fields and do not
+            // suggest paths with added parentheses.
+            let mut suggestable_variants = variants
+                .iter()
+                .filter(|(.., kind)| *kind == CtorKind::Fn)
+                .map(|(variant, ..)| path_names_to_string(variant))
+                .collect::<Vec<_>>();
+
+            let non_suggestable_variant_count = variants.len() - suggestable_variants.len();
+
+            let source_msg = if source.is_call() {
+                "to construct"
+            } else if matches!(source, PathSource::TupleStruct(..)) {
+                "to match against"
+            } else {
+                unreachable!()
+            };
+
+            if !suggestable_variants.is_empty() {
+                let msg = if non_suggestable_variant_count == 0 && suggestable_variants.len() == 1 {
+                    format!("try {} the enum's variant", source_msg)
+                } else {
+                    format!("try {} one of the enum's variants", source_msg)
+                };
+
+                err.span_suggestions(
+                    span,
+                    &msg,
+                    suggestable_variants.drain(..),
+                    Applicability::MaybeIncorrect,
+                );
+            }
+
+            // If the enum has no tuple variants..
+            if non_suggestable_variant_count == variants.len() {
+                err.help(&format!("the enum has no tuple variants {}", source_msg));
+            }
+
+            // If there are also non-tuple variants..
+            if non_suggestable_variant_count == 1 {
+                err.help(&format!(
+                    "you might have meant {} the enum's non-tuple variant",
+                    source_msg
+                ));
+            } else if non_suggestable_variant_count >= 1 {
+                err.help(&format!(
+                    "you might have meant {} one of the enum's non-tuple variants",
+                    source_msg
+                ));
+            }
+        } else {
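+            // A variant needs field placeholders unless it is a unit variant or has no fields.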
+            let needs_placeholder = |def_id: DefId, kind: CtorKind| {
+                let has_no_fields =
+                    self.r.field_names.get(&def_id).map(|f| f.is_empty()).unwrap_or(false);
+                match kind {
+                    CtorKind::Const => false,
+                    CtorKind::Fn | CtorKind::Fictive if has_no_fields => false,
+                    _ => true,
+                }
+            };
+
+            let mut suggestable_variants = variants
+                .iter()
+                .filter(|(_, def_id, kind)| !needs_placeholder(*def_id, *kind))
+                .map(|(variant, _, kind)| (path_names_to_string(variant), kind))
+                .map(|(variant, kind)| match kind {
+                    CtorKind::Const => variant,
+                    CtorKind::Fn => format!("({}())", variant),
+                    CtorKind::Fictive => format!("({} {{}})", variant),
+                })
+                .collect::<Vec<_>>();
+
+            if !suggestable_variants.is_empty() {
+                let msg = if suggestable_variants.len() == 1 {
+                    "you might have meant to use the following enum variant"
+                } else {
+                    "you might have meant to use one of the following enum variants"
+                };
+
+                err.span_suggestions(
+                    span,
+                    msg,
+                    suggestable_variants.drain(..),
+                    Applicability::MaybeIncorrect,
+                );
+            }
+
+            let mut suggestable_variants_with_placeholders = variants
+                .iter()
+                .filter(|(_, def_id, kind)| needs_placeholder(*def_id, *kind))
+                .map(|(variant, _, kind)| (path_names_to_string(variant), kind))
+                .filter_map(|(variant, kind)| match kind {
+                    CtorKind::Fn => Some(format!("({}(/* fields */))", variant)),
+                    CtorKind::Fictive => Some(format!("({} {{ /* fields */ }})", variant)),
+                    _ => None,
+                })
+                .collect::<Vec<_>>();
+
+            if !suggestable_variants_with_placeholders.is_empty() {
+                let msg = match (
+                    suggestable_variants.is_empty(),
+                    suggestable_variants_with_placeholders.len(),
+                ) {
+                    (true, 1) => "the following enum variant is available",
+                    (true, _) => "the following enum variants are available",
+                    (false, 1) => "alternatively, the following enum variant is available",
+                    (false, _) => "alternatively, the following enum variants are also available",
+                };
+
+                err.span_suggestions(
+                    span,
+                    msg,
+                    suggestable_variants_with_placeholders.drain(..),
+                    Applicability::HasPlaceholders,
+                );
+            }
+        };
+
+        if def_id.is_local() {
+            if let Some(span) = self.def_span(def_id) {
+                err.span_note(span, "the enum is defined here");
+            }
+        }
+    }
+
     crate fn report_missing_type_error(
         &self,
         path: &[Segment],
@@ -1455,12 +1821,11 @@
                         }
                         msg = "consider introducing a named lifetime parameter".to_string();
                         should_break = true;
-                        if let Some(param) = generics.params.iter().find(|p| match p.kind {
-                            hir::GenericParamKind::Type {
+                        if let Some(param) = generics.params.iter().find(|p| {
+                            !matches!(p.kind, hir::GenericParamKind::Type {
                                 synthetic: Some(hir::SyntheticTyParamKind::ImplTrait),
                                 ..
-                            } => false,
-                            _ => true,
+                            })
                         }) {
                             (param.span.shrink_to_lo(), "'a, ".to_string())
                         } else {
@@ -1521,9 +1886,8 @@
                         if snippet.starts_with('&') && !snippet.starts_with("&'") {
                             introduce_suggestion
                                 .push((param.span, format!("&'a {}", &snippet[1..])));
-                        } else if snippet.starts_with("&'_ ") {
-                            introduce_suggestion
-                                .push((param.span, format!("&'a {}", &snippet[4..])));
+                        } else if let Some(stripped) = snippet.strip_prefix("&'_ ") {
+                            introduce_suggestion.push((param.span, format!("&'a {}", &stripped)));
                         }
                     }
                 }
diff --git a/compiler/rustc_resolve/src/late/lifetimes.rs b/compiler/rustc_resolve/src/late/lifetimes.rs
index 072fb50..c79d670 100644
--- a/compiler/rustc_resolve/src/late/lifetimes.rs
+++ b/compiler/rustc_resolve/src/late/lifetimes.rs
@@ -351,10 +351,7 @@
 /// We have to account for this when computing the index of the other generic parameters.
 /// This function returns whether there is such an implicit parameter defined on the given item.
 fn sub_items_have_self_param(node: &hir::ItemKind<'_>) -> bool {
-    match *node {
-        hir::ItemKind::Trait(..) | hir::ItemKind::TraitAlias(..) => true,
-        _ => false,
-    }
+    matches!(*node, hir::ItemKind::Trait(..) | hir::ItemKind::TraitAlias(..))
 }
 
 impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> {
@@ -417,10 +414,7 @@
 
                 // Impls permit `'_` to be used and it is equivalent to "some fresh lifetime name".
                 // This is not true for other kinds of items.
-                let track_lifetime_uses = match item.kind {
-                    hir::ItemKind::Impl { .. } => true,
-                    _ => false,
-                };
+                let track_lifetime_uses = matches!(item.kind, hir::ItemKind::Impl { .. });
                 // These kinds of items have only early-bound lifetime parameters.
                 let mut index = if sub_items_have_self_param(&item.kind) {
                     1 // Self comes before lifetimes
@@ -970,10 +964,10 @@
 
         let trait_ref_hack = take(&mut self.trait_ref_hack);
         if !trait_ref_hack
-            || trait_ref.bound_generic_params.iter().any(|param| match param.kind {
-                GenericParamKind::Lifetime { .. } => true,
-                _ => false,
-            })
+            || trait_ref
+                .bound_generic_params
+                .iter()
+                .any(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
         {
             if trait_ref_hack {
                 struct_span_err!(
@@ -1384,18 +1378,16 @@
                 }
                 if in_band {
                     Some(param.span)
+                } else if generics.params.len() == 1 {
+                    // if sole lifetime, remove the entire `<>` brackets
+                    Some(generics.span)
                 } else {
-                    if generics.params.len() == 1 {
-                        // if sole lifetime, remove the entire `<>` brackets
-                        Some(generics.span)
+                    // if removing within `<>` brackets, we also want to
+                    // delete a leading or trailing comma as appropriate
+                    if i >= generics.params.len() - 1 {
+                        Some(generics.params[i - 1].span.shrink_to_hi().to(param.span))
                     } else {
-                        // if removing within `<>` brackets, we also want to
-                        // delete a leading or trailing comma as appropriate
-                        if i >= generics.params.len() - 1 {
-                            Some(generics.params[i - 1].span.shrink_to_hi().to(param.span))
-                        } else {
-                            Some(param.span.to(generics.params[i + 1].span.shrink_to_lo()))
-                        }
+                        Some(param.span.to(generics.params[i + 1].span.shrink_to_lo()))
                     }
                 }
             } else {
@@ -2047,10 +2039,8 @@
         //
         // This is intended to leave room for us to implement the
         // correct behavior in the future.
-        let has_lifetime_parameter = generic_args.args.iter().any(|arg| match arg {
-            GenericArg::Lifetime(_) => true,
-            _ => false,
-        });
+        let has_lifetime_parameter =
+            generic_args.args.iter().any(|arg| matches!(arg, GenericArg::Lifetime(_)));
 
         // Resolve lifetimes found in the type `XX` from `Item = XX` bindings.
         for b in generic_args.bindings {
diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs
index 283db14..4e85c88 100644
--- a/compiler/rustc_resolve/src/lib.rs
+++ b/compiler/rustc_resolve/src/lib.rs
@@ -11,6 +11,7 @@
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![feature(bool_to_option)]
 #![feature(crate_visibility_modifier)]
+#![feature(format_args_capture)]
 #![feature(nll)]
 #![feature(or_patterns)]
 #![recursion_limit = "256"]
@@ -19,7 +20,7 @@
 
 use Determinacy::*;
 
-use rustc_arena::TypedArena;
+use rustc_arena::{DroplessArena, TypedArena};
 use rustc_ast::node_id::NodeMap;
 use rustc_ast::unwrap_or;
 use rustc_ast::visit::{self, Visitor};
@@ -64,7 +65,7 @@
 use diagnostics::{ImportSuggestion, LabelSuggestion, Suggestion};
 use imports::{Import, ImportKind, ImportResolver, NameResolution};
 use late::{HasGenericParams, PathSource, Rib, RibKind::*};
-use macros::{MacroRulesBinding, MacroRulesScope};
+use macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef};
 
 type Res = def::Res<NodeId>;
 
@@ -100,7 +101,7 @@
 enum Scope<'a> {
     DeriveHelpers(ExpnId),
     DeriveHelpersCompat,
-    MacroRules(MacroRulesScope<'a>),
+    MacroRules(MacroRulesScopeRef<'a>),
     CrateRoot,
     Module(Module<'a>),
     RegisteredAttrs,
@@ -133,18 +134,18 @@
 pub struct ParentScope<'a> {
     module: Module<'a>,
     expansion: ExpnId,
-    macro_rules: MacroRulesScope<'a>,
+    macro_rules: MacroRulesScopeRef<'a>,
     derives: &'a [ast::Path],
 }
 
 impl<'a> ParentScope<'a> {
     /// Creates a parent scope with the passed argument used as the module scope component,
     /// and other scope components set to default empty values.
-    pub fn module(module: Module<'a>) -> ParentScope<'a> {
+    pub fn module(module: Module<'a>, resolver: &Resolver<'a>) -> ParentScope<'a> {
         ParentScope {
             module,
             expansion: ExpnId::root(),
-            macro_rules: MacroRulesScope::Empty,
+            macro_rules: resolver.arenas.alloc_macro_rules_scope(MacroRulesScope::Empty),
             derives: &[],
         }
     }
@@ -218,7 +219,7 @@
     ParamInTyOfConstParam(Symbol),
     /// constant values inside of type parameter defaults must not depend on generic parameters.
     ParamInAnonConstInTyDefault(Symbol),
-    /// generic parameters must not be used inside of non trivial constant values.
+    /// generic parameters must not be used inside const evaluations.
     ///
     /// This error is only emitted when using `min_const_generics`.
     ParamInNonTrivialAnonConst { name: Symbol, is_type: bool },
@@ -313,17 +314,17 @@
                 ItemKind::ExternCrate(_) => {}
                 // but place them before the first other item
                 _ => {
-                    if self.span.map_or(true, |span| item.span < span) {
-                        if !item.span.from_expansion() {
-                            // don't insert between attributes and an item
-                            if item.attrs.is_empty() {
-                                self.span = Some(item.span.shrink_to_lo());
-                            } else {
-                                // find the first attribute on the item
-                                for attr in &item.attrs {
-                                    if self.span.map_or(true, |span| attr.span < span) {
-                                        self.span = Some(attr.span.shrink_to_lo());
-                                    }
+                    if self.span.map_or(true, |span| item.span < span)
+                        && !item.span.from_expansion()
+                    {
+                        // don't insert between attributes and an item
+                        if item.attrs.is_empty() {
+                            self.span = Some(item.span.shrink_to_lo());
+                        } else {
+                            // find the first attribute on the item
+                            for attr in &item.attrs {
+                                if self.span.map_or(true, |span| attr.span < span) {
+                                    self.span = Some(attr.span.shrink_to_lo());
                                 }
                             }
                         }
@@ -558,17 +559,11 @@
 
     // `self` resolves to the first module ancestor that `is_normal`.
     fn is_normal(&self) -> bool {
-        match self.kind {
-            ModuleKind::Def(DefKind::Mod, _, _) => true,
-            _ => false,
-        }
+        matches!(self.kind, ModuleKind::Def(DefKind::Mod, _, _))
     }
 
     fn is_trait(&self) -> bool {
-        match self.kind {
-            ModuleKind::Def(DefKind::Trait, _, _) => true,
-            _ => false,
-        }
+        matches!(self.kind, ModuleKind::Def(DefKind::Trait, _, _))
     }
 
     fn nearest_item_scope(&'a self) -> Module<'a> {
@@ -628,10 +623,7 @@
 impl<'a> NameBindingKind<'a> {
     /// Is this a name binding of an import?
     fn is_import(&self) -> bool {
-        match *self {
-            NameBindingKind::Import { .. } => true,
-            _ => false,
-        }
+        matches!(*self, NameBindingKind::Import { .. })
     }
 }
 
@@ -750,13 +742,10 @@
     }
 
     fn is_variant(&self) -> bool {
-        match self.kind {
-            NameBindingKind::Res(
+        matches!(self.kind, NameBindingKind::Res(
                 Res::Def(DefKind::Variant | DefKind::Ctor(CtorOf::Variant, ..), _),
                 _,
-            ) => true,
-            _ => false,
-        }
+            ))
     }
 
     fn is_extern_crate(&self) -> bool {
@@ -774,10 +763,7 @@
     }
 
     fn is_import(&self) -> bool {
-        match self.kind {
-            NameBindingKind::Import { .. } => true,
-            _ => false,
-        }
+        matches!(self.kind, NameBindingKind::Import { .. })
     }
 
     fn is_glob_import(&self) -> bool {
@@ -788,17 +774,14 @@
     }
 
     fn is_importable(&self) -> bool {
-        match self.res() {
-            Res::Def(DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy, _) => false,
-            _ => true,
-        }
+        !matches!(
+            self.res(),
+            Res::Def(DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy, _)
+        )
     }
 
     fn is_macro_def(&self) -> bool {
-        match self.kind {
-            NameBindingKind::Res(Res::Def(DefKind::Macro(..), _), _) => true,
-            _ => false,
-        }
+        matches!(self.kind, NameBindingKind::Res(Res::Def(DefKind::Macro(..), _), _))
     }
 
     fn macro_kind(&self) -> Option<MacroKind> {
@@ -944,7 +927,8 @@
 
     /// Maps glob imports to the names of items actually imported.
     glob_map: FxHashMap<LocalDefId, FxHashSet<Symbol>>,
-
+    /// Visibilities in "lowered" form, for all entities that have them.
+    visibilities: FxHashMap<LocalDefId, ty::Visibility>,
     used_imports: FxHashSet<(NodeId, Namespace)>,
     maybe_unused_trait_imports: FxHashSet<LocalDefId>,
     maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
@@ -991,7 +975,10 @@
     invocation_parent_scopes: FxHashMap<ExpnId, ParentScope<'a>>,
     /// `macro_rules` scopes *produced* by expanding the macro invocations,
     /// include all the `macro_rules` items and other invocations generated by them.
-    output_macro_rules_scopes: FxHashMap<ExpnId, MacroRulesScope<'a>>,
+    output_macro_rules_scopes: FxHashMap<ExpnId, MacroRulesScopeRef<'a>>,
+    /// References to all `MacroRulesScope::Invocation(invoc_id)`s, used to update such scopes
+    /// when their corresponding `invoc_id`s get expanded.
+    invocation_macro_rules_scopes: FxHashMap<ExpnId, FxHashSet<MacroRulesScopeRef<'a>>>,
     /// Helper attributes that are in scope for the given expansion.
     helper_attrs: FxHashMap<ExpnId, Vec<Ident>>,
 
@@ -1008,10 +995,6 @@
     /// Features enabled for this crate.
     active_features: FxHashSet<Symbol>,
 
-    /// Stores enum visibilities to properly build a reduced graph
-    /// when visiting the correspondent variants.
-    variant_vis: DefIdMap<ty::Visibility>,
-
     lint_buffer: LintBuffer,
 
     next_node_id: NodeId,
@@ -1028,6 +1011,9 @@
     invocation_parents: FxHashMap<ExpnId, LocalDefId>,
 
     next_disambiguator: FxHashMap<(LocalDefId, DefPathData), u32>,
+    /// Some way to know that we are in a *trait* impl in `visit_assoc_item`.
+    /// FIXME: Replace with a more general AST map (together with some other fields).
+    trait_impl_items: FxHashSet<LocalDefId>,
 }
 
 /// Nothing really interesting here; it just provides memory for the rest of the crate.
@@ -1035,12 +1021,10 @@
 pub struct ResolverArenas<'a> {
     modules: TypedArena<ModuleData<'a>>,
     local_modules: RefCell<Vec<Module<'a>>>,
-    name_bindings: TypedArena<NameBinding<'a>>,
     imports: TypedArena<Import<'a>>,
     name_resolutions: TypedArena<RefCell<NameResolution<'a>>>,
-    macro_rules_bindings: TypedArena<MacroRulesBinding<'a>>,
     ast_paths: TypedArena<ast::Path>,
-    pattern_spans: TypedArena<Span>,
+    dropless: DroplessArena,
 }
 
 impl<'a> ResolverArenas<'a> {
@@ -1055,7 +1039,7 @@
         self.local_modules.borrow()
     }
     fn alloc_name_binding(&'a self, name_binding: NameBinding<'a>) -> &'a NameBinding<'a> {
-        self.name_bindings.alloc(name_binding)
+        self.dropless.alloc(name_binding)
     }
     fn alloc_import(&'a self, import: Import<'a>) -> &'a Import<'_> {
         self.imports.alloc(import)
@@ -1063,17 +1047,20 @@
     fn alloc_name_resolution(&'a self) -> &'a RefCell<NameResolution<'a>> {
         self.name_resolutions.alloc(Default::default())
     }
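+    /// Allocates a `Cell`-wrapped `macro_rules` scope in the dropless arena so it can
+    /// later be updated in place.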
+    fn alloc_macro_rules_scope(&'a self, scope: MacroRulesScope<'a>) -> MacroRulesScopeRef<'a> {
+        PtrKey(self.dropless.alloc(Cell::new(scope)))
+    }
     fn alloc_macro_rules_binding(
         &'a self,
         binding: MacroRulesBinding<'a>,
     ) -> &'a MacroRulesBinding<'a> {
-        self.macro_rules_bindings.alloc(binding)
+        self.dropless.alloc(binding)
     }
     fn alloc_ast_paths(&'a self, paths: &[ast::Path]) -> &'a [ast::Path] {
         self.ast_paths.alloc_from_iter(paths.iter().cloned())
     }
     fn alloc_pattern_spans(&'a self, spans: impl Iterator<Item = Span>) -> &'a [Span] {
-        self.pattern_spans.alloc_from_iter(spans)
+        self.dropless.alloc_from_iter(spans)
     }
 }
 
@@ -1195,7 +1182,8 @@
         metadata_loader: &'a MetadataLoaderDyn,
         arenas: &'a ResolverArenas<'a>,
     ) -> Resolver<'a> {
-        let root_def_id = DefId::local(CRATE_DEF_INDEX);
+        let root_local_def_id = LocalDefId { local_def_index: CRATE_DEF_INDEX };
+        let root_def_id = root_local_def_id.to_def_id();
         let root_module_kind = ModuleKind::Def(DefKind::Mod, root_def_id, kw::Invalid);
         let graph_root = arenas.alloc_module(ModuleData {
             no_implicit_prelude: session.contains_name(&krate.attrs, sym::no_implicit_prelude),
@@ -1213,11 +1201,14 @@
             )
         });
         let mut module_map = FxHashMap::default();
-        module_map.insert(LocalDefId { local_def_index: CRATE_DEF_INDEX }, graph_root);
+        module_map.insert(root_local_def_id, graph_root);
 
         let definitions = Definitions::new(crate_name, session.local_crate_disambiguator());
         let root = definitions.get_root_def();
 
+        let mut visibilities = FxHashMap::default();
+        visibilities.insert(root_local_def_id, ty::Visibility::Public);
+
         let mut def_id_to_span = IndexVec::default();
         assert_eq!(def_id_to_span.push(rustc_span::DUMMY_SP), root);
         let mut def_id_to_node_id = IndexVec::default();
@@ -1240,23 +1231,17 @@
             extern_prelude.insert(Ident::with_dummy_span(sym::core), Default::default());
             if !session.contains_name(&krate.attrs, sym::no_std) {
                 extern_prelude.insert(Ident::with_dummy_span(sym::std), Default::default());
-                if session.rust_2018() {
-                    extern_prelude.insert(Ident::with_dummy_span(sym::meta), Default::default());
-                }
             }
         }
 
         let (registered_attrs, registered_tools) =
             macros::registered_attrs_and_tools(session, &krate.attrs);
 
-        let mut invocation_parent_scopes = FxHashMap::default();
-        invocation_parent_scopes.insert(ExpnId::root(), ParentScope::module(graph_root));
-
         let features = session.features_untracked();
         let non_macro_attr =
             |mark_used| Lrc::new(SyntaxExtension::non_macro_attr(mark_used, session.edition()));
 
-        Resolver {
+        let mut resolver = Resolver {
             session,
 
             definitions,
@@ -1293,7 +1278,7 @@
             ast_transform_scopes: FxHashMap::default(),
 
             glob_map: Default::default(),
-
+            visibilities,
             used_imports: FxHashSet::default(),
             maybe_unused_trait_imports: Default::default(),
             maybe_unused_extern_crates: Vec::new(),
@@ -1323,8 +1308,9 @@
             dummy_ext_bang: Lrc::new(SyntaxExtension::dummy_bang(session.edition())),
             dummy_ext_derive: Lrc::new(SyntaxExtension::dummy_derive(session.edition())),
             non_macro_attrs: [non_macro_attr(false), non_macro_attr(true)],
-            invocation_parent_scopes,
+            invocation_parent_scopes: Default::default(),
             output_macro_rules_scopes: Default::default(),
+            invocation_macro_rules_scopes: Default::default(),
             helper_attrs: Default::default(),
             local_macro_def_scopes: FxHashMap::default(),
             name_already_seen: FxHashMap::default(),
@@ -1342,7 +1328,6 @@
                 .map(|(feat, ..)| *feat)
                 .chain(features.declared_lang_features.iter().map(|(feat, ..)| *feat))
                 .collect(),
-            variant_vis: Default::default(),
             lint_buffer: LintBuffer::default(),
             next_node_id: NodeId::from_u32(1),
             def_id_to_span,
@@ -1351,7 +1336,13 @@
             placeholder_field_indices: Default::default(),
             invocation_parents,
             next_disambiguator: Default::default(),
-        }
+            trait_impl_items: Default::default(),
+        };
+
+        let root_parent_scope = ParentScope::module(graph_root, &resolver);
+        resolver.invocation_parent_scopes.insert(ExpnId::root(), root_parent_scope);
+
+        resolver
     }
 
     pub fn next_node_id(&mut self) -> NodeId {
@@ -1374,14 +1365,16 @@
 
     pub fn into_outputs(self) -> ResolverOutputs {
         let definitions = self.definitions;
+        let visibilities = self.visibilities;
         let extern_crate_map = self.extern_crate_map;
         let export_map = self.export_map;
         let maybe_unused_trait_imports = self.maybe_unused_trait_imports;
         let maybe_unused_extern_crates = self.maybe_unused_extern_crates;
         let glob_map = self.glob_map;
         ResolverOutputs {
-            definitions: definitions,
+            definitions,
             cstore: Box::new(self.crate_loader.into_cstore()),
+            visibilities,
             extern_crate_map,
             export_map,
             glob_map,
@@ -1399,6 +1392,7 @@
         ResolverOutputs {
             definitions: self.definitions.clone(),
             cstore: Box::new(self.cstore().clone()),
+            visibilities: self.visibilities.clone(),
             extern_crate_map: self.extern_crate_map.clone(),
             export_map: self.export_map.clone(),
             glob_map: self.glob_map.clone(),
@@ -1718,15 +1712,14 @@
                 }
                 Scope::DeriveHelpers(..) => Scope::DeriveHelpersCompat,
                 Scope::DeriveHelpersCompat => Scope::MacroRules(parent_scope.macro_rules),
-                Scope::MacroRules(macro_rules_scope) => match macro_rules_scope {
+                Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() {
                     MacroRulesScope::Binding(binding) => {
                         Scope::MacroRules(binding.parent_macro_rules_scope)
                     }
                     MacroRulesScope::Invocation(invoc_id) => Scope::MacroRules(
-                        self.output_macro_rules_scopes
-                            .get(&invoc_id)
-                            .cloned()
-                            .unwrap_or(self.invocation_parent_scopes[&invoc_id].macro_rules),
+                        self.output_macro_rules_scopes.get(&invoc_id).cloned().unwrap_or_else(
+                            || self.invocation_parent_scopes[&invoc_id].macro_rules,
+                        ),
                     ),
                     MacroRulesScope::Empty => Scope::Module(module),
                 },
@@ -1991,11 +1984,12 @@
                 // The macro is a proc macro derive
                 if let Some(def_id) = module.expansion.expn_data().macro_def_id {
                     if let Some(ext) = self.get_macro_by_def_id(def_id) {
-                        if !ext.is_builtin && ext.macro_kind() == MacroKind::Derive {
-                            if parent.expansion.outer_expn_is_descendant_of(span.ctxt()) {
-                                *poisoned = Some(node_id);
-                                return module.parent;
-                            }
+                        if !ext.is_builtin
+                            && ext.macro_kind() == MacroKind::Derive
+                            && parent.expansion.outer_expn_is_descendant_of(span.ctxt())
+                        {
+                            *poisoned = Some(node_id);
+                            return module.parent;
                         }
                     }
                 }
@@ -2389,10 +2383,7 @@
                         _ => None,
                     };
                     let (label, suggestion) = if module_res == self.graph_root.res() {
-                        let is_mod = |res| match res {
-                            Res::Def(DefKind::Mod, _) => true,
-                            _ => false,
-                        };
+                        let is_mod = |res| matches!(res, Res::Def(DefKind::Mod, _));
                         // Don't look up import candidates if this is a speculative resolve
                         let mut candidates = if record_used {
                             self.lookup_import_candidates(ident, TypeNS, parent_scope, is_mod)
@@ -3218,7 +3209,7 @@
             }
         };
         let module = self.get_module(module_id);
-        let parent_scope = &ParentScope::module(module);
+        let parent_scope = &ParentScope::module(module, self);
         let res = self.resolve_ast_path(&path, ns, parent_scope).map_err(|_| ())?;
         Ok((path, res))
     }
diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs
index bea7138..6bc9419 100644
--- a/compiler/rustc_resolve/src/macros.rs
+++ b/compiler/rustc_resolve/src/macros.rs
@@ -11,6 +11,7 @@
 use rustc_ast_pretty::pprust;
 use rustc_attr::StabilityLevel;
 use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::ptr_key::PtrKey;
 use rustc_errors::struct_span_err;
 use rustc_expand::base::{Indeterminate, InvocationRes, ResolverExpand, SyntaxExtension};
 use rustc_expand::compile_declarative_macro;
@@ -19,7 +20,7 @@
 use rustc_hir::def::{self, DefKind, NonMacroAttrKind};
 use rustc_hir::def_id;
 use rustc_middle::middle::stability;
-use rustc_middle::{span_bug, ty};
+use rustc_middle::ty;
 use rustc_session::lint::builtin::UNUSED_MACROS;
 use rustc_session::Session;
 use rustc_span::edition::Edition;
@@ -29,6 +30,7 @@
 
 use rustc_data_structures::sync::Lrc;
 use rustc_span::hygiene::{AstPass, MacroKind};
+use std::cell::Cell;
 use std::{mem, ptr};
 
 type Res = def::Res<NodeId>;
@@ -39,7 +41,7 @@
 pub struct MacroRulesBinding<'a> {
     crate binding: &'a NameBinding<'a>,
     /// `macro_rules` scope into which the `macro_rules` item was planted.
-    crate parent_macro_rules_scope: MacroRulesScope<'a>,
+    crate parent_macro_rules_scope: MacroRulesScopeRef<'a>,
     crate ident: Ident,
 }
 
@@ -59,6 +61,14 @@
     Invocation(ExpnId),
 }
 
+/// `macro_rules!` scopes are always kept by reference and inside a cell.
+/// The reason is that we update all scopes with value `MacroRulesScope::Invocation(invoc_id)`
+/// in-place immediately after `invoc_id` gets expanded.
+/// This helps to avoid uncontrollable growth of `macro_rules!` scope chains,
+/// which usually grow lineraly with the number of macro invocations
+/// in a module (including derives) and hurt performance.
+pub(crate) type MacroRulesScopeRef<'a> = PtrKey<'a, Cell<MacroRulesScope<'a>>>;
+
 // Macro namespace is separated into two sub-namespaces, one for bang macros and
 // one for attribute-like macros (attributes, derives).
 // We ignore resolutions from one sub-namespace when searching names in scope for another.
@@ -163,6 +173,22 @@
         let output_macro_rules_scope = self.build_reduced_graph(fragment, parent_scope);
         self.output_macro_rules_scopes.insert(expansion, output_macro_rules_scope);
 
+        // Update all `macro_rules` scopes referring to this invocation. This is an optimization
+        // used to avoid long scope chains, see the comments on `MacroRulesScopeRef`.
+        if let Some(invocation_scopes) = self.invocation_macro_rules_scopes.remove(&expansion) {
+            for invocation_scope in &invocation_scopes {
+                invocation_scope.set(output_macro_rules_scope.get());
+            }
+            // All `macro_rules` scopes that previously referred to `expansion`
+            // are now rerouted to its output scope, if it's also an invocation.
+            if let MacroRulesScope::Invocation(invoc_id) = output_macro_rules_scope.get() {
+                self.invocation_macro_rules_scopes
+                    .entry(invoc_id)
+                    .or_default()
+                    .extend(invocation_scopes);
+            }
+        }
+
         parent_scope.module.unexpanded_invocations.borrow_mut().remove(&expansion);
     }
 
@@ -655,7 +681,7 @@
                         }
                         result
                     }
-                    Scope::MacroRules(macro_rules_scope) => match macro_rules_scope {
+                    Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() {
                         MacroRulesScope::Binding(macro_rules_binding)
                             if ident == macro_rules_binding.ident =>
                         {
@@ -885,11 +911,11 @@
                                  initial_res: Option<Res>,
                                  res: Res| {
             if let Some(initial_res) = initial_res {
-                if res != initial_res && res != Res::Err && this.ambiguity_errors.is_empty() {
+                if res != initial_res {
                     // Make sure compilation does not succeed if preferred macro resolution
                     // has changed after the macro had been expanded. In theory all such
-                    // situations should be reported as ambiguity errors, so this is a bug.
-                    span_bug!(span, "inconsistent resolution for a macro");
+                    // situations should be reported as errors, so this is a bug.
+                    this.session.delay_span_bug(span, "inconsistent resolution for a macro");
                 }
             } else {
                 // It's possible that the macro was unresolved (indeterminate) and silently
diff --git a/compiler/rustc_save_analysis/src/dump_visitor.rs b/compiler/rustc_save_analysis/src/dump_visitor.rs
index ce48485..40d60a8 100644
--- a/compiler/rustc_save_analysis/src/dump_visitor.rs
+++ b/compiler/rustc_save_analysis/src/dump_visitor.rs
@@ -320,6 +320,15 @@
         for param in generics.params {
             match param.kind {
                 hir::GenericParamKind::Lifetime { .. } => {}
+                hir::GenericParamKind::Type {
+                    synthetic: Some(hir::SyntheticTyParamKind::ImplTrait),
+                    ..
+                } => {
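+                    // Re-visit the generics under the typeck results of the synthetic
+                    // `impl Trait` parameter.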
+                    return self
+                        .nest_typeck_results(self.tcx.hir().local_def_id(param.hir_id), |this| {
+                            this.visit_generics(generics)
+                        });
+                }
                 hir::GenericParamKind::Type { .. } => {
                     let param_ss = param.name.ident().span;
                     let name = escape(self.span.snippet(param_ss));
@@ -351,7 +360,8 @@
                 hir::GenericParamKind::Const { .. } => {}
             }
         }
-        self.visit_generics(generics);
+
+        self.visit_generics(generics)
     }
 
     fn process_fn(
@@ -806,7 +816,7 @@
         path: &'tcx hir::QPath<'tcx>,
         fields: &'tcx [hir::Field<'tcx>],
         variant: &'tcx ty::VariantDef,
-        base: Option<&'tcx hir::Expr<'tcx>>,
+        rest: Option<&'tcx hir::Expr<'tcx>>,
     ) {
         if let Some(struct_lit_data) = self.save_ctxt.get_expr_data(ex) {
             if let hir::QPath::Resolved(_, path) = path {
@@ -826,7 +836,9 @@
             }
         }
 
-        walk_list!(self, visit_expr, base);
+        if let Some(base) = rest {
+            self.visit_expr(&base);
+        }
     }
 
     fn process_method_call(
@@ -1389,7 +1401,7 @@
         debug!("visit_expr {:?}", ex.kind);
         self.process_macro_use(ex.span);
         match ex.kind {
-            hir::ExprKind::Struct(ref path, ref fields, ref base) => {
+            hir::ExprKind::Struct(ref path, ref fields, ref rest) => {
                 let hir_expr = self.save_ctxt.tcx.hir().expect_expr(ex.hir_id);
                 let adt = match self.save_ctxt.typeck_results().expr_ty_opt(&hir_expr) {
                     Some(ty) if ty.ty_adt_def().is_some() => ty.ty_adt_def().unwrap(),
@@ -1399,7 +1411,7 @@
                     }
                 };
                 let res = self.save_ctxt.get_path_res(hir_expr.hir_id);
-                self.process_struct_lit(ex, path, fields, adt.variant_of_res(res), *base)
+                self.process_struct_lit(ex, path, fields, adt.variant_of_res(res), *rest)
             }
             hir::ExprKind::MethodCall(ref seg, _, args, _) => {
                 self.process_method_call(ex, seg, args)
diff --git a/compiler/rustc_save_analysis/src/lib.rs b/compiler/rustc_save_analysis/src/lib.rs
index f643468..eed9f2e 100644
--- a/compiler/rustc_save_analysis/src/lib.rs
+++ b/compiler/rustc_save_analysis/src/lib.rs
@@ -630,9 +630,14 @@
             })
             | Node::Ty(&hir::Ty { kind: hir::TyKind::Path(ref qpath), .. }) => match qpath {
                 hir::QPath::Resolved(_, path) => path.res,
-                hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => self
-                    .maybe_typeck_results
-                    .map_or(Res::Err, |typeck_results| typeck_results.qpath_res(qpath, hir_id)),
+                hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => {
+                    // #75962: `self.typeck_results` may be different from the `hir_id`'s result.
+                    if self.tcx.has_typeck_results(hir_id.owner.to_def_id()) {
+                        self.tcx.typeck(hir_id.owner).qpath_res(qpath, hir_id)
+                    } else {
+                        Res::Err
+                    }
+                }
             },
 
             Node::Binding(&hir::Pat {
@@ -794,7 +799,9 @@
 
             // These are not macros.
             // FIXME(eddyb) maybe there is a way to handle them usefully?
-            ExpnKind::Root | ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => return None,
+            ExpnKind::Inlined | ExpnKind::Root | ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => {
+                return None;
+            }
         };
 
         let callee_span = self.span_from_span(callee.def_site);
diff --git a/compiler/rustc_save_analysis/src/sig.rs b/compiler/rustc_save_analysis/src/sig.rs
index 747e198..1bf8160 100644
--- a/compiler/rustc_save_analysis/src/sig.rs
+++ b/compiler/rustc_save_analysis/src/sig.rs
@@ -262,7 +262,7 @@
                 } else {
                     let start = offset + prefix.len() + 5;
                     let end = start + name.len();
-                    // FIXME should put the proper path in there, not elipses.
+                    // FIXME should put the proper path in there, not ellipsis.
                     Ok(Signature {
                         text: prefix + "...::" + &name,
                         defs: vec![],
@@ -272,7 +272,7 @@
             }
             hir::TyKind::Path(hir::QPath::TypeRelative(ty, segment)) => {
                 let nested_ty = ty.make(offset + 1, id, scx)?;
-                let prefix = format!("<{}>::", nested_ty.text,);
+                let prefix = format!("<{}>::", nested_ty.text);
 
                 let name = path_segment_to_string(segment);
                 let res = scx.get_path_res(id.ok_or("Missing id for Path")?);
@@ -551,7 +551,7 @@
                 // FIXME where clause
             }
             hir::ItemKind::ForeignMod(_) => Err("extern mod"),
-            hir::ItemKind::GlobalAsm(_) => Err("glboal asm"),
+            hir::ItemKind::GlobalAsm(_) => Err("global asm"),
             hir::ItemKind::ExternCrate(_) => Err("extern crate"),
             hir::ItemKind::OpaqueTy(..) => Err("opaque type"),
             // FIXME should implement this (e.g., pub use).
diff --git a/compiler/rustc_serialize/src/opaque.rs b/compiler/rustc_serialize/src/opaque.rs
index fa4423e..8b79c93 100644
--- a/compiler/rustc_serialize/src/opaque.rs
+++ b/compiler/rustc_serialize/src/opaque.rs
@@ -107,7 +107,7 @@
 
     #[inline]
     fn emit_i8(&mut self, v: i8) -> EncodeResult {
-        let as_u8: u8 = unsafe { ::std::mem::transmute(v) };
+        let as_u8: u8 = unsafe { std::mem::transmute(v) };
         self.emit_u8(as_u8)
     }
 
@@ -300,13 +300,13 @@
     #[inline]
     fn read_char(&mut self) -> Result<char, Self::Error> {
         let bits = self.read_u32()?;
-        Ok(::std::char::from_u32(bits).unwrap())
+        Ok(std::char::from_u32(bits).unwrap())
     }
 
     #[inline]
     fn read_str(&mut self) -> Result<Cow<'_, str>, Self::Error> {
         let len = self.read_usize()?;
-        let s = ::std::str::from_utf8(&self.data[self.position..self.position + len]).unwrap();
+        let s = std::str::from_utf8(&self.data[self.position..self.position + len]).unwrap();
         self.position += len;
         Ok(Cow::Borrowed(s))
     }
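
For context on the `transmute` kept above: reinterpreting an `i8` as a `u8` is a plain
bit-for-bit conversion, and an `as` cast produces the same bits. A tiny check of that
equivalence:

    fn main() {
        for v in [-128i8, -1, 0, 1, 127] {
            // Same bit pattern either way; the cast just avoids `unsafe`.
            let via_transmute: u8 = unsafe { std::mem::transmute(v) };
            assert_eq!(via_transmute, v as u8);
        }
        assert_eq!(-1i8 as u8, 255);
    }
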
diff --git a/compiler/rustc_session/Cargo.toml b/compiler/rustc_session/Cargo.toml
index cdff166..4c72920 100644
--- a/compiler/rustc_session/Cargo.toml
+++ b/compiler/rustc_session/Cargo.toml
@@ -18,3 +18,4 @@
 rustc_fs_util = { path = "../rustc_fs_util" }
 num_cpus = "1.0"
 rustc_ast = { path = "../rustc_ast" }
+rustc_lint_defs = { path = "../rustc_lint_defs" }
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index ab96b03..8768733 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -34,11 +34,6 @@
 use std::path::{Path, PathBuf};
 use std::str::{self, FromStr};
 
-pub struct Config {
-    pub target: Target,
-    pub ptr_width: u32,
-}
-
 bitflags! {
     #[derive(Default, Encodable, Decodable)]
     pub struct SanitizerSet: u8 {
@@ -740,16 +735,16 @@
 }
 
 pub fn default_configuration(sess: &Session) -> CrateConfig {
-    let end = &sess.target.target.target_endian;
-    let arch = &sess.target.target.arch;
-    let wordsz = &sess.target.target.target_pointer_width;
-    let os = &sess.target.target.target_os;
-    let env = &sess.target.target.target_env;
-    let vendor = &sess.target.target.target_vendor;
-    let min_atomic_width = sess.target.target.min_atomic_width();
-    let max_atomic_width = sess.target.target.max_atomic_width();
-    let atomic_cas = sess.target.target.options.atomic_cas;
-    let layout = TargetDataLayout::parse(&sess.target.target).unwrap_or_else(|err| {
+    let end = &sess.target.endian;
+    let arch = &sess.target.arch;
+    let wordsz = sess.target.pointer_width.to_string();
+    let os = &sess.target.os;
+    let env = &sess.target.env;
+    let vendor = &sess.target.vendor;
+    let min_atomic_width = sess.target.min_atomic_width();
+    let max_atomic_width = sess.target.max_atomic_width();
+    let atomic_cas = sess.target.atomic_cas;
+    let layout = TargetDataLayout::parse(&sess.target).unwrap_or_else(|err| {
         sess.fatal(&err);
     });
 
@@ -757,7 +752,7 @@
     ret.reserve(6); // the minimum number of insertions
     // Target bindings.
     ret.insert((sym::target_os, Some(Symbol::intern(os))));
-    if let Some(ref fam) = sess.target.target.options.target_family {
+    if let Some(ref fam) = sess.target.os_family {
         ret.insert((sym::target_family, Some(Symbol::intern(fam))));
         if fam == "windows" {
             ret.insert((sym::windows, None));
@@ -767,10 +762,10 @@
     }
     ret.insert((sym::target_arch, Some(Symbol::intern(arch))));
     ret.insert((sym::target_endian, Some(Symbol::intern(end))));
-    ret.insert((sym::target_pointer_width, Some(Symbol::intern(wordsz))));
+    ret.insert((sym::target_pointer_width, Some(Symbol::intern(&wordsz))));
     ret.insert((sym::target_env, Some(Symbol::intern(env))));
     ret.insert((sym::target_vendor, Some(Symbol::intern(vendor))));
-    if sess.target.target.options.has_elf_tls {
+    if sess.target.has_elf_tls {
         ret.insert((sym::target_thread_local, None));
     }
     for &(i, align) in &[
@@ -792,12 +787,15 @@
             };
             let s = i.to_string();
             insert_atomic(&s, align);
-            if &s == wordsz {
+            if s == wordsz {
                 insert_atomic("ptr", layout.pointer_align.abi);
             }
         }
     }
 
+    let panic_strategy = sess.panic_strategy();
+    ret.insert((sym::panic, Some(panic_strategy.desc_symbol())));
+
     for s in sess.opts.debugging_opts.sanitizer {
         let symbol = Symbol::intern(&s.to_string());
         ret.insert((sym::sanitize, Some(symbol)));
@@ -831,7 +829,7 @@
     user_cfg
 }
 
-pub fn build_target_config(opts: &Options, target_override: Option<Target>) -> Config {
+pub fn build_target_config(opts: &Options, target_override: Option<Target>) -> Target {
     let target_result = target_override.map_or_else(|| Target::search(&opts.target_triple), Ok);
     let target = target_result.unwrap_or_else(|e| {
         early_error(
@@ -844,21 +842,18 @@
         )
     });
 
-    let ptr_width = match &target.target_pointer_width[..] {
-        "16" => 16,
-        "32" => 32,
-        "64" => 64,
-        w => early_error(
+    if !matches!(target.pointer_width, 16 | 32 | 64) {
+        early_error(
             opts.error_format,
             &format!(
                 "target specification was invalid: \
              unrecognized target-pointer-width {}",
-                w
+                target.pointer_width
             ),
-        ),
-    };
+        )
+    }
 
-    Config { target, ptr_width }
+    target
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -1756,10 +1751,6 @@
         );
     }
 
-    if debugging_opts.experimental_coverage {
-        debugging_opts.instrument_coverage = true;
-    }
-
     if debugging_opts.instrument_coverage {
         if cg.profile_generate.enabled() || cg.profile_use.is_some() {
             early_error(
@@ -2069,10 +2060,7 @@
 
     pub fn needs_analysis(&self) -> bool {
         use PpMode::*;
-        match *self {
-            PpmMir | PpmMirCFG => true,
-            _ => false,
-        }
+        matches!(*self, PpmMir | PpmMirCFG)
     }
 }
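
With the `sym::panic` entry added to `default_configuration` above, the chosen panic strategy
becomes visible to conditional compilation. A small sketch of how downstream code can branch on
it (on this compiler version the `panic` cfg is still gated behind the unstable `cfg_panic`
feature; it was stabilized in a later release):

    fn panic_mode() -> &'static str {
        // Selected by `-C panic=...`, or the target's default strategy.
        if cfg!(panic = "abort") { "abort" } else { "unwind" }
    }

    fn main() {
        println!("compiled with panic strategy: {}", panic_mode());
    }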
 
diff --git a/compiler/rustc_session/src/filesearch.rs b/compiler/rustc_session/src/filesearch.rs
index 12a268d..55ee4e5 100644
--- a/compiler/rustc_session/src/filesearch.rs
+++ b/compiler/rustc_session/src/filesearch.rs
@@ -153,14 +153,14 @@
     const SECONDARY_LIB_DIR: &str = "lib";
 
     match option_env!("CFG_LIBDIR_RELATIVE") {
-        Some(libdir) if libdir != "lib" => libdir.into(),
-        _ => {
+        None | Some("lib") => {
             if sysroot.join(PRIMARY_LIB_DIR).join(RUST_LIB_DIR).exists() {
                 PRIMARY_LIB_DIR.into()
             } else {
                 SECONDARY_LIB_DIR.into()
             }
         }
+        Some(libdir) => libdir.into(),
     }
 }
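
The rewritten match above folds the unset and default cases into a single or-pattern. A
stand-alone model of the same selection logic; the directory names are placeholders rather than
the real `CFG_LIBDIR_RELATIVE` values:

    fn relative_libdir(cfg: Option<&str>, primary_exists: bool) -> String {
        match cfg {
            // Not configured, or configured to the default "lib": probe the sysroot layout.
            None | Some("lib") => {
                if primary_exists { "primary-lib-dir".into() } else { "lib".into() }
            }
            // Any other configured value is used as-is.
            Some(libdir) => libdir.into(),
        }
    }

    fn main() {
        assert_eq!(relative_libdir(None, true), "primary-lib-dir");
        assert_eq!(relative_libdir(Some("lib"), false), "lib");
        assert_eq!(relative_libdir(Some("lib/custom"), true), "lib/custom");
    }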
 
diff --git a/compiler/rustc_session/src/lib.rs b/compiler/rustc_session/src/lib.rs
index a808261..d002f59 100644
--- a/compiler/rustc_session/src/lib.rs
+++ b/compiler/rustc_session/src/lib.rs
@@ -9,8 +9,8 @@
 
 pub mod cgu_reuse_tracker;
 pub mod utils;
-#[macro_use]
-pub mod lint;
+pub use lint::{declare_lint, declare_lint_pass, declare_tool_lint, impl_lint_pass};
+pub use rustc_lint_defs as lint;
 pub mod parse;
 
 mod code_stats;
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
index b705ab6..1cd3d11 100644
--- a/compiler/rustc_session/src/options.rs
+++ b/compiler/rustc_session/src/options.rs
@@ -717,7 +717,7 @@
     // This list is in alphabetical order.
     //
     // If you add a new option, please update:
-    // - src/librustc_interface/tests.rs
+    // - compiler/rustc_interface/src/tests.rs
     // - src/doc/rustc/src/codegen-options/index.md
 
     ar: String = (String::new(), parse_string, [UNTRACKED],
@@ -814,7 +814,7 @@
     // This list is in alphabetical order.
     //
     // If you add a new option, please update:
-    // - src/librustc_interface/tests.rs
+    // - compiler/rustc_interface/src/tests.rs
     // - src/doc/rustc/src/codegen-options/index.md
 }
 
@@ -825,7 +825,7 @@
     // This list is in alphabetical order.
     //
     // If you add a new option, please update:
-    // - src/librustc_interface/tests.rs
+    // - compiler/rustc_interface/src/tests.rs
 
     allow_features: Option<Vec<String>> = (None, parse_opt_comma_list, [TRACKED],
         "only allow the listed language features to be enabled in code (space separated)"),
@@ -887,19 +887,19 @@
     dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED],
         "exclude the pass number when dumping MIR (used in tests) (default: no)"),
     dump_mir_graphviz: bool = (false, parse_bool, [UNTRACKED],
-        "in addition to `.mir` files, create graphviz `.dot` files (default: no)"),
+        "in addition to `.mir` files, create graphviz `.dot` files (and with \
+        `-Z instrument-coverage`, also create a `.dot` file for the MIR-derived \
+        coverage graph) (default: no)"),
     dump_mir_spanview: Option<MirSpanview> = (None, parse_mir_spanview, [UNTRACKED],
         "in addition to `.mir` files, create `.html` files to view spans for \
         all `statement`s (including terminators), only `terminator` spans, or \
         computed `block` spans (one span encompassing a block's terminator and \
-        all statements)."),
+        all statements). If `-Z instrument-coverage` is also enabled, create \
+        an additional `.html` file showing the computed coverage spans."),
+    emit_future_incompat_report: bool = (false, parse_bool, [UNTRACKED],
+        "emits a future-incompatibility report for lints (RFC 2834)"),
     emit_stack_sizes: bool = (false, parse_bool, [UNTRACKED],
         "emit a section containing stack size metadata (default: no)"),
-    experimental_coverage: bool = (false, parse_bool, [TRACKED],
-        "enable and extend the `-Z instrument-coverage` function-level coverage \
-        feature, adding additional experimental (likely inaccurate) counters and \
-        code regions (used by `rustc` compiler developers to test new coverage \
-        counter placements) (default: no)"),
     fewer_names: bool = (false, parse_bool, [TRACKED],
         "reduce memory use by retaining fewer names within compilation artifacts (LLVM-IR) \
         (default: no)"),
@@ -909,6 +909,8 @@
         "force all crates to be `rustc_private` unstable (default: no)"),
     fuel: Option<(String, u64)> = (None, parse_optimization_fuel, [TRACKED],
         "set the optimization fuel quota for a crate"),
+    function_sections: Option<bool> = (None, parse_opt_bool, [TRACKED],
+        "whether each function should go in its own section"),
     graphviz_dark_mode: bool = (false, parse_bool, [UNTRACKED],
         "use dark-themed colors in graphviz output (default: no)"),
     graphviz_font: String = ("Courier, monospace".to_string(), parse_string, [UNTRACKED],
@@ -927,6 +929,10 @@
         (default: no)"),
     incremental_verify_ich: bool = (false, parse_bool, [UNTRACKED],
         "verify incr. comp. hashes of green query instances (default: no)"),
+    inline_mir_threshold: usize = (50, parse_uint, [TRACKED],
+        "a default MIR inlining threshold (default: 50)"),
+    inline_mir_hint_threshold: usize = (100, parse_uint, [TRACKED],
+        "inlining threshold for functions with inline hint (default: 100)"),
     inline_in_all_cgus: Option<bool> = (None, parse_opt_bool, [TRACKED],
         "control whether `#[inline]` functions are in all CGUs"),
     input_stats: bool = (false, parse_bool, [UNTRACKED],
@@ -972,6 +978,8 @@
         "use new LLVM pass manager (default: no)"),
     nll_facts: bool = (false, parse_bool, [UNTRACKED],
         "dump facts from NLL analysis into side files (default: no)"),
+    nll_facts_dir: String = ("nll-facts".to_string(), parse_string, [UNTRACKED],
+        "the directory the NLL facts are dumped into (default: `nll-facts`)"),
     no_analysis: bool = (false, parse_no_flag, [UNTRACKED],
         "parse and expand the source, but run no analysis"),
     no_codegen: bool = (false, parse_no_flag, [TRACKED],
@@ -1033,6 +1041,8 @@
         "enable queries of the dependency graph for regression testing (default: no)"),
     query_stats: bool = (false, parse_bool, [UNTRACKED],
         "print some statistics about the query system (default: no)"),
+    relax_elf_relocations: Option<bool> = (None, parse_opt_bool, [TRACKED],
+        "whether ELF relocations can be relaxed"),
     relro_level: Option<RelroLevel> = (None, parse_relro_level, [TRACKED],
         "choose which RELRO level to use"),
     report_delayed_bugs: bool = (false, parse_bool, [TRACKED],
@@ -1073,7 +1083,7 @@
     span_free_formats: bool = (false, parse_bool, [UNTRACKED],
         "exclude spans when debug-printing compiler state (default: no)"),
     src_hash_algorithm: Option<SourceFileHashAlgorithm> = (None, parse_src_file_hash, [TRACKED],
-        "hash algorithm of source files in debug info (`md5`, or `sha1`)"),
+        "hash algorithm of source files in debug info (`md5`, `sha1`, or `sha256`)"),
     strip: Strip = (Strip::None, parse_strip, [UNTRACKED],
         "tell the linker which information to strip (`none` (default), `debuginfo` or `symbols`)"),
     symbol_mangling_version: SymbolManglingVersion = (SymbolManglingVersion::Legacy,
@@ -1083,6 +1093,8 @@
         "show extended diagnostic help (default: no)"),
     terminal_width: Option<usize> = (None, parse_opt_uint, [UNTRACKED],
         "set the current terminal width"),
+    tune_cpu: Option<String> = (None, parse_opt_string, [TRACKED],
+        "select processor to schedule for (`rustc --print target-cpus` for details)"),
     thinlto: Option<bool> = (None, parse_opt_bool, [TRACKED],
         "enable ThinLTO when possible"),
     // We default to 1 here since we want to behave like
diff --git a/compiler/rustc_session/src/output.rs b/compiler/rustc_session/src/output.rs
index bf9c96c..777eea3 100644
--- a/compiler/rustc_session/src/output.rs
+++ b/compiler/rustc_session/src/output.rs
@@ -150,19 +150,15 @@
     match crate_type {
         CrateType::Rlib => outputs.out_directory.join(&format!("lib{}.rlib", libname)),
         CrateType::Cdylib | CrateType::ProcMacro | CrateType::Dylib => {
-            let (prefix, suffix) =
-                (&sess.target.target.options.dll_prefix, &sess.target.target.options.dll_suffix);
+            let (prefix, suffix) = (&sess.target.dll_prefix, &sess.target.dll_suffix);
             outputs.out_directory.join(&format!("{}{}{}", prefix, libname, suffix))
         }
         CrateType::Staticlib => {
-            let (prefix, suffix) = (
-                &sess.target.target.options.staticlib_prefix,
-                &sess.target.target.options.staticlib_suffix,
-            );
+            let (prefix, suffix) = (&sess.target.staticlib_prefix, &sess.target.staticlib_suffix);
             outputs.out_directory.join(&format!("{}{}{}", prefix, libname, suffix))
         }
         CrateType::Executable => {
-            let suffix = &sess.target.target.options.exe_suffix;
+            let suffix = &sess.target.exe_suffix;
             let out_filename = outputs.path(OutputType::Exe);
             if suffix.is_empty() { out_filename } else { out_filename.with_extension(&suffix[1..]) }
         }
@@ -179,38 +175,30 @@
 /// interaction with Rust code through static library is the only
 /// option for now
 pub fn default_output_for_target(sess: &Session) -> CrateType {
-    if !sess.target.target.options.executables {
-        CrateType::Staticlib
-    } else {
-        CrateType::Executable
-    }
+    if !sess.target.executables { CrateType::Staticlib } else { CrateType::Executable }
 }
 
 /// Checks if target supports crate_type as output
 pub fn invalid_output_for_target(sess: &Session, crate_type: CrateType) -> bool {
     match crate_type {
         CrateType::Cdylib | CrateType::Dylib | CrateType::ProcMacro => {
-            if !sess.target.target.options.dynamic_linking {
+            if !sess.target.dynamic_linking {
                 return true;
             }
-            if sess.crt_static(Some(crate_type))
-                && !sess.target.target.options.crt_static_allows_dylibs
-            {
+            if sess.crt_static(Some(crate_type)) && !sess.target.crt_static_allows_dylibs {
                 return true;
             }
         }
         _ => {}
     }
-    if sess.target.target.options.only_cdylib {
+    if sess.target.only_cdylib {
         match crate_type {
             CrateType::ProcMacro | CrateType::Dylib => return true,
             _ => {}
         }
     }
-    if !sess.target.target.options.executables {
-        if crate_type == CrateType::Executable {
-            return true;
-        }
+    if !sess.target.executables && crate_type == CrateType::Executable {
+        return true;
     }
 
     false
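
One detail worth spelling out from the `Executable` branch above: a target's `exe_suffix`
includes the leading dot (e.g. `.exe`), while `Path::with_extension` expects the extension
without it, hence the `&suffix[1..]` slice. A quick stand-alone check:

    use std::path::PathBuf;

    fn main() {
        let out_filename = PathBuf::from("target/debug/hello");
        let suffix = ".exe"; // stand-in for `sess.target.exe_suffix`
        let with_suffix =
            if suffix.is_empty() { out_filename } else { out_filename.with_extension(&suffix[1..]) };
        assert_eq!(with_suffix, PathBuf::from("target/debug/hello.exe"));
    }
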
diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs
index ff5e615..98b7f03d 100644
--- a/compiler/rustc_session/src/session.rs
+++ b/compiler/rustc_session/src/session.rs
@@ -3,7 +3,7 @@
 pub use crate::code_stats::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
 use crate::config::{self, CrateType, OutputType, PrintRequest, SanitizerSet, SwitchWithOptPath};
 use crate::filesearch;
-use crate::lint;
+use crate::lint::{self, LintId};
 use crate::parse::ParseSess;
 use crate::search_paths::{PathKind, SearchPath};
 
@@ -21,7 +21,8 @@
 use rustc_errors::emitter::{Emitter, EmitterWriter, HumanReadableErrorType};
 use rustc_errors::json::JsonEmitter;
 use rustc_errors::registry::Registry;
-use rustc_errors::{Applicability, DiagnosticBuilder, DiagnosticId, ErrorReported};
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, DiagnosticId, ErrorReported};
+use rustc_lint_defs::FutureBreakage;
 use rustc_span::edition::Edition;
 use rustc_span::source_map::{FileLoader, MultiSpan, RealFileLoader, SourceMap, Span};
 use rustc_span::{sym, SourceFileHashAlgorithm, Symbol};
@@ -40,6 +41,10 @@
 use std::sync::Arc;
 use std::time::Duration;
 
+pub trait SessionLintStore: sync::Send + sync::Sync {
+    fn name_to_lint(&self, lint_name: &str) -> LintId;
+}
+
 pub struct OptimizationFuel {
     /// If `-zfuel=crate=n` is specified, initially set to `n`, otherwise `0`.
     remaining: u64,
@@ -102,7 +107,7 @@
 /// Represents the data associated with a compilation
 /// session for a single crate.
 pub struct Session {
-    pub target: config::Config,
+    pub target: Target,
     pub host: Target,
     pub opts: config::Options,
     pub host_tlib_path: SearchPath,
@@ -131,6 +136,8 @@
 
     features: OnceCell<rustc_feature::Features>,
 
+    lint_store: OnceCell<Lrc<dyn SessionLintStore>>,
+
     /// The maximum recursion limit for potentially infinitely recursive
     /// operations such as auto-dereference and monomorphization.
     pub recursion_limit: OnceCell<Limit>,
@@ -297,6 +304,35 @@
     pub fn finish_diagnostics(&self, registry: &Registry) {
         self.check_miri_unleashed_features();
         self.diagnostic().print_error_count(registry);
+        self.emit_future_breakage();
+    }
+
+    fn emit_future_breakage(&self) {
+        if !self.opts.debugging_opts.emit_future_incompat_report {
+            return;
+        }
+
+        let diags = self.diagnostic().take_future_breakage_diagnostics();
+        if diags.is_empty() {
+            return;
+        }
+        // If any future-breakage lints were registered, this lint store
+        // should be available
+        let lint_store = self.lint_store.get().expect("`lint_store` not initialized!");
+        let diags_and_breakage: Vec<(FutureBreakage, Diagnostic)> = diags
+            .into_iter()
+            .map(|diag| {
+                let lint_name = match &diag.code {
+                    Some(DiagnosticId::Lint { name, has_future_breakage: true }) => name,
+                    _ => panic!("Unexpected code in diagnostic {:?}", diag),
+                };
+                let lint = lint_store.name_to_lint(&lint_name);
+                let future_breakage =
+                    lint.lint.future_incompatible.unwrap().future_breakage.unwrap();
+                (future_breakage, diag)
+            })
+            .collect();
+        self.parse_sess.span_diagnostic.emit_future_breakage_report(diags_and_breakage);
     }
 
     pub fn local_crate_disambiguator(&self) -> CrateDisambiguator {
@@ -337,6 +373,12 @@
     pub fn struct_warn(&self, msg: &str) -> DiagnosticBuilder<'_> {
         self.diagnostic().struct_warn(msg)
     }
+    pub fn struct_span_allow<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> DiagnosticBuilder<'_> {
+        self.diagnostic().struct_span_allow(sp, msg)
+    }
+    pub fn struct_allow(&self, msg: &str) -> DiagnosticBuilder<'_> {
+        self.diagnostic().struct_allow(msg)
+    }
     pub fn struct_span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> DiagnosticBuilder<'_> {
         self.diagnostic().struct_span_err(sp, msg)
     }
@@ -611,10 +653,17 @@
         }
     }
 
+    pub fn init_lint_store(&self, lint_store: Lrc<dyn SessionLintStore>) {
+        self.lint_store
+            .set(lint_store)
+            .map_err(|_| ())
+            .expect("`lint_store` was initialized twice");
+    }
+
     /// Calculates the flavor of LTO to use for this compilation.
     pub fn lto(&self) -> config::Lto {
         // If our target has codegen requirements ignore the command line
-        if self.target.target.options.requires_lto {
+        if self.target.requires_lto {
             return config::Lto::Fat;
         }
 
@@ -682,7 +731,7 @@
     /// Returns the panic strategy for this compile session. If the user explicitly selected one
     /// using '-C panic', use that, otherwise use the panic strategy defined by the target.
     pub fn panic_strategy(&self) -> PanicStrategy {
-        self.opts.cg.panic.unwrap_or(self.target.target.options.panic_strategy)
+        self.opts.cg.panic.unwrap_or(self.target.panic_strategy)
     }
     pub fn fewer_names(&self) -> bool {
         let more_names = self.opts.output_types.contains_key(&OutputType::LlvmAssembly)
@@ -706,9 +755,9 @@
 
     /// Check whether this compile session and crate type use static crt.
     pub fn crt_static(&self, crate_type: Option<CrateType>) -> bool {
-        if !self.target.target.options.crt_static_respected {
+        if !self.target.crt_static_respected {
             // If the target does not opt in to crt-static support, use its default.
-            return self.target.target.options.crt_static_default;
+            return self.target.crt_static_default;
         }
 
         let requested_features = self.opts.cg.target_feature.split(',');
@@ -725,20 +774,20 @@
             // We can't check `#![crate_type = "proc-macro"]` here.
             false
         } else {
-            self.target.target.options.crt_static_default
+            self.target.crt_static_default
         }
     }
 
     pub fn relocation_model(&self) -> RelocModel {
-        self.opts.cg.relocation_model.unwrap_or(self.target.target.options.relocation_model)
+        self.opts.cg.relocation_model.unwrap_or(self.target.relocation_model)
     }
 
     pub fn code_model(&self) -> Option<CodeModel> {
-        self.opts.cg.code_model.or(self.target.target.options.code_model)
+        self.opts.cg.code_model.or(self.target.code_model)
     }
 
     pub fn tls_model(&self) -> TlsModel {
-        self.opts.debugging_opts.tls_model.unwrap_or(self.target.target.options.tls_model)
+        self.opts.debugging_opts.tls_model.unwrap_or(self.target.tls_model)
     }
 
     pub fn must_not_eliminate_frame_pointers(&self) -> bool {
@@ -749,7 +798,7 @@
         } else if let Some(x) = self.opts.cg.force_frame_pointers {
             x
         } else {
-            !self.target.target.options.eliminate_frame_pointer
+            !self.target.eliminate_frame_pointer
         }
     }
 
@@ -773,7 +822,7 @@
         // value, if it is provided, or disable them, if not.
         if self.panic_strategy() == PanicStrategy::Unwind {
             true
-        } else if self.target.target.options.requires_uwtable {
+        } else if self.target.requires_uwtable {
             true
         } else {
             self.opts.cg.force_unwind_tables.unwrap_or(false)
@@ -944,7 +993,7 @@
         if let Some(n) = self.opts.cli_forced_codegen_units {
             return n;
         }
-        if let Some(n) = self.target.target.options.default_codegen_units {
+        if let Some(n) = self.target.default_codegen_units {
             return n as usize;
         }
 
@@ -1029,11 +1078,11 @@
     pub fn needs_plt(&self) -> bool {
         // Check if the current target usually needs PLT to be enabled.
         // The user can use the command line flag to override it.
-        let needs_plt = self.target.target.options.needs_plt;
+        let needs_plt = self.target.needs_plt;
 
         let dbg_opts = &self.opts.debugging_opts;
 
-        let relro_level = dbg_opts.relro_level.unwrap_or(self.target.target.options.relro_level);
+        let relro_level = dbg_opts.relro_level.unwrap_or(self.target.relro_level);
 
         // Only enable this optimization by default if full relro is also enabled.
         // In this case, lazy binding was already unavailable, so nothing is lost.
@@ -1057,8 +1106,7 @@
         match self.opts.cg.link_dead_code {
             Some(explicitly_set) => explicitly_set,
             None => {
-                self.opts.debugging_opts.instrument_coverage
-                    && !self.target.target.options.is_like_msvc
+                self.opts.debugging_opts.instrument_coverage && !self.target.is_like_msvc
                 // Issue #76038: (rustc `-Clink-dead-code` causes MSVC linker to produce invalid
                 // binaries when LLVM InstrProf counters are enabled). As described by this issue,
                 // the "link dead code" option produces incorrect binaries when compiled and linked
@@ -1257,9 +1305,9 @@
         early_error(sopts.error_format, &format!("Error loading host specification: {}", e))
     });
 
-    let loader = file_loader.unwrap_or(Box::new(RealFileLoader));
+    let loader = file_loader.unwrap_or_else(|| Box::new(RealFileLoader));
     let hash_kind = sopts.debugging_opts.src_hash_algorithm.unwrap_or_else(|| {
-        if target_cfg.target.options.is_like_msvc {
+        if target_cfg.is_like_msvc {
             SourceFileHashAlgorithm::Sha1
         } else {
             SourceFileHashAlgorithm::Md5
@@ -1369,11 +1417,8 @@
         if candidate.join("library/std/src/lib.rs").is_file() { Some(candidate) } else { None }
     };
 
-    let asm_arch = if target_cfg.target.options.allow_asm {
-        InlineAsmArch::from_str(&target_cfg.target.arch).ok()
-    } else {
-        None
-    };
+    let asm_arch =
+        if target_cfg.allow_asm { InlineAsmArch::from_str(&target_cfg.arch).ok() } else { None };
 
     let sess = Session {
         target: target_cfg,
@@ -1389,6 +1434,7 @@
         crate_types: OnceCell::new(),
         crate_disambiguator: OnceCell::new(),
         features: OnceCell::new(),
+        lint_store: OnceCell::new(),
         recursion_limit: OnceCell::new(),
         type_length_limit: OnceCell::new(),
         const_eval_limit: OnceCell::new(),
@@ -1438,7 +1484,7 @@
     // the `dllimport` attributes and `__imp_` symbols in that case.
     if sess.opts.cg.linker_plugin_lto.enabled()
         && sess.opts.cg.prefer_dynamic
-        && sess.target.target.options.is_like_windows
+        && sess.target.is_like_windows
     {
         sess.err(
             "Linker plugin based LTO is not supported together with \
@@ -1466,7 +1512,7 @@
             );
         }
 
-        if sess.target.target.options.requires_uwtable && !include_uwtables {
+        if sess.target.requires_uwtable && !include_uwtables {
             sess.err(
                 "target requires unwind tables, they cannot be disabled with \
                      `-C force-unwind-tables=no`.",
@@ -1481,7 +1527,7 @@
     // We should only display this error if we're actually going to run PGO.
     // If we're just supposed to print out some data, don't show the error (#61002).
     if sess.opts.cg.profile_generate.enabled()
-        && sess.target.target.options.is_like_msvc
+        && sess.target.is_like_msvc
         && sess.panic_strategy() == PanicStrategy::Unwind
         && sess.opts.prints.iter().all(|&p| p == PrintRequest::NativeStaticLibs)
     {
@@ -1586,5 +1632,3 @@
     let handler = rustc_errors::Handler::with_emitter(true, None, emitter);
     handler.struct_warn(msg).emit();
 }
-
-pub type CompileResult = Result<(), ErrorReported>;
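
The new `lint_store` field is a write-once cell holding a trait object: `init_lint_store` fills
it exactly once and `emit_future_breakage` reads it back. A minimal sketch of that pattern using
std's `OnceLock` in place of rustc's `OnceCell`; the `map_err(|_| ())` is there because the
rejected value has no `Debug` impl for `expect` to print:

    use std::sync::{Arc, OnceLock};

    trait SessionLintStore: Send + Sync {
        fn name_to_lint(&self, lint_name: &str) -> u32;
    }

    struct SimpleStore;
    impl SessionLintStore for SimpleStore {
        fn name_to_lint(&self, lint_name: &str) -> u32 {
            lint_name.len() as u32 // stand-in for a real name -> LintId lookup
        }
    }

    struct Session {
        lint_store: OnceLock<Arc<dyn SessionLintStore>>,
    }

    impl Session {
        fn init_lint_store(&self, store: Arc<dyn SessionLintStore>) {
            // `set` hands the value back on failure; map it away so `expect` compiles.
            self.lint_store.set(store).map_err(|_| ()).expect("`lint_store` was initialized twice");
        }

        fn store(&self) -> &Arc<dyn SessionLintStore> {
            self.lint_store.get().expect("`lint_store` not initialized!")
        }
    }

    fn main() {
        let sess = Session { lint_store: OnceLock::new() };
        sess.init_lint_store(Arc::new(SimpleStore));
        assert_eq!(sess.store().name_to_lint("unused_mut"), 10);
    }
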
diff --git a/compiler/rustc_span/Cargo.toml b/compiler/rustc_span/Cargo.toml
index 1abfd50..0864599 100644
--- a/compiler/rustc_span/Cargo.toml
+++ b/compiler/rustc_span/Cargo.toml
@@ -17,5 +17,6 @@
 unicode-width = "0.1.4"
 cfg-if = "0.1.2"
 tracing = "0.1"
-sha-1 = "0.8"
-md-5 = "0.8"
+sha-1 = "0.9"
+sha2 = "0.9"
+md-5 = "0.9"
diff --git a/compiler/rustc_span/src/caching_source_map_view.rs b/compiler/rustc_span/src/caching_source_map_view.rs
index 68b0bd1..15dd00f 100644
--- a/compiler/rustc_span/src/caching_source_map_view.rs
+++ b/compiler/rustc_span/src/caching_source_map_view.rs
@@ -1,13 +1,25 @@
 use crate::source_map::SourceMap;
 use crate::{BytePos, SourceFile};
 use rustc_data_structures::sync::Lrc;
+use std::ops::Range;
 
 #[derive(Clone)]
 struct CacheEntry {
     time_stamp: usize,
     line_number: usize,
-    line_start: BytePos,
-    line_end: BytePos,
+    // The line's byte position range in the `SourceMap`. This range will fail to contain a valid
+    // position in certain edge cases. Spans often start/end one past something, and when that
+    // something is the last character of a file (this can happen when a file doesn't end in a
+    // newline, for example), we'd still like for the position to be considered within the last
+    // line. However, it isn't according to the exclusive upper bound of this range. We cannot
+    // change the upper bound to be inclusive, because for most lines, the upper bound is the same
+    // as the lower bound of the next line, so there would be an ambiguity.
+    //
+    // Since the containment aspect of this range is only used to see whether or not the cache
+    // entry contains a position, the only ramification of the above is that we will get cache
+    // misses for these rare positions. A line lookup for the position via `SourceMap::lookup_line`
+    // after a cache miss will produce the last line number, as desired.
+    line: Range<BytePos>,
     file: Lrc<SourceFile>,
     file_index: usize,
 }
@@ -26,8 +38,7 @@
         let entry = CacheEntry {
             time_stamp: 0,
             line_number: 0,
-            line_start: BytePos(0),
-            line_end: BytePos(0),
+            line: BytePos(0)..BytePos(0),
             file: first_file,
             file_index: 0,
         };
@@ -47,13 +58,13 @@
 
         // Check if the position is in one of the cached lines
         for cache_entry in self.line_cache.iter_mut() {
-            if pos >= cache_entry.line_start && pos < cache_entry.line_end {
+            if cache_entry.line.contains(&pos) {
                 cache_entry.time_stamp = self.time_stamp;
 
                 return Some((
                     cache_entry.file.clone(),
                     cache_entry.line_number,
-                    pos - cache_entry.line_start,
+                    pos - cache_entry.line.start,
                 ));
             }
         }
@@ -69,13 +80,13 @@
         let cache_entry = &mut self.line_cache[oldest];
 
         // If the entry doesn't point to the correct file, fix it up
-        if pos < cache_entry.file.start_pos || pos >= cache_entry.file.end_pos {
+        if !file_contains(&cache_entry.file, pos) {
             let file_valid;
             if self.source_map.files().len() > 0 {
                 let file_index = self.source_map.lookup_source_file_idx(pos);
                 let file = self.source_map.files()[file_index].clone();
 
-                if pos >= file.start_pos && pos < file.end_pos {
+                if file_contains(&file, pos) {
                     cache_entry.file = file;
                     cache_entry.file_index = file_index;
                     file_valid = true;
@@ -95,10 +106,19 @@
         let line_bounds = cache_entry.file.line_bounds(line_index);
 
         cache_entry.line_number = line_index + 1;
-        cache_entry.line_start = line_bounds.0;
-        cache_entry.line_end = line_bounds.1;
+        cache_entry.line = line_bounds;
         cache_entry.time_stamp = self.time_stamp;
 
-        Some((cache_entry.file.clone(), cache_entry.line_number, pos - cache_entry.line_start))
+        Some((cache_entry.file.clone(), cache_entry.line_number, pos - cache_entry.line.start))
     }
 }
+
+#[inline]
+fn file_contains(file: &SourceFile, pos: BytePos) -> bool {
+    // `SourceMap::lookup_source_file_idx` and `SourceFile::contains` both consider the position
+    // one past the end of a file to belong to it. Normally, that's what we want. But for the
+    // purposes of converting a byte position to a line and column number, we can't come up with a
+    // line and column number if the file is empty, because an empty file doesn't contain any
+    // lines. So for our purposes, we don't consider empty files to contain any byte position.
+    file.contains(pos) && !file.is_empty()
+}
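
A small illustration of the containment semantics described by the `CacheEntry` comment and by
`file_contains` above, using a plain `Range<u32>` in place of `Range<BytePos>`:

    use std::ops::Range;

    fn main() {
        // A cached line covering bytes [10, 20); 20 is the start of the next line.
        let line: Range<u32> = 10..20;
        assert!(line.contains(&10));
        assert!(line.contains(&19));
        // One past the end of the line (e.g. a span ending just after the last
        // character of a file with no trailing newline) is *not* contained, so
        // such positions miss the cache and are resolved by a full line lookup.
        assert!(!line.contains(&20));

        // An empty file occupies the degenerate range [n, n): it contains no
        // byte position at all for line/column purposes.
        let empty: Range<u32> = 20..20;
        assert!(!empty.contains(&20));
    }
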
diff --git a/compiler/rustc_span/src/def_id.rs b/compiler/rustc_span/src/def_id.rs
index aae7782..b24ede9 100644
--- a/compiler/rustc_span/src/def_id.rs
+++ b/compiler/rustc_span/src/def_id.rs
@@ -159,6 +159,7 @@
         DefId { krate: LOCAL_CRATE, index }
     }
 
+    /// Returns whether the item is defined in the crate currently being compiled.
     #[inline]
     pub fn is_local(self) -> bool {
         self.krate == LOCAL_CRATE
diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs
index fb80dcb..0f82db1 100644
--- a/compiler/rustc_span/src/hygiene.rs
+++ b/compiler/rustc_span/src/hygiene.rs
@@ -619,14 +619,6 @@
         HygieneData::with(|data| data.outer_mark(self))
     }
 
-    #[inline]
-    pub fn outer_mark_with_data(self) -> (ExpnId, Transparency, ExpnData) {
-        HygieneData::with(|data| {
-            let (expn_id, transparency) = data.outer_mark(self);
-            (expn_id, transparency, data.expn_data(expn_id).clone())
-        })
-    }
-
     pub fn dollar_crate_name(self) -> Symbol {
         HygieneData::with(|data| data.syntax_context_data[self.0 as usize].dollar_crate_name)
     }
@@ -774,6 +766,8 @@
     AstPass(AstPass),
     /// Desugaring done by the compiler during HIR lowering.
     Desugaring(DesugaringKind),
+    /// MIR inlining
+    Inlined,
 }
 
 impl ExpnKind {
@@ -787,6 +781,7 @@
             },
             ExpnKind::AstPass(kind) => kind.descr().to_string(),
             ExpnKind::Desugaring(kind) => format!("desugaring of {}", kind.descr()),
+            ExpnKind::Inlined => "inlined source".to_string(),
         }
     }
 }
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
index 96a6956..0926561 100644
--- a/compiler/rustc_span/src/lib.rs
+++ b/compiler/rustc_span/src/lib.rs
@@ -52,13 +52,16 @@
 use std::cmp::{self, Ordering};
 use std::fmt;
 use std::hash::Hash;
-use std::ops::{Add, Sub};
+use std::ops::{Add, Range, Sub};
 use std::path::{Path, PathBuf};
 use std::str::FromStr;
 
 use md5::Md5;
 use sha1::Digest;
 use sha1::Sha1;
+use sha2::Sha256;
+
+use tracing::debug;
 
 #[cfg(test)]
 mod tests;
@@ -221,12 +224,6 @@
         }
     }
 
-    pub fn quote_expansion_source_code(src: &str) -> FileName {
-        let mut hasher = StableHasher::new();
-        src.hash(&mut hasher);
-        FileName::QuoteExpansion(hasher.finish())
-    }
-
     pub fn macro_expansion_source_code(src: &str) -> FileName {
         let mut hasher = StableHasher::new();
         src.hash(&mut hasher);
@@ -742,14 +739,14 @@
 }
 
 /// Calls the provided closure, using the provided `SourceMap` to format
-/// any spans that are debug-printed during the closure'e exectuino.
+/// any spans that are debug-printed during the closure's execution.
 ///
 /// Normally, the global `TyCtxt` is used to retrieve the `SourceMap`
 /// (see `rustc_interface::callbacks::span_debug1). However, some parts
 /// of the compiler (e.g. `rustc_parse`) may debug-print `Span`s before
 /// a `TyCtxt` is available. In this case, we fall back to
 /// the `SourceMap` provided to this function. If that is not available,
-/// we fall back to printing the raw `Span` field values
+/// we fall back to printing the raw `Span` field values.
 pub fn with_source_map<T, F: FnOnce() -> T>(source_map: Lrc<SourceMap>, f: F) -> T {
     SESSION_GLOBALS.with(|session_globals| {
         *session_globals.source_map.borrow_mut() = Some(source_map);
@@ -1038,6 +1035,7 @@
 pub enum SourceFileHashAlgorithm {
     Md5,
     Sha1,
+    Sha256,
 }
 
 impl FromStr for SourceFileHashAlgorithm {
@@ -1047,6 +1045,7 @@
         match s {
             "md5" => Ok(SourceFileHashAlgorithm::Md5),
             "sha1" => Ok(SourceFileHashAlgorithm::Sha1),
+            "sha256" => Ok(SourceFileHashAlgorithm::Sha256),
             _ => Err(()),
         }
     }
@@ -1059,7 +1058,7 @@
 #[derive(HashStable_Generic, Encodable, Decodable)]
 pub struct SourceFileHash {
     pub kind: SourceFileHashAlgorithm,
-    value: [u8; 20],
+    value: [u8; 32],
 }
 
 impl SourceFileHash {
@@ -1075,6 +1074,9 @@
             SourceFileHashAlgorithm::Sha1 => {
                 value.copy_from_slice(&Sha1::digest(data));
             }
+            SourceFileHashAlgorithm::Sha256 => {
+                value.copy_from_slice(&Sha256::digest(data));
+            }
         }
         hash
     }
@@ -1094,6 +1096,7 @@
         match self.kind {
             SourceFileHashAlgorithm::Md5 => 16,
             SourceFileHashAlgorithm::Sha1 => 20,
+            SourceFileHashAlgorithm::Sha256 => 32,
         }
     }
 }
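
The hash buffer above is sized for the widest digest (SHA-256, 32 bytes); MD5 and SHA-1 only
occupy a prefix of it, and `hash_len` tells consumers how many bytes are meaningful. A
stand-alone sketch of that fixed-buffer scheme with hypothetical types (no real digest
computation):

    /// Stand-in for `SourceFileHashAlgorithm`: each variant knows its digest size.
    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum HashKind {
        Md5,    // 16 bytes
        Sha1,   // 20 bytes
        Sha256, // 32 bytes
    }

    impl HashKind {
        fn hash_len(self) -> usize {
            match self {
                HashKind::Md5 => 16,
                HashKind::Sha1 => 20,
                HashKind::Sha256 => 32,
            }
        }
    }

    /// Stand-in for `SourceFileHash`: the buffer always has room for SHA-256,
    /// so shorter digests leave the tail zeroed and comparisons must go
    /// through `hash_len` rather than the whole array.
    struct FileHash {
        kind: HashKind,
        value: [u8; 32],
    }

    impl FileHash {
        fn new(kind: HashKind, digest: &[u8]) -> FileHash {
            let mut value = [0u8; 32];
            value[..kind.hash_len()].copy_from_slice(&digest[..kind.hash_len()]);
            FileHash { kind, value }
        }

        fn matches(&self, digest: &[u8]) -> bool {
            self.value[..self.kind.hash_len()] == digest[..self.kind.hash_len()]
        }
    }

    fn main() {
        let fake_sha1 = [0xab_u8; 20]; // pretend this came from a real SHA-1 digest
        let hash = FileHash::new(HashKind::Sha1, &fake_sha1);
        assert!(hash.matches(&fake_sha1));
        assert!(!hash.matches(&[0u8; 20]));
    }
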
@@ -1430,24 +1433,33 @@
         if line_index >= 0 { Some(line_index as usize) } else { None }
     }
 
-    pub fn line_bounds(&self, line_index: usize) -> (BytePos, BytePos) {
-        if self.start_pos == self.end_pos {
-            return (self.start_pos, self.end_pos);
+    pub fn line_bounds(&self, line_index: usize) -> Range<BytePos> {
+        if self.is_empty() {
+            return self.start_pos..self.end_pos;
         }
 
         assert!(line_index < self.lines.len());
         if line_index == (self.lines.len() - 1) {
-            (self.lines[line_index], self.end_pos)
+            self.lines[line_index]..self.end_pos
         } else {
-            (self.lines[line_index], self.lines[line_index + 1])
+            self.lines[line_index]..self.lines[line_index + 1]
         }
     }
 
+    /// Returns whether or not the file contains the given `SourceMap` byte
+    /// position. The position one past the end of the file is considered to be
+    /// contained by the file. This implies that files for which `is_empty`
+    /// returns true still contain one byte position according to this function.
     #[inline]
     pub fn contains(&self, byte_pos: BytePos) -> bool {
         byte_pos >= self.start_pos && byte_pos <= self.end_pos
     }
 
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.start_pos == self.end_pos
+    }
+
     /// Calculates the original byte position relative to the start of the file
     /// based on the given byte position.
     pub fn original_relative_byte_pos(&self, pos: BytePos) -> BytePos {
@@ -1462,6 +1474,88 @@
 
         BytePos::from_u32(pos.0 - self.start_pos.0 + diff)
     }
+
+    /// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
+    pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
+        // The number of extra bytes due to multibyte chars in the `SourceFile`.
+        let mut total_extra_bytes = 0;
+
+        for mbc in self.multibyte_chars.iter() {
+            debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
+            if mbc.pos < bpos {
+                // Every character is at least one byte, so we only
+                // count the actual extra bytes.
+                total_extra_bytes += mbc.bytes as u32 - 1;
+                // We should never see a byte position in the middle of a
+                // character.
+                assert!(bpos.to_u32() >= mbc.pos.to_u32() + mbc.bytes as u32);
+            } else {
+                break;
+            }
+        }
+
+        assert!(self.start_pos.to_u32() + total_extra_bytes <= bpos.to_u32());
+        CharPos(bpos.to_usize() - self.start_pos.to_usize() - total_extra_bytes as usize)
+    }
+
+    /// Looks up the file's (1-based) line number and (0-based `CharPos`) column offset, for a
+    /// given `BytePos`.
+    pub fn lookup_file_pos(&self, pos: BytePos) -> (usize, CharPos) {
+        let chpos = self.bytepos_to_file_charpos(pos);
+        match self.lookup_line(pos) {
+            Some(a) => {
+                let line = a + 1; // Line numbers start at 1
+                let linebpos = self.lines[a];
+                let linechpos = self.bytepos_to_file_charpos(linebpos);
+                let col = chpos - linechpos;
+                debug!("byte pos {:?} is on the line at byte pos {:?}", pos, linebpos);
+                debug!("char pos {:?} is on the line at char pos {:?}", chpos, linechpos);
+                debug!("byte is on line: {}", line);
+                assert!(chpos >= linechpos);
+                (line, col)
+            }
+            None => (0, chpos),
+        }
+    }
+
+    /// Looks up the file's (1-based) line number, (0-based `CharPos`) column offset, and (0-based)
+    /// column offset when displayed, for a given `BytePos`.
+    pub fn lookup_file_pos_with_col_display(&self, pos: BytePos) -> (usize, CharPos, usize) {
+        let (line, col_or_chpos) = self.lookup_file_pos(pos);
+        if line > 0 {
+            let col = col_or_chpos;
+            let linebpos = self.lines[line - 1];
+            let col_display = {
+                let start_width_idx = self
+                    .non_narrow_chars
+                    .binary_search_by_key(&linebpos, |x| x.pos())
+                    .unwrap_or_else(|x| x);
+                let end_width_idx = self
+                    .non_narrow_chars
+                    .binary_search_by_key(&pos, |x| x.pos())
+                    .unwrap_or_else(|x| x);
+                let special_chars = end_width_idx - start_width_idx;
+                let non_narrow: usize = self.non_narrow_chars[start_width_idx..end_width_idx]
+                    .iter()
+                    .map(|x| x.width())
+                    .sum();
+                col.0 - special_chars + non_narrow
+            };
+            (line, col, col_display)
+        } else {
+            let chpos = col_or_chpos;
+            let col_display = {
+                let end_width_idx = self
+                    .non_narrow_chars
+                    .binary_search_by_key(&pos, |x| x.pos())
+                    .unwrap_or_else(|x| x);
+                let non_narrow: usize =
+                    self.non_narrow_chars[0..end_width_idx].iter().map(|x| x.width()).sum();
+                chpos.0 - end_width_idx + non_narrow
+            };
+            (0, chpos, col_display)
+        }
+    }
 }
 
 /// Normalizes the source code and records the normalizations.
@@ -1480,7 +1574,7 @@
 
 /// Removes UTF-8 BOM, if any.
 fn remove_bom(src: &mut String, normalized_pos: &mut Vec<NormalizedPos>) {
-    if src.starts_with("\u{feff}") {
+    if src.starts_with('\u{feff}') {
         src.drain(..3);
         normalized_pos.push(NormalizedPos { pos: BytePos(0), diff: 3 });
     }
@@ -1777,7 +1871,7 @@
         }
 
         if *self == DUMMY_SP {
-            std::hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
+            Hash::hash(&TAG_INVALID_SPAN, hasher);
             return;
         }
 
@@ -1788,28 +1882,49 @@
         let (file_lo, line_lo, col_lo) = match ctx.byte_pos_to_line_and_col(span.lo) {
             Some(pos) => pos,
             None => {
-                std::hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
+                Hash::hash(&TAG_INVALID_SPAN, hasher);
                 span.ctxt.hash_stable(ctx, hasher);
                 return;
             }
         };
 
         if !file_lo.contains(span.hi) {
-            std::hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
+            Hash::hash(&TAG_INVALID_SPAN, hasher);
             span.ctxt.hash_stable(ctx, hasher);
             return;
         }
 
-        std::hash::Hash::hash(&TAG_VALID_SPAN, hasher);
+        let (_, line_hi, col_hi) = match ctx.byte_pos_to_line_and_col(span.hi) {
+            Some(pos) => pos,
+            None => {
+                Hash::hash(&TAG_INVALID_SPAN, hasher);
+                span.ctxt.hash_stable(ctx, hasher);
+                return;
+            }
+        };
+
+        Hash::hash(&TAG_VALID_SPAN, hasher);
         // We truncate the stable ID hash and line and column numbers. The chances
         // of causing a collision this way should be minimal.
-        std::hash::Hash::hash(&(file_lo.name_hash as u64), hasher);
+        Hash::hash(&(file_lo.name_hash as u64), hasher);
 
-        let col = (col_lo.0 as u64) & 0xFF;
-        let line = ((line_lo as u64) & 0xFF_FF_FF) << 8;
-        let len = ((span.hi - span.lo).0 as u64) << 32;
-        let line_col_len = col | line | len;
-        std::hash::Hash::hash(&line_col_len, hasher);
+        // Hash both the length and the end location (line/column) of a span. If we
+        // hash only the length, for example, then two otherwise equal spans with
+        // different end locations will have the same hash. This can cause a problem
+        // during incremental compilation wherein a previous result for a query that
+        // depends on the end location of a span will be incorrectly reused when the
+        // end location of the span it depends on has changed (see issue #74890). A
+        // similar analysis applies if some query depends specifically on the length
+        // of the span, but we only hash the end location. So hash both.
+
+        let col_lo_trunc = (col_lo.0 as u64) & 0xFF;
+        let line_lo_trunc = ((line_lo as u64) & 0xFF_FF_FF) << 8;
+        let col_hi_trunc = ((col_hi.0 as u64) & 0xFF) << 32;
+        let line_hi_trunc = ((line_hi as u64) & 0xFF_FF_FF) << 40;
+        let col_line = col_lo_trunc | line_lo_trunc | col_hi_trunc | line_hi_trunc;
+        let len = (span.hi - span.lo).0;
+        Hash::hash(&col_line, hasher);
+        Hash::hash(&len, hasher);
         span.ctxt.hash_stable(ctx, hasher);
     }
 }
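
The comment above explains why both the span's length and its end location are hashed. A small
sketch of the truncated line/column packing, assuming each field is masked to its width before
being shifted into its byte range:

    /// Pack (line_lo, col_lo, line_hi, col_hi) into one u64:
    /// col_lo (8 bits) | line_lo (24 bits) | col_hi (8 bits) | line_hi (24 bits).
    fn pack(line_lo: u64, col_lo: u64, line_hi: u64, col_hi: u64) -> u64 {
        (col_lo & 0xFF)
            | ((line_lo & 0xFF_FF_FF) << 8)
            | ((col_hi & 0xFF) << 32)
            | ((line_hi & 0xFF_FF_FF) << 40)
    }

    fn main() {
        let packed = pack(12, 4, 13, 7);
        assert_eq!(packed & 0xFF, 4);               // col_lo
        assert_eq!((packed >> 8) & 0xFF_FF_FF, 12); // line_lo
        assert_eq!((packed >> 32) & 0xFF, 13);      // col_hi
        assert_eq!((packed >> 40) & 0xFF_FF_FF, 7); // line_hi
    }
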
@@ -1847,9 +1962,7 @@
             return;
         }
 
-        TAG_NOT_ROOT.hash_stable(ctx, hasher);
         let index = self.as_u32() as usize;
-
         let res = CACHE.with(|cache| cache.borrow().get(index).copied().flatten());
 
         if let Some(res) = res {
@@ -1858,6 +1971,7 @@
             let new_len = index + 1;
 
             let mut sub_hasher = StableHasher::new();
+            TAG_NOT_ROOT.hash_stable(ctx, &mut sub_hasher);
             self.expn_data().hash_stable(ctx, &mut sub_hasher);
             let sub_hash: Fingerprint = sub_hasher.finish();
 
diff --git a/compiler/rustc_span/src/source_map.rs b/compiler/rustc_span/src/source_map.rs
index 37596b8..f067cdb 100644
--- a/compiler/rustc_span/src/source_map.rs
+++ b/compiler/rustc_span/src/source_map.rs
@@ -12,7 +12,7 @@
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::stable_hasher::StableHasher;
-use rustc_data_structures::sync::{AtomicU32, Lock, LockGuard, Lrc, MappedLockGuard};
+use rustc_data_structures::sync::{AtomicU32, Lrc, MappedReadGuard, ReadGuard, RwLock};
 use std::cmp;
 use std::convert::TryFrom;
 use std::hash::Hash;
@@ -168,7 +168,7 @@
     /// The address space below this value is currently used by the files in the source map.
     used_address_space: AtomicU32,
 
-    files: Lock<SourceMapFiles>,
+    files: RwLock<SourceMapFiles>,
     file_loader: Box<dyn FileLoader + Sync + Send>,
     // This is used to apply the file path remapping as specified via
     // `--remap-path-prefix` to all `SourceFile`s allocated within this `SourceMap`.
@@ -236,8 +236,8 @@
 
     // By returning a `MonotonicVec`, we ensure that consumers cannot invalidate
     // any existing indices pointing into `files`.
-    pub fn files(&self) -> MappedLockGuard<'_, monotonic::MonotonicVec<Lrc<SourceFile>>> {
-        LockGuard::map(self.files.borrow(), |files| &mut files.source_files)
+    pub fn files(&self) -> MappedReadGuard<'_, monotonic::MonotonicVec<Lrc<SourceFile>>> {
+        ReadGuard::map(self.files.borrow(), |files| &files.source_files)
     }
 
     pub fn source_file_by_stable_id(
@@ -428,58 +428,22 @@
         }
     }
 
+    /// Return the SourceFile that contains the given `BytePos`
+    pub fn lookup_source_file(&self, pos: BytePos) -> Lrc<SourceFile> {
+        let idx = self.lookup_source_file_idx(pos);
+        (*self.files.borrow().source_files)[idx].clone()
+    }
+
     /// Looks up source information about a `BytePos`.
     pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
-        let chpos = self.bytepos_to_file_charpos(pos);
-        match self.lookup_line(pos) {
-            Ok(SourceFileAndLine { sf: f, line: a }) => {
-                let line = a + 1; // Line numbers start at 1
-                let linebpos = f.lines[a];
-                let linechpos = self.bytepos_to_file_charpos(linebpos);
-                let col = chpos - linechpos;
-
-                let col_display = {
-                    let start_width_idx = f
-                        .non_narrow_chars
-                        .binary_search_by_key(&linebpos, |x| x.pos())
-                        .unwrap_or_else(|x| x);
-                    let end_width_idx = f
-                        .non_narrow_chars
-                        .binary_search_by_key(&pos, |x| x.pos())
-                        .unwrap_or_else(|x| x);
-                    let special_chars = end_width_idx - start_width_idx;
-                    let non_narrow: usize = f.non_narrow_chars[start_width_idx..end_width_idx]
-                        .iter()
-                        .map(|x| x.width())
-                        .sum();
-                    col.0 - special_chars + non_narrow
-                };
-                debug!("byte pos {:?} is on the line at byte pos {:?}", pos, linebpos);
-                debug!("char pos {:?} is on the line at char pos {:?}", chpos, linechpos);
-                debug!("byte is on line: {}", line);
-                assert!(chpos >= linechpos);
-                Loc { file: f, line, col, col_display }
-            }
-            Err(f) => {
-                let col_display = {
-                    let end_width_idx = f
-                        .non_narrow_chars
-                        .binary_search_by_key(&pos, |x| x.pos())
-                        .unwrap_or_else(|x| x);
-                    let non_narrow: usize =
-                        f.non_narrow_chars[0..end_width_idx].iter().map(|x| x.width()).sum();
-                    chpos.0 - end_width_idx + non_narrow
-                };
-                Loc { file: f, line: 0, col: chpos, col_display }
-            }
-        }
+        let sf = self.lookup_source_file(pos);
+        let (line, col, col_display) = sf.lookup_file_pos_with_col_display(pos);
+        Loc { file: sf, line, col, col_display }
     }
 
     // If the corresponding `SourceFile` is empty, does not return a line number.
     pub fn lookup_line(&self, pos: BytePos) -> Result<SourceFileAndLine, Lrc<SourceFile>> {
-        let idx = self.lookup_source_file_idx(pos);
-
-        let f = (*self.files.borrow().source_files)[idx].clone();
+        let f = self.lookup_source_file(pos);
 
         match f.lookup_line(pos) {
             Some(line) => Ok(SourceFileAndLine { sf: f, line }),
@@ -487,15 +451,6 @@
         }
     }
 
-    /// Returns a new `Span` covering the start and end `BytePos`s of the file containing the given
-    /// `pos`. This can be used to quickly determine if another `BytePos` or `Span` is from the same
-    /// file.
-    pub fn lookup_file_span(&self, pos: BytePos) -> Span {
-        let idx = self.lookup_source_file_idx(pos);
-        let SourceFile { start_pos, end_pos, .. } = *(*self.files.borrow().source_files)[idx];
-        Span::with_root_ctxt(start_pos, end_pos)
-    }
-
     /// Returns `Some(span)`, a union of the LHS and RHS span. The LHS must precede the RHS. If
     /// there are gaps between LHS and RHS, the resulting union will cross these gaps.
     /// For this to work,
@@ -934,27 +889,8 @@
     /// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
     pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
         let idx = self.lookup_source_file_idx(bpos);
-        let map = &(*self.files.borrow().source_files)[idx];
-
-        // The number of extra bytes due to multibyte chars in the `SourceFile`.
-        let mut total_extra_bytes = 0;
-
-        for mbc in map.multibyte_chars.iter() {
-            debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
-            if mbc.pos < bpos {
-                // Every character is at least one byte, so we only
-                // count the actual extra bytes.
-                total_extra_bytes += mbc.bytes as u32 - 1;
-                // We should never see a byte position in the middle of a
-                // character.
-                assert!(bpos.to_u32() >= mbc.pos.to_u32() + mbc.bytes as u32);
-            } else {
-                break;
-            }
-        }
-
-        assert!(map.start_pos.to_u32() + total_extra_bytes <= bpos.to_u32());
-        CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes as usize)
+        let sf = &(*self.files.borrow().source_files)[idx];
+        sf.bytepos_to_file_charpos(bpos)
     }
 
     // Returns the index of the `SourceFile` (in `self.files`) that contains `pos`.
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index e3ad314..ad58f89 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -127,6 +127,7 @@
         ArgumentV1,
         Arguments,
         C,
+        CString,
         Center,
         Clone,
         Copy,
@@ -212,6 +213,7 @@
         _d,
         _e,
         _task_context,
+        a32,
         aarch64_target_feature,
         abi,
         abi_amdgpu_kernel,
@@ -256,9 +258,11 @@
         arbitrary_enum_discriminant,
         arbitrary_self_types,
         arith_offset,
+        arm,
         arm_target_feature,
         array,
         arrays,
+        as_ptr,
         as_str,
         asm,
         assert,
@@ -308,6 +312,7 @@
         breakpoint,
         bridge,
         bswap,
+        c_str,
         c_variadic,
         call,
         call_mut,
@@ -321,6 +326,7 @@
         cfg_attr,
         cfg_attr_multi,
         cfg_doctest,
+        cfg_panic,
         cfg_sanitize,
         cfg_target_feature,
         cfg_target_has_atomic,
@@ -359,6 +365,7 @@
         const_fn_union,
         const_generics,
         const_if_match,
+        const_impl_trait,
         const_in_array_repeat_expressions,
         const_indexing,
         const_let,
@@ -394,6 +401,7 @@
         crate_type,
         crate_visibility_modifier,
         crt_dash_static: "crt-static",
+        cstring_type,
         ctlz,
         ctlz_nonzero,
         ctpop,
@@ -415,6 +423,7 @@
         decl_macro,
         declare_lint_pass,
         decode,
+        default_alloc_error_handler,
         default_lib_allocator,
         default_type_parameter_fallback,
         default_type_params,
@@ -426,6 +435,7 @@
         deref_mut,
         deref_target,
         derive,
+        destructuring_assignment,
         diagnostic,
         direct,
         discriminant_kind,
@@ -463,6 +473,7 @@
         encode,
         env,
         eq,
+        ermsb_target_feature,
         err,
         exact_div,
         except,
@@ -473,6 +484,7 @@
         existential_type,
         exp2f32,
         exp2f64,
+        expect,
         expected,
         expf32,
         expf64,
@@ -496,6 +508,7 @@
         fadd_fast,
         fdiv_fast,
         feature,
+        ffi,
         ffi_const,
         ffi_pure,
         ffi_returns_twice,
@@ -589,12 +602,15 @@
         infer_static_outlives_requirements,
         inlateout,
         inline,
+        inline_const,
         inout,
+        instruction_set,
         intel,
         into_iter,
         into_result,
         intrinsics,
         irrefutable_let_patterns,
+        isa_attribute,
         isize,
         issue,
         issue_5723_bootstrap,
@@ -770,6 +786,7 @@
         panic_info,
         panic_location,
         panic_runtime,
+        panic_str,
         panic_unwind,
         param_attrs,
         parent_trait,
@@ -787,6 +804,8 @@
         plugin_registrar,
         plugins,
         pointer,
+        pointer_trait,
+        pointer_trait_fmt,
         poll,
         position,
         post_dash_lto: "post-lto",
@@ -886,6 +905,7 @@
         rustc,
         rustc_allocator,
         rustc_allocator_nounwind,
+        rustc_allow_const_fn_unstable,
         rustc_args_required_const,
         rustc_attrs,
         rustc_builtin_macro,
@@ -1063,6 +1083,7 @@
         sym,
         sync,
         sync_trait,
+        t32,
         target_arch,
         target_endian,
         target_env,
@@ -1107,6 +1128,7 @@
         try_trait,
         tt,
         tuple,
+        tuple_from_req,
         tuple_indexing,
         two_phase,
         ty,
@@ -1149,6 +1171,7 @@
         unsafe_cell,
         unsafe_no_drop_flag,
         unsize,
+        unsized_fn_params,
         unsized_locals,
         unsized_tuple_coercion,
         unstable,
@@ -1156,6 +1179,7 @@
         unused_qualifications,
         unwind,
         unwind_attributes,
+        unwrap,
         unwrap_or,
         use_extern_macros,
         use_nested_groups,
diff --git a/compiler/rustc_symbol_mangling/Cargo.toml b/compiler/rustc_symbol_mangling/Cargo.toml
index c0dacd2..3df5f16 100644
--- a/compiler/rustc_symbol_mangling/Cargo.toml
+++ b/compiler/rustc_symbol_mangling/Cargo.toml
@@ -10,7 +10,7 @@
 [dependencies]
 tracing = "0.1"
 punycode = "0.4.0"
-rustc-demangle = "0.1.16"
+rustc-demangle = "0.1.18"
 
 rustc_ast = { path = "../rustc_ast" }
 rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs
index b96e318..ac91fcf 100644
--- a/compiler/rustc_symbol_mangling/src/legacy.rs
+++ b/compiler/rustc_symbol_mangling/src/legacy.rs
@@ -115,7 +115,6 @@
         }
 
         // also include any type parameters (for generic items)
-        assert!(!substs.has_erasable_regions());
         substs.hash_stable(&mut hcx, &mut hasher);
 
         if let Some(instantiating_crate) = instantiating_crate {
@@ -238,7 +237,7 @@
 
     fn print_const(mut self, ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
         // only print integers
-        if let ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw { .. })) = ct.val {
+        if let ty::ConstKind::Value(ConstValue::Scalar(Scalar::Int { .. })) = ct.val {
             if ct.ty.is_integral() {
                 return self.pretty_print_const(ct, true);
             }
@@ -327,10 +326,8 @@
     ) -> Result<Self::Path, Self::Error> {
         self = print_prefix(self)?;
 
-        let args = args.iter().cloned().filter(|arg| match arg.unpack() {
-            GenericArgKind::Lifetime(_) => false,
-            _ => true,
-        });
+        let args =
+            args.iter().cloned().filter(|arg| !matches!(arg.unpack(), GenericArgKind::Lifetime(_)));
 
         if args.clone().next().is_some() {
             self.generic_delimiters(|cx| cx.comma_sep(args))
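Several hunks in this file, and many more below, replace hand-written boolean `match` expressions with the `matches!` macro (stable since Rust 1.42). The two forms are equivalent; a small self-contained example:

    enum Mode { Direct, Indirect(bool) }

    // Before: an explicit match that returns a bool.
    fn is_indirect_match(m: &Mode) -> bool {
        match m {
            Mode::Indirect(_) => true,
            _ => false,
        }
    }

    // After: the same check via `matches!`.
    fn is_indirect_matches(m: &Mode) -> bool {
        matches!(m, Mode::Indirect(_))
    }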
diff --git a/compiler/rustc_symbol_mangling/src/lib.rs b/compiler/rustc_symbol_mangling/src/lib.rs
index 75150a5..10245d2 100644
--- a/compiler/rustc_symbol_mangling/src/lib.rs
+++ b/compiler/rustc_symbol_mangling/src/lib.rs
@@ -174,10 +174,7 @@
             return tcx.sess.generate_proc_macro_decls_symbol(disambiguator);
         }
         let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-        match tcx.hir().get(hir_id) {
-            Node::ForeignItem(_) => true,
-            _ => false,
-        }
+        matches!(tcx.hir().get(hir_id), Node::ForeignItem(_))
     } else {
         tcx.is_foreign_item(def_id)
     };
@@ -200,15 +197,14 @@
     //   show up in the `wasm-import-name` custom attribute in LLVM IR.
     //
     // [1]: https://bugs.llvm.org/show_bug.cgi?id=44316
-    if is_foreign {
-        if tcx.sess.target.target.arch != "wasm32"
-            || !tcx.wasm_import_module_map(def_id.krate).contains_key(&def_id)
-        {
-            if let Some(name) = attrs.link_name {
-                return name.to_string();
-            }
-            return tcx.item_name(def_id).to_string();
+    if is_foreign
+        && (tcx.sess.target.arch != "wasm32"
+            || !tcx.wasm_import_module_map(def_id.krate).contains_key(&def_id))
+    {
+        if let Some(name) = attrs.link_name {
+            return name.to_string();
         }
+        return tcx.item_name(def_id).to_string();
     }
 
     if let Some(name) = attrs.export_name {
@@ -234,10 +230,7 @@
         // codegen units) then this symbol may become an exported (but hidden
         // visibility) symbol. This means that multiple crates may do the same
         // and we want to be sure to avoid any symbol conflicts here.
-        match MonoItem::Fn(instance).instantiation_mode(tcx) {
-            InstantiationMode::GloballyShared { may_conflict: true } => true,
-            _ => false,
-        };
+        matches!(MonoItem::Fn(instance).instantiation_mode(tcx), InstantiationMode::GloballyShared { may_conflict: true });
 
     let instantiating_crate =
         if avoid_cross_crate_conflicts { Some(compute_instantiating_crate()) } else { None };
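The `is_foreign` hunk above collapses a nested `if` into one condition without changing behavior: a foreign item keeps its `#[link_name]` (or its plain item name) unless it is a wasm32 import with a known import module, which must keep the default mangling so the module name survives into the `wasm-import-name` attribute. A hedged restatement of that decision as a free function (parameter names are illustrative, not rustc API):

    fn foreign_symbol(
        is_wasm32: bool,
        has_wasm_import_module: bool,
        link_name: Option<&str>,
        item_name: &str,
    ) -> Option<String> {
        if !is_wasm32 || !has_wasm_import_module {
            // Use the explicit link name if present, otherwise the item's own name.
            Some(link_name.unwrap_or(item_name).to_string())
        } else {
            // Fall through to normal symbol mangling.
            None
        }
    }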
diff --git a/compiler/rustc_symbol_mangling/src/test.rs b/compiler/rustc_symbol_mangling/src/test.rs
index 24850a8..822a835 100644
--- a/compiler/rustc_symbol_mangling/src/test.rs
+++ b/compiler/rustc_symbol_mangling/src/test.rs
@@ -5,7 +5,8 @@
 //! paths etc in all kinds of annoying scenarios.
 
 use rustc_hir as hir;
-use rustc_middle::ty::{Instance, TyCtxt};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{subst::InternalSubsts, Instance, TyCtxt};
 use rustc_span::symbol::{sym, Symbol};
 
 const SYMBOL_NAME: Symbol = sym::rustc_symbol_name;
@@ -35,8 +36,11 @@
         let def_id = tcx.hir().local_def_id(hir_id);
         for attr in tcx.get_attrs(def_id.to_def_id()).iter() {
             if tcx.sess.check_name(attr, SYMBOL_NAME) {
-                // for now, can only use on monomorphic names
-                let instance = Instance::mono(tcx, def_id.to_def_id());
+                let def_id = def_id.to_def_id();
+                let instance = Instance::new(
+                    def_id,
+                    tcx.erase_regions(&InternalSubsts::identity_for_item(tcx, def_id)),
+                );
                 let mangled = tcx.symbol_name(instance);
                 tcx.sess.span_err(attr.span, &format!("symbol-name({})", mangled));
                 if let Ok(demangling) = rustc_demangle::try_demangle(mangled.name) {
@@ -44,7 +48,7 @@
                     tcx.sess.span_err(attr.span, &format!("demangling-alt({:#})", demangling));
                 }
             } else if tcx.sess.check_name(attr, DEF_PATH) {
-                let path = tcx.def_path_str(def_id.to_def_id());
+                let path = with_no_trimmed_paths(|| tcx.def_path_str(def_id.to_def_id()));
                 tcx.sess.span_err(attr.span, &format!("def-path({})", path));
             }
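Per the removed comment, `#[rustc_symbol_name]` was previously limited to monomorphic items via `Instance::mono`; the new code supplies the item's identity substitutions with regions erased, so generic items can be named as well. A hypothetical nightly-only usage sketch (the attribute is an internal rustc testing hook gated behind `rustc_attrs`, and it reports the mangled name as a compile-time diagnostic by design):

    #![feature(rustc_attrs)]

    #[rustc_symbol_name]
    fn demo<T>(x: T) -> T { // generic items are now accepted
        x
    }

    fn main() {}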
 
diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs
index da9c931..c28c2fe 100644
--- a/compiler/rustc_symbol_mangling/src/v0.rs
+++ b/compiler/rustc_symbol_mangling/src/v0.rs
@@ -132,7 +132,7 @@
             self.push("u");
 
             // FIXME(eddyb) we should probably roll our own punycode implementation.
-            let mut punycode_bytes = match ::punycode::encode(ident) {
+            let mut punycode_bytes = match punycode::encode(ident) {
                 Ok(s) => s.into_bytes(),
                 Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident),
             };
@@ -199,15 +199,9 @@
 
         let lifetimes = regions
             .into_iter()
-            .map(|br| {
-                match br {
-                    ty::BrAnon(i) => {
-                        // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
-                        assert_ne!(i, 0);
-                        i - 1
-                    }
-                    _ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value),
-                }
+            .map(|br| match br {
+                ty::BrAnon(i) => i,
+                _ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value),
             })
             .max()
             .map_or(0, |max| max + 1);
@@ -259,7 +253,7 @@
     }
 
     fn print_impl_path(
-        self,
+        mut self,
         impl_def_id: DefId,
         substs: &'tcx [GenericArg<'tcx>],
         mut self_ty: Ty<'tcx>,
@@ -284,12 +278,37 @@
             }
         }
 
-        self.path_append_impl(
-            |cx| cx.print_def_path(parent_def_id, &[]),
-            &key.disambiguated_data,
-            self_ty,
-            impl_trait_ref,
-        )
+        self.push(match impl_trait_ref {
+            Some(_) => "X",
+            None => "M",
+        });
+
+        // Encode impl generic params if the substitutions contain parameters (implying
+        // polymorphization is enabled) and this isn't an inherent impl.
+        if impl_trait_ref.is_some() && substs.iter().any(|a| a.has_param_types_or_consts()) {
+            self = self.path_generic_args(
+                |this| {
+                    this.path_append_ns(
+                        |cx| cx.print_def_path(parent_def_id, &[]),
+                        'I',
+                        key.disambiguated_data.disambiguator as u64,
+                        "",
+                    )
+                },
+                substs,
+            )?;
+        } else {
+            self.push_disambiguator(key.disambiguated_data.disambiguator as u64);
+            self = self.print_def_path(parent_def_id, &[])?;
+        }
+
+        self = self_ty.print(self)?;
+
+        if let Some(trait_ref) = impl_trait_ref {
+            self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
+        }
+
+        Ok(self)
     }
 
     fn print_region(mut self, region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
@@ -301,10 +320,6 @@
             // Late-bound lifetimes use indices starting at 1,
             // see `BinderLevel` for more details.
             ty::ReLateBound(debruijn, ty::BrAnon(i)) => {
-                // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
-                assert_ne!(i, 0);
-                let i = i - 1;
-
                 let binder = &self.binders[self.binders.len() - 1 - debruijn.index()];
                 let depth = binder.lifetime_depths.start + i;
 
@@ -502,17 +517,31 @@
         }
         let start = self.out.len();
 
-        match ct.ty.kind() {
-            ty::Uint(_) => {}
-            ty::Bool => {}
+        let mut neg = false;
+        let val = match ct.ty.kind() {
+            ty::Uint(_) | ty::Bool | ty::Char => {
+                ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty)
+            }
+            ty::Int(_) => {
+                let param_env = ty::ParamEnv::reveal_all();
+                ct.try_eval_bits(self.tcx, param_env, ct.ty).and_then(|b| {
+                    let sz = self.tcx.layout_of(param_env.and(ct.ty)).ok()?.size;
+                    let val = sz.sign_extend(b) as i128;
+                    if val < 0 {
+                        neg = true;
+                    }
+                    Some(val.wrapping_abs() as u128)
+                })
+            }
             _ => {
                 bug!("symbol_names: unsupported constant of type `{}` ({:?})", ct.ty, ct);
             }
-        }
-        self = ct.ty.print(self)?;
+        };
 
-        if let Some(bits) = ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty) {
-            let _ = write!(self.out, "{:x}_", bits);
+        if let Some(bits) = val {
+            // We only print the type if the const can be evaluated.
+            self = ct.ty.print(self)?;
+            let _ = write!(self.out, "{}{:x}_", if neg { "n" } else { "" }, bits);
         } else {
             // NOTE(eddyb) despite having the path, we need to
             // encode a placeholder, as the path could refer
@@ -538,6 +567,7 @@
         self.push_ident(&name);
         Ok(self)
     }
+
     fn path_qualified(
         mut self,
         self_ty: Ty<'tcx>,
@@ -552,24 +582,16 @@
     }
 
     fn path_append_impl(
-        mut self,
-        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
-        disambiguated_data: &DisambiguatedDefPathData,
-        self_ty: Ty<'tcx>,
-        trait_ref: Option<ty::TraitRef<'tcx>>,
+        self,
+        _: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        _: &DisambiguatedDefPathData,
+        _: Ty<'tcx>,
+        _: Option<ty::TraitRef<'tcx>>,
     ) -> Result<Self::Path, Self::Error> {
-        self.push(match trait_ref {
-            Some(_) => "X",
-            None => "M",
-        });
-        self.push_disambiguator(disambiguated_data.disambiguator as u64);
-        self = print_prefix(self)?;
-        self = self_ty.print(self)?;
-        if let Some(trait_ref) = trait_ref {
-            self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
-        }
-        Ok(self)
+        // Inlined into `print_impl_path`
+        unreachable!()
     }
+
     fn path_append(
         self,
         print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
@@ -603,6 +625,7 @@
             name.as_ref().map_or("", |s| &s[..]),
         )
     }
+
     fn path_generic_args(
         mut self,
         print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
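Two substantive changes in v0.rs beyond the cleanups: `print_impl_path` now emits the `X`/`M` tag itself and, when the impl's substitutions still contain generic parameters (as under polymorphization), encodes them via `path_generic_args`, which leaves `path_append_impl` unreachable; and `print_const` gains signed-integer support, writing an `n` marker before the absolute value of a negative constant. A standalone sketch of the sign handling for a 1-byte integer (it mirrors the `sign_extend` call above; not rustc API):

    fn mangle_i8_bits(raw: u128) -> String {
        let shift = 128 - 8;
        // Sign-extend the low 8 bits to 128 bits, as Size::sign_extend does.
        let val = ((raw << shift) as i128) >> shift;
        let neg = val < 0;
        let bits = val.wrapping_abs() as u128;
        format!("{}{:x}_", if neg { "n" } else { "" }, bits)
    }

    // mangle_i8_bits(0xFB) == "n5_"  (-5i8);  mangle_i8_bits(0x05) == "5_"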
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 602c424..429a337 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -474,31 +474,19 @@
     }
 
     pub fn is_indirect(&self) -> bool {
-        match self.mode {
-            PassMode::Indirect(..) => true,
-            _ => false,
-        }
+        matches!(self.mode, PassMode::Indirect(..))
     }
 
     pub fn is_sized_indirect(&self) -> bool {
-        match self.mode {
-            PassMode::Indirect(_, None) => true,
-            _ => false,
-        }
+        matches!(self.mode, PassMode::Indirect(_, None))
     }
 
     pub fn is_unsized_indirect(&self) -> bool {
-        match self.mode {
-            PassMode::Indirect(_, Some(_)) => true,
-            _ => false,
-        }
+        matches!(self.mode, PassMode::Indirect(_, Some(_)))
     }
 
     pub fn is_ignore(&self) -> bool {
-        match self.mode {
-            PassMode::Ignore => true,
-            _ => false,
-        }
+        matches!(self.mode, PassMode::Ignore)
     }
 }
 
@@ -574,7 +562,7 @@
             "x86_64" => {
                 if abi == spec::abi::Abi::SysV64 {
                     x86_64::compute_abi_info(cx, self);
-                } else if abi == spec::abi::Abi::Win64 || cx.target_spec().options.is_like_windows {
+                } else if abi == spec::abi::Abi::Win64 || cx.target_spec().is_like_windows {
                     x86_win64::compute_abi_info(self);
                 } else {
                     x86_64::compute_abi_info(cx, self);
@@ -596,7 +584,7 @@
             "nvptx64" => nvptx64::compute_abi_info(self),
             "hexagon" => hexagon::compute_abi_info(self),
             "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
-            "wasm32" if cx.target_spec().target_os != "emscripten" => {
+            "wasm32" if cx.target_spec().os != "emscripten" => {
                 wasm32_bindgen_compat::compute_abi_info(self)
             }
             "wasm32" | "asmjs" => wasm32::compute_abi_info(cx, self),
@@ -610,15 +598,3 @@
         Ok(())
     }
 }
-
-/// Returns the maximum size of return values to be passed by value in the Rust ABI.
-///
-/// Return values beyond this size will use an implicit out-pointer instead.
-pub fn max_ret_by_val<C: HasTargetSpec + HasDataLayout>(spec: &C) -> Size {
-    match spec.target_spec().arch.as_str() {
-        // System-V will pass return values up to 128 bits in RAX/RDX.
-        "x86_64" => Size::from_bits(128),
-
-        _ => spec.data_layout().pointer_size,
-    }
-}
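From here on, many hunks drop the `.options` hop (`cx.target_spec().options.is_like_windows` becomes `cx.target_spec().is_like_windows`) and use renamed fields (`target_os` → `os`, `target_env` → `env`, and so on); the unused `max_ret_by_val` helper is removed outright. The likely mechanism behind the shorter field access is a `Deref` from `Target` to `TargetOptions`; treat the sketch below as an assumption inferred from these hunks rather than verified rustc source:

    use std::ops::Deref;

    struct TargetOptions { is_like_windows: bool, os: String /* ... */ }
    struct Target { options: TargetOptions /* llvm_target, arch, ... */ }

    impl Deref for Target {
        type Target = TargetOptions;
        fn deref(&self) -> &TargetOptions {
            // Field access on Target falls through to its options.
            &self.options
        }
    }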
diff --git a/compiler/rustc_target/src/abi/call/powerpc64.rs b/compiler/rustc_target/src/abi/call/powerpc64.rs
index b740707..8c2a9d0 100644
--- a/compiler/rustc_target/src/abi/call/powerpc64.rs
+++ b/compiler/rustc_target/src/abi/call/powerpc64.rs
@@ -119,7 +119,7 @@
     Ty: TyAndLayoutMethods<'a, C> + Copy,
     C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>> + HasDataLayout + HasTargetSpec,
 {
-    let abi = if cx.target_spec().target_env == "musl" {
+    let abi = if cx.target_spec().env == "musl" {
         ELFv2
     } else {
         match cx.data_layout().endian {
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
index 2e10bed..782c661 100644
--- a/compiler/rustc_target/src/abi/call/riscv.rs
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -323,7 +323,7 @@
     Ty: TyAndLayoutMethods<'a, C> + Copy,
     C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>> + HasDataLayout + HasTargetSpec,
 {
-    let flen = match &cx.target_spec().options.llvm_abiname[..] {
+    let flen = match &cx.target_spec().llvm_abiname[..] {
         "ilp32f" | "lp64f" => 32,
         "ilp32d" | "lp64d" => 64,
         _ => 0,
@@ -333,10 +333,8 @@
     let mut avail_gprs = 8;
     let mut avail_fprs = 8;
 
-    if !fn_abi.ret.is_ignore() {
-        if classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
-            avail_gprs -= 1;
-        }
+    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
+        avail_gprs -= 1;
     }
 
     for (i, arg) in fn_abi.args.iter_mut().enumerate() {
diff --git a/compiler/rustc_target/src/abi/call/wasm32.rs b/compiler/rustc_target/src/abi/call/wasm32.rs
index 510f671..ff2c0e9 100644
--- a/compiler/rustc_target/src/abi/call/wasm32.rs
+++ b/compiler/rustc_target/src/abi/call/wasm32.rs
@@ -24,10 +24,8 @@
     C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>> + HasDataLayout,
 {
     ret.extend_integer_width_to(32);
-    if ret.layout.is_aggregate() {
-        if !unwrap_trivial_aggregate(cx, ret) {
-            ret.make_indirect();
-        }
+    if ret.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, ret) {
+        ret.make_indirect();
     }
 }
 
@@ -37,10 +35,8 @@
     C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>> + HasDataLayout,
 {
     arg.extend_integer_width_to(32);
-    if arg.layout.is_aggregate() {
-        if !unwrap_trivial_aggregate(cx, arg) {
-            arg.make_indirect_byval();
-        }
+    if arg.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, arg) {
+        arg.make_indirect_byval();
     }
 }
 
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
index df3dd5d..07bf1e9 100644
--- a/compiler/rustc_target/src/abi/call/x86.rs
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -41,10 +41,10 @@
             // http://www.angelcode.com/dev/callconv/callconv.html
             // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
             let t = cx.target_spec();
-            if t.options.abi_return_struct_as_int {
+            if t.abi_return_struct_as_int {
                 // According to Clang, everyone but MSVC returns single-element
                 // float aggregates directly in a floating-point register.
-                if !t.options.is_like_msvc && is_single_fp_element(cx, fn_abi.ret.layout) {
+                if !t.is_like_msvc && is_single_fp_element(cx, fn_abi.ret.layout) {
                     match fn_abi.ret.layout.size.bytes() {
                         4 => fn_abi.ret.cast_to(Reg::f32()),
                         8 => fn_abi.ret.cast_to(Reg::f64()),
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index 3c1a2ea..a43080b 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -156,20 +156,20 @@
             Endian::Little => "little",
             Endian::Big => "big",
         };
-        if endian_str != target.target_endian {
+        if endian_str != target.endian {
             return Err(format!(
                 "inconsistent target specification: \"data-layout\" claims \
                                 architecture is {}-endian, while \"target-endian\" is `{}`",
-                endian_str, target.target_endian
+                endian_str, target.endian
             ));
         }
 
-        if dl.pointer_size.bits().to_string() != target.target_pointer_width {
+        if dl.pointer_size.bits() != target.pointer_width.into() {
             return Err(format!(
                 "inconsistent target specification: \"data-layout\" claims \
                                 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                 dl.pointer_size.bits(),
-                target.target_pointer_width
+                target.pointer_width
             ));
         }
 
@@ -306,6 +306,35 @@
         let bytes = self.bytes().checked_mul(count)?;
         if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
     }
+
+    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
+    /// (i.e., if it is negative, fill with 1's on the left).
+    #[inline]
+    pub fn sign_extend(self, value: u128) -> u128 {
+        let size = self.bits();
+        if size == 0 {
+            // Truncated until nothing is left.
+            return 0;
+        }
+        // Sign-extend it.
+        let shift = 128 - size;
+        // Shift the unsigned value to the left, then shift back to the right as signed
+        // (essentially fills with sign bit on the left).
+        (((value << shift) as i128) >> shift) as u128
+    }
+
+    /// Truncates `value` to `self` bits.
+    #[inline]
+    pub fn truncate(self, value: u128) -> u128 {
+        let size = self.bits();
+        if size == 0 {
+            // Truncated until nothing is left.
+            return 0;
+        }
+        let shift = 128 - size;
+        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
+        (value << shift) >> shift
+    }
 }
 
 // Panicking addition, subtraction and multiplication for convenience.
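The new `sign_extend`/`truncate` helpers centralize bit-width handling that previously lived in callers (the `print_const` hunk above uses `sign_extend` for signed constants). A quick sanity check of their behaviour, assuming the crate's usual `rustc_target::abi::Size` path:

    #[test]
    fn size_bit_helpers_sketch() {
        use rustc_target::abi::Size;
        let byte = Size::from_bits(8);
        assert_eq!(byte.sign_extend(0xFB) as i128, -5);  // 0xFB re-read as a signed 8-bit value
        assert_eq!(byte.truncate(0x1FB), 0xFB);          // bits above the low 8 are dropped
        assert_eq!(Size::from_bits(0).truncate(123), 0); // zero-width values collapse to 0
    }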
@@ -557,17 +586,11 @@
     }
 
     pub fn is_float(self) -> bool {
-        match self {
-            F32 | F64 => true,
-            _ => false,
-        }
+        matches!(self, F32 | F64)
     }
 
     pub fn is_int(self) -> bool {
-        match self {
-            Int(..) => true,
-            _ => false,
-        }
+        matches!(self, Int(..))
     }
 }
 
@@ -794,18 +817,12 @@
 
     /// Returns `true` if this is an uninhabited type
     pub fn is_uninhabited(&self) -> bool {
-        match *self {
-            Abi::Uninhabited => true,
-            _ => false,
-        }
+        matches!(*self, Abi::Uninhabited)
     }
 
     /// Returns `true` if this is a scalar type
     pub fn is_scalar(&self) -> bool {
-        match *self {
-            Abi::Scalar(_) => true,
-            _ => false,
-        }
+        matches!(*self, Abi::Scalar(_))
     }
 }
 
diff --git a/compiler/rustc_target/src/asm/arm.rs b/compiler/rustc_target/src/asm/arm.rs
index 85a136b..2800091 100644
--- a/compiler/rustc_target/src/asm/arm.rs
+++ b/compiler/rustc_target/src/asm/arm.rs
@@ -61,7 +61,7 @@
 
 // This uses the same logic as useR7AsFramePointer in LLVM
 fn frame_pointer_is_r7(mut has_feature: impl FnMut(&str) -> bool, target: &Target) -> bool {
-    target.options.is_like_osx || (!target.options.is_like_windows && has_feature("thumb-mode"))
+    target.is_like_osx || (!target.is_like_windows && has_feature("thumb-mode"))
 }
 
 fn frame_pointer_r11(
diff --git a/compiler/rustc_target/src/asm/mips.rs b/compiler/rustc_target/src/asm/mips.rs
index 638c52d..b19489a 100644
--- a/compiler/rustc_target/src/asm/mips.rs
+++ b/compiler/rustc_target/src/asm/mips.rs
@@ -32,11 +32,12 @@
 
     pub fn supported_types(
         self,
-        _arch: InlineAsmArch,
+        arch: InlineAsmArch,
     ) -> &'static [(InlineAsmType, Option<&'static str>)] {
-        match self {
-            Self::reg => types! { _: I8, I16, I32, F32; },
-            Self::freg => types! { _: F32; },
+        match (self, arch) {
+            (Self::reg, InlineAsmArch::Mips64) => types! { _: I8, I16, I32, I64, F32, F64; },
+            (Self::reg, _) => types! { _: I8, I16, I32, F32; },
+            (Self::freg, _) => types! { _: F32, F64; },
         }
     }
 }
@@ -44,31 +45,31 @@
 // The reserved registers are somewhat taken from <https://git.io/JUR1k#L150>.
 def_regs! {
     Mips MipsInlineAsmReg MipsInlineAsmRegClass {
-        v0: reg = ["$2", "$v0"],
-        v1: reg = ["$3", "$v1"],
-        a0: reg = ["$4", "$a0"],
-        a1: reg = ["$5", "$a1"],
-        a2: reg = ["$6", "$a2"],
-        a3: reg = ["$7", "$a3"],
+        r2: reg = ["$2"],
+        r3: reg = ["$3"],
+        r4: reg = ["$4"],
+        r5: reg = ["$5"],
+        r6: reg = ["$6"],
+        r7: reg = ["$7"],
         // FIXME: Reserve $t0, $t1 if in mips16 mode.
-        t0: reg = ["$8", "$t0"],
-        t1: reg = ["$9", "$t1"],
-        t2: reg = ["$10", "$t2"],
-        t3: reg = ["$11", "$t3"],
-        t4: reg = ["$12", "$t4"],
-        t5: reg = ["$13", "$t5"],
-        t6: reg = ["$14", "$t6"],
-        t7: reg = ["$15", "$t7"],
-        s0: reg = ["$16", "$s0"],
-        s1: reg = ["$17", "$s1"],
-        s2: reg = ["$18", "$s2"],
-        s3: reg = ["$19", "$s3"],
-        s4: reg = ["$20", "$s4"],
-        s5: reg = ["$21", "$s5"],
-        s6: reg = ["$22", "$s6"],
-        s7: reg = ["$23", "$s7"],
-        t8: reg = ["$24", "$t8"],
-        t9: reg = ["$25", "$t9"],
+        r8: reg = ["$8"],
+        r9: reg = ["$9"],
+        r10: reg = ["$10"],
+        r11: reg = ["$11"],
+        r12: reg = ["$12"],
+        r13: reg = ["$13"],
+        r14: reg = ["$14"],
+        r15: reg = ["$15"],
+        r16: reg = ["$16"],
+        r17: reg = ["$17"],
+        r18: reg = ["$18"],
+        r19: reg = ["$19"],
+        r20: reg = ["$20"],
+        r21: reg = ["$21"],
+        r22: reg = ["$22"],
+        r23: reg = ["$23"],
+        r24: reg = ["$24"],
+        r25: reg = ["$25"],
         f0: freg = ["$f0"],
         f1: freg = ["$f1"],
         f2: freg = ["$f2"],
@@ -101,21 +102,21 @@
         f29: freg = ["$f29"],
         f30: freg = ["$f30"],
         f31: freg = ["$f31"],
-        #error = ["$0", "$zero"] =>
+        #error = ["$0"] =>
             "constant zero cannot be used as an operand for inline asm",
-        #error = ["$1", "$at"] =>
+        #error = ["$1"] =>
             "reserved for assembler (Assembler Temp)",
-        #error = ["$26", "$k0"] =>
+        #error = ["$26"] =>
             "OS-reserved register cannot be used as an operand for inline asm",
-        #error = ["$27", "$k1"] =>
+        #error = ["$27"] =>
             "OS-reserved register cannot be used as an operand for inline asm",
-        #error = ["$28", "$gp"] =>
+        #error = ["$28"] =>
             "the global pointer cannot be used as an operand for inline asm",
-        #error = ["$29", "$sp"] =>
+        #error = ["$29"] =>
             "the stack pointer cannot be used as an operand for inline asm",
-        #error = ["$30", "$s8", "$fp"] =>
+        #error = ["$30"] =>
             "the frame pointer cannot be used as an operand for inline asm",
-        #error = ["$31", "$ra"] =>
+        #error = ["$31"] =>
             "the return address register cannot be used as an operand for inline asm",
     }
 }
diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs
index e2f8e91..5ebd6c4 100644
--- a/compiler/rustc_target/src/asm/mod.rs
+++ b/compiler/rustc_target/src/asm/mod.rs
@@ -155,6 +155,7 @@
 mod mips;
 mod nvptx;
 mod riscv;
+mod spirv;
 mod x86;
 
 pub use aarch64::{AArch64InlineAsmReg, AArch64InlineAsmRegClass};
@@ -163,6 +164,7 @@
 pub use mips::{MipsInlineAsmReg, MipsInlineAsmRegClass};
 pub use nvptx::{NvptxInlineAsmReg, NvptxInlineAsmRegClass};
 pub use riscv::{RiscVInlineAsmReg, RiscVInlineAsmRegClass};
+pub use spirv::{SpirVInlineAsmReg, SpirVInlineAsmRegClass};
 pub use x86::{X86InlineAsmReg, X86InlineAsmRegClass};
 
 #[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, Hash)]
@@ -176,6 +178,8 @@
     Nvptx64,
     Hexagon,
     Mips,
+    Mips64,
+    SpirV,
 }
 
 impl FromStr for InlineAsmArch {
@@ -192,6 +196,8 @@
             "nvptx64" => Ok(Self::Nvptx64),
             "hexagon" => Ok(Self::Hexagon),
             "mips" => Ok(Self::Mips),
+            "mips64" => Ok(Self::Mips64),
+            "spirv" => Ok(Self::SpirV),
             _ => Err(()),
         }
     }
@@ -206,6 +212,7 @@
     Nvptx(NvptxInlineAsmReg),
     Hexagon(HexagonInlineAsmReg),
     Mips(MipsInlineAsmReg),
+    SpirV(SpirVInlineAsmReg),
 }
 
 impl InlineAsmReg {
@@ -259,9 +266,12 @@
             InlineAsmArch::Hexagon => {
                 Self::Hexagon(HexagonInlineAsmReg::parse(arch, has_feature, target, &name)?)
             }
-            InlineAsmArch::Mips => {
+            InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
                 Self::Mips(MipsInlineAsmReg::parse(arch, has_feature, target, &name)?)
             }
+            InlineAsmArch::SpirV => {
+                Self::SpirV(SpirVInlineAsmReg::parse(arch, has_feature, target, &name)?)
+            }
         })
     }
 
@@ -304,6 +314,7 @@
     Nvptx(NvptxInlineAsmRegClass),
     Hexagon(HexagonInlineAsmRegClass),
     Mips(MipsInlineAsmRegClass),
+    SpirV(SpirVInlineAsmRegClass),
 }
 
 impl InlineAsmRegClass {
@@ -316,6 +327,7 @@
             Self::Nvptx(r) => r.name(),
             Self::Hexagon(r) => r.name(),
             Self::Mips(r) => r.name(),
+            Self::SpirV(r) => r.name(),
         }
     }
 
@@ -331,6 +343,7 @@
             Self::Nvptx(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Nvptx),
             Self::Hexagon(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Hexagon),
             Self::Mips(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Mips),
+            Self::SpirV(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::SpirV),
         }
     }
 
@@ -353,6 +366,7 @@
             Self::Nvptx(r) => r.suggest_modifier(arch, ty),
             Self::Hexagon(r) => r.suggest_modifier(arch, ty),
             Self::Mips(r) => r.suggest_modifier(arch, ty),
+            Self::SpirV(r) => r.suggest_modifier(arch, ty),
         }
     }
 
@@ -371,6 +385,7 @@
             Self::Nvptx(r) => r.default_modifier(arch),
             Self::Hexagon(r) => r.default_modifier(arch),
             Self::Mips(r) => r.default_modifier(arch),
+            Self::SpirV(r) => r.default_modifier(arch),
         }
     }
 
@@ -388,6 +403,7 @@
             Self::Nvptx(r) => r.supported_types(arch),
             Self::Hexagon(r) => r.supported_types(arch),
             Self::Mips(r) => r.supported_types(arch),
+            Self::SpirV(r) => r.supported_types(arch),
         }
     }
 
@@ -409,7 +425,10 @@
                 InlineAsmArch::Hexagon => {
                     Self::Hexagon(HexagonInlineAsmRegClass::parse(arch, name)?)
                 }
-                InlineAsmArch::Mips => Self::Mips(MipsInlineAsmRegClass::parse(arch, name)?),
+                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
+                    Self::Mips(MipsInlineAsmRegClass::parse(arch, name)?)
+                }
+                InlineAsmArch::SpirV => Self::SpirV(SpirVInlineAsmRegClass::parse(arch, name)?),
             })
         })
     }
@@ -425,6 +444,7 @@
             Self::Nvptx(r) => r.valid_modifiers(arch),
             Self::Hexagon(r) => r.valid_modifiers(arch),
             Self::Mips(r) => r.valid_modifiers(arch),
+            Self::SpirV(r) => r.valid_modifiers(arch),
         }
     }
 }
@@ -474,10 +494,7 @@
 
 impl InlineAsmType {
     pub fn is_integer(self) -> bool {
-        match self {
-            Self::I8 | Self::I16 | Self::I32 | Self::I64 | Self::I128 => true,
-            _ => false,
-        }
+        matches!(self, Self::I8 | Self::I16 | Self::I32 | Self::I64 | Self::I128)
     }
 
     pub fn size(self) -> Size {
@@ -565,10 +582,15 @@
             hexagon::fill_reg_map(arch, has_feature, target, &mut map);
             map
         }
-        InlineAsmArch::Mips => {
+        InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
             let mut map = mips::regclass_map();
             mips::fill_reg_map(arch, has_feature, target, &mut map);
             map
         }
+        InlineAsmArch::SpirV => {
+            let mut map = spirv::regclass_map();
+            spirv::fill_reg_map(arch, has_feature, target, &mut map);
+            map
+        }
     }
 }
diff --git a/compiler/rustc_target/src/asm/spirv.rs b/compiler/rustc_target/src/asm/spirv.rs
new file mode 100644
index 0000000..da82749
--- /dev/null
+++ b/compiler/rustc_target/src/asm/spirv.rs
@@ -0,0 +1,46 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+
+def_reg_class! {
+    SpirV SpirVInlineAsmRegClass {
+        reg,
+    }
+}
+
+impl SpirVInlineAsmRegClass {
+    pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+        &[]
+    }
+
+    pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+        None
+    }
+
+    pub fn suggest_modifier(
+        self,
+        _arch: InlineAsmArch,
+        _ty: InlineAsmType,
+    ) -> Option<(char, &'static str)> {
+        None
+    }
+
+    pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+        None
+    }
+
+    pub fn supported_types(
+        self,
+        _arch: InlineAsmArch,
+    ) -> &'static [(InlineAsmType, Option<&'static str>)] {
+        match self {
+            Self::reg => {
+                types! { _: I8, I16, I32, I64, F32, F64; }
+            }
+        }
+    }
+}
+
+def_regs! {
+    // SPIR-V is SSA-based; it does not have registers.
+    SpirV SpirVInlineAsmReg SpirVInlineAsmRegClass {}
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
index 60daf10..7de809f 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
@@ -1,7 +1,7 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::apple_base::opts();
+pub fn target() -> Target {
+    let mut base = super::apple_base::opts("macos");
     base.cpu = "apple-a12".to_string();
     base.max_atomic_width = Some(128);
     base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-arch".to_string(), "arm64".to_string()]);
@@ -14,17 +14,11 @@
     let arch = "aarch64";
     let llvm_target = super::apple_base::macos_llvm_target(&arch);
 
-    Ok(Target {
+    Target {
         llvm_target,
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
         arch: arch.to_string(),
-        target_os: "macos".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "\u{1}mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base },
+    }
 }
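The target-spec hunks from here onward all follow the same pattern: `target()` returns `Target` directly instead of `TargetResult` (`Result<Target, String>`), `pointer_width` becomes a `u32`, and endian, OS, env, vendor, C int width, and linker flavor move into `TargetOptions` defaults (with the Apple base now taking the OS name as an argument). An illustrative shape of a spec after the refactor, inferred from these hunks:

    pub fn target() -> Target {
        Target {
            llvm_target: "aarch64-unknown-example".to_string(), // hypothetical triple
            pointer_width: 64, // was the string "64"
            data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
            arch: "aarch64".to_string(),
            // os, env, vendor, endian, linker_flavor, c_int_width now come from
            // TargetOptions (usually via a shared base such as linux_gnu_base::opts()).
            options: TargetOptions { max_atomic_width: Some(128), ..Default::default() },
        }
    }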
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios.rs
index 168cd01..2218c6c 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios.rs
@@ -1,19 +1,13 @@
 use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = opts(Arch::Arm64);
-    Ok(Target {
+pub fn target() -> Target {
+    let base = opts("ios", Arch::Arm64);
+    Target {
         llvm_target: "arm64-apple-ios".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "ios".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             features: "+neon,+fp-armv8,+apple-a7".to_string(),
             eliminate_frame_pointer: false,
@@ -33,5 +27,5 @@
                 .to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs b/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs
index 5e2cab0..a83de77 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs
@@ -1,19 +1,13 @@
 use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = opts(Arch::Arm64);
-    Ok(Target {
+pub fn target() -> Target {
+    let base = opts("tvos", Arch::Arm64);
+    Target {
         llvm_target: "arm64-apple-tvos".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "tvos".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             features: "+neon,+fp-armv8,+apple-a7".to_string(),
             eliminate_frame_pointer: false,
@@ -22,5 +16,5 @@
             forces_embed_bitcode: true,
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_fuchsia.rs b/compiler/rustc_target/src/spec/aarch64_fuchsia.rs
index aabfe45..1252741 100644
--- a/compiler/rustc_target/src/spec/aarch64_fuchsia.rs
+++ b/compiler/rustc_target/src/spec/aarch64_fuchsia.rs
@@ -1,20 +1,14 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::fuchsia_base::opts();
     base.max_atomic_width = Some(128);
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-fuchsia".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "fuchsia".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_linux_android.rs b/compiler/rustc_target/src/spec/aarch64_linux_android.rs
index e4ecc7a..fa6108d 100644
--- a/compiler/rustc_target/src/spec/aarch64_linux_android.rs
+++ b/compiler/rustc_target/src/spec/aarch64_linux_android.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
 // See https://developer.android.com/ndk/guides/abis.html#arm64-v8a
 // for target ABI requirements.
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::android_base::opts();
     base.max_atomic_width = Some(128);
     // As documented in http://developer.android.com/ndk/guides/cpu-features.html
     // the neon (ASIMD) and FP must exist on all android aarch64 targets.
     base.features = "+neon,+fp-armv8".to_string();
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-linux-android".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "android".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs
index 8c03f1e..1369d9d 100644
--- a/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_msvc_base::opts();
     base.max_atomic_width = Some(64);
     base.has_elf_tls = true;
     base.features = "+neon,+fp-armv8".to_string();
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-pc-windows-msvc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "msvc".to_string(),
-        target_vendor: "pc".to_string(),
-        linker_flavor: LinkerFlavor::Msvc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_cloudabi.rs b/compiler/rustc_target/src/spec/aarch64_unknown_cloudabi.rs
index 1278b89..67f69b4 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_cloudabi.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_cloudabi.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::cloudabi_base::opts();
     base.max_atomic_width = Some(128);
     base.unsupported_abis = super::arm_base::unsupported_abis();
     base.linker = Some("aarch64-unknown-cloudabi-cc".to_string());
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-cloudabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "cloudabi".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/aarch64_unknown_freebsd.rs
index 5ae592c..d48389d 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_freebsd.rs
@@ -1,20 +1,14 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::freebsd_base::opts();
     base.max_atomic_width = Some(128);
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-freebsd".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "freebsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs b/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs
index e07b8f7..44beb2f 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs
@@ -1,20 +1,14 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::hermit_base::opts();
     base.max_atomic_width = Some(128);
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-hermit".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "hermit".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs
index 0361622..58c72af 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.max_atomic_width = Some(128);
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_env: "gnu".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "linux".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}_mcount".to_string(),
+            mcount: "\u{1}_mcount".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs
index dc613f3..7bbfc8e 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.max_atomic_width = Some(128);
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-linux-musl".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_env: "musl".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "linux".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}_mcount".to_string(),
+            mcount: "\u{1}_mcount".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs
index 8c2f6fc..09efbdb 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs
@@ -1,21 +1,15 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::netbsd_base::opts();
     base.max_atomic_width = Some(128);
     base.unsupported_abis = super::arm_base::unsupported_abis();
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-netbsd".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "netbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "__mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "__mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_none.rs b/compiler/rustc_target/src/spec/aarch64_unknown_none.rs
index e012dce..d0ad451 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_none.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_none.rs
@@ -8,8 +8,10 @@
 
 use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
 
-pub fn target() -> Result<Target, String> {
+pub fn target() -> Target {
     let opts = TargetOptions {
+        vendor: String::new(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         linker: Some("rust-lld".to_owned()),
         features: "+strict-align,+neon,+fp-armv8".to_string(),
         executables: true,
@@ -21,17 +23,11 @@
         unsupported_abis: super::arm_base::unsupported_abis(),
         ..Default::default()
     };
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-none".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: opts,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs b/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs
index e2aa6e3..41bd218 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs
@@ -8,8 +8,10 @@
 
 use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
 
-pub fn target() -> Result<Target, String> {
+pub fn target() -> Target {
     let opts = TargetOptions {
+        vendor: String::new(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         linker: Some("rust-lld".to_owned()),
         features: "+strict-align,-neon,-fp-armv8".to_string(),
         executables: true,
@@ -21,17 +23,11 @@
         unsupported_abis: super::arm_base::unsupported_abis(),
         ..Default::default()
     };
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-none".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: opts,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/aarch64_unknown_openbsd.rs
index fd726c7..83ba1ec 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_openbsd.rs
@@ -1,21 +1,15 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::openbsd_base::opts();
     base.max_atomic_width = Some(128);
     base.unsupported_abis = super::arm_base::unsupported_abis();
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-openbsd".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "openbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_redox.rs b/compiler/rustc_target/src/spec/aarch64_unknown_redox.rs
index f347a2d..b9c9325 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_redox.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_redox.rs
@@ -1,20 +1,14 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::redox_base::opts();
     base.max_atomic_width = Some(128);
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-redox".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "redox".to_string(),
-        target_env: "relibc".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs b/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs
index 6a8d148..e0a81df 100644
--- a/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs
@@ -1,21 +1,15 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_uwp_msvc_base::opts();
     base.max_atomic_width = Some(64);
     base.has_elf_tls = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-pc-windows-msvc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "msvc".to_string(),
-        target_vendor: "uwp".to_string(),
-        linker_flavor: LinkerFlavor::Msvc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/aarch64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/aarch64_wrs_vxworks.rs
index 05f5d7d..beb8ce3 100644
--- a/compiler/rustc_target/src/spec/aarch64_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/aarch64_wrs_vxworks.rs
@@ -1,20 +1,14 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::vxworks_base::opts();
     base.max_atomic_width = Some(128);
 
-    Ok(Target {
+    Target {
         llvm_target: "aarch64-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
-        target_os: "vxworks".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "wrs".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/android_base.rs b/compiler/rustc_target/src/spec/android_base.rs
index 0ea99af..f6fbe7c 100644
--- a/compiler/rustc_target/src/spec/android_base.rs
+++ b/compiler/rustc_target/src/spec/android_base.rs
@@ -1,16 +1,18 @@
 use crate::spec::{LinkerFlavor, TargetOptions};
 
 pub fn opts() -> TargetOptions {
-    let mut base = super::linux_base::opts();
+    let mut base = super::linux_gnu_base::opts();
+    base.os = "android".to_string();
     // Many of the symbols defined in compiler-rt are also defined in libgcc.
     // Android's linker doesn't like that by default.
     base.pre_link_args
         .get_mut(&LinkerFlavor::Gcc)
         .unwrap()
         .push("-Wl,--allow-multiple-definition".to_string());
-    base.is_like_android = true;
+    base.dwarf_version = Some(2);
     base.position_independent_executables = true;
     base.has_elf_tls = false;
     base.requires_uwtable = true;
+    base.crt_static_respected = false;
     base
 }
diff --git a/compiler/rustc_target/src/spec/apple_base.rs b/compiler/rustc_target/src/spec/apple_base.rs
index e7b565a..e271a6d 100644
--- a/compiler/rustc_target/src/spec/apple_base.rs
+++ b/compiler/rustc_target/src/spec/apple_base.rs
@@ -2,7 +2,7 @@
 
 use crate::spec::{LinkArgs, TargetOptions};
 
-pub fn opts() -> TargetOptions {
+pub fn opts(os: &str) -> TargetOptions {
     // ELF TLS is only available in macOS 10.7+. If you try to compile for 10.6,
     // either the linker will complain if it is used or the binary will end up
     // segfaulting at runtime when run on 10.6. Rust by default supports macOS
@@ -17,12 +17,15 @@
     let version = macos_deployment_target();
 
     TargetOptions {
+        os: os.to_string(),
+        vendor: "apple".to_string(),
         // macOS has -dead_strip, which doesn't rely on function_sections
         function_sections: false,
         dynamic_linking: true,
         executables: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         is_like_osx: true,
+        dwarf_version: Some(2),
         has_rpath: true,
         dll_prefix: "lib".to_string(),
         dll_suffix: ".dylib".to_string(),
diff --git a/compiler/rustc_target/src/spec/apple_sdk_base.rs b/compiler/rustc_target/src/spec/apple_sdk_base.rs
index e34277d..092401f 100644
--- a/compiler/rustc_target/src/spec/apple_sdk_base.rs
+++ b/compiler/rustc_target/src/spec/apple_sdk_base.rs
@@ -31,13 +31,14 @@
     }
 }
 
-pub fn opts(arch: Arch) -> TargetOptions {
+pub fn opts(os: &str, arch: Arch) -> TargetOptions {
     TargetOptions {
         cpu: target_cpu(arch),
+        dynamic_linking: false,
         executables: true,
         link_env_remove: link_env_remove(arch),
         has_elf_tls: false,
         eliminate_frame_pointer: false,
-        ..super::apple_base::opts()
+        ..super::apple_base::opts(os)
     }
 }
diff --git a/compiler/rustc_target/src/spec/arm_linux_androideabi.rs b/compiler/rustc_target/src/spec/arm_linux_androideabi.rs
index 7109d04..4353756 100644
--- a/compiler/rustc_target/src/spec/arm_linux_androideabi.rs
+++ b/compiler/rustc_target/src/spec/arm_linux_androideabi.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::android_base::opts();
     // https://developer.android.com/ndk/guides/abis.html#armeabi
     base.features = "+strict-align,+v5te".to_string();
     base.max_atomic_width = Some(32);
 
-    Ok(Target {
+    Target {
         llvm_target: "arm-linux-androideabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "android".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs
index 2e3bad8..c41cf6e 100644
--- a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.max_atomic_width = Some(64);
-    Ok(Target {
+    Target {
         llvm_target: "arm-unknown-linux-gnueabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             features: "+strict-align,+v6".to_string(),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            mcount: "\u{1}__gnu_mcount_nc".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs
index f8e357c..f214396 100644
--- a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.max_atomic_width = Some(64);
-    Ok(Target {
+    Target {
         llvm_target: "arm-unknown-linux-gnueabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             features: "+strict-align,+v6,+vfp2,-d32".to_string(),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            mcount: "\u{1}__gnu_mcount_nc".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs
index 75753af..53ff100 100644
--- a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs
@@ -1,30 +1,24 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
 
     // Most of these settings are copied from the arm_unknown_linux_gnueabi
     // target.
     base.features = "+strict-align,+v6".to_string();
     base.max_atomic_width = Some(64);
-    Ok(Target {
+    Target {
         // It's important we use "gnueabi" and not "musleabi" here. LLVM uses it
         // to determine the calling convention and float ABI, and it doesn't
         // support the "musleabi" value.
         llvm_target: "arm-unknown-linux-gnueabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}mcount".to_string(),
+            mcount: "\u{1}mcount".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs
index c74c88e..6d8a5f9 100644
--- a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs
@@ -1,30 +1,24 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
 
     // Most of these settings are copied from the arm_unknown_linux_gnueabihf
     // target.
     base.features = "+strict-align,+v6,+vfp2,-d32".to_string();
     base.max_atomic_width = Some(64);
-    Ok(Target {
+    Target {
         // It's important we use "gnueabihf" and not "musleabihf" here. LLVM
         // uses it to determine the calling convention and float ABI, and it
         // doesn't support the "musleabihf" value.
         llvm_target: "arm-unknown-linux-gnueabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}mcount".to_string(),
+            mcount: "\u{1}mcount".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
index e0d1f26..3685630 100644
--- a/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
@@ -1,22 +1,19 @@
 // Targets the Big-endian Cortex-R4/R5 processor (ARMv7-R)
 
 use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "armebv7r-unknown-none-eabi".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: "".to_string(),
-        target_vendor: "".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            endian: "big".to_string(),
+            vendor: String::new(),
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             executables: true,
             linker: Some("rust-lld".to_owned()),
             relocation_model: RelocModel::Static,
@@ -26,5 +23,5 @@
             emit_debug_gdb_scripts: false,
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
index e2d37d4..2ff3c89 100644
--- a/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
@@ -1,22 +1,19 @@
 // Targets the Big-endian Cortex-R4F/R5F processor (ARMv7-R)
 
 use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "armebv7r-unknown-none-eabihf".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            endian: "big".to_string(),
+            vendor: String::new(),
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             executables: true,
             linker: Some("rust-lld".to_owned()),
             relocation_model: RelocModel::Static,
@@ -27,5 +24,5 @@
             emit_debug_gdb_scripts: false,
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs
index 2580e8b..e1ba72b 100644
--- a/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs
+++ b/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs
@@ -1,26 +1,21 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = super::linux_base::opts();
-    Ok(Target {
+pub fn target() -> Target {
+    let base = super::linux_gnu_base::opts();
+    Target {
         llvm_target: "armv4t-unknown-linux-gnueabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             features: "+soft-float,+strict-align".to_string(),
             // Atomic operations provided by compiler-builtins
             max_atomic_width: Some(32),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            has_thumb_interworking: true,
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs
index f28421d..3ac8d53 100644
--- a/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs
+++ b/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs
@@ -1,26 +1,21 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = super::linux_base::opts();
-    Ok(Target {
+pub fn target() -> Target {
+    let base = super::linux_gnu_base::opts();
+    Target {
         llvm_target: "armv5te-unknown-linux-gnueabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             features: "+soft-float,+strict-align".to_string(),
             // Atomic operations provided by compiler-builtins
             max_atomic_width: Some(32),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            has_thumb_interworking: true,
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs b/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs
index fe1fa88..40d405c 100644
--- a/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs
+++ b/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs
@@ -1,29 +1,24 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let base = super::linux_musl_base::opts();
-    Ok(Target {
+    Target {
         // It's important we use "gnueabi" and not "musleabi" here. LLVM uses
         // it to determine the calling convention and float ABI, and LLVM
         // doesn't support the "musleabi" value.
         llvm_target: "armv5te-unknown-linux-gnueabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             features: "+soft-float,+strict-align".to_string(),
             // Atomic operations provided by compiler-builtins
             max_atomic_width: Some(32),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}mcount".to_string(),
+            mcount: "\u{1}mcount".to_string(),
+            has_thumb_interworking: true,
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs b/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs
index 1e06f83..a149bd9 100644
--- a/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs
@@ -1,25 +1,20 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let base = super::freebsd_base::opts();
-    Ok(Target {
+    Target {
         llvm_target: "armv6-unknown-freebsd-gnueabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "freebsd".to_string(),
-        target_env: "gnueabihf".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
+            env: "gnueabihf".to_string(),
             features: "+v6,+vfp2,-d32".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            mcount: "\u{1}__gnu_mcount_nc".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs b/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs
index ef40085..6c81a45 100644
--- a/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs
@@ -1,25 +1,20 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::netbsd_base::opts();
     base.max_atomic_width = Some(64);
-    Ok(Target {
+    Target {
         llvm_target: "armv6-unknown-netbsdelf-eabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "netbsd".to_string(),
-        target_env: "eabihf".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
+            env: "eabihf".to_string(),
             features: "+v6,+vfp2,-d32".to_string(),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "__mcount".to_string(),
+            mcount: "__mcount".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_apple_ios.rs b/compiler/rustc_target/src/spec/armv7_apple_ios.rs
index 6dafcc2..051a394 100644
--- a/compiler/rustc_target/src/spec/armv7_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/armv7_apple_ios.rs
@@ -1,24 +1,18 @@
 use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = opts(Arch::Armv7);
-    Ok(Target {
+pub fn target() -> Target {
+    let base = opts("ios", Arch::Armv7);
+    Target {
         llvm_target: "armv7-apple-ios".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
         arch: "arm".to_string(),
-        target_os: "ios".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             features: "+v7,+vfp3,+neon".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs b/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs
index 38c6c31..9aa378a 100644
--- a/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs
+++ b/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
 // This target is for the baseline of the Android v7a ABI
 // in thumb mode. It's named armv7-* instead of thumbv7-*
@@ -8,23 +8,17 @@
 // See https://developer.android.com/ndk/guides/abis.html#v7a
 // for target ABI requirements.
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::android_base::opts();
     base.features = "+v7,+thumb-mode,+thumb2,+vfp3,-d32,-neon".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-march=armv7-a".to_string());
 
-    Ok(Target {
+    Target {
         llvm_target: "armv7-none-linux-android".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "android".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_cloudabi_eabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_cloudabi_eabihf.rs
index e3f4fe0..d47ee54 100644
--- a/compiler/rustc_target/src/spec/armv7_unknown_cloudabi_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7_unknown_cloudabi_eabihf.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::cloudabi_base::opts();
     base.cpu = "cortex-a8".to_string();
     base.max_atomic_width = Some(64);
@@ -8,17 +8,11 @@
     base.unsupported_abis = super::arm_base::unsupported_abis();
     base.linker = Some("armv7-unknown-cloudabi-eabihf-cc".to_string());
 
-    Ok(Target {
+    Target {
         llvm_target: "armv7-unknown-cloudabi-eabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "cloudabi".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "\u{1}mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs b/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs
index 80a9e6d..6f24c68 100644
--- a/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs
@@ -1,25 +1,20 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let base = super::freebsd_base::opts();
-    Ok(Target {
+    Target {
         llvm_target: "armv7-unknown-freebsd-gnueabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "freebsd".to_string(),
-        target_env: "gnueabihf".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
+            env: "gnueabihf".to_string(),
             features: "+v7,+vfp3,-d32,+thumb2,-neon".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            mcount: "\u{1}__gnu_mcount_nc".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs
index 0f175e9..ae6b8286 100644
--- a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs
@@ -1,29 +1,23 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
 // This target is for glibc Linux on ARMv7 without thumb-mode, NEON or
 // hardfloat.
 
-pub fn target() -> TargetResult {
-    let base = super::linux_base::opts();
-    Ok(Target {
+pub fn target() -> Target {
+    let base = super::linux_gnu_base::opts();
+    Target {
         llvm_target: "armv7-unknown-linux-gnueabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             features: "+v7,+thumb2,+soft-float,-neon".to_string(),
             cpu: "generic".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            mcount: "\u{1}__gnu_mcount_nc".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs
index 2792345..48c16b6 100644
--- a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs
@@ -1,21 +1,15 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
 // This target is for glibc Linux on ARMv7 without NEON or
 // thumb-mode. See the thumbv7neon variant for enabling both.
 
-pub fn target() -> TargetResult {
-    let base = super::linux_base::opts();
-    Ok(Target {
+pub fn target() -> Target {
+    let base = super::linux_gnu_base::opts();
+    Target {
         llvm_target: "armv7-unknown-linux-gnueabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             // Info about features at https://wiki.debian.org/ArmHardFloatPort
@@ -23,8 +17,8 @@
             cpu: "generic".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}__gnu_mcount_nc".to_string(),
+            mcount: "\u{1}__gnu_mcount_nc".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs
index 3d1bf05..9f9f1bd 100644
--- a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs
@@ -1,34 +1,28 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
 // This target is for musl Linux on ARMv7 without thumb-mode, NEON or
 // hardfloat.
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let base = super::linux_musl_base::opts();
     // Most of these settings are copied from the armv7_unknown_linux_gnueabi
     // target.
-    Ok(Target {
+    Target {
         // It's important we use "gnueabi" and not "musleabi" here. LLVM uses it
         // to determine the calling convention and float ABI, and it doesn't
         // support the "musleabi" value.
         llvm_target: "armv7-unknown-linux-gnueabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             features: "+v7,+thumb2,+soft-float,-neon".to_string(),
             cpu: "generic".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}mcount".to_string(),
+            mcount: "\u{1}mcount".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs
index 03d7d88..59deee3 100644
--- a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs
@@ -1,23 +1,17 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
 // This target is for musl Linux on ARMv7 without thumb-mode or NEON.
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let base = super::linux_musl_base::opts();
-    Ok(Target {
+    Target {
         // It's important we use "gnueabihf" and not "musleabihf" here. LLVM
         // uses it to determine the calling convention and float ABI, and LLVM
         // doesn't support the "musleabihf" value.
         llvm_target: "armv7-unknown-linux-gnueabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         // Most of these settings are copied from the armv7_unknown_linux_gnueabihf
         // target.
@@ -26,8 +20,8 @@
             cpu: "generic".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}mcount".to_string(),
+            mcount: "\u{1}mcount".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs
index 18fc9ed..6605257 100644
--- a/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs
@@ -1,26 +1,21 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let base = super::netbsd_base::opts();
-    Ok(Target {
+    Target {
         llvm_target: "armv7-unknown-netbsdelf-eabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "netbsd".to_string(),
-        target_env: "eabihf".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
+            env: "eabihf".to_string(),
             features: "+v7,+vfp3,-d32,+thumb2,-neon".to_string(),
             cpu: "generic".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "__mcount".to_string(),
+            mcount: "__mcount".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7_wrs_vxworks_eabihf.rs b/compiler/rustc_target/src/spec/armv7_wrs_vxworks_eabihf.rs
index 04d8702..6a43054 100644
--- a/compiler/rustc_target/src/spec/armv7_wrs_vxworks_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7_wrs_vxworks_eabihf.rs
@@ -1,18 +1,12 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let base = super::vxworks_base::opts();
-    Ok(Target {
+    Target {
         llvm_target: "armv7-unknown-linux-gnueabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "vxworks".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "wrs".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             // Info about features at https://wiki.debian.org/ArmHardFloatPort
             features: "+v7,+vfp3,-d32,+thumb2,-neon".to_string(),
@@ -21,5 +15,5 @@
             unsupported_abis: super::arm_base::unsupported_abis(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7a_none_eabi.rs b/compiler/rustc_target/src/spec/armv7a_none_eabi.rs
index 1db279d..742b403 100644
--- a/compiler/rustc_target/src/spec/armv7a_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/armv7a_none_eabi.rs
@@ -10,8 +10,8 @@
 // bare-metal binaries (the `gcc` linker has the advantage that it knows where C
 // libraries and crt*.o are but it's not much of an advantage here); LLD is also
 // faster
-// - `target_os` set to `none`. rationale: matches `thumb` targets
-// - `target_{env,vendor}` set to an empty string. rationale: matches `thumb`
+// - `os` set to `none`. rationale: matches `thumb` targets
+// - `env` and `vendor` are set to an empty string. rationale: matches `thumb`
 // targets
 // - `panic_strategy` set to `abort`. rationale: matches `thumb` targets
 // - `relocation-model` set to `static`; also no PIE, no relro and no dynamic
@@ -19,8 +19,10 @@
 
 use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
 
-pub fn target() -> Result<Target, String> {
+pub fn target() -> Target {
     let opts = TargetOptions {
+        vendor: String::new(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         linker: Some("rust-lld".to_owned()),
         features: "+v7,+thumb2,+soft-float,-neon,+strict-align".to_string(),
         executables: true,
@@ -32,17 +34,11 @@
         emit_debug_gdb_scripts: false,
         ..Default::default()
     };
-    Ok(Target {
+    Target {
         llvm_target: "armv7a-none-eabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: opts,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
index 22c2b30..b9cda18 100644
--- a/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
@@ -7,8 +7,10 @@
 
 use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
 
-pub fn target() -> Result<Target, String> {
+pub fn target() -> Target {
     let opts = TargetOptions {
+        vendor: String::new(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         linker: Some("rust-lld".to_owned()),
         features: "+v7,+vfp3,-d32,+thumb2,-neon,+strict-align".to_string(),
         executables: true,
@@ -20,17 +22,11 @@
         emit_debug_gdb_scripts: false,
         ..Default::default()
     };
-    Ok(Target {
+    Target {
         llvm_target: "armv7a-none-eabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: opts,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
index fed8399..440c243 100644
--- a/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
@@ -1,22 +1,18 @@
 // Targets the Little-endian Cortex-R4/R5 processor (ARMv7-R)
 
 use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "armv7r-unknown-none-eabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: "".to_string(),
-        target_vendor: "".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            vendor: String::new(),
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             executables: true,
             linker: Some("rust-lld".to_owned()),
             relocation_model: RelocModel::Static,
@@ -26,5 +22,5 @@
             emit_debug_gdb_scripts: false,
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
index 769ac13..c1bf332 100644
--- a/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
@@ -1,22 +1,18 @@
 // Targets the Little-endian Cortex-R4F/R5F processor (ARMv7-R)
 
 use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "armv7r-unknown-none-eabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: "".to_string(),
-        target_vendor: "".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            vendor: String::new(),
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             executables: true,
             linker: Some("rust-lld".to_owned()),
             relocation_model: RelocModel::Static,
@@ -27,5 +23,5 @@
             emit_debug_gdb_scripts: false,
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/armv7s_apple_ios.rs b/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
index d6c99c4..be74136 100644
--- a/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
@@ -1,24 +1,18 @@
 use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = opts(Arch::Armv7s);
-    Ok(Target {
+pub fn target() -> Target {
+    let base = opts("ios", Arch::Armv7s);
+    Target {
         llvm_target: "armv7s-apple-ios".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
         arch: "arm".to_string(),
-        target_os: "ios".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             features: "+v7,+vfp4,+neon".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs b/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs
index d3dbc39..b1adefe 100644
--- a/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs
+++ b/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs
@@ -1,12 +1,11 @@
 use super::{wasm32_unknown_emscripten, LinkerFlavor, Target};
 
-pub fn target() -> Result<Target, String> {
-    let mut target = wasm32_unknown_emscripten::target()?;
+pub fn target() -> Target {
+    let mut target = wasm32_unknown_emscripten::target();
     target
-        .options
         .post_link_args
         .entry(LinkerFlavor::Em)
         .or_default()
         .extend(vec!["-s".to_string(), "WASM=0".to_string()]);
-    Ok(target)
+    target
 }
diff --git a/compiler/rustc_target/src/spec/avr_gnu_base.rs b/compiler/rustc_target/src/spec/avr_gnu_base.rs
index 527a322..9cc1003 100644
--- a/compiler/rustc_target/src/spec/avr_gnu_base.rs
+++ b/compiler/rustc_target/src/spec/avr_gnu_base.rs
@@ -1,21 +1,17 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
 /// A base target for AVR devices using the GNU toolchain.
 ///
 /// Requires GNU avr-gcc and avr-binutils on the host system.
-pub fn target(target_cpu: String) -> TargetResult {
-    Ok(Target {
+pub fn target(target_cpu: String) -> Target {
+    Target {
         arch: "avr".to_string(),
         data_layout: "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8".to_string(),
         llvm_target: "avr-unknown-unknown".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "16".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        target_os: "unknown".to_string(),
-        target_env: "".to_string(),
-        target_vendor: "unknown".to_string(),
-        target_c_int_width: 16.to_string(),
+        pointer_width: 16,
         options: TargetOptions {
+            c_int_width: "16".to_string(),
+            os: "unknown".to_string(),
             cpu: target_cpu.clone(),
             exe_suffix: ".elf".to_string(),
 
@@ -49,5 +45,5 @@
             atomic_cas: false,
             ..TargetOptions::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/avr_unknown_gnu_atmega328.rs b/compiler/rustc_target/src/spec/avr_unknown_gnu_atmega328.rs
index 5d22598..7e63ae9 100644
--- a/compiler/rustc_target/src/spec/avr_unknown_gnu_atmega328.rs
+++ b/compiler/rustc_target/src/spec/avr_unknown_gnu_atmega328.rs
@@ -1,5 +1,5 @@
-use crate::spec::TargetResult;
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     super::avr_gnu_base::target("atmega328".to_owned())
 }
diff --git a/compiler/rustc_target/src/spec/cloudabi_base.rs b/compiler/rustc_target/src/spec/cloudabi_base.rs
index 3903943..20a0957 100644
--- a/compiler/rustc_target/src/spec/cloudabi_base.rs
+++ b/compiler/rustc_target/src/spec/cloudabi_base.rs
@@ -12,8 +12,9 @@
     );
 
     TargetOptions {
+        os: "cloudabi".to_string(),
         executables: true,
-        target_family: None,
+        os_family: None,
         linker_is_gnu: true,
         pre_link_args: args,
         position_independent_executables: true,
diff --git a/compiler/rustc_target/src/spec/crt_objects.rs b/compiler/rustc_target/src/spec/crt_objects.rs
index 8991691..76c0bf4 100644
--- a/compiler/rustc_target/src/spec/crt_objects.rs
+++ b/compiler/rustc_target/src/spec/crt_objects.rs
@@ -3,7 +3,7 @@
 //!
 //! Table of CRT objects for popular toolchains.
 //! The `crtx` ones are generally distributed with libc and the `begin/end` ones with gcc.
-//! See https://dev.gentoo.org/~vapier/crt.txt for some more details.
+//! See <https://dev.gentoo.org/~vapier/crt.txt> for some more details.
 //!
 //! | Pre-link CRT objects | glibc                  | musl                   | bionic           | mingw             | wasi |
 //! |----------------------|------------------------|------------------------|------------------|-------------------|------|
diff --git a/compiler/rustc_target/src/spec/dragonfly_base.rs b/compiler/rustc_target/src/spec/dragonfly_base.rs
index c7062e1..b96de7a 100644
--- a/compiler/rustc_target/src/spec/dragonfly_base.rs
+++ b/compiler/rustc_target/src/spec/dragonfly_base.rs
@@ -16,14 +16,16 @@
     );
 
     TargetOptions {
+        os: "dragonfly".to_string(),
         dynamic_linking: true,
         executables: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         linker_is_gnu: true,
         has_rpath: true,
         pre_link_args: args,
         position_independent_executables: true,
         relro_level: RelroLevel::Full,
+        dwarf_version: Some(2),
         ..Default::default()
     }
 }
diff --git a/compiler/rustc_target/src/spec/freebsd_base.rs b/compiler/rustc_target/src/spec/freebsd_base.rs
index d2a087a..c70c492 100644
--- a/compiler/rustc_target/src/spec/freebsd_base.rs
+++ b/compiler/rustc_target/src/spec/freebsd_base.rs
@@ -16,9 +16,10 @@
     );
 
     TargetOptions {
+        os: "freebsd".to_string(),
         dynamic_linking: true,
         executables: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         linker_is_gnu: true,
         has_rpath: true,
         pre_link_args: args,
@@ -26,6 +27,7 @@
         eliminate_frame_pointer: false, // FIXME 43575
         relro_level: RelroLevel::Full,
         abi_return_struct_as_int: true,
+        dwarf_version: Some(2),
         ..Default::default()
     }
 }
diff --git a/compiler/rustc_target/src/spec/fuchsia_base.rs b/compiler/rustc_target/src/spec/fuchsia_base.rs
index 6f432dc..e467c7c 100644
--- a/compiler/rustc_target/src/spec/fuchsia_base.rs
+++ b/compiler/rustc_target/src/spec/fuchsia_base.rs
@@ -20,11 +20,14 @@
     );
 
     TargetOptions {
+        os: "fuchsia".to_string(),
+        vendor: String::new(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         linker: Some("rust-lld".to_owned()),
         lld_flavor: LldFlavor::Ld,
         dynamic_linking: true,
         executables: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         is_like_fuchsia: true,
         linker_is_gnu: true,
         has_rpath: false,
diff --git a/compiler/rustc_target/src/spec/haiku_base.rs b/compiler/rustc_target/src/spec/haiku_base.rs
index 3d7ae6c..ec87645 100644
--- a/compiler/rustc_target/src/spec/haiku_base.rs
+++ b/compiler/rustc_target/src/spec/haiku_base.rs
@@ -2,10 +2,11 @@
 
 pub fn opts() -> TargetOptions {
     TargetOptions {
+        os: "haiku".to_string(),
         dynamic_linking: true,
         executables: true,
         has_rpath: false,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         relro_level: RelroLevel::Full,
         linker_is_gnu: true,
         ..Default::default()
diff --git a/compiler/rustc_target/src/spec/hermit_base.rs b/compiler/rustc_target/src/spec/hermit_base.rs
index e063c94..a75158a 100644
--- a/compiler/rustc_target/src/spec/hermit_base.rs
+++ b/compiler/rustc_target/src/spec/hermit_base.rs
@@ -9,6 +9,8 @@
     );
 
     TargetOptions {
+        os: "hermit".to_string(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         linker: Some("rust-lld".to_owned()),
         executables: true,
         has_elf_tls: true,
@@ -18,7 +20,7 @@
         position_independent_executables: true,
         static_position_independent_executables: true,
         relocation_model: RelocModel::Pic,
-        target_family: None,
+        os_family: None,
         tls_model: TlsModel::InitialExec,
         ..Default::default()
     }
diff --git a/compiler/rustc_target/src/spec/hermit_kernel_base.rs b/compiler/rustc_target/src/spec/hermit_kernel_base.rs
index 01b9f75..622f0d9 100644
--- a/compiler/rustc_target/src/spec/hermit_kernel_base.rs
+++ b/compiler/rustc_target/src/spec/hermit_kernel_base.rs
@@ -9,6 +9,8 @@
     );
 
     TargetOptions {
+        os: "hermit".to_string(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         disable_redzone: true,
         linker: Some("rust-lld".to_owned()),
         executables: true,
@@ -19,7 +21,7 @@
         position_independent_executables: true,
         static_position_independent_executables: true,
         relocation_model: RelocModel::Pic,
-        target_family: None,
+        os_family: None,
         tls_model: TlsModel::InitialExec,
         ..Default::default()
     }
diff --git a/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs
index 0976acb..73d5e20 100644
--- a/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkArgs, LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkArgs, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.cpu = "hexagonv60".to_string();
     base.max_atomic_width = Some(32);
@@ -17,11 +17,9 @@
     base.pre_link_args = LinkArgs::new();
     base.post_link_args = LinkArgs::new();
 
-    Ok(Target {
+    Target {
         llvm_target: "hexagon-unknown-linux-musl".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: concat!(
             "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32",
             ":32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32",
@@ -30,10 +28,6 @@
         )
         .to_string(),
         arch: "hexagon".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i386_apple_ios.rs b/compiler/rustc_target/src/spec/i386_apple_ios.rs
index 6cb209a..302306e 100644
--- a/compiler/rustc_target/src/spec/i386_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/i386_apple_ios.rs
@@ -1,21 +1,15 @@
 use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = opts(Arch::I386);
-    Ok(Target {
+pub fn target() -> Target {
+    let base = opts("ios", Arch::I386);
+    Target {
         llvm_target: "i386-apple-ios".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:128-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "ios".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { max_atomic_width: Some(64), stack_probes: true, ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs
index ba712ac..4a7779a 100644
--- a/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs
@@ -1,8 +1,8 @@
-use crate::spec::TargetResult;
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
-    let mut base = super::i686_pc_windows_msvc::target()?;
-    base.options.cpu = "pentium".to_string();
+pub fn target() -> Target {
+    let mut base = super::i686_pc_windows_msvc::target();
+    base.cpu = "pentium".to_string();
     base.llvm_target = "i586-pc-windows-msvc".to_string();
-    Ok(base)
+    base
 }
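Note: `base` here is now a `Target`, yet the patch writes `base.cpu` where the old code wrote `base.options.cpu`. This works because the same patch (see the `TargetOptions` doc comment in the `mod.rs` hunk below) routes field access from `Target` to `TargetOptions` through `Deref`/`DerefMut` impls. A minimal sketch of that pattern, using simplified structs rather than the full definitions from `mod.rs`:

    use std::ops::{Deref, DerefMut};

    struct TargetOptions {
        cpu: String,
    }

    struct Target {
        llvm_target: String,
        options: TargetOptions,
    }

    impl Deref for Target {
        type Target = TargetOptions;
        fn deref(&self) -> &TargetOptions {
            &self.options
        }
    }

    impl DerefMut for Target {
        fn deref_mut(&mut self) -> &mut TargetOptions {
            &mut self.options
        }
    }

    fn main() {
        let mut base = Target {
            llvm_target: "i686-pc-windows-msvc".to_string(),
            options: TargetOptions { cpu: "pentium4".to_string() },
        };
        // Resolves through DerefMut to `base.options.cpu`, matching the diff above.
        base.cpu = "pentium".to_string();
        println!("{} cpu={}", base.llvm_target, base.cpu);
    }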
diff --git a/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs
index 49f4f2c..7c92dda 100644
--- a/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs
@@ -1,8 +1,8 @@
-use crate::spec::TargetResult;
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
-    let mut base = super::i686_unknown_linux_gnu::target()?;
-    base.options.cpu = "pentium".to_string();
+pub fn target() -> Target {
+    let mut base = super::i686_unknown_linux_gnu::target();
+    base.cpu = "pentium".to_string();
     base.llvm_target = "i586-unknown-linux-gnu".to_string();
-    Ok(base)
+    base
 }
diff --git a/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs
index 0f2cceb..1fea02b 100644
--- a/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs
@@ -1,8 +1,8 @@
-use crate::spec::TargetResult;
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
-    let mut base = super::i686_unknown_linux_musl::target()?;
-    base.options.cpu = "pentium".to_string();
+pub fn target() -> Target {
+    let mut base = super::i686_unknown_linux_musl::target();
+    base.cpu = "pentium".to_string();
     base.llvm_target = "i586-unknown-linux-musl".to_string();
-    Ok(base)
+    base
 }
diff --git a/compiler/rustc_target/src/spec/i686_apple_darwin.rs b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
index b7a34f9..0ab4034 100644
--- a/compiler/rustc_target/src/spec/i686_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
@@ -1,7 +1,7 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::apple_base::opts();
+pub fn target() -> Target {
+    let mut base = super::apple_base::opts("macos");
     base.cpu = "yonah".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m32".to_string()]);
@@ -15,19 +15,13 @@
     let arch = "i686";
     let llvm_target = super::apple_base::macos_llvm_target(&arch);
 
-    Ok(Target {
+    Target {
         llvm_target,
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:128-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "macos".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "\u{1}mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_linux_android.rs b/compiler/rustc_target/src/spec/i686_linux_android.rs
index 79242f2..52059b9 100644
--- a/compiler/rustc_target/src/spec/i686_linux_android.rs
+++ b/compiler/rustc_target/src/spec/i686_linux_android.rs
@@ -1,9 +1,9 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
 // See https://developer.android.com/ndk/guides/abis.html#x86
 // for target ABI requirements.
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::android_base::opts();
 
     base.max_atomic_width = Some(64);
@@ -13,19 +13,13 @@
     base.features = "+mmx,+sse,+sse2,+sse3,+ssse3".to_string();
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-linux-android".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:32-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "android".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs b/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs
index 33c9008..4979a5b 100644
--- a/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs
+++ b/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, LldFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_gnu_base::opts();
     base.cpu = "pentium4".to_string();
     base.pre_link_args
@@ -16,19 +16,13 @@
         .unwrap()
         .push("-Wl,--large-address-aware".to_string());
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-pc-windows-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             i64:64-f80:32-n8:16:32-a:0:32-S32"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "pc".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs
index 9d0922b..e7a5643 100644
--- a/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, LldFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_msvc_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
@@ -20,19 +20,13 @@
         .unwrap()
         .extend(pre_link_args_msvc);
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-pc-windows-msvc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             i64:64-f80:32-n8:16:32-a:0:32-S32"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "msvc".to_string(),
-        target_vendor: "pc".to_string(),
-        linker_flavor: LinkerFlavor::Msvc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_unknown_cloudabi.rs b/compiler/rustc_target/src/spec/i686_unknown_cloudabi.rs
index 729b1f6..0cdb9f9 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_cloudabi.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_cloudabi.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::cloudabi_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
@@ -8,19 +8,13 @@
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-unknown-cloudabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:32-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "cloudabi".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs b/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
index 60f2188..fc1c860 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::freebsd_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
@@ -9,19 +9,13 @@
     pre_link_args.push("-Wl,-znotext".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-unknown-freebsd".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:32-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "freebsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_unknown_haiku.rs b/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
index 4dc27af..22c8ba5 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::haiku_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m32".to_string()]);
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-unknown-haiku".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:32-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "haiku".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
index 0d578f2..083c115 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:32-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
index 699a0ab..1673b2a 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
@@ -22,19 +22,13 @@
     // https://llvm.org/bugs/show_bug.cgi?id=30879
     base.eliminate_frame_pointer = false;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-unknown-linux-musl".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:32-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
index 88b1ae7..c22139b 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::netbsd_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-unknown-netbsdelf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:32-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "netbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "__mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "__mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs b/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
index 829cd1a..87642ef 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::openbsd_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
@@ -8,19 +8,13 @@
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-fuse-ld=lld".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-unknown-openbsd".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:32-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "openbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_unknown_uefi.rs b/compiler/rustc_target/src/spec/i686_unknown_uefi.rs
index 221d5f0..5af3a6b 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_uefi.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_uefi.rs
@@ -5,9 +5,9 @@
 // The cdecl ABI is used. It differs from the stdcall or fastcall ABI.
 // "i686-unknown-windows" is used to get the minimal subset of windows-specific features.
 
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::uefi_msvc_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
@@ -76,20 +76,14 @@
     // As a result, we choose -gnu for the i686 version before those intrinsics are implemented in
     // compiler-builtins. After compiler-builtins implements all required intrinsics, we may
     // remove -gnu and use the default one.
-    Ok(Target {
+    Target {
         llvm_target: "i686-unknown-windows-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             i64:64-f80:32-n8:16:32-a:0:32-S32"
             .to_string(),
-        target_os: "uefi".to_string(),
-        target_env: "".to_string(),
-        target_vendor: "unknown".to_string(),
         arch: "x86".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Link),
 
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs b/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs
index 1c6d2e0..a3de93e 100644
--- a/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs
+++ b/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, LldFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_uwp_gnu_base::opts();
     base.cpu = "pentium4".to_string();
     base.pre_link_args
@@ -15,19 +15,13 @@
         .unwrap()
         .push("-Wl,--large-address-aware".to_string());
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-pc-windows-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             i64:64-f80:32-n8:16:32-a:0:32-S32"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "uwp".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_uwp_windows_msvc.rs b/compiler/rustc_target/src/spec/i686_uwp_windows_msvc.rs
index ed2dba5..ce6200b 100644
--- a/compiler/rustc_target/src/spec/i686_uwp_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/i686_uwp_windows_msvc.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_uwp_msvc_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
     base.has_elf_tls = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-pc-windows-msvc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             i64:64-f80:32-n8:16:32-a:0:32-S32"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "msvc".to_string(),
-        target_vendor: "uwp".to_string(),
-        linker_flavor: LinkerFlavor::Msvc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs b/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
index f5f66ca..c082535 100644
--- a/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::vxworks_base::opts();
     base.cpu = "pentium4".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "i686-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             f64:32:64-f80:32-n8:16:32-S128"
             .to_string(),
         arch: "x86".to_string(),
-        target_os: "vxworks".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "wrs".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/illumos_base.rs b/compiler/rustc_target/src/spec/illumos_base.rs
index 214142b..d9b5716 100644
--- a/compiler/rustc_target/src/spec/illumos_base.rs
+++ b/compiler/rustc_target/src/spec/illumos_base.rs
@@ -16,10 +16,11 @@
     );
 
     TargetOptions {
+        os: "illumos".to_string(),
         dynamic_linking: true,
         executables: true,
         has_rpath: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         is_like_solaris: true,
         limit_rdylib_exports: false, // Linker doesn't support this
         eliminate_frame_pointer: false,
diff --git a/compiler/rustc_target/src/spec/l4re_base.rs b/compiler/rustc_target/src/spec/l4re_base.rs
index 5caad10..660fae5 100644
--- a/compiler/rustc_target/src/spec/l4re_base.rs
+++ b/compiler/rustc_target/src/spec/l4re_base.rs
@@ -17,12 +17,15 @@
     args.insert(LinkerFlavor::Gcc, vec![]);
 
     TargetOptions {
+        os: "l4re".to_string(),
+        env: "uclibc".to_string(),
+        linker_flavor: LinkerFlavor::Ld,
         executables: true,
         has_elf_tls: false,
         panic_strategy: PanicStrategy::Abort,
         linker: Some("ld".to_string()),
         pre_link_args: args,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         ..Default::default()
     }
 }
diff --git a/compiler/rustc_target/src/spec/linux_base.rs b/compiler/rustc_target/src/spec/linux_base.rs
index 52892fc..0631644 100644
--- a/compiler/rustc_target/src/spec/linux_base.rs
+++ b/compiler/rustc_target/src/spec/linux_base.rs
@@ -19,15 +19,17 @@
     );
 
     TargetOptions {
+        os: "linux".to_string(),
         dynamic_linking: true,
         executables: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         linker_is_gnu: true,
         has_rpath: true,
         pre_link_args: args,
         position_independent_executables: true,
         relro_level: RelroLevel::Full,
         has_elf_tls: true,
+        crt_static_respected: true,
         ..Default::default()
     }
 }
diff --git a/compiler/rustc_target/src/spec/linux_gnu_base.rs b/compiler/rustc_target/src/spec/linux_gnu_base.rs
new file mode 100644
index 0000000..3d940ce
--- /dev/null
+++ b/compiler/rustc_target/src/spec/linux_gnu_base.rs
@@ -0,0 +1,5 @@
+use crate::spec::TargetOptions;
+
+pub fn opts() -> TargetOptions {
+    TargetOptions { env: "gnu".to_string(), ..super::linux_base::opts() }
+}
diff --git a/compiler/rustc_target/src/spec/linux_kernel_base.rs b/compiler/rustc_target/src/spec/linux_kernel_base.rs
index 6d929d1..a5fc164 100644
--- a/compiler/rustc_target/src/spec/linux_kernel_base.rs
+++ b/compiler/rustc_target/src/spec/linux_kernel_base.rs
@@ -8,6 +8,7 @@
     );
 
     TargetOptions {
+        env: "gnu".to_string(),
         disable_redzone: true,
         panic_strategy: PanicStrategy::Abort,
         stack_probes: true,
diff --git a/compiler/rustc_target/src/spec/linux_musl_base.rs b/compiler/rustc_target/src/spec/linux_musl_base.rs
index b90e91d..5038a96 100644
--- a/compiler/rustc_target/src/spec/linux_musl_base.rs
+++ b/compiler/rustc_target/src/spec/linux_musl_base.rs
@@ -4,14 +4,13 @@
 pub fn opts() -> TargetOptions {
     let mut base = super::linux_base::opts();
 
+    base.env = "musl".to_string();
     base.pre_link_objects_fallback = crt_objects::pre_musl_fallback();
     base.post_link_objects_fallback = crt_objects::post_musl_fallback();
     base.crt_objects_fallback = Some(CrtObjectsFallback::Musl);
 
     // These targets statically link libc by default
     base.crt_static_default = true;
-    // These targets allow the user to choose between static and dynamic linking.
-    base.crt_static_respected = true;
 
     base
 }
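Note: `crt_static_respected` is dropped here because linux_base::opts() (see the linux_base.rs hunk above) now sets it for all Linux targets, so after this change glibc targets should also honor `-C target-feature=+crt-static`. A minimal sketch (not part of this patch) of observing that flag at compile time:

    // Reports whether the C runtime is statically linked into this build.
    // Only meaningful on targets whose spec sets `crt_static_respected`.
    pub fn crt_is_static() -> bool {
        cfg!(target_feature = "crt-static")
    }

    fn main() {
        println!("statically linked C runtime: {}", crt_is_static());
    }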
diff --git a/compiler/rustc_target/src/spec/linux_uclibc_base.rs b/compiler/rustc_target/src/spec/linux_uclibc_base.rs
new file mode 100644
index 0000000..ef6d506
--- /dev/null
+++ b/compiler/rustc_target/src/spec/linux_uclibc_base.rs
@@ -0,0 +1,5 @@
+use crate::spec::TargetOptions;
+
+pub fn opts() -> TargetOptions {
+    TargetOptions { env: "uclibc".to_string(), ..super::linux_base::opts() }
+}
diff --git a/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs
index b2ea8a6..daa0d9d 100644
--- a/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs
+++ b/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs
@@ -1,25 +1,20 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mips64-unknown-linux-gnuabi64".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
         arch: "mips64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
+            endian: "big".to_string(),
             // NOTE(mips64r2) matches C toolchain
             cpu: "mips64r2".to_string(),
             features: "+mips64r2".to_string(),
             max_atomic_width: Some(64),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs b/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs
index 17584de..db8d0c0 100644
--- a/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs
+++ b/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.cpu = "mips64r2".to_string();
     base.features = "+mips64r2".to_string();
     base.max_atomic_width = Some(64);
-    Ok(Target {
+    Target {
         // LLVM doesn't recognize "muslabi64" yet.
         llvm_target: "mips64-unknown-linux-musl".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
         arch: "mips64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs
index 48aea4a3..d767705 100644
--- a/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs
+++ b/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mips64el-unknown-linux-gnuabi64".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
         arch: "mips64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             // NOTE(mips64r2) matches C toolchain
             cpu: "mips64r2".to_string(),
             features: "+mips64r2".to_string(),
             max_atomic_width: Some(64),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs b/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs
index c7a849a..766ed69 100644
--- a/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs
+++ b/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.cpu = "mips64r2".to_string();
     base.features = "+mips64r2".to_string();
     base.max_atomic_width = Some(64);
-    Ok(Target {
+    Target {
         // LLVM doesn't recognize "muslabi64" yet.
         llvm_target: "mips64el-unknown-linux-musl".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
         arch: "mips64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs
index e360abd..a7ec1f1 100644
--- a/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs
@@ -1,24 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mips-unknown-linux-gnu".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
+            endian: "big".to_string(),
             cpu: "mips32r2".to_string(),
             features: "+mips32r2,+fpxx,+nooddspreg".to_string(),
             max_atomic_width: Some(32),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs
index c8d97e6..1ebe577 100644
--- a/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.cpu = "mips32r2".to_string();
     base.features = "+mips32r2,+soft-float".to_string();
     base.max_atomic_width = Some(32);
     base.crt_static_default = false;
-    Ok(Target {
+    Target {
         llvm_target: "mips-unknown-linux-musl".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs b/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs
index 8116b8c..2123d5e 100644
--- a/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs
+++ b/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs
@@ -1,24 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mips-unknown-linux-uclibc".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "uclibc".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
+            endian: "big".to_string(),
             cpu: "mips32r2".to_string(),
             features: "+mips32r2,+soft-float".to_string(),
             max_atomic_width: Some(32),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_uclibc_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mipsel_sony_psp.rs b/compiler/rustc_target/src/spec/mipsel_sony_psp.rs
index b3bda97..08c290e 100644
--- a/compiler/rustc_target/src/spec/mipsel_sony_psp.rs
+++ b/compiler/rustc_target/src/spec/mipsel_sony_psp.rs
@@ -1,26 +1,23 @@
 use crate::spec::{LinkArgs, LinkerFlavor, LldFlavor, RelocModel};
-use crate::spec::{Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
 // The PSP has custom linker requirements.
 const LINKER_SCRIPT: &str = include_str!("./mipsel_sony_psp_linker_script.ld");
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut pre_link_args = LinkArgs::new();
     pre_link_args.insert(LinkerFlavor::Lld(LldFlavor::Ld), vec!["--emit-relocs".to_string()]);
 
-    Ok(Target {
+    Target {
         llvm_target: "mipsel-sony-psp".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
-        target_os: "psp".to_string(),
-        target_env: "".to_string(),
-        target_vendor: "sony".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            os: "psp".to_string(),
+            vendor: "sony".to_string(),
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             cpu: "mips2".to_string(),
             executables: true,
             linker: Some("rust-lld".to_owned()),
@@ -36,5 +33,5 @@
             link_script: Some(LINKER_SCRIPT.to_string()),
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs
index 7e9d8cd..9cb2a13 100644
--- a/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mipsel-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             cpu: "mips32r2".to_string(),
             features: "+mips32r2,+fpxx,+nooddspreg".to_string(),
             max_atomic_width: Some(32),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs
index f70cc13..3374cdd 100644
--- a/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.cpu = "mips32r2".to_string();
     base.features = "+mips32r2,+soft-float".to_string();
     base.max_atomic_width = Some(32);
     base.crt_static_default = false;
-    Ok(Target {
+    Target {
         llvm_target: "mipsel-unknown-linux-musl".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs b/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs
index a815201..0831eb7a 100644
--- a/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mipsel-unknown-linux-uclibc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "uclibc".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             cpu: "mips32r2".to_string(),
             features: "+mips32r2,+soft-float".to_string(),
             max_atomic_width: Some(32),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_uclibc_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_none.rs b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
new file mode 100644
index 0000000..a800592
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
@@ -0,0 +1,38 @@
+//! Bare MIPS32r2, little endian, softfloat, O32 calling convention
+//!
+//! Can be used for MIPS M4K core (e.g. on PIC32MX devices)
+
+use crate::spec::abi::Abi;
+use crate::spec::{LinkerFlavor, LldFlavor, RelocModel};
+use crate::spec::{PanicStrategy, Target, TargetOptions};
+
+pub fn target() -> Target {
+    Target {
+        llvm_target: "mipsel-unknown-none".to_string(),
+        pointer_width: 32,
+        data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
+        arch: "mips".to_string(),
+
+        options: TargetOptions {
+            vendor: String::new(),
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+            cpu: "mips32r2".to_string(),
+            features: "+mips32r2,+soft-float,+noabicalls".to_string(),
+            max_atomic_width: Some(32),
+            executables: true,
+            linker: Some("rust-lld".to_owned()),
+            panic_strategy: PanicStrategy::Abort,
+            relocation_model: RelocModel::Static,
+            unsupported_abis: vec![
+                Abi::Stdcall,
+                Abi::Fastcall,
+                Abi::Vectorcall,
+                Abi::Thiscall,
+                Abi::Win64,
+                Abi::SysV64,
+            ],
+            emit_debug_gdb_scripts: false,
+            ..Default::default()
+        },
+    }
+}
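Note: the new `mipsel-unknown-none` spec leaves `os` at its default ("none"), aborts on panic, and uses static relocation, so crates built for it are freestanding. A minimal, hypothetical `#![no_std]` program of the kind this target is intended for (the `_start` symbol name is an assumption; the real entry point depends on the linker script and runtime in use):

    #![no_std]
    #![no_main]

    use core::panic::PanicInfo;

    // Hypothetical bare-metal entry point.
    #[no_mangle]
    pub extern "C" fn _start() -> ! {
        loop {}
    }

    // A panic handler is still required in a no_std binary, even with panic=abort.
    #[panic_handler]
    fn panic(_info: &PanicInfo) -> ! {
        loop {}
    }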
diff --git a/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs
index 36b83c6..11b3734 100644
--- a/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs
@@ -1,24 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mipsisa32r6-unknown-linux-gnu".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
+            endian: "big".to_string(),
             cpu: "mips32r6".to_string(),
             features: "+mips32r6".to_string(),
             max_atomic_width: Some(32),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs
index 717ae3f..06a5f40 100644
--- a/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mipsisa32r6el-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             cpu: "mips32r6".to_string(),
             features: "+mips32r6".to_string(),
             max_atomic_width: Some(32),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs
index 3f7d233..6282c9e 100644
--- a/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs
+++ b/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs
@@ -1,25 +1,20 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mipsisa64r6-unknown-linux-gnuabi64".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
         arch: "mips64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
+            endian: "big".to_string(),
             // NOTE(mips64r6) matches C toolchain
             cpu: "mips64r6".to_string(),
             features: "+mips64r6".to_string(),
             max_atomic_width: Some(64),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs
index 4f41b83..589d7ac 100644
--- a/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs
+++ b/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "mipsisa64r6el-unknown-linux-gnuabi64".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
         arch: "mips64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             // NOTE(mips64r6) matches C toolchain
             cpu: "mips64r6".to_string(),
             features: "+mips64r6".to_string(),
             max_atomic_width: Some(64),
-            target_mcount: "_mcount".to_string(),
+            mcount: "_mcount".to_string(),
 
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index f1e8330..f949bf9 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -37,7 +37,9 @@
 use crate::spec::abi::{lookup as lookup_abi, Abi};
 use crate::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
 use rustc_serialize::json::{Json, ToJson};
+use rustc_span::symbol::{sym, Symbol};
 use std::collections::BTreeMap;
+use std::ops::{Deref, DerefMut};
 use std::path::{Path, PathBuf};
 use std::str::FromStr;
 use std::{fmt, io};
@@ -62,8 +64,10 @@
 mod illumos_base;
 mod l4re_base;
 mod linux_base;
+mod linux_gnu_base;
 mod linux_kernel_base;
 mod linux_musl_base;
+mod linux_uclibc_base;
 mod msvc_base;
 mod netbsd_base;
 mod openbsd_base;
@@ -174,6 +178,13 @@
             PanicStrategy::Abort => "abort",
         }
     }
+
+    pub fn desc_symbol(&self) -> Symbol {
+        match *self {
+            PanicStrategy::Unwind => sym::unwind,
+            PanicStrategy::Abort => sym::abort,
+        }
+    }
 }
 
 impl ToJson for PanicStrategy {
@@ -430,48 +441,23 @@
     }
 }
 
-pub enum LoadTargetError {
-    BuiltinTargetNotFound(String),
-    Other(String),
-}
-
 pub type LinkArgs = BTreeMap<LinkerFlavor, Vec<String>>;
-pub type TargetResult = Result<Target, String>;
 
 macro_rules! supported_targets {
     ( $(($( $triple:literal, )+ $module:ident ),)+ ) => {
         $(mod $module;)+
 
         /// List of supported targets
-        const TARGETS: &[&str] = &[$($($triple),+),+];
+        pub const TARGETS: &[&str] = &[$($($triple),+),+];
 
-        fn load_specific(target: &str) -> Result<Target, LoadTargetError> {
-            match target {
-                $(
-                    $($triple)|+ => {
-                        let mut t = $module::target()
-                            .map_err(LoadTargetError::Other)?;
-                        t.options.is_builtin = true;
-
-                        // round-trip through the JSON parser to ensure at
-                        // run-time that the parser works correctly
-                        t = Target::from_json(t.to_json())
-                            .map_err(LoadTargetError::Other)?;
-                        debug!("got builtin target: {:?}", t);
-                        Ok(t)
-                    },
-                )+
-                    _ => Err(LoadTargetError::BuiltinTargetNotFound(
-                        format!("Unable to find target: {}", target)))
-            }
-        }
-
-        pub fn get_targets() -> impl Iterator<Item = String> {
-            TARGETS.iter().filter_map(|t| -> Option<String> {
-                load_specific(t)
-                    .and(Ok(t.to_string()))
-                    .ok()
-            })
+        fn load_builtin(target: &str) -> Option<Target> {
+            let mut t = match target {
+                $( $($triple)|+ => $module::target(), )+
+                _ => return None,
+            };
+            t.is_builtin = true;
+            debug!("got builtin target: {:?}", t);
+            Some(t)
         }
 
         #[cfg(test)]
@@ -678,6 +664,7 @@
     ("powerpc64-wrs-vxworks", powerpc64_wrs_vxworks),
 
     ("mipsel-sony-psp", mipsel_sony_psp),
+    ("mipsel-unknown-none", mipsel_unknown_none),
     ("thumbv4t-none-eabi", thumbv4t_none_eabi),
 }
 
@@ -688,26 +675,13 @@
 pub struct Target {
     /// Target triple to pass to LLVM.
     pub llvm_target: String,
-    /// String to use as the `target_endian` `cfg` variable.
-    pub target_endian: String,
-    /// String to use as the `target_pointer_width` `cfg` variable.
-    pub target_pointer_width: String,
-    /// Width of c_int type
-    pub target_c_int_width: String,
-    /// OS name to use for conditional compilation.
-    pub target_os: String,
-    /// Environment name to use for conditional compilation.
-    pub target_env: String,
-    /// Vendor name to use for conditional compilation.
-    pub target_vendor: String,
+    /// Number of bits in a pointer. Influences the `target_pointer_width` `cfg` variable.
+    pub pointer_width: u32,
     /// Architecture to use for ABI considerations. Valid options include: "x86",
     /// "x86_64", "arm", "aarch64", "mips", "powerpc", "powerpc64", and others.
     pub arch: String,
     /// [Data layout](http://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM.
     pub data_layout: String,
-    /// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed
-    /// on the command line.
-    pub linker_flavor: LinkerFlavor,
     /// Optional settings with defaults.
     pub options: TargetOptions,
 }
@@ -726,11 +700,29 @@
 ///
 /// This has an implementation of `Default`, see each field for what the default is. In general,
 /// these try to take "minimal defaults" that don't assume anything about the runtime they run in.
+///
+/// `TargetOptions` as a separate structure is mostly an implementation detail of `Target`
+/// construction; all its fields logically belong to `Target` and are available from
+/// `Target` through `Deref` impls.
 #[derive(PartialEq, Clone, Debug)]
 pub struct TargetOptions {
     /// Whether the target is built-in or loaded from a custom target specification.
     pub is_builtin: bool,
 
+    /// String to use as the `target_endian` `cfg` variable. Defaults to "little".
+    pub endian: String,
+    /// Width of c_int type. Defaults to "32".
+    pub c_int_width: String,
+    /// OS name to use for conditional compilation. Defaults to "none".
+    pub os: String,
+    /// Environment name to use for conditional compilation. Defaults to "".
+    pub env: String,
+    /// Vendor name to use for conditional compilation. Defaults to "unknown".
+    pub vendor: String,
+    /// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed
+    /// on the command line. Defaults to `LinkerFlavor::Gcc`.
+    pub linker_flavor: LinkerFlavor,
+
     /// Linker to invoke
     pub linker: Option<String>,
 
@@ -817,7 +809,7 @@
     /// String to append to the name of every static library. Defaults to ".a".
     pub staticlib_suffix: String,
     /// OS family to use for conditional compilation. Valid options: "unix", "windows".
-    pub target_family: Option<String>,
+    pub os_family: Option<String>,
     /// Whether the target toolchain's ABI supports returning small structs as an integer.
     pub abi_return_struct_as_int: bool,
     /// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS,
@@ -832,15 +824,15 @@
     /// library naming convention. Defaults to false.
     pub is_like_windows: bool,
     pub is_like_msvc: bool,
-    /// Whether the target toolchain is like Android's. Only useful for compiling against Android.
-    /// Defaults to false.
-    pub is_like_android: bool,
     /// Whether the target toolchain is like Emscripten's. Only useful for compiling with
     /// Emscripten toolchain.
     /// Defaults to false.
     pub is_like_emscripten: bool,
     /// Whether the target toolchain is like Fuchsia's.
     pub is_like_fuchsia: bool,
+    /// Version of DWARF to use if not using the default.
+    /// Useful because some platforms (osx, bsd) only want up to DWARF2.
+    pub dwarf_version: Option<u32>,
     /// Whether the linker support GNU-like arguments such as -O. Defaults to false.
     pub linker_is_gnu: bool,
     /// The MinGW toolchain has a known issue that prevents it from correctly
@@ -971,11 +963,11 @@
     /// The MergeFunctions pass is generally useful, but some targets may need
     /// to opt out. The default is "aliases".
     ///
-    /// Workaround for: https://github.com/rust-lang/rust/issues/57356
+    /// Workaround for: <https://github.com/rust-lang/rust/issues/57356>
     pub merge_functions: MergeFunctions,
 
     /// Use platform dependent mcount function
-    pub target_mcount: String,
+    pub mcount: String,
 
     /// LLVM ABI name, corresponds to the '-mabi' parameter available in multilib C compilers
     pub llvm_abiname: String,
@@ -994,6 +986,10 @@
     /// used to locate unwinding information is passed
     /// (only has effect if the linker is `ld`-like).
     pub eh_frame_header: bool,
+
+    /// Whether the target is an ARM architecture using Thumb v1, which allows for
+    /// Thumb and ARM interworking.
+    pub has_thumb_interworking: bool,
 }
 
 impl Default for TargetOptions {
@@ -1002,6 +998,12 @@
     fn default() -> TargetOptions {
         TargetOptions {
             is_builtin: false,
+            endian: "little".to_string(),
+            c_int_width: "32".to_string(),
+            os: "none".to_string(),
+            env: String::new(),
+            vendor: "unknown".to_string(),
+            linker_flavor: LinkerFlavor::Gcc,
             linker: option_env!("CFG_DEFAULT_LINKER").map(|s| s.to_string()),
             lld_flavor: LldFlavor::Ld,
             pre_link_args: LinkArgs::new(),
@@ -1024,15 +1026,15 @@
             exe_suffix: String::new(),
             staticlib_prefix: "lib".to_string(),
             staticlib_suffix: ".a".to_string(),
-            target_family: None,
+            os_family: None,
             abi_return_struct_as_int: false,
             is_like_osx: false,
             is_like_solaris: false,
             is_like_windows: false,
-            is_like_android: false,
             is_like_emscripten: false,
             is_like_msvc: false,
             is_like_fuchsia: false,
+            dwarf_version: None,
             linker_is_gnu: false,
             allows_weak_linkage: true,
             has_rpath: false,
@@ -1080,22 +1082,39 @@
             limit_rdylib_exports: true,
             override_export_symbols: None,
             merge_functions: MergeFunctions::Aliases,
-            target_mcount: "mcount".to_string(),
+            mcount: "mcount".to_string(),
             llvm_abiname: "".to_string(),
             relax_elf_relocations: false,
             llvm_args: vec![],
             use_ctors_section: false,
             eh_frame_header: true,
+            has_thumb_interworking: false,
         }
     }
 }
 
+/// `TargetOptions` being a separate type is basically an implementation detail of `Target` that is
+/// used for providing defaults. Perhaps there's a way to merge `TargetOptions` into `Target` so
+/// this `Deref` implementation is no longer necessary.
+impl Deref for Target {
+    type Target = TargetOptions;
+
+    fn deref(&self) -> &Self::Target {
+        &self.options
+    }
+}
+impl DerefMut for Target {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.options
+    }
+}
+
 impl Target {
     /// Given a function ABI, turn it into the correct ABI for this target.
     pub fn adjust_abi(&self, abi: Abi) -> Abi {
         match abi {
             Abi::System => {
-                if self.options.is_like_windows && self.arch == "x86" {
+                if self.is_like_windows && self.arch == "x86" {
                     Abi::Stdcall
                 } else {
                     Abi::C
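As an illustration (not part of the patch): a minimal sketch of what the `Deref`/`DerefMut`
impls added in the hunk above buy downstream code, assuming it lives inside
`rustc_target::spec` where `Target`, `TargetOptions` and `MergeFunctions` are defined. Option
fields can now be read and written directly on a `Target` without the `.options` hop, which is
the mechanical simplification the remaining hunks in this file apply:

    fn tweak(target: &mut Target) {
        // Reads forward through Deref to `target.options.is_like_windows`.
        let windows_like = target.is_like_windows;
        // Writes forward through DerefMut to `target.options.merge_functions`.
        target.merge_functions = MergeFunctions::Disabled;
        let _ = windows_like;
    }
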
@@ -1105,7 +1124,7 @@
             // See https://docs.microsoft.com/en-us/cpp/cpp/argument-passing-and-naming-conventions
             // and the individual pages for __stdcall et al.
             Abi::Stdcall | Abi::Fastcall | Abi::Vectorcall | Abi::Thiscall => {
-                if self.options.is_like_windows && self.arch != "x86" { Abi::C } else { abi }
+                if self.is_like_windows && self.arch != "x86" { Abi::C } else { abi }
             }
             Abi::EfiApi => {
                 if self.arch == "x86_64" {
@@ -1121,21 +1140,21 @@
     /// Minimum integer size in bits that this target can perform atomic
     /// operations on.
     pub fn min_atomic_width(&self) -> u64 {
-        self.options.min_atomic_width.unwrap_or(8)
+        self.min_atomic_width.unwrap_or(8)
     }
 
     /// Maximum integer size in bits that this target can perform atomic
     /// operations on.
     pub fn max_atomic_width(&self) -> u64 {
-        self.options.max_atomic_width.unwrap_or_else(|| self.target_pointer_width.parse().unwrap())
+        self.max_atomic_width.unwrap_or_else(|| self.pointer_width.into())
     }
 
     pub fn is_abi_supported(&self, abi: Abi) -> bool {
-        abi.generic() || !self.options.unsupported_abis.contains(&abi)
+        abi.generic() || !self.unsupported_abis.contains(&abi)
     }
 
     /// Loads a target descriptor from a JSON object.
-    pub fn from_json(obj: Json) -> TargetResult {
+    pub fn from_json(obj: Json) -> Result<Target, String> {
         // While ugly, this code must remain this way to retain
         // compatibility with existing JSON fields and the internal
         // expected naming of the Target and TargetOptions structs.
@@ -1150,25 +1169,13 @@
                 .ok_or_else(|| format!("Field {} in target specification is required", name))
         };
 
-        let get_opt_field = |name: &str, default: &str| {
-            obj.find(name)
-                .and_then(|s| s.as_string())
-                .map(|s| s.to_string())
-                .unwrap_or_else(|| default.to_string())
-        };
-
         let mut base = Target {
             llvm_target: get_req_field("llvm-target")?,
-            target_endian: get_req_field("target-endian")?,
-            target_pointer_width: get_req_field("target-pointer-width")?,
-            target_c_int_width: get_req_field("target-c-int-width")?,
+            pointer_width: get_req_field("target-pointer-width")?
+                .parse::<u32>()
+                .map_err(|_| "target-pointer-width must be an integer".to_string())?,
             data_layout: get_req_field("data-layout")?,
             arch: get_req_field("arch")?,
-            target_os: get_req_field("os")?,
-            target_env: get_opt_field("env", ""),
-            target_vendor: get_opt_field("vendor", "unknown"),
-            linker_flavor: LinkerFlavor::from_str(&*get_req_field("linker-flavor")?)
-                .ok_or_else(|| format!("linker flavor must be {}", LinkerFlavor::one_of()))?,
             options: Default::default(),
         };
 
@@ -1176,26 +1183,41 @@
             ($key_name:ident) => ( {
                 let name = (stringify!($key_name)).replace("_", "-");
                 if let Some(s) = obj.find(&name).and_then(Json::as_string) {
-                    base.options.$key_name = s.to_string();
+                    base.$key_name = s.to_string();
+                }
+            } );
+            ($key_name:ident = $json_name:expr) => ( {
+                let name = $json_name;
+                if let Some(s) = obj.find(&name).and_then(Json::as_string) {
+                    base.$key_name = s.to_string();
                 }
             } );
             ($key_name:ident, bool) => ( {
                 let name = (stringify!($key_name)).replace("_", "-");
                 if let Some(s) = obj.find(&name).and_then(Json::as_boolean) {
-                    base.options.$key_name = s;
+                    base.$key_name = s;
+                }
+            } );
+            ($key_name:ident, Option<u32>) => ( {
+                let name = (stringify!($key_name)).replace("_", "-");
+                if let Some(s) = obj.find(&name).and_then(Json::as_u64) {
+                    if s < 1 || s > 5 {
+                        return Err("Not a valid DWARF version number".to_string());
+                    }
+                    base.$key_name = Some(s as u32);
                 }
             } );
             ($key_name:ident, Option<u64>) => ( {
                 let name = (stringify!($key_name)).replace("_", "-");
                 if let Some(s) = obj.find(&name).and_then(Json::as_u64) {
-                    base.options.$key_name = Some(s);
+                    base.$key_name = Some(s);
                 }
             } );
             ($key_name:ident, MergeFunctions) => ( {
                 let name = (stringify!($key_name)).replace("_", "-");
                 obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                     match s.parse::<MergeFunctions>() {
-                        Ok(mergefunc) => base.options.$key_name = mergefunc,
+                        Ok(mergefunc) => base.$key_name = mergefunc,
                         _ => return Some(Err(format!("'{}' is not a valid value for \
                                                       merge-functions. Use 'disabled', \
                                                       'trampolines', or 'aliases'.",
@@ -1208,7 +1230,7 @@
                 let name = (stringify!($key_name)).replace("_", "-");
                 obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                     match s.parse::<RelocModel>() {
-                        Ok(relocation_model) => base.options.$key_name = relocation_model,
+                        Ok(relocation_model) => base.$key_name = relocation_model,
                         _ => return Some(Err(format!("'{}' is not a valid relocation model. \
                                                       Run `rustc --print relocation-models` to \
                                                       see the list of supported values.", s))),
@@ -1220,7 +1242,7 @@
                 let name = (stringify!($key_name)).replace("_", "-");
                 obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                     match s.parse::<CodeModel>() {
-                        Ok(code_model) => base.options.$key_name = Some(code_model),
+                        Ok(code_model) => base.$key_name = Some(code_model),
                         _ => return Some(Err(format!("'{}' is not a valid code model. \
                                                       Run `rustc --print code-models` to \
                                                       see the list of supported values.", s))),
@@ -1232,7 +1254,7 @@
                 let name = (stringify!($key_name)).replace("_", "-");
                 obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                     match s.parse::<TlsModel>() {
-                        Ok(tls_model) => base.options.$key_name = tls_model,
+                        Ok(tls_model) => base.$key_name = tls_model,
                         _ => return Some(Err(format!("'{}' is not a valid TLS model. \
                                                       Run `rustc --print tls-models` to \
                                                       see the list of supported values.", s))),
@@ -1244,8 +1266,8 @@
                 let name = (stringify!($key_name)).replace("_", "-");
                 obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                     match s {
-                        "unwind" => base.options.$key_name = PanicStrategy::Unwind,
-                        "abort" => base.options.$key_name = PanicStrategy::Abort,
+                        "unwind" => base.$key_name = PanicStrategy::Unwind,
+                        "abort" => base.$key_name = PanicStrategy::Abort,
                         _ => return Some(Err(format!("'{}' is not a valid value for \
                                                       panic-strategy. Use 'unwind' or 'abort'.",
                                                      s))),
@@ -1257,7 +1279,7 @@
                 let name = (stringify!($key_name)).replace("_", "-");
                 obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                     match s.parse::<RelroLevel>() {
-                        Ok(level) => base.options.$key_name = level,
+                        Ok(level) => base.$key_name = level,
                         _ => return Some(Err(format!("'{}' is not a valid value for \
                                                       relro-level. Use 'full', 'partial, or 'off'.",
                                                       s))),
@@ -1268,7 +1290,7 @@
             ($key_name:ident, list) => ( {
                 let name = (stringify!($key_name)).replace("_", "-");
                 if let Some(v) = obj.find(&name).and_then(Json::as_array) {
-                    base.options.$key_name = v.iter()
+                    base.$key_name = v.iter()
                         .map(|a| a.as_string().unwrap().to_string())
                         .collect();
                 }
@@ -1276,7 +1298,7 @@
             ($key_name:ident, opt_list) => ( {
                 let name = (stringify!($key_name)).replace("_", "-");
                 if let Some(v) = obj.find(&name).and_then(Json::as_array) {
-                    base.options.$key_name = Some(v.iter()
+                    base.$key_name = Some(v.iter()
                         .map(|a| a.as_string().unwrap().to_string())
                         .collect());
                 }
@@ -1284,7 +1306,15 @@
             ($key_name:ident, optional) => ( {
                 let name = (stringify!($key_name)).replace("_", "-");
                 if let Some(o) = obj.find(&name[..]) {
-                    base.options.$key_name = o
+                    base.$key_name = o
+                        .as_string()
+                        .map(|s| s.to_string() );
+                }
+            } );
+            ($key_name:ident = $json_name:expr, optional) => ( {
+                let name = $json_name;
+                if let Some(o) = obj.find(&name[..]) {
+                    base.$key_name = o
                         .as_string()
                         .map(|s| s.to_string() );
                 }
@@ -1293,7 +1323,7 @@
                 let name = (stringify!($key_name)).replace("_", "-");
                 obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                     if let Some(flavor) = LldFlavor::from_str(&s) {
-                        base.options.$key_name = flavor;
+                        base.$key_name = flavor;
                     } else {
                         return Some(Err(format!(
                             "'{}' is not a valid value for lld-flavor. \
@@ -1305,18 +1335,20 @@
             } );
             ($key_name:ident, LinkerFlavor) => ( {
                 let name = (stringify!($key_name)).replace("_", "-");
-                obj.find(&name[..]).and_then(|o| o.as_string().map(|s| {
-                    LinkerFlavor::from_str(&s).ok_or_else(|| {
-                        Err(format!("'{}' is not a valid value for linker-flavor. \
-                                     Use 'em', 'gcc', 'ld' or 'msvc.", s))
-                    })
+                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
+                    match LinkerFlavor::from_str(s) {
+                        Some(linker_flavor) => base.$key_name = linker_flavor,
+                        _ => return Some(Err(format!("'{}' is not a valid value for linker-flavor. \
+                                                      Use {}", s, LinkerFlavor::one_of()))),
+                    }
+                    Some(Ok(()))
                 })).unwrap_or(Ok(()))
             } );
             ($key_name:ident, crt_objects_fallback) => ( {
                 let name = (stringify!($key_name)).replace("_", "-");
                 obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                     match s.parse::<CrtObjectsFallback>() {
-                        Ok(fallback) => base.options.$key_name = Some(fallback),
+                        Ok(fallback) => base.$key_name = Some(fallback),
                         _ => return Some(Err(format!("'{}' is not a valid CRT objects fallback. \
                                                       Use 'musl', 'mingw' or 'wasm'", s))),
                     }
@@ -1348,7 +1380,7 @@
 
                         args.insert(kind, v);
                     }
-                    base.options.$key_name = args;
+                    base.$key_name = args;
                 }
             } );
             ($key_name:ident, link_args) => ( {
@@ -1375,7 +1407,7 @@
 
                         args.insert(flavor, v);
                     }
-                    base.options.$key_name = args;
+                    base.$key_name = args;
                 }
             } );
             ($key_name:ident, env) => ( {
@@ -1387,7 +1419,7 @@
                             if p.len() == 2 {
                                 let k = p[0].to_string();
                                 let v = p[1].to_string();
-                                base.options.$key_name.push((k, v));
+                                base.$key_name.push((k, v));
                             }
                         }
                     }
@@ -1396,6 +1428,12 @@
         }
 
         key!(is_builtin, bool);
+        key!(endian = "target_endian");
+        key!(c_int_width = "target_c_int_width");
+        key!(os);
+        key!(env);
+        key!(vendor);
+        key!(linker_flavor, LinkerFlavor)?;
         key!(linker, optional);
         key!(lld_flavor, LldFlavor)?;
         key!(pre_link_objects, link_objects);
@@ -1428,15 +1466,15 @@
         key!(exe_suffix);
         key!(staticlib_prefix);
         key!(staticlib_suffix);
-        key!(target_family, optional);
+        key!(os_family = "target_family", optional);
         key!(abi_return_struct_as_int, bool);
         key!(is_like_osx, bool);
         key!(is_like_solaris, bool);
         key!(is_like_windows, bool);
         key!(is_like_msvc, bool);
         key!(is_like_emscripten, bool);
-        key!(is_like_android, bool);
         key!(is_like_fuchsia, bool);
+        key!(dwarf_version, Option<u32>);
         key!(linker_is_gnu, bool);
         key!(allows_weak_linkage, bool);
         key!(has_rpath, bool);
@@ -1473,12 +1511,13 @@
         key!(limit_rdylib_exports, bool);
         key!(override_export_symbols, opt_list);
         key!(merge_functions, MergeFunctions)?;
-        key!(target_mcount);
+        key!(mcount = "target_mcount");
         key!(llvm_abiname);
         key!(relax_elf_relocations, bool);
         key!(llvm_args, list);
         key!(use_ctors_section, bool);
         key!(eh_frame_header, bool);
+        key!(has_thumb_interworking, bool);
 
         // NB: The old name is deprecated, but support for it is retained for
         // compatibility.
@@ -1495,7 +1534,7 @@
                                 ));
                             }
 
-                            base.options.unsupported_abis.push(abi)
+                            base.unsupported_abis.push(abi)
                         }
                         None => {
                             return Err(format!(
@@ -1531,11 +1570,9 @@
 
         match *target_triple {
             TargetTriple::TargetTriple(ref target_triple) => {
-                // check if triple is in list of supported targets
-                match load_specific(target_triple) {
-                    Ok(t) => return Ok(t),
-                    Err(LoadTargetError::BuiltinTargetNotFound(_)) => (),
-                    Err(LoadTargetError::Other(e)) => return Err(e),
+                // check if triple is in list of built-in targets
+                if let Some(t) = load_builtin(target_triple) {
+                    return Ok(t);
                 }
 
                 // search for a file named `target_triple`.json in RUST_TARGET_PATH
@@ -1586,21 +1623,20 @@
         macro_rules! target_option_val {
             ($attr:ident) => {{
                 let name = (stringify!($attr)).replace("_", "-");
-                if default.$attr != self.options.$attr {
-                    d.insert(name, self.options.$attr.to_json());
+                if default.$attr != self.$attr {
+                    d.insert(name, self.$attr.to_json());
                 }
             }};
             ($attr:ident, $key_name:expr) => {{
                 let name = $key_name;
-                if default.$attr != self.options.$attr {
-                    d.insert(name.to_string(), self.options.$attr.to_json());
+                if default.$attr != self.$attr {
+                    d.insert(name.to_string(), self.$attr.to_json());
                 }
             }};
             (link_args - $attr:ident) => {{
                 let name = (stringify!($attr)).replace("_", "-");
-                if default.$attr != self.options.$attr {
+                if default.$attr != self.$attr {
                     let obj = self
-                        .options
                         .$attr
                         .iter()
                         .map(|(k, v)| (k.desc().to_owned(), v.clone()))
@@ -1610,9 +1646,8 @@
             }};
             (env - $attr:ident) => {{
                 let name = (stringify!($attr)).replace("_", "-");
-                if default.$attr != self.options.$attr {
+                if default.$attr != self.$attr {
                     let obj = self
-                        .options
                         .$attr
                         .iter()
                         .map(|&(ref k, ref v)| k.clone() + "=" + &v)
@@ -1623,17 +1658,17 @@
         }
 
         target_val!(llvm_target);
-        target_val!(target_endian);
-        target_val!(target_pointer_width);
-        target_val!(target_c_int_width);
+        d.insert("target-pointer-width".to_string(), self.pointer_width.to_string().to_json());
         target_val!(arch);
-        target_val!(target_os, "os");
-        target_val!(target_env, "env");
-        target_val!(target_vendor, "vendor");
         target_val!(data_layout);
-        target_val!(linker_flavor);
 
         target_option_val!(is_builtin);
+        target_option_val!(endian, "target_endian");
+        target_option_val!(c_int_width, "target_c_int_width");
+        target_option_val!(os);
+        target_option_val!(env);
+        target_option_val!(vendor);
+        target_option_val!(linker_flavor);
         target_option_val!(linker);
         target_option_val!(lld_flavor);
         target_option_val!(pre_link_objects);
@@ -1666,15 +1701,15 @@
         target_option_val!(exe_suffix);
         target_option_val!(staticlib_prefix);
         target_option_val!(staticlib_suffix);
-        target_option_val!(target_family);
+        target_option_val!(os_family, "target_family");
         target_option_val!(abi_return_struct_as_int);
         target_option_val!(is_like_osx);
         target_option_val!(is_like_solaris);
         target_option_val!(is_like_windows);
         target_option_val!(is_like_msvc);
         target_option_val!(is_like_emscripten);
-        target_option_val!(is_like_android);
         target_option_val!(is_like_fuchsia);
+        target_option_val!(dwarf_version);
         target_option_val!(linker_is_gnu);
         target_option_val!(allows_weak_linkage);
         target_option_val!(has_rpath);
@@ -1711,18 +1746,18 @@
         target_option_val!(limit_rdylib_exports);
         target_option_val!(override_export_symbols);
         target_option_val!(merge_functions);
-        target_option_val!(target_mcount);
+        target_option_val!(mcount, "target_mcount");
         target_option_val!(llvm_abiname);
         target_option_val!(relax_elf_relocations);
         target_option_val!(llvm_args);
         target_option_val!(use_ctors_section);
         target_option_val!(eh_frame_header);
+        target_option_val!(has_thumb_interworking);
 
-        if default.unsupported_abis != self.options.unsupported_abis {
+        if default.unsupported_abis != self.unsupported_abis {
             d.insert(
                 "unsupported-abis".to_string(),
-                self.options
-                    .unsupported_abis
+                self.unsupported_abis
                     .iter()
                     .map(|&name| Abi::name(name).to_json())
                     .collect::<Vec<_>>()
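
As an illustration (not part of the patch): with the reworked `Target::from_json` above, a
custom target spec only has to supply the four required keys ("llvm-target",
"target-pointer-width", "data-layout", "arch"); every other key falls back to
`TargetOptions::default()`. A rough sketch under that assumption; the `Json::from_str` parser
is assumed to be available from `rustc_serialize::json`, and the values are borrowed from the
riscv32 spec elsewhere in this patch:

    use rustc_serialize::json::Json;

    fn minimal_custom_target() -> Result<Target, String> {
        // Only the required keys; endian, c_int_width, os, env, vendor, linker_flavor
        // and the rest take the TargetOptions defaults introduced above.
        let spec = r#"{
            "llvm-target": "riscv32",
            "target-pointer-width": "32",
            "data-layout": "e-m:e-p:32:32-i64:64-n32-S128",
            "arch": "riscv32"
        }"#;
        let json = Json::from_str(spec).map_err(|e| format!("{:?}", e))?;
        Target::from_json(json)
    }
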
diff --git a/compiler/rustc_target/src/spec/msp430_none_elf.rs b/compiler/rustc_target/src/spec/msp430_none_elf.rs
index f756979..ef966cb 100644
--- a/compiler/rustc_target/src/spec/msp430_none_elf.rs
+++ b/compiler/rustc_target/src/spec/msp430_none_elf.rs
@@ -1,19 +1,15 @@
-use crate::spec::{LinkerFlavor, PanicStrategy, RelocModel, Target, TargetOptions, TargetResult};
+use crate::spec::{PanicStrategy, RelocModel, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "msp430-none-elf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "16".to_string(),
-        target_c_int_width: "16".to_string(),
+        pointer_width: 16,
         data_layout: "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16".to_string(),
         arch: "msp430".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
+            c_int_width: "16".to_string(),
+            vendor: String::new(),
             executables: true,
 
             // The LLVM backend currently can't generate object files. To
@@ -60,5 +56,5 @@
 
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/msvc_base.rs b/compiler/rustc_target/src/spec/msvc_base.rs
index f57ef87..8cd6735 100644
--- a/compiler/rustc_target/src/spec/msvc_base.rs
+++ b/compiler/rustc_target/src/spec/msvc_base.rs
@@ -18,6 +18,7 @@
     pre_link_args.insert(LinkerFlavor::Lld(LldFlavor::Link), pre_link_args_msvc);
 
     TargetOptions {
+        linker_flavor: LinkerFlavor::Msvc,
         executables: true,
         is_like_windows: true,
         is_like_msvc: true,
diff --git a/compiler/rustc_target/src/spec/netbsd_base.rs b/compiler/rustc_target/src/spec/netbsd_base.rs
index 988346a..a77d60b 100644
--- a/compiler/rustc_target/src/spec/netbsd_base.rs
+++ b/compiler/rustc_target/src/spec/netbsd_base.rs
@@ -14,9 +14,10 @@
     );
 
     TargetOptions {
+        os: "netbsd".to_string(),
         dynamic_linking: true,
         executables: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         linker_is_gnu: true,
         no_default_libraries: false,
         has_rpath: true,
@@ -24,6 +25,7 @@
         position_independent_executables: true,
         relro_level: RelroLevel::Full,
         use_ctors_section: true,
+        dwarf_version: Some(2),
         ..Default::default()
     }
 }
diff --git a/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
index 0c8f2a3..3c9c7d5 100644
--- a/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
+++ b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
@@ -1,25 +1,17 @@
 use crate::spec::abi::Abi;
-use crate::spec::{
-    LinkerFlavor, MergeFunctions, PanicStrategy, Target, TargetOptions, TargetResult,
-};
+use crate::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         arch: "nvptx64".to_string(),
         data_layout: "e-i64:64-i128:128-v16:16-v32:32-n16:32:64".to_string(),
         llvm_target: "nvptx64-nvidia-cuda".to_string(),
-
-        target_os: "cuda".to_string(),
-        target_vendor: "nvidia".to_string(),
-        target_env: String::new(),
-
-        linker_flavor: LinkerFlavor::PtxLinker,
-
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
 
         options: TargetOptions {
+            os: "cuda".to_string(),
+            vendor: "nvidia".to_string(),
+            linker_flavor: LinkerFlavor::PtxLinker,
             // The linker can be installed from `crates.io`.
             linker: Some("rust-ptx-linker".to_string()),
 
@@ -71,5 +63,5 @@
 
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/openbsd_base.rs b/compiler/rustc_target/src/spec/openbsd_base.rs
index cadd14d..2b40a1e 100644
--- a/compiler/rustc_target/src/spec/openbsd_base.rs
+++ b/compiler/rustc_target/src/spec/openbsd_base.rs
@@ -16,9 +16,10 @@
     );
 
     TargetOptions {
+        os: "openbsd".to_string(),
         dynamic_linking: true,
         executables: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         linker_is_gnu: true,
         has_rpath: true,
         abi_return_struct_as_int: true,
@@ -26,6 +27,7 @@
         position_independent_executables: true,
         eliminate_frame_pointer: false, // FIXME 43575
         relro_level: RelroLevel::Full,
+        dwarf_version: Some(2),
         ..Default::default()
     }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
index 60c15d6..626865a 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::freebsd_base::opts();
     base.cpu = "ppc64".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc64-unknown-freebsd".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i64:64-n32:64".to_string(),
         arch: "powerpc64".to_string(),
-        target_os: "freebsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
index 5306d90..0332281 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
@@ -1,7 +1,7 @@
-use crate::spec::{LinkerFlavor, RelroLevel, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, RelroLevel, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.cpu = "ppc64".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
@@ -10,17 +10,11 @@
     // for now. https://github.com/rust-lang/rust/pull/43170#issuecomment-315411474
     base.relro_level = RelroLevel::Partial;
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc64-unknown-linux-gnu".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i64:64-n32:64".to_string(),
         arch: "powerpc64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
index c3b956e..2315397 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.cpu = "ppc64".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc64-unknown-linux-musl".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i64:64-n32:64".to_string(),
         arch: "powerpc64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
index e00a927..1c83e3e 100644
--- a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::vxworks_base::opts();
     base.cpu = "ppc64".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc64-unknown-linux-gnu".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i64:64-n32:64".to_string(),
         arch: "powerpc64".to_string(),
-        target_os: "vxworks".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "wrs".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { ..base },
-    })
+        options: TargetOptions { endian: "big".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
index 9073799..07e0bf8 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.cpu = "ppc64le".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i64:64-n32:64".to_string(),
         arch: "powerpc64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
index 1a1fccf..41c78a5 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.cpu = "ppc64le".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc64le-unknown-linux-musl".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-i64:64-n32:64".to_string(),
         arch: "powerpc64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
index 2d4c598..3a92712 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
@@ -1,21 +1,15 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
     base.max_atomic_width = Some(32);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc-unknown-linux-gnu".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
         arch: "powerpc".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
index fabc431..105a0b2 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
@@ -1,21 +1,15 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mspe".to_string());
     base.max_atomic_width = Some(32);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc-unknown-linux-gnuspe".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
         arch: "powerpc".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
index 240cbcb..49d3294 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
@@ -1,21 +1,15 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
     base.max_atomic_width = Some(32);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc-unknown-linux-musl".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
         arch: "powerpc".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "_mcount".to_string(), ..base },
-    })
+        options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
index 6ca7053..387d6cd 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
@@ -1,21 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::netbsd_base::opts();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
     base.max_atomic_width = Some(32);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc-unknown-netbsd".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
         arch: "powerpc".to_string(),
-        target_os: "netbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "__mcount".to_string(), ..base },
-    })
+        options: TargetOptions {
+            endian: "big".to_string(),
+            mcount: "__mcount".to_string(),
+            ..base
+        },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
index 2211dc2..20ffa07 100644
--- a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
@@ -1,22 +1,20 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::vxworks_base::opts();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("--secure-plt".to_string());
     base.max_atomic_width = Some(32);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc-unknown-linux-gnu".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
         arch: "powerpc".to_string(),
-        target_os: "vxworks".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "wrs".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { features: "+secure-plt".to_string(), ..base },
-    })
+        options: TargetOptions {
+            endian: "big".to_string(),
+            features: "+secure-plt".to_string(),
+            ..base
+        },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
index b10182c..0e713fc 100644
--- a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
+++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
@@ -1,26 +1,21 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::vxworks_base::opts();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mspe".to_string());
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("--secure-plt".to_string());
     base.max_atomic_width = Some(32);
 
-    Ok(Target {
+    Target {
         llvm_target: "powerpc-unknown-linux-gnuspe".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
         arch: "powerpc".to_string(),
-        target_os: "vxworks".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "wrs".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
+            endian: "big".to_string(),
             // feature msync would disable instruction 'fsync' which is not supported by fsl_p1p2
             features: "+secure-plt,+msync".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/redox_base.rs b/compiler/rustc_target/src/spec/redox_base.rs
index 18cafe6..5ef70587 100644
--- a/compiler/rustc_target/src/spec/redox_base.rs
+++ b/compiler/rustc_target/src/spec/redox_base.rs
@@ -19,9 +19,11 @@
     );
 
     TargetOptions {
+        os: "redox".to_string(),
+        env: "relibc".to_string(),
         dynamic_linking: true,
         executables: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         linker_is_gnu: true,
         has_rpath: true,
         pre_link_args: args,
diff --git a/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs
index 28710c6..cf5e020 100644
--- a/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs
@@ -1,17 +1,11 @@
-use crate::spec::{CodeModel, LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{CodeModel, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "riscv32-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_env: "gnu".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-i64:64-n32-S128".to_string(),
         arch: "riscv32".to_string(),
-        target_os: "linux".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             unsupported_abis: super::riscv_base::unsupported_abis(),
             code_model: Some(CodeModel::Medium),
@@ -19,7 +13,7 @@
             features: "+m,+a,+f,+d,+c".to_string(),
             llvm_abiname: "ilp32d".to_string(),
             max_atomic_width: Some(32),
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs
index 5b5e342..a31a08a 100644
--- a/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs
@@ -1,20 +1,15 @@
 use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         data_layout: "e-m:e-p:32:32-i64:64-n32-S128".to_string(),
         llvm_target: "riscv32".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
+        pointer_width: 32,
         arch: "riscv32".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             linker: Some("rust-lld".to_string()),
             cpu: "generic-rv32".to_string(),
             max_atomic_width: Some(0),
@@ -28,5 +23,5 @@
             eh_frame_header: false,
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs
index 4cef5c4..2ee53fd 100644
--- a/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs
@@ -1,20 +1,15 @@
 use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         data_layout: "e-m:e-p:32:32-i64:64-n32-S128".to_string(),
         llvm_target: "riscv32".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
+        pointer_width: 32,
         arch: "riscv32".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             linker: Some("rust-lld".to_string()),
             cpu: "generic-rv32".to_string(),
             max_atomic_width: Some(32),
@@ -28,5 +23,5 @@
             eh_frame_header: false,
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs
index 8ad563e..89d760e 100644
--- a/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs
@@ -1,20 +1,15 @@
 use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         data_layout: "e-m:e-p:32:32-i64:64-n32-S128".to_string(),
         llvm_target: "riscv32".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
+        pointer_width: 32,
         arch: "riscv32".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             linker: Some("rust-lld".to_string()),
             cpu: "generic-rv32".to_string(),
             max_atomic_width: Some(0),
@@ -28,5 +23,5 @@
             eh_frame_header: false,
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs
index f7a93c9..84f2841 100644
--- a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs
@@ -1,17 +1,11 @@
-use crate::spec::{CodeModel, LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{CodeModel, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "riscv64-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_env: "gnu".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".to_string(),
         arch: "riscv64".to_string(),
-        target_os: "linux".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions {
             unsupported_abis: super::riscv_base::unsupported_abis(),
             code_model: Some(CodeModel::Medium),
@@ -19,7 +13,7 @@
             features: "+m,+a,+f,+d,+c".to_string(),
             llvm_abiname: "lp64d".to_string(),
             max_atomic_width: Some(64),
-            ..super::linux_base::opts()
+            ..super::linux_gnu_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
index 3aeb3f3..33a785f 100644
--- a/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
@@ -1,20 +1,15 @@
 use crate::spec::{CodeModel, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".to_string(),
         llvm_target: "riscv64".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
+        pointer_width: 64,
         arch: "riscv64".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             linker: Some("rust-lld".to_string()),
             cpu: "generic-rv64".to_string(),
             max_atomic_width: Some(64),
@@ -29,5 +24,5 @@
             eh_frame_header: false,
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
index d814496..908367e 100644
--- a/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
@@ -1,20 +1,15 @@
-use crate::spec::{CodeModel, Target, TargetOptions, TargetResult};
+use crate::spec::{CodeModel, Target, TargetOptions};
 use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".to_string(),
         llvm_target: "riscv64".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
+        pointer_width: 64,
         arch: "riscv64".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
+            linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
             linker: Some("rust-lld".to_string()),
             cpu: "generic-rv64".to_string(),
             max_atomic_width: Some(64),
@@ -29,5 +24,5 @@
             eh_frame_header: false,
             ..Default::default()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs
index f259787..d6e8e6e 100644
--- a/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs
@@ -1,7 +1,8 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
+    base.endian = "big".to_string();
     // z10 is the oldest CPU supported by LLVM
     base.cpu = "z10".to_string();
     // FIXME: The data_layout string below and the ABI implementation in
@@ -11,17 +12,11 @@
     base.max_atomic_width = Some(64);
     base.min_global_align = Some(16);
 
-    Ok(Target {
+    Target {
         llvm_target: "s390x-unknown-linux-gnu".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".to_string(),
         arch: "s390x".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
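The spec files above all follow the same refactor: target() now returns Target directly instead of a TargetResult, pointer_width becomes an integer, and per-target properties such as OS, env, vendor, endianness and linker flavor move into TargetOptions, usually inherited from a base like linux_gnu_base::opts() and then overridden. A minimal, self-contained sketch of that shape, using simplified stand-in types rather than the real rustc_target definitions:

    // Simplified stand-ins for Target/TargetOptions; the field set here is
    // illustrative only and much smaller than the real rustc_target structs.
    #[derive(Debug, Default)]
    struct TargetOptions {
        os: String,
        env: String,
        vendor: String,
        endian: String,
        max_atomic_width: Option<u64>,
    }

    #[derive(Debug)]
    struct Target {
        llvm_target: String,
        pointer_width: u32,
        data_layout: String,
        arch: String,
        options: TargetOptions,
    }

    // Shared base options, analogous to linux_gnu_base::opts().
    fn linux_gnu_base_opts() -> TargetOptions {
        TargetOptions {
            os: "linux".to_string(),
            env: "gnu".to_string(),
            vendor: "unknown".to_string(),
            endian: "little".to_string(),
            ..Default::default()
        }
    }

    // A target constructor in the new style: no Result wrapper, and the
    // per-target tweaks are layered over the base with struct update syntax.
    fn target() -> Target {
        let mut base = linux_gnu_base_opts();
        base.endian = "big".to_string(); // big-endian targets now set this on the options
        Target {
            llvm_target: "s390x-unknown-linux-gnu".to_string(),
            pointer_width: 64,
            data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".to_string(),
            arch: "s390x".to_string(),
            options: TargetOptions { max_atomic_width: Some(64), ..base },
        }
    }

    fn main() {
        println!("{:#?}", target());
    }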
diff --git a/compiler/rustc_target/src/spec/solaris_base.rs b/compiler/rustc_target/src/spec/solaris_base.rs
index 3d7f003..33e0cf8 100644
--- a/compiler/rustc_target/src/spec/solaris_base.rs
+++ b/compiler/rustc_target/src/spec/solaris_base.rs
@@ -2,10 +2,12 @@
 
 pub fn opts() -> TargetOptions {
     TargetOptions {
+        os: "solaris".to_string(),
+        vendor: "sun".to_string(),
         dynamic_linking: true,
         executables: true,
         has_rpath: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         is_like_solaris: true,
         limit_rdylib_exports: false, // Linker doesn't support this
         eh_frame_header: false,
diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs
index c842b22..e9b5520 100644
--- a/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs
@@ -1,21 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
+    base.endian = "big".to_string();
     base.cpu = "v9".to_string();
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "sparc64-unknown-linux-gnu".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i64:64-n32:64-S128".to_string(),
         arch: "sparc64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs
index aad85e8..c8e90f8 100644
--- a/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs
@@ -1,22 +1,20 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::netbsd_base::opts();
     base.cpu = "v9".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "sparc64-unknown-netbsd".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i64:64-n32:64-S128".to_string(),
         arch: "sparc64".to_string(),
-        target_os: "netbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "__mcount".to_string(), ..base },
-    })
+        options: TargetOptions {
+            endian: "big".to_string(),
+            mcount: "__mcount".to_string(),
+            ..base
+        },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs
index 229e062..630ce61 100644
--- a/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs
@@ -1,22 +1,17 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::openbsd_base::opts();
+    base.endian = "big".to_string();
     base.cpu = "v9".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "sparc64-unknown-openbsd".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i64:64-n32:64-S128".to_string(),
         arch: "sparc64".to_string(),
-        target_os: "openbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
index 162cd31..aae186b 100644
--- a/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
@@ -1,22 +1,17 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
+    base.endian = "big".to_string();
     base.cpu = "v9".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mv8plus".to_string());
 
-    Ok(Target {
+    Target {
         llvm_target: "sparc-unknown-linux-gnu".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "E-m:e-p:32:32-i64:64-f128:64-n32-S64".to_string(),
         arch: "sparc".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
index acc03fd..5f99e0b 100644
--- a/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
+++ b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
@@ -1,27 +1,22 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::solaris_base::opts();
+    base.endian = "big".to_string();
     base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m64".to_string()]);
     // llvm calls this "v9"
     base.cpu = "v9".to_string();
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "sparcv9-sun-solaris".to_string(),
-        target_endian: "big".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "E-m:e-i64:64-n32:64-S128".to_string(),
         // Use "sparc64" instead of "sparcv9" here, since the former is already
         // used widely in the source base.  If we ever needed ABI
         // differentiation from the sparc64, we could, but that would probably
         // just be confusing.
         arch: "sparc64".to_string(),
-        target_os: "solaris".to_string(),
-        target_env: String::new(),
-        target_vendor: "sun".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/tests/tests_impl.rs b/compiler/rustc_target/src/spec/tests/tests_impl.rs
index b2c2b82..f348df7 100644
--- a/compiler/rustc_target/src/spec/tests/tests_impl.rs
+++ b/compiler/rustc_target/src/spec/tests/tests_impl.rs
@@ -1,16 +1,9 @@
 use super::super::*;
 
-pub(super) fn test_target(target: TargetResult) {
-    // Grab the TargetResult struct. If we successfully retrieved
-    // a Target, then the test JSON encoding/decoding can run for this
-    // Target on this testing platform (i.e., checking the iOS targets
-    // only on a Mac test platform).
-    if let Ok(original) = target {
-        original.check_consistency();
-        let as_json = original.to_json();
-        let parsed = Target::from_json(as_json).unwrap();
-        assert_eq!(original, parsed);
-    }
+// Test target self-consistency and JSON encoding/decoding roundtrip.
+pub(super) fn test_target(target: Target) {
+    target.check_consistency();
+    assert_eq!(Target::from_json(target.to_json()), Ok(target));
 }
 
 impl Target {
@@ -21,27 +14,27 @@
         assert_eq!(
             self.linker_flavor == LinkerFlavor::Msvc
                 || self.linker_flavor == LinkerFlavor::Lld(LldFlavor::Link),
-            self.options.lld_flavor == LldFlavor::Link,
+            self.lld_flavor == LldFlavor::Link,
         );
         for args in &[
-            &self.options.pre_link_args,
-            &self.options.late_link_args,
-            &self.options.late_link_args_dynamic,
-            &self.options.late_link_args_static,
-            &self.options.post_link_args,
+            &self.pre_link_args,
+            &self.late_link_args,
+            &self.late_link_args_dynamic,
+            &self.late_link_args_static,
+            &self.post_link_args,
         ] {
             assert_eq!(
                 args.get(&LinkerFlavor::Msvc),
                 args.get(&LinkerFlavor::Lld(LldFlavor::Link)),
             );
             if args.contains_key(&LinkerFlavor::Msvc) {
-                assert_eq!(self.options.lld_flavor, LldFlavor::Link);
+                assert_eq!(self.lld_flavor, LldFlavor::Link);
             }
         }
         assert!(
-            (self.options.pre_link_objects_fallback.is_empty()
-                && self.options.post_link_objects_fallback.is_empty())
-                || self.options.crt_objects_fallback.is_some()
+            (self.pre_link_objects_fallback.is_empty()
+                && self.post_link_objects_fallback.is_empty())
+                || self.crt_objects_fallback.is_some()
         );
     }
 }
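In the consistency checks above, accesses like self.options.lld_flavor become self.lld_flavor, which is consistent with Target now dereferencing to its TargetOptions. A short, self-contained sketch of that access pattern, again with simplified stand-in types rather than the actual rustc_target code:

    use std::ops::Deref;

    // Simplified stand-ins, not the real rustc_target types.
    #[derive(Debug, Default)]
    struct TargetOptions {
        lld_flavor: String,
        crt_objects_fallback: Option<String>,
    }

    #[derive(Debug)]
    struct Target {
        llvm_target: String,
        options: TargetOptions,
    }

    // With a Deref impl, `target.lld_flavor` resolves to `target.options.lld_flavor`,
    // which is what lets the checks above drop the explicit `.options.` hops.
    impl Deref for Target {
        type Target = TargetOptions;
        fn deref(&self) -> &TargetOptions {
            &self.options
        }
    }

    fn main() {
        let t = Target {
            llvm_target: "x86_64-unknown-linux-gnu".to_string(),
            options: TargetOptions::default(),
        };
        // Field reached through Deref rather than `t.options.crt_objects_fallback`.
        assert!(t.crt_objects_fallback.is_none());
        println!("{} -> lld_flavor={:?}", t.llvm_target, t.lld_flavor);
    }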
diff --git a/compiler/rustc_target/src/spec/thumb_base.rs b/compiler/rustc_target/src/spec/thumb_base.rs
index 2f7d15d..e550467 100644
--- a/compiler/rustc_target/src/spec/thumb_base.rs
+++ b/compiler/rustc_target/src/spec/thumb_base.rs
@@ -27,11 +27,13 @@
 // differentiate these targets from our other `arm(v7)-*-*-gnueabi(hf)` targets in the context of
 // build scripts / gcc flags.
 
-use crate::spec::{PanicStrategy, RelocModel, TargetOptions};
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, TargetOptions};
 
 pub fn opts() -> TargetOptions {
     // See rust-lang/rfcs#1645 for a discussion about these defaults
     TargetOptions {
+        vendor: String::new(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         executables: true,
         // In most cases, LLD is good enough
         linker: Some("rust-lld".to_string()),
diff --git a/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
index a8c78f0..d87c06d 100644
--- a/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
@@ -8,17 +8,12 @@
 //!
 //! **Important:** This target profile **does not** specify a linker script. You just get the default link script when you build a binary for this target. The default link script is very likely wrong, so you should use `-Clink-arg=-Tmy_script.ld` to override that with a correct linker script.
 
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "thumbv4t-none-eabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "none".to_string(),
-        target_env: "".to_string(),
-        target_vendor: "".to_string(),
+        pointer_width: 32,
         arch: "arm".to_string(),
         /* Data layout args are '-' separated:
          * little endian
@@ -30,8 +25,8 @@
          * All other elements are default
          */
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
-        linker_flavor: LinkerFlavor::Ld,
         options: TargetOptions {
+            linker_flavor: LinkerFlavor::Ld,
             linker: Some("arm-none-eabi-ld".to_string()),
             linker_is_gnu: true,
 
@@ -55,8 +50,9 @@
 
             // don't have atomic compare-and-swap
             atomic_cas: false,
+            has_thumb_interworking: true,
 
             ..super::thumb_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs
index 953d60f..11c8bf4 100644
--- a/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs
@@ -1,19 +1,13 @@
 // Targets the Cortex-M0, Cortex-M0+ and Cortex-M1 processors (ARMv6-M architecture)
 
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "thumbv6m-none-eabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
             // The ARMv6-M architecture doesn't support unaligned loads/stores so we disable them
@@ -24,5 +18,5 @@
             atomic_cas: false,
             ..super::thumb_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs
index 3782802..8131a6e 100644
--- a/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_msvc_base::opts();
 
     // Prevent error LNK2013: BRANCH24(T) fixup overflow
@@ -21,17 +21,11 @@
     // implemented for windows/arm in LLVM
     base.panic_strategy = PanicStrategy::Abort;
 
-    Ok(Target {
+    Target {
         llvm_target: "thumbv7a-pc-windows-msvc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:w-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "msvc".to_string(),
-        target_vendor: "pc".to_string(),
-        linker_flavor: LinkerFlavor::Msvc,
 
         options: TargetOptions {
             features: "+vfp3,+neon".to_string(),
@@ -40,5 +34,5 @@
             unsupported_abis: super::arm_base::unsupported_abis(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv7a_uwp_windows_msvc.rs b/compiler/rustc_target/src/spec/thumbv7a_uwp_windows_msvc.rs
index 29a4a98..a2c1b6b 100644
--- a/compiler/rustc_target/src/spec/thumbv7a_uwp_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/thumbv7a_uwp_windows_msvc.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, PanicStrategy, Target, TargetOptions, TargetResult};
+use crate::spec::{PanicStrategy, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_uwp_msvc_base::opts();
     base.max_atomic_width = Some(64);
     base.has_elf_tls = true;
@@ -9,22 +9,16 @@
     // implemented for windows/arm in LLVM
     base.panic_strategy = PanicStrategy::Abort;
 
-    Ok(Target {
+    Target {
         llvm_target: "thumbv7a-pc-windows-msvc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:w-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "msvc".to_string(),
-        target_vendor: "uwp".to_string(),
-        linker_flavor: LinkerFlavor::Msvc,
         options: TargetOptions {
             features: "+vfp3,+neon".to_string(),
             cpu: "generic".to_string(),
             unsupported_abis: super::arm_base::unsupported_abis(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv7em_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv7em_none_eabi.rs
index 9e08538..141eb7e 100644
--- a/compiler/rustc_target/src/spec/thumbv7em_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv7em_none_eabi.rs
@@ -9,21 +9,15 @@
 // To opt-in to hardware accelerated floating point operations, you can use, for example,
 // `-C target-feature=+vfp4` or `-C target-cpu=cortex-m4`.
 
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "thumbv7em-none-eabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions { max_atomic_width: Some(32), ..super::thumb_base::opts() },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv7em_none_eabihf.rs b/compiler/rustc_target/src/spec/thumbv7em_none_eabihf.rs
index 95b9b9d..f5bd054 100644
--- a/compiler/rustc_target/src/spec/thumbv7em_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/thumbv7em_none_eabihf.rs
@@ -8,20 +8,14 @@
 //
 // To opt into double precision hardware support, use the `-C target-feature=+fp64` flag.
 
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "thumbv7em-none-eabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
             // `+vfp4` is the lowest common denominator between the Cortex-M4 (vfp4-16) and the
@@ -37,5 +31,5 @@
             max_atomic_width: Some(32),
             ..super::thumb_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv7m_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv7m_none_eabi.rs
index 528359f..7af28cd 100644
--- a/compiler/rustc_target/src/spec/thumbv7m_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv7m_none_eabi.rs
@@ -1,20 +1,14 @@
 // Targets the Cortex-M3 processor (ARMv7-M)
 
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "thumbv7m-none-eabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions { max_atomic_width: Some(32), ..super::thumb_base::opts() },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs b/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs
index c52f077..41fdbc2 100644
--- a/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
 // This target is for the Android v7a ABI in thumb mode with
 // NEON unconditionally enabled and, therefore, with 32 FPU registers
@@ -8,23 +8,17 @@
 // See https://developer.android.com/ndk/guides/abis.html#v7a
 // for target ABI requirements.
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::android_base::opts();
     base.features = "+v7,+thumb-mode,+thumb2,+vfp3,+neon".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-march=armv7-a".to_string());
 
-    Ok(Target {
+    Target {
         llvm_target: "armv7-none-linux-android".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "android".to_string(),
-        target_env: "".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs
index 7893694..352d246 100644
--- a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs
+++ b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
 // This target is for glibc Linux on ARMv7 with thumb mode enabled
 // (for consistency with Android and Debian-based distributions)
@@ -6,19 +6,13 @@
 // registers enabled as well. See section A2.6.2 on page A2-56 in
 // https://static.docs.arm.com/ddi0406/cd/DDI0406C_d_armv7ar_arm.pdf
 
-pub fn target() -> TargetResult {
-    let base = super::linux_base::opts();
-    Ok(Target {
+pub fn target() -> Target {
+    let base = super::linux_gnu_base::opts();
+    Target {
         llvm_target: "armv7-unknown-linux-gnueabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: TargetOptions {
             // Info about features at https://wiki.debian.org/ArmHardFloatPort
@@ -28,5 +22,5 @@
             unsupported_abis: super::arm_base::unsupported_abis(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs
index f759c3e..a788167 100644
--- a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs
+++ b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
 // This target is for musl Linux on ARMv7 with thumb mode enabled
 // (for consistency with Android and Debian-based distributions)
@@ -6,22 +6,16 @@
 // registers enabled as well. See section A2.6.2 on page A2-56 in
 // https://static.docs.arm.com/ddi0406/cd/DDI0406C_d_armv7ar_arm.pdf
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let base = super::linux_musl_base::opts();
-    Ok(Target {
+    Target {
         // It's important we use "gnueabihf" and not "musleabihf" here. LLVM
         // uses it to determine the calling convention and float ABI, and LLVM
         // doesn't support the "musleabihf" value.
         llvm_target: "armv7-unknown-linux-gnueabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         // Most of these settings are copied from the thumbv7neon_unknown_linux_gnueabihf
         // target.
@@ -30,8 +24,8 @@
             cpu: "generic".to_string(),
             max_atomic_width: Some(64),
             unsupported_abis: super::arm_base::unsupported_abis(),
-            target_mcount: "\u{1}mcount".to_string(),
+            mcount: "\u{1}mcount".to_string(),
             ..base
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv8m_base_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv8m_base_none_eabi.rs
index 3f67c67..a2200bc 100644
--- a/compiler/rustc_target/src/spec/thumbv8m_base_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv8m_base_none_eabi.rs
@@ -1,19 +1,13 @@
 // Targets the Cortex-M23 processor (Baseline ARMv8-M)
 
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "thumbv8m.base-none-eabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
             // ARMv8-M baseline doesn't support unaligned loads/stores so we disable them
@@ -22,5 +16,5 @@
             max_atomic_width: Some(32),
             ..super::thumb_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv8m_main_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv8m_main_none_eabi.rs
index 2f8103f..67cdbab 100644
--- a/compiler/rustc_target/src/spec/thumbv8m_main_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv8m_main_none_eabi.rs
@@ -1,21 +1,15 @@
 // Targets the Cortex-M33 processor (Armv8-M Mainline architecture profile),
 // without the Floating Point extension.
 
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "thumbv8m.main-none-eabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions { max_atomic_width: Some(32), ..super::thumb_base::opts() },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/thumbv8m_main_none_eabihf.rs b/compiler/rustc_target/src/spec/thumbv8m_main_none_eabihf.rs
index 53a3402..49748f5 100644
--- a/compiler/rustc_target/src/spec/thumbv8m_main_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/thumbv8m_main_none_eabihf.rs
@@ -1,20 +1,14 @@
 // Targets the Cortex-M33 processor (Armv8-M Mainline architecture profile),
 // with the Floating Point extension.
 
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    Ok(Target {
+pub fn target() -> Target {
+    Target {
         llvm_target: "thumbv8m.main-none-eabihf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
-        target_os: "none".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
 
         options: TargetOptions {
             // If the Floating Point extension is implemented in the Cortex-M33
@@ -26,5 +20,5 @@
             max_atomic_width: Some(32),
             ..super::thumb_base::opts()
         },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/uefi_msvc_base.rs b/compiler/rustc_target/src/spec/uefi_msvc_base.rs
index 3f7c78c..79fe774 100644
--- a/compiler/rustc_target/src/spec/uefi_msvc_base.rs
+++ b/compiler/rustc_target/src/spec/uefi_msvc_base.rs
@@ -37,6 +37,8 @@
         .extend(pre_link_args_msvc);
 
     TargetOptions {
+        os: "uefi".to_string(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Link),
         disable_redzone: true,
         exe_suffix: ".efi".to_string(),
         allows_weak_linkage: false,
diff --git a/compiler/rustc_target/src/spec/vxworks_base.rs b/compiler/rustc_target/src/spec/vxworks_base.rs
index 777bb58..70bc9ce 100644
--- a/compiler/rustc_target/src/spec/vxworks_base.rs
+++ b/compiler/rustc_target/src/spec/vxworks_base.rs
@@ -17,11 +17,14 @@
     );
 
     TargetOptions {
+        os: "vxworks".to_string(),
+        env: "gnu".to_string(),
+        vendor: "wrs".to_string(),
         linker: Some("wr-c++".to_string()),
         exe_suffix: ".vxe".to_string(),
         dynamic_linking: true,
         executables: true,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         linker_is_gnu: true,
         has_rpath: true,
         pre_link_args: args,
@@ -31,7 +34,7 @@
         crt_static_respected: true,
         crt_static_allows_dylibs: true,
         // VxWorks needs to implement this to support profiling
-        target_mcount: "_mcount".to_string(),
+        mcount: "_mcount".to_string(),
         ..Default::default()
     }
 }
diff --git a/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs b/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs
index 1916639..c12757b 100644
--- a/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs
+++ b/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs
@@ -1,7 +1,7 @@
 use super::wasm32_base;
 use super::{LinkArgs, LinkerFlavor, PanicStrategy, Target, TargetOptions};
 
-pub fn target() -> Result<Target, String> {
+pub fn target() -> Target {
     let mut post_link_args = LinkArgs::new();
     post_link_args.insert(
         LinkerFlavor::Em,
@@ -17,6 +17,8 @@
     );
 
     let opts = TargetOptions {
+        os: "emscripten".to_string(),
+        linker_flavor: LinkerFlavor::Em,
         // emcc emits two files - a .js file to instantiate the wasm and supply platform
         // functionality, and a .wasm file.
         exe_suffix: ".js".to_string(),
@@ -25,20 +27,14 @@
         is_like_emscripten: true,
         panic_strategy: PanicStrategy::Unwind,
         post_link_args,
-        target_family: Some("unix".to_string()),
+        os_family: Some("unix".to_string()),
         ..wasm32_base::options()
     };
-    Ok(Target {
+    Target {
         llvm_target: "wasm32-unknown-emscripten".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "emscripten".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(),
         arch: "wasm32".to_string(),
-        linker_flavor: LinkerFlavor::Em,
         options: opts,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
index ded95a3..6037aa5 100644
--- a/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
+++ b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
@@ -8,13 +8,15 @@
 //! (e.g. trying to create a TCP stream or something like that).
 //!
 //! This target is more or less managed by the Rust and WebAssembly Working
-//! Group nowadays at https://github.com/rustwasm.
+//! Group nowadays at <https://github.com/rustwasm>.
 
 use super::wasm32_base;
 use super::{LinkerFlavor, LldFlavor, Target};
 
-pub fn target() -> Result<Target, String> {
+pub fn target() -> Target {
     let mut options = wasm32_base::options();
+    options.os = "unknown".to_string();
+    options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm);
     let clang_args = options.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap();
 
     // Make sure clang uses LLD as its linker and is configured appropriately
@@ -30,17 +32,11 @@
         .unwrap()
         .push("--no-entry".to_string());
 
-    Ok(Target {
+    Target {
         llvm_target: "wasm32-unknown-unknown".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "unknown".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(),
         arch: "wasm32".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Wasm),
         options,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/wasm32_wasi.rs b/compiler/rustc_target/src/spec/wasm32_wasi.rs
index 3511671..9c69767 100644
--- a/compiler/rustc_target/src/spec/wasm32_wasi.rs
+++ b/compiler/rustc_target/src/spec/wasm32_wasi.rs
@@ -7,7 +7,7 @@
 //! intended to empower WebAssembly binaries with native capabilities such as
 //! filesystem access, network access, etc.
 //!
-//! You can see more about the proposal at https://wasi.dev
+//! You can see more about the proposal at <https://wasi.dev>.
 //!
 //! The Rust target definition here is interesting in a few ways. We want to
 //! serve two use cases here with this target:
@@ -75,9 +75,12 @@
 use super::wasm32_base;
 use super::{crt_objects, LinkerFlavor, LldFlavor, Target};
 
-pub fn target() -> Result<Target, String> {
+pub fn target() -> Target {
     let mut options = wasm32_base::options();
 
+    options.os = "wasi".to_string();
+    options.vendor = String::new();
+    options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm);
     options
         .pre_link_args
         .entry(LinkerFlavor::Gcc)
@@ -104,17 +107,11 @@
     // `args::args()` makes the WASI API calls itself.
     options.main_needs_argc_argv = false;
 
-    Ok(Target {
+    Target {
         llvm_target: "wasm32-wasi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
-        target_os: "wasi".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(),
         arch: "wasm32".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Wasm),
         options,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/windows_gnu_base.rs b/compiler/rustc_target/src/spec/windows_gnu_base.rs
index 0234ff5..4ae940e 100644
--- a/compiler/rustc_target/src/spec/windows_gnu_base.rs
+++ b/compiler/rustc_target/src/spec/windows_gnu_base.rs
@@ -58,6 +58,9 @@
     late_link_args_static.insert(LinkerFlavor::Lld(LldFlavor::Ld), static_unwind_libs);
 
     TargetOptions {
+        os: "windows".to_string(),
+        env: "gnu".to_string(),
+        vendor: "pc".to_string(),
         // FIXME(#13846) this should be enabled for windows
         function_sections: false,
         linker: Some("gcc".to_string()),
@@ -68,7 +71,7 @@
         exe_suffix: ".exe".to_string(),
         staticlib_prefix: "lib".to_string(),
         staticlib_suffix: ".a".to_string(),
-        target_family: Some("windows".to_string()),
+        os_family: Some("windows".to_string()),
         is_like_windows: true,
         allows_weak_linkage: false,
         pre_link_args,
diff --git a/compiler/rustc_target/src/spec/windows_msvc_base.rs b/compiler/rustc_target/src/spec/windows_msvc_base.rs
index 77171f8..c041245 100644
--- a/compiler/rustc_target/src/spec/windows_msvc_base.rs
+++ b/compiler/rustc_target/src/spec/windows_msvc_base.rs
@@ -4,13 +4,16 @@
     let base = super::msvc_base::opts();
 
     TargetOptions {
+        os: "windows".to_string(),
+        env: "msvc".to_string(),
+        vendor: "pc".to_string(),
         dynamic_linking: true,
         dll_prefix: String::new(),
         dll_suffix: ".dll".to_string(),
         exe_suffix: ".exe".to_string(),
         staticlib_prefix: String::new(),
         staticlib_suffix: ".lib".to_string(),
-        target_family: Some("windows".to_string()),
+        os_family: Some("windows".to_string()),
         crt_static_allows_dylibs: true,
         crt_static_respected: true,
         requires_uwtable: true,
diff --git a/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs b/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs
index fcb2af0..67d1be3 100644
--- a/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs
+++ b/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs
@@ -25,6 +25,7 @@
     late_link_args.insert(LinkerFlavor::Lld(LldFlavor::Ld), mingw_libs);
 
     TargetOptions {
+        vendor: "uwp".to_string(),
         executables: false,
         limit_rdylib_exports: false,
         late_link_args,
diff --git a/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs b/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs
index 04ffa1a..700ee5e 100644
--- a/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs
+++ b/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs
@@ -3,6 +3,7 @@
 pub fn opts() -> TargetOptions {
     let mut opts = super::windows_msvc_base::opts();
 
+    opts.vendor = "uwp".to_string();
     let pre_link_args_msvc = vec!["/APPCONTAINER".to_string(), "mincore.lib".to_string()];
     opts.pre_link_args.get_mut(&LinkerFlavor::Msvc).unwrap().extend(pre_link_args_msvc.clone());
     opts.pre_link_args
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
index 909aebe..edb33fe 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
@@ -1,7 +1,7 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let mut base = super::apple_base::opts();
+pub fn target() -> Target {
+    let mut base = super::apple_base::opts("macos");
     base.cpu = "core2".to_string();
     base.max_atomic_width = Some(128); // core2 supports cmpxchg16b
     base.eliminate_frame_pointer = false;
@@ -18,18 +18,12 @@
     let arch = "x86_64";
     let llvm_target = super::apple_base::macos_llvm_target(&arch);
 
-    Ok(Target {
+    Target {
         llvm_target,
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: arch.to_string(),
-        target_os: "macos".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "\u{1}mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base },
+    }
 }
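For the Apple targets, the base option builders now take the OS as an argument (apple_base::opts("macos") here, apple_sdk_base::opts("ios", Arch::X86_64) below), so target_os/target_vendor no longer live on Target. A minimal sketch of that parameterized-builder pattern, with a simplified, hypothetical helper rather than the real apple_base code:

    // Hypothetical, simplified stand-in for a base-options builder that takes
    // the OS name, mirroring calls like apple_base::opts("macos").
    #[derive(Debug, Default)]
    struct TargetOptions {
        os: String,
        vendor: String,
        cpu: String,
    }

    fn apple_opts(os: &str) -> TargetOptions {
        TargetOptions {
            os: os.to_string(),          // "macos", "ios", "tvos", ...
            vendor: "apple".to_string(), // the vendor is part of the shared base now
            ..Default::default()
        }
    }

    fn main() {
        let mut macos = apple_opts("macos");
        macos.cpu = "core2".to_string(); // per-target tweak layered on top of the base
        let ios = apple_opts("ios");
        println!("{:?} / {:?}", macos, ios);
    }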
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
index fd3e4e2..c9c7eeb 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
@@ -1,20 +1,14 @@
 use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = opts(Arch::X86_64);
-    Ok(Target {
+pub fn target() -> Target {
+    let base = opts("ios", Arch::X86_64);
+    Target {
         llvm_target: "x86_64-apple-ios".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "ios".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { max_atomic_width: Some(64), stack_probes: true, ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
index 4cfbd9e..6b360e5 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
@@ -1,20 +1,14 @@
 use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = opts(Arch::X86_64_macabi);
-    Ok(Target {
+pub fn target() -> Target {
+    let base = opts("ios", Arch::X86_64_macabi);
+    Target {
         llvm_target: "x86_64-apple-ios13.0-macabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "ios".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { max_atomic_width: Some(64), stack_probes: true, ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
index 664a3ed..5b2a62a 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
@@ -1,19 +1,13 @@
 use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{Target, TargetOptions};
 
-pub fn target() -> TargetResult {
-    let base = opts(Arch::X86_64);
-    Ok(Target {
+pub fn target() -> Target {
+    let base = opts("tvos", Arch::X86_64);
+    Target {
         llvm_target: "x86_64-apple-tvos".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
-        target_os: "tvos".to_string(),
-        target_env: String::new(),
-        target_vendor: "apple".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: TargetOptions { max_atomic_width: Some(64), stack_probes: true, ..base },
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs
index 3b5233a..74fb6f0 100644
--- a/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs
+++ b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs
@@ -2,7 +2,7 @@
 
 use super::{LinkerFlavor, LldFlavor, PanicStrategy, Target, TargetOptions};
 
-pub fn target() -> Result<Target, String> {
+pub fn target() -> Target {
     const PRE_LINK_ARGS: &[&str] = &[
         "--as-needed",
         "-z",
@@ -55,6 +55,10 @@
         "TEXT_SIZE",
     ];
     let opts = TargetOptions {
+        os: "unknown".into(),
+        env: "sgx".into(),
+        vendor: "fortanix".into(),
+        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         dynamic_linking: false,
         executables: true,
         linker_is_gnu: true,
@@ -74,18 +78,12 @@
         relax_elf_relocations: true,
         ..Default::default()
     };
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-elf".into(),
-        target_endian: "little".into(),
-        target_pointer_width: "64".into(),
-        target_c_int_width: "32".into(),
-        target_os: "unknown".into(),
-        target_env: "sgx".into(),
-        target_vendor: "fortanix".into(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .into(),
         arch: "x86_64".into(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: opts,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_fuchsia.rs b/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
index 37b6d57..6c049c2 100644
--- a/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
+++ b/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
@@ -1,23 +1,17 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::fuchsia_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-fuchsia".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "fuchsia".to_string(),
-        target_env: String::new(),
-        target_vendor: String::new(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_linux_android.rs b/compiler/rustc_target/src/spec/x86_64_linux_android.rs
index 74097f5..2732716 100644
--- a/compiler/rustc_target/src/spec/x86_64_linux_android.rs
+++ b/compiler/rustc_target/src/spec/x86_64_linux_android.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::android_base::opts();
     base.cpu = "x86-64".to_string();
     // https://developer.android.com/ndk/guides/abis.html#86-64
@@ -9,18 +9,12 @@
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-linux-android".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "android".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_linux_kernel.rs b/compiler/rustc_target/src/spec/x86_64_linux_kernel.rs
index 65bb97d..43e683d 100644
--- a/compiler/rustc_target/src/spec/x86_64_linux_kernel.rs
+++ b/compiler/rustc_target/src/spec/x86_64_linux_kernel.rs
@@ -1,9 +1,9 @@
 // This defines the amd64 target for the Linux Kernel. See the linux-kernel-base module for
 // generic Linux kernel options.
 
-use crate::spec::{CodeModel, LinkerFlavor, Target, TargetResult};
+use crate::spec::{CodeModel, LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_kernel_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
@@ -13,20 +13,14 @@
     base.code_model = Some(CodeModel::Kernel);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
 
-    Ok(Target {
+    Target {
         // FIXME: Some dispute, the linux-on-clang folks think this should use "Linux"
         llvm_target: "x86_64-elf".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
-        target_os: "none".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
         arch: "x86_64".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
 
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs
index 99af483..e8dee94 100644
--- a/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs
+++ b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, LldFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_gnu_base::opts();
     base.cpu = "x86-64".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
@@ -9,18 +9,12 @@
     base.max_atomic_width = Some(64);
     base.linker = Some("x86_64-w64-mingw32-gcc".to_string());
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-pc-windows-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "pc".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/x86_64_pc_windows_msvc.rs
index 75ff6b9..72bbb10 100644
--- a/compiler/rustc_target/src/spec/x86_64_pc_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/x86_64_pc_windows_msvc.rs
@@ -1,23 +1,17 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_msvc_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.has_elf_tls = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-pc-windows-msvc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "msvc".to_string(),
-        target_vendor: "pc".to_string(),
-        linker_flavor: LinkerFlavor::Msvc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs b/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs
index fbade02..095c6f1 100644
--- a/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs
@@ -1,7 +1,8 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::netbsd_base::opts();
+    base.vendor = "rumprun".to_string();
     base.cpu = "x86-64".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.linker = Some("x86_64-rumprun-netbsd-gcc".to_string());
@@ -13,18 +14,12 @@
     base.disable_redzone = true;
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-rumprun-netbsd".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "netbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "rumprun".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "__mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "__mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs b/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
index 53f4df9..6ccf784 100644
--- a/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
+++ b/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::solaris_base::opts();
     base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m64".to_string()]);
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-pc-solaris".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "solaris".to_string(),
-        target_env: String::new(),
-        target_vendor: "sun".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_cloudabi.rs b/compiler/rustc_target/src/spec/x86_64_unknown_cloudabi.rs
index dbc5f96..cf57f4e 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_cloudabi.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_cloudabi.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::cloudabi_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
@@ -8,18 +8,12 @@
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-cloudabi".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "cloudabi".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs b/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
index fd1871b..30aa290 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::dragonfly_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-dragonfly".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "dragonfly".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
index a124f58..ee904d7 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::freebsd_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-freebsd".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "freebsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs b/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
index 5123769..ea7e068 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::haiku_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
@@ -9,18 +9,12 @@
     // This option is required to build executables on Haiku x86_64
     base.position_independent_executables = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-haiku".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "haiku".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
index 4a526f9..4005aaf 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::hermit_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.features = "+rdrnd,+rdseed".to_string();
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-hermit".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "hermit".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_hermit_kernel.rs b/compiler/rustc_target/src/spec/x86_64_unknown_hermit_kernel.rs
index c25cd08..b72d529 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_hermit_kernel.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_hermit_kernel.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::hermit_kernel_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
@@ -9,18 +9,12 @@
             .to_string();
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-hermit".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "hermit".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs b/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs
index 2567ca4..d3f9349 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs
@@ -1,25 +1,19 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::illumos_base::opts();
     base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m64".to_string(), "-std=c99".to_string()]);
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         // LLVM does not currently have a separate illumos target,
         // so we still pass Solaris to it
         llvm_target: "x86_64-pc-solaris".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "illumos".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs b/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs
index cab19f1..1fbd0bb 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs
@@ -1,22 +1,16 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::l4re_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-l4re-uclibc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "l4re".to_string(),
-        target_env: "uclibc".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Ld,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
index 29cbb77..f127dd4 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
index 0a37399..0cae575 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
@@ -1,7 +1,7 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
-    let mut base = super::linux_base::opts();
+pub fn target() -> Target {
+    let mut base = super::linux_gnu_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mx32".to_string());
@@ -11,19 +11,13 @@
     // breaks code gen. See LLVM bug 36743
     base.needs_plt = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-linux-gnux32".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 32,
         data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
             i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
index 3a22290..3669c10 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::linux_musl_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
@@ -8,18 +8,12 @@
     base.stack_probes = true;
     base.static_position_independent_executables = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-linux-musl".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "musl".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
index adf09c8..7e91a6d 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::netbsd_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-netbsd".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "netbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
-        options: TargetOptions { target_mcount: "__mcount".to_string(), ..base },
-    })
+        options: TargetOptions { mcount: "__mcount".to_string(), ..base },
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
index dbd163d..0fe01f0 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::openbsd_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-openbsd".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "openbsd".to_string(),
-        target_env: String::new(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs b/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
index 3d40baf..cdd445b 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
@@ -1,24 +1,18 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::redox_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.stack_probes = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-redox".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "redox".to_string(),
-        target_env: "relibc".to_string(),
-        target_vendor: "unknown".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs b/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs
index 849227a..b7dcce5 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs
@@ -5,9 +5,9 @@
 // The win64 ABI is used. It differs from the sysv64 ABI, so we must use a windows target with
 // LLVM. "x86_64-unknown-windows" is used to get the minimal subset of windows-specific features.
 
-use crate::spec::{CodeModel, LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::{CodeModel, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::uefi_msvc_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
@@ -28,19 +28,13 @@
     // places no locality-restrictions, so it fits well here.
     base.code_model = Some(CodeModel::Large);
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-windows".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
-        target_os: "uefi".to_string(),
-        target_env: "".to_string(),
-        target_vendor: "unknown".to_string(),
         arch: "x86_64".to_string(),
-        linker_flavor: LinkerFlavor::Lld(LldFlavor::Link),
 
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs b/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs
index 3bd18f2..e2ba553 100644
--- a/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs
+++ b/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, LldFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, LldFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_uwp_gnu_base::opts();
     base.cpu = "x86-64".to_string();
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
@@ -8,18 +8,12 @@
         .insert(LinkerFlavor::Lld(LldFlavor::Ld), vec!["-m".to_string(), "i386pep".to_string()]);
     base.max_atomic_width = Some(64);
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-pc-windows-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "uwp".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_uwp_windows_msvc.rs b/compiler/rustc_target/src/spec/x86_64_uwp_windows_msvc.rs
index 258df01..27c579e 100644
--- a/compiler/rustc_target/src/spec/x86_64_uwp_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/x86_64_uwp_windows_msvc.rs
@@ -1,23 +1,17 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::Target;
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::windows_uwp_msvc_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
     base.has_elf_tls = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-pc-windows-msvc".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "windows".to_string(),
-        target_env: "msvc".to_string(),
-        target_vendor: "uwp".to_string(),
-        linker_flavor: LinkerFlavor::Msvc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
index f1e27f4..163af6f 100644
--- a/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
@@ -1,6 +1,6 @@
-use crate::spec::{LinkerFlavor, Target, TargetResult};
+use crate::spec::{LinkerFlavor, Target};
 
-pub fn target() -> TargetResult {
+pub fn target() -> Target {
     let mut base = super::vxworks_base::opts();
     base.cpu = "x86-64".to_string();
     base.max_atomic_width = Some(64);
@@ -8,18 +8,12 @@
     base.stack_probes = true;
     base.disable_redzone = true;
 
-    Ok(Target {
+    Target {
         llvm_target: "x86_64-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "64".to_string(),
-        target_c_int_width: "32".to_string(),
+        pointer_width: 64,
         data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
             .to_string(),
         arch: "x86_64".to_string(),
-        target_os: "vxworks".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "wrs".to_string(),
-        linker_flavor: LinkerFlavor::Gcc,
         options: base,
-    })
+    }
 }
diff --git a/compiler/rustc_trait_selection/src/lib.rs b/compiler/rustc_trait_selection/src/lib.rs
index ddeab34..42509cd 100644
--- a/compiler/rustc_trait_selection/src/lib.rs
+++ b/compiler/rustc_trait_selection/src/lib.rs
@@ -11,6 +11,7 @@
 //! This API is completely unstable and subject to change.
 
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(array_value_iter)]
 #![feature(bool_to_option)]
 #![feature(box_patterns)]
 #![feature(drain_filter)]
@@ -18,6 +19,7 @@
 #![feature(never_type)]
 #![feature(crate_visibility_modifier)]
 #![feature(or_patterns)]
+#![feature(control_flow_enum)]
 #![recursion_limit = "512"] // For rustdoc
 
 #[macro_use]
diff --git a/compiler/rustc_trait_selection/src/opaque_types.rs b/compiler/rustc_trait_selection/src/opaque_types.rs
index 28697ec..914fa1e 100644
--- a/compiler/rustc_trait_selection/src/opaque_types.rs
+++ b/compiler/rustc_trait_selection/src/opaque_types.rs
@@ -10,11 +10,13 @@
 use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
 use rustc_infer::infer::{self, InferCtxt, InferOk};
 use rustc_middle::ty::fold::{BottomUpFolder, TypeFoldable, TypeFolder, TypeVisitor};
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, SubstsRef};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_session::config::nightly_options;
 use rustc_span::Span;
 
+use std::ops::ControlFlow;
+
 pub type OpaqueTypeMap<'tcx> = DefIdMap<OpaqueTypeDecl<'tcx>>;
 
 /// Information about the opaque types whose values we
@@ -38,13 +40,13 @@
     /// then `substs` would be `['a, T]`.
     pub substs: SubstsRef<'tcx>,
 
-    /// The span of this particular definition of the opaque type.  So
+    /// The span of this particular definition of the opaque type. So
     /// for example:
     ///
-    /// ```
+    /// ```ignore (incomplete snippet)
     /// type Foo = impl Baz;
     /// fn bar() -> Foo {
-    ///             ^^^ This is the span we are looking for!
+    /// //          ^^^ This is the span we are looking for!
     /// ```
     ///
     /// In cases where the fn returns `(impl Trait, impl Trait)` or
@@ -428,14 +430,15 @@
 
         // If there are required region bounds, we can use them.
         if opaque_defn.has_required_region_bounds {
-            let predicates_of = tcx.predicates_of(def_id);
-            debug!("constrain_opaque_type: predicates: {:#?}", predicates_of,);
-            let bounds = predicates_of.instantiate(tcx, opaque_defn.substs);
+            let bounds = tcx.explicit_item_bounds(def_id);
+            debug!("constrain_opaque_type: predicates: {:#?}", bounds);
+            let bounds: Vec<_> =
+                bounds.iter().map(|(bound, _)| bound.subst(tcx, opaque_defn.substs)).collect();
             debug!("constrain_opaque_type: bounds={:#?}", bounds);
             let opaque_type = tcx.mk_opaque(def_id, opaque_defn.substs);
 
             let required_region_bounds =
-                required_region_bounds(tcx, opaque_type, bounds.predicates.into_iter());
+                required_region_bounds(tcx, opaque_type, bounds.into_iter());
             debug_assert!(!required_region_bounds.is_empty());
 
             for required_region in required_region_bounds {
@@ -690,32 +693,34 @@
 where
     OP: FnMut(ty::Region<'tcx>),
 {
-    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> bool {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ControlFlow<()> {
         t.as_ref().skip_binder().visit_with(self);
-        false // keep visiting
+        ControlFlow::CONTINUE
     }
 
-    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
         match *r {
             // ignore bound regions, keep visiting
-            ty::ReLateBound(_, _) => false,
+            ty::ReLateBound(_, _) => ControlFlow::CONTINUE,
             _ => {
                 (self.op)(r);
-                false
+                ControlFlow::CONTINUE
             }
         }
     }
 
-    fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
         // We're only interested in types involving regions
         if !ty.flags().intersects(ty::TypeFlags::HAS_FREE_REGIONS) {
-            return false; // keep visiting
+            return ControlFlow::CONTINUE;
         }
 
         match ty.kind() {
             ty::Closure(_, ref substs) => {
                 // Skip lifetime parameters of the enclosing item(s)
 
+                substs.as_closure().tupled_upvars_ty().visit_with(self);
+
                 for upvar_ty in substs.as_closure().upvar_tys() {
                     upvar_ty.visit_with(self);
                 }
@@ -727,6 +732,8 @@
                 // Skip lifetime parameters of the enclosing item(s)
                 // Also skip the witness type, because that has no free regions.
 
+                substs.as_generator().tupled_upvars_ty().visit_with(self);
+
                 for upvar_ty in substs.as_generator().upvar_tys() {
                     upvar_ty.visit_with(self);
                 }
@@ -740,7 +747,7 @@
             }
         }
 
-        false
+        ControlFlow::CONTINUE
     }
 }
 
@@ -1112,9 +1119,10 @@
         let ty_var = infcx
             .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span });
 
-        let predicates_of = tcx.predicates_of(def_id);
-        debug!("instantiate_opaque_types: predicates={:#?}", predicates_of,);
-        let bounds = predicates_of.instantiate(tcx, substs);
+        let item_bounds = tcx.explicit_item_bounds(def_id);
+        debug!("instantiate_opaque_types: bounds={:#?}", item_bounds);
+        let bounds: Vec<_> =
+            item_bounds.iter().map(|(bound, _)| bound.subst(tcx, substs)).collect();
 
         let param_env = tcx.param_env(def_id);
         let InferOk { value: bounds, obligations } =
@@ -1123,8 +1131,7 @@
 
         debug!("instantiate_opaque_types: bounds={:?}", bounds);
 
-        let required_region_bounds =
-            required_region_bounds(tcx, ty, bounds.predicates.iter().cloned());
+        let required_region_bounds = required_region_bounds(tcx, ty, bounds.iter().copied());
         debug!("instantiate_opaque_types: required_region_bounds={:?}", required_region_bounds);
 
         // Make sure that we are in fact defining the *entire* type
@@ -1153,7 +1160,7 @@
         );
         debug!("instantiate_opaque_types: ty_var={:?}", ty_var);
 
-        for predicate in &bounds.predicates {
+        for predicate in &bounds {
             if let ty::PredicateAtom::Projection(projection) = predicate.skip_binders() {
                 if projection.ty.references_error() {
                     // No point on adding these obligations since there's a type error involved.
@@ -1162,14 +1169,14 @@
             }
         }
 
-        self.obligations.reserve(bounds.predicates.len());
-        for predicate in bounds.predicates {
+        self.obligations.reserve(bounds.len());
+        for predicate in bounds {
             // Change the predicate to refer to the type variable,
             // which will be the concrete type instead of the opaque type.
             // This also instantiates nested instances of `impl Trait`.
             let predicate = self.instantiate_opaque_types_in_map(&predicate);
 
-            let cause = traits::ObligationCause::new(span, self.body_id, traits::SizedReturnType);
+            let cause = traits::ObligationCause::new(span, self.body_id, traits::MiscObligation);
 
             // Require that the predicate holds for the concrete type.
             debug!("instantiate_opaque_types: predicate={:?}", predicate);
diff --git a/compiler/rustc_trait_selection/src/traits/auto_trait.rs b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
index e400672..93a0073 100644
--- a/compiler/rustc_trait_selection/src/traits/auto_trait.rs
+++ b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
@@ -642,7 +642,8 @@
             // We check this by calling is_of_param on the relevant types
             // from the various possible predicates
 
-            match predicate.skip_binders() {
+            let bound_predicate = predicate.bound_atom();
+            match bound_predicate.skip_binder() {
                 ty::PredicateAtom::Trait(p, _) => {
                     if self.is_param_no_infer(p.trait_ref.substs)
                         && !only_projections
@@ -650,10 +651,10 @@
                     {
                         self.add_user_pred(computed_preds, predicate);
                     }
-                    predicates.push_back(ty::Binder::bind(p));
+                    predicates.push_back(bound_predicate.rebind(p));
                 }
                 ty::PredicateAtom::Projection(p) => {
-                    let p = ty::Binder::bind(p);
+                    let p = bound_predicate.rebind(p);
                     debug!(
                         "evaluate_nested_obligations: examining projection predicate {:?}",
                         predicate
@@ -783,13 +784,13 @@
                     }
                 }
                 ty::PredicateAtom::RegionOutlives(binder) => {
-                    let binder = ty::Binder::bind(binder);
+                    let binder = bound_predicate.rebind(binder);
                     if select.infcx().region_outlives_predicate(&dummy_cause, binder).is_err() {
                         return false;
                     }
                 }
                 ty::PredicateAtom::TypeOutlives(binder) => {
-                    let binder = ty::Binder::bind(binder);
+                    let binder = bound_predicate.rebind(binder);
                     match (
                         binder.no_bound_vars(),
                         binder.map_bound_ref(|pred| pred.0).no_bound_vars(),
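
(The hunks in auto_trait.rs above and error_reporting below replace `ty::Binder::bind(p)` with `bound_predicate.rebind(p)` after `predicate.bound_atom()`. A hypothetical toy `Binder`, not rustc_middle's, to show the point of rebinding: it reuses the original binder's bound-variable information instead of inventing a fresh binder.)

```rust
// Toy model only; field names and signatures are illustrative.
#[derive(Clone, Copy, Debug)]
struct Binder<T> {
    bound_vars: usize, // stand-in for the binder's bound-variable info
    value: T,
}

impl<T> Binder<T> {
    fn skip_binder(self) -> T {
        self.value
    }
    fn rebind<U>(&self, value: U) -> Binder<U> {
        // Re-wrap a derived value under the *same* binder.
        Binder { bound_vars: self.bound_vars, value }
    }
}

fn main() {
    let bound_predicate = Binder { bound_vars: 1, value: ("TraitPred", "T: Send") };
    let p = bound_predicate.rebind(bound_predicate.value.1);
    assert_eq!(p.bound_vars, 1); // binder information preserved
}
```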
diff --git a/compiler/rustc_trait_selection/src/traits/codegen/mod.rs b/compiler/rustc_trait_selection/src/traits/codegen.rs
similarity index 93%
rename from compiler/rustc_trait_selection/src/traits/codegen/mod.rs
rename to compiler/rustc_trait_selection/src/traits/codegen.rs
index 6b1ed5f..3cb6ec8 100644
--- a/compiler/rustc_trait_selection/src/traits/codegen/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/codegen.rs
@@ -17,14 +17,17 @@
 /// (necessarily) resolve all nested obligations on the impl. Note
 /// that type check should guarantee to us that all nested
 /// obligations *could be* resolved if we wanted to.
+///
 /// Assumes that this is run after the entire crate has been successfully type-checked.
+/// This also expects that `trait_ref` is fully normalized.
 pub fn codegen_fulfill_obligation<'tcx>(
-    ty: TyCtxt<'tcx>,
+    tcx: TyCtxt<'tcx>,
     (param_env, trait_ref): (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>),
 ) -> Result<ImplSource<'tcx, ()>, ErrorReported> {
     // Remove any references to regions; this helps improve caching.
-    let trait_ref = ty.erase_regions(&trait_ref);
-
+    let trait_ref = tcx.erase_regions(&trait_ref);
+    // We expect the input to be fully normalized.
+    debug_assert_eq!(trait_ref, tcx.normalize_erasing_regions(param_env, trait_ref));
     debug!(
         "codegen_fulfill_obligation(trait_ref={:?}, def_id={:?})",
         (param_env, trait_ref),
@@ -33,7 +36,7 @@
 
     // Do the initial selection for the obligation. This yields the
     // shallow result we are looking for -- that is, what specific impl.
-    ty.infer_ctxt().enter(|infcx| {
+    tcx.infer_ctxt().enter(|infcx| {
         let mut selcx = SelectionContext::new(&infcx);
 
         let obligation_cause = ObligationCause::dummy();
diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
index 3828cf4..e1721a5 100644
--- a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
+++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
@@ -24,6 +24,7 @@
 use rustc_span::Span;
 
 use std::cmp;
+use std::ops::ControlFlow;
 
 /// Check if a given constant can be evaluated.
 pub fn is_const_evaluatable<'cx, 'tcx>(
@@ -85,8 +86,12 @@
                         } else if leaf.has_param_types_or_consts() {
                             failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
                         }
+
+                        ControlFlow::CONTINUE
                     }
-                    Node::Binop(_, _, _) | Node::UnaryOp(_, _) | Node::FunctionCall(_, _) => (),
+                    Node::Binop(_, _, _) | Node::UnaryOp(_, _) | Node::FunctionCall(_, _) => {
+                        ControlFlow::CONTINUE
+                    }
                 });
 
                 match failure_kind {
@@ -147,11 +152,7 @@
     if concrete.is_ok() && substs.has_param_types_or_consts() {
         match infcx.tcx.def_kind(def.did) {
             DefKind::AnonConst => {
-                let mir_body = if let Some(def) = def.as_const_arg() {
-                    infcx.tcx.optimized_mir_of_const_arg(def)
-                } else {
-                    infcx.tcx.optimized_mir(def.did)
-                };
+                let mir_body = infcx.tcx.optimized_mir_opt_const_arg(def);
 
                 if mir_body.is_polymorphic {
                     future_compat_lint();
@@ -198,12 +199,12 @@
 ///
 /// This is only able to represent a subset of `MIR`,
 /// and should not leak any information about desugarings.
-#[derive(Clone, Copy)]
+#[derive(Debug, Clone, Copy)]
 pub struct AbstractConst<'tcx> {
     // FIXME: Consider adding something like `IndexSlice`
     // and use this here.
-    inner: &'tcx [Node<'tcx>],
-    substs: SubstsRef<'tcx>,
+    pub inner: &'tcx [Node<'tcx>],
+    pub substs: SubstsRef<'tcx>,
 }
 
 impl AbstractConst<'tcx> {
@@ -212,16 +213,22 @@
         def: ty::WithOptConstParam<DefId>,
         substs: SubstsRef<'tcx>,
     ) -> Result<Option<AbstractConst<'tcx>>, ErrorReported> {
-        let inner = match (def.did.as_local(), def.const_param_did) {
-            (Some(did), Some(param_did)) => {
-                tcx.mir_abstract_const_of_const_arg((did, param_did))?
-            }
-            _ => tcx.mir_abstract_const(def.did)?,
-        };
-
+        let inner = tcx.mir_abstract_const_opt_const_arg(def)?;
+        debug!("AbstractConst::new({:?}) = {:?}", def, inner);
         Ok(inner.map(|inner| AbstractConst { inner, substs }))
     }
 
+    pub fn from_const(
+        tcx: TyCtxt<'tcx>,
+        ct: &ty::Const<'tcx>,
+    ) -> Result<Option<AbstractConst<'tcx>>, ErrorReported> {
+        match ct.val {
+            ty::ConstKind::Unevaluated(def, substs, None) => AbstractConst::new(tcx, def, substs),
+            ty::ConstKind::Error(_) => Err(ErrorReported),
+            _ => Ok(None),
+        }
+    }
+
     #[inline]
     pub fn subtree(self, node: NodeId) -> AbstractConst<'tcx> {
         AbstractConst { inner: &self.inner[..=node.index()], substs: self.substs }
@@ -233,11 +240,23 @@
     }
 }
 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+struct WorkNode<'tcx> {
+    node: Node<'tcx>,
+    span: Span,
+    used: bool,
+}
+
 struct AbstractConstBuilder<'a, 'tcx> {
     tcx: TyCtxt<'tcx>,
     body: &'a mir::Body<'tcx>,
     /// The current WIP node tree.
-    nodes: IndexVec<NodeId, Node<'tcx>>,
+    ///
+    /// We require all nodes to be used in the final abstract const,
+    /// so we store this here. Note that we also consider nodes as used
+    /// if they are mentioned in an assert, so some used nodes are never
+    /// actually reachable by walking the [`AbstractConst`].
+    nodes: IndexVec<NodeId, WorkNode<'tcx>>,
     locals: IndexVec<mir::Local, NodeId>,
     /// We only allow field accesses if they access
     /// the result of a checked operation.
@@ -284,6 +303,27 @@
         Ok(Some(builder))
     }
 
+    fn add_node(&mut self, node: Node<'tcx>, span: Span) -> NodeId {
+        // Mark used nodes.
+        match node {
+            Node::Leaf(_) => (),
+            Node::Binop(_, lhs, rhs) => {
+                self.nodes[lhs].used = true;
+                self.nodes[rhs].used = true;
+            }
+            Node::UnaryOp(_, input) => {
+                self.nodes[input].used = true;
+            }
+            Node::FunctionCall(func, nodes) => {
+                self.nodes[func].used = true;
+                nodes.iter().for_each(|&n| self.nodes[n].used = true);
+            }
+        }
+
+        // Nodes start as unused.
+        self.nodes.push(WorkNode { node, span, used: false })
+    }
+
     fn place_to_local(
         &mut self,
         span: Span,
@@ -321,7 +361,7 @@
                 let local = self.place_to_local(span, p)?;
                 Ok(self.locals[local])
             }
-            mir::Operand::Constant(ct) => Ok(self.nodes.push(Node::Leaf(ct.literal))),
+            mir::Operand::Constant(ct) => Ok(self.add_node(Node::Leaf(ct.literal), span)),
         }
     }
 
@@ -346,19 +386,19 @@
 
     fn build_statement(&mut self, stmt: &mir::Statement<'tcx>) -> Result<(), ErrorReported> {
         debug!("AbstractConstBuilder: stmt={:?}", stmt);
+        let span = stmt.source_info.span;
         match stmt.kind {
             StatementKind::Assign(box (ref place, ref rvalue)) => {
-                let local = self.place_to_local(stmt.source_info.span, place)?;
+                let local = self.place_to_local(span, place)?;
                 match *rvalue {
                     Rvalue::Use(ref operand) => {
-                        self.locals[local] =
-                            self.operand_to_node(stmt.source_info.span, operand)?;
+                        self.locals[local] = self.operand_to_node(span, operand)?;
                         Ok(())
                     }
                     Rvalue::BinaryOp(op, ref lhs, ref rhs) if Self::check_binop(op) => {
-                        let lhs = self.operand_to_node(stmt.source_info.span, lhs)?;
-                        let rhs = self.operand_to_node(stmt.source_info.span, rhs)?;
-                        self.locals[local] = self.nodes.push(Node::Binop(op, lhs, rhs));
+                        let lhs = self.operand_to_node(span, lhs)?;
+                        let rhs = self.operand_to_node(span, rhs)?;
+                        self.locals[local] = self.add_node(Node::Binop(op, lhs, rhs), span);
                         if op.is_checkable() {
                             bug!("unexpected unchecked checkable binary operation");
                         } else {
@@ -366,18 +406,18 @@
                         }
                     }
                     Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) if Self::check_binop(op) => {
-                        let lhs = self.operand_to_node(stmt.source_info.span, lhs)?;
-                        let rhs = self.operand_to_node(stmt.source_info.span, rhs)?;
-                        self.locals[local] = self.nodes.push(Node::Binop(op, lhs, rhs));
+                        let lhs = self.operand_to_node(span, lhs)?;
+                        let rhs = self.operand_to_node(span, rhs)?;
+                        self.locals[local] = self.add_node(Node::Binop(op, lhs, rhs), span);
                         self.checked_op_locals.insert(local);
                         Ok(())
                     }
                     Rvalue::UnaryOp(op, ref operand) if Self::check_unop(op) => {
-                        let operand = self.operand_to_node(stmt.source_info.span, operand)?;
-                        self.locals[local] = self.nodes.push(Node::UnaryOp(op, operand));
+                        let operand = self.operand_to_node(span, operand)?;
+                        self.locals[local] = self.add_node(Node::UnaryOp(op, operand), span);
                         Ok(())
                     }
-                    _ => self.error(Some(stmt.source_info.span), "unsupported rvalue")?,
+                    _ => self.error(Some(span), "unsupported rvalue")?,
                 }
             }
             // These are not actually relevant for us here, so we can ignore them.
@@ -425,13 +465,9 @@
                         .map(|arg| self.operand_to_node(terminator.source_info.span, arg))
                         .collect::<Result<Vec<NodeId>, _>>()?,
                 );
-                self.locals[local] = self.nodes.push(Node::FunctionCall(func, args));
+                self.locals[local] = self.add_node(Node::FunctionCall(func, args), fn_span);
                 Ok(Some(target))
             }
-            // We only allow asserts for checked operations.
-            //
-            // These asserts seem to all have the form `!_local.0` so
-            // we only allow exactly that.
             TerminatorKind::Assert { ref cond, expected: false, target, .. } => {
                 let p = match cond {
                     mir::Operand::Copy(p) | mir::Operand::Move(p) => p,
@@ -440,7 +476,15 @@
 
                 const ONE_FIELD: mir::Field = mir::Field::from_usize(1);
                 debug!("proj: {:?}", p.projection);
-                if let &[mir::ProjectionElem::Field(ONE_FIELD, _)] = p.projection.as_ref() {
+                if let Some(p) = p.as_local() {
+                    debug_assert!(!self.checked_op_locals.contains(p));
+                    // Mark locals directly used in asserts as used.
+                    //
+                    // This is needed because division does not use `CheckedBinop` but instead
+                    // adds an explicit assert for `divisor != 0`.
+                    self.nodes[self.locals[p]].used = true;
+                    return Ok(Some(target));
+                } else if let &[mir::ProjectionElem::Field(ONE_FIELD, _)] = p.projection.as_ref() {
                     // Only allow asserts checking the result of a checked operation.
                     if self.checked_op_locals.contains(p.local) {
                         return Ok(Some(target));
@@ -467,7 +511,20 @@
             if let Some(next) = self.build_terminator(block.terminator())? {
                 block = &self.body.basic_blocks()[next];
             } else {
-                return Ok(self.tcx.arena.alloc_from_iter(self.nodes));
+                assert_eq!(self.locals[mir::RETURN_PLACE], self.nodes.last().unwrap());
+                // `AbstractConst`s should not contain any promoteds as they require references which
+                // are not allowed.
+                assert!(!self.nodes.iter().any(|n| matches!(
+                    n.node,
+                    Node::Leaf(ty::Const { val: ty::ConstKind::Unevaluated(_, _, Some(_)), ty: _ })
+                )));
+
+                self.nodes[self.locals[mir::RETURN_PLACE]].used = true;
+                if let Some(&unused) = self.nodes.iter().find(|n| !n.used) {
+                    self.error(Some(unused.span), "dead code")?;
+                }
+
+                return Ok(self.tcx.arena.alloc_from_iter(self.nodes.into_iter().map(|n| n.node)));
             }
         }
     }
@@ -517,31 +574,36 @@
     // on `ErrorReported`.
 }
 
-fn walk_abstract_const<'tcx, F>(tcx: TyCtxt<'tcx>, ct: AbstractConst<'tcx>, mut f: F)
+pub fn walk_abstract_const<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    ct: AbstractConst<'tcx>,
+    mut f: F,
+) -> ControlFlow<()>
 where
-    F: FnMut(Node<'tcx>),
+    F: FnMut(Node<'tcx>) -> ControlFlow<()>,
 {
-    recurse(tcx, ct, &mut f);
-    fn recurse<'tcx>(tcx: TyCtxt<'tcx>, ct: AbstractConst<'tcx>, f: &mut dyn FnMut(Node<'tcx>)) {
+    fn recurse<'tcx>(
+        tcx: TyCtxt<'tcx>,
+        ct: AbstractConst<'tcx>,
+        f: &mut dyn FnMut(Node<'tcx>) -> ControlFlow<()>,
+    ) -> ControlFlow<()> {
         let root = ct.root();
-        f(root);
+        f(root)?;
         match root {
-            Node::Leaf(_) => (),
+            Node::Leaf(_) => ControlFlow::CONTINUE,
             Node::Binop(_, l, r) => {
-                recurse(tcx, ct.subtree(l), f);
-                recurse(tcx, ct.subtree(r), f);
+                recurse(tcx, ct.subtree(l), f)?;
+                recurse(tcx, ct.subtree(r), f)
             }
-            Node::UnaryOp(_, v) => {
-                recurse(tcx, ct.subtree(v), f);
-            }
+            Node::UnaryOp(_, v) => recurse(tcx, ct.subtree(v), f),
             Node::FunctionCall(func, args) => {
-                recurse(tcx, ct.subtree(func), f);
-                for &arg in args {
-                    recurse(tcx, ct.subtree(arg), f);
-                }
+                recurse(tcx, ct.subtree(func), f)?;
+                args.iter().try_for_each(|&arg| recurse(tcx, ct.subtree(arg), f))
             }
         }
     }
+
+    recurse(tcx, ct, &mut f)
 }
 
 /// Tries to unify two abstract constants using structural equality.
@@ -554,6 +616,10 @@
         (Node::Leaf(a_ct), Node::Leaf(b_ct)) => {
             let a_ct = a_ct.subst(tcx, a.substs);
             let b_ct = b_ct.subst(tcx, b.substs);
+            if a_ct.ty != b_ct.ty {
+                return false;
+            }
+
             match (a_ct.val, b_ct.val) {
                 // We can just unify errors with everything to reduce the amount of
                 // emitted errors here.
@@ -566,6 +632,12 @@
                 // we do not want to use `assert_eq!(a(), b())` to infer that `N` and `M` have to be `1`. This
                 // means that we only allow inference variables if they are equal.
                 (ty::ConstKind::Infer(a_val), ty::ConstKind::Infer(b_val)) => a_val == b_val,
+                // We may want to instead recurse into unevaluated constants here. That may require some
+                // care to prevent infinite recursion, so let's just ignore this for now.
+                (
+                    ty::ConstKind::Unevaluated(a_def, a_substs, None),
+                    ty::ConstKind::Unevaluated(b_def, b_substs, None),
+                ) => a_def == b_def && a_substs == b_substs,
                 // FIXME(const_evaluatable_checked): We may want to either actually try
                 // to evaluate `a_ct` and `b_ct` if they are fully concrete or something like
                 // this, for now we just return false here.
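
The `walk_abstract_const` change above threads `ControlFlow` through every recursive call so a caller can stop the walk early. A minimal standalone sketch of that shape, using a hypothetical `Expr` tree and `walk` helper in place of `AbstractConst` and its nodes:

    use std::ops::ControlFlow;

    // Hypothetical stand-in for the abstract-const node tree.
    enum Expr {
        Leaf(i64),
        Binop(Box<Expr>, Box<Expr>),
    }

    // Visit every node, letting the callback stop the walk early; each recursive
    // call's ControlFlow is propagated instead of being discarded.
    fn walk(e: &Expr, f: &mut dyn FnMut(&Expr) -> ControlFlow<()>) -> ControlFlow<()> {
        if f(e).is_break() {
            return ControlFlow::Break(());
        }
        match e {
            Expr::Leaf(_) => ControlFlow::Continue(()),
            Expr::Binop(l, r) => {
                if walk(l, f).is_break() {
                    return ControlFlow::Break(());
                }
                walk(r, f)
            }
        }
    }

    fn main() {
        let e = Expr::Binop(Box::new(Expr::Leaf(1)), Box::new(Expr::Leaf(2)));
        // Stop as soon as a leaf with value 2 is seen.
        let found = walk(&e, &mut |n: &Expr| match n {
            Expr::Leaf(2) => ControlFlow::Break(()),
            _ => ControlFlow::Continue(()),
        })
        .is_break();
        assert!(found);
    }
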
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
index cb3de57..2d57c39 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
@@ -255,9 +255,10 @@
                     return;
                 }
 
-                match obligation.predicate.skip_binders() {
+                let bound_predicate = obligation.predicate.bound_atom();
+                match bound_predicate.skip_binder() {
                     ty::PredicateAtom::Trait(trait_predicate, _) => {
-                        let trait_predicate = ty::Binder::bind(trait_predicate);
+                        let trait_predicate = bound_predicate.rebind(trait_predicate);
                         let trait_predicate = self.resolve_vars_if_possible(&trait_predicate);
 
                         if self.tcx.sess.has_errors() && trait_predicate.references_error() {
@@ -531,7 +532,7 @@
                     }
 
                     ty::PredicateAtom::RegionOutlives(predicate) => {
-                        let predicate = ty::Binder::bind(predicate);
+                        let predicate = bound_predicate.rebind(predicate);
                         let predicate = self.resolve_vars_if_possible(&predicate);
                         let err = self
                             .region_outlives_predicate(&obligation.cause, predicate)
@@ -1078,9 +1079,10 @@
         }
 
         // FIXME: It should be possible to deal with `ForAll` in a cleaner way.
-        let (cond, error) = match (cond.skip_binders(), error.skip_binders()) {
+        let bound_error = error.bound_atom();
+        let (cond, error) = match (cond.skip_binders(), bound_error.skip_binder()) {
             (ty::PredicateAtom::Trait(..), ty::PredicateAtom::Trait(error, _)) => {
-                (cond, ty::Binder::bind(error))
+                (cond, bound_error.rebind(error))
             }
             _ => {
                 // FIXME: make this work in other cases too.
@@ -1089,9 +1091,10 @@
         };
 
         for obligation in super::elaborate_predicates(self.tcx, std::iter::once(cond)) {
-            if let ty::PredicateAtom::Trait(implication, _) = obligation.predicate.skip_binders() {
+            let bound_predicate = obligation.predicate.bound_atom();
+            if let ty::PredicateAtom::Trait(implication, _) = bound_predicate.skip_binder() {
                 let error = error.to_poly_trait_ref();
-                let implication = ty::Binder::bind(implication.trait_ref);
+                let implication = bound_predicate.rebind(implication.trait_ref);
                 // FIXME: I'm just not taking associated types at all here.
                 // Eventually I'll need to implement param-env-aware
                 // `Γ₁ ⊦ φ₁ => Γ₂ ⊦ φ₂` logic.
@@ -1169,12 +1172,13 @@
             //
             // this can fail if the problem was higher-ranked, in which
             // cause I have no idea for a good error message.
-            if let ty::PredicateAtom::Projection(data) = predicate.skip_binders() {
+            let bound_predicate = predicate.bound_atom();
+            if let ty::PredicateAtom::Projection(data) = bound_predicate.skip_binder() {
                 let mut selcx = SelectionContext::new(self);
                 let (data, _) = self.replace_bound_vars_with_fresh_vars(
                     obligation.cause.span,
                     infer::LateBoundRegionConversionTime::HigherRankedType,
-                    &ty::Binder::bind(data),
+                    &bound_predicate.rebind(data),
                 );
                 let mut obligations = vec![];
                 let normalized_ty = super::normalize_projection_type(
@@ -1384,17 +1388,7 @@
         trait_ref: &ty::PolyTraitRef<'tcx>,
     ) {
         let get_trait_impl = |trait_def_id| {
-            let mut trait_impl = None;
-            self.tcx.for_each_relevant_impl(
-                trait_def_id,
-                trait_ref.skip_binder().self_ty(),
-                |impl_def_id| {
-                    if trait_impl.is_none() {
-                        trait_impl = Some(impl_def_id);
-                    }
-                },
-            );
-            trait_impl
+            self.tcx.find_map_relevant_impl(trait_def_id, trait_ref.skip_binder().self_ty(), Some)
         };
         let required_trait_path = self.tcx.def_path_str(trait_ref.def_id());
         let all_traits = self.tcx.all_traits(LOCAL_CRATE);
@@ -1461,11 +1455,11 @@
             return;
         }
 
-        let mut err = match predicate.skip_binders() {
+        let bound_predicate = predicate.bound_atom();
+        let mut err = match bound_predicate.skip_binder() {
             ty::PredicateAtom::Trait(data, _) => {
-                let trait_ref = ty::Binder::bind(data.trait_ref);
-                let self_ty = trait_ref.skip_binder().self_ty();
-                debug!("self_ty {:?} {:?} trait_ref {:?}", self_ty, self_ty.kind(), trait_ref);
+                let trait_ref = bound_predicate.rebind(data.trait_ref);
+                debug!("trait_ref {:?}", trait_ref);
 
                 if predicate.references_error() {
                     return;
@@ -1480,6 +1474,17 @@
                 // known, since we don't dispatch based on region
                 // relationships.
 
+                // Pick the first substitution that still contains inference variables as the one
+                // we're going to emit an error for. If there are none (see above), fall back to
+                // the substitution for `Self`.
+                let subst = {
+                    let substs = data.trait_ref.substs;
+                    substs
+                        .iter()
+                        .find(|s| s.has_infer_types_or_consts())
+                        .unwrap_or_else(|| substs[0])
+                };
+
                 // This is kind of a hack: it frequently happens that some earlier
                 // error prevents types from being fully inferred, and then we get
                 // a bunch of uninteresting errors saying something like "<generic
@@ -1496,21 +1501,11 @@
                 // check upstream for type errors and don't add the obligations to
                 // begin with in those cases.
                 if self.tcx.lang_items().sized_trait() == Some(trait_ref.def_id()) {
-                    self.emit_inference_failure_err(
-                        body_id,
-                        span,
-                        self_ty.into(),
-                        ErrorCode::E0282,
-                    )
-                    .emit();
+                    self.emit_inference_failure_err(body_id, span, subst, ErrorCode::E0282).emit();
                     return;
                 }
-                let mut err = self.emit_inference_failure_err(
-                    body_id,
-                    span,
-                    self_ty.into(),
-                    ErrorCode::E0283,
-                );
+                let mut err =
+                    self.emit_inference_failure_err(body_id, span, subst, ErrorCode::E0283);
                 err.note(&format!("cannot satisfy `{}`", predicate));
                 if let ObligationCauseCode::ItemObligation(def_id) = obligation.cause.code {
                     self.suggest_fully_qualified_path(&mut err, def_id, span, trait_ref.def_id());
@@ -1588,7 +1583,7 @@
                 self.emit_inference_failure_err(body_id, span, a.into(), ErrorCode::E0282)
             }
             ty::PredicateAtom::Projection(data) => {
-                let trait_ref = ty::Binder::bind(data).to_poly_trait_ref(self.tcx);
+                let trait_ref = bound_predicate.rebind(data).to_poly_trait_ref(self.tcx);
                 let self_ty = trait_ref.skip_binder().self_ty();
                 let ty = data.ty;
                 if predicate.references_error() {
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
index 90a8d96..1c6e661 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
@@ -21,7 +21,8 @@
 };
 use rustc_middle::ty::{TypeAndMut, TypeckResults};
 use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{MultiSpan, Span, DUMMY_SP};
+use rustc_span::{BytePos, MultiSpan, Span, DUMMY_SP};
+use rustc_target::spec::abi;
 use std::fmt;
 
 use super::InferCtxtPrivExt;
@@ -1157,15 +1158,15 @@
                     tcx.mk_ty_infer(ty::TyVar(ty::TyVid { index: 0 })),
                     false,
                     hir::Unsafety::Normal,
-                    ::rustc_target::spec::abi::Abi::Rust,
+                    abi::Abi::Rust,
                 )
             } else {
                 tcx.mk_fn_sig(
-                    ::std::iter::once(inputs),
+                    std::iter::once(inputs),
                     tcx.mk_ty_infer(ty::TyVar(ty::TyVid { index: 0 })),
                     false,
                     hir::Unsafety::Normal,
-                    ::rustc_target::spec::abi::Abi::Rust,
+                    abi::Abi::Rust,
                 )
             };
             ty::Binder::bind(sig).to_string()
@@ -1307,6 +1308,9 @@
         let mut generator = None;
         let mut outer_generator = None;
         let mut next_code = Some(&obligation.cause.code);
+
+        let mut seen_upvar_tys_infer_tuple = false;
+
         while let Some(code) = next_code {
             debug!("maybe_note_obligation_cause_for_async_await: code={:?}", code);
             match code {
@@ -1327,6 +1331,13 @@
                             outer_generator = Some(did);
                         }
                         ty::GeneratorWitness(..) => {}
+                        ty::Tuple(_) if !seen_upvar_tys_infer_tuple => {
+                            // By introducing a tuple of upvar types into the chain of obligations
+                            // of a generator, the first non-generator item is now the tuple
+                            // itself, so we ignore it here.
+
+                            seen_upvar_tys_infer_tuple = true;
+                        }
                         _ if generator.is_none() => {
                             trait_ref = Some(derived_obligation.parent_trait_ref.skip_binder());
                             target_ty = Some(ty);
@@ -1559,36 +1570,130 @@
             format!("does not implement `{}`", trait_ref.print_only_trait_path())
         };
 
-        let mut explain_yield = |interior_span: Span,
-                                 yield_span: Span,
-                                 scope_span: Option<Span>| {
-            let mut span = MultiSpan::from_span(yield_span);
-            if let Ok(snippet) = source_map.span_to_snippet(interior_span) {
-                span.push_span_label(
-                    yield_span,
-                    format!("{} occurs here, with `{}` maybe used later", await_or_yield, snippet),
-                );
-                // If available, use the scope span to annotate the drop location.
-                if let Some(scope_span) = scope_span {
-                    span.push_span_label(
-                        source_map.end_point(scope_span),
-                        format!("`{}` is later dropped here", snippet),
-                    );
+        let mut explain_yield =
+            |interior_span: Span, yield_span: Span, scope_span: Option<Span>| {
+                let mut span = MultiSpan::from_span(yield_span);
+                if let Ok(snippet) = source_map.span_to_snippet(interior_span) {
+                    // #70935: If snippet contains newlines, display "the value" instead
+                    // so that we do not emit complex diagnostics.
+                    let snippet = &format!("`{}`", snippet);
+                    let snippet = if snippet.contains('\n') { "the value" } else { snippet };
+                    // The multispan can be complex here, like:
+                    // note: future is not `Send` as this value is used across an await
+                    //   --> $DIR/issue-70935-complex-spans.rs:13:9
+                    //    |
+                    // LL |            baz(|| async{
+                    //    |  __________^___-
+                    //    | | _________|
+                    //    | ||
+                    // LL | ||             foo(tx.clone());
+                    // LL | ||         }).await;
+                    //    | ||         -      ^- value is later dropped here
+                    //    | ||_________|______|
+                    //    | |__________|      await occurs here, with value maybe used later
+                    //    |            has type `closure` which is not `Send`
+                    //
+                    // So, detect it and separate into some notes, like:
+                    //
+                    // note: future is not `Send` as this value is used across an await
+                    //   --> $DIR/issue-70935-complex-spans.rs:13:9
+                    //    |
+                    // LL | /         baz(|| async{
+                    // LL | |             foo(tx.clone());
+                    // LL | |         }).await;
+                    //    | |________________^ first, await occurs here, with the value maybe used later...
+                    // note: the value is later dropped here
+                    //   --> $DIR/issue-70935-complex-spans.rs:15:17
+                    //    |
+                    // LL |         }).await;
+                    //    |                 ^
+                    //
+                    // If available, use the scope span to annotate the drop location.
+                    if let Some(scope_span) = scope_span {
+                        let scope_span = source_map.end_point(scope_span);
+                        let is_overlapped =
+                            yield_span.overlaps(scope_span) || yield_span.overlaps(interior_span);
+                        if is_overlapped {
+                            span.push_span_label(
+                                yield_span,
+                                format!(
+                                    "first, {} occurs here, with {} maybe used later...",
+                                    await_or_yield, snippet
+                                ),
+                            );
+                            err.span_note(
+                                span,
+                                &format!(
+                                    "{} {} as this value is used across {}",
+                                    future_or_generator, trait_explanation, an_await_or_yield
+                                ),
+                            );
+                            if source_map.is_multiline(interior_span) {
+                                err.span_note(
+                                    scope_span,
+                                    &format!("{} is later dropped here", snippet),
+                                );
+                                err.span_note(
+                                    interior_span,
+                                    &format!(
+                                        "this has type `{}` which {}",
+                                        target_ty, trait_explanation
+                                    ),
+                                );
+                            } else {
+                                let mut span = MultiSpan::from_span(scope_span);
+                                span.push_span_label(
+                                    interior_span,
+                                    format!("has type `{}` which {}", target_ty, trait_explanation),
+                                );
+                                err.span_note(span, &format!("{} is later dropped here", snippet));
+                            }
+                        } else {
+                            span.push_span_label(
+                                yield_span,
+                                format!(
+                                    "{} occurs here, with {} maybe used later",
+                                    await_or_yield, snippet
+                                ),
+                            );
+                            span.push_span_label(
+                                scope_span,
+                                format!("{} is later dropped here", snippet),
+                            );
+                            span.push_span_label(
+                                interior_span,
+                                format!("has type `{}` which {}", target_ty, trait_explanation),
+                            );
+                            err.span_note(
+                                span,
+                                &format!(
+                                    "{} {} as this value is used across {}",
+                                    future_or_generator, trait_explanation, an_await_or_yield
+                                ),
+                            );
+                        }
+                    } else {
+                        span.push_span_label(
+                            yield_span,
+                            format!(
+                                "{} occurs here, with {} maybe used later",
+                                await_or_yield, snippet
+                            ),
+                        );
+                        span.push_span_label(
+                            interior_span,
+                            format!("has type `{}` which {}", target_ty, trait_explanation),
+                        );
+                        err.span_note(
+                            span,
+                            &format!(
+                                "{} {} as this value is used across {}",
+                                future_or_generator, trait_explanation, an_await_or_yield
+                            ),
+                        );
+                    }
                 }
-            }
-            span.push_span_label(
-                interior_span,
-                format!("has type `{}` which {}", target_ty, trait_explanation),
-            );
-
-            err.span_note(
-                span,
-                &format!(
-                    "{} {} as this value is used across {}",
-                    future_or_generator, trait_explanation, an_await_or_yield
-                ),
-            );
-        };
+            };
         match interior_or_upvar_span {
             GeneratorInteriorOrUpvar::Interior(interior_span) => {
                 if let Some((scope_span, yield_span, expr, from_awaited_ty)) = interior_extra_info {
@@ -1834,9 +1939,9 @@
                     err.note("all function arguments must have a statically known size");
                 }
                 if tcx.sess.opts.unstable_features.is_nightly_build()
-                    && !self.tcx.features().unsized_locals
+                    && !self.tcx.features().unsized_fn_params
                 {
-                    err.help("unsized locals are gated as an unstable feature");
+                    err.help("unsized fn params are gated as an unstable feature");
                 }
             }
             ObligationCauseCode::SizedReturnType => {
@@ -1907,7 +2012,34 @@
             ObligationCauseCode::BuiltinDerivedObligation(ref data) => {
                 let parent_trait_ref = self.resolve_vars_if_possible(&data.parent_trait_ref);
                 let ty = parent_trait_ref.skip_binder().self_ty();
-                err.note(&format!("required because it appears within the type `{}`", ty));
+                if parent_trait_ref.references_error() {
+                    err.cancel();
+                    return;
+                }
+
+                // If the obligation for a tuple is set directly by a Generator or Closure,
+                // then the tuple must be the one containing capture types.
+                let is_upvar_tys_infer_tuple = if !matches!(ty.kind(), ty::Tuple(..)) {
+                    false
+                } else {
+                    if let ObligationCauseCode::BuiltinDerivedObligation(ref data) =
+                        *data.parent_code
+                    {
+                        let parent_trait_ref =
+                            self.resolve_vars_if_possible(&data.parent_trait_ref);
+                        let ty = parent_trait_ref.skip_binder().self_ty();
+                        matches!(ty.kind(), ty::Generator(..))
+                            || matches!(ty.kind(), ty::Closure(..))
+                    } else {
+                        false
+                    }
+                };
+
+                // Don't print the tuple of capture types
+                if !is_upvar_tys_infer_tuple {
+                    err.note(&format!("required because it appears within the type `{}`", ty));
+                }
+
                 obligated_types.push(ty);
 
                 let parent_predicate = parent_trait_ref.without_const().to_predicate(tcx);
@@ -2076,10 +2208,10 @@
                 if self.predicate_may_hold(&try_obligation) && impls_future {
                     if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
                         if snippet.ends_with('?') {
-                            err.span_suggestion(
-                                span,
-                                "consider using `.await` here",
-                                format!("{}.await?", snippet.trim_end_matches('?')),
+                            err.span_suggestion_verbose(
+                                span.with_hi(span.hi() - BytePos(1)).shrink_to_hi(),
+                                "consider `await`ing on the `Future`",
+                                ".await".to_string(),
                                 Applicability::MaybeIncorrect,
                             );
                         }
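
The last hunk above narrows the `?` suggestion from rewriting the whole expression to inserting `.await` just before the trailing `?` (`span.with_hi(span.hi() - BytePos(1)).shrink_to_hi()`). A standalone sketch of the same offset arithmetic on a plain string snippet; `await_insertion_offset` is a hypothetical helper, not compiler API:

    // Given the snippet of an expression like `fut()?`, find the byte offset just
    // before the trailing `?`, mirroring `span.hi() - BytePos(1)`.
    fn await_insertion_offset(snippet: &str) -> Option<usize> {
        // Only applies when the expression ends with `?`, as checked above.
        if snippet.ends_with('?') { Some(snippet.len() - 1) } else { None }
    }

    fn main() {
        let snippet = "fut()?";
        let at = await_insertion_offset(snippet).unwrap();
        let mut fixed = snippet.to_string();
        fixed.insert_str(at, ".await");
        assert_eq!(fixed, "fut().await?");
    }
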
diff --git a/compiler/rustc_trait_selection/src/traits/fulfill.rs b/compiler/rustc_trait_selection/src/traits/fulfill.rs
index 8586a55..538c14c 100644
--- a/compiler/rustc_trait_selection/src/traits/fulfill.rs
+++ b/compiler/rustc_trait_selection/src/traits/fulfill.rs
@@ -1,6 +1,6 @@
 use crate::infer::{InferCtxt, TyOrConstInferVar};
 use rustc_data_structures::obligation_forest::ProcessResult;
-use rustc_data_structures::obligation_forest::{DoCompleted, Error, ForestObligation};
+use rustc_data_structures::obligation_forest::{Error, ForestObligation, Outcome};
 use rustc_data_structures::obligation_forest::{ObligationForest, ObligationProcessor};
 use rustc_errors::ErrorReported;
 use rustc_infer::traits::{TraitEngine, TraitEngineExt as _, TraitObligation};
@@ -120,7 +120,8 @@
         &mut self,
         selcx: &mut SelectionContext<'a, 'tcx>,
     ) -> Result<(), Vec<FulfillmentError<'tcx>>> {
-        debug!("select(obligation-forest-size={})", self.predicates.len());
+        let span = debug_span!("select", obligation_forest_size = ?self.predicates.len());
+        let _enter = span.enter();
 
         let mut errors = Vec::new();
 
@@ -128,13 +129,11 @@
             debug!("select: starting another iteration");
 
             // Process pending obligations.
-            let outcome = self.predicates.process_obligations(
-                &mut FulfillProcessor {
+            let outcome: Outcome<_, _> =
+                self.predicates.process_obligations(&mut FulfillProcessor {
                     selcx,
                     register_region_obligations: self.register_region_obligations,
-                },
-                DoCompleted::No,
-            );
+                });
             debug!("select: outcome={:#?}", outcome);
 
             // FIXME: if we kept the original cache key, we could mark projection
@@ -173,7 +172,7 @@
         projection_ty: ty::ProjectionTy<'tcx>,
         cause: ObligationCause<'tcx>,
     ) -> Ty<'tcx> {
-        debug!("normalize_projection_type(projection_ty={:?})", projection_ty);
+        debug!(?projection_ty, "normalize_projection_type");
 
         debug_assert!(!projection_ty.has_escaping_bound_vars());
 
@@ -191,7 +190,7 @@
         );
         self.register_predicate_obligations(infcx, obligations);
 
-        debug!("normalize_projection_type: result={:?}", normalized_ty);
+        debug!(?normalized_ty);
 
         normalized_ty
     }
@@ -205,7 +204,7 @@
         // debug output much nicer to read and so on.
         let obligation = infcx.resolve_vars_if_possible(&obligation);
 
-        debug!("register_predicate_obligation(obligation={:?})", obligation);
+        debug!(?obligation, "register_predicate_obligation");
 
         assert!(!infcx.is_in_snapshot() || self.usable_in_snapshot);
 
@@ -342,7 +341,7 @@
                 self.selcx.infcx().resolve_vars_if_possible(&obligation.predicate);
         }
 
-        debug!("process_obligation: obligation = {:?} cause = {:?}", obligation, obligation.cause);
+        debug!(?obligation, ?obligation.cause, "process_obligation");
 
         let infcx = self.selcx.infcx();
 
@@ -352,7 +351,7 @@
                 // This means we need to pass it the bound version of our
                 // predicate.
                 ty::PredicateAtom::Trait(trait_ref, _constness) => {
-                    let trait_obligation = obligation.with(Binder::bind(trait_ref));
+                    let trait_obligation = obligation.with(binder.rebind(trait_ref));
 
                     self.process_trait_obligation(
                         obligation,
@@ -361,7 +360,7 @@
                     )
                 }
                 ty::PredicateAtom::Projection(data) => {
-                    let project_obligation = obligation.with(Binder::bind(data));
+                    let project_obligation = obligation.with(binder.rebind(data));
 
                     self.process_projection_obligation(
                         project_obligation,
@@ -376,7 +375,7 @@
                 | ty::PredicateAtom::Subtype(_)
                 | ty::PredicateAtom::ConstEvaluatable(..)
                 | ty::PredicateAtom::ConstEquate(..) => {
-                    let (pred, _) = infcx.replace_bound_vars_with_placeholders(binder);
+                    let pred = infcx.replace_bound_vars_with_placeholders(binder);
                     ProcessResult::Changed(mk_pending(vec![
                         obligation.with(pred.to_predicate(self.selcx.tcx())),
                     ]))
@@ -449,6 +448,7 @@
                         self.selcx.infcx(),
                         obligation.param_env,
                         obligation.cause.body_id,
+                        obligation.recursion_depth + 1,
                         arg,
                         obligation.cause.span,
                     ) {
@@ -499,7 +499,7 @@
                         Err(ErrorHandled::TooGeneric) => {
                             pending_obligation.stalled_on = substs
                                 .iter()
-                                .filter_map(|ty| TyOrConstInferVar::maybe_from_generic_arg(ty))
+                                .filter_map(TyOrConstInferVar::maybe_from_generic_arg)
                                 .collect();
                             ProcessResult::Unchanged
                         }
@@ -508,7 +508,7 @@
                 }
 
                 ty::PredicateAtom::ConstEquate(c1, c2) => {
-                    debug!("equating consts: c1={:?} c2={:?}", c1, c2);
+                    debug!(?c1, ?c2, "equating consts");
                     if self.selcx.tcx().features().const_evaluatable_checked {
                         // FIXME: we probably should only try to unify abstract constants
                         // if the constants depend on generic parameters.
@@ -600,6 +600,7 @@
         }
     }
 
+    #[instrument(level = "debug", skip(self, obligation, stalled_on))]
     fn process_trait_obligation(
         &mut self,
         obligation: &PredicateObligation<'tcx>,
@@ -612,8 +613,8 @@
             // FIXME: consider caching errors too.
             if infcx.predicate_must_hold_considering_regions(obligation) {
                 debug!(
-                    "selecting trait `{:?}` at depth {} evaluated to holds",
-                    obligation.predicate, obligation.recursion_depth
+                    "selecting trait at depth {} evaluated to holds",
+                    obligation.recursion_depth
                 );
                 return ProcessResult::Changed(vec![]);
             }
@@ -621,17 +622,11 @@
 
         match self.selcx.select(&trait_obligation) {
             Ok(Some(impl_source)) => {
-                debug!(
-                    "selecting trait `{:?}` at depth {} yielded Ok(Some)",
-                    trait_obligation.predicate, obligation.recursion_depth
-                );
+                debug!("selecting trait at depth {} yielded Ok(Some)", obligation.recursion_depth);
                 ProcessResult::Changed(mk_pending(impl_source.nested_obligations()))
             }
             Ok(None) => {
-                debug!(
-                    "selecting trait `{:?}` at depth {} yielded Ok(None)",
-                    trait_obligation.predicate, obligation.recursion_depth
-                );
+                debug!("selecting trait at depth {} yielded Ok(None)", obligation.recursion_depth);
 
                 // This is a bit subtle: for the most part, the
                 // only reason we can fail to make progress on
@@ -651,10 +646,7 @@
                 ProcessResult::Unchanged
             }
             Err(selection_err) => {
-                info!(
-                    "selecting trait `{:?}` at depth {} yielded Err",
-                    trait_obligation.predicate, obligation.recursion_depth
-                );
+                info!("selecting trait at depth {} yielded Err", obligation.recursion_depth);
 
                 ProcessResult::Error(CodeSelectionError(selection_err))
             }
@@ -672,7 +664,7 @@
             Ok(Ok(None)) => {
                 *stalled_on = trait_ref_infer_vars(
                     self.selcx,
-                    project_obligation.predicate.to_poly_trait_ref(self.selcx.tcx()),
+                    project_obligation.predicate.to_poly_trait_ref(tcx),
                 );
                 ProcessResult::Unchanged
             }
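
The fulfill.rs hunks above replace `format`-style `debug!` strings with structured tracing fields (`?value`), spans (`debug_span!`), and `#[instrument]`. A minimal sketch of those macros outside the compiler, assuming the `tracing` and `tracing-subscriber` crates as dependencies (rustc supplies its own tracing setup):

    use tracing::{debug, debug_span, instrument, Level};

    #[derive(Debug)]
    struct Obligation {
        recursion_depth: usize,
    }

    // `skip` omits the argument from the span's recorded fields.
    #[instrument(level = "debug", skip(obligation))]
    fn process_obligation(obligation: &Obligation) {
        // `?x` records the field using its `Debug` representation.
        debug!(?obligation, "process_obligation");
    }

    fn main() {
        tracing_subscriber::fmt().with_max_level(Level::DEBUG).init();
        let forest_size = 1usize;
        let span = debug_span!("select", obligation_forest_size = ?forest_size);
        let _enter = span.enter();
        process_obligation(&Obligation { recursion_depth: 0 });
    }
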
diff --git a/compiler/rustc_trait_selection/src/traits/object_safety.rs b/compiler/rustc_trait_selection/src/traits/object_safety.rs
index 86fc3cb..32e0991 100644
--- a/compiler/rustc_trait_selection/src/traits/object_safety.rs
+++ b/compiler/rustc_trait_selection/src/traits/object_safety.rs
@@ -11,9 +11,10 @@
 use super::elaborate_predicates;
 
 use crate::infer::TyCtxtInferExt;
+use crate::traits::const_evaluatable::{self, AbstractConst};
 use crate::traits::query::evaluate_obligation::InferCtxtExt;
 use crate::traits::{self, Obligation, ObligationCause};
-use rustc_errors::{Applicability, FatalError};
+use rustc_errors::FatalError;
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::subst::{GenericArg, InternalSubsts, Subst};
@@ -21,10 +22,12 @@
 use rustc_middle::ty::{Predicate, ToPredicate};
 use rustc_session::lint::builtin::WHERE_CLAUSES_OBJECT_SAFETY;
 use rustc_span::symbol::Symbol;
-use rustc_span::Span;
+use rustc_span::{MultiSpan, Span};
 use smallvec::SmallVec;
 
+use std::array;
 use std::iter;
+use std::ops::ControlFlow;
 
 pub use crate::traits::{MethodViolationCode, ObjectSafetyViolation};
 
@@ -99,49 +102,7 @@
                 span,
             ) = violation
             {
-                // Using `CRATE_NODE_ID` is wrong, but it's hard to get a more precise id.
-                // It's also hard to get a use site span, so we use the method definition span.
-                tcx.struct_span_lint_hir(
-                    WHERE_CLAUSES_OBJECT_SAFETY,
-                    hir::CRATE_HIR_ID,
-                    *span,
-                    |lint| {
-                        let mut err = lint.build(&format!(
-                            "the trait `{}` cannot be made into an object",
-                            tcx.def_path_str(trait_def_id)
-                        ));
-                        let node = tcx.hir().get_if_local(trait_def_id);
-                        let msg = if let Some(hir::Node::Item(item)) = node {
-                            err.span_label(
-                                item.ident.span,
-                                "this trait cannot be made into an object...",
-                            );
-                            format!("...because {}", violation.error_msg())
-                        } else {
-                            format!(
-                                "the trait cannot be made into an object because {}",
-                                violation.error_msg()
-                            )
-                        };
-                        err.span_label(*span, &msg);
-                        match (node, violation.solution()) {
-                            (Some(_), Some((note, None))) => {
-                                err.help(&note);
-                            }
-                            (Some(_), Some((note, Some((sugg, span))))) => {
-                                err.span_suggestion(
-                                    span,
-                                    &note,
-                                    sugg,
-                                    Applicability::MachineApplicable,
-                                );
-                            }
-                            // Only provide the help if its a local trait, otherwise it's not actionable.
-                            _ => {}
-                        }
-                        err.emit();
-                    },
-                );
+                lint_object_unsafe_trait(tcx, *span, trait_def_id, violation);
                 false
             } else {
                 true
@@ -159,6 +120,10 @@
     if !spans.is_empty() {
         violations.push(ObjectSafetyViolation::SupertraitSelf(spans));
     }
+    let spans = bounds_reference_self(tcx, trait_def_id);
+    if !spans.is_empty() {
+        violations.push(ObjectSafetyViolation::SupertraitSelf(spans));
+    }
 
     violations.extend(
         tcx.associated_items(trait_def_id)
@@ -175,6 +140,51 @@
     violations
 }
 
+/// Lint object-unsafe trait.
+fn lint_object_unsafe_trait(
+    tcx: TyCtxt<'_>,
+    span: Span,
+    trait_def_id: DefId,
+    violation: &ObjectSafetyViolation,
+) {
+    // Using `CRATE_NODE_ID` is wrong, but it's hard to get a more precise id.
+    // It's also hard to get a use site span, so we use the method definition span.
+    tcx.struct_span_lint_hir(WHERE_CLAUSES_OBJECT_SAFETY, hir::CRATE_HIR_ID, span, |lint| {
+        let mut err = lint.build(&format!(
+            "the trait `{}` cannot be made into an object",
+            tcx.def_path_str(trait_def_id)
+        ));
+        let node = tcx.hir().get_if_local(trait_def_id);
+        let mut spans = MultiSpan::from_span(span);
+        if let Some(hir::Node::Item(item)) = node {
+            spans.push_span_label(
+                item.ident.span,
+                "this trait cannot be made into an object...".into(),
+            );
+            spans.push_span_label(span, format!("...because {}", violation.error_msg()));
+        } else {
+            spans.push_span_label(
+                span,
+                format!(
+                    "the trait cannot be made into an object because {}",
+                    violation.error_msg()
+                ),
+            );
+        };
+        err.span_note(
+            spans,
+            "for a trait to be \"object safe\" it needs to allow building a vtable to allow the \
+             call to be resolvable dynamically; for more information visit \
+             <https://doc.rust-lang.org/reference/items/traits.html#object-safety>",
+        );
+        if node.is_some() {
+            // Only provide the help if it's a local trait; otherwise it's not actionable.
+            violation.solution(&mut err);
+        }
+        err.emit();
+    });
+}
+
 fn sized_trait_bound_spans<'tcx>(
     tcx: TyCtxt<'tcx>,
     bounds: hir::GenericBounds<'tcx>,
@@ -238,51 +248,70 @@
     } else {
         tcx.predicates_of(trait_def_id)
     };
-    let self_ty = tcx.types.self_param;
-    let has_self_ty = |arg: &GenericArg<'_>| arg.walk().any(|arg| arg == self_ty.into());
     predicates
         .predicates
         .iter()
-        .map(|(predicate, sp)| (predicate.subst_supertrait(tcx, &trait_ref), sp))
-        .filter_map(|(predicate, &sp)| {
-            match predicate.skip_binders() {
-                ty::PredicateAtom::Trait(ref data, _) => {
-                    // In the case of a trait predicate, we can skip the "self" type.
-                    if data.trait_ref.substs[1..].iter().any(has_self_ty) { Some(sp) } else { None }
-                }
-                ty::PredicateAtom::Projection(ref data) => {
-                    // And similarly for projections. This should be redundant with
-                    // the previous check because any projection should have a
-                    // matching `Trait` predicate with the same inputs, but we do
-                    // the check to be safe.
-                    //
-                    // Note that we *do* allow projection *outputs* to contain
-                    // `self` (i.e., `trait Foo: Bar<Output=Self::Result> { type Result; }`),
-                    // we just require the user to specify *both* outputs
-                    // in the object type (i.e., `dyn Foo<Output=(), Result=()>`).
-                    //
-                    // This is ALT2 in issue #56288, see that for discussion of the
-                    // possible alternatives.
-                    if data.projection_ty.trait_ref(tcx).substs[1..].iter().any(has_self_ty) {
-                        Some(sp)
-                    } else {
-                        None
-                    }
-                }
-                ty::PredicateAtom::WellFormed(..)
-                | ty::PredicateAtom::ObjectSafe(..)
-                | ty::PredicateAtom::TypeOutlives(..)
-                | ty::PredicateAtom::RegionOutlives(..)
-                | ty::PredicateAtom::ClosureKind(..)
-                | ty::PredicateAtom::Subtype(..)
-                | ty::PredicateAtom::ConstEvaluatable(..)
-                | ty::PredicateAtom::ConstEquate(..)
-                | ty::PredicateAtom::TypeWellFormedFromEnv(..) => None,
-            }
-        })
+        .map(|&(predicate, sp)| (predicate.subst_supertrait(tcx, &trait_ref), sp))
+        .filter_map(|predicate| predicate_references_self(tcx, predicate))
         .collect()
 }
 
+fn bounds_reference_self(tcx: TyCtxt<'_>, trait_def_id: DefId) -> SmallVec<[Span; 1]> {
+    let trait_ref = ty::Binder::dummy(ty::TraitRef::identity(tcx, trait_def_id));
+    tcx.associated_items(trait_def_id)
+        .in_definition_order()
+        .filter(|item| item.kind == ty::AssocKind::Type)
+        .flat_map(|item| tcx.explicit_item_bounds(item.def_id))
+        .map(|&(predicate, sp)| (predicate.subst_supertrait(tcx, &trait_ref), sp))
+        .filter_map(|predicate| predicate_references_self(tcx, predicate))
+        .collect()
+}
+
+fn predicate_references_self(
+    tcx: TyCtxt<'tcx>,
+    (predicate, sp): (ty::Predicate<'tcx>, Span),
+) -> Option<Span> {
+    let self_ty = tcx.types.self_param;
+    let has_self_ty = |arg: &GenericArg<'_>| arg.walk().any(|arg| arg == self_ty.into());
+    match predicate.skip_binders() {
+        ty::PredicateAtom::Trait(ref data, _) => {
+            // In the case of a trait predicate, we can skip the "self" type.
+            if data.trait_ref.substs[1..].iter().any(has_self_ty) { Some(sp) } else { None }
+        }
+        ty::PredicateAtom::Projection(ref data) => {
+            // And similarly for projections. This should be redundant with
+            // the previous check because any projection should have a
+            // matching `Trait` predicate with the same inputs, but we do
+            // the check to be safe.
+            //
+            // It also won't be redundant if we allow type-generic associated
+            // types for trait objects.
+            //
+            // Note that we *do* allow projection *outputs* to contain
+            // `self` (i.e., `trait Foo: Bar<Output=Self::Result> { type Result; }`),
+            // we just require the user to specify *both* outputs
+            // in the object type (i.e., `dyn Foo<Output=(), Result=()>`).
+            //
+            // This is ALT2 in issue #56288, see that for discussion of the
+            // possible alternatives.
+            if data.projection_ty.trait_ref(tcx).substs[1..].iter().any(has_self_ty) {
+                Some(sp)
+            } else {
+                None
+            }
+        }
+        ty::PredicateAtom::WellFormed(..)
+        | ty::PredicateAtom::ObjectSafe(..)
+        | ty::PredicateAtom::TypeOutlives(..)
+        | ty::PredicateAtom::RegionOutlives(..)
+        | ty::PredicateAtom::ClosureKind(..)
+        | ty::PredicateAtom::Subtype(..)
+        | ty::PredicateAtom::ConstEvaluatable(..)
+        | ty::PredicateAtom::ConstEquate(..)
+        | ty::PredicateAtom::TypeWellFormedFromEnv(..) => None,
+    }
+}
+
 fn trait_has_sized_self(tcx: TyCtxt<'_>, trait_def_id: DefId) -> bool {
     generics_require_sized_self(tcx, trait_def_id)
 }
@@ -361,6 +390,8 @@
     trait_def_id: DefId,
     method: &ty::AssocItem,
 ) -> Option<MethodViolationCode> {
+    let sig = tcx.fn_sig(method.def_id);
+
     // The method's first parameter must be named `self`
     if !method.fn_has_self_parameter {
         // We'll attempt to provide a structured suggestion for `Self: Sized`.
@@ -371,12 +402,22 @@
                     [.., pred] => (", Self: Sized", pred.span().shrink_to_hi()),
                 },
             );
-        return Some(MethodViolationCode::StaticMethod(sugg));
+        // Get the span pointing at where the `self` receiver should be.
+        let sm = tcx.sess.source_map();
+        let self_span = method.ident.span.to(tcx
+            .hir()
+            .span_if_local(method.def_id)
+            .unwrap_or_else(|| sm.next_point(method.ident.span))
+            .shrink_to_hi());
+        let self_span = sm.span_through_char(self_span, '(').shrink_to_hi();
+        return Some(MethodViolationCode::StaticMethod(
+            sugg,
+            self_span,
+            !sig.inputs().skip_binder().is_empty(),
+        ));
     }
 
-    let sig = tcx.fn_sig(method.def_id);
-
-    for (i, input_ty) in sig.skip_binder().inputs()[1..].iter().enumerate() {
+    for (i, &input_ty) in sig.skip_binder().inputs()[1..].iter().enumerate() {
         if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
             return Some(MethodViolationCode::ReferencesSelfInput(i));
         }
@@ -399,10 +440,7 @@
         // so outlives predicates will always hold.
         .cloned()
         .filter(|(p, _)| p.to_opt_type_outlives().is_none())
-        .collect::<Vec<_>>()
-        // Do a shallow visit so that `contains_illegal_self_type_reference`
-        // may apply it's custom visiting.
-        .visit_tys_shallow(|t| contains_illegal_self_type_reference(tcx, trait_def_id, t))
+        .any(|pred| contains_illegal_self_type_reference(tcx, trait_def_id, pred))
     {
         return Some(MethodViolationCode::WhereClauseReferencesSelf);
     }
@@ -583,7 +621,7 @@
 ///
 /// In practice, we cannot use `dyn Trait` explicitly in the obligation because it would result
 /// in a new check that `Trait` is object safe, creating a cycle (until object_safe_for_dispatch
-/// is stabilized, see tracking issue https://github.com/rust-lang/rust/issues/43561).
+/// is stabilized, see tracking issue <https://github.com/rust-lang/rust/issues/43561>).
 /// Instead, we fudge a little by introducing a new type parameter `U` such that
 /// `Self: Unsize<U>` and `U: Trait + ?Sized`, and use `U` in place of `dyn Trait`.
 /// Written as a chalk-style query:
@@ -658,8 +696,7 @@
         let caller_bounds: Vec<Predicate<'tcx>> = param_env
             .caller_bounds()
             .iter()
-            .chain(iter::once(unsize_predicate))
-            .chain(iter::once(trait_predicate))
+            .chain(array::IntoIter::new([unsize_predicate, trait_predicate]))
             .collect();
 
         ty::ParamEnv::new(tcx.intern_predicates(&caller_bounds), param_env.reveal())
@@ -683,10 +720,10 @@
     })
 }
 
-fn contains_illegal_self_type_reference<'tcx>(
+fn contains_illegal_self_type_reference<'tcx, T: TypeFoldable<'tcx>>(
     tcx: TyCtxt<'tcx>,
     trait_def_id: DefId,
-    ty: Ty<'tcx>,
+    value: T,
 ) -> bool {
     // This is somewhat subtle. In general, we want to forbid
     // references to `Self` in the argument and return types,
@@ -729,15 +766,20 @@
 
     struct IllegalSelfTypeVisitor<'tcx> {
         tcx: TyCtxt<'tcx>,
-        self_ty: Ty<'tcx>,
         trait_def_id: DefId,
         supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>>,
     }
 
     impl<'tcx> TypeVisitor<'tcx> for IllegalSelfTypeVisitor<'tcx> {
-        fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+        fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
             match t.kind() {
-                ty::Param(_) => t == self.self_ty,
+                ty::Param(_) => {
+                    if t == self.tcx.types.self_param {
+                        ControlFlow::BREAK
+                    } else {
+                        ControlFlow::CONTINUE
+                    }
+                }
                 ty::Projection(ref data) => {
                     // This is a projected type `<Foo as SomeTrait>::X`.
 
@@ -761,7 +803,7 @@
                         self.supertraits.as_ref().unwrap().contains(&projection_trait_ref);
 
                     if is_supertrait_of_current_trait {
-                        false // do not walk contained types, do not report error, do collect $200
+                        ControlFlow::CONTINUE // do not walk contained types, do not report error, do collect $200
                     } else {
                         t.super_visit_with(self) // DO walk contained types, POSSIBLY reporting an error
                     }
@@ -770,22 +812,66 @@
             }
         }
 
-        fn visit_const(&mut self, _c: &ty::Const<'tcx>) -> bool {
-            // FIXME(#72219) Look into the unevaluated constants for object safety violations.
-            // Do not walk substitutions of unevaluated consts, as they contain `Self`, even
-            // though the const expression doesn't necessary use it. Currently type variables
-            // inside array length expressions are forbidden, so they can't break the above
-            // rules.
-            false
+        fn visit_const(&mut self, ct: &ty::Const<'tcx>) -> ControlFlow<()> {
+            // First check if the type of this constant references `Self`.
+            self.visit_ty(ct.ty)?;
+
+            // Constants can only influence object safety if they reference `Self`.
+            // This is only possible for unevaluated constants, so we walk these here.
+            //
+            // If `AbstractConst::new` returned an error we already failed compilation
+            // so we don't have to emit an additional error here.
+            //
+            // We currently recurse into abstract consts here but do not recurse in
+            // `is_const_evaluatable`. This means that the object safety check is more
+            // liberal than the const eval check.
+            //
+            // This shouldn't really matter though as we can't really use any
+            // constants which are not considered const evaluatable.
+            use rustc_middle::mir::abstract_const::Node;
+            if let Ok(Some(ct)) = AbstractConst::from_const(self.tcx, ct) {
+                const_evaluatable::walk_abstract_const(self.tcx, ct, |node| match node {
+                    Node::Leaf(leaf) => {
+                        let leaf = leaf.subst(self.tcx, ct.substs);
+                        self.visit_const(leaf)
+                    }
+                    Node::Binop(..) | Node::UnaryOp(..) | Node::FunctionCall(_, _) => {
+                        ControlFlow::CONTINUE
+                    }
+                })
+            } else {
+                ControlFlow::CONTINUE
+            }
+        }
+
+        fn visit_predicate(&mut self, pred: ty::Predicate<'tcx>) -> ControlFlow<()> {
+            if let ty::PredicateAtom::ConstEvaluatable(def, substs) = pred.skip_binders() {
+                // FIXME(const_evaluatable_checked): We should probably deduplicate the logic for
+                // `AbstractConst`s here, it might make sense to change `ConstEvaluatable` to
+                // take a `ty::Const` instead.
+                use rustc_middle::mir::abstract_const::Node;
+                if let Ok(Some(ct)) = AbstractConst::new(self.tcx, def, substs) {
+                    const_evaluatable::walk_abstract_const(self.tcx, ct, |node| match node {
+                        Node::Leaf(leaf) => {
+                            let leaf = leaf.subst(self.tcx, ct.substs);
+                            self.visit_const(leaf)
+                        }
+                        Node::Binop(..) | Node::UnaryOp(..) | Node::FunctionCall(_, _) => {
+                            ControlFlow::CONTINUE
+                        }
+                    })
+                } else {
+                    ControlFlow::CONTINUE
+                }
+            } else {
+                pred.super_visit_with(self)
+            }
         }
     }
 
-    ty.visit_with(&mut IllegalSelfTypeVisitor {
-        tcx,
-        self_ty: tcx.types.self_param,
-        trait_def_id,
-        supertraits: None,
-    })
+    value
+        .visit_with(&mut IllegalSelfTypeVisitor { tcx, trait_def_id, supertraits: None })
+        .is_break()
 }
 
 pub fn provide(providers: &mut ty::query::Providers) {
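
`predicate_references_self`, factored out above and also used by the new `bounds_reference_self`, flags a predicate when any generic argument other than the receiver (`substs[1..]`) mentions `Self`. A standalone sketch of that rule, with a string-based `mentions_self` standing in for `arg.walk().any(|arg| arg == self_ty.into())`:

    // A predicate's substs reference `Self` illegally when any argument past the
    // receiver (index 0) mentions it.
    fn references_self(substs: &[&str]) -> bool {
        let mentions_self = |arg: &&str| arg.contains("Self");
        substs[1..].iter().any(mentions_self)
    }

    fn main() {
        // `trait Foo: Bar<Self>`: `Self` appears in a non-receiver position.
        assert!(references_self(&["Self", "Self"]));
        // `trait Foo: Bar<u32>`: only the receiver is `Self`, which is allowed.
        assert!(!references_self(&["Self", "u32"]));
    }
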
diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs
index 6ac620b..170110a 100644
--- a/compiler/rustc_trait_selection/src/traits/project.rs
+++ b/compiler/rustc_trait_selection/src/traits/project.rs
@@ -1,6 +1,5 @@
 //! Code for projecting associated types out of trait references.
 
-use super::elaborate_predicates;
 use super::specialization_graph;
 use super::translate_substs;
 use super::util;
@@ -29,7 +28,6 @@
 use rustc_middle::ty::subst::Subst;
 use rustc_middle::ty::{self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, WithConstness};
 use rustc_span::symbol::sym;
-use rustc_span::DUMMY_SP;
 
 pub use rustc_middle::traits::Reveal;
 
@@ -53,13 +51,16 @@
 
 #[derive(PartialEq, Eq, Debug)]
 enum ProjectionTyCandidate<'tcx> {
-    // from a where-clause in the env or object type
+    /// From a where-clause in the env or object type
     ParamEnv(ty::PolyProjectionPredicate<'tcx>),
 
-    // from the definition of `Trait` when you have something like <<A as Trait>::B as Trait2>::C
+    /// From the definition of `Trait` when you have something like <<A as Trait>::B as Trait2>::C
     TraitDef(ty::PolyProjectionPredicate<'tcx>),
 
-    // from a "impl" (or a "pseudo-impl" returned by select)
+    /// Bounds specified on an object type
+    Object(ty::PolyProjectionPredicate<'tcx>),
+
+    /// From an "impl" (or a "pseudo-impl" returned by select)
     Select(Selection<'tcx>),
 }
 
@@ -156,6 +157,7 @@
 ///    the given obligations. If the projection cannot be normalized because
 ///    the required trait bound doesn't hold this returned with `obligations`
 ///    being a predicate that cannot be proven.
+#[instrument(level = "debug", skip(selcx))]
 pub(super) fn poly_project_and_unify_type<'cx, 'tcx>(
     selcx: &mut SelectionContext<'cx, 'tcx>,
     obligation: &PolyProjectionObligation<'tcx>,
@@ -163,11 +165,9 @@
     Result<Option<Vec<PredicateObligation<'tcx>>>, InProgress>,
     MismatchedProjectionTypes<'tcx>,
 > {
-    debug!("poly_project_and_unify_type(obligation={:?})", obligation);
-
     let infcx = selcx.infcx();
     infcx.commit_if_ok(|_snapshot| {
-        let (placeholder_predicate, _) =
+        let placeholder_predicate =
             infcx.replace_bound_vars_with_placeholders(&obligation.predicate);
 
         let placeholder_obligation = obligation.with(placeholder_predicate);
@@ -190,7 +190,7 @@
     Result<Option<Vec<PredicateObligation<'tcx>>>, InProgress>,
     MismatchedProjectionTypes<'tcx>,
 > {
-    debug!("project_and_unify_type(obligation={:?})", obligation);
+    debug!(?obligation, "project_and_unify_type");
 
     let mut obligations = vec![];
     let normalized_ty = match opt_normalize_projection_type(
@@ -206,10 +206,7 @@
         Err(InProgress) => return Ok(Err(InProgress)),
     };
 
-    debug!(
-        "project_and_unify_type: normalized_ty={:?} obligations={:?}",
-        normalized_ty, obligations
-    );
+    debug!(?normalized_ty, ?obligations, "project_and_unify_type result");
 
     let infcx = selcx.infcx();
     match infcx
@@ -274,6 +271,7 @@
     Normalized { value, obligations }
 }
 
+#[instrument(level = "debug", skip(selcx, param_env, cause, obligations))]
 pub fn normalize_with_depth_to<'a, 'b, 'tcx, T>(
     selcx: &'a mut SelectionContext<'b, 'tcx>,
     param_env: ty::ParamEnv<'tcx>,
@@ -285,16 +283,10 @@
 where
     T: TypeFoldable<'tcx>,
 {
-    debug!("normalize_with_depth(depth={}, value={:?})", depth, value);
     let mut normalizer = AssocTypeNormalizer::new(selcx, param_env, cause, depth, obligations);
     let result = ensure_sufficient_stack(|| normalizer.fold(value));
-    debug!(
-        "normalize_with_depth: depth={} result={:?} with {} obligations",
-        depth,
-        result,
-        normalizer.obligations.len()
-    );
-    debug!("normalize_with_depth: depth={} obligations={:?}", depth, normalizer.obligations);
+    debug!(?result, obligations.len = normalizer.obligations.len());
+    debug!(?normalizer.obligations,);
     result
 }
 
@@ -395,12 +387,11 @@
                     &mut self.obligations,
                 );
                 debug!(
-                    "AssocTypeNormalizer: depth={} normalized {:?} to {:?}, \
-                     now with {} obligations",
-                    self.depth,
-                    ty,
-                    normalized_ty,
-                    self.obligations.len()
+                    ?self.depth,
+                    ?ty,
+                    ?normalized_ty,
+                    obligations.len = ?self.obligations.len(),
+                    "AssocTypeNormalizer: normalized type"
                 );
                 normalized_ty
             }
@@ -472,6 +463,7 @@
 /// often immediately appended to another obligations vector. So now this
 /// function takes an obligations vector and appends to it directly, which is
 /// slightly uglier but avoids the need for an extra short-lived allocation.
+#[instrument(level = "debug", skip(selcx, param_env, cause, obligations))]
 fn opt_normalize_projection_type<'a, 'b, 'tcx>(
     selcx: &'a mut SelectionContext<'b, 'tcx>,
     param_env: ty::ParamEnv<'tcx>,
@@ -485,13 +477,6 @@
     let projection_ty = infcx.resolve_vars_if_possible(&projection_ty);
     let cache_key = ProjectionCacheKey::new(projection_ty);
 
-    debug!(
-        "opt_normalize_projection_type(\
-         projection_ty={:?}, \
-         depth={})",
-        projection_ty, depth
-    );
-
     // FIXME(#20304) For now, I am caching here, which is good, but it
     // means we don't capture the type variables that are created in
     // the case of ambiguity. Which means we may create a large stream
@@ -507,19 +492,10 @@
             // If we found ambiguity the last time, that means we will continue
             // to do so until some type in the key changes (and we know it
             // hasn't, because we just fully resolved it).
-            debug!(
-                "opt_normalize_projection_type: \
-                 found cache entry: ambiguous"
-            );
+            debug!("found cache entry: ambiguous");
             return Ok(None);
         }
         Err(ProjectionCacheEntry::InProgress) => {
-            // If while normalized A::B, we are asked to normalize
-            // A::B, just return A::B itself. This is a conservative
-            // answer, in the sense that A::B *is* clearly equivalent
-            // to A::B, though there may be a better value we can
-            // find.
-
             // Under lazy normalization, this can arise when
             // bootstrapping.  That is, imagine an environment with a
             // where-clause like `A::B == u32`. Now, if we are asked
@@ -528,11 +504,16 @@
             // with `A::B`, which can trigger a recursive
             // normalization.
 
-            debug!(
-                "opt_normalize_projection_type: \
-                 found cache entry: in-progress"
-            );
+            debug!("found cache entry: in-progress");
 
+            // Cache that normalizing this projection resulted in a cycle. This
+            // should ensure that, unless this happens within a snapshot that's
+            // rolled back, fulfillment or evaluation will notice the cycle.
+
+            infcx.inner.borrow_mut().projection_cache().recur(cache_key);
+            return Err(InProgress);
+        }
+        Err(ProjectionCacheEntry::Recur) => {
             return Err(InProgress);
         }
         Err(ProjectionCacheEntry::NormalizedTy(ty)) => {
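For reference, a minimal sketch in ordinary Rust of the kind of environment the bootstrapping comment above describes (not part of the patch; names are hypothetical): the param-env of `f` contains the projection predicate `<A as Trait>::B == u32`, and using that where-clause while normalizing `A::B` can itself require normalizing `A::B`, which is the in-progress/recursion case recorded here.

trait Trait {
    type B;
}

// The bound `A: Trait<B = u32>` puts the projection predicate
// `<A as Trait>::B == u32` into the environment of `f`.
fn f<A: Trait<B = u32>>(x: A::B) -> u32 {
    x
}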
@@ -547,11 +528,7 @@
             // discarded as duplicated). But when doing trait
             // evaluation this is not the case, and dropping the trait
             // evaluations can cause ICEs (e.g., #43132).
-            debug!(
-                "opt_normalize_projection_type: \
-                 found normalized ty `{:?}`",
-                ty
-            );
+            debug!(?ty, "found normalized ty");
 
             // Once we have inferred everything we need to know, we
             // can ignore the `obligations` from that point on.
@@ -561,21 +538,10 @@
             } else {
                 obligations.extend(ty.obligations);
             }
-
-            obligations.push(get_paranoid_cache_value_obligation(
-                infcx,
-                param_env,
-                projection_ty,
-                cause,
-                depth,
-            ));
             return Ok(Some(ty.value));
         }
         Err(ProjectionCacheEntry::Error) => {
-            debug!(
-                "opt_normalize_projection_type: \
-                 found error"
-            );
+            debug!("opt_normalize_projection_type: found error");
             let result = normalize_to_error(selcx, param_env, projection_ty, cause, depth);
             obligations.extend(result.obligations);
             return Ok(Some(result.value));
@@ -593,13 +559,7 @@
             // an impl, where-clause etc) and hence we must
             // re-normalize it
 
-            debug!(
-                "opt_normalize_projection_type: \
-                 projected_ty={:?} \
-                 depth={} \
-                 projected_obligations={:?}",
-                projected_ty, depth, projected_obligations
-            );
+            debug!(?projected_ty, ?depth, ?projected_obligations);
 
             let result = if projected_ty.has_projections() {
                 let mut normalizer = AssocTypeNormalizer::new(
@@ -611,11 +571,7 @@
                 );
                 let normalized_ty = normalizer.fold(&projected_ty);
 
-                debug!(
-                    "opt_normalize_projection_type: \
-                     normalized_ty={:?} depth={}",
-                    normalized_ty, depth
-                );
+                debug!(?normalized_ty, ?depth);
 
                 Normalized { value: normalized_ty, obligations: projected_obligations }
             } else {
@@ -628,21 +584,14 @@
             Ok(Some(result.value))
         }
         Ok(ProjectedTy::NoProgress(projected_ty)) => {
-            debug!(
-                "opt_normalize_projection_type: \
-                 projected_ty={:?} no progress",
-                projected_ty
-            );
+            debug!(?projected_ty, "opt_normalize_projection_type: no progress");
             let result = Normalized { value: projected_ty, obligations: vec![] };
             infcx.inner.borrow_mut().projection_cache().insert_ty(cache_key, result.clone());
             // No need to extend `obligations`.
             Ok(Some(result.value))
         }
         Err(ProjectionTyError::TooManyCandidates) => {
-            debug!(
-                "opt_normalize_projection_type: \
-                 too many candidates"
-            );
+            debug!("opt_normalize_projection_type: too many candidates");
             infcx.inner.borrow_mut().projection_cache().ambiguous(cache_key);
             Ok(None)
         }
@@ -676,7 +625,8 @@
         .obligations
         .iter()
         .filter(|obligation| {
-            match obligation.predicate.skip_binders() {
+            let bound_predicate = obligation.predicate.bound_atom();
+            match bound_predicate.skip_binder() {
                 // We found a `T: Foo<X = U>` predicate, let's check
                 // if `U` references any unresolved type
                 // variables. In principle, we only care if this
@@ -687,7 +637,7 @@
                 // but we have `T: Foo<X = ?1>` and `?1: Bar<X =
                 // ?0>`).
                 ty::PredicateAtom::Projection(data) => {
-                    infcx.unresolved_type_vars(&ty::Binder::bind(data.ty)).is_some()
+                    infcx.unresolved_type_vars(&bound_predicate.rebind(data.ty)).is_some()
                 }
 
                 // We are only interested in `T: Foo<X = U>` predicates, where
@@ -703,45 +653,6 @@
     NormalizedTy { value: result.value, obligations }
 }
 
-/// Whenever we give back a cache result for a projection like `<T as
-/// Trait>::Item ==> X`, we *always* include the obligation to prove
-/// that `T: Trait` (we may also include some other obligations). This
-/// may or may not be necessary -- in principle, all the obligations
-/// that must be proven to show that `T: Trait` were also returned
-/// when the cache was first populated. But there are some vague concerns,
-/// and so we take the precautionary measure of including `T: Trait` in
-/// the result:
-///
-/// Concern #1. The current setup is fragile. Perhaps someone could
-/// have failed to prove the concerns from when the cache was
-/// populated, but also not have used a snapshot, in which case the
-/// cache could remain populated even though `T: Trait` has not been
-/// shown. In this case, the "other code" is at fault -- when you
-/// project something, you are supposed to either have a snapshot or
-/// else prove all the resulting obligations -- but it's still easy to
-/// get wrong.
-///
-/// Concern #2. Even within the snapshot, if those original
-/// obligations are not yet proven, then we are able to do projections
-/// that may yet turn out to be wrong. This *may* lead to some sort
-/// of trouble, though we don't have a concrete example of how that
-/// can occur yet. But it seems risky at best.
-fn get_paranoid_cache_value_obligation<'a, 'tcx>(
-    infcx: &'a InferCtxt<'a, 'tcx>,
-    param_env: ty::ParamEnv<'tcx>,
-    projection_ty: ty::ProjectionTy<'tcx>,
-    cause: ObligationCause<'tcx>,
-    depth: usize,
-) -> PredicateObligation<'tcx> {
-    let trait_ref = projection_ty.trait_ref(infcx.tcx).to_poly_trait_ref();
-    Obligation {
-        cause,
-        recursion_depth: depth,
-        param_env,
-        predicate: trait_ref.without_const().to_predicate(infcx.tcx),
-    }
-}
-
 /// If we are projecting `<T as Trait>::Item` but `T: Trait` does not
 /// hold, then in various error cases we cannot generate a valid
 /// normalized projection. Therefore, we create an inference variable
@@ -801,15 +712,12 @@
 
     fn with_addl_obligations(mut self, mut obligations: Vec<PredicateObligation<'tcx>>) -> Self {
         debug!(
-            "with_addl_obligations: self.obligations.len={} obligations.len={}",
-            self.obligations.len(),
-            obligations.len()
+            self.obligations.len = ?self.obligations.len(),
+            obligations.len = obligations.len(),
+            "with_addl_obligations"
         );
 
-        debug!(
-            "with_addl_obligations: self.obligations={:?} obligations={:?}",
-            self.obligations, obligations
-        );
+        debug!(?self.obligations, ?obligations, "with_addl_obligations");
 
         self.obligations.append(&mut obligations);
         self
@@ -824,16 +732,23 @@
     selcx: &mut SelectionContext<'cx, 'tcx>,
     obligation: &ProjectionTyObligation<'tcx>,
 ) -> Result<ProjectedTy<'tcx>, ProjectionTyError<'tcx>> {
-    debug!("project(obligation={:?})", obligation);
+    debug!(?obligation, "project_type");
 
     if !selcx.tcx().sess.recursion_limit().value_within_limit(obligation.recursion_depth) {
         debug!("project: overflow!");
-        return Err(ProjectionTyError::TraitSelectionError(SelectionError::Overflow));
+        match selcx.query_mode() {
+            super::TraitQueryMode::Standard => {
+                selcx.infcx().report_overflow_error(&obligation, true);
+            }
+            super::TraitQueryMode::Canonical => {
+                return Err(ProjectionTyError::TraitSelectionError(SelectionError::Overflow));
+            }
+        }
     }
 
     let obligation_trait_ref = &obligation.predicate.trait_ref(selcx.tcx());
 
-    debug!("project: obligation_trait_ref={:?}", obligation_trait_ref);
+    debug!(?obligation_trait_ref);
 
     if obligation_trait_ref.references_error() {
         return Ok(ProjectedTy::Progress(Progress::error(selcx.tcx())));
@@ -848,12 +763,21 @@
 
     assemble_candidates_from_trait_def(selcx, obligation, &obligation_trait_ref, &mut candidates);
 
-    assemble_candidates_from_impls(selcx, obligation, &obligation_trait_ref, &mut candidates);
+    assemble_candidates_from_object_ty(selcx, obligation, &obligation_trait_ref, &mut candidates);
+
+    if let ProjectionTyCandidateSet::Single(ProjectionTyCandidate::Object(_)) = candidates {
+        // Avoid normalization cycle from selection (see
+        // `assemble_candidates_from_object_ty`).
+        // FIXME(lazy_normalization): Lazy normalization should save us from
+        // having to do special case this.
+    } else {
+        assemble_candidates_from_impls(selcx, obligation, &obligation_trait_ref, &mut candidates);
+    };
 
     match candidates {
-        ProjectionTyCandidateSet::Single(candidate) => Ok(ProjectedTy::Progress(
-            confirm_candidate(selcx, obligation, &obligation_trait_ref, candidate),
-        )),
+        ProjectionTyCandidateSet::Single(candidate) => {
+            Ok(ProjectedTy::Progress(confirm_candidate(selcx, obligation, candidate)))
+        }
         ProjectionTyCandidateSet::None => Ok(ProjectedTy::NoProgress(
             selcx
                 .tcx()
@@ -884,6 +808,7 @@
         candidate_set,
         ProjectionTyCandidate::ParamEnv,
         obligation.param_env.caller_bounds().iter(),
+        false,
     );
 }
 
@@ -909,10 +834,8 @@
     // Check whether the self-type is itself a projection.
     // If so, extract what we know from the trait and try to come up with a good answer.
     let bounds = match *obligation_trait_ref.self_ty().kind() {
-        ty::Projection(ref data) => {
-            tcx.projection_predicates(data.item_def_id).subst(tcx, data.substs)
-        }
-        ty::Opaque(def_id, substs) => tcx.projection_predicates(def_id).subst(tcx, substs),
+        ty::Projection(ref data) => tcx.item_bounds(data.item_def_id).subst(tcx, data.substs),
+        ty::Opaque(def_id, substs) => tcx.item_bounds(def_id).subst(tcx, substs),
         ty::Infer(ty::TyVar(_)) => {
             // If the self-type is an inference variable, then it MAY wind up
             // being a projected type, so induce an ambiguity.
@@ -929,9 +852,57 @@
         candidate_set,
         ProjectionTyCandidate::TraitDef,
         bounds.iter(),
+        true,
     )
 }
 
+/// In the case of a projection whose self type is a trait object, like
+/// `<dyn Iterator<Item = ()> as Iterator>::Item`, we can use the existential
+/// predicate in the trait object.
+///
+/// We don't go through the select candidate for these bounds to avoid cycles:
+/// In the above case, `dyn Iterator<Item = ()>: Iterator` would create a
+/// nested obligation of `<dyn Iterator<Item = ()> as Iterator>::Item: Sized`,
+/// which then has to be normalized without proving
+/// `dyn Iterator<Item = ()>: Iterator` again.
+fn assemble_candidates_from_object_ty<'cx, 'tcx>(
+    selcx: &mut SelectionContext<'cx, 'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    obligation_trait_ref: &ty::TraitRef<'tcx>,
+    candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
+) {
+    debug!("assemble_candidates_from_object_ty(..)");
+
+    let tcx = selcx.tcx();
+
+    let self_ty = obligation_trait_ref.self_ty();
+    let object_ty = selcx.infcx().shallow_resolve(self_ty);
+    let data = match object_ty.kind() {
+        ty::Dynamic(data, ..) => data,
+        ty::Infer(ty::TyVar(_)) => {
+            // If the self-type is an inference variable, then it MAY wind up
+            // being an object type, so induce an ambiguity.
+            candidate_set.mark_ambiguous();
+            return;
+        }
+        _ => return,
+    };
+    let env_predicates = data
+        .projection_bounds()
+        .filter(|bound| bound.item_def_id() == obligation.predicate.item_def_id)
+        .map(|p| p.with_self_ty(tcx, object_ty).to_predicate(tcx));
+
+    assemble_candidates_from_predicates(
+        selcx,
+        obligation,
+        obligation_trait_ref,
+        candidate_set,
+        ProjectionTyCandidate::Object,
+        env_predicates,
+        false,
+    );
+}
+
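To make the doc comment on `assemble_candidates_from_object_ty` above concrete, a small sketch in ordinary Rust (not part of the patch): the projection can be answered from the `Item = ()` bound written in the object type itself, without re-selecting `dyn Iterator<Item = ()>: Iterator`.

// `<dyn Iterator<Item = ()> as Iterator>::Item` normalizes to `()` using the
// bound carried by the object type.
fn first(it: &mut dyn Iterator<Item = ()>) -> Option<<dyn Iterator<Item = ()> as Iterator>::Item> {
    it.next()
}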
 fn assemble_candidates_from_predicates<'cx, 'tcx>(
     selcx: &mut SelectionContext<'cx, 'tcx>,
     obligation: &ProjectionTyObligation<'tcx>,
@@ -939,37 +910,41 @@
     candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
     ctor: fn(ty::PolyProjectionPredicate<'tcx>) -> ProjectionTyCandidate<'tcx>,
     env_predicates: impl Iterator<Item = ty::Predicate<'tcx>>,
+    potentially_unnormalized_candidates: bool,
 ) {
-    debug!("assemble_candidates_from_predicates(obligation={:?})", obligation);
+    debug!(?obligation, "assemble_candidates_from_predicates");
+
     let infcx = selcx.infcx();
     for predicate in env_predicates {
-        debug!("assemble_candidates_from_predicates: predicate={:?}", predicate);
+        debug!(?predicate);
+        let bound_predicate = predicate.bound_atom();
         if let ty::PredicateAtom::Projection(data) = predicate.skip_binders() {
-            let data = ty::Binder::bind(data);
+            let data = bound_predicate.rebind(data);
             let same_def_id = data.projection_def_id() == obligation.predicate.item_def_id;
 
             let is_match = same_def_id
                 && infcx.probe(|_| {
-                    let data_poly_trait_ref = data.to_poly_trait_ref(infcx.tcx);
-                    let obligation_poly_trait_ref = obligation_trait_ref.to_poly_trait_ref();
-                    infcx
-                        .at(&obligation.cause, obligation.param_env)
-                        .sup(obligation_poly_trait_ref, data_poly_trait_ref)
-                        .map(|InferOk { obligations: _, value: () }| {
-                            // FIXME(#32730) -- do we need to take obligations
-                            // into account in any way? At the moment, no.
-                        })
-                        .is_ok()
+                    selcx.match_projection_projections(
+                        obligation,
+                        obligation_trait_ref,
+                        &data,
+                        potentially_unnormalized_candidates,
+                    )
                 });
 
-            debug!(
-                "assemble_candidates_from_predicates: candidate={:?} \
-                 is_match={} same_def_id={}",
-                data, is_match, same_def_id
-            );
+            debug!(?data, ?is_match, ?same_def_id);
 
             if is_match {
                 candidate_set.push_candidate(ctor(data));
+
+                if potentially_unnormalized_candidates
+                    && !obligation.predicate.has_infer_types_or_consts()
+                {
+                    // HACK: Pick the first trait def candidate for a fully
+                    // inferred predicate. This is to allow duplicates that
+                    // differ only in normalization.
+                    return;
+                }
             }
         }
     }
@@ -981,6 +956,8 @@
     obligation_trait_ref: &ty::TraitRef<'tcx>,
     candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
 ) {
+    debug!("assemble_candidates_from_impls");
+
     // If we are resolving `<T as TraitRef<...>>::Item == Type`,
     // start out by selecting the predicate `T as TraitRef<...>`:
     let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref();
@@ -993,7 +970,7 @@
                 return Err(());
             }
             Err(e) => {
-                debug!("assemble_candidates_from_impls: selection error {:?}", e);
+                debug!(error = ?e, "selection error");
                 candidate_set.mark_error(e);
                 return Err(());
             }
@@ -1003,9 +980,8 @@
             super::ImplSource::Closure(_)
             | super::ImplSource::Generator(_)
             | super::ImplSource::FnPointer(_)
-            | super::ImplSource::Object(_)
             | super::ImplSource::TraitAlias(_) => {
-                debug!("assemble_candidates_from_impls: impl_source={:?}", impl_source);
+                debug!(?impl_source);
                 true
             }
             super::ImplSource::UserDefined(impl_data) => {
@@ -1051,10 +1027,9 @@
                         !poly_trait_ref.still_further_specializable()
                     } else {
                         debug!(
-                            "assemble_candidates_from_impls: not eligible due to default: \
-                             assoc_ty={} predicate={}",
-                            selcx.tcx().def_path_str(node_item.item.def_id),
-                            obligation.predicate,
+                            assoc_ty = ?selcx.tcx().def_path_str(node_item.item.def_id),
+                            ?obligation.predicate,
+                            "assemble_candidates_from_impls: not eligible due to default",
                         );
                         false
                     }
@@ -1128,6 +1103,12 @@
                 // in `assemble_candidates_from_param_env`.
                 false
             }
+            super::ImplSource::Object(_) => {
+                // Handled by the `Object` projection candidate. See
+                // `assemble_candidates_from_object_ty` for an explanation of
+                // why we special case object types.
+                false
+            }
             super::ImplSource::AutoImpl(..) | super::ImplSource::Builtin(..) => {
                 // These traits have no associated types.
                 selcx.tcx().sess.delay_span_bug(
@@ -1153,19 +1134,21 @@
 fn confirm_candidate<'cx, 'tcx>(
     selcx: &mut SelectionContext<'cx, 'tcx>,
     obligation: &ProjectionTyObligation<'tcx>,
-    obligation_trait_ref: &ty::TraitRef<'tcx>,
     candidate: ProjectionTyCandidate<'tcx>,
 ) -> Progress<'tcx> {
-    debug!("confirm_candidate(candidate={:?}, obligation={:?})", candidate, obligation);
-
+    debug!(?obligation, ?candidate, "confirm_candidate");
     let mut progress = match candidate {
         ProjectionTyCandidate::ParamEnv(poly_projection)
-        | ProjectionTyCandidate::TraitDef(poly_projection) => {
-            confirm_param_env_candidate(selcx, obligation, poly_projection)
+        | ProjectionTyCandidate::Object(poly_projection) => {
+            confirm_param_env_candidate(selcx, obligation, poly_projection, false)
+        }
+
+        ProjectionTyCandidate::TraitDef(poly_projection) => {
+            confirm_param_env_candidate(selcx, obligation, poly_projection, true)
         }
 
         ProjectionTyCandidate::Select(impl_source) => {
-            confirm_select_candidate(selcx, obligation, obligation_trait_ref, impl_source)
+            confirm_select_candidate(selcx, obligation, impl_source)
         }
     };
     // When checking for cycle during evaluation, we compare predicates with
@@ -1182,7 +1165,6 @@
 fn confirm_select_candidate<'cx, 'tcx>(
     selcx: &mut SelectionContext<'cx, 'tcx>,
     obligation: &ProjectionTyObligation<'tcx>,
-    obligation_trait_ref: &ty::TraitRef<'tcx>,
     impl_source: Selection<'tcx>,
 ) -> Progress<'tcx> {
     match impl_source {
@@ -1193,15 +1175,12 @@
         super::ImplSource::DiscriminantKind(data) => {
             confirm_discriminant_kind_candidate(selcx, obligation, data)
         }
-        super::ImplSource::Object(_) => {
-            confirm_object_candidate(selcx, obligation, obligation_trait_ref)
-        }
-        super::ImplSource::AutoImpl(..)
+        super::ImplSource::Object(_)
+        | super::ImplSource::AutoImpl(..)
         | super::ImplSource::Param(..)
         | super::ImplSource::Builtin(..)
-        | super::ImplSource::TraitAlias(..) =>
-        // we don't create Select candidates with this kind of resolution
-        {
+        | super::ImplSource::TraitAlias(..) => {
+            // we don't create Select candidates with this kind of resolution
             span_bug!(
                 obligation.cause.span,
                 "Cannot project an associated type from `{:?}`",
@@ -1211,72 +1190,6 @@
     }
 }
 
-fn confirm_object_candidate<'cx, 'tcx>(
-    selcx: &mut SelectionContext<'cx, 'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    obligation_trait_ref: &ty::TraitRef<'tcx>,
-) -> Progress<'tcx> {
-    let self_ty = obligation_trait_ref.self_ty();
-    let object_ty = selcx.infcx().shallow_resolve(self_ty);
-    debug!("confirm_object_candidate(object_ty={:?})", object_ty);
-    let data = match object_ty.kind() {
-        ty::Dynamic(data, ..) => data,
-        _ => span_bug!(
-            obligation.cause.span,
-            "confirm_object_candidate called with non-object: {:?}",
-            object_ty
-        ),
-    };
-    let env_predicates = data
-        .projection_bounds()
-        .map(|p| p.with_self_ty(selcx.tcx(), object_ty).to_predicate(selcx.tcx()));
-    let env_predicate = {
-        let env_predicates = elaborate_predicates(selcx.tcx(), env_predicates);
-
-        // select only those projections that are actually projecting an
-        // item with the correct name
-
-        let env_predicates = env_predicates.filter_map(|o| match o.predicate.skip_binders() {
-            ty::PredicateAtom::Projection(data)
-                if data.projection_ty.item_def_id == obligation.predicate.item_def_id =>
-            {
-                Some(ty::Binder::bind(data))
-            }
-            _ => None,
-        });
-
-        // select those with a relevant trait-ref
-        let mut env_predicates = env_predicates.filter(|data| {
-            let data_poly_trait_ref = data.to_poly_trait_ref(selcx.tcx());
-            let obligation_poly_trait_ref = obligation_trait_ref.to_poly_trait_ref();
-            selcx.infcx().probe(|_| {
-                selcx
-                    .infcx()
-                    .at(&obligation.cause, obligation.param_env)
-                    .sup(obligation_poly_trait_ref, data_poly_trait_ref)
-                    .is_ok()
-            })
-        });
-
-        // select the first matching one; there really ought to be one or
-        // else the object type is not WF, since an object type should
-        // include all of its projections explicitly
-        match env_predicates.next() {
-            Some(env_predicate) => env_predicate,
-            None => {
-                debug!(
-                    "confirm_object_candidate: no env-predicate \
-                     found in object type `{:?}`; ill-formed",
-                    object_ty
-                );
-                return Progress::error(selcx.tcx());
-            }
-        }
-    };
-
-    confirm_param_env_candidate(selcx, obligation, env_predicate)
-}
-
 fn confirm_generator_candidate<'cx, 'tcx>(
     selcx: &mut SelectionContext<'cx, 'tcx>,
     obligation: &ProjectionTyObligation<'tcx>,
@@ -1291,10 +1204,7 @@
         &gen_sig,
     );
 
-    debug!(
-        "confirm_generator_candidate: obligation={:?},gen_sig={:?},obligations={:?}",
-        obligation, gen_sig, obligations
-    );
+    debug!(?obligation, ?gen_sig, ?obligations, "confirm_generator_candidate");
 
     let tcx = selcx.tcx();
 
@@ -1325,7 +1235,7 @@
         }
     });
 
-    confirm_param_env_candidate(selcx, obligation, predicate)
+    confirm_param_env_candidate(selcx, obligation, predicate, false)
         .with_addl_obligations(impl_source.nested)
         .with_addl_obligations(obligations)
 }
@@ -1347,7 +1257,7 @@
         ty: self_ty.discriminant_ty(tcx),
     };
 
-    confirm_param_env_candidate(selcx, obligation, ty::Binder::bind(predicate))
+    confirm_param_env_candidate(selcx, obligation, ty::Binder::bind(predicate), false)
 }
 
 fn confirm_fn_pointer_candidate<'cx, 'tcx>(
@@ -1384,10 +1294,7 @@
         &closure_sig,
     );
 
-    debug!(
-        "confirm_closure_candidate: obligation={:?},closure_sig={:?},obligations={:?}",
-        obligation, closure_sig, obligations
-    );
+    debug!(?obligation, ?closure_sig, ?obligations, "confirm_closure_candidate");
 
     confirm_callable_candidate(selcx, obligation, closure_sig, util::TupleArgumentsFlag::No)
         .with_addl_obligations(impl_source.nested)
@@ -1402,7 +1309,7 @@
 ) -> Progress<'tcx> {
     let tcx = selcx.tcx();
 
-    debug!("confirm_callable_candidate({:?},{:?})", obligation, fn_sig);
+    debug!(?obligation, ?fn_sig, "confirm_callable_candidate");
 
     let fn_once_def_id = tcx.require_lang_item(LangItem::FnOnce, None);
     let fn_once_output_def_id = tcx.require_lang_item(LangItem::FnOnceOutput, None);
@@ -1422,13 +1329,14 @@
         ty: ret_type,
     });
 
-    confirm_param_env_candidate(selcx, obligation, predicate)
+    confirm_param_env_candidate(selcx, obligation, predicate, false)
 }
 
 fn confirm_param_env_candidate<'cx, 'tcx>(
     selcx: &mut SelectionContext<'cx, 'tcx>,
     obligation: &ProjectionTyObligation<'tcx>,
     poly_cache_entry: ty::PolyProjectionPredicate<'tcx>,
+    potentially_unnormalized_candidate: bool,
 ) -> Progress<'tcx> {
     let infcx = selcx.infcx();
     let cause = &obligation.cause;
@@ -1442,8 +1350,28 @@
 
     let cache_trait_ref = cache_entry.projection_ty.trait_ref(infcx.tcx);
     let obligation_trait_ref = obligation.predicate.trait_ref(infcx.tcx);
+    let mut nested_obligations = Vec::new();
+    let cache_trait_ref = if potentially_unnormalized_candidate {
+        ensure_sufficient_stack(|| {
+            normalize_with_depth_to(
+                selcx,
+                obligation.param_env,
+                obligation.cause.clone(),
+                obligation.recursion_depth + 1,
+                &cache_trait_ref,
+                &mut nested_obligations,
+            )
+        })
+    } else {
+        cache_trait_ref
+    };
+
     match infcx.at(cause, param_env).eq(cache_trait_ref, obligation_trait_ref) {
-        Ok(InferOk { value: _, obligations }) => Progress { ty: cache_entry.ty, obligations },
+        Ok(InferOk { value: _, obligations }) => {
+            nested_obligations.extend(obligations);
+            assoc_ty_own_obligations(selcx, obligation, &mut nested_obligations);
+            Progress { ty: cache_entry.ty, obligations: nested_obligations }
+        }
         Err(e) => {
             let msg = format!(
                 "Failed to unify obligation `{:?}` with poly_projection `{:?}`: {:?}",
@@ -1463,7 +1391,7 @@
 ) -> Progress<'tcx> {
     let tcx = selcx.tcx();
 
-    let ImplSourceUserDefinedData { impl_def_id, substs, nested } = impl_impl_source;
+    let ImplSourceUserDefinedData { impl_def_id, substs, mut nested } = impl_impl_source;
     let assoc_item_id = obligation.predicate.item_def_id;
     let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
 
@@ -1496,15 +1424,48 @@
     let ty = tcx.type_of(assoc_ty.item.def_id);
     if substs.len() != tcx.generics_of(assoc_ty.item.def_id).count() {
         let err = tcx.ty_error_with_message(
-            DUMMY_SP,
+            obligation.cause.span,
             "impl item and trait item have different parameter counts",
         );
         Progress { ty: err, obligations: nested }
     } else {
+        assoc_ty_own_obligations(selcx, obligation, &mut nested);
         Progress { ty: ty.subst(tcx, substs), obligations: nested }
     }
 }
 
+// Get obligations corresponding to the predicates from the where-clause of the
+// associated type itself.
+// Note: `feature(generic_associated_types)` is required to write such
+// predicates, even for non-generic associated types (see the sketch after this function).
+fn assoc_ty_own_obligations<'cx, 'tcx>(
+    selcx: &mut SelectionContext<'cx, 'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    nested: &mut Vec<PredicateObligation<'tcx>>,
+) {
+    let tcx = selcx.tcx();
+    for predicate in tcx
+        .predicates_of(obligation.predicate.item_def_id)
+        .instantiate_own(tcx, obligation.predicate.substs)
+        .predicates
+    {
+        let normalized = normalize_with_depth_to(
+            selcx,
+            obligation.param_env,
+            obligation.cause.clone(),
+            obligation.recursion_depth + 1,
+            &predicate,
+            nested,
+        );
+        nested.push(Obligation::with_depth(
+            obligation.cause.clone(),
+            obligation.recursion_depth + 1,
+            obligation.param_env,
+            normalized,
+        ));
+    }
+}
+
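As referenced in the note above, a sketch of an associated type carrying its own where-clause (ordinary Rust, not part of the patch; at the time of this import this still required a nightly compiler with `#![feature(generic_associated_types)]`):

#![feature(generic_associated_types)]

trait Pointer {
    // The where-clause belongs to the associated type itself; projecting
    // `<T as Pointer>::Target` therefore also has to prove `T: Clone`,
    // which is what `assoc_ty_own_obligations` instantiates.
    type Target where Self: Clone;
}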
 /// Locate the definition of an associated type in the specialization hierarchy,
 /// starting from the given impl.
 ///
diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
index 424b3bd..8212823a 100644
--- a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
@@ -110,7 +110,7 @@
         // check if *any* of those are trivial.
         ty::Tuple(ref tys) => tys.iter().all(|t| trivial_dropck_outlives(tcx, t.expect_ty())),
         ty::Closure(_, ref substs) => {
-            substs.as_closure().upvar_tys().all(|t| trivial_dropck_outlives(tcx, t))
+            trivial_dropck_outlives(tcx, substs.as_closure().tupled_upvars_ty())
         }
 
         ty::Adt(def, _) => {
diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
index c0ae7bf..42a598c 100644
--- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
@@ -7,7 +7,7 @@
 use crate::infer::{InferCtxt, InferOk};
 use crate::traits::error_reporting::InferCtxtExt;
 use crate::traits::{Obligation, ObligationCause, PredicateObligation, Reveal};
-use rustc_data_structures::mini_map::MiniMap;
+use rustc_data_structures::sso::SsoHashMap;
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_infer::traits::Normalized;
 use rustc_middle::ty::fold::{TypeFoldable, TypeFolder};
@@ -44,7 +44,7 @@
     {
         debug!(
             "normalize::<{}>(value={:?}, param_env={:?})",
-            ::std::any::type_name::<T>(),
+            std::any::type_name::<T>(),
             value,
             self.param_env,
         );
@@ -58,20 +58,20 @@
             param_env: self.param_env,
             obligations: vec![],
             error: false,
-            cache: MiniMap::new(),
+            cache: SsoHashMap::new(),
             anon_depth: 0,
         };
 
         let result = value.fold_with(&mut normalizer);
         debug!(
             "normalize::<{}>: result={:?} with {} obligations",
-            ::std::any::type_name::<T>(),
+            std::any::type_name::<T>(),
             result,
             normalizer.obligations.len(),
         );
         debug!(
             "normalize::<{}>: obligations={:?}",
-            ::std::any::type_name::<T>(),
+            std::any::type_name::<T>(),
             normalizer.obligations,
         );
         if normalizer.error {
@@ -87,7 +87,7 @@
     cause: &'cx ObligationCause<'tcx>,
     param_env: ty::ParamEnv<'tcx>,
     obligations: Vec<PredicateObligation<'tcx>>,
-    cache: MiniMap<Ty<'tcx>, Ty<'tcx>>,
+    cache: SsoHashMap<Ty<'tcx>, Ty<'tcx>>,
     error: bool,
     anon_depth: usize,
 }
diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
index 9cb5c23..b0bfb4a 100644
--- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
@@ -22,6 +22,7 @@
 use super::{EvaluatedCandidate, SelectionCandidateSet, SelectionContext, TraitObligationStack};
 
 impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
+    #[instrument(level = "debug", skip(self))]
     pub(super) fn candidate_from_obligation<'o>(
         &mut self,
         stack: &TraitObligationStack<'o, 'tcx>,
@@ -35,16 +36,13 @@
         // this is because we want the unbound variables to be
         // replaced with fresh types starting from index 0.
         let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate);
-        debug!(
-            "candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})",
-            cache_fresh_trait_pred, stack
-        );
+        debug!(?cache_fresh_trait_pred);
         debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars());
 
         if let Some(c) =
             self.check_candidate_cache(stack.obligation.param_env, cache_fresh_trait_pred)
         {
-            debug!("CACHE HIT: SELECT({:?})={:?}", cache_fresh_trait_pred, c);
+            debug!(candidate = ?c, "CACHE HIT");
             return c;
         }
 
@@ -57,7 +55,7 @@
         let (candidate, dep_node) =
             self.in_task(|this| this.candidate_from_obligation_no_cache(stack));
 
-        debug!("CACHE MISS: SELECT({:?})={:?}", cache_fresh_trait_pred, candidate);
+        debug!(?candidate, "CACHE MISS");
         self.insert_candidate_cache(
             stack.obligation.param_env,
             cache_fresh_trait_pred,
@@ -103,7 +101,7 @@
                         } else {
                             IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc }
                         };
-                        debug!("evaluate_stack: pushing cause = {:?}", cause);
+                        debug!(?cause, "evaluate_stack: pushing cause");
                         self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
                     }
                 }
@@ -120,7 +118,7 @@
 
         let mut candidates = candidate_set.vec;
 
-        debug!("assembled {} candidates for {:?}: {:?}", candidates.len(), stack, candidates);
+        debug!(?stack, ?candidates, "assembled {} candidates", candidates.len());
 
         // At this point, we know that each of the entries in the
         // candidate set is *individually* applicable. Now we have to
@@ -163,9 +161,9 @@
             .flat_map(Result::transpose)
             .collect::<Result<Vec<_>, _>>()?;
 
-        debug!("winnowed to {} candidates for {:?}: {:?}", candidates.len(), stack, candidates);
+        debug!(?stack, ?candidates, "winnowed to {} candidates", candidates.len());
 
-        let needs_infer = stack.obligation.predicate.needs_infer();
+        let needs_infer = stack.obligation.predicate.has_infer_types_or_consts();
 
         // If there are STILL multiple candidates, we can further
         // reduce the list by dropping duplicates -- including
@@ -181,10 +179,10 @@
                     )
                 });
                 if is_dup {
-                    debug!("Dropping candidate #{}/{}: {:?}", i, candidates.len(), candidates[i]);
+                    debug!(candidate = ?candidates[i], "Dropping candidate #{}/{}", i, candidates.len());
                     candidates.swap_remove(i);
                 } else {
-                    debug!("Retaining candidate #{}/{}: {:?}", i, candidates.len(), candidates[i]);
+                    debug!(candidate = ?candidates[i], "Retaining candidate #{}/{}", i, candidates.len());
                     i += 1;
 
                     // If there are *STILL* multiple candidates, give up
@@ -257,7 +255,7 @@
         let lang_items = self.tcx().lang_items();
 
         if lang_items.copy_trait() == Some(def_id) {
-            debug!("obligation self ty is {:?}", obligation.predicate.skip_binder().self_ty());
+            debug!(obligation_self_ty = ?obligation.predicate.skip_binder().self_ty());
 
             // User-defined copy impls are permitted, but only for
             // structs and enums.
@@ -308,7 +306,7 @@
         obligation: &TraitObligation<'tcx>,
         candidates: &mut SelectionCandidateSet<'tcx>,
     ) {
-        debug!("assemble_candidates_for_projected_tys({:?})", obligation);
+        debug!(?obligation, "assemble_candidates_from_projected_tys");
 
         // Before we go into the whole placeholder thing, just
         // quickly check if the self-type is a projection at all.
@@ -327,8 +325,8 @@
             .infcx
             .probe(|_| self.match_projection_obligation_against_definition_bounds(obligation));
 
-        if result {
-            candidates.vec.push(ProjectionCandidate);
+        for predicate_index in result {
+            candidates.vec.push(ProjectionCandidate(predicate_index));
         }
     }
 
@@ -341,7 +339,7 @@
         stack: &TraitObligationStack<'o, 'tcx>,
         candidates: &mut SelectionCandidateSet<'tcx>,
     ) -> Result<(), SelectionError<'tcx>> {
-        debug!("assemble_candidates_from_caller_bounds({:?})", stack.obligation);
+        debug!(?stack.obligation, "assemble_candidates_from_caller_bounds");
 
         let all_bounds = stack
             .obligation
@@ -383,10 +381,7 @@
         let self_ty = obligation.self_ty().skip_binder();
         match self_ty.kind() {
             ty::Generator(..) => {
-                debug!(
-                    "assemble_generator_candidates: self_ty={:?} obligation={:?}",
-                    self_ty, obligation
-                );
+                debug!(?self_ty, ?obligation, "assemble_generator_candidates");
 
                 candidates.vec.push(GeneratorCandidate);
             }
@@ -423,10 +418,10 @@
         // type/region parameters
         match *obligation.self_ty().skip_binder().kind() {
             ty::Closure(_, closure_substs) => {
-                debug!("assemble_unboxed_candidates: kind={:?} obligation={:?}", kind, obligation);
+                debug!(?kind, ?obligation, "assemble_unboxed_candidates");
                 match self.infcx.closure_kind(closure_substs) {
                     Some(closure_kind) => {
-                        debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind);
+                        debug!(?closure_kind, "assemble_unboxed_candidates");
                         if closure_kind.extends(kind) {
                             candidates.vec.push(ClosureCandidate);
                         }
@@ -503,7 +498,7 @@
         obligation: &TraitObligation<'tcx>,
         candidates: &mut SelectionCandidateSet<'tcx>,
     ) -> Result<(), SelectionError<'tcx>> {
-        debug!("assemble_candidates_from_impls(obligation={:?})", obligation);
+        debug!(?obligation, "assemble_candidates_from_impls");
 
         // Essentially any user-written impl will match with an error type,
         // so creating `ImplCandidates` isn't useful. However, we might
@@ -537,7 +532,7 @@
     ) -> Result<(), SelectionError<'tcx>> {
         // Okay to skip binder here because the tests we do below do not involve bound regions.
         let self_ty = obligation.self_ty().skip_binder();
-        debug!("assemble_candidates_from_auto_impls(self_ty={:?})", self_ty);
+        debug!(?self_ty, "assemble_candidates_from_auto_impls");
 
         let def_id = obligation.predicate.def_id();
 
@@ -604,8 +599,8 @@
         candidates: &mut SelectionCandidateSet<'tcx>,
     ) {
         debug!(
-            "assemble_candidates_from_object_ty(self_ty={:?})",
-            obligation.self_ty().skip_binder()
+            self_ty = ?obligation.self_ty().skip_binder(),
+            "assemble_candidates_from_object_ty",
         );
 
         self.infcx.probe(|_snapshot| {
@@ -645,26 +640,32 @@
                 _ => return,
             };
 
-            debug!("assemble_candidates_from_object_ty: poly_trait_ref={:?}", poly_trait_ref);
+            debug!(?poly_trait_ref, "assemble_candidates_from_object_ty");
+
+            let poly_trait_predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate);
+            let placeholder_trait_predicate =
+                self.infcx().replace_bound_vars_with_placeholders(&poly_trait_predicate);
 
             // Count only those upcast versions that match the trait-ref
             // we are looking for. Specifically, check not only for the
             // correct trait, but also for the correct type parameters.
             // For example, we may be trying to upcast `Foo` to `Bar<i32>`,
             // but `Foo` is declared as `trait Foo: Bar<u32>`.
-            let upcast_trait_refs = util::supertraits(self.tcx(), poly_trait_ref)
-                .filter(|upcast_trait_ref| {
-                    self.infcx
-                        .probe(|_| self.match_poly_trait_ref(obligation, *upcast_trait_ref).is_ok())
+            let candidate_supertraits = util::supertraits(self.tcx(), poly_trait_ref)
+                .enumerate()
+                .filter(|&(_, upcast_trait_ref)| {
+                    self.infcx.probe(|_| {
+                        self.match_normalize_trait_ref(
+                            obligation,
+                            upcast_trait_ref,
+                            placeholder_trait_predicate.trait_ref,
+                        )
+                        .is_ok()
+                    })
                 })
-                .count();
+                .map(|(idx, _)| ObjectCandidate(idx));
 
-            if upcast_trait_refs > 1 {
-                // Can be upcast in many ways; need more type information.
-                candidates.ambiguous = true;
-            } else if upcast_trait_refs == 1 {
-                candidates.vec.push(ObjectCandidate);
-            }
+            candidates.vec.extend(candidate_supertraits);
         })
     }
 
@@ -697,7 +698,7 @@
         };
         let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
 
-        debug!("assemble_candidates_for_unsizing(source={:?}, target={:?})", source, target);
+        debug!(?source, ?target, "assemble_candidates_for_unsizing");
 
         let may_apply = match (source.kind(), target.kind()) {
             // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
@@ -758,7 +759,7 @@
     ) -> Result<(), SelectionError<'tcx>> {
         // Okay to skip binder here because the tests we do below do not involve bound regions.
         let self_ty = obligation.self_ty().skip_binder();
-        debug!("assemble_candidates_for_trait_alias(self_ty={:?})", self_ty);
+        debug!(?self_ty, "assemble_candidates_for_trait_alias");
 
         let def_id = obligation.predicate.def_id();
 
@@ -778,7 +779,7 @@
     ) -> Result<(), SelectionError<'tcx>> {
         match conditions {
             BuiltinImplConditions::Where(nested) => {
-                debug!("builtin_bound: nested={:?}", nested);
+                debug!(?nested, "builtin_bound");
                 candidates
                     .vec
                     .push(BuiltinCandidate { has_nested: !nested.skip_binder().is_empty() });
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
index 88b656c..9cfb744 100644
--- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -10,12 +10,13 @@
 use rustc_hir::lang_items::LangItem;
 use rustc_index::bit_set::GrowableBitSet;
 use rustc_infer::infer::InferOk;
+use rustc_infer::infer::LateBoundRegionConversionTime::HigherRankedType;
 use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst, SubstsRef};
 use rustc_middle::ty::{self, Ty};
 use rustc_middle::ty::{ToPolyTraitRef, ToPredicate, WithConstness};
 use rustc_span::def_id::DefId;
 
-use crate::traits::project::{self, normalize_with_depth};
+use crate::traits::project::{normalize_with_depth, normalize_with_depth_to};
 use crate::traits::select::TraitObligationExt;
 use crate::traits::util;
 use crate::traits::util::{closure_trait_ref_and_return_type, predicate_for_trait_def};
@@ -41,13 +42,12 @@
 use std::iter;
 
 impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
+    #[instrument(level = "debug", skip(self))]
     pub(super) fn confirm_candidate(
         &mut self,
         obligation: &TraitObligation<'tcx>,
         candidate: SelectionCandidate<'tcx>,
     ) -> Result<Selection<'tcx>, SelectionError<'tcx>> {
-        debug!("confirm_candidate({:?}, {:?})", obligation, candidate);
-
         match candidate {
             BuiltinCandidate { has_nested } => {
                 let data = self.confirm_builtin_candidate(obligation, has_nested);
@@ -68,9 +68,14 @@
                 Ok(ImplSource::AutoImpl(data))
             }
 
-            ProjectionCandidate => {
-                self.confirm_projection_candidate(obligation);
-                Ok(ImplSource::Param(Vec::new()))
+            ProjectionCandidate(idx) => {
+                let obligations = self.confirm_projection_candidate(obligation, idx)?;
+                Ok(ImplSource::Param(obligations))
+            }
+
+            ObjectCandidate(idx) => {
+                let data = self.confirm_object_candidate(obligation, idx)?;
+                Ok(ImplSource::Object(data))
             }
 
             ClosureCandidate => {
@@ -97,11 +102,6 @@
                 Ok(ImplSource::TraitAlias(data))
             }
 
-            ObjectCandidate => {
-                let data = self.confirm_object_candidate(obligation);
-                Ok(ImplSource::Object(data))
-            }
-
             BuiltinObjectCandidate => {
                 // This indicates something like `Trait + Send: Send`. In this case, we know that
                 // this holds because that's what the object type is telling us, and there's really
@@ -116,10 +116,66 @@
         }
     }
 
-    fn confirm_projection_candidate(&mut self, obligation: &TraitObligation<'tcx>) {
+    fn confirm_projection_candidate(
+        &mut self,
+        obligation: &TraitObligation<'tcx>,
+        idx: usize,
+    ) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
         self.infcx.commit_unconditionally(|_| {
-            let result = self.match_projection_obligation_against_definition_bounds(obligation);
-            assert!(result);
+            let tcx = self.tcx();
+
+            let trait_predicate = self.infcx.shallow_resolve(obligation.predicate);
+            let placeholder_trait_predicate =
+                self.infcx().replace_bound_vars_with_placeholders(&trait_predicate);
+            let placeholder_self_ty = placeholder_trait_predicate.self_ty();
+            let (def_id, substs) = match *placeholder_self_ty.kind() {
+                ty::Projection(proj) => (proj.item_def_id, proj.substs),
+                ty::Opaque(def_id, substs) => (def_id, substs),
+                _ => bug!("projection candidate for unexpected type: {:?}", placeholder_self_ty),
+            };
+
+            let candidate_predicate = tcx.item_bounds(def_id)[idx].subst(tcx, substs);
+            let candidate = candidate_predicate
+                .to_opt_poly_trait_ref()
+                .expect("projection candidate is not a trait predicate");
+            let mut obligations = Vec::new();
+            let candidate = normalize_with_depth_to(
+                self,
+                obligation.param_env,
+                obligation.cause.clone(),
+                obligation.recursion_depth + 1,
+                &candidate,
+                &mut obligations,
+            );
+
+            obligations.extend(self.infcx.commit_if_ok(|_| {
+                self.infcx
+                    .at(&obligation.cause, obligation.param_env)
+                    .sup(placeholder_trait_predicate.trait_ref.to_poly_trait_ref(), candidate)
+                    .map(|InferOk { obligations, .. }| obligations)
+                    .map_err(|_| Unimplemented)
+            })?);
+
+            if let ty::Projection(..) = placeholder_self_ty.kind() {
+                for predicate in tcx.predicates_of(def_id).instantiate_own(tcx, substs).predicates {
+                    let normalized = normalize_with_depth_to(
+                        self,
+                        obligation.param_env,
+                        obligation.cause.clone(),
+                        obligation.recursion_depth + 1,
+                        &predicate,
+                        &mut obligations,
+                    );
+                    obligations.push(Obligation::with_depth(
+                        obligation.cause.clone(),
+                        obligation.recursion_depth + 1,
+                        obligation.param_env,
+                        normalized,
+                    ));
+                }
+            }
+
+            Ok(obligations)
         })
     }
 
@@ -128,7 +184,7 @@
         obligation: &TraitObligation<'tcx>,
         param: ty::PolyTraitRef<'tcx>,
     ) -> Vec<PredicateObligation<'tcx>> {
-        debug!("confirm_param_candidate({:?},{:?})", obligation, param);
+        debug!(?obligation, ?param, "confirm_param_candidate");
 
         // During evaluation, we already checked that this
         // where-clause trait-ref could be unified with the obligation
@@ -151,7 +207,7 @@
         obligation: &TraitObligation<'tcx>,
         has_nested: bool,
     ) -> ImplSourceBuiltinData<PredicateObligation<'tcx>> {
-        debug!("confirm_builtin_candidate({:?}, {:?})", obligation, has_nested);
+        debug!(?obligation, ?has_nested, "confirm_builtin_candidate");
 
         let lang_items = self.tcx().lang_items();
         let obligations = if has_nested {
@@ -184,7 +240,7 @@
             vec![]
         };
 
-        debug!("confirm_builtin_candidate: obligations={:?}", obligations);
+        debug!(?obligations);
 
         ImplSourceBuiltinData { nested: obligations }
     }
@@ -199,7 +255,7 @@
         obligation: &TraitObligation<'tcx>,
         trait_def_id: DefId,
     ) -> ImplSourceAutoImplData<PredicateObligation<'tcx>> {
-        debug!("confirm_auto_impl_candidate({:?}, {:?})", obligation, trait_def_id);
+        debug!(?obligation, ?trait_def_id, "confirm_auto_impl_candidate");
 
         let types = obligation.predicate.map_bound(|inner| {
             let self_ty = self.infcx.shallow_resolve(inner.self_ty());
@@ -215,7 +271,7 @@
         trait_def_id: DefId,
         nested: ty::Binder<Vec<Ty<'tcx>>>,
     ) -> ImplSourceAutoImplData<PredicateObligation<'tcx>> {
-        debug!("vtable_auto_impl: nested={:?}", nested);
+        debug!(?nested, "vtable_auto_impl");
         ensure_sufficient_stack(|| {
             let cause = obligation.derived_cause(BuiltinDerivedObligation);
             let mut obligations = self.collect_predicates_for_types(
@@ -229,7 +285,7 @@
             let trait_obligations: Vec<PredicateObligation<'_>> =
                 self.infcx.commit_unconditionally(|_| {
                     let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
-                    let (trait_ref, _) =
+                    let trait_ref =
                         self.infcx.replace_bound_vars_with_placeholders(&poly_trait_ref);
                     let cause = obligation.derived_cause(ImplDerivedObligation);
                     self.impl_or_trait_obligations(
@@ -245,7 +301,7 @@
             // predicate as usual.  It won't have any effect since auto traits are coinductive.
             obligations.extend(trait_obligations);
 
-            debug!("vtable_auto_impl: obligations={:?}", obligations);
+            debug!(?obligations, "vtable_auto_impl");
 
             ImplSourceAutoImplData { trait_def_id, nested: obligations }
         })
@@ -256,13 +312,13 @@
         obligation: &TraitObligation<'tcx>,
         impl_def_id: DefId,
     ) -> ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>> {
-        debug!("confirm_impl_candidate({:?},{:?})", obligation, impl_def_id);
+        debug!(?obligation, ?impl_def_id, "confirm_impl_candidate");
 
         // First, create the substitutions by matching the impl again,
         // this time not in a probe.
         self.infcx.commit_unconditionally(|_| {
             let substs = self.rematch_impl(impl_def_id, obligation);
-            debug!("confirm_impl_candidate: substs={:?}", substs);
+            debug!(?substs, "impl substs");
             let cause = obligation.derived_cause(ImplDerivedObligation);
             ensure_sufficient_stack(|| {
                 self.vtable_impl(
@@ -279,15 +335,12 @@
     fn vtable_impl(
         &mut self,
         impl_def_id: DefId,
-        mut substs: Normalized<'tcx, SubstsRef<'tcx>>,
+        substs: Normalized<'tcx, SubstsRef<'tcx>>,
         cause: ObligationCause<'tcx>,
         recursion_depth: usize,
         param_env: ty::ParamEnv<'tcx>,
     ) -> ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>> {
-        debug!(
-            "vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={})",
-            impl_def_id, substs, recursion_depth,
-        );
+        debug!(?impl_def_id, ?substs, ?recursion_depth, "vtable_impl");
 
         let mut impl_obligations = self.impl_or_trait_obligations(
             cause,
@@ -297,17 +350,14 @@
             &substs.value,
         );
 
-        debug!(
-            "vtable_impl: impl_def_id={:?} impl_obligations={:?}",
-            impl_def_id, impl_obligations
-        );
+        debug!(?impl_obligations, "vtable_impl");
 
         // Because of RFC447, the impl-trait-ref and obligations
         // are sufficient to determine the impl substs, without
         // relying on projections in the impl-trait-ref.
         //
         // e.g., `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
-        impl_obligations.append(&mut substs.obligations);
+        impl_obligations.extend(substs.obligations);
 
         ImplSourceUserDefinedData { impl_def_id, substs: substs.value, nested: impl_obligations }
     }
@@ -315,56 +365,134 @@
     fn confirm_object_candidate(
         &mut self,
         obligation: &TraitObligation<'tcx>,
-    ) -> ImplSourceObjectData<'tcx, PredicateObligation<'tcx>> {
-        debug!("confirm_object_candidate({:?})", obligation);
+        index: usize,
+    ) -> Result<ImplSourceObjectData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+        let tcx = self.tcx();
+        debug!(?obligation, ?index, "confirm_object_candidate");
 
-        // FIXME(nmatsakis) skipping binder here seems wrong -- we should
-        // probably flatten the binder from the obligation and the binder
-        // from the object. Have to try to make a broken test case that
-        // results.
-        let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
-        let poly_trait_ref = match self_ty.kind() {
-            ty::Dynamic(data, ..) => data
-                .principal()
-                .unwrap_or_else(|| {
-                    span_bug!(obligation.cause.span, "object candidate with no principal")
-                })
-                .with_self_ty(self.tcx(), self_ty),
+        let trait_predicate =
+            self.infcx.replace_bound_vars_with_placeholders(&obligation.predicate);
+        let self_ty = self.infcx.shallow_resolve(trait_predicate.self_ty());
+        let obligation_trait_ref = ty::Binder::dummy(trait_predicate.trait_ref);
+        let data = match self_ty.kind() {
+            ty::Dynamic(data, ..) => {
+                self.infcx
+                    .replace_bound_vars_with_fresh_vars(
+                        obligation.cause.span,
+                        HigherRankedType,
+                        data,
+                    )
+                    .0
+            }
             _ => span_bug!(obligation.cause.span, "object candidate with non-object"),
         };
 
-        let mut upcast_trait_ref = None;
+        let object_trait_ref = data
+            .principal()
+            .unwrap_or_else(|| {
+                span_bug!(obligation.cause.span, "object candidate with no principal")
+            })
+            .with_self_ty(self.tcx(), self_ty);
+
         let mut nested = vec![];
-        let vtable_base;
 
+        let mut supertraits = util::supertraits(tcx, ty::Binder::dummy(object_trait_ref));
+
+        // For each of the non-matching predicates that
+        // we pass over, we sum up the number of vtable
+        // entries, so that we can compute the offset for the selected
+        // trait.
+        let vtable_base = supertraits
+            .by_ref()
+            .take(index)
+            .map(|t| super::util::count_own_vtable_entries(tcx, t))
+            .sum();
+
+        let unnormalized_upcast_trait_ref =
+            supertraits.next().expect("supertraits iterator no longer has as many elements");
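+        // A minimal sketch of this by_ref/take/sum + next pattern with
+        // hypothetical entry counts (illustrative only, not rustc data):
+        //
+        //     let counts = [2usize, 1, 3];
+        //     let mut it = counts.iter();
+        //     let base: usize = it.by_ref().take(2).sum(); // 2 + 1 = 3
+        //     let selected = it.next().unwrap();           // &3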
+
+        let upcast_trait_ref = normalize_with_depth_to(
+            self,
+            obligation.param_env,
+            obligation.cause.clone(),
+            obligation.recursion_depth + 1,
+            &unnormalized_upcast_trait_ref,
+            &mut nested,
+        );
+
+        nested.extend(self.infcx.commit_if_ok(|_| {
+            self.infcx
+                .at(&obligation.cause, obligation.param_env)
+                .sup(obligation_trait_ref, upcast_trait_ref)
+                .map(|InferOk { obligations, .. }| obligations)
+                .map_err(|_| Unimplemented)
+        })?);
+
+        // Check supertraits hold. This is so that their associated type bounds
+        // will be checked in the code below.
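+        // For example, with a hypothetical
+        //     trait Super { type T: Clone; }
+        //     trait Sub: Super {}
+        // confirming `dyn Sub: Sub` also registers `dyn Sub: Super`, so
+        // the `Clone` bound on `Super::T` is not silently dropped.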
+        for super_trait in tcx
+            .super_predicates_of(trait_predicate.def_id())
+            .instantiate(tcx, trait_predicate.trait_ref.substs)
+            .predicates
+            .into_iter()
         {
-            let tcx = self.tcx();
-
-            // We want to find the first supertrait in the list of
-            // supertraits that we can unify with, and do that
-            // unification. We know that there is exactly one in the list
-            // where we can unify, because otherwise select would have
-            // reported an ambiguity. (When we do find a match, also
-            // record it for later.)
-            let nonmatching = util::supertraits(tcx, poly_trait_ref).take_while(|&t| {
-                match self.infcx.commit_if_ok(|_| self.match_poly_trait_ref(obligation, t)) {
-                    Ok(obligations) => {
-                        upcast_trait_ref = Some(t);
-                        nested.extend(obligations);
-                        false
-                    }
-                    Err(_) => true,
-                }
-            });
-
-            // Additionally, for each of the non-matching predicates that
-            // we pass over, we sum up the set of number of vtable
-            // entries, so that we can compute the offset for the selected
-            // trait.
-            vtable_base = nonmatching.map(|t| super::util::count_own_vtable_entries(tcx, t)).sum();
+            if let ty::PredicateAtom::Trait(..) = super_trait.skip_binders() {
+                let normalized_super_trait = normalize_with_depth_to(
+                    self,
+                    obligation.param_env,
+                    obligation.cause.clone(),
+                    obligation.recursion_depth + 1,
+                    &super_trait,
+                    &mut nested,
+                );
+                nested.push(Obligation::new(
+                    obligation.cause.clone(),
+                    obligation.param_env.clone(),
+                    normalized_super_trait,
+                ));
+            }
         }
 
-        ImplSourceObjectData { upcast_trait_ref: upcast_trait_ref.unwrap(), vtable_base, nested }
+        let assoc_types: Vec<_> = tcx
+            .associated_items(trait_predicate.def_id())
+            .in_definition_order()
+            .filter_map(
+                |item| if item.kind == ty::AssocKind::Type { Some(item.def_id) } else { None },
+            )
+            .collect();
+
+        for assoc_type in assoc_types {
+            if !tcx.generics_of(assoc_type).params.is_empty() {
+                // FIXME(generic_associated_types) generate placeholders to
+                // extend the trait substs.
+                tcx.sess.span_fatal(
+                    obligation.cause.span,
+                    "generic associated types in trait objects are not supported yet",
+                );
+            }
+            // This maybe belongs in wf, but that can't (doesn't) handle
+            // higher-ranked things.
+            // Prevent, e.g., `dyn Iterator<Item = str>`.
+            for bound in self.tcx().item_bounds(assoc_type) {
+                let subst_bound = bound.subst(tcx, trait_predicate.trait_ref.substs);
+                let normalized_bound = normalize_with_depth_to(
+                    self,
+                    obligation.param_env,
+                    obligation.cause.clone(),
+                    obligation.recursion_depth + 1,
+                    &subst_bound,
+                    &mut nested,
+                );
+                nested.push(Obligation::new(
+                    obligation.cause.clone(),
+                    obligation.param_env.clone(),
+                    normalized_bound,
+                ));
+            }
+        }
+
+        debug!(?nested, "object nested obligations");
+        Ok(ImplSourceObjectData { upcast_trait_ref, vtable_base, nested })
     }
 
     fn confirm_fn_pointer_candidate(
@@ -372,7 +500,7 @@
         obligation: &TraitObligation<'tcx>,
     ) -> Result<ImplSourceFnPointerData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>>
     {
-        debug!("confirm_fn_pointer_candidate({:?})", obligation);
+        debug!(?obligation, "confirm_fn_pointer_candidate");
 
         // Okay to skip binder; it is reintroduced below.
         let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
@@ -386,8 +514,8 @@
         )
         .map_bound(|(trait_ref, _)| trait_ref);
 
-        let Normalized { value: trait_ref, obligations } = ensure_sufficient_stack(|| {
-            project::normalize_with_depth(
+        let Normalized { value: trait_ref, mut obligations } = ensure_sufficient_stack(|| {
+            normalize_with_depth(
                 self,
                 obligation.param_env,
                 obligation.cause.clone(),
@@ -396,12 +524,12 @@
             )
         });
 
-        self.confirm_poly_trait_refs(
+        obligations.extend(self.confirm_poly_trait_refs(
             obligation.cause.clone(),
             obligation.param_env,
             obligation.predicate.to_poly_trait_ref(),
             trait_ref,
-        )?;
+        )?);
         Ok(ImplSourceFnPointerData { fn_ty: self_ty, nested: obligations })
     }
 
@@ -410,10 +538,10 @@
         obligation: &TraitObligation<'tcx>,
         alias_def_id: DefId,
     ) -> ImplSourceTraitAliasData<'tcx, PredicateObligation<'tcx>> {
-        debug!("confirm_trait_alias_candidate({:?}, {:?})", obligation, alias_def_id);
+        debug!(?obligation, ?alias_def_id, "confirm_trait_alias_candidate");
 
         self.infcx.commit_unconditionally(|_| {
-            let (predicate, _) =
+            let predicate =
                 self.infcx().replace_bound_vars_with_placeholders(&obligation.predicate);
             let trait_ref = predicate.trait_ref;
             let trait_def_id = trait_ref.def_id;
@@ -427,10 +555,7 @@
                 &substs,
             );
 
-            debug!(
-                "confirm_trait_alias_candidate: trait_def_id={:?} trait_obligations={:?}",
-                trait_def_id, trait_obligations
-            );
+            debug!(?trait_def_id, ?trait_obligations, "trait alias obligations");
 
             ImplSourceTraitAliasData { alias_def_id, substs, nested: trait_obligations }
         })
@@ -450,7 +575,7 @@
             _ => bug!("closure candidate for non-closure {:?}", obligation),
         };
 
-        debug!("confirm_generator_candidate({:?},{:?},{:?})", obligation, generator_def_id, substs);
+        debug!(?obligation, ?generator_def_id, ?substs, "confirm_generator_candidate");
 
         let trait_ref = self.generator_trait_ref_unnormalized(obligation, substs);
         let Normalized { value: trait_ref, mut obligations } = ensure_sufficient_stack(|| {
@@ -463,11 +588,7 @@
             )
         });
 
-        debug!(
-            "confirm_generator_candidate(generator_def_id={:?}, \
-             trait_ref={:?}, obligations={:?})",
-            generator_def_id, trait_ref, obligations
-        );
+        debug!(?trait_ref, ?obligations, "generator candidate obligations");
 
         obligations.extend(self.confirm_poly_trait_refs(
             obligation.cause.clone(),
@@ -483,7 +604,7 @@
         &mut self,
         obligation: &TraitObligation<'tcx>,
     ) -> Result<ImplSourceClosureData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
-        debug!("confirm_closure_candidate({:?})", obligation);
+        debug!(?obligation, "confirm_closure_candidate");
 
         let kind = self
             .tcx()
@@ -510,10 +631,7 @@
             )
         });
 
-        debug!(
-            "confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
-            closure_def_id, trait_ref, obligations
-        );
+        debug!(?closure_def_id, ?trait_ref, ?obligations, "confirm closure candidate obligations");
 
         obligations.extend(self.confirm_poly_trait_refs(
             obligation.cause.clone(),
@@ -587,7 +705,7 @@
         let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
         let target = self.infcx.shallow_resolve(target);
 
-        debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})", source, target);
+        debug!(?source, ?target, "confirm_builtin_unsize_candidate");
 
         let mut nested = vec![];
         match (source.kind(), target.kind()) {
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
index 114dc79..ea5c12a 100644
--- a/compiler/rustc_trait_selection/src/traits/select/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -9,6 +9,7 @@
 use super::const_evaluatable;
 use super::project;
 use super::project::normalize_with_depth_to;
+use super::project::ProjectionTyObligation;
 use super::util;
 use super::util::{closure_trait_ref_and_return_type, predicate_for_trait_def};
 use super::wf;
@@ -36,9 +37,8 @@
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::relate::TypeRelation;
 use rustc_middle::ty::subst::{GenericArgKind, Subst, SubstsRef};
-use rustc_middle::ty::{
-    self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable, WithConstness,
-};
+use rustc_middle::ty::{self, PolyProjectionPredicate, ToPolyTraitRef, ToPredicate};
+use rustc_middle::ty::{Ty, TyCtxt, TypeFoldable, WithConstness};
 use rustc_span::symbol::sym;
 
 use std::cell::{Cell, RefCell};
@@ -236,7 +236,7 @@
         infcx: &'cx InferCtxt<'cx, 'tcx>,
         allow_negative_impls: bool,
     ) -> SelectionContext<'cx, 'tcx> {
-        debug!("with_negative({:?})", allow_negative_impls);
+        debug!(?allow_negative_impls, "with_negative");
         SelectionContext {
             infcx,
             freshener: infcx.freshener(),
@@ -251,7 +251,7 @@
         infcx: &'cx InferCtxt<'cx, 'tcx>,
         query_mode: TraitQueryMode,
     ) -> SelectionContext<'cx, 'tcx> {
-        debug!("with_query_mode({:?})", query_mode);
+        debug!(?query_mode, "with_query_mode");
         SelectionContext {
             infcx,
             freshener: infcx.freshener(),
@@ -279,7 +279,7 @@
     /// tracking is not enabled, just returns an empty vector.
     pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
         assert!(self.intercrate);
-        self.intercrate_ambiguity_causes.take().unwrap_or(vec![])
+        self.intercrate_ambiguity_causes.take().unwrap_or_default()
     }
 
     pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
@@ -290,8 +290,8 @@
         self.infcx.tcx
     }
 
-    pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'tcx> {
-        self.infcx
+    pub(super) fn query_mode(&self) -> TraitQueryMode {
+        self.query_mode
     }
 
     ///////////////////////////////////////////////////////////////////////////
@@ -311,11 +311,11 @@
 
     /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
     /// type environment by performing unification.
+    #[instrument(level = "debug", skip(self))]
     pub fn select(
         &mut self,
         obligation: &TraitObligation<'tcx>,
     ) -> SelectionResult<'tcx, Selection<'tcx>> {
-        debug!("select({:?})", obligation);
         debug_assert!(!obligation.predicate.has_escaping_bound_vars());
 
         let pec = &ProvisionalEvaluationCache::default();
@@ -343,7 +343,10 @@
                 Err(SelectionError::Overflow)
             }
             Err(e) => Err(e),
-            Ok(candidate) => Ok(Some(candidate)),
+            Ok(candidate) => {
+                debug!(?candidate);
+                Ok(Some(candidate))
+            }
         }
     }
 
@@ -359,7 +362,7 @@
 
     /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
     pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool {
-        debug!("predicate_may_hold_fatal({:?})", obligation);
+        debug!(?obligation, "predicate_may_hold_fatal");
 
         // This fatal query is a stopgap that should only be used in standard mode,
         // where we do not expect overflow to be propagated.
@@ -413,12 +416,12 @@
         predicates: I,
     ) -> Result<EvaluationResult, OverflowError>
     where
-        I: IntoIterator<Item = PredicateObligation<'tcx>>,
+        I: IntoIterator<Item = PredicateObligation<'tcx>> + std::fmt::Debug,
     {
         let mut result = EvaluatedToOk;
+        debug!(?predicates, "evaluate_predicates_recursively");
         for obligation in predicates {
             let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
-            debug!("evaluate_predicate_recursively({:?}) = {:?}", obligation, eval);
             if let EvaluatedToErr = eval {
                 // fast-path - EvaluatedToErr is the top of the lattice,
                 // so we don't need to look on the other predicates.
@@ -430,17 +433,16 @@
         Ok(result)
     }
 
+    #[instrument(
+        level = "debug",
+        skip(self, previous_stack),
+        fields(previous_stack = ?previous_stack.head())
+    )]
     fn evaluate_predicate_recursively<'o>(
         &mut self,
         previous_stack: TraitObligationStackList<'o, 'tcx>,
         obligation: PredicateObligation<'tcx>,
     ) -> Result<EvaluationResult, OverflowError> {
-        debug!(
-            "evaluate_predicate_recursively(previous_stack={:?}, obligation={:?})",
-            previous_stack.head(),
-            obligation
-        );
-
         // `previous_stack` stores a `TraitObligation`, while `obligation` is
         // a `PredicateObligation`. These are distinct types, so we can't
         // use any `Option` combinator method that would force them to be
@@ -450,17 +452,18 @@
             None => self.check_recursion_limit(&obligation, &obligation)?,
         }
 
-        ensure_sufficient_stack(|| {
-            match obligation.predicate.skip_binders() {
+        let result = ensure_sufficient_stack(|| {
+            let bound_predicate = obligation.predicate.bound_atom();
+            match bound_predicate.skip_binder() {
                 ty::PredicateAtom::Trait(t, _) => {
-                    let t = ty::Binder::bind(t);
+                    let t = bound_predicate.rebind(t);
                     debug_assert!(!t.has_escaping_bound_vars());
                     let obligation = obligation.with(t);
                     self.evaluate_trait_predicate_recursively(previous_stack, obligation)
                 }
 
                 ty::PredicateAtom::Subtype(p) => {
-                    let p = ty::Binder::bind(p);
+                    let p = bound_predicate.rebind(p);
                     // Does this code ever run?
                     match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
                         Some(Ok(InferOk { mut obligations, .. })) => {
@@ -479,15 +482,13 @@
                     self.infcx,
                     obligation.param_env,
                     obligation.cause.body_id,
+                    obligation.recursion_depth + 1,
                     arg,
                     obligation.cause.span,
                 ) {
                     Some(mut obligations) => {
                         self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
-                        self.evaluate_predicates_recursively(
-                            previous_stack,
-                            obligations.into_iter(),
-                        )
+                        self.evaluate_predicates_recursively(previous_stack, obligations)
                     }
                     None => Ok(EvaluatedToAmbig),
                 },
@@ -506,15 +507,13 @@
                 }
 
                 ty::PredicateAtom::Projection(data) => {
-                    let data = ty::Binder::bind(data);
+                    let data = bound_predicate.rebind(data);
                     let project_obligation = obligation.with(data);
                     match project::poly_project_and_unify_type(self, &project_obligation) {
                         Ok(Ok(Some(mut subobligations))) => {
                             self.add_depth(subobligations.iter_mut(), obligation.recursion_depth);
-                            let result = self.evaluate_predicates_recursively(
-                                previous_stack,
-                                subobligations.into_iter(),
-                            );
+                            let result = self
+                                .evaluate_predicates_recursively(previous_stack, subobligations);
                             if let Some(key) =
                                 ProjectionCacheKey::from_poly_projection_predicate(self, data)
                             {
@@ -523,12 +522,7 @@
                             result
                         }
                         Ok(Ok(None)) => Ok(EvaluatedToAmbig),
-                        // EvaluatedToRecur might also be acceptable here, but use
-                        // Unknown for now because it means that we won't dismiss a
-                        // selection candidate solely because it has a projection
-                        // cycle. This is closest to the previous behavior of
-                        // immediately erroring.
-                        Ok(Err(project::InProgress)) => Ok(EvaluatedToUnknown),
+                        Ok(Err(project::InProgress)) => Ok(EvaluatedToRecur),
                         Err(_) => Ok(EvaluatedToErr),
                     }
                 }
@@ -561,10 +555,7 @@
                 }
 
                 ty::PredicateAtom::ConstEquate(c1, c2) => {
-                    debug!(
-                        "evaluate_predicate_recursively: equating consts c1={:?} c2={:?}",
-                        c1, c2
-                    );
+                    debug!(?c1, ?c2, "evaluate_predicate_recursively: equating consts");
 
                     let evaluate = |c: &'tcx ty::Const<'tcx>| {
                         if let ty::ConstKind::Unevaluated(def, substs, promoted) = c.val {
@@ -610,7 +601,11 @@
                     bug!("TypeWellFormedFromEnv is only used for chalk")
                 }
             }
-        })
+        });
+
+        debug!(?result);
+
+        result
     }
 
     fn evaluate_trait_predicate_recursively<'o>(
@@ -618,7 +613,7 @@
         previous_stack: TraitObligationStackList<'o, 'tcx>,
         mut obligation: TraitObligation<'tcx>,
     ) -> Result<EvaluationResult, OverflowError> {
-        debug!("evaluate_trait_predicate_recursively({:?})", obligation);
+        debug!(?obligation, "evaluate_trait_predicate_recursively");
 
         if !self.intercrate
             && obligation.is_global()
@@ -627,19 +622,22 @@
             // If a param env has no global bounds, global obligations do not
             // depend on its particular value in order to work, so we can clear
             // out the param env and get better caching.
-            debug!("evaluate_trait_predicate_recursively({:?}) - in global", obligation);
+            debug!("evaluate_trait_predicate_recursively - in global");
             obligation.param_env = obligation.param_env.without_caller_bounds();
         }
 
         let stack = self.push_stack(previous_stack, &obligation);
         let fresh_trait_ref = stack.fresh_trait_ref;
+
+        debug!(?fresh_trait_ref);
+
         if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) {
-            debug!("CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result);
+            debug!(?result, "CACHE HIT");
             return Ok(result);
         }
 
         if let Some(result) = stack.cache().get_provisional(fresh_trait_ref) {
-            debug!("PROVISIONAL CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result);
+            debug!(?result, "PROVISIONAL CACHE HIT");
             stack.update_reached_depth(stack.cache().current_reached_depth());
             return Ok(result);
         }
@@ -662,7 +660,7 @@
 
         let reached_depth = stack.reached_depth.get();
         if reached_depth >= stack.depth {
-            debug!("CACHE MISS: EVAL({:?})={:?}", fresh_trait_ref, result);
+            debug!(?result, "CACHE MISS");
             self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result);
 
             stack.cache().on_completion(stack.depth, |fresh_trait_ref, provisional_result| {
@@ -674,7 +672,7 @@
                 );
             });
         } else {
-            debug!("PROVISIONAL: {:?}={:?}", fresh_trait_ref, result);
+            debug!(?result, "PROVISIONAL");
             debug!(
                 "evaluate_trait_predicate_recursively: caching provisionally because {:?} \
                  is a cycle participant (at depth {}, reached depth {})",
@@ -719,10 +717,7 @@
             })
             .map(|stack| stack.depth)
         {
-            debug!(
-                "evaluate_stack({:?}) --> recursive at depth {}",
-                stack.fresh_trait_ref, cycle_depth,
-            );
+            debug!("evaluate_stack --> recursive at depth {}", cycle_depth);
 
             // If we have a stack like `A B C D E A`, where the top of
             // the stack is the final `A`, then this will iterate over
@@ -742,10 +737,10 @@
             let cycle =
                 cycle.map(|stack| stack.obligation.predicate.without_const().to_predicate(tcx));
             if self.coinductive_match(cycle) {
-                debug!("evaluate_stack({:?}) --> recursive, coinductive", stack.fresh_trait_ref);
+                debug!("evaluate_stack --> recursive, coinductive");
                 Some(EvaluatedToOk)
             } else {
-                debug!("evaluate_stack({:?}) --> recursive, inductive", stack.fresh_trait_ref);
+                debug!("evaluate_stack --> recursive, inductive");
                 Some(EvaluatedToRecur)
             }
         } else {
@@ -786,10 +781,7 @@
         // This check was an imperfect workaround for a bug in the old
         // intercrate mode; it should be removed when that goes away.
         if unbound_input_types && self.intercrate {
-            debug!(
-                "evaluate_stack({:?}) --> unbound argument, intercrate -->  ambiguous",
-                stack.fresh_trait_ref
-            );
+            debug!("evaluate_stack --> unbound argument, intercrate -->  ambiguous",);
             // Heuristics: show the diagnostics when there are no candidates in crate.
             if self.intercrate_ambiguity_causes.is_some() {
                 debug!("evaluate_stack: intercrate_ambiguity_causes is some");
@@ -807,7 +799,7 @@
                                 },
                             });
 
-                        debug!("evaluate_stack: pushing cause = {:?}", cause);
+                        debug!(?cause, "evaluate_stack: pushing cause");
                         self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
                     }
                 }
@@ -824,10 +816,7 @@
                     )
             })
         {
-            debug!(
-                "evaluate_stack({:?}) --> unbound argument, recursive --> giving up",
-                stack.fresh_trait_ref
-            );
+            debug!("evaluate_stack --> unbound argument, recursive --> giving up",);
             return Ok(EvaluatedToUnknown);
         }
 
@@ -860,36 +849,37 @@
             ty::PredicateAtom::Trait(ref data, _) => self.tcx().trait_is_auto(data.def_id()),
             _ => false,
         };
-        debug!("coinductive_predicate({:?}) = {:?}", predicate, result);
+        debug!(?predicate, ?result, "coinductive_predicate");
         result
     }
 
     /// Further evaluates `candidate` to decide whether all type parameters match and whether nested
     /// obligations are met. Returns whether `candidate` remains viable after this further
     /// scrutiny.
+    #[instrument(
+        level = "debug",
+        skip(self, stack),
+        fields(depth = stack.obligation.recursion_depth)
+    )]
     fn evaluate_candidate<'o>(
         &mut self,
         stack: &TraitObligationStack<'o, 'tcx>,
         candidate: &SelectionCandidate<'tcx>,
     ) -> Result<EvaluationResult, OverflowError> {
-        debug!(
-            "evaluate_candidate: depth={} candidate={:?}",
-            stack.obligation.recursion_depth, candidate
-        );
         let result = self.evaluation_probe(|this| {
             let candidate = (*candidate).clone();
             match this.confirm_candidate(stack.obligation, candidate) {
-                Ok(selection) => this.evaluate_predicates_recursively(
-                    stack.list(),
-                    selection.nested_obligations().into_iter(),
-                ),
+                Ok(selection) => {
+                    debug!(?selection);
+                    this.evaluate_predicates_recursively(
+                        stack.list(),
+                        selection.nested_obligations().into_iter(),
+                    )
+                }
                 Err(..) => Ok(EvaluatedToErr),
             }
         })?;
-        debug!(
-            "evaluate_candidate: depth={} result={:?}",
-            stack.obligation.recursion_depth, result
-        );
+        debug!(?result);
         Ok(result)
     }
 
@@ -922,10 +912,7 @@
 
         if self.can_use_global_caches(param_env) {
             if !trait_ref.needs_infer() {
-                debug!(
-                    "insert_evaluation_cache(trait_ref={:?}, candidate={:?}) global",
-                    trait_ref, result,
-                );
+                debug!(?trait_ref, ?result, "insert_evaluation_cache global");
                 // This may overwrite the cache with the same value
                 // FIXME: Due to #50507 this overwrites the different values
                 // This should be changed to use HashMapExt::insert_same
@@ -935,7 +922,7 @@
             }
         }
 
-        debug!("insert_evaluation_cache(trait_ref={:?}, candidate={:?})", trait_ref, result,);
+        debug!(?trait_ref, ?result, "insert_evaluation_cache");
         self.infcx.evaluation_cache.insert(param_env.and(trait_ref), dep_node, result);
     }
 
@@ -943,10 +930,9 @@
     /// to have a *lower* recursion_depth than the obligation used to create it.
     /// Projection sub-obligations may be returned from the projection cache,
     /// which results in obligations with an 'old' `recursion_depth`.
-    /// Additionally, methods like `wf::obligations` and
-    /// `InferCtxt.subtype_predicate` produce subobligations without
-    /// taking in a 'parent' depth, causing the generated subobligations
-    /// to have a `recursion_depth` of `0`.
+    /// Additionally, methods like `InferCtxt.subtype_predicate` produce
+    /// subobligations without taking in a 'parent' depth, causing the
+    /// generated subobligations to have a `recursion_depth` of `0`.
     ///
     /// To ensure that obligation_depth never decreases, we force all subobligations
     /// to have at least the depth of the original obligation.
@@ -1125,11 +1111,7 @@
         let trait_ref = cache_fresh_trait_pred.skip_binder().trait_ref;
 
         if !self.can_cache_candidate(&candidate) {
-            debug!(
-                "insert_candidate_cache(trait_ref={:?}, candidate={:?} -\
-                 candidate is not cacheable",
-                trait_ref, candidate
-            );
+            debug!(?trait_ref, ?candidate, "insert_candidate_cache - candidate is not cacheable");
             return;
         }
 
@@ -1138,10 +1120,7 @@
                 // Don't cache overflow globally; we only produce this in certain modes.
             } else if !trait_ref.needs_infer() {
                 if !candidate.needs_infer() {
-                    debug!(
-                        "insert_candidate_cache(trait_ref={:?}, candidate={:?}) global",
-                        trait_ref, candidate,
-                    );
+                    debug!(?trait_ref, ?candidate, "insert_candidate_cache global");
                     // This may overwrite the cache with the same value.
                     tcx.selection_cache.insert(param_env.and(trait_ref), dep_node, candidate);
                     return;
@@ -1149,32 +1128,32 @@
             }
         }
 
-        debug!(
-            "insert_candidate_cache(trait_ref={:?}, candidate={:?}) local",
-            trait_ref, candidate,
-        );
+        debug!(?trait_ref, ?candidate, "insert_candidate_cache local");
         self.infcx.selection_cache.insert(param_env.and(trait_ref), dep_node, candidate);
     }
 
+    /// Matches a predicate against the bounds of its self type.
+    ///
+    /// Given an obligation like `<T as Foo>::Bar: Baz` where the self type is
+    /// a projection, look at the bounds of `T::Bar`, see if we can find a
+    /// `Baz` bound. We return indexes into the list returned by
+    /// `tcx.item_bounds` for any applicable bounds.
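+    ///
+    /// For instance, with a hypothetical `trait Foo { type Bar: Baz; }`,
+    /// the bound `Baz` appears in `tcx.item_bounds` of `Foo::Bar`, so an
+    /// obligation `<T as Foo>::Bar: Baz` matches it and that bound's
+    /// index is returned.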
     fn match_projection_obligation_against_definition_bounds(
         &mut self,
         obligation: &TraitObligation<'tcx>,
-    ) -> bool {
+    ) -> smallvec::SmallVec<[usize; 2]> {
         let poly_trait_predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate);
-        let (placeholder_trait_predicate, _) =
+        let placeholder_trait_predicate =
             self.infcx().replace_bound_vars_with_placeholders(&poly_trait_predicate);
         debug!(
-            "match_projection_obligation_against_definition_bounds: \
-             placeholder_trait_predicate={:?}",
-            placeholder_trait_predicate,
+            ?placeholder_trait_predicate,
+            "match_projection_obligation_against_definition_bounds"
         );
 
         let tcx = self.infcx.tcx;
-        let predicates = match *placeholder_trait_predicate.trait_ref.self_ty().kind() {
-            ty::Projection(ref data) => {
-                tcx.projection_predicates(data.item_def_id).subst(tcx, data.substs)
-            }
-            ty::Opaque(def_id, substs) => tcx.projection_predicates(def_id).subst(tcx, substs),
+        let (def_id, substs) = match *placeholder_trait_predicate.trait_ref.self_ty().kind() {
+            ty::Projection(ref data) => (data.item_def_id, data.substs),
+            ty::Opaque(def_id, substs) => (def_id, substs),
             _ => {
                 span_bug!(
                     obligation.cause.span,
@@ -1184,48 +1163,83 @@
                 );
             }
         };
+        let bounds = tcx.item_bounds(def_id).subst(tcx, substs);
 
-        let matching_bound = predicates.iter().find_map(|bound| {
-            if let ty::PredicateAtom::Trait(pred, _) = bound.skip_binders() {
-                let bound = ty::Binder::bind(pred.trait_ref);
-                if self.infcx.probe(|_| {
-                    self.match_projection(obligation, bound, placeholder_trait_predicate.trait_ref)
-                }) {
-                    return Some(bound);
+        // The bounds returned by `item_bounds` may contain duplicates after
+        // normalization, so try to deduplicate when possible to avoid
+        // unnecessary ambiguity.
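+        // (For instance, two syntactically different bounds could
+        // hypothetically normalize to the same trait reference; keeping
+        // both would produce two identical candidates and a spurious
+        // ambiguity.)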
+        let mut distinct_normalized_bounds = FxHashSet::default();
+
+        let matching_bounds = bounds
+            .iter()
+            .enumerate()
+            .filter_map(|(idx, bound)| {
+                let bound_predicate = bound.bound_atom();
+                if let ty::PredicateAtom::Trait(pred, _) = bound_predicate.skip_binder() {
+                    let bound = bound_predicate.rebind(pred.trait_ref);
+                    if self.infcx.probe(|_| {
+                        match self.match_normalize_trait_ref(
+                            obligation,
+                            bound,
+                            placeholder_trait_predicate.trait_ref,
+                        ) {
+                            Ok(None) => true,
+                            Ok(Some(normalized_trait))
+                                if distinct_normalized_bounds.insert(normalized_trait) =>
+                            {
+                                true
+                            }
+                            _ => false,
+                        }
+                    }) {
+                        return Some(idx);
+                    }
                 }
-            }
-            None
-        });
+                None
+            })
+            .collect();
 
-        debug!(
-            "match_projection_obligation_against_definition_bounds: \
-             matching_bound={:?}",
-            matching_bound
-        );
-        match matching_bound {
-            None => false,
-            Some(bound) => {
-                // Repeat the successful match, if any, this time outside of a probe.
-                let result =
-                    self.match_projection(obligation, bound, placeholder_trait_predicate.trait_ref);
-
-                assert!(result);
-                true
-            }
-        }
+        debug!(?matching_bounds, "match_projection_obligation_against_definition_bounds");
+        matching_bounds
     }
 
-    fn match_projection(
+    /// Equates the trait in `obligation` with the trait bound. If the two traits
+    /// can be equated and the normalized trait bound doesn't contain inference
+    /// variables or placeholders, the normalized bound is returned.
+    fn match_normalize_trait_ref(
         &mut self,
         obligation: &TraitObligation<'tcx>,
         trait_bound: ty::PolyTraitRef<'tcx>,
         placeholder_trait_ref: ty::TraitRef<'tcx>,
-    ) -> bool {
+    ) -> Result<Option<ty::PolyTraitRef<'tcx>>, ()> {
         debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
+        if placeholder_trait_ref.def_id != trait_bound.def_id() {
+            // Avoid unnecessary normalization
+            return Err(());
+        }
+
+        let Normalized { value: trait_bound, obligations: _ } = ensure_sufficient_stack(|| {
+            project::normalize_with_depth(
+                self,
+                obligation.param_env,
+                obligation.cause.clone(),
+                obligation.recursion_depth + 1,
+                &trait_bound,
+            )
+        });
         self.infcx
             .at(&obligation.cause, obligation.param_env)
             .sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound)
-            .is_ok()
+            .map(|InferOk { obligations: _, value: () }| {
+                // This method is called within a probe, so we can't have
+                // inference variables and placeholders escape.
+                if !trait_bound.needs_infer() && !trait_bound.has_placeholders() {
+                    Some(trait_bound)
+                } else {
+                    None
+                }
+            })
+            .map_err(|_| ())
     }
 
     fn evaluate_where_clause<'o>(
@@ -1235,14 +1249,50 @@
     ) -> Result<EvaluationResult, OverflowError> {
         self.evaluation_probe(|this| {
             match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
-                Ok(obligations) => {
-                    this.evaluate_predicates_recursively(stack.list(), obligations.into_iter())
-                }
+                Ok(obligations) => this.evaluate_predicates_recursively(stack.list(), obligations),
                 Err(()) => Ok(EvaluatedToErr),
             }
         })
     }
 
+    pub(super) fn match_projection_projections(
+        &mut self,
+        obligation: &ProjectionTyObligation<'tcx>,
+        obligation_trait_ref: &ty::TraitRef<'tcx>,
+        data: &PolyProjectionPredicate<'tcx>,
+        potentially_unnormalized_candidates: bool,
+    ) -> bool {
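+        // Checks whether the candidate projection predicate `data` can
+        // satisfy `obligation`: unify the obligation's trait ref with the
+        // candidate's (normalizing the candidate first if it may still be
+        // unnormalized), then evaluate the resulting obligations.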
+        let mut nested_obligations = Vec::new();
+        let projection_ty = if potentially_unnormalized_candidates {
+            ensure_sufficient_stack(|| {
+                project::normalize_with_depth_to(
+                    self,
+                    obligation.param_env,
+                    obligation.cause.clone(),
+                    obligation.recursion_depth + 1,
+                    &data.map_bound_ref(|data| data.projection_ty),
+                    &mut nested_obligations,
+                )
+            })
+        } else {
+            data.map_bound_ref(|data| data.projection_ty)
+        };
+
+        // FIXME(generic_associated_types): Compare the whole projections
+        let data_poly_trait_ref = projection_ty.map_bound(|proj| proj.trait_ref(self.tcx()));
+        let obligation_poly_trait_ref = obligation_trait_ref.to_poly_trait_ref();
+        self.infcx
+            .at(&obligation.cause, obligation.param_env)
+            .sup(obligation_poly_trait_ref, data_poly_trait_ref)
+            .map_or(false, |InferOk { obligations, value: () }| {
+                self.evaluate_predicates_recursively(
+                    TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
+                    nested_obligations.into_iter().chain(obligations),
+                )
+                .map_or(false, |res| res.may_apply())
+            })
+    }
+
     ///////////////////////////////////////////////////////////////////////////
     // WINNOW
     //
@@ -1277,18 +1327,27 @@
         //
         // This is a fix for #53123 and prevents winnowing from accidentally extending the
         // lifetime of a variable.
-        match other.candidate {
+        match (&other.candidate, &victim.candidate) {
+            (_, AutoImplCandidate(..)) | (AutoImplCandidate(..), _) => {
+                bug!(
+                    "default implementations shouldn't be recorded \
+                    when there are other valid candidates"
+                );
+            }
+
             // (*)
-            BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate => true,
-            ParamCandidate(ref cand) => match victim.candidate {
-                AutoImplCandidate(..) => {
-                    bug!(
-                        "default implementations shouldn't be recorded \
-                         when there are other valid candidates"
-                    );
-                }
-                // (*)
-                BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate => false,
+            (BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate, _) => true,
+            (_, BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate) => false,
+
+            (ParamCandidate(..), ParamCandidate(..)) => false,
+
+            // Global bounds from the where clause should be ignored
+            // here (see issue #50825). Otherwise, we have a where
+            // clause so don't go around looking for impls.
+            // Arbitrarily give param candidates priority
+            // over projection and object candidates.
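+            //
+            // For example, a hypothetical global bound `where u32: Trait`
+            // is not given precedence over an `impl Trait for u32`, whereas
+            // a non-global bound such as `T: Trait` is preferred over the
+            // impl.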
+            (
+                ParamCandidate(ref cand),
                 ImplCandidate(..)
                 | ClosureCandidate
                 | GeneratorCandidate
@@ -1296,28 +1355,45 @@
                 | BuiltinObjectCandidate
                 | BuiltinUnsizeCandidate
                 | BuiltinCandidate { .. }
-                | TraitAliasCandidate(..) => {
-                    // Global bounds from the where clause should be ignored
-                    // here (see issue #50825). Otherwise, we have a where
-                    // clause so don't go around looking for impls.
-                    !is_global(cand)
-                }
-                ObjectCandidate | ProjectionCandidate => {
-                    // Arbitrarily give param candidates priority
-                    // over projection and object candidates.
-                    !is_global(cand)
-                }
-                ParamCandidate(..) => false,
-            },
-            ObjectCandidate | ProjectionCandidate => match victim.candidate {
-                AutoImplCandidate(..) => {
-                    bug!(
-                        "default implementations shouldn't be recorded \
-                         when there are other valid candidates"
-                    );
-                }
-                // (*)
-                BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate => false,
+                | TraitAliasCandidate(..)
+                | ObjectCandidate(_)
+                | ProjectionCandidate(_),
+            ) => !is_global(cand),
+            (ObjectCandidate(_) | ProjectionCandidate(_), ParamCandidate(ref cand)) => {
+                // Prefer these to a global where-clause bound
+                // (see issue #50825).
+                is_global(cand)
+            }
+            (
+                ImplCandidate(_)
+                | ClosureCandidate
+                | GeneratorCandidate
+                | FnPointerCandidate
+                | BuiltinObjectCandidate
+                | BuiltinUnsizeCandidate
+                | BuiltinCandidate { has_nested: true }
+                | TraitAliasCandidate(..),
+                ParamCandidate(ref cand),
+            ) => {
+                // Prefer these to a global where-clause bound
+                // (see issue #50825).
+                is_global(cand) && other.evaluation.must_apply_modulo_regions()
+            }
+
+            (ProjectionCandidate(i), ProjectionCandidate(j))
+            | (ObjectCandidate(i), ObjectCandidate(j)) => {
+                // Arbitrarily pick the lower numbered candidate for backwards
+                // compatibility reasons. Don't let this affect inference.
+                i < j && !needs_infer
+            }
+            (ObjectCandidate(_), ProjectionCandidate(_))
+            | (ProjectionCandidate(_), ObjectCandidate(_)) => {
+                bug!("Have both object and projection candidate")
+            }
+
+            // Arbitrarily give projection and object candidates priority.
+            (
+                ObjectCandidate(_) | ProjectionCandidate(_),
                 ImplCandidate(..)
                 | ClosureCandidate
                 | GeneratorCandidate
@@ -1325,98 +1401,100 @@
                 | BuiltinObjectCandidate
                 | BuiltinUnsizeCandidate
                 | BuiltinCandidate { .. }
-                | TraitAliasCandidate(..) => true,
-                ObjectCandidate | ProjectionCandidate => {
-                    // Arbitrarily give param candidates priority
-                    // over projection and object candidates.
-                    true
-                }
-                ParamCandidate(ref cand) => is_global(cand),
-            },
-            ImplCandidate(other_def) => {
+                | TraitAliasCandidate(..),
+            ) => true,
+
+            (
+                ImplCandidate(..)
+                | ClosureCandidate
+                | GeneratorCandidate
+                | FnPointerCandidate
+                | BuiltinObjectCandidate
+                | BuiltinUnsizeCandidate
+                | BuiltinCandidate { .. }
+                | TraitAliasCandidate(..),
+                ObjectCandidate(_) | ProjectionCandidate(_),
+            ) => false,
+
+            (&ImplCandidate(other_def), &ImplCandidate(victim_def)) => {
                 // See if we can toss out `victim` based on specialization.
                 // This requires us to know *for sure* that the `other` impl applies
                 // i.e., `EvaluatedToOk`.
                 if other.evaluation.must_apply_modulo_regions() {
-                    match victim.candidate {
-                        ImplCandidate(victim_def) => {
-                            let tcx = self.tcx();
-                            if tcx.specializes((other_def, victim_def)) {
-                                return true;
-                            }
-                            return match tcx.impls_are_allowed_to_overlap(other_def, victim_def) {
-                                Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
-                                    // Subtle: If the predicate we are evaluating has inference
-                                    // variables, do *not* allow discarding candidates due to
-                                    // marker trait impls.
-                                    //
-                                    // Without this restriction, we could end up accidentally
-                                    // constrainting inference variables based on an arbitrarily
-                                    // chosen trait impl.
-                                    //
-                                    // Imagine we have the following code:
-                                    //
-                                    // ```rust
-                                    // #[marker] trait MyTrait {}
-                                    // impl MyTrait for u8 {}
-                                    // impl MyTrait for bool {}
-                                    // ```
-                                    //
-                                    // And we are evaluating the predicate `<_#0t as MyTrait>`.
-                                    //
-                                    // During selection, we will end up with one candidate for each
-                                    // impl of `MyTrait`. If we were to discard one impl in favor
-                                    // of the other, we would be left with one candidate, causing
-                                    // us to "successfully" select the predicate, unifying
-                                    // _#0t with (for example) `u8`.
-                                    //
-                                    // However, we have no reason to believe that this unification
-                                    // is correct - we've essentially just picked an arbitrary
-                                    // *possibility* for _#0t, and required that this be the *only*
-                                    // possibility.
-                                    //
-                                    // Eventually, we will either:
-                                    // 1) Unify all inference variables in the predicate through
-                                    // some other means (e.g. type-checking of a function). We will
-                                    // then be in a position to drop marker trait candidates
-                                    // without constraining inference variables (since there are
-                                    // none left to constrin)
-                                    // 2) Be left with some unconstrained inference variables. We
-                                    // will then correctly report an inference error, since the
-                                    // existence of multiple marker trait impls tells us nothing
-                                    // about which one should actually apply.
-                                    !needs_infer
-                                }
-                                Some(_) => true,
-                                None => false,
-                            };
-                        }
-                        ParamCandidate(ref cand) => {
-                            // Prefer the impl to a global where clause candidate.
-                            return is_global(cand);
-                        }
-                        _ => (),
+                    let tcx = self.tcx();
+                    if tcx.specializes((other_def, victim_def)) {
+                        return true;
                     }
+                    return match tcx.impls_are_allowed_to_overlap(other_def, victim_def) {
+                        Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
+                            // Subtle: If the predicate we are evaluating has inference
+                            // variables, do *not* allow discarding candidates due to
+                            // marker trait impls.
+                            //
+                            // Without this restriction, we could end up accidentally
+                            // constraining inference variables based on an arbitrarily
+                            // chosen trait impl.
+                            //
+                            // Imagine we have the following code:
+                            //
+                            // ```rust
+                            // #[marker] trait MyTrait {}
+                            // impl MyTrait for u8 {}
+                            // impl MyTrait for bool {}
+                            // ```
+                            //
+                            // And we are evaluating the predicate `<_#0t as MyTrait>`.
+                            //
+                            // During selection, we will end up with one candidate for each
+                            // impl of `MyTrait`. If we were to discard one impl in favor
+                            // of the other, we would be left with one candidate, causing
+                            // us to "successfully" select the predicate, unifying
+                            // _#0t with (for example) `u8`.
+                            //
+                            // However, we have no reason to believe that this unification
+                            // is correct - we've essentially just picked an arbitrary
+                            // *possibility* for _#0t, and required that this be the *only*
+                            // possibility.
+                            //
+                            // Eventually, we will either:
+                            // 1) Unify all inference variables in the predicate through
+                            // some other means (e.g. type-checking of a function). We will
+                            // then be in a position to drop marker trait candidates
+                            // without constraining inference variables (since there are
+                            // none left to constrain)
+                            // 2) Be left with some unconstrained inference variables. We
+                            // will then correctly report an inference error, since the
+                            // existence of multiple marker trait impls tells us nothing
+                            // about which one should actually apply.
+                            !needs_infer
+                        }
+                        Some(_) => true,
+                        None => false,
+                    };
+                } else {
+                    false
                 }
+            }
 
-                false
-            }
-            ClosureCandidate
-            | GeneratorCandidate
-            | FnPointerCandidate
-            | BuiltinObjectCandidate
-            | BuiltinUnsizeCandidate
-            | BuiltinCandidate { has_nested: true } => {
-                match victim.candidate {
-                    ParamCandidate(ref cand) => {
-                        // Prefer these to a global where-clause bound
-                        // (see issue #50825).
-                        is_global(cand) && other.evaluation.must_apply_modulo_regions()
-                    }
-                    _ => false,
-                }
-            }
-            _ => false,
+            // Everything else is ambiguous
+            (
+                ImplCandidate(_)
+                | ClosureCandidate
+                | GeneratorCandidate
+                | FnPointerCandidate
+                | BuiltinObjectCandidate
+                | BuiltinUnsizeCandidate
+                | BuiltinCandidate { has_nested: true }
+                | TraitAliasCandidate(..),
+                ImplCandidate(_)
+                | ClosureCandidate
+                | GeneratorCandidate
+                | FnPointerCandidate
+                | BuiltinObjectCandidate
+                | BuiltinUnsizeCandidate
+                | BuiltinCandidate { has_nested: true }
+                | TraitAliasCandidate(..),
+            ) => false,
         }
     }
 
@@ -1452,16 +1530,20 @@
 
             ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
 
-            ty::Tuple(tys) => {
-                Where(ty::Binder::bind(tys.last().into_iter().map(|k| k.expect_ty()).collect()))
-            }
+            ty::Tuple(tys) => Where(
+                obligation
+                    .predicate
+                    .rebind(tys.last().into_iter().map(|k| k.expect_ty()).collect()),
+            ),
 
             ty::Adt(def, substs) => {
                 let sized_crit = def.sized_constraint(self.tcx());
                 // (*) binder moved here
-                Where(ty::Binder::bind(
-                    sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect(),
-                ))
+                Where(
+                    obligation.predicate.rebind({
+                        sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect()
+                    }),
+                )
             }
 
             ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
@@ -1484,7 +1566,7 @@
 
         use self::BuiltinImplConditions::{Ambiguous, None, Where};
 
-        match self_ty.kind() {
+        match *self_ty.kind() {
             ty::Infer(ty::IntVar(_))
             | ty::Infer(ty::FloatVar(_))
             | ty::FnDef(..)
@@ -1513,17 +1595,23 @@
 
             ty::Array(element_ty, _) => {
                 // (*) binder moved here
-                Where(ty::Binder::bind(vec![element_ty]))
+                Where(obligation.predicate.rebind(vec![element_ty]))
             }
 
             ty::Tuple(tys) => {
                 // (*) binder moved here
-                Where(ty::Binder::bind(tys.iter().map(|k| k.expect_ty()).collect()))
+                Where(obligation.predicate.rebind(tys.iter().map(|k| k.expect_ty()).collect()))
             }
 
             ty::Closure(_, substs) => {
                 // (*) binder moved here
-                Where(ty::Binder::bind(substs.as_closure().upvar_tys().collect()))
+                let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
+                if let ty::Infer(ty::TyVar(_)) = ty.kind() {
+                    // Not yet resolved.
+                    Ambiguous
+                } else {
+                    Where(obligation.predicate.rebind(substs.as_closure().upvar_tys().collect()))
+                }
             }
 
             ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
@@ -1592,11 +1680,15 @@
                 tys.iter().map(|k| k.expect_ty()).collect()
             }
 
-            ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().collect(),
+            ty::Closure(_, ref substs) => {
+                let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
+                vec![ty]
+            }
 
             ty::Generator(_, ref substs, _) => {
+                let ty = self.infcx.shallow_resolve(substs.as_generator().tupled_upvars_ty());
                 let witness = substs.as_generator().witness();
-                substs.as_generator().upvar_tys().chain(iter::once(witness)).collect()
+                vec![ty].into_iter().chain(iter::once(witness)).collect()
             }
 
             ty::GeneratorWitness(types) => {
@@ -1649,7 +1741,7 @@
                 let ty: ty::Binder<Ty<'tcx>> = ty::Binder::bind(ty); // <----/
 
                 self.infcx.commit_unconditionally(|_| {
-                    let (placeholder_ty, _) = self.infcx.replace_bound_vars_with_placeholders(&ty);
+                    let placeholder_ty = self.infcx.replace_bound_vars_with_placeholders(&ty);
                     let Normalized { value: normalized_ty, mut obligations } =
                         ensure_sufficient_stack(|| {
                             project::normalize_with_depth(
@@ -1708,6 +1800,7 @@
         impl_def_id: DefId,
         obligation: &TraitObligation<'tcx>,
     ) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> {
+        debug!(?impl_def_id, ?obligation, "match_impl");
         let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
 
         // Before we create the substitutions and everything, first
@@ -1717,7 +1810,7 @@
             return Err(());
         }
 
-        let (placeholder_obligation, _) =
+        let placeholder_obligation =
             self.infcx().replace_bound_vars_with_placeholders(&obligation.predicate);
         let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;
 
@@ -1736,11 +1829,7 @@
                 )
             });
 
-        debug!(
-            "match_impl(impl_def_id={:?}, obligation={:?}, \
-             impl_trait_ref={:?}, placeholder_obligation_trait_ref={:?})",
-            impl_def_id, obligation, impl_trait_ref, placeholder_obligation_trait_ref
-        );
+        debug!(?impl_trait_ref, ?placeholder_obligation_trait_ref);
 
         let InferOk { obligations, .. } = self
             .infcx
@@ -1756,7 +1845,7 @@
             return Err(());
         }
 
-        debug!("match_impl: success impl_substs={:?}", impl_substs);
+        debug!(?impl_substs, "match_impl: success");
         Ok(Normalized { value: impl_substs, obligations: nested_obligations })
     }
 
@@ -1800,9 +1889,7 @@
 
     /// Normalize `where_clause_trait_ref` and try to match it against
     /// `obligation`. If successful, return any predicates that
-    /// result from the normalization. Normalization is necessary
-    /// because where-clauses are stored in the parameter environment
-    /// unnormalized.
+    /// result from the normalization.
     fn match_where_clause_trait_ref(
         &mut self,
         obligation: &TraitObligation<'tcx>,
@@ -1818,10 +1905,7 @@
         obligation: &TraitObligation<'tcx>,
         poly_trait_ref: ty::PolyTraitRef<'tcx>,
     ) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
-        debug!(
-            "match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
-            obligation, poly_trait_ref
-        );
+        debug!(?obligation, ?poly_trait_ref, "match_poly_trait_ref");
 
         self.infcx
             .at(&obligation.cause, obligation.param_env)
@@ -1868,10 +1952,10 @@
         obligation: &TraitObligation<'tcx>,
         substs: SubstsRef<'tcx>,
     ) -> ty::PolyTraitRef<'tcx> {
-        debug!("closure_trait_ref_unnormalized(obligation={:?}, substs={:?})", obligation, substs);
+        debug!(?obligation, ?substs, "closure_trait_ref_unnormalized");
         let closure_sig = substs.as_closure().sig();
 
-        debug!("closure_trait_ref_unnormalized: closure_sig = {:?}", closure_sig);
+        debug!(?closure_sig);
 
         // (1) Feels icky to skip the binder here, but OTOH we know
         // that the self-type is an unboxed closure type and hence is
@@ -1922,7 +2006,7 @@
         def_id: DefId,           // of impl or trait
         substs: SubstsRef<'tcx>, // for impl or trait
     ) -> Vec<PredicateObligation<'tcx>> {
-        debug!("impl_or_trait_obligations(def_id={:?})", def_id);
+        debug!(?def_id, "impl_or_trait_obligations");
         let tcx = self.tcx();
 
         // To allow for one-pass evaluation of the nested obligation,
@@ -2044,10 +2128,10 @@
             self.depth,
             reached_depth,
         );
-        debug!("update_reached_depth(reached_depth={})", reached_depth);
+        debug!(reached_depth, "update_reached_depth");
         let mut p = self;
         while reached_depth < p.depth {
-            debug!("update_reached_depth: marking {:?} as cycle participant", p.fresh_trait_ref);
+            debug!(?p.fresh_trait_ref, "update_reached_depth: marking as cycle participant");
             p.reached_depth.set(p.reached_depth.get().min(reached_depth));
             p = p.previous.head.unwrap();
         }
@@ -2174,10 +2258,10 @@
     /// `self.current_reached_depth()` and above.
     fn get_provisional(&self, fresh_trait_ref: ty::PolyTraitRef<'tcx>) -> Option<EvaluationResult> {
         debug!(
-            "get_provisional(fresh_trait_ref={:?}) = {:#?} with reached-depth {}",
-            fresh_trait_ref,
+            ?fresh_trait_ref,
+            reached_depth = ?self.reached_depth.get(),
+            "get_provisional = {:#?}",
             self.map.borrow().get(&fresh_trait_ref),
-            self.reached_depth.get(),
         );
         Some(self.map.borrow().get(&fresh_trait_ref)?.result)
     }
@@ -2200,14 +2284,11 @@
         fresh_trait_ref: ty::PolyTraitRef<'tcx>,
         result: EvaluationResult,
     ) {
-        debug!(
-            "insert_provisional(from_dfn={}, reached_depth={}, fresh_trait_ref={:?}, result={:?})",
-            from_dfn, reached_depth, fresh_trait_ref, result,
-        );
+        debug!(?from_dfn, ?reached_depth, ?fresh_trait_ref, ?result, "insert_provisional");
         let r_d = self.reached_depth.get();
         self.reached_depth.set(r_d.min(reached_depth));
 
-        debug!("insert_provisional: reached_depth={:?}", self.reached_depth.get());
+        debug!(reached_depth = self.reached_depth.get());
 
         self.map.borrow_mut().insert(fresh_trait_ref, ProvisionalEvaluation { from_dfn, result });
     }
@@ -2221,7 +2302,7 @@
     /// these provisional entries must either depend on it or some
     /// ancestor of it.
     fn on_failure(&self, dfn: usize) {
-        debug!("on_failure(dfn={:?})", dfn,);
+        debug!(?dfn, "on_failure");
         self.map.borrow_mut().retain(|key, eval| {
             if !eval.from_dfn >= dfn {
                 debug!("on_failure: removing {:?}", key);
@@ -2242,7 +2323,7 @@
         depth: usize,
         mut op: impl FnMut(ty::PolyTraitRef<'tcx>, EvaluationResult),
     ) {
-        debug!("on_completion(depth={}, reached_depth={})", depth, self.reached_depth.get(),);
+        debug!(?depth, reached_depth = ?self.reached_depth.get(), "on_completion");
 
         if self.reached_depth.get() < depth {
             debug!("on_completion: did not yet reach depth to complete");
@@ -2250,7 +2331,7 @@
         }
 
         for (fresh_trait_ref, eval) in self.map.borrow_mut().drain() {
-            debug!("on_completion: fresh_trait_ref={:?} eval={:?}", fresh_trait_ref, eval,);
+            debug!(?fresh_trait_ref, ?eval, "on_completion");
 
             op(fresh_trait_ref, eval.result);
         }
diff --git a/compiler/rustc_trait_selection/src/traits/structural_match.rs b/compiler/rustc_trait_selection/src/traits/structural_match.rs
index 4f7fa2c..ce0d3ef 100644
--- a/compiler/rustc_trait_selection/src/traits/structural_match.rs
+++ b/compiler/rustc_trait_selection/src/traits/structural_match.rs
@@ -8,6 +8,7 @@
 use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt, TypeFoldable, TypeVisitor};
 use rustc_span::Span;
+use std::ops::ControlFlow;
 
 #[derive(Debug)]
 pub enum NonStructuralMatchTy<'tcx> {
@@ -134,38 +135,38 @@
 }
 
 impl<'a, 'tcx> TypeVisitor<'tcx> for Search<'a, 'tcx> {
-    fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
         debug!("Search visiting ty: {:?}", ty);
 
         let (adt_def, substs) = match *ty.kind() {
             ty::Adt(adt_def, substs) => (adt_def, substs),
             ty::Param(_) => {
                 self.found = Some(NonStructuralMatchTy::Param);
-                return true; // Stop visiting.
+                return ControlFlow::BREAK;
             }
             ty::Dynamic(..) => {
                 self.found = Some(NonStructuralMatchTy::Dynamic);
-                return true; // Stop visiting.
+                return ControlFlow::BREAK;
             }
             ty::Foreign(_) => {
                 self.found = Some(NonStructuralMatchTy::Foreign);
-                return true; // Stop visiting.
+                return ControlFlow::BREAK;
             }
             ty::Opaque(..) => {
                 self.found = Some(NonStructuralMatchTy::Opaque);
-                return true; // Stop visiting.
+                return ControlFlow::BREAK;
             }
             ty::Projection(..) => {
                 self.found = Some(NonStructuralMatchTy::Projection);
-                return true; // Stop visiting.
+                return ControlFlow::BREAK;
             }
             ty::Generator(..) | ty::GeneratorWitness(..) => {
                 self.found = Some(NonStructuralMatchTy::Generator);
-                return true; // Stop visiting.
+                return ControlFlow::BREAK;
             }
             ty::Closure(..) => {
                 self.found = Some(NonStructuralMatchTy::Closure);
-                return true; // Stop visiting.
+                return ControlFlow::BREAK;
             }
             ty::RawPtr(..) => {
                 // structural-match ignores substructure of
@@ -182,39 +183,31 @@
                 // Even though `NonStructural` does not implement `PartialEq`,
                 // structural equality on `T` does not recur into the raw
                 // pointer. Therefore, one can still use `C` in a pattern.
-
-                // (But still tell the caller to continue search.)
-                return false;
+                return ControlFlow::CONTINUE;
             }
             ty::FnDef(..) | ty::FnPtr(..) => {
                 // Types of formals and return in `fn(_) -> _` are also irrelevant;
                 // so we do not recur into them via `super_visit_with`
-                //
-                // (But still tell the caller to continue search.)
-                return false;
+                return ControlFlow::CONTINUE;
             }
             ty::Array(_, n)
                 if { n.try_eval_usize(self.tcx(), ty::ParamEnv::reveal_all()) == Some(0) } =>
             {
                 // rust-lang/rust#62336: ignore type of contents
                 // for empty array.
-                //
-                // (But still tell the caller to continue search.)
-                return false;
+                return ControlFlow::CONTINUE;
             }
             ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => {
                 // These primitive types are always structural match.
                 //
                 // `Never` is kind of special here, but as it is not inhabitable, this should be fine.
-                //
-                // (But still tell the caller to continue search.)
-                return false;
+                return ControlFlow::CONTINUE;
             }
 
             ty::Array(..) | ty::Slice(_) | ty::Ref(..) | ty::Tuple(..) => {
                 // First check all contained types and then tell the caller to continue searching.
                 ty.super_visit_with(self);
-                return false;
+                return ControlFlow::CONTINUE;
             }
             ty::Infer(_) | ty::Placeholder(_) | ty::Bound(..) => {
                 bug!("unexpected type during structural-match checking: {:?}", ty);
@@ -223,22 +216,19 @@
                 self.tcx().sess.delay_span_bug(self.span, "ty::Error in structural-match check");
                 // We still want to check other types after encountering an error,
                 // as this may still emit relevant errors.
-                //
-                // So we continue searching here.
-                return false;
+                return ControlFlow::CONTINUE;
             }
         };
 
         if !self.seen.insert(adt_def.did) {
             debug!("Search already seen adt_def: {:?}", adt_def);
-            // Let caller continue its search.
-            return false;
+            return ControlFlow::CONTINUE;
         }
 
         if !self.type_marked_structural(ty) {
             debug!("Search found ty: {:?}", ty);
             self.found = Some(NonStructuralMatchTy::Adt(&adt_def));
-            return true; // Halt visiting!
+            return ControlFlow::BREAK;
         }
 
         // structural-match does not care about the
@@ -258,16 +248,16 @@
             let ty = self.tcx().normalize_erasing_regions(ty::ParamEnv::empty(), field_ty);
             debug!("structural-match ADT: field_ty={:?}, ty={:?}", field_ty, ty);
 
-            if ty.visit_with(self) {
+            if ty.visit_with(self).is_break() {
                 // found an ADT without structural-match; halt visiting!
                 assert!(self.found.is_some());
-                return true;
+                return ControlFlow::BREAK;
             }
         }
 
         // Even though we do not want to recur on substs, we do
         // want our caller to continue its own search.
-        false
+        ControlFlow::CONTINUE
     }
 }
 
diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs
index 909cd2a..496dff6 100644
--- a/compiler/rustc_trait_selection/src/traits/wf.rs
+++ b/compiler/rustc_trait_selection/src/traits/wf.rs
@@ -20,6 +20,7 @@
     infcx: &InferCtxt<'a, 'tcx>,
     param_env: ty::ParamEnv<'tcx>,
     body_id: hir::HirId,
+    recursion_depth: usize,
     arg: GenericArg<'tcx>,
     span: Span,
 ) -> Option<Vec<traits::PredicateObligation<'tcx>>> {
@@ -59,7 +60,8 @@
         GenericArgKind::Lifetime(..) => return Some(Vec::new()),
     };
 
-    let mut wf = WfPredicates { infcx, param_env, body_id, span, out: vec![], item: None };
+    let mut wf =
+        WfPredicates { infcx, param_env, body_id, span, out: vec![], recursion_depth, item: None };
     wf.compute(arg);
     debug!("wf::obligations({:?}, body_id={:?}) = {:?}", arg, body_id, wf.out);
 
@@ -80,7 +82,8 @@
     span: Span,
     item: Option<&'tcx hir::Item<'tcx>>,
 ) -> Vec<traits::PredicateObligation<'tcx>> {
-    let mut wf = WfPredicates { infcx, param_env, body_id, span, out: vec![], item };
+    let mut wf =
+        WfPredicates { infcx, param_env, body_id, span, out: vec![], recursion_depth: 0, item };
     wf.compute_trait_ref(trait_ref, Elaborate::All);
     wf.normalize()
 }
@@ -92,7 +95,15 @@
     predicate: ty::Predicate<'tcx>,
     span: Span,
 ) -> Vec<traits::PredicateObligation<'tcx>> {
-    let mut wf = WfPredicates { infcx, param_env, body_id, span, out: vec![], item: None };
+    let mut wf = WfPredicates {
+        infcx,
+        param_env,
+        body_id,
+        span,
+        out: vec![],
+        recursion_depth: 0,
+        item: None,
+    };
 
     // It's ok to skip the binder here because wf code is prepared for it
     match predicate.skip_binders() {
@@ -142,6 +153,7 @@
     body_id: hir::HirId,
     span: Span,
     out: Vec<traits::PredicateObligation<'tcx>>,
+    recursion_depth: usize,
     item: Option<&'tcx hir::Item<'tcx>>,
 }
 
@@ -241,18 +253,27 @@
         traits::ObligationCause::new(self.span, self.body_id, code)
     }
 
-    fn normalize(&mut self) -> Vec<traits::PredicateObligation<'tcx>> {
+    fn normalize(mut self) -> Vec<traits::PredicateObligation<'tcx>> {
         let cause = self.cause(traits::MiscObligation);
         let infcx = &mut self.infcx;
         let param_env = self.param_env;
         let mut obligations = Vec::with_capacity(self.out.len());
-        for pred in &self.out {
-            assert!(!pred.has_escaping_bound_vars());
+        for mut obligation in self.out {
+            assert!(!obligation.has_escaping_bound_vars());
             let mut selcx = traits::SelectionContext::new(infcx);
-            let i = obligations.len();
-            let value =
-                traits::normalize_to(&mut selcx, param_env, cause.clone(), pred, &mut obligations);
-            obligations.insert(i, value);
+            // Don't normalize the whole obligation; the param env is either
+            // already normalized or we're currently normalizing it.
+            // Either way, we should only normalize the predicate.
+            let normalized_predicate = traits::project::normalize_with_depth_to(
+                &mut selcx,
+                param_env,
+                cause.clone(),
+                self.recursion_depth,
+                &obligation.predicate,
+                &mut obligations,
+            );
+            obligation.predicate = normalized_predicate;
+            obligations.push(obligation);
         }
         obligations
     }
@@ -265,6 +286,7 @@
         debug!("compute_trait_ref obligations {:?}", obligations);
         let cause = self.cause(traits::MiscObligation);
         let param_env = self.param_env;
+        let depth = self.recursion_depth;
 
         let item = self.item;
 
@@ -286,7 +308,7 @@
                 &obligation.predicate,
                 tcx.associated_items(trait_ref.def_id).in_definition_order(),
             );
-            traits::Obligation::new(cause, param_env, obligation.predicate)
+            traits::Obligation::with_depth(cause, depth, param_env, obligation.predicate)
         };
 
         if let Elaborate::All = elaborate {
@@ -315,8 +337,9 @@
                             new_cause.make_mut().span = self_ty.span;
                         }
                     }
-                    traits::Obligation::new(
+                    traits::Obligation::with_depth(
                         new_cause,
+                        depth,
                         param_env,
                         ty::PredicateAtom::WellFormed(arg).to_predicate(tcx),
                     )
@@ -327,17 +350,51 @@
     /// Pushes the obligations required for `trait_ref::Item` to be WF
     /// into `self.out`.
     fn compute_projection(&mut self, data: ty::ProjectionTy<'tcx>) {
-        // A projection is well-formed if (a) the trait ref itself is
-        // WF and (b) the trait-ref holds.  (It may also be
-        // normalizable and be WF that way.)
-        let trait_ref = data.trait_ref(self.infcx.tcx);
-        self.compute_trait_ref(&trait_ref, Elaborate::None);
+        // A projection is well-formed if
+        //
+        // (a) its predicates hold (*)
+        // (b) its substs are wf
+        //
+        // (*) The predicates of an associated type include the predicates of
+        //     the trait that it's contained in. For example, given
+        //
+        // trait A<T>: Clone {
+        //     type X where T: Copy;
+        // }
+        //
+        // The predicates of `<() as A<i32>>::X` are:
+        // [
+        //     `(): Sized`
+        //     `(): Clone`
+        //     `(): A<i32>`
+        //     `i32: Sized`
+        //     `i32: Clone`
+        //     `i32: Copy`
+        // ]
+        let obligations = self.nominal_obligations(data.item_def_id, data.substs);
+        self.out.extend(obligations);
 
-        if !data.has_escaping_bound_vars() {
-            let predicate = trait_ref.without_const().to_predicate(self.infcx.tcx);
-            let cause = self.cause(traits::ProjectionWf(data));
-            self.out.push(traits::Obligation::new(cause, self.param_env, predicate));
-        }
+        let tcx = self.tcx();
+        let cause = self.cause(traits::MiscObligation);
+        let param_env = self.param_env;
+        let depth = self.recursion_depth;
+
+        self.out.extend(
+            data.substs
+                .iter()
+                .filter(|arg| {
+                    matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..))
+                })
+                .filter(|arg| !arg.has_escaping_bound_vars())
+                .map(|arg| {
+                    traits::Obligation::with_depth(
+                        cause.clone(),
+                        depth,
+                        param_env,
+                        ty::PredicateAtom::WellFormed(arg).to_predicate(tcx),
+                    )
+                }),
+        );
     }
 
     fn require_sized(&mut self, subty: Ty<'tcx>, cause: traits::ObligationCauseCode<'tcx>) {
@@ -347,8 +404,9 @@
                 def_id: self.infcx.tcx.require_lang_item(LangItem::Sized, None),
                 substs: self.infcx.tcx.mk_substs_trait(subty, &[]),
             };
-            self.out.push(traits::Obligation::new(
+            self.out.push(traits::Obligation::with_depth(
                 cause,
+                self.recursion_depth,
                 self.param_env,
                 trait_ref.without_const().to_predicate(self.infcx.tcx),
             ));
@@ -359,6 +417,7 @@
     fn compute(&mut self, arg: GenericArg<'tcx>) {
         let mut walker = arg.walk();
         let param_env = self.param_env;
+        let depth = self.recursion_depth;
         while let Some(arg) = walker.next() {
             let ty = match arg.unpack() {
                 GenericArgKind::Type(ty) => ty,
@@ -378,8 +437,9 @@
                             let predicate = ty::PredicateAtom::ConstEvaluatable(def, substs)
                                 .to_predicate(self.tcx());
                             let cause = self.cause(traits::MiscObligation);
-                            self.out.push(traits::Obligation::new(
+                            self.out.push(traits::Obligation::with_depth(
                                 cause,
+                                self.recursion_depth,
                                 self.param_env,
                                 predicate,
                             ));
@@ -394,8 +454,9 @@
                                     val: ty::ConstKind::Infer(resolved),
                                     ..*constant
                                 });
-                                self.out.push(traits::Obligation::new(
+                                self.out.push(traits::Obligation::with_depth(
                                     cause,
+                                    self.recursion_depth,
                                     self.param_env,
                                     ty::PredicateAtom::WellFormed(resolved_constant.into())
                                         .to_predicate(self.tcx()),
@@ -480,8 +541,9 @@
                     // WfReference
                     if !r.has_escaping_bound_vars() && !rty.has_escaping_bound_vars() {
                         let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
-                        self.out.push(traits::Obligation::new(
+                        self.out.push(traits::Obligation::with_depth(
                             cause,
+                            depth,
                             param_env,
                             ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(rty, r))
                                 .to_predicate(self.tcx()),
@@ -530,10 +592,8 @@
                     // anyway, except via auto trait matching (which
                     // only inspects the upvar types).
                     walker.skip_current_subtree(); // subtree handled below
-                    for upvar_ty in substs.as_closure().upvar_tys() {
-                        // FIXME(eddyb) add the type to `walker` instead of recursing.
-                        self.compute(upvar_ty.into());
-                    }
+                    // FIXME(eddyb) add the type to `walker` instead of recursing.
+                    self.compute(substs.as_closure().tupled_upvars_ty().into());
                 }
 
                 ty::FnPtr(_) => {
@@ -571,8 +631,9 @@
                         let component_traits = data.auto_traits().chain(data.principal_def_id());
                         let tcx = self.tcx();
                         self.out.extend(component_traits.map(|did| {
-                            traits::Obligation::new(
+                            traits::Obligation::with_depth(
                                 cause.clone(),
+                                depth,
                                 param_env,
                                 ty::PredicateAtom::ObjectSafe(did).to_predicate(tcx),
                             )
@@ -597,8 +658,9 @@
                     if let ty::Infer(ty::TyVar(_)) = ty.kind() {
                         // Not yet resolved, but we've made progress.
                         let cause = self.cause(traits::MiscObligation);
-                        self.out.push(traits::Obligation::new(
+                        self.out.push(traits::Obligation::with_depth(
                             cause,
+                            self.recursion_depth,
                             param_env,
                             ty::PredicateAtom::WellFormed(ty.into()).to_predicate(self.tcx()),
                         ));
@@ -635,7 +697,7 @@
             .zip(origins.into_iter().rev())
             .map(|((pred, span), origin_def_id)| {
                 let cause = self.cause(traits::BindingObligation(origin_def_id, span));
-                traits::Obligation::new(cause, self.param_env, pred)
+                traits::Obligation::with_depth(cause, self.recursion_depth, self.param_env, pred)
             })
             .filter(|pred| !pred.has_escaping_bound_vars())
             .collect()
@@ -688,8 +750,9 @@
                 let cause = self.cause(traits::ObjectTypeBound(ty, explicit_bound));
                 let outlives =
                     ty::Binder::dummy(ty::OutlivesPredicate(explicit_bound, implicit_bound));
-                self.out.push(traits::Obligation::new(
+                self.out.push(traits::Obligation::with_depth(
                     cause,
+                    self.recursion_depth,
                     self.param_env,
                     outlives.to_predicate(self.infcx.tcx),
                 ));
diff --git a/compiler/rustc_traits/Cargo.toml b/compiler/rustc_traits/Cargo.toml
index 6d495718..8bd9e29 100644
--- a/compiler/rustc_traits/Cargo.toml
+++ b/compiler/rustc_traits/Cargo.toml
@@ -12,9 +12,9 @@
 rustc_index = { path = "../rustc_index" }
 rustc_ast = { path = "../rustc_ast" }
 rustc_span = { path = "../rustc_span" }
-chalk-ir = "0.29.0"
-chalk-solve = "0.29.0"
-chalk-engine = "0.29.0"
+chalk-ir = "0.36.0"
+chalk-solve = "0.36.0"
+chalk-engine = "0.36.0"
 smallvec = { version = "1.0", features = ["union", "may_dangle"] }
 rustc_infer = { path = "../rustc_infer" }
 rustc_trait_selection = { path = "../rustc_trait_selection" }
diff --git a/compiler/rustc_traits/src/chalk/db.rs b/compiler/rustc_traits/src/chalk/db.rs
index 828ee6d..c5a46b1 100644
--- a/compiler/rustc_traits/src/chalk/db.rs
+++ b/compiler/rustc_traits/src/chalk/db.rs
@@ -22,7 +22,6 @@
 
 pub struct RustIrDatabase<'tcx> {
     pub(crate) interner: RustInterner<'tcx>,
-    pub(crate) restatic_placeholder: ty::Region<'tcx>,
     pub(crate) reempty_placeholder: ty::Region<'tcx>,
 }
 
@@ -38,18 +37,28 @@
         def_id: DefId,
         bound_vars: SubstsRef<'tcx>,
     ) -> Vec<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>> {
-        let predicates = self.interner.tcx.predicates_of(def_id).predicates;
-        let mut regions_substitutor = lowering::RegionsSubstitutor::new(
-            self.interner.tcx,
-            self.restatic_placeholder,
-            self.reempty_placeholder,
-        );
+        let predicates = self.interner.tcx.predicates_defined_on(def_id).predicates;
+        let mut regions_substitutor =
+            lowering::RegionsSubstitutor::new(self.interner.tcx, self.reempty_placeholder);
         predicates
             .iter()
             .map(|(wc, _)| wc.subst(self.interner.tcx, bound_vars))
             .map(|wc| wc.fold_with(&mut regions_substitutor))
             .filter_map(|wc| LowerInto::<Option<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>>>::lower_into(wc, &self.interner)).collect()
     }
+
+    fn bounds_for<T>(&self, def_id: DefId, bound_vars: SubstsRef<'tcx>) -> Vec<T>
+    where
+        ty::Predicate<'tcx>: LowerInto<'tcx, std::option::Option<T>>,
+    {
+        self.interner
+            .tcx
+            .explicit_item_bounds(def_id)
+            .iter()
+            .map(|(bound, _)| bound.subst(self.interner.tcx, &bound_vars))
+            .filter_map(|bound| LowerInto::<Option<_>>::lower_into(bound, &self.interner))
+            .collect()
+    }
 }
 
 impl<'tcx> chalk_solve::RustIrDatabase<RustInterner<'tcx>> for RustIrDatabase<'tcx> {
@@ -73,10 +82,9 @@
         }
         let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
         let binders = binders_for(&self.interner, bound_vars);
-        // FIXME(chalk): this really isn't right I don't think. The functions
-        // for GATs are a bit hard to figure out. Are these supposed to be where
-        // clauses or bounds?
+
         let where_clauses = self.where_clauses_for(def_id, bound_vars);
+        let bounds = self.bounds_for(def_id, bound_vars);
 
         Arc::new(chalk_solve::rust_ir::AssociatedTyDatum {
             trait_id: chalk_ir::TraitId(trait_def_id),
@@ -84,7 +92,7 @@
             name: (),
             binders: chalk_ir::Binders::new(
                 binders,
-                chalk_solve::rust_ir::AssociatedTyDatumBound { bounds: vec![], where_clauses },
+                chalk_solve::rust_ir::AssociatedTyDatumBound { bounds, where_clauses },
             ),
         })
     }
@@ -110,34 +118,27 @@
             .map(|i| chalk_ir::AssocTypeId(i.def_id))
             .collect();
 
-        let well_known = if self.interner.tcx.lang_items().sized_trait() == Some(def_id) {
+        let lang_items = self.interner.tcx.lang_items();
+        let well_known = if lang_items.sized_trait() == Some(def_id) {
             Some(chalk_solve::rust_ir::WellKnownTrait::Sized)
-        } else if self.interner.tcx.lang_items().copy_trait() == Some(def_id) {
+        } else if lang_items.copy_trait() == Some(def_id) {
             Some(chalk_solve::rust_ir::WellKnownTrait::Copy)
-        } else if self.interner.tcx.lang_items().clone_trait() == Some(def_id) {
+        } else if lang_items.clone_trait() == Some(def_id) {
             Some(chalk_solve::rust_ir::WellKnownTrait::Clone)
-        } else if self.interner.tcx.lang_items().drop_trait() == Some(def_id) {
+        } else if lang_items.drop_trait() == Some(def_id) {
             Some(chalk_solve::rust_ir::WellKnownTrait::Drop)
-        } else if self.interner.tcx.lang_items().fn_trait() == Some(def_id) {
+        } else if lang_items.fn_trait() == Some(def_id) {
             Some(chalk_solve::rust_ir::WellKnownTrait::Fn)
-        } else if self
-            .interner
-            .tcx
-            .lang_items()
-            .fn_once_trait()
-            .map(|t| def_id == t)
-            .unwrap_or(false)
-        {
+        } else if lang_items.fn_once_trait() == Some(def_id) {
             Some(chalk_solve::rust_ir::WellKnownTrait::FnOnce)
-        } else if self
-            .interner
-            .tcx
-            .lang_items()
-            .fn_mut_trait()
-            .map(|t| def_id == t)
-            .unwrap_or(false)
-        {
+        } else if lang_items.fn_mut_trait() == Some(def_id) {
             Some(chalk_solve::rust_ir::WellKnownTrait::FnMut)
+        } else if lang_items.unsize_trait() == Some(def_id) {
+            Some(chalk_solve::rust_ir::WellKnownTrait::Unsize)
+        } else if lang_items.unpin_trait() == Some(def_id) {
+            Some(chalk_solve::rust_ir::WellKnownTrait::Unpin)
+        } else if lang_items.coerce_unsized_trait() == Some(def_id) {
+            Some(chalk_solve::rust_ir::WellKnownTrait::CoerceUnsized)
         } else {
             None
         };
@@ -262,11 +263,8 @@
 
         let trait_ref = self.interner.tcx.impl_trait_ref(def_id).expect("not an impl");
         let trait_ref = trait_ref.subst(self.interner.tcx, bound_vars);
-        let mut regions_substitutor = lowering::RegionsSubstitutor::new(
-            self.interner.tcx,
-            self.restatic_placeholder,
-            self.reempty_placeholder,
-        );
+        let mut regions_substitutor =
+            lowering::RegionsSubstitutor::new(self.interner.tcx, self.reempty_placeholder);
         let trait_ref = trait_ref.fold_with(&mut regions_substitutor);
 
         let where_clauses = self.where_clauses_for(def_id, bound_vars);
@@ -276,11 +274,20 @@
             where_clauses,
         };
 
+        let associated_ty_value_ids: Vec<_> = self
+            .interner
+            .tcx
+            .associated_items(def_id)
+            .in_definition_order()
+            .filter(|i| i.kind == AssocKind::Type)
+            .map(|i| chalk_solve::rust_ir::AssociatedTyValueId(i.def_id))
+            .collect();
+
         Arc::new(chalk_solve::rust_ir::ImplDatum {
-            polarity: chalk_solve::rust_ir::Polarity::Positive,
+            polarity: self.interner.tcx.impl_polarity(def_id).lower_into(&self.interner),
             binders: chalk_ir::Binders::new(binders, value),
             impl_type: chalk_solve::rust_ir::ImplType::Local,
-            associated_ty_value_ids: vec![],
+            associated_ty_value_ids,
         })
     }
 
@@ -304,11 +311,8 @@
 
             let self_ty = trait_ref.self_ty();
             let self_ty = self_ty.subst(self.interner.tcx, bound_vars);
-            let mut regions_substitutor = lowering::RegionsSubstitutor::new(
-                self.interner.tcx,
-                self.restatic_placeholder,
-                self.reempty_placeholder,
-            );
+            let mut regions_substitutor =
+                lowering::RegionsSubstitutor::new(self.interner.tcx, self.reempty_placeholder);
             let self_ty = self_ty.fold_with(&mut regions_substitutor);
             let lowered_ty = self_ty.lower_into(&self.interner);
 
@@ -322,51 +326,51 @@
     fn impl_provided_for(
         &self,
         auto_trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
-        app_ty: &chalk_ir::ApplicationTy<RustInterner<'tcx>>,
+        chalk_ty: &chalk_ir::TyKind<RustInterner<'tcx>>,
     ) -> bool {
         use chalk_ir::Scalar::*;
-        use chalk_ir::TypeName::*;
+        use chalk_ir::TyKind::*;
 
         let trait_def_id = auto_trait_id.0;
         let all_impls = self.interner.tcx.all_impls(trait_def_id);
         for impl_def_id in all_impls {
             let trait_ref = self.interner.tcx.impl_trait_ref(impl_def_id).unwrap();
             let self_ty = trait_ref.self_ty();
-            let provides = match (self_ty.kind(), app_ty.name) {
-                (&ty::Adt(impl_adt_def, ..), Adt(id)) => impl_adt_def.did == id.0.did,
-                (_, AssociatedType(_ty_id)) => {
+            let provides = match (self_ty.kind(), chalk_ty) {
+                (&ty::Adt(impl_adt_def, ..), Adt(id, ..)) => impl_adt_def.did == id.0.did,
+                (_, AssociatedType(_ty_id, ..)) => {
                     // FIXME(chalk): See https://github.com/rust-lang/rust/pull/77152#discussion_r494484774
                     false
                 }
                 (ty::Bool, Scalar(Bool)) => true,
                 (ty::Char, Scalar(Char)) => true,
-                (ty::Int(ty1), Scalar(Int(ty2))) => match (ty1, ty2) {
+                (ty::Int(ty1), Scalar(Int(ty2))) => matches!(
+                    (ty1, ty2),
                     (ast::IntTy::Isize, chalk_ir::IntTy::Isize)
-                    | (ast::IntTy::I8, chalk_ir::IntTy::I8)
-                    | (ast::IntTy::I16, chalk_ir::IntTy::I16)
-                    | (ast::IntTy::I32, chalk_ir::IntTy::I32)
-                    | (ast::IntTy::I64, chalk_ir::IntTy::I64)
-                    | (ast::IntTy::I128, chalk_ir::IntTy::I128) => true,
-                    _ => false,
-                },
-                (ty::Uint(ty1), Scalar(Uint(ty2))) => match (ty1, ty2) {
+                        | (ast::IntTy::I8, chalk_ir::IntTy::I8)
+                        | (ast::IntTy::I16, chalk_ir::IntTy::I16)
+                        | (ast::IntTy::I32, chalk_ir::IntTy::I32)
+                        | (ast::IntTy::I64, chalk_ir::IntTy::I64)
+                        | (ast::IntTy::I128, chalk_ir::IntTy::I128)
+                ),
+                (ty::Uint(ty1), Scalar(Uint(ty2))) => matches!(
+                    (ty1, ty2),
                     (ast::UintTy::Usize, chalk_ir::UintTy::Usize)
-                    | (ast::UintTy::U8, chalk_ir::UintTy::U8)
-                    | (ast::UintTy::U16, chalk_ir::UintTy::U16)
-                    | (ast::UintTy::U32, chalk_ir::UintTy::U32)
-                    | (ast::UintTy::U64, chalk_ir::UintTy::U64)
-                    | (ast::UintTy::U128, chalk_ir::UintTy::U128) => true,
-                    _ => false,
-                },
-                (ty::Float(ty1), Scalar(Float(ty2))) => match (ty1, ty2) {
+                        | (ast::UintTy::U8, chalk_ir::UintTy::U8)
+                        | (ast::UintTy::U16, chalk_ir::UintTy::U16)
+                        | (ast::UintTy::U32, chalk_ir::UintTy::U32)
+                        | (ast::UintTy::U64, chalk_ir::UintTy::U64)
+                        | (ast::UintTy::U128, chalk_ir::UintTy::U128)
+                ),
+                (ty::Float(ty1), Scalar(Float(ty2))) => matches!(
+                    (ty1, ty2),
                     (ast::FloatTy::F32, chalk_ir::FloatTy::F32)
-                    | (ast::FloatTy::F64, chalk_ir::FloatTy::F64) => true,
-                    _ => false,
-                },
-                (&ty::Tuple(..), Tuple(..)) => true,
-                (&ty::Array(..), Array) => true,
-                (&ty::Slice(..), Slice) => true,
-                (&ty::RawPtr(type_and_mut), Raw(mutability)) => {
+                        | (ast::FloatTy::F64, chalk_ir::FloatTy::F64)
+                ),
+                (&ty::Tuple(substs), Tuple(len, _)) => substs.len() == *len,
+                (&ty::Array(..), Array(..)) => true,
+                (&ty::Slice(..), Slice(..)) => true,
+                (&ty::RawPtr(type_and_mut), Raw(mutability, _)) => {
                     match (type_and_mut.mutbl, mutability) {
                         (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true,
                         (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false,
@@ -374,17 +378,19 @@
                         (ast::Mutability::Not, chalk_ir::Mutability::Not) => true,
                     }
                 }
-                (&ty::Ref(.., mutability1), Ref(mutability2)) => match (mutability1, mutability2) {
-                    (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true,
-                    (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false,
-                    (ast::Mutability::Not, chalk_ir::Mutability::Mut) => false,
-                    (ast::Mutability::Not, chalk_ir::Mutability::Not) => true,
-                },
-                (&ty::Opaque(def_id, ..), OpaqueType(opaque_ty_id)) => def_id == opaque_ty_id.0,
-                (&ty::FnDef(def_id, ..), FnDef(fn_def_id)) => def_id == fn_def_id.0,
+                (&ty::Ref(.., mutability1), Ref(mutability2, ..)) => {
+                    match (mutability1, mutability2) {
+                        (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true,
+                        (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false,
+                        (ast::Mutability::Not, chalk_ir::Mutability::Mut) => false,
+                        (ast::Mutability::Not, chalk_ir::Mutability::Not) => true,
+                    }
+                }
+                (&ty::Opaque(def_id, ..), OpaqueType(opaque_ty_id, ..)) => def_id == opaque_ty_id.0,
+                (&ty::FnDef(def_id, ..), FnDef(fn_def_id, ..)) => def_id == fn_def_id.0,
                 (&ty::Str, Str) => true,
                 (&ty::Never, Never) => true,
-                (&ty::Closure(def_id, ..), Closure(closure_id)) => def_id == closure_id.0,
+                (&ty::Closure(def_id, ..), Closure(closure_id, _)) => def_id == closure_id.0,
                 (&ty::Foreign(def_id), Foreign(foreign_def_id)) => def_id == foreign_def_id.0,
                 (&ty::Error(..), Error) => false,
                 _ => false,
@@ -402,24 +408,38 @@
     ) -> Arc<chalk_solve::rust_ir::AssociatedTyValue<RustInterner<'tcx>>> {
         let def_id = associated_ty_id.0;
         let assoc_item = self.interner.tcx.associated_item(def_id);
-        let impl_id = match assoc_item.container {
-            AssocItemContainer::TraitContainer(def_id) => def_id,
-            _ => unimplemented!("Not possible??"),
+        let (impl_id, trait_id) = match assoc_item.container {
+            AssocItemContainer::TraitContainer(def_id) => (def_id, def_id),
+            AssocItemContainer::ImplContainer(def_id) => {
+                (def_id, self.interner.tcx.impl_trait_ref(def_id).unwrap().def_id)
+            }
         };
         match assoc_item.kind {
             AssocKind::Type => {}
             _ => unimplemented!("Not possible??"),
         }
+
+        let trait_item = self
+            .interner
+            .tcx
+            .associated_items(trait_id)
+            .find_by_name_and_kind(self.interner.tcx, assoc_item.ident, assoc_item.kind, trait_id)
+            .unwrap();
         let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
         let binders = binders_for(&self.interner, bound_vars);
-        let ty = self.interner.tcx.type_of(def_id);
+        let ty = self
+            .interner
+            .tcx
+            .type_of(def_id)
+            .subst(self.interner.tcx, bound_vars)
+            .lower_into(&self.interner);
 
         Arc::new(chalk_solve::rust_ir::AssociatedTyValue {
             impl_id: chalk_ir::ImplId(impl_id),
-            associated_ty_id: chalk_ir::AssocTypeId(def_id),
+            associated_ty_id: chalk_ir::AssocTypeId(trait_item.def_id),
             value: chalk_ir::Binders::new(
                 binders,
-                chalk_solve::rust_ir::AssociatedTyValueBound { ty: ty.lower_into(&self.interner) },
+                chalk_solve::rust_ir::AssociatedTyValueBound { ty },
             ),
         })
     }
@@ -439,17 +459,61 @@
         &self,
         opaque_ty_id: chalk_ir::OpaqueTyId<RustInterner<'tcx>>,
     ) -> Arc<chalk_solve::rust_ir::OpaqueTyDatum<RustInterner<'tcx>>> {
-        let bound_vars = bound_vars_for_item(self.interner.tcx, opaque_ty_id.0);
-        let binders = binders_for(&self.interner, bound_vars);
+        let bound_vars = ty::fold::shift_vars(
+            self.interner.tcx,
+            &bound_vars_for_item(self.interner.tcx, opaque_ty_id.0),
+            1,
+        );
         let where_clauses = self.where_clauses_for(opaque_ty_id.0, bound_vars);
 
+        let identity_substs = InternalSubsts::identity_for_item(self.interner.tcx, opaque_ty_id.0);
+
+        let bounds =
+            self.interner
+                .tcx
+                .explicit_item_bounds(opaque_ty_id.0)
+                .iter()
+                .map(|(bound, _)| bound.subst(self.interner.tcx, &bound_vars))
+                .map(|bound| {
+                    bound.fold_with(&mut ty::fold::BottomUpFolder {
+                        tcx: self.interner.tcx,
+                        ty_op: |ty| {
+                            if let ty::Opaque(def_id, substs) = *ty.kind() {
+                                if def_id == opaque_ty_id.0 && substs == identity_substs {
+                                    return self.interner.tcx.mk_ty(ty::Bound(
+                                        ty::INNERMOST,
+                                        ty::BoundTy::from(ty::BoundVar::from_u32(0)),
+                                    ));
+                                }
+                            }
+                            ty
+                        },
+                        lt_op: |lt| lt,
+                        ct_op: |ct| ct,
+                    })
+                })
+                .filter_map(|bound| {
+                    LowerInto::<
+                    Option<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>>
+                >::lower_into(bound, &self.interner)
+                })
+                .collect();
+
+        // Binder for the bound variable representing the concrete impl Trait type.
+        let existential_binder = chalk_ir::VariableKinds::from1(
+            &self.interner,
+            chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General),
+        );
+
         let value = chalk_solve::rust_ir::OpaqueTyDatumBound {
-            bounds: chalk_ir::Binders::new(binders.clone(), vec![]),
-            where_clauses: chalk_ir::Binders::new(binders, where_clauses),
+            bounds: chalk_ir::Binders::new(existential_binder.clone(), bounds),
+            where_clauses: chalk_ir::Binders::new(existential_binder, where_clauses),
         };
+
+        let binders = binders_for(&self.interner, bound_vars);
         Arc::new(chalk_solve::rust_ir::OpaqueTyDatum {
             opaque_ty_id,
-            bound: chalk_ir::Binders::empty(&self.interner, value),
+            bound: chalk_ir::Binders::new(binders, value),
         })
     }
 
@@ -502,17 +566,11 @@
         substs: &chalk_ir::Substitution<RustInterner<'tcx>>,
     ) -> chalk_solve::rust_ir::ClosureKind {
         let kind = &substs.as_slice(&self.interner)[substs.len(&self.interner) - 3];
-        match kind.assert_ty_ref(&self.interner).data(&self.interner) {
-            chalk_ir::TyData::Apply(apply) => match apply.name {
-                chalk_ir::TypeName::Scalar(scalar) => match scalar {
-                    chalk_ir::Scalar::Int(int_ty) => match int_ty {
-                        chalk_ir::IntTy::I8 => chalk_solve::rust_ir::ClosureKind::Fn,
-                        chalk_ir::IntTy::I16 => chalk_solve::rust_ir::ClosureKind::FnMut,
-                        chalk_ir::IntTy::I32 => chalk_solve::rust_ir::ClosureKind::FnOnce,
-                        _ => bug!("bad closure kind"),
-                    },
-                    _ => bug!("bad closure kind"),
-                },
+        match kind.assert_ty_ref(&self.interner).kind(&self.interner) {
+            chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(int_ty)) => match int_ty {
+                chalk_ir::IntTy::I8 => chalk_solve::rust_ir::ClosureKind::Fn,
+                chalk_ir::IntTy::I16 => chalk_solve::rust_ir::ClosureKind::FnMut,
+                chalk_ir::IntTy::I32 => chalk_solve::rust_ir::ClosureKind::FnOnce,
                 _ => bug!("bad closure kind"),
             },
             _ => bug!("bad closure kind"),
@@ -526,23 +584,19 @@
     ) -> chalk_ir::Binders<chalk_solve::rust_ir::FnDefInputsAndOutputDatum<RustInterner<'tcx>>>
     {
         let sig = &substs.as_slice(&self.interner)[substs.len(&self.interner) - 2];
-        match sig.assert_ty_ref(&self.interner).data(&self.interner) {
-            chalk_ir::TyData::Function(f) => {
+        match sig.assert_ty_ref(&self.interner).kind(&self.interner) {
+            chalk_ir::TyKind::Function(f) => {
                 let substitution = f.substitution.as_slice(&self.interner);
                 let return_type =
                     substitution.last().unwrap().assert_ty_ref(&self.interner).clone();
                 // Closure arguments are tupled
                 let argument_tuple = substitution[0].assert_ty_ref(&self.interner);
-                let argument_types = match argument_tuple.data(&self.interner) {
-                    chalk_ir::TyData::Apply(apply) => match apply.name {
-                        chalk_ir::TypeName::Tuple(_) => apply
-                            .substitution
-                            .iter(&self.interner)
-                            .map(|arg| arg.assert_ty_ref(&self.interner))
-                            .cloned()
-                            .collect(),
-                        _ => bug!("Expecting closure FnSig args to be tupled."),
-                    },
+                let argument_types = match argument_tuple.kind(&self.interner) {
+                    chalk_ir::TyKind::Tuple(_len, substitution) => substitution
+                        .iter(&self.interner)
+                        .map(|arg| arg.assert_ty_ref(&self.interner))
+                        .cloned()
+                        .collect(),
                     _ => bug!("Expecting closure FnSig args to be tupled."),
                 };
 
@@ -576,6 +630,20 @@
         let substitution = &substs.as_slice(&self.interner)[0..substs.len(&self.interner) - 3];
         chalk_ir::Substitution::from_iter(&self.interner, substitution)
     }
+
+    fn generator_datum(
+        &self,
+        _generator_id: chalk_ir::GeneratorId<RustInterner<'tcx>>,
+    ) -> Arc<chalk_solve::rust_ir::GeneratorDatum<RustInterner<'tcx>>> {
+        unimplemented!()
+    }
+
+    fn generator_witness_datum(
+        &self,
+        _generator_id: chalk_ir::GeneratorId<RustInterner<'tcx>>,
+    ) -> Arc<chalk_solve::rust_ir::GeneratorWitnessDatum<RustInterner<'tcx>>> {
+        unimplemented!()
+    }
 }
 
 /// Creates a `InternalSubsts` that maps each generic parameter to a higher-ranked
@@ -619,7 +687,7 @@
         bound_vars.iter().map(|arg| match arg.unpack() {
             ty::subst::GenericArgKind::Lifetime(_re) => chalk_ir::VariableKind::Lifetime,
             ty::subst::GenericArgKind::Type(_ty) => {
-                chalk_ir::VariableKind::Ty(chalk_ir::TyKind::General)
+                chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General)
             }
             ty::subst::GenericArgKind::Const(c) => {
                 chalk_ir::VariableKind::Const(c.ty.lower_into(interner))
diff --git a/compiler/rustc_traits/src/chalk/lowering.rs b/compiler/rustc_traits/src/chalk/lowering.rs
index 1e1841a..c4e2c7f 100644
--- a/compiler/rustc_traits/src/chalk/lowering.rs
+++ b/compiler/rustc_traits/src/chalk/lowering.rs
@@ -31,17 +31,19 @@
 //! not. To lower anything wrapped in a `Binder`, we first deeply find any bound
 //! variables from the current `Binder`.
 
+use rustc_ast::ast;
 use rustc_middle::traits::{ChalkEnvironmentAndGoal, ChalkRustInterner as RustInterner};
 use rustc_middle::ty::fold::TypeFolder;
 use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
 use rustc_middle::ty::{
-    self, Binder, BoundRegion, Region, RegionKind, Ty, TyCtxt, TyKind, TypeFoldable, TypeVisitor,
+    self, Binder, BoundRegion, Region, RegionKind, Ty, TyCtxt, TypeFoldable, TypeVisitor,
 };
 use rustc_span::def_id::DefId;
 
 use chalk_ir::{FnSig, ForeignDefId};
 use rustc_hir::Unsafety;
 use std::collections::btree_map::{BTreeMap, Entry};
+use std::ops::ControlFlow;
 
 /// Essentially an `Into` with a `&RustInterner` parameter
 crate trait LowerInto<'tcx, T> {
@@ -81,8 +83,11 @@
         interner: &RustInterner<'tcx>,
     ) -> chalk_ir::InEnvironment<chalk_ir::Goal<RustInterner<'tcx>>> {
         let clauses = self.environment.into_iter().map(|predicate| {
-            let (predicate, binders, _named_regions) =
-                collect_bound_vars(interner, interner.tcx, &predicate.bound_atom(interner.tcx));
+            let (predicate, binders, _named_regions) = collect_bound_vars(
+                interner,
+                interner.tcx,
+                &predicate.bound_atom_with_opt_escaping(interner.tcx),
+            );
             let consequence = match predicate {
                 ty::PredicateAtom::TypeWellFormedFromEnv(ty) => {
                     chalk_ir::DomainGoal::FromEnv(chalk_ir::FromEnv::Ty(ty.lower_into(interner)))
@@ -133,8 +138,11 @@
 
 impl<'tcx> LowerInto<'tcx, chalk_ir::GoalData<RustInterner<'tcx>>> for ty::Predicate<'tcx> {
     fn lower_into(self, interner: &RustInterner<'tcx>) -> chalk_ir::GoalData<RustInterner<'tcx>> {
-        let (predicate, binders, _named_regions) =
-            collect_bound_vars(interner, interner.tcx, &self.bound_atom(interner.tcx));
+        let (predicate, binders, _named_regions) = collect_bound_vars(
+            interner,
+            interner.tcx,
+            &self.bound_atom_with_opt_escaping(interner.tcx),
+        );
 
         let value = match predicate {
             ty::PredicateAtom::Trait(predicate, _) => {
@@ -233,24 +241,16 @@
 
 impl<'tcx> LowerInto<'tcx, chalk_ir::Ty<RustInterner<'tcx>>> for Ty<'tcx> {
     fn lower_into(self, interner: &RustInterner<'tcx>) -> chalk_ir::Ty<RustInterner<'tcx>> {
-        use chalk_ir::TyData;
         use rustc_ast as ast;
-        use TyKind::*;
 
-        let empty = || chalk_ir::Substitution::empty(interner);
-        let struct_ty =
-            |def_id| chalk_ir::TypeName::Adt(chalk_ir::AdtId(interner.tcx.adt_def(def_id)));
-        let apply = |name, substitution| {
-            TyData::Apply(chalk_ir::ApplicationTy { name, substitution }).intern(interner)
-        };
-        let int = |i| apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Int(i)), empty());
-        let uint = |i| apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Uint(i)), empty());
-        let float = |f| apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Float(f)), empty());
+        let int = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(i));
+        let uint = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Uint(i));
+        let float = |f| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Float(f));
 
         match *self.kind() {
-            Bool => apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Bool), empty()),
-            Char => apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Char), empty()),
-            Int(ty) => match ty {
+            ty::Bool => chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Bool),
+            ty::Char => chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Char),
+            ty::Int(ty) => match ty {
                 ast::IntTy::Isize => int(chalk_ir::IntTy::Isize),
                 ast::IntTy::I8 => int(chalk_ir::IntTy::I8),
                 ast::IntTy::I16 => int(chalk_ir::IntTy::I16),
@@ -258,7 +258,7 @@
                 ast::IntTy::I64 => int(chalk_ir::IntTy::I64),
                 ast::IntTy::I128 => int(chalk_ir::IntTy::I128),
             },
-            Uint(ty) => match ty {
+            ty::Uint(ty) => match ty {
                 ast::UintTy::Usize => uint(chalk_ir::UintTy::Usize),
                 ast::UintTy::U8 => uint(chalk_ir::UintTy::U8),
                 ast::UintTy::U16 => uint(chalk_ir::UintTy::U16),
@@ -266,80 +266,35 @@
                 ast::UintTy::U64 => uint(chalk_ir::UintTy::U64),
                 ast::UintTy::U128 => uint(chalk_ir::UintTy::U128),
             },
-            Float(ty) => match ty {
+            ty::Float(ty) => match ty {
                 ast::FloatTy::F32 => float(chalk_ir::FloatTy::F32),
                 ast::FloatTy::F64 => float(chalk_ir::FloatTy::F64),
             },
-            Adt(def, substs) => apply(struct_ty(def.did), substs.lower_into(interner)),
-            Foreign(def_id) => apply(chalk_ir::TypeName::Foreign(ForeignDefId(def_id)), empty()),
-            Str => apply(chalk_ir::TypeName::Str, empty()),
-            Array(ty, len) => {
-                let value = match len.val {
-                    ty::ConstKind::Value(val) => {
-                        chalk_ir::ConstValue::Concrete(chalk_ir::ConcreteConst { interned: val })
-                    }
-                    ty::ConstKind::Bound(db, bound) => {
-                        chalk_ir::ConstValue::BoundVar(chalk_ir::BoundVar::new(
-                            chalk_ir::DebruijnIndex::new(db.as_u32()),
-                            bound.index(),
-                        ))
-                    }
-                    _ => unimplemented!("Const not implemented. {:?}", len.val),
-                };
-                apply(
-                    chalk_ir::TypeName::Array,
-                    chalk_ir::Substitution::from_iter(
-                        interner,
-                        &[
-                            chalk_ir::GenericArgData::Ty(ty.lower_into(interner)).intern(interner),
-                            chalk_ir::GenericArgData::Const(
-                                chalk_ir::ConstData { ty: len.ty.lower_into(interner), value }
-                                    .intern(interner),
-                            )
-                            .intern(interner),
-                        ],
-                    ),
-                )
+            ty::Adt(def, substs) => {
+                chalk_ir::TyKind::Adt(chalk_ir::AdtId(def), substs.lower_into(interner))
             }
-            Slice(ty) => apply(
-                chalk_ir::TypeName::Slice,
-                chalk_ir::Substitution::from1(
-                    interner,
-                    chalk_ir::GenericArgData::Ty(ty.lower_into(interner)).intern(interner),
-                ),
+            ty::Foreign(def_id) => chalk_ir::TyKind::Foreign(ForeignDefId(def_id)),
+            ty::Str => chalk_ir::TyKind::Str,
+            ty::Array(ty, len) => {
+                chalk_ir::TyKind::Array(ty.lower_into(interner), len.lower_into(interner))
+            }
+            ty::Slice(ty) => chalk_ir::TyKind::Slice(ty.lower_into(interner)),
+
+            ty::RawPtr(ptr) => {
+                chalk_ir::TyKind::Raw(ptr.mutbl.lower_into(interner), ptr.ty.lower_into(interner))
+            }
+            ty::Ref(region, ty, mutability) => chalk_ir::TyKind::Ref(
+                mutability.lower_into(interner),
+                region.lower_into(interner),
+                ty.lower_into(interner),
             ),
-            RawPtr(ptr) => {
-                let name = match ptr.mutbl {
-                    ast::Mutability::Mut => chalk_ir::TypeName::Raw(chalk_ir::Mutability::Mut),
-                    ast::Mutability::Not => chalk_ir::TypeName::Raw(chalk_ir::Mutability::Not),
-                };
-                apply(name, chalk_ir::Substitution::from1(interner, ptr.ty.lower_into(interner)))
+            ty::FnDef(def_id, substs) => {
+                chalk_ir::TyKind::FnDef(chalk_ir::FnDefId(def_id), substs.lower_into(interner))
             }
-            Ref(region, ty, mutability) => {
-                let name = match mutability {
-                    ast::Mutability::Mut => chalk_ir::TypeName::Ref(chalk_ir::Mutability::Mut),
-                    ast::Mutability::Not => chalk_ir::TypeName::Ref(chalk_ir::Mutability::Not),
-                };
-                apply(
-                    name,
-                    chalk_ir::Substitution::from_iter(
-                        interner,
-                        &[
-                            chalk_ir::GenericArgData::Lifetime(region.lower_into(interner))
-                                .intern(interner),
-                            chalk_ir::GenericArgData::Ty(ty.lower_into(interner)).intern(interner),
-                        ],
-                    ),
-                )
-            }
-            FnDef(def_id, substs) => apply(
-                chalk_ir::TypeName::FnDef(chalk_ir::FnDefId(def_id)),
-                substs.lower_into(interner),
-            ),
-            FnPtr(sig) => {
+            ty::FnPtr(sig) => {
                 let (inputs_and_outputs, binders, _named_regions) =
                     collect_bound_vars(interner, interner.tcx, &sig.inputs_and_output());
-                TyData::Function(chalk_ir::FnPointer {
+                chalk_ir::TyKind::Function(chalk_ir::FnPointer {
                     num_binders: binders.len(interner),
                     sig: sig.lower_into(interner),
                     substitution: chalk_ir::Substitution::from_iter(
@@ -349,151 +304,133 @@
                         }),
                     ),
                 })
-                .intern(interner)
             }
-            Dynamic(predicates, region) => TyData::Dyn(chalk_ir::DynTy {
+            ty::Dynamic(predicates, region) => chalk_ir::TyKind::Dyn(chalk_ir::DynTy {
                 bounds: predicates.lower_into(interner),
                 lifetime: region.lower_into(interner),
-            })
-            .intern(interner),
-            Closure(def_id, substs) => apply(
-                chalk_ir::TypeName::Closure(chalk_ir::ClosureId(def_id)),
-                substs.lower_into(interner),
-            ),
-            Generator(_def_id, _substs, _) => unimplemented!(),
-            GeneratorWitness(_) => unimplemented!(),
-            Never => apply(chalk_ir::TypeName::Never, empty()),
-            Tuple(substs) => {
-                apply(chalk_ir::TypeName::Tuple(substs.len()), substs.lower_into(interner))
+            }),
+            ty::Closure(def_id, substs) => {
+                chalk_ir::TyKind::Closure(chalk_ir::ClosureId(def_id), substs.lower_into(interner))
             }
-            Projection(proj) => TyData::Alias(proj.lower_into(interner)).intern(interner),
-            Opaque(def_id, substs) => {
-                TyData::Alias(chalk_ir::AliasTy::Opaque(chalk_ir::OpaqueTy {
+            ty::Generator(_def_id, _substs, _) => unimplemented!(),
+            ty::GeneratorWitness(_) => unimplemented!(),
+            ty::Never => chalk_ir::TyKind::Never,
+            ty::Tuple(substs) => chalk_ir::TyKind::Tuple(substs.len(), substs.lower_into(interner)),
+            ty::Projection(proj) => chalk_ir::TyKind::Alias(proj.lower_into(interner)),
+            ty::Opaque(def_id, substs) => {
+                chalk_ir::TyKind::Alias(chalk_ir::AliasTy::Opaque(chalk_ir::OpaqueTy {
                     opaque_ty_id: chalk_ir::OpaqueTyId(def_id),
                     substitution: substs.lower_into(interner),
                 }))
-                .intern(interner)
             }
             // This should have been done eagerly prior to this, and all Params
             // should have been substituted to placeholders
-            Param(_) => panic!("Lowering Param when not expected."),
-            Bound(db, bound) => TyData::BoundVar(chalk_ir::BoundVar::new(
+            ty::Param(_) => panic!("Lowering Param when not expected."),
+            ty::Bound(db, bound) => chalk_ir::TyKind::BoundVar(chalk_ir::BoundVar::new(
                 chalk_ir::DebruijnIndex::new(db.as_u32()),
                 bound.var.index(),
-            ))
-            .intern(interner),
-            Placeholder(_placeholder) => TyData::Placeholder(chalk_ir::PlaceholderIndex {
-                ui: chalk_ir::UniverseIndex { counter: _placeholder.universe.as_usize() },
-                idx: _placeholder.name.as_usize(),
-            })
-            .intern(interner),
-            Infer(_infer) => unimplemented!(),
-            Error(_) => apply(chalk_ir::TypeName::Error, empty()),
+            )),
+            ty::Placeholder(_placeholder) => {
+                chalk_ir::TyKind::Placeholder(chalk_ir::PlaceholderIndex {
+                    ui: chalk_ir::UniverseIndex { counter: _placeholder.universe.as_usize() },
+                    idx: _placeholder.name.as_usize(),
+                })
+            }
+            ty::Infer(_infer) => unimplemented!(),
+            ty::Error(_) => chalk_ir::TyKind::Error,
         }
+        .intern(interner)
     }
 }
 
 impl<'tcx> LowerInto<'tcx, Ty<'tcx>> for &chalk_ir::Ty<RustInterner<'tcx>> {
     fn lower_into(self, interner: &RustInterner<'tcx>) -> Ty<'tcx> {
-        use chalk_ir::TyData;
-        use rustc_ast::ast;
+        use chalk_ir::TyKind;
 
-        let kind = match self.data(interner) {
-            TyData::Apply(application_ty) => match application_ty.name {
-                chalk_ir::TypeName::Adt(struct_id) => {
-                    ty::Adt(struct_id.0, application_ty.substitution.lower_into(interner))
-                }
-                chalk_ir::TypeName::Scalar(scalar) => match scalar {
-                    chalk_ir::Scalar::Bool => ty::Bool,
-                    chalk_ir::Scalar::Char => ty::Char,
-                    chalk_ir::Scalar::Int(int_ty) => match int_ty {
-                        chalk_ir::IntTy::Isize => ty::Int(ast::IntTy::Isize),
-                        chalk_ir::IntTy::I8 => ty::Int(ast::IntTy::I8),
-                        chalk_ir::IntTy::I16 => ty::Int(ast::IntTy::I16),
-                        chalk_ir::IntTy::I32 => ty::Int(ast::IntTy::I32),
-                        chalk_ir::IntTy::I64 => ty::Int(ast::IntTy::I64),
-                        chalk_ir::IntTy::I128 => ty::Int(ast::IntTy::I128),
-                    },
-                    chalk_ir::Scalar::Uint(int_ty) => match int_ty {
-                        chalk_ir::UintTy::Usize => ty::Uint(ast::UintTy::Usize),
-                        chalk_ir::UintTy::U8 => ty::Uint(ast::UintTy::U8),
-                        chalk_ir::UintTy::U16 => ty::Uint(ast::UintTy::U16),
-                        chalk_ir::UintTy::U32 => ty::Uint(ast::UintTy::U32),
-                        chalk_ir::UintTy::U64 => ty::Uint(ast::UintTy::U64),
-                        chalk_ir::UintTy::U128 => ty::Uint(ast::UintTy::U128),
-                    },
-                    chalk_ir::Scalar::Float(float_ty) => match float_ty {
-                        chalk_ir::FloatTy::F32 => ty::Float(ast::FloatTy::F32),
-                        chalk_ir::FloatTy::F64 => ty::Float(ast::FloatTy::F64),
-                    },
+        let kind = match self.kind(interner) {
+            TyKind::Adt(struct_id, substitution) => {
+                ty::Adt(struct_id.0, substitution.lower_into(interner))
+            }
+            TyKind::Scalar(scalar) => match scalar {
+                chalk_ir::Scalar::Bool => ty::Bool,
+                chalk_ir::Scalar::Char => ty::Char,
+                chalk_ir::Scalar::Int(int_ty) => match int_ty {
+                    chalk_ir::IntTy::Isize => ty::Int(ast::IntTy::Isize),
+                    chalk_ir::IntTy::I8 => ty::Int(ast::IntTy::I8),
+                    chalk_ir::IntTy::I16 => ty::Int(ast::IntTy::I16),
+                    chalk_ir::IntTy::I32 => ty::Int(ast::IntTy::I32),
+                    chalk_ir::IntTy::I64 => ty::Int(ast::IntTy::I64),
+                    chalk_ir::IntTy::I128 => ty::Int(ast::IntTy::I128),
                 },
-                chalk_ir::TypeName::Array => unimplemented!(),
-                chalk_ir::TypeName::FnDef(id) => {
-                    ty::FnDef(id.0, application_ty.substitution.lower_into(interner))
-                }
-                chalk_ir::TypeName::Closure(closure) => {
-                    ty::Closure(closure.0, application_ty.substitution.lower_into(interner))
-                }
-                chalk_ir::TypeName::Never => ty::Never,
-                chalk_ir::TypeName::Tuple(_size) => {
-                    ty::Tuple(application_ty.substitution.lower_into(interner))
-                }
-                chalk_ir::TypeName::Slice => ty::Slice(
-                    application_ty.substitution.as_slice(interner)[0]
-                        .ty(interner)
-                        .unwrap()
-                        .lower_into(interner),
-                ),
-                chalk_ir::TypeName::Raw(mutbl) => ty::RawPtr(ty::TypeAndMut {
-                    ty: application_ty.substitution.as_slice(interner)[0]
-                        .ty(interner)
-                        .unwrap()
-                        .lower_into(interner),
-                    mutbl: match mutbl {
-                        chalk_ir::Mutability::Mut => ast::Mutability::Mut,
-                        chalk_ir::Mutability::Not => ast::Mutability::Not,
-                    },
-                }),
-                chalk_ir::TypeName::Ref(mutbl) => ty::Ref(
-                    application_ty.substitution.as_slice(interner)[0]
-                        .lifetime(interner)
-                        .unwrap()
-                        .lower_into(interner),
-                    application_ty.substitution.as_slice(interner)[1]
-                        .ty(interner)
-                        .unwrap()
-                        .lower_into(interner),
-                    match mutbl {
-                        chalk_ir::Mutability::Mut => ast::Mutability::Mut,
-                        chalk_ir::Mutability::Not => ast::Mutability::Not,
-                    },
-                ),
-                chalk_ir::TypeName::Str => ty::Str,
-                chalk_ir::TypeName::OpaqueType(opaque_ty) => {
-                    ty::Opaque(opaque_ty.0, application_ty.substitution.lower_into(interner))
-                }
-                chalk_ir::TypeName::AssociatedType(assoc_ty) => ty::Projection(ty::ProjectionTy {
-                    substs: application_ty.substitution.lower_into(interner),
-                    item_def_id: assoc_ty.0,
-                }),
-                chalk_ir::TypeName::Foreign(def_id) => ty::Foreign(def_id.0),
-                chalk_ir::TypeName::Error => unimplemented!(),
+                chalk_ir::Scalar::Uint(int_ty) => match int_ty {
+                    chalk_ir::UintTy::Usize => ty::Uint(ast::UintTy::Usize),
+                    chalk_ir::UintTy::U8 => ty::Uint(ast::UintTy::U8),
+                    chalk_ir::UintTy::U16 => ty::Uint(ast::UintTy::U16),
+                    chalk_ir::UintTy::U32 => ty::Uint(ast::UintTy::U32),
+                    chalk_ir::UintTy::U64 => ty::Uint(ast::UintTy::U64),
+                    chalk_ir::UintTy::U128 => ty::Uint(ast::UintTy::U128),
+                },
+                chalk_ir::Scalar::Float(float_ty) => match float_ty {
+                    chalk_ir::FloatTy::F32 => ty::Float(ast::FloatTy::F32),
+                    chalk_ir::FloatTy::F64 => ty::Float(ast::FloatTy::F64),
+                },
             },
-            TyData::Placeholder(placeholder) => ty::Placeholder(ty::Placeholder {
+            TyKind::Array(ty, c) => {
+                let ty = ty.lower_into(interner);
+                let c = c.lower_into(interner);
+                ty::Array(ty, interner.tcx.mk_const(c))
+            }
+            TyKind::FnDef(id, substitution) => ty::FnDef(id.0, substitution.lower_into(interner)),
+            TyKind::Closure(closure, substitution) => {
+                ty::Closure(closure.0, substitution.lower_into(interner))
+            }
+            TyKind::Generator(..) => unimplemented!(),
+            TyKind::GeneratorWitness(..) => unimplemented!(),
+            TyKind::Never => ty::Never,
+            TyKind::Tuple(_len, substitution) => ty::Tuple(substitution.lower_into(interner)),
+            TyKind::Slice(ty) => ty::Slice(ty.lower_into(interner)),
+            TyKind::Raw(mutbl, ty) => ty::RawPtr(ty::TypeAndMut {
+                ty: ty.lower_into(interner),
+                mutbl: mutbl.lower_into(interner),
+            }),
+            TyKind::Ref(mutbl, lifetime, ty) => ty::Ref(
+                lifetime.lower_into(interner),
+                ty.lower_into(interner),
+                mutbl.lower_into(interner),
+            ),
+            TyKind::Str => ty::Str,
+            TyKind::OpaqueType(opaque_ty, substitution) => {
+                ty::Opaque(opaque_ty.0, substitution.lower_into(interner))
+            }
+            TyKind::AssociatedType(assoc_ty, substitution) => ty::Projection(ty::ProjectionTy {
+                substs: substitution.lower_into(interner),
+                item_def_id: assoc_ty.0,
+            }),
+            TyKind::Foreign(def_id) => ty::Foreign(def_id.0),
+            TyKind::Error => return interner.tcx.ty_error(),
+            TyKind::Placeholder(placeholder) => ty::Placeholder(ty::Placeholder {
                 universe: ty::UniverseIndex::from_usize(placeholder.ui.counter),
                 name: ty::BoundVar::from_usize(placeholder.idx),
             }),
-            TyData::Alias(_alias_ty) => unimplemented!(),
-            TyData::Function(_quantified_ty) => unimplemented!(),
-            TyData::BoundVar(_bound) => ty::Bound(
+            TyKind::Alias(alias_ty) => match alias_ty {
+                chalk_ir::AliasTy::Projection(projection) => ty::Projection(ty::ProjectionTy {
+                    item_def_id: projection.associated_ty_id.0,
+                    substs: projection.substitution.lower_into(interner),
+                }),
+                chalk_ir::AliasTy::Opaque(opaque) => {
+                    ty::Opaque(opaque.opaque_ty_id.0, opaque.substitution.lower_into(interner))
+                }
+            },
+            TyKind::Function(_quantified_ty) => unimplemented!(),
+            TyKind::BoundVar(_bound) => ty::Bound(
                 ty::DebruijnIndex::from_usize(_bound.debruijn.depth() as usize),
                 ty::BoundTy {
                     var: ty::BoundVar::from_usize(_bound.index),
                     kind: ty::BoundTyKind::Anon,
                 },
             ),
-            TyData::InferenceVar(_, _) => unimplemented!(),
-            TyData::Dyn(_) => unimplemented!(),
+            TyKind::InferenceVar(_, _) => unimplemented!(),
+            TyKind::Dyn(_) => unimplemented!(),
         };
         interner.tcx.mk_ty(kind)
     }
@@ -519,8 +456,7 @@
                 ty::BrEnv => unimplemented!(),
             },
             ReFree(_) => unimplemented!(),
-            // FIXME(chalk): need to handle ReStatic
-            ReStatic => unimplemented!(),
+            ReStatic => chalk_ir::LifetimeData::Static.intern(interner),
             ReVar(_) => unimplemented!(),
             RePlaceholder(placeholder_region) => {
                 chalk_ir::LifetimeData::Placeholder(chalk_ir::PlaceholderIndex {
@@ -550,6 +486,7 @@
                     name: ty::BoundRegion::BrAnon(p.idx as u32),
                 })
             }
+            chalk_ir::LifetimeData::Static => ty::RegionKind::ReStatic,
             chalk_ir::LifetimeData::Phantom(_, _) => unimplemented!(),
         };
         interner.tcx.mk_region(kind)
@@ -638,8 +575,11 @@
         self,
         interner: &RustInterner<'tcx>,
     ) -> Option<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>> {
-        let (predicate, binders, _named_regions) =
-            collect_bound_vars(interner, interner.tcx, &self.bound_atom(interner.tcx));
+        let (predicate, binders, _named_regions) = collect_bound_vars(
+            interner,
+            interner.tcx,
+            &self.bound_atom_with_opt_escaping(interner.tcx),
+        );
         let value = match predicate {
             ty::PredicateAtom::Trait(predicate, _) => {
                 Some(chalk_ir::WhereClause::Implemented(predicate.trait_ref.lower_into(interner)))
@@ -681,8 +621,16 @@
         self,
         interner: &RustInterner<'tcx>,
     ) -> chalk_ir::Binders<chalk_ir::QuantifiedWhereClauses<RustInterner<'tcx>>> {
+        // `Self` has one binder:
+        // Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>
+        // The return type has two:
+        // Binders<&[Binders<WhereClause<I>>]>
+        // This means that any variables that are escaping `self` need to be
+        // shifted in by one so that they are still escaping.
+        let shifted_predicates = ty::fold::shift_vars(interner.tcx, &self, 1);
+
         let (predicates, binders, _named_regions) =
-            collect_bound_vars(interner, interner.tcx, &self);
+            collect_bound_vars(interner, interner.tcx, &shifted_predicates);
         let self_ty = interner.tcx.mk_ty(ty::Bound(
             // This is going to be wrapped in a binder
             ty::DebruijnIndex::from_usize(1),
@@ -691,7 +639,7 @@
         let where_clauses = predicates.into_iter().map(|predicate| match predicate {
             ty::ExistentialPredicate::Trait(ty::ExistentialTraitRef { def_id, substs }) => {
                 chalk_ir::Binders::new(
-                    chalk_ir::VariableKinds::empty(interner),
+                    binders.clone(),
                     chalk_ir::WhereClause::Implemented(chalk_ir::TraitRef {
                         trait_id: chalk_ir::TraitId(def_id),
                         substitution: interner
@@ -701,17 +649,35 @@
                     }),
                 )
             }
-            ty::ExistentialPredicate::Projection(_predicate) => unimplemented!(),
+            ty::ExistentialPredicate::Projection(predicate) => chalk_ir::Binders::new(
+                binders.clone(),
+                chalk_ir::WhereClause::AliasEq(chalk_ir::AliasEq {
+                    alias: chalk_ir::AliasTy::Projection(chalk_ir::ProjectionTy {
+                        associated_ty_id: chalk_ir::AssocTypeId(predicate.item_def_id),
+                        substitution: interner
+                            .tcx
+                            .mk_substs_trait(self_ty, predicate.substs)
+                            .lower_into(interner),
+                    }),
+                    ty: predicate.ty.lower_into(interner),
+                }),
+            ),
             ty::ExistentialPredicate::AutoTrait(def_id) => chalk_ir::Binders::new(
-                chalk_ir::VariableKinds::empty(interner),
+                binders.clone(),
                 chalk_ir::WhereClause::Implemented(chalk_ir::TraitRef {
                     trait_id: chalk_ir::TraitId(def_id),
                     substitution: interner.tcx.mk_substs_trait(self_ty, &[]).lower_into(interner),
                 }),
             ),
         });
+
+        // Binder for the bound variable representing the concrete underlying type.
+        let existential_binder = chalk_ir::VariableKinds::from1(
+            interner,
+            chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General),
+        );
         let value = chalk_ir::QuantifiedWhereClauses::from_iter(interner, where_clauses);
-        chalk_ir::Binders::new(binders, value)
+        chalk_ir::Binders::new(existential_binder, value)
     }
 }
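Note on the binder shifting in the hunk above: it is ordinary de Bruijn index adjustment. When a value is wrapped in one additional binder, every variable that escapes the value must be bumped so it still refers to the same outer binder. Below is a minimal, self-contained sketch of that idea over a toy term type; `Term` and `shift` are illustrative stand-ins and not rustc's `ty::fold::shift_vars`.

    // Simplified de Bruijn model: `Bound(i)` counts how many binders sit
    // between the variable and the binder it refers to. Adding one more
    // enclosing binder means every variable pointing *outside* the term
    // ("escaping") must be bumped by `amount` to keep its referent.
    #[derive(Debug, PartialEq)]
    enum Term {
        Bound(usize),
        Lam(Box<Term>),
    }

    fn shift(t: &Term, depth: usize, amount: usize) -> Term {
        match t {
            // Escaping variable: bump it.
            Term::Bound(i) if *i >= depth => Term::Bound(*i + amount),
            // Variable bound inside the term: leave it alone.
            Term::Bound(i) => Term::Bound(*i),
            // Entering a binder raises the threshold for "escaping".
            Term::Lam(body) => Term::Lam(Box::new(shift(body, depth + 1, amount))),
        }
    }

    fn main() {
        // `Bound(0)` escapes this term; under one extra binder it must become
        // `Bound(1)` to keep referring to the original, outer binder.
        assert_eq!(shift(&Term::Bound(0), 0, 1), Term::Bound(1));
        // A variable bound under the Lam is untouched.
        let closed = Term::Lam(Box::new(Term::Bound(0)));
        assert_eq!(shift(&closed, 0, 1), Term::Lam(Box::new(Term::Bound(0))));
    }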
 
@@ -728,6 +694,111 @@
     }
 }
 
+// We lower into an Option here since there are some predicates which Chalk
+// doesn't have a representation for yet (as an `InlineBound`). The `Option` will
+// eventually be removed.
+impl<'tcx> LowerInto<'tcx, Option<chalk_solve::rust_ir::QuantifiedInlineBound<RustInterner<'tcx>>>>
+    for ty::Predicate<'tcx>
+{
+    fn lower_into(
+        self,
+        interner: &RustInterner<'tcx>,
+    ) -> Option<chalk_solve::rust_ir::QuantifiedInlineBound<RustInterner<'tcx>>> {
+        let (predicate, binders, _named_regions) = collect_bound_vars(
+            interner,
+            interner.tcx,
+            &self.bound_atom_with_opt_escaping(interner.tcx),
+        );
+        match predicate {
+            ty::PredicateAtom::Trait(predicate, _) => Some(chalk_ir::Binders::new(
+                binders,
+                chalk_solve::rust_ir::InlineBound::TraitBound(
+                    predicate.trait_ref.lower_into(interner),
+                ),
+            )),
+            ty::PredicateAtom::Projection(predicate) => Some(chalk_ir::Binders::new(
+                binders,
+                chalk_solve::rust_ir::InlineBound::AliasEqBound(predicate.lower_into(interner)),
+            )),
+            ty::PredicateAtom::TypeOutlives(_predicate) => None,
+            ty::PredicateAtom::WellFormed(_ty) => None,
+
+            ty::PredicateAtom::RegionOutlives(..)
+            | ty::PredicateAtom::ObjectSafe(..)
+            | ty::PredicateAtom::ClosureKind(..)
+            | ty::PredicateAtom::Subtype(..)
+            | ty::PredicateAtom::ConstEvaluatable(..)
+            | ty::PredicateAtom::ConstEquate(..)
+            | ty::PredicateAtom::TypeWellFormedFromEnv(..) => {
+                bug!("unexpected predicate {}", &self)
+            }
+        }
+    }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::TraitBound<RustInterner<'tcx>>>
+    for ty::TraitRef<'tcx>
+{
+    fn lower_into(
+        self,
+        interner: &RustInterner<'tcx>,
+    ) -> chalk_solve::rust_ir::TraitBound<RustInterner<'tcx>> {
+        chalk_solve::rust_ir::TraitBound {
+            trait_id: chalk_ir::TraitId(self.def_id),
+            args_no_self: self.substs[1..].iter().map(|arg| arg.lower_into(interner)).collect(),
+        }
+    }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::Mutability> for ast::Mutability {
+    fn lower_into(self, _interner: &RustInterner<'tcx>) -> chalk_ir::Mutability {
+        match self {
+            rustc_ast::Mutability::Mut => chalk_ir::Mutability::Mut,
+            rustc_ast::Mutability::Not => chalk_ir::Mutability::Not,
+        }
+    }
+}
+
+impl<'tcx> LowerInto<'tcx, ast::Mutability> for chalk_ir::Mutability {
+    fn lower_into(self, _interner: &RustInterner<'tcx>) -> ast::Mutability {
+        match self {
+            chalk_ir::Mutability::Mut => ast::Mutability::Mut,
+            chalk_ir::Mutability::Not => ast::Mutability::Not,
+        }
+    }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::Polarity> for ty::ImplPolarity {
+    fn lower_into(self, _interner: &RustInterner<'tcx>) -> chalk_solve::rust_ir::Polarity {
+        match self {
+            ty::ImplPolarity::Positive => chalk_solve::rust_ir::Polarity::Positive,
+            ty::ImplPolarity::Negative => chalk_solve::rust_ir::Polarity::Negative,
+            // FIXME(chalk) reservation impls
+            ty::ImplPolarity::Reservation => chalk_solve::rust_ir::Polarity::Negative,
+        }
+    }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::AliasEqBound<RustInterner<'tcx>>>
+    for ty::ProjectionPredicate<'tcx>
+{
+    fn lower_into(
+        self,
+        interner: &RustInterner<'tcx>,
+    ) -> chalk_solve::rust_ir::AliasEqBound<RustInterner<'tcx>> {
+        let trait_ref = self.projection_ty.trait_ref(interner.tcx);
+        chalk_solve::rust_ir::AliasEqBound {
+            trait_bound: trait_ref.lower_into(interner),
+            associated_ty_id: chalk_ir::AssocTypeId(self.projection_ty.item_def_id),
+            parameters: self.projection_ty.substs[trait_ref.substs.len()..]
+                .iter()
+                .map(|arg| arg.lower_into(interner))
+                .collect(),
+            value: self.ty.lower_into(interner),
+        }
+    }
+}
+
 /// To collect bound vars, we have to do two passes. In the first pass, we
 /// collect all `BoundRegion`s and `ty::Bound`s. In the second pass, we then
 /// replace `BrNamed` into `BrAnon`. The two separate passes are important,
@@ -788,19 +859,19 @@
 }
 
 impl<'tcx> TypeVisitor<'tcx> for BoundVarsCollector<'tcx> {
-    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> ControlFlow<()> {
         self.binder_index.shift_in(1);
         let result = t.super_visit_with(self);
         self.binder_index.shift_out(1);
         result
     }
 
-    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
         match *t.kind() {
             ty::Bound(debruijn, bound_ty) if debruijn == self.binder_index => {
                 match self.parameters.entry(bound_ty.var.as_u32()) {
                     Entry::Vacant(entry) => {
-                        entry.insert(chalk_ir::VariableKind::Ty(chalk_ir::TyKind::General));
+                        entry.insert(chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General));
                     }
                     Entry::Occupied(entry) => match entry.get() {
                         chalk_ir::VariableKind::Ty(_) => {}
@@ -815,7 +886,7 @@
         t.super_visit_with(self)
     }
 
-    fn visit_region(&mut self, r: Region<'tcx>) -> bool {
+    fn visit_region(&mut self, r: Region<'tcx>) -> ControlFlow<()> {
         match r {
             ty::ReLateBound(index, br) if *index == self.binder_index => match br {
                 ty::BoundRegion::BrNamed(def_id, _name) => {
@@ -1005,7 +1076,7 @@
 }
 
 impl<'tcx> TypeVisitor<'tcx> for PlaceholdersCollector {
-    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
         match t.kind() {
             ty::Placeholder(p) if p.universe == self.universe_index => {
                 self.next_ty_placeholder = self.next_ty_placeholder.max(p.name.as_usize() + 1);
@@ -1017,7 +1088,7 @@
         t.super_visit_with(self)
     }
 
-    fn visit_region(&mut self, r: Region<'tcx>) -> bool {
+    fn visit_region(&mut self, r: Region<'tcx>) -> ControlFlow<()> {
         match r {
             ty::RePlaceholder(p) if p.universe == self.universe_index => {
                 if let ty::BoundRegion::BrAnon(anon) = p.name {
@@ -1035,17 +1106,12 @@
 /// Used to substitute specific `Regions`s with placeholders.
 crate struct RegionsSubstitutor<'tcx> {
     tcx: TyCtxt<'tcx>,
-    restatic_placeholder: ty::Region<'tcx>,
     reempty_placeholder: ty::Region<'tcx>,
 }
 
 impl<'tcx> RegionsSubstitutor<'tcx> {
-    crate fn new(
-        tcx: TyCtxt<'tcx>,
-        restatic_placeholder: ty::Region<'tcx>,
-        reempty_placeholder: ty::Region<'tcx>,
-    ) -> Self {
-        RegionsSubstitutor { tcx, restatic_placeholder, reempty_placeholder }
+    crate fn new(tcx: TyCtxt<'tcx>, reempty_placeholder: ty::Region<'tcx>) -> Self {
+        RegionsSubstitutor { tcx, reempty_placeholder }
     }
 }
 
@@ -1056,7 +1122,6 @@
 
     fn fold_region(&mut self, r: Region<'tcx>) -> Region<'tcx> {
         match r {
-            ty::ReStatic => self.restatic_placeholder,
             ty::ReEmpty(ui) => {
                 assert_eq!(ui.as_usize(), 0);
                 self.reempty_placeholder
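The visitor hunks above change `visit_binder`/`visit_ty`/`visit_region` from returning `bool` to returning `std::ops::ControlFlow<()>`, so "stop walking" becomes `Break` and "keep going" becomes `Continue`. The `ControlFlow::BREAK`/`CONTINUE` constants used in the patch are the `#![feature(control_flow_enum)]`-gated shorthand for `Break(())`/`Continue(())`. Here is a small stable-Rust sketch of the same pattern over a toy tree, not rustc's `TypeVisitor`:

    use std::ops::ControlFlow;

    enum Tree {
        Leaf(u32),
        Node(Vec<Tree>),
    }

    // Stop at the first leaf equal to `target`, carrying it out in `Break`.
    fn find(tree: &Tree, target: u32) -> ControlFlow<u32> {
        match tree {
            Tree::Leaf(v) if *v == target => ControlFlow::Break(*v),
            Tree::Leaf(_) => ControlFlow::Continue(()),
            Tree::Node(children) => {
                for child in children {
                    let flow = find(child, target);
                    if flow.is_break() {
                        // Propagate the short-circuit, as the visitors do.
                        return flow;
                    }
                }
                ControlFlow::Continue(())
            }
        }
    }

    fn main() {
        let tree = Tree::Node(vec![
            Tree::Leaf(1),
            Tree::Node(vec![Tree::Leaf(7), Tree::Leaf(3)]),
        ]);
        assert_eq!(find(&tree, 7), ControlFlow::Break(7));
        assert!(find(&tree, 9).is_continue());
    }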
diff --git a/compiler/rustc_traits/src/chalk/mod.rs b/compiler/rustc_traits/src/chalk/mod.rs
index 63c5b88..b117e28 100644
--- a/compiler/rustc_traits/src/chalk/mod.rs
+++ b/compiler/rustc_traits/src/chalk/mod.rs
@@ -42,10 +42,6 @@
     let mut placeholders_collector = PlaceholdersCollector::new();
     obligation.visit_with(&mut placeholders_collector);
 
-    let restatic_placeholder = tcx.mk_region(ty::RegionKind::RePlaceholder(ty::Placeholder {
-        universe: ty::UniverseIndex::ROOT,
-        name: ty::BoundRegion::BrAnon(placeholders_collector.next_anon_region_placeholder),
-    }));
     let reempty_placeholder = tcx.mk_region(ty::RegionKind::RePlaceholder(ty::Placeholder {
         universe: ty::UniverseIndex::ROOT,
         name: ty::BoundRegion::BrAnon(placeholders_collector.next_anon_region_placeholder + 1),
@@ -57,8 +53,7 @@
     // FIXME(chalk): we really should be substituting these back in the solution
     let _params: FxHashMap<usize, ParamTy> = params_substitutor.params;
 
-    let mut regions_substitutor =
-        RegionsSubstitutor::new(tcx, restatic_placeholder, reempty_placeholder);
+    let mut regions_substitutor = RegionsSubstitutor::new(tcx, reempty_placeholder);
     let obligation = obligation.fold_with(&mut regions_substitutor);
 
     let max_universe = obligation.max_universe.index();
@@ -74,15 +69,15 @@
                     CanonicalVarKind::PlaceholderRegion(_ui) => unimplemented!(),
                     CanonicalVarKind::Ty(ty) => match ty {
                         CanonicalTyVarKind::General(ui) => chalk_ir::WithKind::new(
-                            chalk_ir::VariableKind::Ty(chalk_ir::TyKind::General),
+                            chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General),
                             chalk_ir::UniverseIndex { counter: ui.index() },
                         ),
                         CanonicalTyVarKind::Int => chalk_ir::WithKind::new(
-                            chalk_ir::VariableKind::Ty(chalk_ir::TyKind::Integer),
+                            chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::Integer),
                             chalk_ir::UniverseIndex::root(),
                         ),
                         CanonicalTyVarKind::Float => chalk_ir::WithKind::new(
-                            chalk_ir::VariableKind::Ty(chalk_ir::TyKind::Float),
+                            chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::Float),
                             chalk_ir::UniverseIndex::root(),
                         ),
                     },
@@ -101,8 +96,9 @@
 
     use chalk_solve::Solver;
     let mut solver = chalk_engine::solve::SLGSolver::new(32, None);
-    let db = ChalkRustIrDatabase { interner, restatic_placeholder, reempty_placeholder };
-    let solution = chalk_solve::logging::with_tracing_logs(|| solver.solve(&db, &lowered_goal));
+    let db = ChalkRustIrDatabase { interner, reempty_placeholder };
+    let solution = solver.solve(&db, &lowered_goal);
+    debug!(?obligation, ?solution, "evaluate goal");
 
     // Ideally, the code to convert *back* to rustc types would live close to
     // the code to convert *from* rustc types. Right now though, we don't
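The new `debug!(?obligation, ?solution, "evaluate goal")` line uses the `tracing` field-capture syntax (`?name` records a field with its `Debug` formatting), which rustc re-exports for its internal logging. A standalone sketch of that syntax, assuming the `tracing` and `tracing-subscriber` crates as dependencies:

    use tracing::debug;

    fn main() {
        // Some subscriber is needed for the event to be printed at all.
        tracing_subscriber::fmt()
            .with_max_level(tracing::Level::DEBUG)
            .init();

        let obligation = "Implemented(T: Clone)";
        let solution: Option<&str> = Some("Unique");
        // `?field` attaches the value via its Debug impl, in addition to the message.
        debug!(?obligation, ?solution, "evaluate goal");
    }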
diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs
index 3ee391d..6cffa6d 100644
--- a/compiler/rustc_traits/src/dropck_outlives.rs
+++ b/compiler/rustc_traits/src/dropck_outlives.rs
@@ -210,12 +210,25 @@
             Ok::<_, NoSolution>(())
         })?,
 
-        ty::Closure(_, substs) => rustc_data_structures::stack::ensure_sufficient_stack(|| {
-            for ty in substs.as_closure().upvar_tys() {
-                dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty, constraints)?;
+        ty::Closure(_, substs) => {
+            if !substs.as_closure().is_valid() {
+                // By the time this code runs, all type variables ought to
+                // be fully resolved.
+
+                tcx.sess.delay_span_bug(
+                    span,
+                    &format!("upvar_tys for closure not found. Expected capture information for closure {}", ty,),
+                );
+                return Err(NoSolution);
             }
-            Ok::<_, NoSolution>(())
-        })?,
+
+            rustc_data_structures::stack::ensure_sufficient_stack(|| {
+                for ty in substs.as_closure().upvar_tys() {
+                    dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty, constraints)?;
+                }
+                Ok::<_, NoSolution>(())
+            })?
+        }
 
         ty::Generator(_, substs, _movability) => {
             // rust-lang/rust#49918: types can be constructed, stored
@@ -241,6 +254,16 @@
             // derived from lifetimes attached to the upvars and resume
             // argument, and we *do* incorporate those here.
 
+            if !substs.as_generator().is_valid() {
+                // By the time this code runs, all type variables ought to
+                // be fully resolved.
+                tcx.sess.delay_span_bug(
+                    span,
+                    &format!("upvar_tys for generator not found. Expected capture information for generator {}", ty,),
+                );
+                return Err(NoSolution);
+            }
+
             constraints.outlives.extend(
                 substs
                     .as_generator()
diff --git a/compiler/rustc_traits/src/implied_outlives_bounds.rs b/compiler/rustc_traits/src/implied_outlives_bounds.rs
index 79308b0..c44fd1d 100644
--- a/compiler/rustc_traits/src/implied_outlives_bounds.rs
+++ b/compiler/rustc_traits/src/implied_outlives_bounds.rs
@@ -61,8 +61,8 @@
         // than the ultimate set. (Note: normally there won't be
         // unresolved inference variables here anyway, but there might be
         // during typeck under some circumstances.)
-        let obligations =
-            wf::obligations(infcx, param_env, hir::CRATE_HIR_ID, arg, DUMMY_SP).unwrap_or(vec![]);
+        let obligations = wf::obligations(infcx, param_env, hir::CRATE_HIR_ID, 0, arg, DUMMY_SP)
+            .unwrap_or_default();
 
         // N.B., all of these predicates *ought* to be easily proven
         // true. In fact, their correctness is (mostly) implied by
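The hunk above also swaps `unwrap_or(vec![])` for `unwrap_or_default()`: the fallback is no longer written out (and evaluated) at the call site, it is produced by `Vec::default()` only when the `Option` is `None`. A trivial illustration:

    fn main() {
        let found: Option<Vec<u32>> = None;
        // Equivalent result to `found.unwrap_or(vec![])`, but the empty Vec is
        // only constructed in the None case, via Default.
        let obligations = found.unwrap_or_default();
        assert!(obligations.is_empty());
    }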
diff --git a/compiler/rustc_traits/src/lib.rs b/compiler/rustc_traits/src/lib.rs
index d0b05be..7b688cd 100644
--- a/compiler/rustc_traits/src/lib.rs
+++ b/compiler/rustc_traits/src/lib.rs
@@ -4,6 +4,7 @@
 #![feature(crate_visibility_modifier)]
 #![feature(in_band_lifetimes)]
 #![feature(nll)]
+#![feature(control_flow_enum)]
 #![recursion_limit = "256"]
 
 #[macro_use]
diff --git a/compiler/rustc_ty/src/ty.rs b/compiler/rustc_ty/src/ty.rs
index c4b6b64..2562140 100644
--- a/compiler/rustc_ty/src/ty.rs
+++ b/compiler/rustc_ty/src/ty.rs
@@ -1,11 +1,9 @@
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_data_structures::svh::Svh;
 use rustc_hir as hir;
-use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
-use rustc_infer::traits::util;
 use rustc_middle::hir::map as hir_map;
-use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::subst::Subst;
 use rustc_middle::ty::{
     self, Binder, Predicate, PredicateAtom, PredicateKind, ToPredicate, Ty, TyCtxt, WithConstness,
 };
@@ -82,7 +80,6 @@
 fn associated_item_from_trait_item_ref(
     tcx: TyCtxt<'_>,
     parent_def_id: LocalDefId,
-    parent_vis: &hir::Visibility<'_>,
     trait_item_ref: &hir::TraitItemRef,
 ) -> ty::AssocItem {
     let def_id = tcx.hir().local_def_id(trait_item_ref.id.hir_id);
@@ -95,8 +92,7 @@
     ty::AssocItem {
         ident: trait_item_ref.ident,
         kind,
-        // Visibility of trait items is inherited from their traits.
-        vis: ty::Visibility::from_hir(parent_vis, trait_item_ref.id.hir_id, tcx),
+        vis: tcx.visibility(def_id),
         defaultness: trait_item_ref.defaultness,
         def_id: def_id.to_def_id(),
         container: ty::TraitContainer(parent_def_id.to_def_id()),
@@ -119,8 +115,7 @@
     ty::AssocItem {
         ident: impl_item_ref.ident,
         kind,
-        // Visibility of trait impl items doesn't matter.
-        vis: ty::Visibility::from_hir(&impl_item_ref.vis, impl_item_ref.id.hir_id, tcx),
+        vis: tcx.visibility(def_id),
         defaultness: impl_item_ref.defaultness,
         def_id: def_id.to_def_id(),
         container: ty::ImplContainer(parent_def_id.to_def_id()),
@@ -145,12 +140,8 @@
 
         hir::ItemKind::Trait(.., ref trait_item_refs) => {
             if let Some(trait_item_ref) = trait_item_refs.iter().find(|i| i.id.hir_id == id) {
-                let assoc_item = associated_item_from_trait_item_ref(
-                    tcx,
-                    parent_def_id,
-                    &parent_item.vis,
-                    trait_item_ref,
-                );
+                let assoc_item =
+                    associated_item_from_trait_item_ref(tcx, parent_def_id, trait_item_ref);
                 debug_assert_eq!(assoc_item.def_id, def_id);
                 return assoc_item;
             }
@@ -492,133 +483,6 @@
     fn_like.asyncness()
 }
 
-/// For associated types we allow bounds written on the associated type
-/// (`type X: Trait`) to be used as candidates. We also allow the same bounds
-/// when desugared as bounds on the trait `where Self::X: Trait`.
-///
-/// Note that this filtering is done with the items identity substs to
-/// simplify checking that these bounds are met in impls. This means that
-/// a bound such as `for<'b> <Self as X<'b>>::U: Clone` can't be used, as in
-/// `hr-associated-type-bound-1.rs`.
-fn associated_type_projection_predicates(
-    tcx: TyCtxt<'_>,
-    assoc_item_def_id: DefId,
-) -> &'_ ty::List<ty::Predicate<'_>> {
-    let generic_trait_bounds = tcx.predicates_of(assoc_item_def_id);
-    // We include predicates from the trait as well to handle
-    // `where Self::X: Trait`.
-    let item_bounds = generic_trait_bounds.instantiate_identity(tcx);
-    let item_predicates = util::elaborate_predicates(tcx, item_bounds.predicates.into_iter());
-
-    let assoc_item_ty = ty::ProjectionTy {
-        item_def_id: assoc_item_def_id,
-        substs: InternalSubsts::identity_for_item(tcx, assoc_item_def_id),
-    };
-
-    let predicates = item_predicates.filter_map(|obligation| {
-        let pred = obligation.predicate;
-        match pred.skip_binders() {
-            ty::PredicateAtom::Trait(tr, _) => {
-                if let ty::Projection(p) = *tr.self_ty().kind() {
-                    if p == assoc_item_ty {
-                        return Some(pred);
-                    }
-                }
-            }
-            ty::PredicateAtom::Projection(proj) => {
-                if let ty::Projection(p) = *proj.projection_ty.self_ty().kind() {
-                    if p == assoc_item_ty {
-                        return Some(pred);
-                    }
-                }
-            }
-            ty::PredicateAtom::TypeOutlives(outlives) => {
-                if let ty::Projection(p) = *outlives.0.kind() {
-                    if p == assoc_item_ty {
-                        return Some(pred);
-                    }
-                }
-            }
-            _ => {}
-        }
-        None
-    });
-
-    let result = tcx.mk_predicates(predicates);
-    debug!(
-        "associated_type_projection_predicates({}) = {:?}",
-        tcx.def_path_str(assoc_item_def_id),
-        result
-    );
-    result
-}
-
-/// Opaque types don't have the same issues as associated types: the only
-/// predicates on an opaque type (excluding those it inherits from its parent
-/// item) should be of the form we're expecting.
-fn opaque_type_projection_predicates(
-    tcx: TyCtxt<'_>,
-    def_id: DefId,
-) -> &'_ ty::List<ty::Predicate<'_>> {
-    let substs = InternalSubsts::identity_for_item(tcx, def_id);
-
-    let bounds = tcx.predicates_of(def_id);
-    let predicates =
-        util::elaborate_predicates(tcx, bounds.predicates.iter().map(|&(pred, _)| pred));
-
-    let filtered_predicates = predicates.filter_map(|obligation| {
-        let pred = obligation.predicate;
-        match pred.skip_binders() {
-            ty::PredicateAtom::Trait(tr, _) => {
-                if let ty::Opaque(opaque_def_id, opaque_substs) = *tr.self_ty().kind() {
-                    if opaque_def_id == def_id && opaque_substs == substs {
-                        return Some(pred);
-                    }
-                }
-            }
-            ty::PredicateAtom::Projection(proj) => {
-                if let ty::Opaque(opaque_def_id, opaque_substs) =
-                    *proj.projection_ty.self_ty().kind()
-                {
-                    if opaque_def_id == def_id && opaque_substs == substs {
-                        return Some(pred);
-                    }
-                }
-            }
-            ty::PredicateAtom::TypeOutlives(outlives) => {
-                if let ty::Opaque(opaque_def_id, opaque_substs) = *outlives.0.kind() {
-                    if opaque_def_id == def_id && opaque_substs == substs {
-                        return Some(pred);
-                    }
-                } else {
-                    // These can come from elaborating other predicates
-                    return None;
-                }
-            }
-            // These can come from elaborating other predicates
-            ty::PredicateAtom::RegionOutlives(_) => return None,
-            _ => {}
-        }
-        tcx.sess.delay_span_bug(
-            obligation.cause.span(tcx),
-            &format!("unexpected predicate {:?} on opaque type", pred),
-        );
-        None
-    });
-
-    let result = tcx.mk_predicates(filtered_predicates);
-    debug!("opaque_type_projection_predicates({}) = {:?}", tcx.def_path_str(def_id), result);
-    result
-}
-
-fn projection_predicates(tcx: TyCtxt<'_>, def_id: DefId) -> &'_ ty::List<ty::Predicate<'_>> {
-    match tcx.def_kind(def_id) {
-        DefKind::AssocTy => associated_type_projection_predicates(tcx, def_id),
-        DefKind::OpaqueTy => opaque_type_projection_predicates(tcx, def_id),
-        k => bug!("projection_predicates called on {}", k.descr(def_id)),
-    }
-}
-
 pub fn provide(providers: &mut ty::query::Providers) {
     *providers = ty::query::Providers {
         asyncness,
@@ -636,7 +500,6 @@
         instance_def_size_estimate,
         issue33140_self_ty,
         impl_defaultness,
-        projection_predicates,
         ..*providers
     };
 }
diff --git a/compiler/rustc_typeck/src/astconv/generics.rs b/compiler/rustc_typeck/src/astconv/generics.rs
index b54de1d..3bfb2d3 100644
--- a/compiler/rustc_typeck/src/astconv/generics.rs
+++ b/compiler/rustc_typeck/src/astconv/generics.rs
@@ -548,13 +548,18 @@
         generics: &ty::Generics,
     ) -> bool {
         let explicit = !seg.infer_args;
-        let impl_trait = generics.params.iter().any(|param| match param.kind {
-            ty::GenericParamDefKind::Type {
-                synthetic: Some(hir::SyntheticTyParamKind::ImplTrait),
-                ..
-            } => true,
-            _ => false,
-        });
+        let impl_trait =
+            generics.params.iter().any(|param| match param.kind {
+                ty::GenericParamDefKind::Type {
+                    synthetic:
+                        Some(
+                            hir::SyntheticTyParamKind::ImplTrait
+                            | hir::SyntheticTyParamKind::FromAttr,
+                        ),
+                    ..
+                } => true,
+                _ => false,
+            });
 
         if explicit && impl_trait {
             let spans = seg
@@ -562,7 +567,7 @@
                 .args
                 .iter()
                 .filter_map(|arg| match arg {
-                    GenericArg::Type(_) => Some(arg.span()),
+                    GenericArg::Type(_) | GenericArg::Const(_) => Some(arg.span()),
                     _ => None,
                 })
                 .collect::<Vec<_>>();
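The rewritten `impl_trait` check above relies on an or-pattern nested inside `Some(...)` (`synthetic: Some(ImplTrait | FromAttr)`). Nested or-patterns were still feature-gated when this code was written but are stable today (Rust 1.53+). A small sketch with a stand-in enum, not rustc's `hir::SyntheticTyParamKind`:

    enum SyntheticKind {
        ImplTrait,
        FromAttr,
        Other,
    }

    fn is_synthetic(kind: &Option<SyntheticKind>) -> bool {
        // One arm covers both variants, mirroring the hunk's match.
        matches!(kind, Some(SyntheticKind::ImplTrait | SyntheticKind::FromAttr))
    }

    fn main() {
        assert!(is_synthetic(&Some(SyntheticKind::ImplTrait)));
        assert!(is_synthetic(&Some(SyntheticKind::FromAttr)));
        assert!(!is_synthetic(&Some(SyntheticKind::Other)));
        assert!(!is_synthetic(&None));
    }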
diff --git a/compiler/rustc_typeck/src/astconv/mod.rs b/compiler/rustc_typeck/src/astconv/mod.rs
index 46b8b2e..07e523a 100644
--- a/compiler/rustc_typeck/src/astconv/mod.rs
+++ b/compiler/rustc_typeck/src/astconv/mod.rs
@@ -35,8 +35,8 @@
 use rustc_trait_selection::traits::wf::object_region_bounds;
 
 use smallvec::SmallVec;
+use std::array;
 use std::collections::BTreeSet;
-use std::iter;
 use std::slice;
 
 #[derive(Debug)]
@@ -1095,9 +1095,10 @@
                     obligation.predicate
                 );
 
-                match obligation.predicate.skip_binders() {
+                let bound_predicate = obligation.predicate.bound_atom();
+                match bound_predicate.skip_binder() {
                     ty::PredicateAtom::Trait(pred, _) => {
-                        let pred = ty::Binder::bind(pred);
+                        let pred = bound_predicate.rebind(pred);
                         associated_types.entry(span).or_default().extend(
                             tcx.associated_items(pred.def_id())
                                 .in_definition_order()
@@ -1106,7 +1107,7 @@
                         );
                     }
                     ty::PredicateAtom::Projection(pred) => {
-                        let pred = ty::Binder::bind(pred);
+                        let pred = bound_predicate.rebind(pred);
                         // A `Self` within the original bound will be substituted with a
                         // `trait_object_dummy_self`, so check for that.
                         let references_self =
@@ -1346,7 +1347,7 @@
             debug!("one_bound_for_assoc_type: bound2 = {:?}", bound2);
 
             let is_equality = is_equality();
-            let bounds = iter::once(bound).chain(iter::once(bound2)).chain(matching_candidates);
+            let bounds = array::IntoIter::new([bound, bound2]).chain(matching_candidates);
             let mut err = if is_equality.is_some() {
                 // More specific Error Index entry.
                 struct_span_err!(
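The change from `iter::once(bound).chain(iter::once(bound2))` to an iterator over a two-element array produces the same sequence with less ceremony. The patch spells it `array::IntoIter::new([bound, bound2])`, which predates by-value array iteration being reachable through plain `.into_iter()`; a stable-Rust sketch of the equivalence, with placeholder values:

    fn main() {
        let (bound, bound2) = (1, 2);
        let matching_candidates = vec![3, 4];

        // Old shape: two `iter::once` calls chained together.
        let old: Vec<i32> = std::iter::once(bound)
            .chain(std::iter::once(bound2))
            .chain(matching_candidates.iter().copied())
            .collect();

        // New shape: one by-value iterator over a fixed-size array.
        let new: Vec<i32> = IntoIterator::into_iter([bound, bound2])
            .chain(matching_candidates.iter().copied())
            .collect();

        assert_eq!(old, new);
    }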
diff --git a/compiler/rustc_typeck/src/bounds.rs b/compiler/rustc_typeck/src/bounds.rs
index 63295f5..6837074 100644
--- a/compiler/rustc_typeck/src/bounds.rs
+++ b/compiler/rustc_typeck/src/bounds.rs
@@ -71,10 +71,6 @@
                 self.region_bounds
                     .iter()
                     .map(|&(region_bound, span)| {
-                        // Account for the binder being introduced below; no need to shift `param_ty`
-                        // because, at present at least, it either only refers to early-bound regions,
-                        // or it's a generic associated type that deliberately has escaping bound vars.
-                        let region_bound = ty::fold::shift_region(tcx, region_bound, 1);
                         let outlives = ty::OutlivesPredicate(param_ty, region_bound);
                         (ty::Binder::bind(outlives).to_predicate(tcx), span)
                     })
diff --git a/compiler/rustc_typeck/src/check/_match.rs b/compiler/rustc_typeck/src/check/_match.rs
index 7cb23dc..e8eea65 100644
--- a/compiler/rustc_typeck/src/check/_match.rs
+++ b/compiler/rustc_typeck/src/check/_match.rs
@@ -9,6 +9,7 @@
 use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
 use rustc_trait_selection::traits::{
     IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode,
+    StatementAsExpression,
 };
 
 impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
@@ -188,11 +189,8 @@
                     }
                 }
             } else {
-                let (arm_span, semi_span) = if let hir::ExprKind::Block(blk, _) = &arm.body.kind {
-                    self.find_block_span(blk, prior_arm_ty)
-                } else {
-                    (arm.body.span, None)
-                };
+                let (arm_span, semi_span) =
+                    self.get_appropriate_arm_semicolon_removal_span(&arms, i, prior_arm_ty, arm_ty);
                 let (span, code) = match i {
                     // The reason for the first arm to fail is not that the match arms diverge,
                     // but rather that there's a prior obligation that doesn't hold.
@@ -201,6 +199,7 @@
                         expr.span,
                         ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
                             arm_span,
+                            scrut_span: scrut.span,
                             semi_span,
                             source: match_src,
                             prior_arms: other_arms.clone(),
@@ -241,6 +240,28 @@
         coercion.complete(self)
     }
 
+    fn get_appropriate_arm_semicolon_removal_span(
+        &self,
+        arms: &'tcx [hir::Arm<'tcx>],
+        i: usize,
+        prior_arm_ty: Option<Ty<'tcx>>,
+        arm_ty: Ty<'tcx>,
+    ) -> (Span, Option<(Span, StatementAsExpression)>) {
+        let arm = &arms[i];
+        let (arm_span, mut semi_span) = if let hir::ExprKind::Block(blk, _) = &arm.body.kind {
+            self.find_block_span(blk, prior_arm_ty)
+        } else {
+            (arm.body.span, None)
+        };
+        if semi_span.is_none() && i > 0 {
+            if let hir::ExprKind::Block(blk, _) = &arms[i - 1].body.kind {
+                let (_, semi_span_prev) = self.find_block_span(blk, Some(arm_ty));
+                semi_span = semi_span_prev;
+            }
+        }
+        (arm_span, semi_span)
+    }
+
     /// When the previously checked expression (the scrutinee) diverges,
     /// warn the user about the match arms being unreachable.
     fn warn_arms_when_scrutinee_diverges(
@@ -513,7 +534,7 @@
         &self,
         block: &'tcx hir::Block<'tcx>,
         expected_ty: Option<Ty<'tcx>>,
-    ) -> (Span, Option<Span>) {
+    ) -> (Span, Option<(Span, StatementAsExpression)>) {
         if let Some(expr) = &block.expr {
             (expr.span, None)
         } else if let Some(stmt) = block.stmts.last() {
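The `_match.rs` changes above refine where the "remove this semicolon" suggestion points when match arms disagree because an arm's block ends in a statement. As a deliberately non-compiling sketch, this is the shape of program that diagnostic targets (exact wording varies by compiler version):

    fn describe(n: i32) -> i32 {
        match n {
            0 => {
                // The trailing semicolon makes this block evaluate to `()`,
                // so the `match` arms have incompatible types; the compiler
                // suggests removing the semicolon on this line.
                n + 1;
            }
            _ => n - 1,
        }
    }

    fn main() {
        println!("{}", describe(0));
    }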
diff --git a/compiler/rustc_typeck/src/check/callee.rs b/compiler/rustc_typeck/src/check/callee.rs
index 740783a..a38fb96 100644
--- a/compiler/rustc_typeck/src/check/callee.rs
+++ b/compiler/rustc_typeck/src/check/callee.rs
@@ -285,10 +285,8 @@
         arg_exprs: &'tcx [hir::Expr<'tcx>],
         expected: Expectation<'tcx>,
     ) -> Ty<'tcx> {
-        let (fn_sig, def_span) = match *callee_ty.kind() {
-            ty::FnDef(def_id, _) => {
-                (callee_ty.fn_sig(self.tcx), self.tcx.hir().span_if_local(def_id))
-            }
+        let (fn_sig, def_id) = match *callee_ty.kind() {
+            ty::FnDef(def_id, _) => (callee_ty.fn_sig(self.tcx), Some(def_id)),
             ty::FnPtr(sig) => (sig, None),
             ref t => {
                 let mut unit_variant = None;
@@ -427,7 +425,7 @@
             arg_exprs,
             fn_sig.c_variadic,
             TupleArgumentsFlag::DontTupleArguments,
-            def_span,
+            def_id,
         );
 
         fn_sig.output()
diff --git a/compiler/rustc_typeck/src/check/check.rs b/compiler/rustc_typeck/src/check/check.rs
index 0647be2..70d94ef 100644
--- a/compiler/rustc_typeck/src/check/check.rs
+++ b/compiler/rustc_typeck/src/check/check.rs
@@ -1,32 +1,38 @@
 use super::coercion::CoerceMany;
+use super::compare_method::check_type_bounds;
 use super::compare_method::{compare_const_impl, compare_impl_method, compare_ty_impl};
 use super::*;
 
 use rustc_attr as attr;
-use rustc_errors::Applicability;
+use rustc_errors::{Applicability, ErrorReported};
 use rustc_hir as hir;
 use rustc_hir::def_id::{DefId, LocalDefId, LOCAL_CRATE};
 use rustc_hir::lang_items::LangItem;
 use rustc_hir::{ItemKind, Node};
 use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_infer::infer::RegionVariableOrigin;
+use rustc_infer::infer::{RegionVariableOrigin, TyCtxtInferExt};
 use rustc_middle::ty::fold::TypeFoldable;
 use rustc_middle::ty::subst::GenericArgKind;
 use rustc_middle::ty::util::{Discr, IntTypeExt, Representability};
-use rustc_middle::ty::{self, RegionKind, ToPredicate, Ty, TyCtxt};
+use rustc_middle::ty::{self, ParamEnv, RegionKind, ToPredicate, Ty, TyCtxt};
 use rustc_session::config::EntryFnType;
+use rustc_session::lint::builtin::UNINHABITED_STATIC;
 use rustc_span::symbol::sym;
 use rustc_span::{self, MultiSpan, Span};
 use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::opaque_types::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
 use rustc_trait_selection::traits::{self, ObligationCauseCode};
 
+use std::ops::ControlFlow;
+
 pub fn check_wf_new(tcx: TyCtxt<'_>) {
     let visit = wfcheck::CheckTypeWellFormedVisitor::new(tcx);
     tcx.hir().krate().par_visit_all_item_likes(&visit);
 }
 
 pub(super) fn check_abi(tcx: TyCtxt<'_>, span: Span, abi: Abi) {
-    if !tcx.sess.target.target.is_abi_supported(abi) {
+    if !tcx.sess.target.is_abi_supported(abi) {
         struct_span_err!(
             tcx.sess,
             span,
@@ -127,7 +133,7 @@
         // The check for a non-trivial pattern is a hack to avoid duplicate warnings
         // for simple cases like `fn foo(x: Trait)`,
         // where we would error once on the parameter as a whole, and once on the binding `x`.
-        if param.pat.simple_ident().is_none() && !tcx.features().unsized_locals {
+        if param.pat.simple_ident().is_none() && !tcx.features().unsized_fn_params {
             fcx.require_type_is_sized(param_ty, param.pat.span, traits::SizedArgumentType(ty_span));
         }
 
@@ -335,7 +341,7 @@
     check_packed(tcx, span, def);
 }
 
-pub(super) fn check_union(tcx: TyCtxt<'_>, id: hir::HirId, span: Span) {
+fn check_union(tcx: TyCtxt<'_>, id: hir::HirId, span: Span) {
     let def_id = tcx.hir().local_def_id(id);
     let def = tcx.adt_def(def_id);
     def.destructor(tcx); // force the destructor to be evaluated
@@ -345,9 +351,8 @@
     check_packed(tcx, span, def);
 }
 
-/// When the `#![feature(untagged_unions)]` gate is active,
-/// check that the fields of the `union` does not contain fields that need dropping.
-pub(super) fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> bool {
+/// Check that the fields of the `union` do not need dropping.
+fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> bool {
     let item_type = tcx.type_of(item_def_id);
     if let ty::Adt(def, substs) = item_type.kind() {
         assert!(def.is_union());
@@ -375,6 +380,36 @@
     true
 }
 
+/// Check that a `static` is inhabited.
+fn check_static_inhabited<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId, span: Span) {
+    // Make sure statics are inhabited.
+    // Other parts of the compiler assume that there are no uninhabited places. In principle it
+    // would be enough to check this for `extern` statics, as statics with an initializer will
+    // have UB during initialization if they are uninhabited, but there also seems to be no good
+    // reason to allow any statics to be uninhabited.
+    let ty = tcx.type_of(def_id);
+    let layout = match tcx.layout_of(ParamEnv::reveal_all().and(ty)) {
+        Ok(l) => l,
+        Err(_) => {
+            // Generic statics are rejected, but we still reach this case.
+            tcx.sess.delay_span_bug(span, "generic static must be rejected");
+            return;
+        }
+    };
+    if layout.abi.is_uninhabited() {
+        tcx.struct_span_lint_hir(
+            UNINHABITED_STATIC,
+            tcx.hir().local_def_id_to_hir_id(def_id),
+            span,
+            |lint| {
+                lint.build("static of uninhabited type")
+                    .note("uninhabited statics cannot be initialized, and any access would be an immediate error")
+                    .emit();
+            },
+        );
+    }
+}
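
For orientation, a minimal user-level sketch of the code this new UNINHABITED_STATIC lint is aimed at; the `Void` type and the extern block are illustrative names, and the quoted messages paraphrase the lint text above:

    // Hypothetical example of a static with an uninhabited type.
    enum Void {}

    extern "C" {
        // warning: static of uninhabited type
        // note: uninhabited statics cannot be initialized,
        //       and any access would be an immediate error
        static NEVER: Void;
    }

    fn main() {}
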
+
 /// Checks that an opaque type does not contain cycles and does not use `Self` or `T::Foo`
 /// projections that would result in "inheriting lifetimes".
 pub(super) fn check_opaque<'tcx>(
@@ -385,8 +420,13 @@
     origin: &hir::OpaqueTyOrigin,
 ) {
     check_opaque_for_inheriting_lifetimes(tcx, def_id, span);
-    tcx.ensure().type_of(def_id);
-    check_opaque_for_cycles(tcx, def_id, substs, span, origin);
+    if tcx.type_of(def_id).references_error() {
+        return;
+    }
+    if check_opaque_for_cycles(tcx, def_id, substs, span, origin).is_err() {
+        return;
+    }
+    check_opaque_meets_bounds(tcx, def_id, substs, span, origin);
 }
 
 /// Checks that an opaque type does not use `Self` or `T::Foo` projections that would result
@@ -410,30 +450,34 @@
     };
 
     impl<'tcx> ty::fold::TypeVisitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
-        fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+        fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
             debug!("check_opaque_for_inheriting_lifetimes: (visit_ty) t={:?}", t);
-            if t != self.opaque_identity_ty && t.super_visit_with(self) {
+            if t != self.opaque_identity_ty && t.super_visit_with(self).is_break() {
                 self.ty = Some(t);
-                return true;
+                return ControlFlow::BREAK;
             }
-            false
+            ControlFlow::CONTINUE
         }
 
-        fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+        fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
             debug!("check_opaque_for_inheriting_lifetimes: (visit_region) r={:?}", r);
             if let RegionKind::ReEarlyBound(ty::EarlyBoundRegion { index, .. }) = r {
-                return *index < self.generics.parent_count as u32;
+                if *index < self.generics.parent_count as u32 {
+                    return ControlFlow::BREAK;
+                } else {
+                    return ControlFlow::CONTINUE;
+                }
             }
 
             r.super_visit_with(self)
         }
 
-        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> {
             if let ty::ConstKind::Unevaluated(..) = c.val {
                // FIXME(#72219) We currently don't detect lifetimes within substs
                 // which would violate this check. Even though the particular substitution is not used
                 // within the const, this should still be fixed.
-                return false;
+                return ControlFlow::CONTINUE;
             }
             c.super_visit_with(self)
         }
@@ -453,10 +497,9 @@
             ty: None,
         };
         let prohibit_opaque = tcx
-            .predicates_of(def_id)
-            .predicates
+            .explicit_item_bounds(def_id)
             .iter()
-            .any(|(predicate, _)| predicate.visit_with(&mut visitor));
+            .any(|(predicate, _)| predicate.visit_with(&mut visitor).is_break());
         debug!(
             "check_opaque_for_inheriting_lifetimes: prohibit_opaque={:?}, visitor={:?}",
             prohibit_opaque, visitor
@@ -476,7 +519,7 @@
                 span,
                 E0760,
                 "`{}` return type cannot contain a projection or `Self` that references lifetimes from \
-             a parent scope",
+                 a parent scope",
                 if is_async { "async fn" } else { "impl Trait" },
             );
 
@@ -504,7 +547,7 @@
     substs: SubstsRef<'tcx>,
     span: Span,
     origin: &hir::OpaqueTyOrigin,
-) {
+) -> Result<(), ErrorReported> {
     if let Err(partially_expanded_type) = tcx.try_expand_impl_trait_type(def_id.to_def_id(), substs)
     {
         match origin {
@@ -514,9 +557,82 @@
             }
             _ => opaque_type_cycle_error(tcx, def_id, span),
         }
+        Err(ErrorReported)
+    } else {
+        Ok(())
     }
 }
 
+/// Check that the concrete type behind `impl Trait` actually implements `Trait`.
+///
+/// This is mostly checked at the places that specify the opaque type, but those
+/// checks run in the `param_env` of the specifying function, which may carry
+/// bounds that the opaque type itself does not have:
+///
+/// type X<T> = impl Clone;
+/// fn f<T: Clone>(t: T) -> X<T> {
+///     t
+/// }
+///
+/// Without this check the above code is incorrectly accepted: we would ICE if
+/// someone tried, for example, to clone an `Option<X<&mut ()>>`.
+fn check_opaque_meets_bounds<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def_id: LocalDefId,
+    substs: SubstsRef<'tcx>,
+    span: Span,
+    origin: &hir::OpaqueTyOrigin,
+) {
+    match origin {
+        // Checked when type checking the function containing them.
+        hir::OpaqueTyOrigin::FnReturn | hir::OpaqueTyOrigin::AsyncFn => return,
+        // These can have different predicates from their defining use.
+        hir::OpaqueTyOrigin::Binding | hir::OpaqueTyOrigin::Misc => {}
+    }
+
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+    let param_env = tcx.param_env(def_id);
+
+    tcx.infer_ctxt().enter(move |infcx| {
+        let inh = Inherited::new(infcx, def_id);
+        let infcx = &inh.infcx;
+        let opaque_ty = tcx.mk_opaque(def_id.to_def_id(), substs);
+
+        let misc_cause = traits::ObligationCause::misc(span, hir_id);
+
+        let (_, opaque_type_map) = inh.register_infer_ok_obligations(
+            infcx.instantiate_opaque_types(def_id, hir_id, param_env, &opaque_ty, span),
+        );
+
+        for (def_id, opaque_defn) in opaque_type_map {
+            match infcx
+                .at(&misc_cause, param_env)
+                .eq(opaque_defn.concrete_ty, tcx.type_of(def_id).subst(tcx, opaque_defn.substs))
+            {
+                Ok(infer_ok) => inh.register_infer_ok_obligations(infer_ok),
+                Err(ty_err) => tcx.sess.delay_span_bug(
+                    opaque_defn.definition_span,
+                    &format!(
+                        "could not unify `{}` with revealed type:\n{}",
+                        opaque_defn.concrete_ty, ty_err,
+                    ),
+                ),
+            }
+        }
+
+        // Check that all obligations are satisfied by the implementation's
+        // version.
+        if let Err(ref errors) = inh.fulfillment_cx.borrow_mut().select_all_or_error(&infcx) {
+            infcx.report_fulfillment_errors(errors, None, false);
+        }
+
+        // Finally, resolve all regions. This catches wily misuses of
+        // lifetime parameters.
+        let fcx = FnCtxt::new(&inh, param_env, hir_id);
+        fcx.regionck_item(hir_id, span, &[]);
+    });
+}
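
To make the doc comment's example concrete: under the unstable `type_alias_impl_trait` feature (a hedged sketch, not part of this patch), the defining function's `T: Clone` bound is not a bound of the opaque type `X<T>` itself, so the definition below is what the new `check_opaque_meets_bounds` pass rejects instead of letting later code ICE:

    #![feature(type_alias_impl_trait)]

    type X<T> = impl Clone;

    fn f<T: Clone>(t: T) -> X<T> {
        // The concrete type behind `X<T>` is `T`, which is only `Clone`
        // because of `f`'s own `T: Clone` bound; `X<T>` promises `Clone`
        // for every `T`, so this defining use is now reported as an error.
        t
    }

    fn main() {}
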
+
 pub fn check_item_type<'tcx>(tcx: TyCtxt<'tcx>, it: &'tcx hir::Item<'tcx>) {
     debug!(
         "check_item_type(it.hir_id={}, it.name={})",
@@ -530,6 +646,7 @@
             let def_id = tcx.hir().local_def_id(it.hir_id);
             tcx.ensure().typeck(def_id);
             maybe_check_static_with_link_section(tcx, def_id, it.span);
+            check_static_inhabited(tcx, def_id, it.span);
         }
         hir::ItemKind::Const(..) => {
             tcx.ensure().typeck(tcx.hir().local_def_id(it.hir_id));
@@ -553,9 +670,25 @@
 
             for item in items.iter() {
                 let item = tcx.hir().trait_item(item.id);
-                if let hir::TraitItemKind::Fn(sig, _) = &item.kind {
-                    let abi = sig.header.abi;
-                    fn_maybe_err(tcx, item.ident.span, abi);
+                match item.kind {
+                    hir::TraitItemKind::Fn(ref sig, _) => {
+                        let abi = sig.header.abi;
+                        fn_maybe_err(tcx, item.ident.span, abi);
+                    }
+                    hir::TraitItemKind::Type(.., Some(_default)) => {
+                        let item_def_id = tcx.hir().local_def_id(item.hir_id).to_def_id();
+                        let assoc_item = tcx.associated_item(item_def_id);
+                        let trait_substs =
+                            InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+                        let _: Result<_, rustc_errors::ErrorReported> = check_type_bounds(
+                            tcx,
+                            assoc_item,
+                            assoc_item,
+                            item.span,
+                            ty::TraitRef { def_id: def_id.to_def_id(), substs: trait_substs },
+                        );
+                    }
+                    _ => {}
                 }
             }
         }
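
The new arm for `TraitItemKind::Type(.., Some(_))` runs `check_type_bounds` on associated type defaults. As a rough illustration (the `associated_type_defaults` feature gate and the names are assumptions, not part of this diff), a default that fails its own bound is now flagged on the trait itself:

    #![feature(associated_type_defaults)]

    struct NotClone;

    trait Tr {
        // The provided default must satisfy the declared `Clone` bound;
        // `NotClone` does not, so the trait definition is reported as an error.
        type Assoc: Clone = NotClone;
    }

    fn main() {}
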
@@ -596,7 +729,8 @@
                 }
             } else {
                 for item in m.items {
-                    let generics = tcx.generics_of(tcx.hir().local_def_id(item.hir_id));
+                    let def_id = tcx.hir().local_def_id(item.hir_id);
+                    let generics = tcx.generics_of(def_id);
                     let own_counts = generics.own_counts();
                     if generics.params.len() - own_counts.lifetimes != 0 {
                         let (kinds, kinds_pl, egs) = match (own_counts.types, own_counts.consts) {
@@ -627,8 +761,14 @@
                         .emit();
                     }
 
-                    if let hir::ForeignItemKind::Fn(ref fn_decl, _, _) = item.kind {
-                        require_c_abi_if_c_variadic(tcx, fn_decl, m.abi, item.span);
+                    match item.kind {
+                        hir::ForeignItemKind::Fn(ref fn_decl, _, _) => {
+                            require_c_abi_if_c_variadic(tcx, fn_decl, m.abi, item.span);
+                        }
+                        hir::ForeignItemKind::Static(..) => {
+                            check_static_inhabited(tcx, def_id, item.span);
+                        }
+                        _ => {}
                     }
                 }
             }
@@ -1315,11 +1455,11 @@
             {
                 struct VisitTypes(Vec<DefId>);
                 impl<'tcx> ty::fold::TypeVisitor<'tcx> for VisitTypes {
-                    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+                    fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
                         match *t.kind() {
                             ty::Opaque(def, _) => {
                                 self.0.push(def);
-                                false
+                                ControlFlow::CONTINUE
                             }
                             _ => t.super_visit_with(self),
                         }
diff --git a/compiler/rustc_typeck/src/check/closure.rs b/compiler/rustc_typeck/src/check/closure.rs
index 8898a54..2ba0507 100644
--- a/compiler/rustc_typeck/src/check/closure.rs
+++ b/compiler/rustc_typeck/src/check/closure.rs
@@ -81,19 +81,10 @@
             self.tcx.closure_base_def_id(expr_def_id.to_def_id()),
         );
 
-        let tupled_upvars_ty =
-            self.tcx.mk_tup(self.tcx.upvars_mentioned(expr_def_id).iter().flat_map(|upvars| {
-                upvars.iter().map(|(&var_hir_id, _)| {
-                    // Create type variables (for now) to represent the transformed
-                    // types of upvars. These will be unified during the upvar
-                    // inference phase (`upvar.rs`).
-                    self.infcx.next_ty_var(TypeVariableOrigin {
-                        // FIXME(eddyb) distinguish upvar inference variables from the rest.
-                        kind: TypeVariableOriginKind::ClosureSynthetic,
-                        span: self.tcx.hir().span(var_hir_id),
-                    })
-                })
-            }));
+        let tupled_upvars_ty = self.infcx.next_ty_var(TypeVariableOrigin {
+            kind: TypeVariableOriginKind::ClosureSynthetic,
+            span: self.tcx.hir().span(expr.hir_id),
+        });
 
         if let Some(GeneratorTypes { resume_ty, yield_ty, interior, movability }) = generator_types
         {
@@ -201,6 +192,7 @@
                     obligation.predicate
                 );
 
+                let bound_predicate = obligation.predicate.bound_atom();
                 if let ty::PredicateAtom::Projection(proj_predicate) =
                     obligation.predicate.skip_binders()
                 {
@@ -208,7 +200,7 @@
                     // the complete signature.
                     self.deduce_sig_from_projection(
                         Some(obligation.cause.span),
-                        ty::Binder::bind(proj_predicate),
+                        bound_predicate.rebind(proj_predicate),
                     )
                 } else {
                     None
@@ -613,6 +605,7 @@
         let ret_ty = self.inh.infcx.shallow_resolve(ret_ty);
         let ret_vid = match *ret_ty.kind() {
             ty::Infer(ty::TyVar(ret_vid)) => ret_vid,
+            ty::Error(_) => return None,
             _ => span_bug!(
                 self.tcx.def_span(expr_def_id),
                 "async fn generator return type not an inference variable"
diff --git a/compiler/rustc_typeck/src/check/coercion.rs b/compiler/rustc_typeck/src/check/coercion.rs
index 4addee1..6da3ecd 100644
--- a/compiler/rustc_typeck/src/check/coercion.rs
+++ b/compiler/rustc_typeck/src/check/coercion.rs
@@ -39,6 +39,7 @@
 use crate::check::FnCtxt;
 use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder};
 use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
 use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
 use rustc_infer::infer::{Coercion, InferOk, InferResult};
 use rustc_middle::ty::adjustment::{
@@ -221,11 +222,11 @@
                 // unsafe qualifier.
                 self.coerce_from_fn_pointer(a, a_f, b)
             }
-            ty::Closure(_, substs_a) => {
+            ty::Closure(closure_def_id_a, substs_a) => {
                 // Non-capturing closures are coercible to
                 // function pointers or unsafe function pointers.
                 // It cannot convert closures that require unsafe.
-                self.coerce_closure_to_fn(a, substs_a, b)
+                self.coerce_closure_to_fn(a, closure_def_id_a, substs_a, b)
             }
             _ => {
                 // Otherwise, just use unification rules.
@@ -582,7 +583,8 @@
         while !queue.is_empty() {
             let obligation = queue.remove(0);
             debug!("coerce_unsized resolve step: {:?}", obligation);
-            let trait_pred = match obligation.predicate.skip_binders() {
+            let bound_predicate = obligation.predicate.bound_atom();
+            let trait_pred = match bound_predicate.skip_binder() {
                 ty::PredicateAtom::Trait(trait_pred, _)
                     if traits.contains(&trait_pred.def_id()) =>
                 {
@@ -593,7 +595,7 @@
                             has_unsized_tuple_coercion = true;
                         }
                     }
-                    ty::Binder::bind(trait_pred)
+                    bound_predicate.rebind(trait_pred)
                 }
                 _ => {
                     coercion.obligations.push(obligation);
@@ -762,6 +764,7 @@
     fn coerce_closure_to_fn(
         &self,
         a: Ty<'tcx>,
+        closure_def_id_a: DefId,
         substs_a: SubstsRef<'tcx>,
         b: Ty<'tcx>,
     ) -> CoerceResult<'tcx> {
@@ -772,7 +775,18 @@
         let b = self.shallow_resolve(b);
 
         match b.kind() {
-            ty::FnPtr(fn_ty) if substs_a.as_closure().upvar_tys().next().is_none() => {
+            // At this point we haven't done capture analysis, which means
+            // that the ClosureSubsts just contains an inference variable instead
+            // of tuple of captured types.
+            //
+            // All we care about here is whether any variable is captured at all, not the exact paths,
+            // so we check `upvars_mentioned` for root variables being captured.
+            ty::FnPtr(fn_ty)
+                if self
+                    .tcx
+                    .upvars_mentioned(closure_def_id_a.expect_local())
+                    .map_or(true, |u| u.is_empty()) =>
+            {
                 // We coerce the closure, which has fn type
                 //     `extern "rust-call" fn((arg0,arg1,...)) -> _`
                 // to
@@ -906,8 +920,8 @@
         // Function items or non-capturing closures of differing IDs or InternalSubsts.
         let (a_sig, b_sig) = {
             let is_capturing_closure = |ty| {
-                if let &ty::Closure(_, substs) = ty {
-                    substs.as_closure().upvar_tys().next().is_some()
+                if let &ty::Closure(closure_def_id, _substs) = ty {
+                    self.tcx.upvars_mentioned(closure_def_id.expect_local()).is_some()
                 } else {
                     false
                 }
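
Since capture analysis has not run yet, both coercion checks above now consult `upvars_mentioned` instead of the (not yet inferred) upvar tuple; the user-visible rule is unchanged, roughly as in this sketch:

    fn main() {
        // A closure that captures nothing still coerces to a plain function pointer.
        let add = |a: i32, b: i32| a + b;
        let f: fn(i32, i32) -> i32 = add;
        assert_eq!(f(2, 3), 5);

        // A closure that mentions an outer variable captures it and does not coerce.
        let offset = 10;
        let with_capture = move |a: i32| a + offset;
        // let g: fn(i32) -> i32 = with_capture; // error: expected fn pointer, found closure
        let _ = with_capture(1);
    }
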
@@ -1461,6 +1475,28 @@
         if let (Some(sp), Some(fn_output)) = (fcx.ret_coercion_span.borrow().as_ref(), fn_output) {
             self.add_impl_trait_explanation(&mut err, cause, fcx, expected, *sp, fn_output);
         }
+
+        if let Some(sp) = fcx.ret_coercion_span.borrow().as_ref() {
+            // If the closure has an explicit return type annotation,
+            // then a type error may occur at the first return expression we
+            // see in the closure (if it conflicts with the declared
+            // return type). Skip adding a note in this case, since it
+            // would be incorrect.
+            if !err.span.primary_spans().iter().any(|span| span == sp) {
+                let hir = fcx.tcx.hir();
+                let body_owner = hir.body_owned_by(hir.enclosing_body_owner(fcx.body_id));
+                if fcx.tcx.is_closure(hir.body_owner_def_id(body_owner).to_def_id()) {
+                    err.span_note(
+                        *sp,
+                        &format!(
+                            "return type inferred to be `{}` here",
+                            fcx.resolve_vars_if_possible(&expected)
+                        ),
+                    );
+                }
+            }
+        }
+
         err
     }
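
The added note points at the first return site recorded in `ret_coercion_span` when a later expression in a closure disagrees with the inferred return type; an illustrative (intentionally non-compiling) case:

    fn main() {
        let f = |flag: bool| {
            if flag {
                return 1; // note: return type inferred to be `i32` here
            }
            "one" // error: mismatched types, expected `i32`, found `&str`
        };
        let _ = f(true);
    }
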
 
diff --git a/compiler/rustc_typeck/src/check/compare_method.rs b/compiler/rustc_typeck/src/check/compare_method.rs
index 7aa54e0..4acc745 100644
--- a/compiler/rustc_typeck/src/check/compare_method.rs
+++ b/compiler/rustc_typeck/src/check/compare_method.rs
@@ -5,6 +5,7 @@
 use rustc_hir::intravisit;
 use rustc_hir::{GenericParamKind, ImplItemKind, TraitItemKind};
 use rustc_infer::infer::{self, InferOk, TyCtxtInferExt};
+use rustc_infer::traits::util;
 use rustc_middle::ty;
 use rustc_middle::ty::error::{ExpectedFound, TypeError};
 use rustc_middle::ty::subst::{InternalSubsts, Subst};
@@ -327,7 +328,7 @@
         // Finally, resolve all regions. This catches wily misuses of
         // lifetime parameters.
         let fcx = FnCtxt::new(&inh, param_env, impl_m_hir_id);
-        fcx.regionck_item(impl_m_hir_id, impl_m_span, &[]);
+        fcx.regionck_item(impl_m_hir_id, impl_m_span, trait_sig.inputs_and_output);
 
         Ok(())
     })
@@ -1052,7 +1053,7 @@
 
         compare_type_predicate_entailment(tcx, impl_ty, impl_ty_span, trait_ty, impl_trait_ref)?;
 
-        compare_projection_bounds(tcx, trait_ty, impl_ty, impl_ty_span, impl_trait_ref)
+        check_type_bounds(tcx, trait_ty, impl_ty, impl_ty_span, impl_trait_ref)
     })();
 }
 
@@ -1170,20 +1171,13 @@
 /// For default associated types the normalization is not possible (the value
 /// from the impl could be overridden). We also can't normalize generic
 /// associated types (yet) because they contain bound parameters.
-fn compare_projection_bounds<'tcx>(
+pub fn check_type_bounds<'tcx>(
     tcx: TyCtxt<'tcx>,
     trait_ty: &ty::AssocItem,
     impl_ty: &ty::AssocItem,
     impl_ty_span: Span,
     impl_trait_ref: ty::TraitRef<'tcx>,
 ) -> Result<(), ErrorReported> {
-    let have_gats = tcx.features().generic_associated_types;
-    if impl_ty.defaultness.is_final() && !have_gats {
-        // For "final", non-generic associate type implementations, we
-        // don't need this as described above.
-        return Ok(());
-    }
-
     // Given
     //
     // impl<A, B> Foo<u32> for (A, B) {
@@ -1211,16 +1205,27 @@
     // ParamEnv for normalization specifically.
     let normalize_param_env = {
         let mut predicates = param_env.caller_bounds().iter().collect::<Vec<_>>();
-        predicates.push(
-            ty::Binder::dummy(ty::ProjectionPredicate {
-                projection_ty: ty::ProjectionTy {
-                    item_def_id: trait_ty.def_id,
-                    substs: rebased_substs,
-                },
-                ty: impl_ty_value,
-            })
-            .to_predicate(tcx),
-        );
+        match impl_ty_value.kind() {
+            ty::Projection(proj)
+                if proj.item_def_id == trait_ty.def_id && proj.substs == rebased_substs =>
+            {
+                // Don't include this predicate if the projected type is
+                // exactly the same as the projection. This can occur in
+                // (somewhat dubious) code like this:
+                //
+                // impl<T> X for T where T: X { type Y = <T as X>::Y; }
+            }
+            _ => predicates.push(
+                ty::Binder::dummy(ty::ProjectionPredicate {
+                    projection_ty: ty::ProjectionTy {
+                        item_def_id: trait_ty.def_id,
+                        substs: rebased_substs,
+                    },
+                    ty: impl_ty_value,
+                })
+                .to_predicate(tcx),
+            ),
+        };
         ty::ParamEnv::new(tcx.intern_predicates(&predicates), Reveal::UserFacing)
     };
 
@@ -1231,33 +1236,38 @@
 
         let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local());
         let normalize_cause = traits::ObligationCause::misc(impl_ty_span, impl_ty_hir_id);
-        let cause = ObligationCause::new(
-            impl_ty_span,
-            impl_ty_hir_id,
-            ObligationCauseCode::ItemObligation(trait_ty.def_id),
-        );
+        let mk_cause = |span| {
+            ObligationCause::new(
+                impl_ty_span,
+                impl_ty_hir_id,
+                ObligationCauseCode::BindingObligation(trait_ty.def_id, span),
+            )
+        };
 
-        let predicates = tcx.projection_predicates(trait_ty.def_id);
-        debug!("compare_projection_bounds: projection_predicates={:?}", predicates);
+        let obligations = tcx
+            .explicit_item_bounds(trait_ty.def_id)
+            .iter()
+            .map(|&(bound, span)| {
+                let concrete_ty_bound = bound.subst(tcx, rebased_substs);
+                debug!("check_type_bounds: concrete_ty_bound = {:?}", concrete_ty_bound);
 
-        for predicate in predicates {
-            let concrete_ty_predicate = predicate.subst(tcx, rebased_substs);
-            debug!("compare_projection_bounds: concrete predicate = {:?}", concrete_ty_predicate);
+                traits::Obligation::new(mk_cause(span), param_env, concrete_ty_bound)
+            })
+            .collect();
+        debug!("check_type_bounds: item_bounds={:?}", obligations);
 
+        for mut obligation in util::elaborate_obligations(tcx, obligations) {
             let traits::Normalized { value: normalized_predicate, obligations } = traits::normalize(
                 &mut selcx,
                 normalize_param_env,
                 normalize_cause.clone(),
-                &concrete_ty_predicate,
+                &obligation.predicate,
             );
             debug!("compare_projection_bounds: normalized predicate = {:?}", normalized_predicate);
+            obligation.predicate = normalized_predicate;
 
             inh.register_predicates(obligations);
-            inh.register_predicate(traits::Obligation::new(
-                cause.clone(),
-                param_env,
-                normalized_predicate,
-            ));
+            inh.register_predicate(obligation);
         }
 
         // Check that all obligations are satisfied by the implementation's
@@ -1270,7 +1280,11 @@
         // Finally, resolve all regions. This catches wily misuses of
         // lifetime parameters.
         let fcx = FnCtxt::new(&inh, param_env, impl_ty_hir_id);
-        fcx.regionck_item(impl_ty_hir_id, impl_ty_span, &[]);
+        let implied_bounds = match impl_ty.container {
+            ty::TraitContainer(_) => vec![],
+            ty::ImplContainer(def_id) => fcx.impl_implied_bounds(def_id, impl_ty_span),
+        };
+        fcx.regionck_item(impl_ty_hir_id, impl_ty_span, &implied_bounds);
 
         Ok(())
     })
diff --git a/compiler/rustc_typeck/src/check/demand.rs b/compiler/rustc_typeck/src/check/demand.rs
index 3e66885..241803f 100644
--- a/compiler/rustc_typeck/src/check/demand.rs
+++ b/compiler/rustc_typeck/src/check/demand.rs
@@ -33,7 +33,6 @@
             return;
         }
         self.suggest_boxing_when_appropriate(err, expr, expected, expr_ty);
-        self.suggest_missing_await(err, expr, expected, expr_ty);
         self.suggest_missing_parentheses(err, expr);
         self.note_need_for_fn_pointer(err, expected, expr_ty);
         self.note_internal_mutation_in_method(err, expr, expected, expr_ty);
@@ -751,8 +750,20 @@
             }
         }
 
-        let msg = format!("you can convert an `{}` to `{}`", checked_ty, expected_ty);
-        let cast_msg = format!("you can cast an `{} to `{}`", checked_ty, expected_ty);
+        let msg = format!(
+            "you can convert {} `{}` to {} `{}`",
+            checked_ty.kind().article(),
+            checked_ty,
+            expected_ty.kind().article(),
+            expected_ty,
+        );
+        let cast_msg = format!(
+            "you can cast {} `{}` to {} `{}`",
+            checked_ty.kind().article(),
+            checked_ty,
+            expected_ty.kind().article(),
+            expected_ty,
+        );
         let lit_msg = format!(
             "change the type of the numeric literal from `{}` to `{}`",
             checked_ty, expected_ty,
@@ -814,7 +825,7 @@
                     let suggestion = format!("{}::from({})", checked_ty, lhs_src);
                     (lhs_expr.span, msg, suggestion)
                 } else {
-                    let msg = format!("{} and panic if the converted value wouldn't fit", msg);
+                    let msg = format!("{} and panic if the converted value doesn't fit", msg);
                     let suggestion =
                         format!("{}{}.try_into().unwrap()", prefix, with_opt_paren(&src));
                     (expr.span, msg, suggestion)
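
The reworded suggestion ("... and panic if the converted value doesn't fit") corresponds to an ordinary `TryInto` conversion; a small sketch of what applying it looks like:

    use std::convert::{TryFrom, TryInto};

    fn main() {
        let n: i64 = 200;
        // The suggested fix for an `i64` -> `u8` mismatch: convert, panicking on overflow.
        let small: u8 = n.try_into().unwrap();
        assert_eq!(small, 200);

        // A value that doesn't fit would make the `.unwrap()` above panic at runtime.
        assert!(u8::try_from(300_i64).is_err());
    }
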
diff --git a/compiler/rustc_typeck/src/check/dropck.rs b/compiler/rustc_typeck/src/check/dropck.rs
index ae94a6d..5650b2c 100644
--- a/compiler/rustc_typeck/src/check/dropck.rs
+++ b/compiler/rustc_typeck/src/check/dropck.rs
@@ -226,12 +226,14 @@
         // could be extended easily also to the other `Predicate`.
         let predicate_matches_closure = |p: Predicate<'tcx>| {
             let mut relator: SimpleEqRelation<'tcx> = SimpleEqRelation::new(tcx, self_param_env);
-            match (predicate.skip_binders(), p.skip_binders()) {
+            let predicate = predicate.bound_atom();
+            let p = p.bound_atom();
+            match (predicate.skip_binder(), p.skip_binder()) {
                 (ty::PredicateAtom::Trait(a, _), ty::PredicateAtom::Trait(b, _)) => {
-                    relator.relate(ty::Binder::bind(a), ty::Binder::bind(b)).is_ok()
+                    relator.relate(predicate.rebind(a), p.rebind(b)).is_ok()
                 }
                 (ty::PredicateAtom::Projection(a), ty::PredicateAtom::Projection(b)) => {
-                    relator.relate(ty::Binder::bind(a), ty::Binder::bind(b)).is_ok()
+                    relator.relate(predicate.rebind(a), p.rebind(b)).is_ok()
                 }
                 _ => predicate == p,
             }
diff --git a/compiler/rustc_typeck/src/check/expr.rs b/compiler/rustc_typeck/src/check/expr.rs
index 275f2ed..af19ad0 100644
--- a/compiler/rustc_typeck/src/check/expr.rs
+++ b/compiler/rustc_typeck/src/check/expr.rs
@@ -42,7 +42,7 @@
 use rustc_span::hygiene::DesugaringKind;
 use rustc_span::source_map::Span;
 use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_trait_selection::traits::{self, ObligationCauseCode, SelectionContext};
+use rustc_trait_selection::traits::{self, ObligationCauseCode};
 
 use std::fmt::Display;
 
@@ -286,6 +286,7 @@
             }
             ExprKind::DropTemps(ref e) => self.check_expr_with_expectation(e, expected),
             ExprKind::Array(ref args) => self.check_expr_array(args, expected, expr),
+            ExprKind::ConstBlock(ref anon_const) => self.to_const(anon_const).ty,
             ExprKind::Repeat(ref element, ref count) => {
                 self.check_expr_repeat(element, count, expected, expr)
             }
@@ -475,7 +476,7 @@
 
         if let ty::FnDef(..) = ty.kind() {
             let fn_sig = ty.fn_sig(tcx);
-            if !tcx.features().unsized_locals {
+            if !tcx.features().unsized_fn_params {
                 // We want to remove some Sized bounds from std functions,
                 // but don't want to expose the removal to stable Rust.
                 // i.e., we don't want to allow
@@ -626,7 +627,10 @@
                 assert!(expr_opt.is_none() || self.tcx.sess.has_errors());
             }
 
-            ctxt.may_break = true;
+            // If we encountered a `break`, then (no surprise) it may be possible to break from the
+            // loop... unless the value being returned from the loop diverges itself, e.g.
+            // `break return 5` or `break loop {}`.
+            ctxt.may_break |= !self.diverges.get().is_always();
 
             // the type of a `break` is always `!`, since it diverges
             tcx.types.never
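
A short sketch of the case the `may_break` change is about: when the value given to `break` itself diverges, the loop never actually breaks with a value, so it should not be treated as breakable:

    fn early() -> i32 {
        loop {
            // `return 5` diverges, so this `break` never completes normally;
            // control leaves the loop via `return`, not via the break value.
            break return 5;
        }
    }

    fn main() {
        assert_eq!(early(), 5);
    }
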
@@ -714,39 +718,24 @@
         );
     }
 
-    fn is_destructuring_place_expr(&self, expr: &'tcx hir::Expr<'tcx>) -> bool {
-        match &expr.kind {
-            ExprKind::Array(comps) | ExprKind::Tup(comps) => {
-                comps.iter().all(|e| self.is_destructuring_place_expr(e))
-            }
-            ExprKind::Struct(_path, fields, rest) => {
-                rest.as_ref().map(|e| self.is_destructuring_place_expr(e)).unwrap_or(true)
-                    && fields.iter().all(|f| self.is_destructuring_place_expr(&f.expr))
-            }
-            _ => expr.is_syntactic_place_expr(),
-        }
-    }
-
     pub(crate) fn check_lhs_assignable(
         &self,
         lhs: &'tcx hir::Expr<'tcx>,
         err_code: &'static str,
         expr_span: &Span,
     ) {
-        if !lhs.is_syntactic_place_expr() {
-            // FIXME: Make this use SessionDiagnostic once error codes can be dynamically set.
-            let mut err = self.tcx.sess.struct_span_err_with_code(
-                *expr_span,
-                "invalid left-hand side of assignment",
-                DiagnosticId::Error(err_code.into()),
-            );
-            err.span_label(lhs.span, "cannot assign to this expression");
-            if self.is_destructuring_place_expr(lhs) {
-                err.note("destructuring assignments are not currently supported");
-                err.note("for more information, see https://github.com/rust-lang/rfcs/issues/372");
-            }
-            err.emit();
+        if lhs.is_syntactic_place_expr() {
+            return;
         }
+
+        // FIXME: Make this use SessionDiagnostic once error codes can be dynamically set.
+        let mut err = self.tcx.sess.struct_span_err_with_code(
+            *expr_span,
+            "invalid left-hand side of assignment",
+            DiagnosticId::Error(err_code.into()),
+        );
+        err.span_label(lhs.span, "cannot assign to this expression");
+        err.emit();
     }
 
     /// Type check assignment expression `expr` of form `lhs = rhs`.
@@ -1281,7 +1270,7 @@
 
     /// Report an error for a struct field expression when there are fields which aren't provided.
     ///
-    /// ```ignore (diagnostic)
+    /// ```text
     /// error: missing field `you_can_use_this_field` in initializer of `foo::Foo`
     ///  --> src/main.rs:8:5
     ///   |
@@ -1333,7 +1322,7 @@
 
     /// Report an error for a struct field expression when there are no visible fields.
     ///
-    /// ```ignore (diagnostic)
+    /// ```text
     /// error: cannot construct `Foo` with struct literal syntax due to inaccessible fields
     ///  --> src/main.rs:8:5
     ///   |
@@ -1579,51 +1568,34 @@
         err: &mut DiagnosticBuilder<'_>,
         field_ident: Ident,
         base: &'tcx hir::Expr<'tcx>,
-        expr: &'tcx hir::Expr<'tcx>,
-        def_id: DefId,
+        ty: Ty<'tcx>,
     ) {
-        let param_env = self.tcx().param_env(def_id);
-        let future_trait = self.tcx.require_lang_item(LangItem::Future, None);
-        // Future::Output
-        let item_def_id =
-            self.tcx.associated_items(future_trait).in_definition_order().next().unwrap().def_id;
-
-        let projection_ty = self.tcx.projection_ty_from_predicates((def_id, item_def_id));
-        debug!("suggest_await_on_field_access: projection_ty={:?}", projection_ty);
-
-        let cause = self.misc(expr.span);
-        let mut selcx = SelectionContext::new(&self.infcx);
-
-        let mut obligations = vec![];
-        if let Some(projection_ty) = projection_ty {
-            let normalized_ty = rustc_trait_selection::traits::normalize_projection_type(
-                &mut selcx,
-                param_env,
-                projection_ty,
-                cause,
-                0,
-                &mut obligations,
-            );
-            debug!(
-                "suggest_await_on_field_access: normalized_ty={:?}, ty_kind={:?}",
-                self.resolve_vars_if_possible(&normalized_ty),
-                normalized_ty.kind(),
-            );
-            if let ty::Adt(def, _) = normalized_ty.kind() {
-                // no field access on enum type
-                if !def.is_enum() {
-                    if def.non_enum_variant().fields.iter().any(|field| field.ident == field_ident)
-                    {
-                        err.span_suggestion_verbose(
-                            base.span.shrink_to_hi(),
-                            "consider awaiting before field access",
-                            ".await".to_string(),
-                            Applicability::MaybeIncorrect,
-                        );
-                    }
+        let output_ty = match self.infcx.get_impl_future_output_ty(ty) {
+            Some(output_ty) => self.resolve_vars_if_possible(&output_ty),
+            _ => return,
+        };
+        let mut add_label = true;
+        if let ty::Adt(def, _) = output_ty.kind() {
+            // no field access on enum type
+            if !def.is_enum() {
+                if def.non_enum_variant().fields.iter().any(|field| field.ident == field_ident) {
+                    add_label = false;
+                    err.span_label(
+                        field_ident.span,
+                        "field not available in `impl Future`, but it is available in its `Output`",
+                    );
+                    err.span_suggestion_verbose(
+                        base.span.shrink_to_hi(),
+                        "consider `await`ing on the `Future` and accessing the field of its `Output`",
+                        ".await".to_string(),
+                        Applicability::MaybeIncorrect,
+                    );
                 }
             }
         }
+        if add_label {
+            err.span_label(field_ident.span, &format!("field not found in `{}`", ty));
+        }
     }
 
     fn ban_nonexisting_field(
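
In user terms, the reworked `suggest_await_on_field_access` targets code like the following sketch (names are illustrative; the quoted messages paraphrase the labels added above), where a field of the future's `Output` is read without `.await`:

    struct Response {
        status: u16,
    }

    async fn fetch() -> Response {
        Response { status: 200 }
    }

    async fn handler() -> u16 {
        let resp = fetch();
        // error: no field `status` on type `impl Future<Output = Response>`
        //        field not available in `impl Future`, but it is available in its `Output`
        // help: consider `await`ing on the `Future`: `.await`
        resp.status
        // The suggested fix is `fetch().await.status`.
    }

    fn main() {}
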
@@ -1652,8 +1624,8 @@
             ty::Param(param_ty) => {
                 self.point_at_param_definition(&mut err, param_ty);
             }
-            ty::Opaque(def_id, _) => {
-                self.suggest_await_on_field_access(&mut err, field, base, expr, def_id);
+            ty::Opaque(_, _) => {
+                self.suggest_await_on_field_access(&mut err, field, base, expr_t.peel_refs());
             }
             _ => {}
         }
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt.rs b/compiler/rustc_typeck/src/check/fn_ctxt.rs
deleted file mode 100644
index 79d6c7d..0000000
--- a/compiler/rustc_typeck/src/check/fn_ctxt.rs
+++ /dev/null
@@ -1,3200 +0,0 @@
-// ignore-tidy-filelength
-// FIXME: This file seems to have too much functionality wrapped into it,
-// leading to it being too long.
-// Splitting this file may involve abstracting functionality into other files.
-
-use super::callee::{self, DeferredCallResolution};
-use super::coercion::{CoerceMany, DynamicCoerceMany};
-use super::method::{self, MethodCallee, SelfSource};
-use super::Expectation::*;
-use super::TupleArgumentsFlag::*;
-use super::{
-    potentially_plural_count, struct_span_err, BreakableCtxt, Diverges, EnclosingBreakables,
-    Expectation, FallbackMode, Inherited, LocalTy, Needs, TupleArgumentsFlag, UnsafetyState,
-};
-use crate::astconv::{
-    AstConv, ExplicitLateBound, GenericArgCountMismatch, GenericArgCountResult, PathSeg,
-};
-
-use rustc_ast as ast;
-use rustc_ast::util::parser::ExprPrecedence;
-use rustc_data_structures::captures::Captures;
-use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::ErrorReported;
-use rustc_errors::{Applicability, DiagnosticBuilder, DiagnosticId};
-use rustc_hir as hir;
-use rustc_hir::def::{CtorOf, DefKind, Res};
-use rustc_hir::def_id::DefId;
-use rustc_hir::lang_items::LangItem;
-use rustc_hir::{ExprKind, GenericArg, ItemKind, Node, QPath};
-use rustc_infer::infer::canonical::{Canonical, OriginalQueryValues, QueryResponse};
-use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
-use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_infer::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
-use rustc_infer::infer::{self, InferOk, InferResult};
-use rustc_middle::hir::map::blocks::FnLikeNode;
-use rustc_middle::ty::adjustment::{
-    Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
-};
-use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::subst::{
-    self, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSelfTy, UserSubsts,
-};
-use rustc_middle::ty::{
-    self, AdtKind, CanonicalUserType, Const, DefIdTree, GenericParamDefKind, ToPolyTraitRef,
-    ToPredicate, Ty, TyCtxt, UserType,
-};
-use rustc_session::{lint, Session};
-use rustc_span::hygiene::DesugaringKind;
-use rustc_span::source_map::{original_sp, DUMMY_SP};
-use rustc_span::symbol::{kw, sym, Ident};
-use rustc_span::{self, BytePos, MultiSpan, Span};
-use rustc_trait_selection::infer::InferCtxtExt as _;
-use rustc_trait_selection::opaque_types::InferCtxtExt as _;
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
-use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
-use rustc_trait_selection::traits::{
-    self, ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt,
-};
-
-use std::cell::{Cell, RefCell};
-use std::collections::hash_map::Entry;
-use std::iter;
-use std::mem::replace;
-use std::ops::Deref;
-use std::slice;
-
-pub struct FnCtxt<'a, 'tcx> {
-    pub(super) body_id: hir::HirId,
-
-    /// The parameter environment used for proving trait obligations
-    /// in this function. This can change when we descend into
-    /// closures (as they bring new things into scope), hence it is
-    /// not part of `Inherited` (as of the time of this writing,
-    /// closures do not yet change the environment, but they will
-    /// eventually).
-    pub(super) param_env: ty::ParamEnv<'tcx>,
-
-    /// Number of errors that had been reported when we started
-    /// checking this function. On exit, if we find that *more* errors
-    /// have been reported, we will skip regionck and other work that
-    /// expects the types within the function to be consistent.
-    // FIXME(matthewjasper) This should not exist, and it's not correct
-    // if type checking is run in parallel.
-    err_count_on_creation: usize,
-
-    /// If `Some`, this stores coercion information for returned
-    /// expressions. If `None`, this is in a context where return is
-    /// inappropriate, such as a const expression.
-    ///
-    /// This is a `RefCell<DynamicCoerceMany>`, which means that we
-    /// can track all the return expressions and then use them to
-    /// compute a useful coercion from the set, similar to a match
-    /// expression or other branching context. You can use methods
-    /// like `expected_ty` to access the declared return type (if
-    /// any).
-    pub(super) ret_coercion: Option<RefCell<DynamicCoerceMany<'tcx>>>,
-
-    pub(super) ret_coercion_impl_trait: Option<Ty<'tcx>>,
-
-    pub(super) ret_type_span: Option<Span>,
-
-    /// Used exclusively to reduce cost of advanced evaluation used for
-    /// more helpful diagnostics.
-    pub(super) in_tail_expr: bool,
-
-    /// First span of a return site that we find. Used in error messages.
-    pub(super) ret_coercion_span: RefCell<Option<Span>>,
-
-    pub(super) resume_yield_tys: Option<(Ty<'tcx>, Ty<'tcx>)>,
-
-    pub(super) ps: RefCell<UnsafetyState>,
-
-    /// Whether the last checked node generates a divergence (e.g.,
-    /// `return` will set this to `Always`). In general, when entering
-    /// an expression or other node in the tree, the initial value
-    /// indicates whether prior parts of the containing expression may
-    /// have diverged. It is then typically set to `Maybe` (and the
-    /// old value remembered) for processing the subparts of the
-    /// current expression. As each subpart is processed, they may set
-    /// the flag to `Always`, etc. Finally, at the end, we take the
-    /// result and "union" it with the original value, so that when we
-    /// return the flag indicates if any subpart of the parent
-    /// expression (up to and including this part) has diverged. So,
-    /// if you read it after evaluating a subexpression `X`, the value
-    /// you get indicates whether any subexpression that was
-    /// evaluating up to and including `X` diverged.
-    ///
-    /// We currently use this flag only for diagnostic purposes:
-    ///
-    /// - To warn about unreachable code: if, after processing a
-    ///   sub-expression but before we have applied the effects of the
-    ///   current node, we see that the flag is set to `Always`, we
-    ///   can issue a warning. This corresponds to something like
-    ///   `foo(return)`; we warn on the `foo()` expression. (We then
-    ///   update the flag to `WarnedAlways` to suppress duplicate
-    ///   reports.) Similarly, if we traverse to a fresh statement (or
-    ///   tail expression) from a `Always` setting, we will issue a
-    ///   warning. This corresponds to something like `{return;
-    ///   foo();}` or `{return; 22}`, where we would warn on the
-    ///   `foo()` or `22`.
-    ///
-    /// An expression represents dead code if, after checking it,
-    /// the diverges flag is set to something other than `Maybe`.
-    pub(super) diverges: Cell<Diverges>,
-
-    /// Whether any child nodes have any type errors.
-    pub(super) has_errors: Cell<bool>,
-
-    pub(super) enclosing_breakables: RefCell<EnclosingBreakables<'tcx>>,
-
-    pub(super) inh: &'a Inherited<'a, 'tcx>,
-}
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
-    pub fn new(
-        inh: &'a Inherited<'a, 'tcx>,
-        param_env: ty::ParamEnv<'tcx>,
-        body_id: hir::HirId,
-    ) -> FnCtxt<'a, 'tcx> {
-        FnCtxt {
-            body_id,
-            param_env,
-            err_count_on_creation: inh.tcx.sess.err_count(),
-            ret_coercion: None,
-            ret_coercion_impl_trait: None,
-            ret_type_span: None,
-            in_tail_expr: false,
-            ret_coercion_span: RefCell::new(None),
-            resume_yield_tys: None,
-            ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, hir::CRATE_HIR_ID)),
-            diverges: Cell::new(Diverges::Maybe),
-            has_errors: Cell::new(false),
-            enclosing_breakables: RefCell::new(EnclosingBreakables {
-                stack: Vec::new(),
-                by_id: Default::default(),
-            }),
-            inh,
-        }
-    }
-
-    pub fn sess(&self) -> &Session {
-        &self.tcx.sess
-    }
-
-    pub fn errors_reported_since_creation(&self) -> bool {
-        self.tcx.sess.err_count() > self.err_count_on_creation
-    }
-
-    /// Produces warning on the given node, if the current point in the
-    /// function is unreachable, and there hasn't been another warning.
-    pub(super) fn warn_if_unreachable(&self, id: hir::HirId, span: Span, kind: &str) {
-        // FIXME: Combine these two 'if' expressions into one once
-        // let chains are implemented
-        if let Diverges::Always { span: orig_span, custom_note } = self.diverges.get() {
-            // If span arose from a desugaring of `if` or `while`, then it is the condition itself,
-            // which diverges, that we are about to lint on. This gives suboptimal diagnostics.
-            // Instead, stop here so that the `if`- or `while`-expression's block is linted instead.
-            if !span.is_desugaring(DesugaringKind::CondTemporary)
-                && !span.is_desugaring(DesugaringKind::Async)
-                && !orig_span.is_desugaring(DesugaringKind::Await)
-            {
-                self.diverges.set(Diverges::WarnedAlways);
-
-                debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind);
-
-                self.tcx().struct_span_lint_hir(lint::builtin::UNREACHABLE_CODE, id, span, |lint| {
-                    let msg = format!("unreachable {}", kind);
-                    lint.build(&msg)
-                        .span_label(span, &msg)
-                        .span_label(
-                            orig_span,
-                            custom_note
-                                .unwrap_or("any code following this expression is unreachable"),
-                        )
-                        .emit();
-                })
-            }
-        }
-    }
-
-    pub fn cause(&self, span: Span, code: ObligationCauseCode<'tcx>) -> ObligationCause<'tcx> {
-        ObligationCause::new(span, self.body_id, code)
-    }
-
-    pub fn misc(&self, span: Span) -> ObligationCause<'tcx> {
-        self.cause(span, ObligationCauseCode::MiscObligation)
-    }
-
-    /// Resolves type and const variables in `ty` if possible. Unlike the infcx
-    /// version (resolve_vars_if_possible), this version will
-    /// also select obligations if it seems useful, in an effort
-    /// to get more type information.
-    pub(super) fn resolve_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
-        debug!("resolve_vars_with_obligations(ty={:?})", ty);
-
-        // No Infer()? Nothing needs doing.
-        if !ty.has_infer_types_or_consts() {
-            debug!("resolve_vars_with_obligations: ty={:?}", ty);
-            return ty;
-        }
-
-        // If `ty` is a type variable, see whether we already know what it is.
-        ty = self.resolve_vars_if_possible(&ty);
-        if !ty.has_infer_types_or_consts() {
-            debug!("resolve_vars_with_obligations: ty={:?}", ty);
-            return ty;
-        }
-
-        // If not, try resolving pending obligations as much as
-        // possible. This can help substantially when there are
-        // indirect dependencies that don't seem worth tracking
-        // precisely.
-        self.select_obligations_where_possible(false, |_| {});
-        ty = self.resolve_vars_if_possible(&ty);
-
-        debug!("resolve_vars_with_obligations: ty={:?}", ty);
-        ty
-    }
-
-    pub(super) fn record_deferred_call_resolution(
-        &self,
-        closure_def_id: DefId,
-        r: DeferredCallResolution<'tcx>,
-    ) {
-        let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
-        deferred_call_resolutions.entry(closure_def_id).or_default().push(r);
-    }
-
-    pub(super) fn remove_deferred_call_resolutions(
-        &self,
-        closure_def_id: DefId,
-    ) -> Vec<DeferredCallResolution<'tcx>> {
-        let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
-        deferred_call_resolutions.remove(&closure_def_id).unwrap_or(vec![])
-    }
-
-    pub fn tag(&self) -> String {
-        format!("{:p}", self)
-    }
-
-    pub fn local_ty(&self, span: Span, nid: hir::HirId) -> LocalTy<'tcx> {
-        self.locals.borrow().get(&nid).cloned().unwrap_or_else(|| {
-            span_bug!(span, "no type for local variable {}", self.tcx.hir().node_to_string(nid))
-        })
-    }
-
-    #[inline]
-    pub fn write_ty(&self, id: hir::HirId, ty: Ty<'tcx>) {
-        debug!(
-            "write_ty({:?}, {:?}) in fcx {}",
-            id,
-            self.resolve_vars_if_possible(&ty),
-            self.tag()
-        );
-        self.typeck_results.borrow_mut().node_types_mut().insert(id, ty);
-
-        if ty.references_error() {
-            self.has_errors.set(true);
-            self.set_tainted_by_errors();
-        }
-    }
-
-    pub fn write_field_index(&self, hir_id: hir::HirId, index: usize) {
-        self.typeck_results.borrow_mut().field_indices_mut().insert(hir_id, index);
-    }
-
-    fn write_resolution(&self, hir_id: hir::HirId, r: Result<(DefKind, DefId), ErrorReported>) {
-        self.typeck_results.borrow_mut().type_dependent_defs_mut().insert(hir_id, r);
-    }
-
-    pub fn write_method_call(&self, hir_id: hir::HirId, method: MethodCallee<'tcx>) {
-        debug!("write_method_call(hir_id={:?}, method={:?})", hir_id, method);
-        self.write_resolution(hir_id, Ok((DefKind::AssocFn, method.def_id)));
-        self.write_substs(hir_id, method.substs);
-
-        // When the method is confirmed, the `method.substs` includes
-        // parameters from not just the method, but also the impl of
-        // the method -- in particular, the `Self` type will be fully
-        // resolved. However, those are not something that the "user
-        // specified" -- i.e., those types come from the inferred type
-        // of the receiver, not something the user wrote. So when we
-        // create the user-substs, we want to replace those earlier
-        // types with just the types that the user actually wrote --
-        // that is, those that appear on the *method itself*.
-        //
-        // As an example, if the user wrote something like
-        // `foo.bar::<u32>(...)` -- the `Self` type here will be the
-        // type of `foo` (possibly adjusted), but we don't want to
-        // include that. We want just the `[_, u32]` part.
-        if !method.substs.is_noop() {
-            let method_generics = self.tcx.generics_of(method.def_id);
-            if !method_generics.params.is_empty() {
-                let user_type_annotation = self.infcx.probe(|_| {
-                    let user_substs = UserSubsts {
-                        substs: InternalSubsts::for_item(self.tcx, method.def_id, |param, _| {
-                            let i = param.index as usize;
-                            if i < method_generics.parent_count {
-                                self.infcx.var_for_def(DUMMY_SP, param)
-                            } else {
-                                method.substs[i]
-                            }
-                        }),
-                        user_self_ty: None, // not relevant here
-                    };
-
-                    self.infcx.canonicalize_user_type_annotation(&UserType::TypeOf(
-                        method.def_id,
-                        user_substs,
-                    ))
-                });
-
-                debug!("write_method_call: user_type_annotation={:?}", user_type_annotation);
-                self.write_user_type_annotation(hir_id, user_type_annotation);
-            }
-        }
-    }
-
-    pub fn write_substs(&self, node_id: hir::HirId, substs: SubstsRef<'tcx>) {
-        if !substs.is_noop() {
-            debug!("write_substs({:?}, {:?}) in fcx {}", node_id, substs, self.tag());
-
-            self.typeck_results.borrow_mut().node_substs_mut().insert(node_id, substs);
-        }
-    }
-
-    /// Given the substs that we just converted from the HIR, try to
-    /// canonicalize them and store them as user-given substitutions
-    /// (i.e., substitutions that must be respected by the NLL check).
-    ///
-    /// This should be invoked **before any unifications have
-    /// occurred**, so that annotations like `Vec<_>` are preserved
-    /// properly.
-    pub fn write_user_type_annotation_from_substs(
-        &self,
-        hir_id: hir::HirId,
-        def_id: DefId,
-        substs: SubstsRef<'tcx>,
-        user_self_ty: Option<UserSelfTy<'tcx>>,
-    ) {
-        debug!(
-            "write_user_type_annotation_from_substs: hir_id={:?} def_id={:?} substs={:?} \
-             user_self_ty={:?} in fcx {}",
-            hir_id,
-            def_id,
-            substs,
-            user_self_ty,
-            self.tag(),
-        );
-
-        if Self::can_contain_user_lifetime_bounds((substs, user_self_ty)) {
-            let canonicalized = self.infcx.canonicalize_user_type_annotation(&UserType::TypeOf(
-                def_id,
-                UserSubsts { substs, user_self_ty },
-            ));
-            debug!("write_user_type_annotation_from_substs: canonicalized={:?}", canonicalized);
-            self.write_user_type_annotation(hir_id, canonicalized);
-        }
-    }
-
-    pub fn write_user_type_annotation(
-        &self,
-        hir_id: hir::HirId,
-        canonical_user_type_annotation: CanonicalUserType<'tcx>,
-    ) {
-        debug!(
-            "write_user_type_annotation: hir_id={:?} canonical_user_type_annotation={:?} tag={}",
-            hir_id,
-            canonical_user_type_annotation,
-            self.tag(),
-        );
-
-        if !canonical_user_type_annotation.is_identity() {
-            self.typeck_results
-                .borrow_mut()
-                .user_provided_types_mut()
-                .insert(hir_id, canonical_user_type_annotation);
-        } else {
-            debug!("write_user_type_annotation: skipping identity substs");
-        }
-    }
-
-    pub fn apply_adjustments(&self, expr: &hir::Expr<'_>, adj: Vec<Adjustment<'tcx>>) {
-        debug!("apply_adjustments(expr={:?}, adj={:?})", expr, adj);
-
-        if adj.is_empty() {
-            return;
-        }
-
-        let autoborrow_mut = adj.iter().any(|adj| {
-            matches!(adj, &Adjustment {
-                kind: Adjust::Borrow(AutoBorrow::Ref(_, AutoBorrowMutability::Mut { .. })),
-                ..
-            })
-        });
-
-        match self.typeck_results.borrow_mut().adjustments_mut().entry(expr.hir_id) {
-            Entry::Vacant(entry) => {
-                entry.insert(adj);
-            }
-            Entry::Occupied(mut entry) => {
-                debug!(" - composing on top of {:?}", entry.get());
-                match (&entry.get()[..], &adj[..]) {
-                    // Applying any adjustment on top of a NeverToAny
-                    // is a valid NeverToAny adjustment, because it can't
-                    // be reached.
-                    (&[Adjustment { kind: Adjust::NeverToAny, .. }], _) => return,
-                    (&[
-                        Adjustment { kind: Adjust::Deref(_), .. },
-                        Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. },
-                    ], &[
-                        Adjustment { kind: Adjust::Deref(_), .. },
-                        .. // Any following adjustments are allowed.
-                    ]) => {
-                        // A reborrow has no effect before a dereference.
-                    }
-                    // FIXME: currently we never try to compose autoderefs
-                    // and ReifyFnPointer/UnsafeFnPointer, but we could.
-                    _ =>
-                        bug!("while adjusting {:?}, can't compose {:?} and {:?}",
-                             expr, entry.get(), adj)
-                };
-                *entry.get_mut() = adj;
-            }
-        }
-
-        // If there is a mutable auto-borrow, it is equivalent to `&mut <expr>`.
-        // In this case implicit use of `Deref` and `Index` within `<expr>` should
-        // instead be `DerefMut` and `IndexMut`, so fix those up.
-        if autoborrow_mut {
-            self.convert_place_derefs_to_mutable(expr);
-        }
-    }
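
A rough illustration, in plain user code, of the `DerefMut`/`IndexMut` fix-up mentioned in the comment above; no compiler internals are involved.

```rust
fn main() {
    let mut v = vec![1, 2, 3];
    // Assigning through the index expression requires a mutable auto-borrow
    // of `v`, so the implicit indexing inside the place must resolve to
    // `IndexMut` rather than `Index`.
    v[0] = 10;
    assert_eq!(v[0], 10);
}
```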
-
-    /// Basically whenever we are converting from a type scheme into
-    /// the fn body space, we always want to normalize associated
-    /// types as well. This function combines the two.
-    fn instantiate_type_scheme<T>(&self, span: Span, substs: SubstsRef<'tcx>, value: &T) -> T
-    where
-        T: TypeFoldable<'tcx>,
-    {
-        let value = value.subst(self.tcx, substs);
-        let result = self.normalize_associated_types_in(span, &value);
-        debug!("instantiate_type_scheme(value={:?}, substs={:?}) = {:?}", value, substs, result);
-        result
-    }
-
-    /// As `instantiate_type_scheme`, but for the bounds found in a
-    /// generic type scheme.
-    fn instantiate_bounds(
-        &self,
-        span: Span,
-        def_id: DefId,
-        substs: SubstsRef<'tcx>,
-    ) -> (ty::InstantiatedPredicates<'tcx>, Vec<Span>) {
-        let bounds = self.tcx.predicates_of(def_id);
-        let spans: Vec<Span> = bounds.predicates.iter().map(|(_, span)| *span).collect();
-        let result = bounds.instantiate(self.tcx, substs);
-        let result = self.normalize_associated_types_in(span, &result);
-        debug!(
-            "instantiate_bounds(bounds={:?}, substs={:?}) = {:?}, {:?}",
-            bounds, substs, result, spans,
-        );
-        (result, spans)
-    }
-
-    /// Replaces the opaque types from the given value with type variables,
-    /// and records the `OpaqueTypeMap` for later use during writeback. See
-    /// `InferCtxt::instantiate_opaque_types` for more details.
-    pub(super) fn instantiate_opaque_types_from_value<T: TypeFoldable<'tcx>>(
-        &self,
-        parent_id: hir::HirId,
-        value: &T,
-        value_span: Span,
-    ) -> T {
-        let parent_def_id = self.tcx.hir().local_def_id(parent_id);
-        debug!(
-            "instantiate_opaque_types_from_value(parent_def_id={:?}, value={:?})",
-            parent_def_id, value
-        );
-
-        let (value, opaque_type_map) =
-            self.register_infer_ok_obligations(self.instantiate_opaque_types(
-                parent_def_id,
-                self.body_id,
-                self.param_env,
-                value,
-                value_span,
-            ));
-
-        let mut opaque_types = self.opaque_types.borrow_mut();
-        let mut opaque_types_vars = self.opaque_types_vars.borrow_mut();
-        for (ty, decl) in opaque_type_map {
-            let _ = opaque_types.insert(ty, decl);
-            let _ = opaque_types_vars.insert(decl.concrete_ty, decl.opaque_type);
-        }
-
-        value
-    }
-
-    pub(super) fn normalize_associated_types_in<T>(&self, span: Span, value: &T) -> T
-    where
-        T: TypeFoldable<'tcx>,
-    {
-        self.inh.normalize_associated_types_in(span, self.body_id, self.param_env, value)
-    }
-
-    pub(super) fn normalize_associated_types_in_as_infer_ok<T>(
-        &self,
-        span: Span,
-        value: &T,
-    ) -> InferOk<'tcx, T>
-    where
-        T: TypeFoldable<'tcx>,
-    {
-        self.inh.partially_normalize_associated_types_in(span, self.body_id, self.param_env, value)
-    }
-
-    pub fn require_type_meets(
-        &self,
-        ty: Ty<'tcx>,
-        span: Span,
-        code: traits::ObligationCauseCode<'tcx>,
-        def_id: DefId,
-    ) {
-        self.register_bound(ty, def_id, traits::ObligationCause::new(span, self.body_id, code));
-    }
-
-    pub fn require_type_is_sized(
-        &self,
-        ty: Ty<'tcx>,
-        span: Span,
-        code: traits::ObligationCauseCode<'tcx>,
-    ) {
-        if !ty.references_error() {
-            let lang_item = self.tcx.require_lang_item(LangItem::Sized, None);
-            self.require_type_meets(ty, span, code, lang_item);
-        }
-    }
-
-    pub fn require_type_is_sized_deferred(
-        &self,
-        ty: Ty<'tcx>,
-        span: Span,
-        code: traits::ObligationCauseCode<'tcx>,
-    ) {
-        if !ty.references_error() {
-            self.deferred_sized_obligations.borrow_mut().push((ty, span, code));
-        }
-    }
-
-    pub fn register_bound(
-        &self,
-        ty: Ty<'tcx>,
-        def_id: DefId,
-        cause: traits::ObligationCause<'tcx>,
-    ) {
-        if !ty.references_error() {
-            self.fulfillment_cx.borrow_mut().register_bound(
-                self,
-                self.param_env,
-                ty,
-                def_id,
-                cause,
-            );
-        }
-    }
-
-    pub fn to_ty(&self, ast_t: &hir::Ty<'_>) -> Ty<'tcx> {
-        let t = AstConv::ast_ty_to_ty(self, ast_t);
-        self.register_wf_obligation(t.into(), ast_t.span, traits::MiscObligation);
-        t
-    }
-
-    pub fn to_ty_saving_user_provided_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
-        let ty = self.to_ty(ast_ty);
-        debug!("to_ty_saving_user_provided_ty: ty={:?}", ty);
-
-        if Self::can_contain_user_lifetime_bounds(ty) {
-            let c_ty = self.infcx.canonicalize_response(&UserType::Ty(ty));
-            debug!("to_ty_saving_user_provided_ty: c_ty={:?}", c_ty);
-            self.typeck_results.borrow_mut().user_provided_types_mut().insert(ast_ty.hir_id, c_ty);
-        }
-
-        ty
-    }
-
-    pub fn to_const(&self, ast_c: &hir::AnonConst) -> &'tcx ty::Const<'tcx> {
-        let const_def_id = self.tcx.hir().local_def_id(ast_c.hir_id);
-        let c = ty::Const::from_anon_const(self.tcx, const_def_id);
-        self.register_wf_obligation(
-            c.into(),
-            self.tcx.hir().span(ast_c.hir_id),
-            ObligationCauseCode::MiscObligation,
-        );
-        c
-    }
-
-    pub fn const_arg_to_const(
-        &self,
-        ast_c: &hir::AnonConst,
-        param_def_id: DefId,
-    ) -> &'tcx ty::Const<'tcx> {
-        let const_def = ty::WithOptConstParam {
-            did: self.tcx.hir().local_def_id(ast_c.hir_id),
-            const_param_did: Some(param_def_id),
-        };
-        let c = ty::Const::from_opt_const_arg_anon_const(self.tcx, const_def);
-        self.register_wf_obligation(
-            c.into(),
-            self.tcx.hir().span(ast_c.hir_id),
-            ObligationCauseCode::MiscObligation,
-        );
-        c
-    }
-
-    // If the type given by the user has free regions, save it for later, since
-    // NLL would like to enforce those. Also pass in types that involve
-    // projections, since those can resolve to `'static` bounds (modulo #54940,
-    // which hopefully will be fixed by the time you see this comment, dear
-    // reader, although I have my doubts). Also pass in types with inference
-    // types, because they may be repeated. Other sorts of things are already
-    // sufficiently enforced with erased regions. =)
-    fn can_contain_user_lifetime_bounds<T>(t: T) -> bool
-    where
-        T: TypeFoldable<'tcx>,
-    {
-        t.has_free_regions() || t.has_projections() || t.has_infer_types()
-    }
-
-    pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> {
-        match self.typeck_results.borrow().node_types().get(id) {
-            Some(&t) => t,
-            None if self.is_tainted_by_errors() => self.tcx.ty_error(),
-            None => {
-                bug!(
-                    "no type for node {}: {} in fcx {}",
-                    id,
-                    self.tcx.hir().node_to_string(id),
-                    self.tag()
-                );
-            }
-        }
-    }
-
-    /// Registers an obligation for checking later, during regionck, that `arg` is well-formed.
-    pub fn register_wf_obligation(
-        &self,
-        arg: subst::GenericArg<'tcx>,
-        span: Span,
-        code: traits::ObligationCauseCode<'tcx>,
-    ) {
-        // WF obligations never themselves fail, so no real need to give a detailed cause:
-        let cause = traits::ObligationCause::new(span, self.body_id, code);
-        self.register_predicate(traits::Obligation::new(
-            cause,
-            self.param_env,
-            ty::PredicateAtom::WellFormed(arg).to_predicate(self.tcx),
-        ));
-    }
-
-    /// Registers obligations that all `substs` are well-formed.
-    pub fn add_wf_bounds(&self, substs: SubstsRef<'tcx>, expr: &hir::Expr<'_>) {
-        for arg in substs.iter().filter(|arg| {
-            matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..))
-        }) {
-            self.register_wf_obligation(arg, expr.span, traits::MiscObligation);
-        }
-    }
-
-    /// Given a fully substituted set of bounds (`generic_bounds`), and the values with which each
-    /// type/region parameter was instantiated (`substs`), creates and registers suitable
-    /// trait/region obligations.
-    ///
-    /// For example, if there is a function:
-    ///
-    /// ```
-    /// fn foo<'a,T:'a>(...)
-    /// ```
-    ///
-    /// and a reference:
-    ///
-    /// ```
-    /// let f = foo;
-    /// ```
-    ///
-    /// Then we will create a fresh region variable `'$0` and a fresh type variable `$1` for `'a`
-    /// and `T`. This routine will add a region obligation `$1:'$0` and register it locally.
-    pub fn add_obligations_for_parameters(
-        &self,
-        cause: traits::ObligationCause<'tcx>,
-        predicates: ty::InstantiatedPredicates<'tcx>,
-    ) {
-        assert!(!predicates.has_escaping_bound_vars());
-
-        debug!("add_obligations_for_parameters(predicates={:?})", predicates);
-
-        for obligation in traits::predicates_for_generics(cause, self.param_env, predicates) {
-            self.register_predicate(obligation);
-        }
-    }
-
-    // FIXME(arielb1): use this instead of field.ty everywhere
-    // Only for fields! Returns <none> for methods.
-    // Indifferent to privacy flags
-    pub fn field_ty(
-        &self,
-        span: Span,
-        field: &'tcx ty::FieldDef,
-        substs: SubstsRef<'tcx>,
-    ) -> Ty<'tcx> {
-        self.normalize_associated_types_in(span, &field.ty(self.tcx, substs))
-    }
-
-    pub(super) fn check_casts(&self) {
-        let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
-        for cast in deferred_cast_checks.drain(..) {
-            cast.check(self);
-        }
-    }
-
-    pub(super) fn resolve_generator_interiors(&self, def_id: DefId) {
-        let mut generators = self.deferred_generator_interiors.borrow_mut();
-        for (body_id, interior, kind) in generators.drain(..) {
-            self.select_obligations_where_possible(false, |_| {});
-            super::generator_interior::resolve_interior(self, def_id, body_id, interior, kind);
-        }
-    }
-
-    // Tries to apply a fallback to `ty` if it is an unsolved variable.
-    //
-    // - Unconstrained ints are replaced with `i32`.
-    //
-    // - Unconstrained floats are replaced with `f64`.
-    //
-    // - Non-numerics get replaced with `!` when `#![feature(never_type_fallback)]`
-    //   is enabled. Otherwise, they are replaced with `()`.
-    //
-    // Fallback becomes very dubious if we have encountered type-checking errors.
-    // In that case, fallback to Error.
-    // The return value indicates whether fallback has occurred.
-    pub(super) fn fallback_if_possible(&self, ty: Ty<'tcx>, mode: FallbackMode) -> bool {
-        use rustc_middle::ty::error::UnconstrainedNumeric::Neither;
-        use rustc_middle::ty::error::UnconstrainedNumeric::{UnconstrainedFloat, UnconstrainedInt};
-
-        assert!(ty.is_ty_infer());
-        let fallback = match self.type_is_unconstrained_numeric(ty) {
-            _ if self.is_tainted_by_errors() => self.tcx().ty_error(),
-            UnconstrainedInt => self.tcx.types.i32,
-            UnconstrainedFloat => self.tcx.types.f64,
-            Neither if self.type_var_diverges(ty) => self.tcx.mk_diverging_default(),
-            Neither => {
-                // This type variable was created from the instantiation of an opaque
-                // type. The fact that we're attempting to perform fallback for it
-                // means that the function neither constrained it to a concrete
-                // type, nor to the opaque type itself.
-                //
-                // For example, in this code:
-                //
-                //```
-                // type MyType = impl Copy;
-                // fn defining_use() -> MyType { true }
-                // fn other_use() -> MyType { defining_use() }
-                // ```
-                //
-                // `defining_use` will constrain the instantiated inference
-                // variable to `bool`, while `other_use` will constrain
-                // the instantiated inference variable to `MyType`.
-                //
-                // When we process opaque types during writeback, we
-                // will handle cases like `other_use`, and not count
-                // them as defining usages.
-                //
-                // However, we also need to handle cases like this:
-                //
-                // ```rust
-                // pub type Foo = impl Copy;
-                // fn produce() -> Option<Foo> {
-                //     None
-                //  }
-                //  ```
-                //
-                // In the above snippet, the inference variable created by
-                // instantiating `Option<Foo>` will be completely unconstrained.
-                // We treat this as a non-defining use by making the inference
-                // variable fall back to the opaque type itself.
-                if let FallbackMode::All = mode {
-                    if let Some(opaque_ty) = self.opaque_types_vars.borrow().get(ty) {
-                        debug!(
-                            "fallback_if_possible: falling back opaque type var {:?} to {:?}",
-                            ty, opaque_ty
-                        );
-                        *opaque_ty
-                    } else {
-                        return false;
-                    }
-                } else {
-                    return false;
-                }
-            }
-        };
-        debug!("fallback_if_possible: defaulting `{:?}` to `{:?}`", ty, fallback);
-        self.demand_eqtype(rustc_span::DUMMY_SP, ty, fallback);
-        true
-    }
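
A minimal sketch of the numeric fallback described in the comment above, observable from ordinary user code:

```rust
fn main() {
    // Nothing else constrains these literals, so type inference falls back
    // to `i32` for the integer and `f64` for the float.
    let n = 1;
    let f = 1.0;
    assert_eq!(std::mem::size_of_val(&n), 4); // i32
    assert_eq!(std::mem::size_of_val(&f), 8); // f64
}
```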
-
-    pub(super) fn select_all_obligations_or_error(&self) {
-        debug!("select_all_obligations_or_error");
-        if let Err(errors) = self.fulfillment_cx.borrow_mut().select_all_or_error(&self) {
-            self.report_fulfillment_errors(&errors, self.inh.body_id, false);
-        }
-    }
-
-    /// Select as many obligations as we can at present.
-    pub(super) fn select_obligations_where_possible(
-        &self,
-        fallback_has_occurred: bool,
-        mutate_fullfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
-    ) {
-        let result = self.fulfillment_cx.borrow_mut().select_where_possible(self);
-        if let Err(mut errors) = result {
-            mutate_fullfillment_errors(&mut errors);
-            self.report_fulfillment_errors(&errors, self.inh.body_id, fallback_has_occurred);
-        }
-    }
-
-    /// For the overloaded place expressions (`*x`, `x[3]`), the trait
-    /// returns a type of `&T`, but the actual type we assign to the
-    /// *expression* is `T`. So this function just peels off the return
-    /// type by one layer to yield `T`.
-    pub(super) fn make_overloaded_place_return_type(
-        &self,
-        method: MethodCallee<'tcx>,
-    ) -> ty::TypeAndMut<'tcx> {
-        // extract method return type, which will be &T;
-        let ret_ty = method.sig.output();
-
-        // method returns &T, but the type as visible to user is T, so deref
-        ret_ty.builtin_deref(true).unwrap()
-    }
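
The peeling described above corresponds to what users observe with overloaded indexing: the trait method returns `&T`, while the place expression itself has type `T`. A small sketch:

```rust
use std::ops::Index;

fn main() {
    let v = vec![10, 20, 30];
    // Calling the `Index` trait method directly yields a reference...
    let r: &i32 = Index::index(&v, 1usize);
    // ...but the overloaded place expression `v[1]` has the pointee type.
    let x: i32 = v[1];
    assert_eq!(*r, x);
}
```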
-
-    pub(super) fn check_method_argument_types(
-        &self,
-        sp: Span,
-        expr: &'tcx hir::Expr<'tcx>,
-        method: Result<MethodCallee<'tcx>, ()>,
-        args_no_rcvr: &'tcx [hir::Expr<'tcx>],
-        tuple_arguments: TupleArgumentsFlag,
-        expected: Expectation<'tcx>,
-    ) -> Ty<'tcx> {
-        let has_error = match method {
-            Ok(method) => method.substs.references_error() || method.sig.references_error(),
-            Err(_) => true,
-        };
-        if has_error {
-            let err_inputs = self.err_args(args_no_rcvr.len());
-
-            let err_inputs = match tuple_arguments {
-                DontTupleArguments => err_inputs,
-                TupleArguments => vec![self.tcx.intern_tup(&err_inputs[..])],
-            };
-
-            self.check_argument_types(
-                sp,
-                expr,
-                &err_inputs[..],
-                &[],
-                args_no_rcvr,
-                false,
-                tuple_arguments,
-                None,
-            );
-            return self.tcx.ty_error();
-        }
-
-        let method = method.unwrap();
-        // HACK(eddyb) ignore self in the definition (see above).
-        let expected_arg_tys = self.expected_inputs_for_expected_output(
-            sp,
-            expected,
-            method.sig.output(),
-            &method.sig.inputs()[1..],
-        );
-        self.check_argument_types(
-            sp,
-            expr,
-            &method.sig.inputs()[1..],
-            &expected_arg_tys[..],
-            args_no_rcvr,
-            method.sig.c_variadic,
-            tuple_arguments,
-            self.tcx.hir().span_if_local(method.def_id),
-        );
-        method.sig.output()
-    }
-
-    fn self_type_matches_expected_vid(
-        &self,
-        trait_ref: ty::PolyTraitRef<'tcx>,
-        expected_vid: ty::TyVid,
-    ) -> bool {
-        let self_ty = self.shallow_resolve(trait_ref.skip_binder().self_ty());
-        debug!(
-            "self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?}, expected_vid={:?})",
-            trait_ref, self_ty, expected_vid
-        );
-        match *self_ty.kind() {
-            ty::Infer(ty::TyVar(found_vid)) => {
-                // FIXME: consider using `sub_root_var` here so we
-                // can see through subtyping.
-                let found_vid = self.root_var(found_vid);
-                debug!("self_type_matches_expected_vid - found_vid={:?}", found_vid);
-                expected_vid == found_vid
-            }
-            _ => false,
-        }
-    }
-
-    pub(super) fn obligations_for_self_ty<'b>(
-        &'b self,
-        self_ty: ty::TyVid,
-    ) -> impl Iterator<Item = (ty::PolyTraitRef<'tcx>, traits::PredicateObligation<'tcx>)>
-    + Captures<'tcx>
-    + 'b {
-        // FIXME: consider using `sub_root_var` here so we
-        // can see through subtyping.
-        let ty_var_root = self.root_var(self_ty);
-        debug!(
-            "obligations_for_self_ty: self_ty={:?} ty_var_root={:?} pending_obligations={:?}",
-            self_ty,
-            ty_var_root,
-            self.fulfillment_cx.borrow().pending_obligations()
-        );
-
-        self.fulfillment_cx
-            .borrow()
-            .pending_obligations()
-            .into_iter()
-            .filter_map(move |obligation| {
-                match obligation.predicate.skip_binders() {
-                    ty::PredicateAtom::Projection(data) => {
-                        Some((ty::Binder::bind(data).to_poly_trait_ref(self.tcx), obligation))
-                    }
-                    ty::PredicateAtom::Trait(data, _) => {
-                        Some((ty::Binder::bind(data).to_poly_trait_ref(), obligation))
-                    }
-                    ty::PredicateAtom::Subtype(..) => None,
-                    ty::PredicateAtom::RegionOutlives(..) => None,
-                    ty::PredicateAtom::TypeOutlives(..) => None,
-                    ty::PredicateAtom::WellFormed(..) => None,
-                    ty::PredicateAtom::ObjectSafe(..) => None,
-                    ty::PredicateAtom::ConstEvaluatable(..) => None,
-                    ty::PredicateAtom::ConstEquate(..) => None,
-                    // N.B., this predicate is created by breaking down a
-                    // `ClosureType: FnFoo()` predicate, where
-                    // `ClosureType` represents some `Closure`. It can't
-                    // possibly be referring to the current closure,
-                    // because we haven't produced the `Closure` for
-                    // this closure yet; this is exactly why the other
                    // code is looking for a self type of an unresolved
-                    // inference variable.
-                    ty::PredicateAtom::ClosureKind(..) => None,
-                    ty::PredicateAtom::TypeWellFormedFromEnv(..) => None,
-                }
-            })
-            .filter(move |(tr, _)| self.self_type_matches_expected_vid(*tr, ty_var_root))
-    }
-
-    pub(super) fn type_var_is_sized(&self, self_ty: ty::TyVid) -> bool {
-        self.obligations_for_self_ty(self_ty)
-            .any(|(tr, _)| Some(tr.def_id()) == self.tcx.lang_items().sized_trait())
-    }
-
-    /// Generic function that factors out common logic from function calls,
-    /// method calls and overloaded operators.
-    pub(super) fn check_argument_types(
-        &self,
-        sp: Span,
-        expr: &'tcx hir::Expr<'tcx>,
-        fn_inputs: &[Ty<'tcx>],
-        expected_arg_tys: &[Ty<'tcx>],
-        args: &'tcx [hir::Expr<'tcx>],
-        c_variadic: bool,
-        tuple_arguments: TupleArgumentsFlag,
-        def_span: Option<Span>,
-    ) {
-        let tcx = self.tcx;
-        // Grab the argument types, supplying fresh type variables
-        // if the wrong number of arguments were supplied
-        let supplied_arg_count = if tuple_arguments == DontTupleArguments { args.len() } else { 1 };
-
-        // All the input types from the fn signature must outlive the call
-        // so as to validate implied bounds.
-        for (&fn_input_ty, arg_expr) in fn_inputs.iter().zip(args.iter()) {
-            self.register_wf_obligation(fn_input_ty.into(), arg_expr.span, traits::MiscObligation);
-        }
-
-        let expected_arg_count = fn_inputs.len();
-
-        let param_count_error = |expected_count: usize,
-                                 arg_count: usize,
-                                 error_code: &str,
-                                 c_variadic: bool,
-                                 sugg_unit: bool| {
-            let (span, start_span, args) = match &expr.kind {
-                hir::ExprKind::Call(hir::Expr { span, .. }, args) => (*span, *span, &args[..]),
-                hir::ExprKind::MethodCall(path_segment, span, args, _) => (
-                    *span,
-                    // `sp` doesn't point at the whole `foo.bar()`, only at `bar`.
-                    path_segment
-                        .args
-                        .and_then(|args| args.args.iter().last())
-                        // Account for `foo.bar::<T>()`.
-                        .map(|arg| {
-                            // Skip the closing `>`.
-                            tcx.sess
-                                .source_map()
-                                .next_point(tcx.sess.source_map().next_point(arg.span()))
-                        })
-                        .unwrap_or(*span),
-                    &args[1..], // Skip the receiver.
-                ),
-                k => span_bug!(sp, "checking argument types on a non-call: `{:?}`", k),
-            };
-            let arg_spans = if args.is_empty() {
-                // foo()
-                // ^^^-- supplied 0 arguments
-                // |
-                // expected 2 arguments
-                vec![tcx.sess.source_map().next_point(start_span).with_hi(sp.hi())]
-            } else {
-                // foo(1, 2, 3)
-                // ^^^ -  -  - supplied 3 arguments
-                // |
-                // expected 2 arguments
-                args.iter().map(|arg| arg.span).collect::<Vec<Span>>()
-            };
-
-            let mut err = tcx.sess.struct_span_err_with_code(
-                span,
-                &format!(
-                    "this function takes {}{} but {} {} supplied",
-                    if c_variadic { "at least " } else { "" },
-                    potentially_plural_count(expected_count, "argument"),
-                    potentially_plural_count(arg_count, "argument"),
-                    if arg_count == 1 { "was" } else { "were" }
-                ),
-                DiagnosticId::Error(error_code.to_owned()),
-            );
-            let label = format!("supplied {}", potentially_plural_count(arg_count, "argument"));
-            for (i, span) in arg_spans.into_iter().enumerate() {
-                err.span_label(
-                    span,
-                    if arg_count == 0 || i + 1 == arg_count { &label } else { "" },
-                );
-            }
-
-            if let Some(def_s) = def_span.map(|sp| tcx.sess.source_map().guess_head_span(sp)) {
-                err.span_label(def_s, "defined here");
-            }
-            if sugg_unit {
-                let sugg_span = tcx.sess.source_map().end_point(expr.span);
-                // remove closing `)` from the span
-                let sugg_span = sugg_span.shrink_to_lo();
-                err.span_suggestion(
-                    sugg_span,
-                    "expected the unit value `()`; create it with empty parentheses",
-                    String::from("()"),
-                    Applicability::MachineApplicable,
-                );
-            } else {
-                err.span_label(
-                    span,
-                    format!(
-                        "expected {}{}",
-                        if c_variadic { "at least " } else { "" },
-                        potentially_plural_count(expected_count, "argument")
-                    ),
-                );
-            }
-            err.emit();
-        };
-
-        let mut expected_arg_tys = expected_arg_tys.to_vec();
-
-        let formal_tys = if tuple_arguments == TupleArguments {
-            let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]);
-            match tuple_type.kind() {
-                ty::Tuple(arg_types) if arg_types.len() != args.len() => {
-                    param_count_error(arg_types.len(), args.len(), "E0057", false, false);
-                    expected_arg_tys = vec![];
-                    self.err_args(args.len())
-                }
-                ty::Tuple(arg_types) => {
-                    expected_arg_tys = match expected_arg_tys.get(0) {
-                        Some(&ty) => match ty.kind() {
-                            ty::Tuple(ref tys) => tys.iter().map(|k| k.expect_ty()).collect(),
-                            _ => vec![],
-                        },
-                        None => vec![],
-                    };
-                    arg_types.iter().map(|k| k.expect_ty()).collect()
-                }
-                _ => {
-                    struct_span_err!(
-                        tcx.sess,
-                        sp,
-                        E0059,
-                        "cannot use call notation; the first type parameter \
-                         for the function trait is neither a tuple nor unit"
-                    )
-                    .emit();
-                    expected_arg_tys = vec![];
-                    self.err_args(args.len())
-                }
-            }
-        } else if expected_arg_count == supplied_arg_count {
-            fn_inputs.to_vec()
-        } else if c_variadic {
-            if supplied_arg_count >= expected_arg_count {
-                fn_inputs.to_vec()
-            } else {
-                param_count_error(expected_arg_count, supplied_arg_count, "E0060", true, false);
-                expected_arg_tys = vec![];
-                self.err_args(supplied_arg_count)
-            }
-        } else {
-            // is the missing argument of type `()`?
-            let sugg_unit = if expected_arg_tys.len() == 1 && supplied_arg_count == 0 {
-                self.resolve_vars_if_possible(&expected_arg_tys[0]).is_unit()
-            } else if fn_inputs.len() == 1 && supplied_arg_count == 0 {
-                self.resolve_vars_if_possible(&fn_inputs[0]).is_unit()
-            } else {
-                false
-            };
-            param_count_error(expected_arg_count, supplied_arg_count, "E0061", false, sugg_unit);
-
-            expected_arg_tys = vec![];
-            self.err_args(supplied_arg_count)
-        };
-
-        debug!(
-            "check_argument_types: formal_tys={:?}",
-            formal_tys.iter().map(|t| self.ty_to_string(*t)).collect::<Vec<String>>()
-        );
-
-        // If there is no expectation, expect formal_tys.
-        let expected_arg_tys =
-            if !expected_arg_tys.is_empty() { expected_arg_tys } else { formal_tys.clone() };
-
-        let mut final_arg_types: Vec<(usize, Ty<'_>, Ty<'_>)> = vec![];
-
-        // Check the arguments.
-        // We do this in a pretty awful way: first we type-check any arguments
-        // that are not closures, then we type-check the closures. This is so
-        // that we have more information about the types of arguments when we
-        // type-check the functions. This isn't really the right way to do this.
-        for &check_closures in &[false, true] {
-            debug!("check_closures={}", check_closures);
-
-            // More awful hacks: before we check argument types, try to do
-            // an "opportunistic" trait resolution of any trait bounds on
-            // the call. This helps coercions.
-            if check_closures {
-                self.select_obligations_where_possible(false, |errors| {
-                    self.point_at_type_arg_instead_of_call_if_possible(errors, expr);
-                    self.point_at_arg_instead_of_call_if_possible(
-                        errors,
-                        &final_arg_types[..],
-                        sp,
-                        &args,
-                    );
-                })
-            }
-
-            // For C-variadic functions, we don't have a declared type for all of
-            // the arguments, hence we only do our usual type checking with
-            // the arguments whose types we do know.
-            let t = if c_variadic {
-                expected_arg_count
-            } else if tuple_arguments == TupleArguments {
-                args.len()
-            } else {
-                supplied_arg_count
-            };
-            for (i, arg) in args.iter().take(t).enumerate() {
-                // Warn only for the first loop (the "no closures" one).
-                // Closure arguments themselves can't be diverging, but
-                // a previous argument can, e.g., `foo(panic!(), || {})`.
-                if !check_closures {
-                    self.warn_if_unreachable(arg.hir_id, arg.span, "expression");
-                }
-
-                let is_closure = match arg.kind {
-                    ExprKind::Closure(..) => true,
-                    _ => false,
-                };
-
-                if is_closure != check_closures {
-                    continue;
-                }
-
-                debug!("checking the argument");
-                let formal_ty = formal_tys[i];
-
-                // The special-cased logic below has three functions:
-                // 1. Provide as good of an expected type as possible.
-                let expected = Expectation::rvalue_hint(self, expected_arg_tys[i]);
-
-                let checked_ty = self.check_expr_with_expectation(&arg, expected);
-
-                // 2. Coerce to the most detailed type that could be coerced
-                //    to, which is `expected_ty` if `rvalue_hint` returns an
-                //    `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
-                let coerce_ty = expected.only_has_type(self).unwrap_or(formal_ty);
-                // We're processing function arguments so we definitely want to use
-                // two-phase borrows.
-                self.demand_coerce(&arg, checked_ty, coerce_ty, None, AllowTwoPhase::Yes);
-                final_arg_types.push((i, checked_ty, coerce_ty));
-
-                // 3. Relate the expected type and the formal one,
-                //    if the expected type was used for the coercion.
-                self.demand_suptype(arg.span, formal_ty, coerce_ty);
-            }
-        }
-
-        // We also need to make sure we at least write the ty of the other
-        // arguments which we skipped above.
-        if c_variadic {
-            fn variadic_error<'tcx>(s: &Session, span: Span, t: Ty<'tcx>, cast_ty: &str) {
-                use crate::structured_errors::{StructuredDiagnostic, VariadicError};
-                VariadicError::new(s, span, t, cast_ty).diagnostic().emit();
-            }
-
-            for arg in args.iter().skip(expected_arg_count) {
-                let arg_ty = self.check_expr(&arg);
-
-                // There are a few types which get autopromoted when passed via varargs
-                // in C but we just error out instead and require explicit casts.
-                let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
-                match arg_ty.kind() {
-                    ty::Float(ast::FloatTy::F32) => {
-                        variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
-                    }
-                    ty::Int(ast::IntTy::I8 | ast::IntTy::I16) | ty::Bool => {
-                        variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
-                    }
-                    ty::Uint(ast::UintTy::U8 | ast::UintTy::U16) => {
-                        variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
-                    }
-                    ty::FnDef(..) => {
-                        let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
-                        let ptr_ty = self.resolve_vars_if_possible(&ptr_ty);
-                        variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
-                    }
-                    _ => {}
-                }
-            }
-        }
-    }
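
One user-visible consequence of checking non-closure arguments before closures (see the comment near the top of `check_argument_types`) is that closure parameter types can be inferred from the other arguments. A hedged sketch; `apply` is a made-up helper:

```rust
fn apply<T, F: Fn(T) -> T>(value: T, f: F) -> T {
    f(value)
}

fn main() {
    // The non-closure argument `41` is type-checked first, so by the time the
    // closure is checked its parameter is already known to be an integer and
    // `|n| n + 1` needs no annotation.
    let answer = apply(41, |n| n + 1);
    assert_eq!(answer, 42);
}
```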
-
-    pub(super) fn err_args(&self, len: usize) -> Vec<Ty<'tcx>> {
-        vec![self.tcx.ty_error(); len]
-    }
-
-    /// Given a vec of evaluated `FulfillmentError`s and the `fn` call's argument expressions, we walk
-    /// the checked and coerced types for each argument to see if any of the `FulfillmentError`s
-    /// reference a type argument. We also walk the checked type because the coerced type may not be
-    /// directly comparable with the predicate type (because of coercion). If the types match
-    /// for either checked or coerced type, and there's only *one* argument that does, we point at
-    /// the corresponding argument's expression span instead of the `fn` call path span.
-    fn point_at_arg_instead_of_call_if_possible(
-        &self,
-        errors: &mut Vec<traits::FulfillmentError<'tcx>>,
-        final_arg_types: &[(usize, Ty<'tcx>, Ty<'tcx>)],
-        call_sp: Span,
-        args: &'tcx [hir::Expr<'tcx>],
-    ) {
-        // We *do not* do this for desugared call spans to keep good diagnostics when involving
-        // the `?` operator.
-        if call_sp.desugaring_kind().is_some() {
-            return;
-        }
-
-        for error in errors {
-            // We only try to point at an argument if the cause is somewhere inside the expression.
-            // Otherwise, the cause is somewhere else and we should not change
-            // anything, because we could break the correct span.
-            if !call_sp.contains(error.obligation.cause.span) {
-                continue;
-            }
-
-            if let ty::PredicateAtom::Trait(predicate, _) =
-                error.obligation.predicate.skip_binders()
-            {
-                // Collect the argument position for all arguments that could have caused this
-                // `FulfillmentError`.
-                let mut referenced_in = final_arg_types
-                    .iter()
-                    .map(|&(i, checked_ty, _)| (i, checked_ty))
-                    .chain(final_arg_types.iter().map(|&(i, _, coerced_ty)| (i, coerced_ty)))
-                    .flat_map(|(i, ty)| {
-                        let ty = self.resolve_vars_if_possible(&ty);
-                        // We walk the argument type because the argument's type could have
-                        // been `Option<T>`, but the `FulfillmentError` references `T`.
-                        if ty.walk().any(|arg| arg == predicate.self_ty().into()) {
-                            Some(i)
-                        } else {
-                            None
-                        }
-                    })
-                    .collect::<Vec<usize>>();
-
-                // Both checked and coerced types could have matched, thus we need to remove
-                // duplicates.
-
-                // We're sorting plain `usize` values here, so an unstable sort is fine.
-                referenced_in.sort_unstable();
-                referenced_in.dedup();
-
-                if let (Some(ref_in), None) = (referenced_in.pop(), referenced_in.pop()) {
-                    // We make sure that only *one* argument matches the obligation failure
-                    // and we assign the obligation's span to its expression's.
-                    error.obligation.cause.make_mut().span = args[ref_in].span;
-                    error.points_at_arg_span = true;
-                }
-            }
-        }
-    }
-
-    /// Given a vec of evaluated `FulfillmentError`s and an `fn` call expression, we walk the
-    /// `PathSegment`s and resolve their type parameters to see if any of the `FulfillmentError`s
-    /// were caused by them. If they were, we point at the corresponding type argument's span
-    /// instead of the `fn` call path span.
-    fn point_at_type_arg_instead_of_call_if_possible(
-        &self,
-        errors: &mut Vec<traits::FulfillmentError<'tcx>>,
-        call_expr: &'tcx hir::Expr<'tcx>,
-    ) {
-        if let hir::ExprKind::Call(path, _) = &call_expr.kind {
-            if let hir::ExprKind::Path(qpath) = &path.kind {
-                if let hir::QPath::Resolved(_, path) = &qpath {
-                    for error in errors {
-                        if let ty::PredicateAtom::Trait(predicate, _) =
-                            error.obligation.predicate.skip_binders()
-                        {
-                            // If any of the type arguments in this path segment caused the
-                            // `FulfillmentError`, point at its span (#61860).
-                            for arg in path
-                                .segments
-                                .iter()
-                                .filter_map(|seg| seg.args.as_ref())
-                                .flat_map(|a| a.args.iter())
-                            {
-                                if let hir::GenericArg::Type(hir_ty) = &arg {
-                                    if let hir::TyKind::Path(hir::QPath::TypeRelative(..)) =
-                                        &hir_ty.kind
-                                    {
-                                        // Avoid ICE with associated types. As this is best
-                                        // effort only, it's ok to ignore the case. It
-                                        // would trigger in `is_send::<T::AssocType>();`
-                                        // from `typeck-default-trait-impl-assoc-type.rs`.
-                                    } else {
-                                        let ty = AstConv::ast_ty_to_ty(self, hir_ty);
-                                        let ty = self.resolve_vars_if_possible(&ty);
-                                        if ty == predicate.self_ty() {
-                                            error.obligation.cause.make_mut().span = hir_ty.span;
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    // AST fragment checking
-    pub(super) fn check_lit(&self, lit: &hir::Lit, expected: Expectation<'tcx>) -> Ty<'tcx> {
-        let tcx = self.tcx;
-
-        match lit.node {
-            ast::LitKind::Str(..) => tcx.mk_static_str(),
-            ast::LitKind::ByteStr(ref v) => {
-                tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.u8, v.len() as u64))
-            }
-            ast::LitKind::Byte(_) => tcx.types.u8,
-            ast::LitKind::Char(_) => tcx.types.char,
-            ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(t),
-            ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(t),
-            ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
-                let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
-                    ty::Int(_) | ty::Uint(_) => Some(ty),
-                    ty::Char => Some(tcx.types.u8),
-                    ty::RawPtr(..) => Some(tcx.types.usize),
-                    ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
-                    _ => None,
-                });
-                opt_ty.unwrap_or_else(|| self.next_int_var())
-            }
-            ast::LitKind::Float(_, ast::LitFloatType::Suffixed(t)) => tcx.mk_mach_float(t),
-            ast::LitKind::Float(_, ast::LitFloatType::Unsuffixed) => {
-                let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
-                    ty::Float(_) => Some(ty),
-                    _ => None,
-                });
-                opt_ty.unwrap_or_else(|| self.next_float_var())
-            }
-            ast::LitKind::Bool(_) => tcx.types.bool,
-            ast::LitKind::Err(_) => tcx.ty_error(),
-        }
-    }
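
The expectation-driven typing of unsuffixed literals in `check_lit` is observable from plain code; a small sketch:

```rust
fn main() {
    let a: u8 = 255;  // unsuffixed integer literal adopts the expected `u8`
    let b = 255;      // no expectation: the literal later falls back to `i32`
    let c: f32 = 1.5; // unsuffixed float literal adopts the expected `f32`
    assert_eq!(std::mem::size_of_val(&a), 1);
    assert_eq!(std::mem::size_of_val(&b), 4);
    assert_eq!(std::mem::size_of_val(&c), 4);
}
```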
-
-    /// Unifies the output type with the expected type early, for more coercions
-    /// and forward type information on the input expressions.
-    pub(super) fn expected_inputs_for_expected_output(
-        &self,
-        call_span: Span,
-        expected_ret: Expectation<'tcx>,
-        formal_ret: Ty<'tcx>,
-        formal_args: &[Ty<'tcx>],
-    ) -> Vec<Ty<'tcx>> {
-        let formal_ret = self.resolve_vars_with_obligations(formal_ret);
-        let ret_ty = match expected_ret.only_has_type(self) {
-            Some(ret) => ret,
-            None => return Vec::new(),
-        };
-        let expect_args = self
-            .fudge_inference_if_ok(|| {
-                // Attempt to apply a subtyping relationship between the formal
-                // return type (likely containing type variables if the function
-                // is polymorphic) and the expected return type.
-                // No argument expectations are produced if unification fails.
-                let origin = self.misc(call_span);
-                let ures = self.at(&origin, self.param_env).sup(ret_ty, &formal_ret);
-
-                // FIXME(#27336) can't use ? here, Try::from_error doesn't default
-                // to identity so the resulting type is not constrained.
-                match ures {
-                    Ok(ok) => {
-                        // Process any obligations locally as much as
-                        // we can.  We don't care if some things turn
-                        // out unconstrained or ambiguous, as we're
-                        // just trying to get hints here.
-                        self.save_and_restore_in_snapshot_flag(|_| {
-                            let mut fulfill = TraitEngine::new(self.tcx);
-                            for obligation in ok.obligations {
-                                fulfill.register_predicate_obligation(self, obligation);
-                            }
-                            fulfill.select_where_possible(self)
-                        })
-                        .map_err(|_| ())?;
-                    }
-                    Err(_) => return Err(()),
-                }
-
-                // Record all the argument types, with the substitutions
-                // produced from the above subtyping unification.
-                Ok(formal_args.iter().map(|ty| self.resolve_vars_if_possible(ty)).collect())
-            })
-            .unwrap_or_default();
-        debug!(
-            "expected_inputs_for_expected_output(formal={:?} -> {:?}, expected={:?} -> {:?})",
-            formal_args, formal_ret, expect_args, expected_ret
-        );
-        expect_args
-    }
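
A sketch of the forward flow of type information that `expected_inputs_for_expected_output` enables; `wrap` is a made-up generic function:

```rust
fn wrap<T>(value: T) -> Vec<T> {
    vec![value]
}

fn main() {
    // The expected return type `Vec<u8>` is related to the formal return type
    // `Vec<T>` before the argument is checked, so the literal `1` is checked
    // against an expected `u8` instead of falling back to `i32`.
    let bytes: Vec<u8> = wrap(1);
    assert_eq!(std::mem::size_of_val(&bytes[0]), 1);
}
```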
-
-    pub fn check_struct_path(
-        &self,
-        qpath: &QPath<'_>,
-        hir_id: hir::HirId,
-    ) -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> {
-        let path_span = qpath.qself_span();
-        let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id);
-        let variant = match def {
-            Res::Err => {
-                self.set_tainted_by_errors();
-                return None;
-            }
-            Res::Def(DefKind::Variant, _) => match ty.kind() {
-                ty::Adt(adt, substs) => Some((adt.variant_of_res(def), adt.did, substs)),
-                _ => bug!("unexpected type: {:?}", ty),
-            },
-            Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
-            | Res::SelfTy(..) => match ty.kind() {
-                ty::Adt(adt, substs) if !adt.is_enum() => {
-                    Some((adt.non_enum_variant(), adt.did, substs))
-                }
-                _ => None,
-            },
-            _ => bug!("unexpected definition: {:?}", def),
-        };
-
-        if let Some((variant, did, substs)) = variant {
-            debug!("check_struct_path: did={:?} substs={:?}", did, substs);
-            self.write_user_type_annotation_from_substs(hir_id, did, substs, None);
-
-            // Check bounds on type arguments used in the path.
-            let (bounds, _) = self.instantiate_bounds(path_span, did, substs);
-            let cause =
-                traits::ObligationCause::new(path_span, self.body_id, traits::ItemObligation(did));
-            self.add_obligations_for_parameters(cause, bounds);
-
-            Some((variant, ty))
-        } else {
-            struct_span_err!(
-                self.tcx.sess,
-                path_span,
-                E0071,
-                "expected struct, variant or union type, found {}",
-                ty.sort_string(self.tcx)
-            )
-            .span_label(path_span, "not a struct")
-            .emit();
-            None
-        }
-    }
-
-    // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary.
-    // The newly resolved definition is written into `type_dependent_defs`.
-    fn finish_resolving_struct_path(
-        &self,
-        qpath: &QPath<'_>,
-        path_span: Span,
-        hir_id: hir::HirId,
-    ) -> (Res, Ty<'tcx>) {
-        match *qpath {
-            QPath::Resolved(ref maybe_qself, ref path) => {
-                let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
-                let ty = AstConv::res_to_ty(self, self_ty, path, true);
-                (path.res, ty)
-            }
-            QPath::TypeRelative(ref qself, ref segment) => {
-                let ty = self.to_ty(qself);
-
-                let res = if let hir::TyKind::Path(QPath::Resolved(_, ref path)) = qself.kind {
-                    path.res
-                } else {
-                    Res::Err
-                };
-                let result =
-                    AstConv::associated_path_to_ty(self, hir_id, path_span, ty, res, segment, true);
-                let ty = result.map(|(ty, _, _)| ty).unwrap_or_else(|_| self.tcx().ty_error());
-                let result = result.map(|(_, kind, def_id)| (kind, def_id));
-
-                // Write back the new resolution.
-                self.write_resolution(hir_id, result);
-
-                (result.map(|(kind, def_id)| Res::Def(kind, def_id)).unwrap_or(Res::Err), ty)
-            }
-            QPath::LangItem(lang_item, span) => {
-                self.resolve_lang_item_path(lang_item, span, hir_id)
-            }
-        }
-    }
-
-    pub(super) fn resolve_lang_item_path(
-        &self,
-        lang_item: hir::LangItem,
-        span: Span,
-        hir_id: hir::HirId,
-    ) -> (Res, Ty<'tcx>) {
-        let def_id = self.tcx.require_lang_item(lang_item, Some(span));
-        let def_kind = self.tcx.def_kind(def_id);
-
-        let item_ty = if let DefKind::Variant = def_kind {
-            self.tcx.type_of(self.tcx.parent(def_id).expect("variant w/out parent"))
-        } else {
-            self.tcx.type_of(def_id)
-        };
-        let substs = self.infcx.fresh_substs_for_item(span, def_id);
-        let ty = item_ty.subst(self.tcx, substs);
-
-        self.write_resolution(hir_id, Ok((def_kind, def_id)));
-        self.add_required_obligations(span, def_id, &substs);
-        (Res::Def(def_kind, def_id), ty)
-    }
-
-    /// Resolves an associated value path into a base type and associated constant, or method
-    /// resolution. The newly resolved definition is written into `type_dependent_defs`.
-    pub fn resolve_ty_and_res_ufcs<'b>(
-        &self,
-        qpath: &'b QPath<'b>,
-        hir_id: hir::HirId,
-        span: Span,
-    ) -> (Res, Option<Ty<'tcx>>, &'b [hir::PathSegment<'b>]) {
-        debug!("resolve_ty_and_res_ufcs: qpath={:?} hir_id={:?} span={:?}", qpath, hir_id, span);
-        let (ty, qself, item_segment) = match *qpath {
-            QPath::Resolved(ref opt_qself, ref path) => {
-                return (
-                    path.res,
-                    opt_qself.as_ref().map(|qself| self.to_ty(qself)),
-                    &path.segments[..],
-                );
-            }
-            QPath::TypeRelative(ref qself, ref segment) => (self.to_ty(qself), qself, segment),
-            QPath::LangItem(..) => bug!("`resolve_ty_and_res_ufcs` called on `LangItem`"),
-        };
-        if let Some(&cached_result) = self.typeck_results.borrow().type_dependent_defs().get(hir_id)
-        {
-            // Return directly on cache hit. This is useful to avoid doubly reporting
-            // errors with default match binding modes. See #44614.
-            let def =
-                cached_result.map(|(kind, def_id)| Res::Def(kind, def_id)).unwrap_or(Res::Err);
-            return (def, Some(ty), slice::from_ref(&**item_segment));
-        }
-        let item_name = item_segment.ident;
-        let result = self.resolve_ufcs(span, item_name, ty, hir_id).or_else(|error| {
-            let result = match error {
-                method::MethodError::PrivateMatch(kind, def_id, _) => Ok((kind, def_id)),
-                _ => Err(ErrorReported),
-            };
-            if item_name.name != kw::Invalid {
-                if let Some(mut e) = self.report_method_error(
-                    span,
-                    ty,
-                    item_name,
-                    SelfSource::QPath(qself),
-                    error,
-                    None,
-                ) {
-                    e.emit();
-                }
-            }
-            result
-        });
-
-        // Write back the new resolution.
-        self.write_resolution(hir_id, result);
-        (
-            result.map(|(kind, def_id)| Res::Def(kind, def_id)).unwrap_or(Res::Err),
-            Some(ty),
-            slice::from_ref(&**item_segment),
-        )
-    }
-
-    pub fn check_decl_initializer(
-        &self,
-        local: &'tcx hir::Local<'tcx>,
-        init: &'tcx hir::Expr<'tcx>,
-    ) -> Ty<'tcx> {
-        // FIXME(tschottdorf): `contains_explicit_ref_binding()` must be removed
-        // for #42640 (default match binding modes).
-        //
-        // See #44848.
-        let ref_bindings = local.pat.contains_explicit_ref_binding();
-
-        let local_ty = self.local_ty(init.span, local.hir_id).revealed_ty;
-        if let Some(m) = ref_bindings {
-            // Somewhat subtle: if we have a `ref` binding in the pattern,
-            // we want to avoid introducing coercions for the RHS. This is
-            // both because it helps preserve sanity and, in the case of
-            // ref mut, for soundness (issue #23116). In particular, in
-            // the latter case, we need to be clear that the type of the
-            // referent for the reference that results is *equal to* the
-            // type of the place it is referencing, and not some
-            // supertype thereof.
-            let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m));
-            self.demand_eqtype(init.span, local_ty, init_ty);
-            init_ty
-        } else {
-            self.check_expr_coercable_to_type(init, local_ty, None)
-        }
-    }
-
-    /// Type check a `let` statement.
-    pub fn check_decl_local(&self, local: &'tcx hir::Local<'tcx>) {
-        // Determine and write the type which we'll check the pattern against.
-        let ty = self.local_ty(local.span, local.hir_id).decl_ty;
-        self.write_ty(local.hir_id, ty);
-
-        // Type check the initializer.
-        if let Some(ref init) = local.init {
-            let init_ty = self.check_decl_initializer(local, &init);
-            self.overwrite_local_ty_if_err(local, ty, init_ty);
-        }
-
-        // Does the expected pattern type originate from an expression and what is the span?
-        let (origin_expr, ty_span) = match (local.ty, local.init) {
-            (Some(ty), _) => (false, Some(ty.span)), // Bias towards the explicit user type.
-            (_, Some(init)) => (true, Some(init.span)), // No explicit type; so use the scrutinee.
-            _ => (false, None), // We have `let $pat;`, so the expected type is unconstrained.
-        };
-
-        // Type check the pattern. Override if necessary to avoid knock-on errors.
-        self.check_pat_top(&local.pat, ty, ty_span, origin_expr);
-        let pat_ty = self.node_ty(local.pat.hir_id);
-        self.overwrite_local_ty_if_err(local, ty, pat_ty);
-    }
-
-    fn overwrite_local_ty_if_err(
-        &self,
-        local: &'tcx hir::Local<'tcx>,
-        decl_ty: Ty<'tcx>,
-        ty: Ty<'tcx>,
-    ) {
-        if ty.references_error() {
-            // Override the types everywhere with `err()` to avoid knock on errors.
-            self.write_ty(local.hir_id, ty);
-            self.write_ty(local.pat.hir_id, ty);
-            let local_ty = LocalTy { decl_ty, revealed_ty: ty };
-            self.locals.borrow_mut().insert(local.hir_id, local_ty);
-            self.locals.borrow_mut().insert(local.pat.hir_id, local_ty);
-        }
-    }
-
-    pub(super) fn suggest_semicolon_at_end(&self, span: Span, err: &mut DiagnosticBuilder<'_>) {
-        err.span_suggestion_short(
-            span.shrink_to_hi(),
-            "consider using a semicolon here",
-            ";".to_string(),
-            Applicability::MachineApplicable,
-        );
-    }
-
-    pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>) {
-        // Don't do all the complex logic below for `DeclItem`.
-        match stmt.kind {
-            hir::StmtKind::Item(..) => return,
-            hir::StmtKind::Local(..) | hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
-        }
-
-        self.warn_if_unreachable(stmt.hir_id, stmt.span, "statement");
-
-        // Hide the outer diverging and `has_errors` flags.
-        let old_diverges = self.diverges.replace(Diverges::Maybe);
-        let old_has_errors = self.has_errors.replace(false);
-
-        match stmt.kind {
-            hir::StmtKind::Local(ref l) => {
-                self.check_decl_local(&l);
-            }
-            // Ignore for now.
-            hir::StmtKind::Item(_) => {}
-            hir::StmtKind::Expr(ref expr) => {
-                // Check with expected type of `()`.
-                self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit(), |err| {
-                    self.suggest_semicolon_at_end(expr.span, err);
-                });
-            }
-            hir::StmtKind::Semi(ref expr) => {
-                self.check_expr(&expr);
-            }
-        }
-
-        // Combine the diverging and `has_error` flags.
-        self.diverges.set(self.diverges.get() | old_diverges);
-        self.has_errors.set(self.has_errors.get() | old_has_errors);
-    }
-
-    pub fn check_block_no_value(&self, blk: &'tcx hir::Block<'tcx>) {
-        let unit = self.tcx.mk_unit();
-        let ty = self.check_block_with_expected(blk, ExpectHasType(unit));
-
-        // if the block produces a `!` value, that can always be
-        // (effectively) coerced to unit.
-        if !ty.is_never() {
-            self.demand_suptype(blk.span, unit, ty);
-        }
-    }
-
-    /// If `expr` is a `match` expression that has only one non-`!` arm, use that arm's tail
-    /// expression's `Span`; otherwise return `expr.span`. This is done to give better errors
-    /// when given code like the following:
-    /// ```text
-    /// if false { return 0i32; } else { 1u32 }
-    /// //                               ^^^^ point at this instead of the whole `if` expression
-    /// ```
-    fn get_expr_coercion_span(&self, expr: &hir::Expr<'_>) -> rustc_span::Span {
-        if let hir::ExprKind::Match(_, arms, _) = &expr.kind {
-            let arm_spans: Vec<Span> = arms
-                .iter()
-                .filter_map(|arm| {
-                    self.in_progress_typeck_results
-                        .and_then(|typeck_results| {
-                            typeck_results.borrow().node_type_opt(arm.body.hir_id)
-                        })
-                        .and_then(|arm_ty| {
-                            if arm_ty.is_never() {
-                                None
-                            } else {
-                                Some(match &arm.body.kind {
-                                    // Point at the tail expression when possible.
-                                    hir::ExprKind::Block(block, _) => {
-                                        block.expr.as_ref().map(|e| e.span).unwrap_or(block.span)
-                                    }
-                                    _ => arm.body.span,
-                                })
-                            }
-                        })
-                })
-                .collect();
-            if arm_spans.len() == 1 {
-                return arm_spans[0];
-            }
-        }
-        expr.span
-    }
-
-    pub(super) fn check_block_with_expected(
-        &self,
-        blk: &'tcx hir::Block<'tcx>,
-        expected: Expectation<'tcx>,
-    ) -> Ty<'tcx> {
-        let prev = {
-            let mut fcx_ps = self.ps.borrow_mut();
-            let unsafety_state = fcx_ps.recurse(blk);
-            replace(&mut *fcx_ps, unsafety_state)
-        };
-
-        // In some cases, blocks have just one exit, but other blocks
-        // can be targeted by multiple breaks. This can happen both
-        // with labeled blocks as well as when we desugar
-        // a `try { ... }` expression.
-        //
-        // Example 1:
-        //
-        //    'a: { if true { break 'a Err(()); } Ok(()) }
-        //
-        // Here we would wind up with two coercions, one from
-        // `Err(())` and the other from the tail expression
-        // `Ok(())`. If the tail expression is omitted, that's a
-        // "forced unit" -- unless the block diverges, in which
-        // case we can ignore the tail expression (e.g., `'a: {
-        // break 'a 22; }` would not force the type of the block
-        // to be `()`).
-        let tail_expr = blk.expr.as_ref();
-        let coerce_to_ty = expected.coercion_target_type(self, blk.span);
-        let coerce = if blk.targeted_by_break {
-            CoerceMany::new(coerce_to_ty)
-        } else {
-            let tail_expr: &[&hir::Expr<'_>] = match tail_expr {
-                Some(e) => slice::from_ref(e),
-                None => &[],
-            };
-            CoerceMany::with_coercion_sites(coerce_to_ty, tail_expr)
-        };
-
-        let prev_diverges = self.diverges.get();
-        let ctxt = BreakableCtxt { coerce: Some(coerce), may_break: false };
-
-        let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || {
-            for s in blk.stmts {
-                self.check_stmt(s);
-            }
-
-            // check the tail expression **without** holding the
-            // `enclosing_breakables` lock below.
-            let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected));
-
-            let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
-            let ctxt = enclosing_breakables.find_breakable(blk.hir_id);
-            let coerce = ctxt.coerce.as_mut().unwrap();
-            if let Some(tail_expr_ty) = tail_expr_ty {
-                let tail_expr = tail_expr.unwrap();
-                let span = self.get_expr_coercion_span(tail_expr);
-                let cause = self.cause(span, ObligationCauseCode::BlockTailExpression(blk.hir_id));
-                coerce.coerce(self, &cause, tail_expr, tail_expr_ty);
-            } else {
-                // Subtle: if there is no explicit tail expression,
-                // that is typically equivalent to a tail expression
-                // of `()` -- except if the block diverges. In that
-                // case, there is no value supplied from the tail
-                // expression (assuming there are no other breaks,
-                // this implies that the type of the block will be
-                // `!`).
-                //
-                // #41425 -- label the implicit `()` as being the
-                // "found type" here, rather than the "expected type".
-                if !self.diverges.get().is_always() {
-                    // #50009 -- Do not point at the entire fn block span, point at the return type
-                    // span, as it is the cause of the requirement, and
-                    // `consider_hint_about_removing_semicolon` will point at the last expression
-                    // if it were a relevant part of the error. This improves usability in editors
-                    // that highlight errors inline.
-                    let mut sp = blk.span;
-                    let mut fn_span = None;
-                    if let Some((decl, ident)) = self.get_parent_fn_decl(blk.hir_id) {
-                        let ret_sp = decl.output.span();
-                        if let Some(block_sp) = self.parent_item_span(blk.hir_id) {
-                            // HACK: in some cases (`ui/liveness/liveness-issue-2163.rs`) the
-                            // output would otherwise be incorrect and even misleading. Make sure
-                            // the span we're aiming at corresponds to a `fn` body.
-                            if block_sp == blk.span {
-                                sp = ret_sp;
-                                fn_span = Some(ident.span);
-                            }
-                        }
-                    }
-                    coerce.coerce_forced_unit(
-                        self,
-                        &self.misc(sp),
-                        &mut |err| {
-                            if let Some(expected_ty) = expected.only_has_type(self) {
-                                self.consider_hint_about_removing_semicolon(blk, expected_ty, err);
-                            }
-                            if let Some(fn_span) = fn_span {
-                                err.span_label(
-                                    fn_span,
-                                    "implicitly returns `()` as its body has no tail or `return` \
-                                     expression",
-                                );
-                            }
-                        },
-                        false,
-                    );
-                }
-            }
-        });
-
-        if ctxt.may_break {
-            // If we can break from the block, then the block's exit is always reachable
-            // (... as long as the entry is reachable) - regardless of the tail of the block.
-            self.diverges.set(prev_diverges);
-        }
-
-        let mut ty = ctxt.coerce.unwrap().complete(self);
-
-        if self.has_errors.get() || ty.references_error() {
-            ty = self.tcx.ty_error()
-        }
-
-        self.write_ty(blk.hir_id, ty);
-
-        *self.ps.borrow_mut() = prev;
-        ty
-    }
-
-    fn parent_item_span(&self, id: hir::HirId) -> Option<Span> {
-        let node = self.tcx.hir().get(self.tcx.hir().get_parent_item(id));
-        match node {
-            Node::Item(&hir::Item { kind: hir::ItemKind::Fn(_, _, body_id), .. })
-            | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(_, body_id), .. }) => {
-                let body = self.tcx.hir().body(body_id);
-                if let ExprKind::Block(block, _) = &body.value.kind {
-                    return Some(block.span);
-                }
-            }
-            _ => {}
-        }
-        None
-    }
-
-    /// Given a function block's `HirId`, returns its `FnDecl` if it exists, or `None` otherwise.
-    fn get_parent_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident)> {
-        let parent = self.tcx.hir().get(self.tcx.hir().get_parent_item(blk_id));
-        self.get_node_fn_decl(parent).map(|(fn_decl, ident, _)| (fn_decl, ident))
-    }
-
-    /// Given a function `Node`, return its `FnDecl` if it exists, or `None` otherwise.
-    pub(super) fn get_node_fn_decl(
-        &self,
-        node: Node<'tcx>,
-    ) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident, bool)> {
-        match node {
-            Node::Item(&hir::Item { ident, kind: hir::ItemKind::Fn(ref sig, ..), .. }) => {
-                // This is less than ideal; it will not suggest a return type span on any
-                // method called `main`, regardless of whether it is actually the entry point,
-                // but it will still present it as the reason for the expected type.
-                Some((&sig.decl, ident, ident.name != sym::main))
-            }
-            Node::TraitItem(&hir::TraitItem {
-                ident,
-                kind: hir::TraitItemKind::Fn(ref sig, ..),
-                ..
-            }) => Some((&sig.decl, ident, true)),
-            Node::ImplItem(&hir::ImplItem {
-                ident,
-                kind: hir::ImplItemKind::Fn(ref sig, ..),
-                ..
-            }) => Some((&sig.decl, ident, false)),
-            _ => None,
-        }
-    }
-
-    /// Given a `HirId`, return the `FnDecl` of the method it is enclosed by and whether a
-    /// suggestion can be made, `None` otherwise.
-    pub fn get_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, bool)> {
-        // Get enclosing Fn, if it is a function or a trait method, unless there's a `loop` or
-        // `while` before reaching it, as block tail returns are not available in them.
-        self.tcx.hir().get_return_block(blk_id).and_then(|blk_id| {
-            let parent = self.tcx.hir().get(blk_id);
-            self.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
-        })
-    }
-
-    /// On implicit return expressions with mismatched types, provides the following suggestions:
-    ///
-    /// - Points out the method's return type as the reason for the expected type.
-    /// - Possible missing semicolon.
-    /// - Possible missing return type if the return type is the default, and not `fn main()`.
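-    ///
-    /// For instance (illustrative sketch, not part of the original source), given a helper
-    /// `fn bar() -> u32`:
-    ///
-    /// ```text
-    /// fn foo() {
-    ///     bar() // error: mismatched types, expected `()`, found `u32`
-    ///           // help: try adding a semicolon: `bar();`
-    ///           // help: try adding a return type: `-> u32`
-    /// }
-    /// ```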
-    pub fn suggest_mismatched_types_on_tail(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expr: &'tcx hir::Expr<'tcx>,
-        expected: Ty<'tcx>,
-        found: Ty<'tcx>,
-        cause_span: Span,
-        blk_id: hir::HirId,
-    ) -> bool {
-        let expr = expr.peel_drop_temps();
-        self.suggest_missing_semicolon(err, expr, expected, cause_span);
-        let mut pointing_at_return_type = false;
-        if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
-            pointing_at_return_type =
-                self.suggest_missing_return_type(err, &fn_decl, expected, found, can_suggest);
-        }
-        pointing_at_return_type
-    }
-
-    /// When encountering an fn-like ctor that needs to unify with a value, check whether calling
-    /// the ctor would successfully solve the type mismatch and if so, suggest it:
-    /// ```
-    /// fn foo(x: usize) -> usize { x }
-    /// let x: usize = foo;  // suggest calling the `foo` function: `foo(42)`
-    /// ```
-    fn suggest_fn_call(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expr: &hir::Expr<'_>,
-        expected: Ty<'tcx>,
-        found: Ty<'tcx>,
-    ) -> bool {
-        let hir = self.tcx.hir();
-        let (def_id, sig) = match *found.kind() {
-            ty::FnDef(def_id, _) => (def_id, found.fn_sig(self.tcx)),
-            ty::Closure(def_id, substs) => (def_id, substs.as_closure().sig()),
-            _ => return false,
-        };
-
-        let sig = self.replace_bound_vars_with_fresh_vars(expr.span, infer::FnCall, &sig).0;
-        let sig = self.normalize_associated_types_in(expr.span, &sig);
-        if self.can_coerce(sig.output(), expected) {
-            let (mut sugg_call, applicability) = if sig.inputs().is_empty() {
-                (String::new(), Applicability::MachineApplicable)
-            } else {
-                ("...".to_string(), Applicability::HasPlaceholders)
-            };
-            let mut msg = "call this function";
-            match hir.get_if_local(def_id) {
-                Some(
-                    Node::Item(hir::Item { kind: ItemKind::Fn(.., body_id), .. })
-                    | Node::ImplItem(hir::ImplItem {
-                        kind: hir::ImplItemKind::Fn(_, body_id), ..
-                    })
-                    | Node::TraitItem(hir::TraitItem {
-                        kind: hir::TraitItemKind::Fn(.., hir::TraitFn::Provided(body_id)),
-                        ..
-                    }),
-                ) => {
-                    let body = hir.body(*body_id);
-                    sugg_call = body
-                        .params
-                        .iter()
-                        .map(|param| match &param.pat.kind {
-                            hir::PatKind::Binding(_, _, ident, None)
-                                if ident.name != kw::SelfLower =>
-                            {
-                                ident.to_string()
-                            }
-                            _ => "_".to_string(),
-                        })
-                        .collect::<Vec<_>>()
-                        .join(", ");
-                }
-                Some(Node::Expr(hir::Expr {
-                    kind: ExprKind::Closure(_, _, body_id, _, _),
-                    span: full_closure_span,
-                    ..
-                })) => {
-                    if *full_closure_span == expr.span {
-                        return false;
-                    }
-                    msg = "call this closure";
-                    let body = hir.body(*body_id);
-                    sugg_call = body
-                        .params
-                        .iter()
-                        .map(|param| match &param.pat.kind {
-                            hir::PatKind::Binding(_, _, ident, None)
-                                if ident.name != kw::SelfLower =>
-                            {
-                                ident.to_string()
-                            }
-                            _ => "_".to_string(),
-                        })
-                        .collect::<Vec<_>>()
-                        .join(", ");
-                }
-                Some(Node::Ctor(hir::VariantData::Tuple(fields, _))) => {
-                    sugg_call = fields.iter().map(|_| "_").collect::<Vec<_>>().join(", ");
-                    match def_id.as_local().map(|def_id| hir.def_kind(def_id)) {
-                        Some(DefKind::Ctor(hir::def::CtorOf::Variant, _)) => {
-                            msg = "instantiate this tuple variant";
-                        }
-                        Some(DefKind::Ctor(CtorOf::Struct, _)) => {
-                            msg = "instantiate this tuple struct";
-                        }
-                        _ => {}
-                    }
-                }
-                Some(Node::ForeignItem(hir::ForeignItem {
-                    kind: hir::ForeignItemKind::Fn(_, idents, _),
-                    ..
-                })) => {
-                    sugg_call = idents
-                        .iter()
-                        .map(|ident| {
-                            if ident.name != kw::SelfLower {
-                                ident.to_string()
-                            } else {
-                                "_".to_string()
-                            }
-                        })
-                        .collect::<Vec<_>>()
-                        .join(", ")
-                }
-                Some(Node::TraitItem(hir::TraitItem {
-                    kind: hir::TraitItemKind::Fn(.., hir::TraitFn::Required(idents)),
-                    ..
-                })) => {
-                    sugg_call = idents
-                        .iter()
-                        .map(|ident| {
-                            if ident.name != kw::SelfLower {
-                                ident.to_string()
-                            } else {
-                                "_".to_string()
-                            }
-                        })
-                        .collect::<Vec<_>>()
-                        .join(", ")
-                }
-                _ => {}
-            }
-            err.span_suggestion_verbose(
-                expr.span.shrink_to_hi(),
-                &format!("use parentheses to {}", msg),
-                format!("({})", sugg_call),
-                applicability,
-            );
-            return true;
-        }
-        false
-    }
-
-    pub fn suggest_deref_ref_or_into(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expr: &hir::Expr<'_>,
-        expected: Ty<'tcx>,
-        found: Ty<'tcx>,
-        expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
-    ) {
-        if let Some((sp, msg, suggestion, applicability)) = self.check_ref(expr, found, expected) {
-            err.span_suggestion(sp, msg, suggestion, applicability);
-        } else if let (ty::FnDef(def_id, ..), true) =
-            (&found.kind(), self.suggest_fn_call(err, expr, expected, found))
-        {
-            if let Some(sp) = self.tcx.hir().span_if_local(*def_id) {
-                let sp = self.sess().source_map().guess_head_span(sp);
-                err.span_label(sp, &format!("{} defined here", found));
-            }
-        } else if !self.check_for_cast(err, expr, found, expected, expected_ty_expr) {
-            let is_struct_pat_shorthand_field =
-                self.is_hir_id_from_struct_pattern_shorthand_field(expr.hir_id, expr.span);
-            let methods = self.get_conversion_methods(expr.span, expected, found, expr.hir_id);
-            if let Ok(expr_text) = self.sess().source_map().span_to_snippet(expr.span) {
-                let mut suggestions = iter::repeat(&expr_text)
-                    .zip(methods.iter())
-                    .filter_map(|(receiver, method)| {
-                        let method_call = format!(".{}()", method.ident);
-                        if receiver.ends_with(&method_call) {
-                            None // do not suggest code that is already there (#53348)
-                        } else {
-                            let method_call_list = [".to_vec()", ".to_string()"];
-                            let sugg = if receiver.ends_with(".clone()")
-                                && method_call_list.contains(&method_call.as_str())
-                            {
-                                let max_len = receiver.rfind('.').unwrap();
-                                format!("{}{}", &receiver[..max_len], method_call)
-                            } else {
-                                if expr.precedence().order() < ExprPrecedence::MethodCall.order() {
-                                    format!("({}){}", receiver, method_call)
-                                } else {
-                                    format!("{}{}", receiver, method_call)
-                                }
-                            };
-                            Some(if is_struct_pat_shorthand_field {
-                                format!("{}: {}", receiver, sugg)
-                            } else {
-                                sugg
-                            })
-                        }
-                    })
-                    .peekable();
-                if suggestions.peek().is_some() {
-                    err.span_suggestions(
-                        expr.span,
-                        "try using a conversion method",
-                        suggestions,
-                        Applicability::MaybeIncorrect,
-                    );
-                }
-            }
-        }
-    }
-
-    /// When encountering the expected boxed value allocated in the stack, suggest allocating it
-    /// in the heap by calling `Box::new()`.
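-    ///
-    /// For instance (illustrative sketch, not part of the original source):
-    ///
-    /// ```text
-    /// let x: Box<u32> = 42;
-    /// // error: mismatched types
-    /// // help: store this in the heap by calling `Box::new`: `Box::new(42)`
-    /// ```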
-    pub(super) fn suggest_boxing_when_appropriate(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expr: &hir::Expr<'_>,
-        expected: Ty<'tcx>,
-        found: Ty<'tcx>,
-    ) {
-        if self.tcx.hir().is_inside_const_context(expr.hir_id) {
-            // Do not suggest `Box::new` in const context.
-            return;
-        }
-        if !expected.is_box() || found.is_box() {
-            return;
-        }
-        let boxed_found = self.tcx.mk_box(found);
-        if let (true, Ok(snippet)) = (
-            self.can_coerce(boxed_found, expected),
-            self.sess().source_map().span_to_snippet(expr.span),
-        ) {
-            err.span_suggestion(
-                expr.span,
-                "store this in the heap by calling `Box::new`",
-                format!("Box::new({})", snippet),
-                Applicability::MachineApplicable,
-            );
-            err.note(
-                "for more on the distinction between the stack and the heap, read \
-                 https://doc.rust-lang.org/book/ch15-01-box.html, \
-                 https://doc.rust-lang.org/rust-by-example/std/box.html, and \
-                 https://doc.rust-lang.org/std/boxed/index.html",
-            );
-        }
-    }
-
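-    /// Notes when the `()` result of an in-place mutating method is used where the receiver's
-    /// type is expected, e.g. (illustrative sketch, not part of the original source):
-    ///
-    /// ```text
-    /// let v: Vec<i32> = vec![3, 1, 2].sort();
-    /// // note: method `sort` modifies its receiver in-place
-    /// // note: ...instead of the `()` output of method `sort`
-    /// ```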
-    pub(super) fn note_internal_mutation_in_method(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expr: &hir::Expr<'_>,
-        expected: Ty<'tcx>,
-        found: Ty<'tcx>,
-    ) {
-        if found != self.tcx.types.unit {
-            return;
-        }
-        if let ExprKind::MethodCall(path_segment, _, [rcvr, ..], _) = expr.kind {
-            if self
-                .typeck_results
-                .borrow()
-                .expr_ty_adjusted_opt(rcvr)
-                .map_or(true, |ty| expected.peel_refs() != ty.peel_refs())
-            {
-                return;
-            }
-            let mut sp = MultiSpan::from_span(path_segment.ident.span);
-            sp.push_span_label(
-                path_segment.ident.span,
-                format!(
-                    "this call modifies {} in-place",
-                    match rcvr.kind {
-                        ExprKind::Path(QPath::Resolved(
-                            None,
-                            hir::Path { segments: [segment], .. },
-                        )) => format!("`{}`", segment.ident),
-                        _ => "its receiver".to_string(),
-                    }
-                ),
-            );
-            sp.push_span_label(
-                rcvr.span,
-                "you probably want to use this value after calling the method...".to_string(),
-            );
-            err.span_note(
-                sp,
-                &format!("method `{}` modifies its receiver in-place", path_segment.ident),
-            );
-            err.note(&format!("...instead of the `()` output of method `{}`", path_segment.ident));
-        }
-    }
-
-    /// When encountering an `impl Future` where `BoxFuture` is expected, suggest `Box::pin`.
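-    ///
-    /// For instance (illustrative sketch, not part of the original source):
-    ///
-    /// ```text
-    /// fn foo() -> Pin<Box<dyn Future<Output = i32>>> {
-    ///     async { 42 }
-    ///     // error: mismatched types
-    ///     // help: you need to pin and box this expression: `Box::pin(async { 42 })`
-    /// }
-    /// ```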
-    pub(super) fn suggest_calling_boxed_future_when_appropriate(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expr: &hir::Expr<'_>,
-        expected: Ty<'tcx>,
-        found: Ty<'tcx>,
-    ) -> bool {
-        // Handle #68197.
-
-        if self.tcx.hir().is_inside_const_context(expr.hir_id) {
-            // Do not suggest `Box::new` in const context.
-            return false;
-        }
-        let pin_did = self.tcx.lang_items().pin_type();
-        match expected.kind() {
-            ty::Adt(def, _) if Some(def.did) != pin_did => return false,
-            // This guards the `unwrap` and `mk_box` below.
-            _ if pin_did.is_none() || self.tcx.lang_items().owned_box().is_none() => return false,
-            _ => {}
-        }
-        let boxed_found = self.tcx.mk_box(found);
-        let new_found = self.tcx.mk_lang_item(boxed_found, LangItem::Pin).unwrap();
-        if let (true, Ok(snippet)) = (
-            self.can_coerce(new_found, expected),
-            self.sess().source_map().span_to_snippet(expr.span),
-        ) {
-            match found.kind() {
-                ty::Adt(def, _) if def.is_box() => {
-                    err.help("use `Box::pin`");
-                }
-                _ => {
-                    err.span_suggestion(
-                        expr.span,
-                        "you need to pin and box this expression",
-                        format!("Box::pin({})", snippet),
-                        Applicability::MachineApplicable,
-                    );
-                }
-            }
-            true
-        } else {
-            false
-        }
-    }
-
-    /// A common error is to forget to add a semicolon at the end of a block, e.g.,
-    ///
-    /// ```
-    /// fn foo() {
-    ///     bar_that_returns_u32()
-    /// }
-    /// ```
-    ///
-    /// This routine checks if the return expression in a block would make sense on its own as a
-    /// statement and the return type has been left as default or has been specified as `()`. If so,
-    /// it suggests adding a semicolon.
-    fn suggest_missing_semicolon(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expression: &'tcx hir::Expr<'tcx>,
-        expected: Ty<'tcx>,
-        cause_span: Span,
-    ) {
-        if expected.is_unit() {
-            // `BlockTailExpression` is only relevant if the tail expr would be
-            // useful on its own.
-            match expression.kind {
-                ExprKind::Call(..)
-                | ExprKind::MethodCall(..)
-                | ExprKind::Loop(..)
-                | ExprKind::Match(..)
-                | ExprKind::Block(..) => {
-                    err.span_suggestion(
-                        cause_span.shrink_to_hi(),
-                        "try adding a semicolon",
-                        ";".to_string(),
-                        Applicability::MachineApplicable,
-                    );
-                }
-                _ => (),
-            }
-        }
-    }
-
-    /// A possible error is to forget to add a return type that is needed:
-    ///
-    /// ```
-    /// fn foo() {
-    ///     bar_that_returns_u32()
-    /// }
-    /// ```
-    ///
-    /// This routine checks if the return type is left as default, the method is not part of an
-    /// `impl` block, and it isn't the `main` method. If so, it suggests setting the return
-    /// type.
-    pub(super) fn suggest_missing_return_type(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        fn_decl: &hir::FnDecl<'_>,
-        expected: Ty<'tcx>,
-        found: Ty<'tcx>,
-        can_suggest: bool,
-    ) -> bool {
-        // Only suggest changing the return type for methods that
-        // haven't set a return type at all (and aren't `fn main()` or an impl).
-        match (&fn_decl.output, found.is_suggestable(), can_suggest, expected.is_unit()) {
-            (&hir::FnRetTy::DefaultReturn(span), true, true, true) => {
-                err.span_suggestion(
-                    span,
-                    "try adding a return type",
-                    format!("-> {} ", self.resolve_vars_with_obligations(found)),
-                    Applicability::MachineApplicable,
-                );
-                true
-            }
-            (&hir::FnRetTy::DefaultReturn(span), false, true, true) => {
-                err.span_label(span, "possibly return type missing here?");
-                true
-            }
-            (&hir::FnRetTy::DefaultReturn(span), _, false, true) => {
-                // `fn main()` must return `()`, do not suggest changing return type
-                err.span_label(span, "expected `()` because of default return type");
-                true
-            }
-            // expectation was caused by something else, not the default return
-            (&hir::FnRetTy::DefaultReturn(_), _, _, false) => false,
-            (&hir::FnRetTy::Return(ref ty), _, _, _) => {
-                // Only point to return type if the expected type is the return type, as if they
-                // are not, the expectation must have been caused by something else.
-                debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.kind);
-                let sp = ty.span;
-                let ty = AstConv::ast_ty_to_ty(self, ty);
-                debug!("suggest_missing_return_type: return type {:?}", ty);
-                debug!("suggest_missing_return_type: expected type {:?}", ty);
-                if ty.kind() == expected.kind() {
-                    err.span_label(sp, format!("expected `{}` because of return type", expected));
-                    return true;
-                }
-                false
-            }
-        }
-    }
-
-    /// A possible error is to forget to add `.await` when using futures:
-    ///
-    /// ```
-    /// async fn make_u32() -> u32 {
-    ///     22
-    /// }
-    ///
-    /// fn take_u32(x: u32) {}
-    ///
-    /// async fn foo() {
-    ///     let x = make_u32();
-    ///     take_u32(x);
-    /// }
-    /// ```
-    ///
-    /// This routine checks if the found type `T` implements `Future<Output=U>` where `U` is the
-    /// expected type. If this is the case, and we are inside of an async body, it suggests adding
-    /// `.await` to the tail of the expression.
-    pub(super) fn suggest_missing_await(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expr: &hir::Expr<'_>,
-        expected: Ty<'tcx>,
-        found: Ty<'tcx>,
-    ) {
-        debug!("suggest_missing_await: expr={:?} expected={:?}, found={:?}", expr, expected, found);
-        // `.await` is not permitted outside of `async` bodies, so don't bother to suggest if the
-        // body isn't `async`.
-        let item_id = self.tcx().hir().get_parent_node(self.body_id);
-        if let Some(body_id) = self.tcx().hir().maybe_body_owned_by(item_id) {
-            let body = self.tcx().hir().body(body_id);
-            if let Some(hir::GeneratorKind::Async(_)) = body.generator_kind {
-                let sp = expr.span;
-                // Check for `Future` implementations by constructing a predicate to
-                // prove: `<T as Future>::Output == U`
-                let future_trait = self.tcx.require_lang_item(LangItem::Future, Some(sp));
-                let item_def_id = self
-                    .tcx
-                    .associated_items(future_trait)
-                    .in_definition_order()
-                    .next()
-                    .unwrap()
-                    .def_id;
-                // `<T as Future>::Output`
-                let projection_ty = ty::ProjectionTy {
-                    // `T`
-                    substs: self
-                        .tcx
-                        .mk_substs_trait(found, self.fresh_substs_for_item(sp, item_def_id)),
-                    // `Future::Output`
-                    item_def_id,
-                };
-
-                let predicate = ty::PredicateAtom::Projection(ty::ProjectionPredicate {
-                    projection_ty,
-                    ty: expected,
-                })
-                .potentially_quantified(self.tcx, ty::PredicateKind::ForAll);
-                let obligation = traits::Obligation::new(self.misc(sp), self.param_env, predicate);
-
-                debug!("suggest_missing_await: trying obligation {:?}", obligation);
-
-                if self.infcx.predicate_may_hold(&obligation) {
-                    debug!("suggest_missing_await: obligation held: {:?}", obligation);
-                    if let Ok(code) = self.sess().source_map().span_to_snippet(sp) {
-                        err.span_suggestion(
-                            sp,
-                            "consider using `.await` here",
-                            format!("{}.await", code),
-                            Applicability::MaybeIncorrect,
-                        );
-                    } else {
-                        debug!("suggest_missing_await: no snippet for {:?}", sp);
-                    }
-                } else {
-                    debug!("suggest_missing_await: obligation did not hold: {:?}", obligation)
-                }
-            }
-        }
-    }
-
-    pub(super) fn suggest_missing_parentheses(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expr: &hir::Expr<'_>,
-    ) {
-        let sp = self.tcx.sess.source_map().start_point(expr.span);
-        if let Some(sp) = self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp) {
-            // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`
-            self.tcx.sess.parse_sess.expr_parentheses_needed(err, *sp, None);
-        }
-    }
-
-    pub(super) fn note_need_for_fn_pointer(
-        &self,
-        err: &mut DiagnosticBuilder<'_>,
-        expected: Ty<'tcx>,
-        found: Ty<'tcx>,
-    ) {
-        let (sig, did, substs) = match (&expected.kind(), &found.kind()) {
-            (ty::FnDef(did1, substs1), ty::FnDef(did2, substs2)) => {
-                let sig1 = self.tcx.fn_sig(*did1).subst(self.tcx, substs1);
-                let sig2 = self.tcx.fn_sig(*did2).subst(self.tcx, substs2);
-                if sig1 != sig2 {
-                    return;
-                }
-                err.note(
-                    "different `fn` items always have unique types, even if their signatures are \
-                     the same",
-                );
-                (sig1, *did1, substs1)
-            }
-            (ty::FnDef(did, substs), ty::FnPtr(sig2)) => {
-                let sig1 = self.tcx.fn_sig(*did).subst(self.tcx, substs);
-                if sig1 != *sig2 {
-                    return;
-                }
-                (sig1, *did, substs)
-            }
-            _ => return,
-        };
-        err.help(&format!("change the expected type to be function pointer `{}`", sig));
-        err.help(&format!(
-            "if the expected type is due to type inference, cast the expected `fn` to a function \
-             pointer: `{} as {}`",
-            self.tcx.def_path_str_with_substs(did, substs),
-            sig
-        ));
-    }
-
-    /// A common error is to add an extra semicolon:
-    ///
-    /// ```
-    /// fn foo() -> usize {
-    ///     22;
-    /// }
-    /// ```
-    ///
-    /// This routine checks if the final statement in a block is an
-    /// expression with an explicit semicolon whose type is compatible
-    /// with `expected_ty`. If so, it suggests removing the semicolon.
-    fn consider_hint_about_removing_semicolon(
-        &self,
-        blk: &'tcx hir::Block<'tcx>,
-        expected_ty: Ty<'tcx>,
-        err: &mut DiagnosticBuilder<'_>,
-    ) {
-        if let Some(span_semi) = self.could_remove_semicolon(blk, expected_ty) {
-            err.span_suggestion(
-                span_semi,
-                "consider removing this semicolon",
-                String::new(),
-                Applicability::MachineApplicable,
-            );
-        }
-    }
-
-    pub(super) fn could_remove_semicolon(
-        &self,
-        blk: &'tcx hir::Block<'tcx>,
-        expected_ty: Ty<'tcx>,
-    ) -> Option<Span> {
-        // Be helpful when the user wrote `{... expr;}` and
-        // taking the `;` off is enough to fix the error.
-        let last_stmt = blk.stmts.last()?;
-        let last_expr = match last_stmt.kind {
-            hir::StmtKind::Semi(ref e) => e,
-            _ => return None,
-        };
-        let last_expr_ty = self.node_ty(last_expr.hir_id);
-        if matches!(last_expr_ty.kind(), ty::Error(_))
-            || self.can_sub(self.param_env, last_expr_ty, expected_ty).is_err()
-        {
-            return None;
-        }
-        let original_span = original_sp(last_stmt.span, blk.span);
-        Some(original_span.with_lo(original_span.hi() - BytePos(1)))
-    }
-
-    // Instantiates the given path, which must refer to an item with the given
-    // number of type parameters and the given type.
-    pub fn instantiate_value_path(
-        &self,
-        segments: &[hir::PathSegment<'_>],
-        self_ty: Option<Ty<'tcx>>,
-        res: Res,
-        span: Span,
-        hir_id: hir::HirId,
-    ) -> (Ty<'tcx>, Res) {
-        debug!(
-            "instantiate_value_path(segments={:?}, self_ty={:?}, res={:?}, hir_id={})",
-            segments, self_ty, res, hir_id,
-        );
-
-        let tcx = self.tcx;
-
-        let path_segs = match res {
-            Res::Local(_) | Res::SelfCtor(_) => vec![],
-            Res::Def(kind, def_id) => {
-                AstConv::def_ids_for_value_path_segments(self, segments, self_ty, kind, def_id)
-            }
-            _ => bug!("instantiate_value_path on {:?}", res),
-        };
-
-        let mut user_self_ty = None;
-        let mut is_alias_variant_ctor = false;
-        match res {
-            Res::Def(DefKind::Ctor(CtorOf::Variant, _), _) => {
-                if let Some(self_ty) = self_ty {
-                    let adt_def = self_ty.ty_adt_def().unwrap();
-                    user_self_ty = Some(UserSelfTy { impl_def_id: adt_def.did, self_ty });
-                    is_alias_variant_ctor = true;
-                }
-            }
-            Res::Def(DefKind::AssocFn | DefKind::AssocConst, def_id) => {
-                let container = tcx.associated_item(def_id).container;
-                debug!("instantiate_value_path: def_id={:?} container={:?}", def_id, container);
-                match container {
-                    ty::TraitContainer(trait_did) => {
-                        callee::check_legal_trait_for_method_call(tcx, span, None, trait_did)
-                    }
-                    ty::ImplContainer(impl_def_id) => {
-                        if segments.len() == 1 {
-                            // `<T>::assoc` will end up here, and so
-                            // can `T::assoc`. If this came from an
-                            // inherent impl, we need to record the
-                            // `T` for posterity (see `UserSelfTy` for
-                            // details).
-                            let self_ty = self_ty.expect("UFCS sugared assoc missing Self");
-                            user_self_ty = Some(UserSelfTy { impl_def_id, self_ty });
-                        }
-                    }
-                }
-            }
-            _ => {}
-        }
-
-        // Now that we have categorized what space the parameters for each
-        // segment belong to, let's sort out the parameters that the user
-        // provided (if any) into their appropriate spaces. We'll also report
-        // errors if type parameters are provided in an inappropriate place.
-
-        let generic_segs: FxHashSet<_> = path_segs.iter().map(|PathSeg(_, index)| index).collect();
-        let generics_has_err = AstConv::prohibit_generics(
-            self,
-            segments.iter().enumerate().filter_map(|(index, seg)| {
-                if !generic_segs.contains(&index) || is_alias_variant_ctor {
-                    Some(seg)
-                } else {
-                    None
-                }
-            }),
-        );
-
-        if let Res::Local(hid) = res {
-            let ty = self.local_ty(span, hid).decl_ty;
-            let ty = self.normalize_associated_types_in(span, &ty);
-            self.write_ty(hir_id, ty);
-            return (ty, res);
-        }
-
-        if generics_has_err {
-            // Don't try to infer type parameters when prohibited generic arguments were given.
-            user_self_ty = None;
-        }
-
-        // Now we have to compare the types that the user *actually*
-        // provided against the types that were *expected*. If the user
-        // did not provide any types, then we want to substitute inference
-        // variables. If the user provided some types, we may still need
-        // to add defaults. If the user provided *too many* types, that's
-        // a problem.
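-        //
-        // For example (illustrative; not part of the original comment), for a
-        // `fn id<T>(x: T) -> T` referenced as a value path:
-        //   `id::<u8>`     -- `u8` was provided, so it is used directly;
-        //   `id`           -- nothing was provided, so a fresh inference variable is substituted;
-        //   `id::<u8, u8>` -- too many arguments, reported by the count check below.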
-
-        let mut infer_args_for_err = FxHashSet::default();
-        for &PathSeg(def_id, index) in &path_segs {
-            let seg = &segments[index];
-            let generics = tcx.generics_of(def_id);
-            // Argument-position `impl Trait` is treated as a normal generic
-            // parameter internally, but we don't allow users to specify the
-            // parameter's value explicitly, so we have to do some error-
-            // checking here.
-            if let GenericArgCountResult {
-                correct: Err(GenericArgCountMismatch { reported: Some(ErrorReported), .. }),
-                ..
-            } = AstConv::check_generic_arg_count_for_call(
-                tcx, span, &generics, &seg, false, // `is_method_call`
-            ) {
-                infer_args_for_err.insert(index);
-                self.set_tainted_by_errors(); // See issue #53251.
-            }
-        }
-
-        let has_self = path_segs
-            .last()
-            .map(|PathSeg(def_id, _)| tcx.generics_of(*def_id).has_self)
-            .unwrap_or(false);
-
-        let (res, self_ctor_substs) = if let Res::SelfCtor(impl_def_id) = res {
-            let ty = self.normalize_ty(span, tcx.at(span).type_of(impl_def_id));
-            match *ty.kind() {
-                ty::Adt(adt_def, substs) if adt_def.has_ctor() => {
-                    let variant = adt_def.non_enum_variant();
-                    let ctor_def_id = variant.ctor_def_id.unwrap();
-                    (
-                        Res::Def(DefKind::Ctor(CtorOf::Struct, variant.ctor_kind), ctor_def_id),
-                        Some(substs),
-                    )
-                }
-                _ => {
-                    let mut err = tcx.sess.struct_span_err(
-                        span,
-                        "the `Self` constructor can only be used with tuple or unit structs",
-                    );
-                    if let Some(adt_def) = ty.ty_adt_def() {
-                        match adt_def.adt_kind() {
-                            AdtKind::Enum => {
-                                err.help("did you mean to use one of the enum's variants?");
-                            }
-                            AdtKind::Struct | AdtKind::Union => {
-                                err.span_suggestion(
-                                    span,
-                                    "use curly brackets",
-                                    String::from("Self { /* fields */ }"),
-                                    Applicability::HasPlaceholders,
-                                );
-                            }
-                        }
-                    }
-                    err.emit();
-
-                    return (tcx.ty_error(), res);
-                }
-            }
-        } else {
-            (res, None)
-        };
-        let def_id = res.def_id();
-
-        // The things we are substituting into the type should not contain
-        // escaping late-bound regions, and nor should the base type scheme.
-        let ty = tcx.type_of(def_id);
-
-        let arg_count = GenericArgCountResult {
-            explicit_late_bound: ExplicitLateBound::No,
-            correct: if infer_args_for_err.is_empty() {
-                Ok(())
-            } else {
-                Err(GenericArgCountMismatch::default())
-            },
-        };
-
-        let substs = self_ctor_substs.unwrap_or_else(|| {
-            AstConv::create_substs_for_generic_args(
-                tcx,
-                def_id,
-                &[][..],
-                has_self,
-                self_ty,
-                arg_count,
-                // Provide the generic args, and whether types should be inferred.
-                |def_id| {
-                    if let Some(&PathSeg(_, index)) =
-                        path_segs.iter().find(|&PathSeg(did, _)| *did == def_id)
-                    {
-                        // If we've encountered an `impl Trait`-related error, we're just
-                        // going to infer the arguments for better error messages.
-                        if !infer_args_for_err.contains(&index) {
-                            // Check whether the user has provided generic arguments.
-                            if let Some(ref data) = segments[index].args {
-                                return (Some(data), segments[index].infer_args);
-                            }
-                        }
-                        return (None, segments[index].infer_args);
-                    }
-
-                    (None, true)
-                },
-                // Provide substitutions for parameters for which (valid) arguments have been provided.
-                |param, arg| match (&param.kind, arg) {
-                    (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
-                        AstConv::ast_region_to_region(self, lt, Some(param)).into()
-                    }
-                    (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
-                        self.to_ty(ty).into()
-                    }
-                    (GenericParamDefKind::Const, GenericArg::Const(ct)) => {
-                        self.const_arg_to_const(&ct.value, param.def_id).into()
-                    }
-                    _ => unreachable!(),
-                },
-                // Provide substitutions for parameters for which arguments are inferred.
-                |substs, param, infer_args| {
-                    match param.kind {
-                        GenericParamDefKind::Lifetime => {
-                            self.re_infer(Some(param), span).unwrap().into()
-                        }
-                        GenericParamDefKind::Type { has_default, .. } => {
-                            if !infer_args && has_default {
-                                // If we have a default, then it doesn't matter that we're not
-                                // inferring the type arguments: we provide the default where any
-                                // is missing.
-                                let default = tcx.type_of(param.def_id);
-                                self.normalize_ty(
-                                    span,
-                                    default.subst_spanned(tcx, substs.unwrap(), Some(span)),
-                                )
-                                .into()
-                            } else {
-                                // If no type arguments were provided, we have to infer them.
-                                // This case also occurs as a result of some malformed input, e.g.
-                                // a lifetime argument being given instead of a type parameter.
-                                // Using inference instead of `Error` gives better error messages.
-                                self.var_for_def(span, param)
-                            }
-                        }
-                        GenericParamDefKind::Const => {
-                            // FIXME(const_generics:defaults)
-                            // No const parameters were provided, we have to infer them.
-                            self.var_for_def(span, param)
-                        }
-                    }
-                },
-            )
-        });
-        assert!(!substs.has_escaping_bound_vars());
-        assert!(!ty.has_escaping_bound_vars());
-
-        // First, store the "user substs" for later.
-        self.write_user_type_annotation_from_substs(hir_id, def_id, substs, user_self_ty);
-
-        self.add_required_obligations(span, def_id, &substs);
-
-        // Substitute the values for the type parameters into the type of
-        // the referenced item.
-        let ty_substituted = self.instantiate_type_scheme(span, &substs, &ty);
-
-        if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
-            // In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
-            // is inherent, there is no `Self` parameter; instead, the impl needs
-            // type parameters, which we can infer by unifying the provided `Self`
-            // with the substituted impl type.
-            // This also occurs for an enum variant on a type alias.
-            let ty = tcx.type_of(impl_def_id);
-
-            let impl_ty = self.instantiate_type_scheme(span, &substs, &ty);
-            match self.at(&self.misc(span), self.param_env).sup(impl_ty, self_ty) {
-                Ok(ok) => self.register_infer_ok_obligations(ok),
-                Err(_) => {
-                    self.tcx.sess.delay_span_bug(
-                        span,
-                        &format!(
-                        "instantiate_value_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
-                        self_ty,
-                        impl_ty,
-                    ),
-                    );
-                }
-            }
-        }
-
-        self.check_rustc_args_require_const(def_id, hir_id, span);
-
-        debug!("instantiate_value_path: type of {:?} is {:?}", hir_id, ty_substituted);
-        self.write_substs(hir_id, substs);
-
-        (ty_substituted, res)
-    }
-
-    /// Add all the obligations that are required, substituting and normalized appropriately.
-    fn add_required_obligations(&self, span: Span, def_id: DefId, substs: &SubstsRef<'tcx>) {
-        let (bounds, spans) = self.instantiate_bounds(span, def_id, &substs);
-
-        for (i, mut obligation) in traits::predicates_for_generics(
-            traits::ObligationCause::new(span, self.body_id, traits::ItemObligation(def_id)),
-            self.param_env,
-            bounds,
-        )
-        .enumerate()
-        {
-            // This makes the error point at the bound, but we want to point at the argument
-            if let Some(span) = spans.get(i) {
-                obligation.cause.make_mut().code = traits::BindingObligation(def_id, *span);
-            }
-            self.register_predicate(obligation);
-        }
-    }
-
-    fn check_rustc_args_require_const(&self, def_id: DefId, hir_id: hir::HirId, span: Span) {
-        // We're only interested in functions tagged with
-        // #[rustc_args_required_const], so ignore anything that's not.
-        if !self.tcx.has_attr(def_id, sym::rustc_args_required_const) {
-            return;
-        }
-
-        // If our calling expression is indeed the function itself, we're good!
-        // If not, generate an error that this can only be called directly.
-        if let Node::Expr(expr) = self.tcx.hir().get(self.tcx.hir().get_parent_node(hir_id)) {
-            if let ExprKind::Call(ref callee, ..) = expr.kind {
-                if callee.hir_id == hir_id {
-                    return;
-                }
-            }
-        }
-
-        self.tcx.sess.span_err(
-            span,
-            "this function can only be invoked directly, not through a function pointer",
-        );
-    }
-
-    /// Resolves `ty` by a single level if `ty` is a type variable.
-    /// If no resolution is possible, then an error is reported.
-    /// Numeric inference variables may be left unresolved.
-    pub fn structurally_resolved_type(&self, sp: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
-        let ty = self.resolve_vars_with_obligations(ty);
-        if !ty.is_ty_var() {
-            ty
-        } else {
-            if !self.is_tainted_by_errors() {
-                self.emit_inference_failure_err((**self).body_id, sp, ty.into(), E0282)
-                    .note("type must be known at this point")
-                    .emit();
-            }
-            let err = self.tcx.ty_error();
-            self.demand_suptype(sp, err, ty);
-            err
-        }
-    }
-
-    pub(super) fn with_breakable_ctxt<F: FnOnce() -> R, R>(
-        &self,
-        id: hir::HirId,
-        ctxt: BreakableCtxt<'tcx>,
-        f: F,
-    ) -> (BreakableCtxt<'tcx>, R) {
-        let index;
-        {
-            let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
-            index = enclosing_breakables.stack.len();
-            enclosing_breakables.by_id.insert(id, index);
-            enclosing_breakables.stack.push(ctxt);
-        }
-        let result = f();
-        let ctxt = {
-            let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
-            debug_assert!(enclosing_breakables.stack.len() == index + 1);
-            enclosing_breakables.by_id.remove(&id).expect("missing breakable context");
-            enclosing_breakables.stack.pop().expect("missing breakable context")
-        };
-        (ctxt, result)
-    }
-
-    /// Instantiate a QueryResponse in a probe context, without a
-    /// good ObligationCause.
-    pub(super) fn probe_instantiate_query_response(
-        &self,
-        span: Span,
-        original_values: &OriginalQueryValues<'tcx>,
-        query_result: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
-    ) -> InferResult<'tcx, Ty<'tcx>> {
-        self.instantiate_query_response_and_region_obligations(
-            &traits::ObligationCause::misc(span, self.body_id),
-            self.param_env,
-            original_values,
-            query_result,
-        )
-    }
-
-    /// Returns `true` if an expression is contained inside the LHS of an assignment expression.
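-    /// For instance (illustrative; not part of the original source), in `x.field = 1` both
-    /// `x.field` and `x` are contained in the assignment's LHS, while the `1` is not.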
-    pub(super) fn expr_in_place(&self, mut expr_id: hir::HirId) -> bool {
-        let mut contained_in_place = false;
-
-        while let hir::Node::Expr(parent_expr) =
-            self.tcx.hir().get(self.tcx.hir().get_parent_node(expr_id))
-        {
-            match &parent_expr.kind {
-                hir::ExprKind::Assign(lhs, ..) | hir::ExprKind::AssignOp(_, lhs, ..) => {
-                    if lhs.hir_id == expr_id {
-                        contained_in_place = true;
-                        break;
-                    }
-                }
-                _ => (),
-            }
-            expr_id = parent_expr.hir_id;
-        }
-
-        contained_in_place
-    }
-}
-impl<'a, 'tcx> Deref for FnCtxt<'a, 'tcx> {
-    type Target = Inherited<'a, 'tcx>;
-    fn deref(&self) -> &Self::Target {
-        &self.inh
-    }
-}
-
-impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
-    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
-        self.tcx
-    }
-
-    fn item_def_id(&self) -> Option<DefId> {
-        None
-    }
-
-    fn default_constness_for_trait_bounds(&self) -> hir::Constness {
-        // FIXME: refactor this into a method
-        let node = self.tcx.hir().get(self.body_id);
-        if let Some(fn_like) = FnLikeNode::from_node(node) {
-            fn_like.constness()
-        } else {
-            hir::Constness::NotConst
-        }
-    }
-
-    fn get_type_parameter_bounds(&self, _: Span, def_id: DefId) -> ty::GenericPredicates<'tcx> {
-        let tcx = self.tcx;
-        let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
-        let item_id = tcx.hir().ty_param_owner(hir_id);
-        let item_def_id = tcx.hir().local_def_id(item_id);
-        let generics = tcx.generics_of(item_def_id);
-        let index = generics.param_def_id_to_index[&def_id];
-        ty::GenericPredicates {
-            parent: None,
-            predicates: tcx.arena.alloc_from_iter(
-                self.param_env.caller_bounds().iter().filter_map(|predicate| {
-                    match predicate.skip_binders() {
-                        ty::PredicateAtom::Trait(data, _) if data.self_ty().is_param(index) => {
-                            // HACK(eddyb) should get the original `Span`.
-                            let span = tcx.def_span(def_id);
-                            Some((predicate, span))
-                        }
-                        _ => None,
-                    }
-                }),
-            ),
-        }
-    }
-
-    fn re_infer(&self, def: Option<&ty::GenericParamDef>, span: Span) -> Option<ty::Region<'tcx>> {
-        let v = match def {
-            Some(def) => infer::EarlyBoundRegion(span, def.name),
-            None => infer::MiscVariable(span),
-        };
-        Some(self.next_region_var(v))
-    }
-
-    fn allow_ty_infer(&self) -> bool {
-        true
-    }
-
-    fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
-        if let Some(param) = param {
-            if let GenericArgKind::Type(ty) = self.var_for_def(span, param).unpack() {
-                return ty;
-            }
-            unreachable!()
-        } else {
-            self.next_ty_var(TypeVariableOrigin {
-                kind: TypeVariableOriginKind::TypeInference,
-                span,
-            })
-        }
-    }
-
-    fn ct_infer(
-        &self,
-        ty: Ty<'tcx>,
-        param: Option<&ty::GenericParamDef>,
-        span: Span,
-    ) -> &'tcx Const<'tcx> {
-        if let Some(param) = param {
-            if let GenericArgKind::Const(ct) = self.var_for_def(span, param).unpack() {
-                return ct;
-            }
-            unreachable!()
-        } else {
-            self.next_const_var(
-                ty,
-                ConstVariableOrigin { kind: ConstVariableOriginKind::ConstInference, span },
-            )
-        }
-    }
-
-    fn projected_ty_from_poly_trait_ref(
-        &self,
-        span: Span,
-        item_def_id: DefId,
-        item_segment: &hir::PathSegment<'_>,
-        poly_trait_ref: ty::PolyTraitRef<'tcx>,
-    ) -> Ty<'tcx> {
-        let (trait_ref, _) = self.replace_bound_vars_with_fresh_vars(
-            span,
-            infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id),
-            &poly_trait_ref,
-        );
-
-        let item_substs = <dyn AstConv<'tcx>>::create_substs_for_associated_item(
-            self,
-            self.tcx,
-            span,
-            item_def_id,
-            item_segment,
-            trait_ref.substs,
-        );
-
-        self.tcx().mk_projection(item_def_id, item_substs)
-    }
-
-    fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
-        if ty.has_escaping_bound_vars() {
-            ty // FIXME: normalization and escaping regions
-        } else {
-            self.normalize_associated_types_in(span, &ty)
-        }
-    }
-
-    fn set_tainted_by_errors(&self) {
-        self.infcx.set_tainted_by_errors()
-    }
-
-    fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) {
-        self.write_ty(hir_id, ty)
-    }
-}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs
new file mode 100644
index 0000000..0bb7b46
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs
@@ -0,0 +1,1516 @@
+use crate::astconv::{
+    AstConv, ExplicitLateBound, GenericArgCountMismatch, GenericArgCountResult, PathSeg,
+};
+use crate::check::callee::{self, DeferredCallResolution};
+use crate::check::method::{self, MethodCallee, SelfSource};
+use crate::check::{BreakableCtxt, Diverges, Expectation, FallbackMode, FnCtxt, LocalTy};
+
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{Applicability, DiagnosticBuilder, ErrorReported};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{ExprKind, GenericArg, Node, QPath};
+use rustc_infer::infer::canonical::{Canonical, OriginalQueryValues, QueryResponse};
+use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
+use rustc_infer::infer::{InferOk, InferResult};
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, AutoBorrow, AutoBorrowMutability};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::{
+    self, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSelfTy, UserSubsts,
+};
+use rustc_middle::ty::{
+    self, AdtKind, CanonicalUserType, DefIdTree, GenericParamDefKind, ToPolyTraitRef, ToPredicate,
+    Ty, UserType,
+};
+use rustc_session::lint;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::source_map::{original_sp, DUMMY_SP};
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::{self, BytePos, MultiSpan, Span};
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::opaque_types::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+    self, ObligationCauseCode, StatementAsExpression, TraitEngine, TraitEngineExt,
+};
+
+use std::collections::hash_map::Entry;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    /// Produces a warning on the given node if the current point in the
+    /// function is unreachable and there hasn't been another warning.
+    pub(in super::super) fn warn_if_unreachable(&self, id: hir::HirId, span: Span, kind: &str) {
+        // FIXME: Combine these two 'if' expressions into one once
+        // let chains are implemented
+        if let Diverges::Always { span: orig_span, custom_note } = self.diverges.get() {
+            // If span arose from a desugaring of `if` or `while`, then it is the condition itself,
+            // which diverges, that we are about to lint on. This gives suboptimal diagnostics.
+            // Instead, stop here so that the `if`- or `while`-expression's block is linted instead.
+            if !span.is_desugaring(DesugaringKind::CondTemporary)
+                && !span.is_desugaring(DesugaringKind::Async)
+                && !orig_span.is_desugaring(DesugaringKind::Await)
+            {
+                self.diverges.set(Diverges::WarnedAlways);
+
+                debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind);
+
+                self.tcx().struct_span_lint_hir(lint::builtin::UNREACHABLE_CODE, id, span, |lint| {
+                    let msg = format!("unreachable {}", kind);
+                    lint.build(&msg)
+                        .span_label(span, &msg)
+                        .span_label(
+                            orig_span,
+                            custom_note
+                                .unwrap_or("any code following this expression is unreachable"),
+                        )
+                        .emit();
+                })
+            }
+        }
+    }
+
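+    // For illustration, the `unreachable_code` lint that this feeds fires on ordinary
+    // code such as the following made-up snippet:
+    //
+    // ```rust
+    // fn after_return() -> i32 {
+    //     return 1;
+    //     2 // warning: unreachable expression
+    // }
+    // ```
+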
+    /// Resolves type and const variables in `ty` if possible. Unlike the infcx
+    /// version (resolve_vars_if_possible), this version will
+    /// also select obligations if it seems useful, in an effort
+    /// to get more type information.
+    pub(in super::super) fn resolve_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
+        debug!("resolve_vars_with_obligations(ty={:?})", ty);
+
+        // No Infer()? Nothing needs doing.
+        if !ty.has_infer_types_or_consts() {
+            debug!("resolve_vars_with_obligations: ty={:?}", ty);
+            return ty;
+        }
+
+        // If `ty` is a type variable, see whether we already know what it is.
+        ty = self.resolve_vars_if_possible(&ty);
+        if !ty.has_infer_types_or_consts() {
+            debug!("resolve_vars_with_obligations: ty={:?}", ty);
+            return ty;
+        }
+
+        // If not, try resolving pending obligations as much as
+        // possible. This can help substantially when there are
+        // indirect dependencies that don't seem worth tracking
+        // precisely.
+        self.select_obligations_where_possible(false, |_| {});
+        ty = self.resolve_vars_if_possible(&ty);
+
+        debug!("resolve_vars_with_obligations: ty={:?}", ty);
+        ty
+    }
+
+    pub(in super::super) fn record_deferred_call_resolution(
+        &self,
+        closure_def_id: DefId,
+        r: DeferredCallResolution<'tcx>,
+    ) {
+        let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
+        deferred_call_resolutions.entry(closure_def_id).or_default().push(r);
+    }
+
+    pub(in super::super) fn remove_deferred_call_resolutions(
+        &self,
+        closure_def_id: DefId,
+    ) -> Vec<DeferredCallResolution<'tcx>> {
+        let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
+        deferred_call_resolutions.remove(&closure_def_id).unwrap_or_default()
+    }
+
+    pub fn tag(&self) -> String {
+        format!("{:p}", self)
+    }
+
+    pub fn local_ty(&self, span: Span, nid: hir::HirId) -> LocalTy<'tcx> {
+        self.locals.borrow().get(&nid).cloned().unwrap_or_else(|| {
+            span_bug!(span, "no type for local variable {}", self.tcx.hir().node_to_string(nid))
+        })
+    }
+
+    #[inline]
+    pub fn write_ty(&self, id: hir::HirId, ty: Ty<'tcx>) {
+        debug!(
+            "write_ty({:?}, {:?}) in fcx {}",
+            id,
+            self.resolve_vars_if_possible(&ty),
+            self.tag()
+        );
+        self.typeck_results.borrow_mut().node_types_mut().insert(id, ty);
+
+        if ty.references_error() {
+            self.has_errors.set(true);
+            self.set_tainted_by_errors();
+        }
+    }
+
+    pub fn write_field_index(&self, hir_id: hir::HirId, index: usize) {
+        self.typeck_results.borrow_mut().field_indices_mut().insert(hir_id, index);
+    }
+
+    pub(in super::super) fn write_resolution(
+        &self,
+        hir_id: hir::HirId,
+        r: Result<(DefKind, DefId), ErrorReported>,
+    ) {
+        self.typeck_results.borrow_mut().type_dependent_defs_mut().insert(hir_id, r);
+    }
+
+    pub fn write_method_call(&self, hir_id: hir::HirId, method: MethodCallee<'tcx>) {
+        debug!("write_method_call(hir_id={:?}, method={:?})", hir_id, method);
+        self.write_resolution(hir_id, Ok((DefKind::AssocFn, method.def_id)));
+        self.write_substs(hir_id, method.substs);
+
+        // When the method is confirmed, the `method.substs` includes
+        // parameters from not just the method, but also the impl of
+        // the method -- in particular, the `Self` type will be fully
+        // resolved. However, those are not something that the "user
+        // specified" -- i.e., those types come from the inferred type
+        // of the receiver, not something the user wrote. So when we
+        // create the user-substs, we want to replace those earlier
+        // types with just the types that the user actually wrote --
+        // that is, those that appear on the *method itself*.
+        //
+        // As an example, if the user wrote something like
+        // `foo.bar::<u32>(...)` -- the `Self` type here will be the
+        // type of `foo` (possibly adjusted), but we don't want to
+        // include that. We want just the `[_, u32]` part.
+        if !method.substs.is_noop() {
+            let method_generics = self.tcx.generics_of(method.def_id);
+            if !method_generics.params.is_empty() {
+                let user_type_annotation = self.infcx.probe(|_| {
+                    let user_substs = UserSubsts {
+                        substs: InternalSubsts::for_item(self.tcx, method.def_id, |param, _| {
+                            let i = param.index as usize;
+                            if i < method_generics.parent_count {
+                                self.infcx.var_for_def(DUMMY_SP, param)
+                            } else {
+                                method.substs[i]
+                            }
+                        }),
+                        user_self_ty: None, // not relevant here
+                    };
+
+                    self.infcx.canonicalize_user_type_annotation(&UserType::TypeOf(
+                        method.def_id,
+                        user_substs,
+                    ))
+                });
+
+                debug!("write_method_call: user_type_annotation={:?}", user_type_annotation);
+                self.write_user_type_annotation(hir_id, user_type_annotation);
+            }
+        }
+    }
+
+    pub fn write_substs(&self, node_id: hir::HirId, substs: SubstsRef<'tcx>) {
+        if !substs.is_noop() {
+            debug!("write_substs({:?}, {:?}) in fcx {}", node_id, substs, self.tag());
+
+            self.typeck_results.borrow_mut().node_substs_mut().insert(node_id, substs);
+        }
+    }
+
+    /// Given the substs that we just converted from the HIR, try to
+    /// canonicalize them and store them as user-given substitutions
+    /// (i.e., substitutions that must be respected by the NLL check).
+    ///
+    /// This should be invoked **before any unifications have
+    /// occurred**, so that annotations like `Vec<_>` are preserved
+    /// properly.
+    pub fn write_user_type_annotation_from_substs(
+        &self,
+        hir_id: hir::HirId,
+        def_id: DefId,
+        substs: SubstsRef<'tcx>,
+        user_self_ty: Option<UserSelfTy<'tcx>>,
+    ) {
+        debug!(
+            "write_user_type_annotation_from_substs: hir_id={:?} def_id={:?} substs={:?} \
+             user_self_ty={:?} in fcx {}",
+            hir_id,
+            def_id,
+            substs,
+            user_self_ty,
+            self.tag(),
+        );
+
+        if Self::can_contain_user_lifetime_bounds((substs, user_self_ty)) {
+            let canonicalized = self.infcx.canonicalize_user_type_annotation(&UserType::TypeOf(
+                def_id,
+                UserSubsts { substs, user_self_ty },
+            ));
+            debug!("write_user_type_annotation_from_substs: canonicalized={:?}", canonicalized);
+            self.write_user_type_annotation(hir_id, canonicalized);
+        }
+    }
+
+    pub fn write_user_type_annotation(
+        &self,
+        hir_id: hir::HirId,
+        canonical_user_type_annotation: CanonicalUserType<'tcx>,
+    ) {
+        debug!(
+            "write_user_type_annotation: hir_id={:?} canonical_user_type_annotation={:?} tag={}",
+            hir_id,
+            canonical_user_type_annotation,
+            self.tag(),
+        );
+
+        if !canonical_user_type_annotation.is_identity() {
+            self.typeck_results
+                .borrow_mut()
+                .user_provided_types_mut()
+                .insert(hir_id, canonical_user_type_annotation);
+        } else {
+            debug!("write_user_type_annotation: skipping identity substs");
+        }
+    }
+
+    pub fn apply_adjustments(&self, expr: &hir::Expr<'_>, adj: Vec<Adjustment<'tcx>>) {
+        debug!("apply_adjustments(expr={:?}, adj={:?})", expr, adj);
+
+        if adj.is_empty() {
+            return;
+        }
+
+        let autoborrow_mut = adj.iter().any(|adj| {
+            matches!(adj, &Adjustment {
+                kind: Adjust::Borrow(AutoBorrow::Ref(_, AutoBorrowMutability::Mut { .. })),
+                ..
+            })
+        });
+
+        match self.typeck_results.borrow_mut().adjustments_mut().entry(expr.hir_id) {
+            Entry::Vacant(entry) => {
+                entry.insert(adj);
+            }
+            Entry::Occupied(mut entry) => {
+                debug!(" - composing on top of {:?}", entry.get());
+                match (&entry.get()[..], &adj[..]) {
+                    // Applying any adjustment on top of a NeverToAny
+                    // is a valid NeverToAny adjustment, because it can't
+                    // be reached.
+                    (&[Adjustment { kind: Adjust::NeverToAny, .. }], _) => return,
+                    (&[
+                        Adjustment { kind: Adjust::Deref(_), .. },
+                        Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. },
+                    ], &[
+                        Adjustment { kind: Adjust::Deref(_), .. },
+                        .. // Any following adjustments are allowed.
+                    ]) => {
+                        // A reborrow has no effect before a dereference.
+                    }
+                    // FIXME: currently we never try to compose autoderefs
+                    // and ReifyFnPointer/UnsafeFnPointer, but we could.
+                    _ =>
+                        bug!("while adjusting {:?}, can't compose {:?} and {:?}",
+                             expr, entry.get(), adj)
+                };
+                *entry.get_mut() = adj;
+            }
+        }
+
+        // If there is a mutable auto-borrow, it is equivalent to `&mut <expr>`.
+        // In this case implicit use of `Deref` and `Index` within `<expr>` should
+        // instead be `DerefMut` and `IndexMut`, so fix those up.
+        if autoborrow_mut {
+            self.convert_place_derefs_to_mutable(expr);
+        }
+    }
+
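+    // A rough, made-up illustration of the mutable-autoborrow fixup above:
+    //
+    // ```rust
+    // fn push_zero(b: &mut Box<Vec<i32>>) {
+    //     // The call autoborrows the receiver mutably, roughly `(&mut **b).push(0)`,
+    //     // so the overloaded deref of the `Box` must be `DerefMut`, not `Deref`.
+    //     b.push(0);
+    // }
+    // ```
+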
+    /// Basically whenever we are converting from a type scheme into
+    /// the fn body space, we always want to normalize associated
+    /// types as well. This function combines the two.
+    fn instantiate_type_scheme<T>(&self, span: Span, substs: SubstsRef<'tcx>, value: &T) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        let value = value.subst(self.tcx, substs);
+        let result = self.normalize_associated_types_in(span, &value);
+        debug!("instantiate_type_scheme(value={:?}, substs={:?}) = {:?}", value, substs, result);
+        result
+    }
+
+    /// As `instantiate_type_scheme`, but for the bounds found in a
+    /// generic type scheme.
+    pub(in super::super) fn instantiate_bounds(
+        &self,
+        span: Span,
+        def_id: DefId,
+        substs: SubstsRef<'tcx>,
+    ) -> (ty::InstantiatedPredicates<'tcx>, Vec<Span>) {
+        let bounds = self.tcx.predicates_of(def_id);
+        let spans: Vec<Span> = bounds.predicates.iter().map(|(_, span)| *span).collect();
+        let result = bounds.instantiate(self.tcx, substs);
+        let result = self.normalize_associated_types_in(span, &result);
+        debug!(
+            "instantiate_bounds(bounds={:?}, substs={:?}) = {:?}, {:?}",
+            bounds, substs, result, spans,
+        );
+        (result, spans)
+    }
+
+    /// Replaces the opaque types from the given value with type variables,
+    /// and records the `OpaqueTypeMap` for later use during writeback. See
+    /// `InferCtxt::instantiate_opaque_types` for more details.
+    pub(in super::super) fn instantiate_opaque_types_from_value<T: TypeFoldable<'tcx>>(
+        &self,
+        parent_id: hir::HirId,
+        value: &T,
+        value_span: Span,
+    ) -> T {
+        let parent_def_id = self.tcx.hir().local_def_id(parent_id);
+        debug!(
+            "instantiate_opaque_types_from_value(parent_def_id={:?}, value={:?})",
+            parent_def_id, value
+        );
+
+        let (value, opaque_type_map) =
+            self.register_infer_ok_obligations(self.instantiate_opaque_types(
+                parent_def_id,
+                self.body_id,
+                self.param_env,
+                value,
+                value_span,
+            ));
+
+        let mut opaque_types = self.opaque_types.borrow_mut();
+        let mut opaque_types_vars = self.opaque_types_vars.borrow_mut();
+        for (ty, decl) in opaque_type_map {
+            let _ = opaque_types.insert(ty, decl);
+            let _ = opaque_types_vars.insert(decl.concrete_ty, decl.opaque_type);
+        }
+
+        value
+    }
+
+    pub(in super::super) fn normalize_associated_types_in<T>(&self, span: Span, value: &T) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        self.inh.normalize_associated_types_in(span, self.body_id, self.param_env, value)
+    }
+
+    pub(in super::super) fn normalize_associated_types_in_as_infer_ok<T>(
+        &self,
+        span: Span,
+        value: &T,
+    ) -> InferOk<'tcx, T>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        self.inh.partially_normalize_associated_types_in(span, self.body_id, self.param_env, value)
+    }
+
+    pub fn require_type_meets(
+        &self,
+        ty: Ty<'tcx>,
+        span: Span,
+        code: traits::ObligationCauseCode<'tcx>,
+        def_id: DefId,
+    ) {
+        self.register_bound(ty, def_id, traits::ObligationCause::new(span, self.body_id, code));
+    }
+
+    pub fn require_type_is_sized(
+        &self,
+        ty: Ty<'tcx>,
+        span: Span,
+        code: traits::ObligationCauseCode<'tcx>,
+    ) {
+        if !ty.references_error() {
+            let lang_item = self.tcx.require_lang_item(LangItem::Sized, None);
+            self.require_type_meets(ty, span, code, lang_item);
+        }
+    }
+
+    pub fn require_type_is_sized_deferred(
+        &self,
+        ty: Ty<'tcx>,
+        span: Span,
+        code: traits::ObligationCauseCode<'tcx>,
+    ) {
+        if !ty.references_error() {
+            self.deferred_sized_obligations.borrow_mut().push((ty, span, code));
+        }
+    }
+
+    pub fn register_bound(
+        &self,
+        ty: Ty<'tcx>,
+        def_id: DefId,
+        cause: traits::ObligationCause<'tcx>,
+    ) {
+        if !ty.references_error() {
+            self.fulfillment_cx.borrow_mut().register_bound(
+                self,
+                self.param_env,
+                ty,
+                def_id,
+                cause,
+            );
+        }
+    }
+
+    pub fn to_ty(&self, ast_t: &hir::Ty<'_>) -> Ty<'tcx> {
+        let t = AstConv::ast_ty_to_ty(self, ast_t);
+        self.register_wf_obligation(t.into(), ast_t.span, traits::MiscObligation);
+        t
+    }
+
+    pub fn to_ty_saving_user_provided_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+        let ty = self.to_ty(ast_ty);
+        debug!("to_ty_saving_user_provided_ty: ty={:?}", ty);
+
+        if Self::can_contain_user_lifetime_bounds(ty) {
+            let c_ty = self.infcx.canonicalize_response(&UserType::Ty(ty));
+            debug!("to_ty_saving_user_provided_ty: c_ty={:?}", c_ty);
+            self.typeck_results.borrow_mut().user_provided_types_mut().insert(ast_ty.hir_id, c_ty);
+        }
+
+        ty
+    }
+
+    pub fn to_const(&self, ast_c: &hir::AnonConst) -> &'tcx ty::Const<'tcx> {
+        let const_def_id = self.tcx.hir().local_def_id(ast_c.hir_id);
+        let c = ty::Const::from_anon_const(self.tcx, const_def_id);
+        self.register_wf_obligation(
+            c.into(),
+            self.tcx.hir().span(ast_c.hir_id),
+            ObligationCauseCode::MiscObligation,
+        );
+        c
+    }
+
+    pub fn const_arg_to_const(
+        &self,
+        ast_c: &hir::AnonConst,
+        param_def_id: DefId,
+    ) -> &'tcx ty::Const<'tcx> {
+        let const_def = ty::WithOptConstParam {
+            did: self.tcx.hir().local_def_id(ast_c.hir_id),
+            const_param_did: Some(param_def_id),
+        };
+        let c = ty::Const::from_opt_const_arg_anon_const(self.tcx, const_def);
+        self.register_wf_obligation(
+            c.into(),
+            self.tcx.hir().span(ast_c.hir_id),
+            ObligationCauseCode::MiscObligation,
+        );
+        c
+    }
+
+    // If the type given by the user has free regions, save it for later, since
+    // NLL would like to enforce those. Also pass in types that involve
+    // projections, since those can resolve to `'static` bounds (modulo #54940,
+    // which hopefully will be fixed by the time you see this comment, dear
+    // reader, although I have my doubts). Also pass in types with inference
+    // types, because they may be repeated. Other sorts of things are already
+    // sufficiently enforced with erased regions. =)
+    fn can_contain_user_lifetime_bounds<T>(t: T) -> bool
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        t.has_free_regions() || t.has_projections() || t.has_infer_types()
+    }
+
+    pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> {
+        match self.typeck_results.borrow().node_types().get(id) {
+            Some(&t) => t,
+            None if self.is_tainted_by_errors() => self.tcx.ty_error(),
+            None => {
+                bug!(
+                    "no type for node {}: {} in fcx {}",
+                    id,
+                    self.tcx.hir().node_to_string(id),
+                    self.tag()
+                );
+            }
+        }
+    }
+
+    /// Registers an obligation for checking later, during regionck, that `arg` is well-formed.
+    pub fn register_wf_obligation(
+        &self,
+        arg: subst::GenericArg<'tcx>,
+        span: Span,
+        code: traits::ObligationCauseCode<'tcx>,
+    ) {
+        // WF obligations never themselves fail, so no real need to give a detailed cause:
+        let cause = traits::ObligationCause::new(span, self.body_id, code);
+        self.register_predicate(traits::Obligation::new(
+            cause,
+            self.param_env,
+            ty::PredicateAtom::WellFormed(arg).to_predicate(self.tcx),
+        ));
+    }
+
+    /// Registers obligations that all `substs` are well-formed.
+    pub fn add_wf_bounds(&self, substs: SubstsRef<'tcx>, expr: &hir::Expr<'_>) {
+        for arg in substs.iter().filter(|arg| {
+            matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..))
+        }) {
+            self.register_wf_obligation(arg, expr.span, traits::MiscObligation);
+        }
+    }
+
+    /// Given a fully substituted set of bounds (`generic_bounds`), and the values with which each
+    /// type/region parameter was instantiated (`substs`), creates and registers suitable
+    /// trait/region obligations.
+    ///
+    /// For example, if there is a function:
+    ///
+    /// ```
+    /// fn foo<'a,T:'a>(...)
+    /// ```
+    ///
+    /// and a reference:
+    ///
+    /// ```
+    /// let f = foo;
+    /// ```
+    ///
+    /// Then we will create a fresh region variable `'$0` and a fresh type variable `$1` for `'a`
+    /// and `T`. This routine will add a region obligation `$1:'$0` and register it locally.
+    pub fn add_obligations_for_parameters(
+        &self,
+        cause: traits::ObligationCause<'tcx>,
+        predicates: ty::InstantiatedPredicates<'tcx>,
+    ) {
+        assert!(!predicates.has_escaping_bound_vars());
+
+        debug!("add_obligations_for_parameters(predicates={:?})", predicates);
+
+        for obligation in traits::predicates_for_generics(cause, self.param_env, predicates) {
+            self.register_predicate(obligation);
+        }
+    }
+
+    // FIXME(arielb1): use this instead of field.ty everywhere
+    // Only for fields! Returns <none> for methods.
+    // Indifferent to privacy flags
+    pub fn field_ty(
+        &self,
+        span: Span,
+        field: &'tcx ty::FieldDef,
+        substs: SubstsRef<'tcx>,
+    ) -> Ty<'tcx> {
+        self.normalize_associated_types_in(span, &field.ty(self.tcx, substs))
+    }
+
+    pub(in super::super) fn resolve_generator_interiors(&self, def_id: DefId) {
+        let mut generators = self.deferred_generator_interiors.borrow_mut();
+        for (body_id, interior, kind) in generators.drain(..) {
+            self.select_obligations_where_possible(false, |_| {});
+            crate::check::generator_interior::resolve_interior(
+                self, def_id, body_id, interior, kind,
+            );
+        }
+    }
+
+    // Tries to apply a fallback to `ty` if it is an unsolved variable.
+    //
+    // - Unconstrained ints are replaced with `i32`.
+    //
+    // - Unconstrained floats are replaced with `f64`.
+    //
+    // - Non-numerics get replaced with `!` when `#![feature(never_type_fallback)]`
+    //   is enabled. Otherwise, they are replaced with `()`.
+    //
+    // Fallback becomes very dubious if we have encountered type-checking errors.
+    // In that case, fallback to Error.
+    // The return value indicates whether fallback has occurred.
+    pub(in super::super) fn fallback_if_possible(&self, ty: Ty<'tcx>, mode: FallbackMode) -> bool {
+        use rustc_middle::ty::error::UnconstrainedNumeric::Neither;
+        use rustc_middle::ty::error::UnconstrainedNumeric::{UnconstrainedFloat, UnconstrainedInt};
+
+        assert!(ty.is_ty_infer());
+        let fallback = match self.type_is_unconstrained_numeric(ty) {
+            _ if self.is_tainted_by_errors() => self.tcx().ty_error(),
+            UnconstrainedInt => self.tcx.types.i32,
+            UnconstrainedFloat => self.tcx.types.f64,
+            Neither if self.type_var_diverges(ty) => self.tcx.mk_diverging_default(),
+            Neither => {
+                // This type variable was created from the instantiation of an opaque
+                // type. The fact that we're attempting to perform fallback for it
+                // means that the function neither constrained it to a concrete
+                // type, nor to the opaque type itself.
+                //
+                // For example, in this code:
+                //
+                // ```
+                // type MyType = impl Copy;
+                // fn defining_use() -> MyType { true }
+                // fn other_use() -> MyType { defining_use() }
+                // ```
+                //
+                // `defining_use` will constrain the instantiated inference
+                // variable to `bool`, while `other_use` will constrain
+                // the instantiated inference variable to `MyType`.
+                //
+                // When we process opaque types during writeback, we
+                // will handle cases like `other_use`, and not count
+                // them as defining usages.
+                //
+                // However, we also need to handle cases like this:
+                //
+                // ```rust
+                // pub type Foo = impl Copy;
+                // fn produce() -> Option<Foo> {
+                //     None
+                // }
+                // ```
+                //
+                // In the above snippet, the inference variable created by
+                // instantiating `Option<Foo>` will be completely unconstrained.
+                // We treat this as a non-defining use by making the inference
+                // variable fall back to the opaque type itself.
+                if let FallbackMode::All = mode {
+                    if let Some(opaque_ty) = self.opaque_types_vars.borrow().get(ty) {
+                        debug!(
+                            "fallback_if_possible: falling back opaque type var {:?} to {:?}",
+                            ty, opaque_ty
+                        );
+                        *opaque_ty
+                    } else {
+                        return false;
+                    }
+                } else {
+                    return false;
+                }
+            }
+        };
+        debug!("fallback_if_possible: defaulting `{:?}` to `{:?}`", ty, fallback);
+        self.demand_eqtype(rustc_span::DUMMY_SP, ty, fallback);
+        true
+    }
+
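+    // The numeric fallback above is observable from plain code, e.g.:
+    //
+    // ```rust
+    // fn main() {
+    //     let n = 1;   // nothing constrains the literal, so it falls back to `i32`
+    //     let x = 1.0; // likewise, the float literal falls back to `f64`
+    //     assert_eq!(std::mem::size_of_val(&n), 4);
+    //     assert_eq!(std::mem::size_of_val(&x), 8);
+    // }
+    // ```
+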
+    pub(in super::super) fn select_all_obligations_or_error(&self) {
+        debug!("select_all_obligations_or_error");
+        if let Err(errors) = self.fulfillment_cx.borrow_mut().select_all_or_error(&self) {
+            self.report_fulfillment_errors(&errors, self.inh.body_id, false);
+        }
+    }
+
+    /// Select as many obligations as we can at present.
+    pub(in super::super) fn select_obligations_where_possible(
+        &self,
+        fallback_has_occurred: bool,
+        mutate_fullfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
+    ) {
+        let result = self.fulfillment_cx.borrow_mut().select_where_possible(self);
+        if let Err(mut errors) = result {
+            mutate_fullfillment_errors(&mut errors);
+            self.report_fulfillment_errors(&errors, self.inh.body_id, fallback_has_occurred);
+        }
+    }
+
+    /// For the overloaded place expressions (`*x`, `x[3]`), the trait
+    /// returns a type of `&T`, but the actual type we assign to the
+    /// *expression* is `T`. So this function just peels off the return
+    /// type by one layer to yield `T`.
+    pub(in super::super) fn make_overloaded_place_return_type(
+        &self,
+        method: MethodCallee<'tcx>,
+    ) -> ty::TypeAndMut<'tcx> {
+        // extract method return type, which will be &T;
+        let ret_ty = method.sig.output();
+
+        // method returns &T, but the type as visible to the user is T, so deref
+        ret_ty.builtin_deref(true).unwrap()
+    }
+
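+    // For example, in surface Rust:
+    //
+    // ```rust
+    // fn main() {
+    //     let v = vec![1, 2, 3];
+    //     // `Index::index` returns `&i32`, but the place expression `v[0]` gets the
+    //     // peeled type `i32`, so `n` is an `i32` here.
+    //     let n = v[0];
+    // }
+    // ```
+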
+    fn self_type_matches_expected_vid(
+        &self,
+        trait_ref: ty::PolyTraitRef<'tcx>,
+        expected_vid: ty::TyVid,
+    ) -> bool {
+        let self_ty = self.shallow_resolve(trait_ref.skip_binder().self_ty());
+        debug!(
+            "self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?}, expected_vid={:?})",
+            trait_ref, self_ty, expected_vid
+        );
+        match *self_ty.kind() {
+            ty::Infer(ty::TyVar(found_vid)) => {
+                // FIXME: consider using `sub_root_var` here so we
+                // can see through subtyping.
+                let found_vid = self.root_var(found_vid);
+                debug!("self_type_matches_expected_vid - found_vid={:?}", found_vid);
+                expected_vid == found_vid
+            }
+            _ => false,
+        }
+    }
+
+    pub(in super::super) fn obligations_for_self_ty<'b>(
+        &'b self,
+        self_ty: ty::TyVid,
+    ) -> impl Iterator<Item = (ty::PolyTraitRef<'tcx>, traits::PredicateObligation<'tcx>)>
+    + Captures<'tcx>
+    + 'b {
+        // FIXME: consider using `sub_root_var` here so we
+        // can see through subtyping.
+        let ty_var_root = self.root_var(self_ty);
+        debug!(
+            "obligations_for_self_ty: self_ty={:?} ty_var_root={:?} pending_obligations={:?}",
+            self_ty,
+            ty_var_root,
+            self.fulfillment_cx.borrow().pending_obligations()
+        );
+
+        self.fulfillment_cx
+            .borrow()
+            .pending_obligations()
+            .into_iter()
+            .filter_map(move |obligation| {
+                match obligation.predicate.skip_binders() {
+                    ty::PredicateAtom::Projection(data) => {
+                        Some((ty::Binder::bind(data).to_poly_trait_ref(self.tcx), obligation))
+                    }
+                    ty::PredicateAtom::Trait(data, _) => {
+                        Some((ty::Binder::bind(data).to_poly_trait_ref(), obligation))
+                    }
+                    ty::PredicateAtom::Subtype(..) => None,
+                    ty::PredicateAtom::RegionOutlives(..) => None,
+                    ty::PredicateAtom::TypeOutlives(..) => None,
+                    ty::PredicateAtom::WellFormed(..) => None,
+                    ty::PredicateAtom::ObjectSafe(..) => None,
+                    ty::PredicateAtom::ConstEvaluatable(..) => None,
+                    ty::PredicateAtom::ConstEquate(..) => None,
+                    // N.B., this predicate is created by breaking down a
+                    // `ClosureType: FnFoo()` predicate, where
+                    // `ClosureType` represents some `Closure`. It can't
+                    // possibly be referring to the current closure,
+                    // because we haven't produced the `Closure` for
+                    // this closure yet; this is exactly why the other
+                    // code is looking for a self type of an unresolved
+                    // inference variable.
+                    ty::PredicateAtom::ClosureKind(..) => None,
+                    ty::PredicateAtom::TypeWellFormedFromEnv(..) => None,
+                }
+            })
+            .filter(move |(tr, _)| self.self_type_matches_expected_vid(*tr, ty_var_root))
+    }
+
+    pub(in super::super) fn type_var_is_sized(&self, self_ty: ty::TyVid) -> bool {
+        self.obligations_for_self_ty(self_ty)
+            .any(|(tr, _)| Some(tr.def_id()) == self.tcx.lang_items().sized_trait())
+    }
+
+    pub(in super::super) fn err_args(&self, len: usize) -> Vec<Ty<'tcx>> {
+        vec![self.tcx.ty_error(); len]
+    }
+
+    /// Unifies the output type with the expected type early, to allow more coercions
+    /// and to propagate type information forward to the input expressions.
+    pub(in super::super) fn expected_inputs_for_expected_output(
+        &self,
+        call_span: Span,
+        expected_ret: Expectation<'tcx>,
+        formal_ret: Ty<'tcx>,
+        formal_args: &[Ty<'tcx>],
+    ) -> Vec<Ty<'tcx>> {
+        let formal_ret = self.resolve_vars_with_obligations(formal_ret);
+        let ret_ty = match expected_ret.only_has_type(self) {
+            Some(ret) => ret,
+            None => return Vec::new(),
+        };
+        let expect_args = self
+            .fudge_inference_if_ok(|| {
+                // Attempt to apply a subtyping relationship between the formal
+                // return type (likely containing type variables if the function
+                // is polymorphic) and the expected return type.
+                // No argument expectations are produced if unification fails.
+                let origin = self.misc(call_span);
+                let ures = self.at(&origin, self.param_env).sup(ret_ty, &formal_ret);
+
+                // FIXME(#27336) can't use ? here, Try::from_error doesn't default
+                // to identity so the resulting type is not constrained.
+                match ures {
+                    Ok(ok) => {
+                        // Process any obligations locally as much as
+                        // we can.  We don't care if some things turn
+                        // out unconstrained or ambiguous, as we're
+                        // just trying to get hints here.
+                        self.save_and_restore_in_snapshot_flag(|_| {
+                            let mut fulfill = TraitEngine::new(self.tcx);
+                            for obligation in ok.obligations {
+                                fulfill.register_predicate_obligation(self, obligation);
+                            }
+                            fulfill.select_where_possible(self)
+                        })
+                        .map_err(|_| ())?;
+                    }
+                    Err(_) => return Err(()),
+                }
+
+                // Record all the argument types, with the substitutions
+                // produced from the above subtyping unification.
+                Ok(formal_args.iter().map(|ty| self.resolve_vars_if_possible(ty)).collect())
+            })
+            .unwrap_or_default();
+        debug!(
+            "expected_inputs_for_expected_output(formal={:?} -> {:?}, expected={:?} -> {:?})",
+            formal_args, formal_ret, expect_args, expected_ret
+        );
+        expect_args
+    }
+
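+    // For example, with a made-up generic function:
+    //
+    // ```rust
+    // fn pair<T>(a: T, b: T) -> (T, T) { (a, b) }
+    //
+    // fn main() {
+    //     // The formal return type `(T, T)` is unified with the expected `(u8, u8)`
+    //     // before the arguments are checked, so each literal is checked with the
+    //     // expectation `u8`.
+    //     let p: (u8, u8) = pair(1, 2);
+    // }
+    // ```
+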
+    pub(in super::super) fn resolve_lang_item_path(
+        &self,
+        lang_item: hir::LangItem,
+        span: Span,
+        hir_id: hir::HirId,
+    ) -> (Res, Ty<'tcx>) {
+        let def_id = self.tcx.require_lang_item(lang_item, Some(span));
+        let def_kind = self.tcx.def_kind(def_id);
+
+        let item_ty = if let DefKind::Variant = def_kind {
+            self.tcx.type_of(self.tcx.parent(def_id).expect("variant w/out parent"))
+        } else {
+            self.tcx.type_of(def_id)
+        };
+        let substs = self.infcx.fresh_substs_for_item(span, def_id);
+        let ty = item_ty.subst(self.tcx, substs);
+
+        self.write_resolution(hir_id, Ok((def_kind, def_id)));
+        self.add_required_obligations(span, def_id, &substs);
+        (Res::Def(def_kind, def_id), ty)
+    }
+
+    /// Resolves an associated value path into a base type and associated constant, or method
+    /// resolution. The newly resolved definition is written into `type_dependent_defs`.
+    pub fn resolve_ty_and_res_ufcs<'b>(
+        &self,
+        qpath: &'b QPath<'b>,
+        hir_id: hir::HirId,
+        span: Span,
+    ) -> (Res, Option<Ty<'tcx>>, &'b [hir::PathSegment<'b>]) {
+        debug!("resolve_ty_and_res_ufcs: qpath={:?} hir_id={:?} span={:?}", qpath, hir_id, span);
+        let (ty, qself, item_segment) = match *qpath {
+            QPath::Resolved(ref opt_qself, ref path) => {
+                return (
+                    path.res,
+                    opt_qself.as_ref().map(|qself| self.to_ty(qself)),
+                    &path.segments[..],
+                );
+            }
+            QPath::TypeRelative(ref qself, ref segment) => (self.to_ty(qself), qself, segment),
+            QPath::LangItem(..) => bug!("`resolve_ty_and_res_ufcs` called on `LangItem`"),
+        };
+        if let Some(&cached_result) = self.typeck_results.borrow().type_dependent_defs().get(hir_id)
+        {
+            // Return directly on cache hit. This is useful to avoid doubly reporting
+            // errors with default match binding modes. See #44614.
+            let def =
+                cached_result.map(|(kind, def_id)| Res::Def(kind, def_id)).unwrap_or(Res::Err);
+            return (def, Some(ty), slice::from_ref(&**item_segment));
+        }
+        let item_name = item_segment.ident;
+        let result = self.resolve_ufcs(span, item_name, ty, hir_id).or_else(|error| {
+            let result = match error {
+                method::MethodError::PrivateMatch(kind, def_id, _) => Ok((kind, def_id)),
+                _ => Err(ErrorReported),
+            };
+            if item_name.name != kw::Invalid {
+                if let Some(mut e) = self.report_method_error(
+                    span,
+                    ty,
+                    item_name,
+                    SelfSource::QPath(qself),
+                    error,
+                    None,
+                ) {
+                    e.emit();
+                }
+            }
+            result
+        });
+
+        // Write back the new resolution.
+        self.write_resolution(hir_id, result);
+        (
+            result.map(|(kind, def_id)| Res::Def(kind, def_id)).unwrap_or(Res::Err),
+            Some(ty),
+            slice::from_ref(&**item_segment),
+        )
+    }
+
+    /// Given a function `Node`, return its `FnDecl` if it exists, or `None` otherwise.
+    pub(in super::super) fn get_node_fn_decl(
+        &self,
+        node: Node<'tcx>,
+    ) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident, bool)> {
+        match node {
+            Node::Item(&hir::Item { ident, kind: hir::ItemKind::Fn(ref sig, ..), .. }) => {
+                // This is less than ideal, it will not suggest a return type span on any
+                // method called `main`, regardless of whether it is actually the entry point,
+                // but it will still present it as the reason for the expected type.
+                Some((&sig.decl, ident, ident.name != sym::main))
+            }
+            Node::TraitItem(&hir::TraitItem {
+                ident,
+                kind: hir::TraitItemKind::Fn(ref sig, ..),
+                ..
+            }) => Some((&sig.decl, ident, true)),
+            Node::ImplItem(&hir::ImplItem {
+                ident,
+                kind: hir::ImplItemKind::Fn(ref sig, ..),
+                ..
+            }) => Some((&sig.decl, ident, false)),
+            _ => None,
+        }
+    }
+
+    /// Given a `HirId`, return the `FnDecl` of the method it is enclosed by and whether a
+    /// suggestion can be made, `None` otherwise.
+    pub fn get_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, bool)> {
+        // Get enclosing Fn, if it is a function or a trait method, unless there's a `loop` or
+        // `while` before reaching it, as block tail returns are not available in them.
+        self.tcx.hir().get_return_block(blk_id).and_then(|blk_id| {
+            let parent = self.tcx.hir().get(blk_id);
+            self.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
+        })
+    }
+
+    pub(in super::super) fn note_internal_mutation_in_method(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        expr: &hir::Expr<'_>,
+        expected: Ty<'tcx>,
+        found: Ty<'tcx>,
+    ) {
+        if found != self.tcx.types.unit {
+            return;
+        }
+        if let ExprKind::MethodCall(path_segment, _, [rcvr, ..], _) = expr.kind {
+            if self
+                .typeck_results
+                .borrow()
+                .expr_ty_adjusted_opt(rcvr)
+                .map_or(true, |ty| expected.peel_refs() != ty.peel_refs())
+            {
+                return;
+            }
+            let mut sp = MultiSpan::from_span(path_segment.ident.span);
+            sp.push_span_label(
+                path_segment.ident.span,
+                format!(
+                    "this call modifies {} in-place",
+                    match rcvr.kind {
+                        ExprKind::Path(QPath::Resolved(
+                            None,
+                            hir::Path { segments: [segment], .. },
+                        )) => format!("`{}`", segment.ident),
+                        _ => "its receiver".to_string(),
+                    }
+                ),
+            );
+            sp.push_span_label(
+                rcvr.span,
+                "you probably want to use this value after calling the method...".to_string(),
+            );
+            err.span_note(
+                sp,
+                &format!("method `{}` modifies its receiver in-place", path_segment.ident),
+            );
+            err.note(&format!("...instead of the `()` output of method `{}`", path_segment.ident));
+        }
+    }
+
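+    // A made-up example of the mismatch this note targets (does not compile):
+    //
+    // ```rust
+    // fn main() {
+    //     let mut v = vec![3, 1, 2];
+    //     // `sort` sorts `v` in place and returns `()`, so this is a type mismatch;
+    //     // the note points back at the receiver as the value to use instead.
+    //     let sorted: Vec<i32> = v.sort();
+    // }
+    // ```
+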
+    pub(in super::super) fn note_need_for_fn_pointer(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        expected: Ty<'tcx>,
+        found: Ty<'tcx>,
+    ) {
+        let (sig, did, substs) = match (&expected.kind(), &found.kind()) {
+            (ty::FnDef(did1, substs1), ty::FnDef(did2, substs2)) => {
+                let sig1 = self.tcx.fn_sig(*did1).subst(self.tcx, substs1);
+                let sig2 = self.tcx.fn_sig(*did2).subst(self.tcx, substs2);
+                if sig1 != sig2 {
+                    return;
+                }
+                err.note(
+                    "different `fn` items always have unique types, even if their signatures are \
+                     the same",
+                );
+                (sig1, *did1, substs1)
+            }
+            (ty::FnDef(did, substs), ty::FnPtr(sig2)) => {
+                let sig1 = self.tcx.fn_sig(*did).subst(self.tcx, substs);
+                if sig1 != *sig2 {
+                    return;
+                }
+                (sig1, *did, substs)
+            }
+            _ => return,
+        };
+        err.help(&format!("change the expected type to be function pointer `{}`", sig));
+        err.help(&format!(
+            "if the expected type is due to type inference, cast the expected `fn` to a function \
+             pointer: `{} as {}`",
+            self.tcx.def_path_str_with_substs(did, substs),
+            sig
+        ));
+    }
+
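+    // A made-up example of the situation this note and help describe:
+    //
+    // ```rust
+    // fn a() {}
+    // fn b() {}
+    //
+    // fn main() {
+    //     let mut f = a;       // `f` has the unique zero-sized type of the `a` item
+    //     f = b;               // error: mismatched types, since the two fn items differ
+    //     let mut g: fn() = a; // with the function-pointer type `fn()`, however,
+    //     g = b;               // both items coerce and the assignment is accepted
+    // }
+    // ```
+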
+    pub(in super::super) fn could_remove_semicolon(
+        &self,
+        blk: &'tcx hir::Block<'tcx>,
+        expected_ty: Ty<'tcx>,
+    ) -> Option<(Span, StatementAsExpression)> {
+        // Be helpful when the user wrote `{... expr;}` and
+        // taking the `;` off is enough to fix the error.
+        let last_stmt = blk.stmts.last()?;
+        let last_expr = match last_stmt.kind {
+            hir::StmtKind::Semi(ref e) => e,
+            _ => return None,
+        };
+        let last_expr_ty = self.node_ty(last_expr.hir_id);
+        let needs_box = match (last_expr_ty.kind(), expected_ty.kind()) {
+            (ty::Opaque(last_def_id, last_bounds), ty::Opaque(exp_def_id, exp_bounds)) => {
+                debug!(
+                    "both opaque, likely future {:?} {:?} {:?} {:?}",
+                    last_def_id, last_bounds, exp_def_id, exp_bounds
+                );
+                let last_hir_id = self.tcx.hir().local_def_id_to_hir_id(last_def_id.expect_local());
+                let exp_hir_id = self.tcx.hir().local_def_id_to_hir_id(exp_def_id.expect_local());
+                match (
+                    &self.tcx.hir().expect_item(last_hir_id).kind,
+                    &self.tcx.hir().expect_item(exp_hir_id).kind,
+                ) {
+                    (
+                        hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds: last_bounds, .. }),
+                        hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds: exp_bounds, .. }),
+                    ) if last_bounds.iter().zip(exp_bounds.iter()).all(|(left, right)| {
+                        match (left, right) {
+                            (
+                                hir::GenericBound::Trait(tl, ml),
+                                hir::GenericBound::Trait(tr, mr),
+                            ) if tl.trait_ref.trait_def_id() == tr.trait_ref.trait_def_id()
+                                && ml == mr =>
+                            {
+                                true
+                            }
+                            (
+                                hir::GenericBound::LangItemTrait(langl, _, _, argsl),
+                                hir::GenericBound::LangItemTrait(langr, _, _, argsr),
+                            ) if langl == langr => {
+                                // FIXME: consider the bounds!
+                                debug!("{:?} {:?}", argsl, argsr);
+                                true
+                            }
+                            _ => false,
+                        }
+                    }) =>
+                    {
+                        StatementAsExpression::NeedsBoxing
+                    }
+                    _ => StatementAsExpression::CorrectType,
+                }
+            }
+            _ => StatementAsExpression::CorrectType,
+        };
+        if (matches!(last_expr_ty.kind(), ty::Error(_))
+            || self.can_sub(self.param_env, last_expr_ty, expected_ty).is_err())
+            && matches!(needs_box, StatementAsExpression::CorrectType)
+        {
+            return None;
+        }
+        let original_span = original_sp(last_stmt.span, blk.span);
+        Some((original_span.with_lo(original_span.hi() - BytePos(1)), needs_box))
+    }
+
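+    // The suggestion this enables targets code like the following made-up snippet
+    // (which does not compile as written):
+    //
+    // ```rust
+    // fn double(x: i32) -> i32 {
+    //     x * 2; // E0308: removing this `;` lets the block evaluate to `x * 2`
+    // }
+    // ```
+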
+    // Instantiates the given path, which must refer to an item with the given
+    // number of type parameters and type.
+    pub fn instantiate_value_path(
+        &self,
+        segments: &[hir::PathSegment<'_>],
+        self_ty: Option<Ty<'tcx>>,
+        res: Res,
+        span: Span,
+        hir_id: hir::HirId,
+    ) -> (Ty<'tcx>, Res) {
+        debug!(
+            "instantiate_value_path(segments={:?}, self_ty={:?}, res={:?}, hir_id={})",
+            segments, self_ty, res, hir_id,
+        );
+
+        let tcx = self.tcx;
+
+        let path_segs = match res {
+            Res::Local(_) | Res::SelfCtor(_) => vec![],
+            Res::Def(kind, def_id) => {
+                AstConv::def_ids_for_value_path_segments(self, segments, self_ty, kind, def_id)
+            }
+            _ => bug!("instantiate_value_path on {:?}", res),
+        };
+
+        let mut user_self_ty = None;
+        let mut is_alias_variant_ctor = false;
+        match res {
+            Res::Def(DefKind::Ctor(CtorOf::Variant, _), _) => {
+                if let Some(self_ty) = self_ty {
+                    let adt_def = self_ty.ty_adt_def().unwrap();
+                    user_self_ty = Some(UserSelfTy { impl_def_id: adt_def.did, self_ty });
+                    is_alias_variant_ctor = true;
+                }
+            }
+            Res::Def(DefKind::AssocFn | DefKind::AssocConst, def_id) => {
+                let container = tcx.associated_item(def_id).container;
+                debug!("instantiate_value_path: def_id={:?} container={:?}", def_id, container);
+                match container {
+                    ty::TraitContainer(trait_did) => {
+                        callee::check_legal_trait_for_method_call(tcx, span, None, trait_did)
+                    }
+                    ty::ImplContainer(impl_def_id) => {
+                        if segments.len() == 1 {
+                            // `<T>::assoc` will end up here, and so
+                            // can `T::assoc`. If this came from an
+                            // inherent impl, we need to record the
+                            // `T` for posterity (see `UserSelfTy` for
+                            // details).
+                            let self_ty = self_ty.expect("UFCS sugared assoc missing Self");
+                            user_self_ty = Some(UserSelfTy { impl_def_id, self_ty });
+                        }
+                    }
+                }
+            }
+            _ => {}
+        }
+
+        // Now that we have categorized what space the parameters for each
+        // segment belong to, let's sort out the parameters that the user
+        // provided (if any) into their appropriate spaces. We'll also report
+        // errors if type parameters are provided in an inappropriate place.
+
+        let generic_segs: FxHashSet<_> = path_segs.iter().map(|PathSeg(_, index)| index).collect();
+        let generics_has_err = AstConv::prohibit_generics(
+            self,
+            segments.iter().enumerate().filter_map(|(index, seg)| {
+                if !generic_segs.contains(&index) || is_alias_variant_ctor {
+                    Some(seg)
+                } else {
+                    None
+                }
+            }),
+        );
+
+        if let Res::Local(hid) = res {
+            let ty = self.local_ty(span, hid).decl_ty;
+            let ty = self.normalize_associated_types_in(span, &ty);
+            self.write_ty(hir_id, ty);
+            return (ty, res);
+        }
+
+        if generics_has_err {
+            // Don't try to infer type parameters when prohibited generic arguments were given.
+            user_self_ty = None;
+        }
+
+        // Now we have to compare the types that the user *actually*
+        // provided against the types that were *expected*. If the user
+        // did not provide any types, then we want to substitute inference
+        // variables. If the user provided some types, we may still need
+        // to add defaults. If the user provided *too many* types, that's
+        // a problem.
+
+        let mut infer_args_for_err = FxHashSet::default();
+        for &PathSeg(def_id, index) in &path_segs {
+            let seg = &segments[index];
+            let generics = tcx.generics_of(def_id);
+            // Argument-position `impl Trait` is treated as a normal generic
+            // parameter internally, but we don't allow users to specify the
+            // parameter's value explicitly, so we have to do some error-
+            // checking here.
+            if let GenericArgCountResult {
+                correct: Err(GenericArgCountMismatch { reported: Some(ErrorReported), .. }),
+                ..
+            } = AstConv::check_generic_arg_count_for_call(
+                tcx, span, &generics, &seg, false, // `is_method_call`
+            ) {
+                infer_args_for_err.insert(index);
+                self.set_tainted_by_errors(); // See issue #53251.
+            }
+        }
+
+        let has_self = path_segs
+            .last()
+            .map(|PathSeg(def_id, _)| tcx.generics_of(*def_id).has_self)
+            .unwrap_or(false);
+
+        let (res, self_ctor_substs) = if let Res::SelfCtor(impl_def_id) = res {
+            let ty = self.normalize_ty(span, tcx.at(span).type_of(impl_def_id));
+            match *ty.kind() {
+                ty::Adt(adt_def, substs) if adt_def.has_ctor() => {
+                    let variant = adt_def.non_enum_variant();
+                    let ctor_def_id = variant.ctor_def_id.unwrap();
+                    (
+                        Res::Def(DefKind::Ctor(CtorOf::Struct, variant.ctor_kind), ctor_def_id),
+                        Some(substs),
+                    )
+                }
+                _ => {
+                    let mut err = tcx.sess.struct_span_err(
+                        span,
+                        "the `Self` constructor can only be used with tuple or unit structs",
+                    );
+                    if let Some(adt_def) = ty.ty_adt_def() {
+                        match adt_def.adt_kind() {
+                            AdtKind::Enum => {
+                                err.help("did you mean to use one of the enum's variants?");
+                            }
+                            AdtKind::Struct | AdtKind::Union => {
+                                err.span_suggestion(
+                                    span,
+                                    "use curly brackets",
+                                    String::from("Self { /* fields */ }"),
+                                    Applicability::HasPlaceholders,
+                                );
+                            }
+                        }
+                    }
+                    err.emit();
+
+                    return (tcx.ty_error(), res);
+                }
+            }
+        } else {
+            (res, None)
+        };
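+        // For illustration (a hedged example, not part of the diagnostic itself): given
+        // `struct Named { field: u32 }`, writing `Self(0)` inside an `impl Named` block takes
+        // the error arm above and receives the "use curly brackets" suggestion, i.e.
+        // `Self { /* fields */ }`.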
+        let def_id = res.def_id();
+
+        // The things we are substituting into the type should not contain
+        // escaping late-bound regions, nor should the base type scheme.
+        let ty = tcx.type_of(def_id);
+
+        let arg_count = GenericArgCountResult {
+            explicit_late_bound: ExplicitLateBound::No,
+            correct: if infer_args_for_err.is_empty() {
+                Ok(())
+            } else {
+                Err(GenericArgCountMismatch::default())
+            },
+        };
+
+        let substs = self_ctor_substs.unwrap_or_else(|| {
+            AstConv::create_substs_for_generic_args(
+                tcx,
+                def_id,
+                &[][..],
+                has_self,
+                self_ty,
+                arg_count,
+                // Provide the generic args, and whether types should be inferred.
+                |def_id| {
+                    if let Some(&PathSeg(_, index)) =
+                        path_segs.iter().find(|&PathSeg(did, _)| *did == def_id)
+                    {
+                        // If we've encountered an `impl Trait`-related error, we're just
+                        // going to infer the arguments for better error messages.
+                        if !infer_args_for_err.contains(&index) {
+                            // Check whether the user has provided generic arguments.
+                            if let Some(ref data) = segments[index].args {
+                                return (Some(data), segments[index].infer_args);
+                            }
+                        }
+                        return (None, segments[index].infer_args);
+                    }
+
+                    (None, true)
+                },
+                // Provide substitutions for parameters for which (valid) arguments have been provided.
+                |param, arg| match (&param.kind, arg) {
+                    (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+                        AstConv::ast_region_to_region(self, lt, Some(param)).into()
+                    }
+                    (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
+                        self.to_ty(ty).into()
+                    }
+                    (GenericParamDefKind::Const, GenericArg::Const(ct)) => {
+                        self.const_arg_to_const(&ct.value, param.def_id).into()
+                    }
+                    _ => unreachable!(),
+                },
+                // Provide substitutions for parameters for which arguments are inferred.
+                |substs, param, infer_args| {
+                    match param.kind {
+                        GenericParamDefKind::Lifetime => {
+                            self.re_infer(Some(param), span).unwrap().into()
+                        }
+                        GenericParamDefKind::Type { has_default, .. } => {
+                            if !infer_args && has_default {
+                                // If we have a default, then it doesn't matter that we're not
+                                // inferring the type arguments: we provide the default where any
+                                // is missing.
+                                let default = tcx.type_of(param.def_id);
+                                self.normalize_ty(
+                                    span,
+                                    default.subst_spanned(tcx, substs.unwrap(), Some(span)),
+                                )
+                                .into()
+                            } else {
+                                // If no type arguments were provided, we have to infer them.
+                                // This case also occurs as a result of some malformed input, e.g.
+                                // a lifetime argument being given instead of a type parameter.
+                                // Using inference instead of `Error` gives better error messages.
+                                self.var_for_def(span, param)
+                            }
+                        }
+                        GenericParamDefKind::Const => {
+                            // FIXME(const_generics:defaults)
+                            // No const parameters were provided, we have to infer them.
+                            self.var_for_def(span, param)
+                        }
+                    }
+                },
+            )
+        });
+        assert!(!substs.has_escaping_bound_vars());
+        assert!(!ty.has_escaping_bound_vars());
+
+        // First, store the "user substs" for later.
+        self.write_user_type_annotation_from_substs(hir_id, def_id, substs, user_self_ty);
+
+        self.add_required_obligations(span, def_id, &substs);
+
+        // Substitute the values for the type parameters into the type of
+        // the referenced item.
+        let ty_substituted = self.instantiate_type_scheme(span, &substs, &ty);
+
+        if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
+            // In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
+            // is inherent, there is no `Self` parameter; instead, the impl needs
+            // type parameters, which we can infer by unifying the provided `Self`
+            // with the substituted impl type.
+            // This also occurs for an enum variant on a type alias.
+            let ty = tcx.type_of(impl_def_id);
+
+            let impl_ty = self.instantiate_type_scheme(span, &substs, &ty);
+            match self.at(&self.misc(span), self.param_env).sup(impl_ty, self_ty) {
+                Ok(ok) => self.register_infer_ok_obligations(ok),
+                Err(_) => {
+                    self.tcx.sess.delay_span_bug(
+                        span,
+                        &format!(
+                        "instantiate_value_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
+                        self_ty,
+                        impl_ty,
+                    ),
+                    );
+                }
+            }
+        }
+
+        self.check_rustc_args_require_const(def_id, hir_id, span);
+
+        debug!("instantiate_value_path: type of {:?} is {:?}", hir_id, ty_substituted);
+        self.write_substs(hir_id, substs);
+
+        (ty_substituted, res)
+    }
+
+    /// Add all the obligations that are required, substituted and normalized appropriately.
+    fn add_required_obligations(&self, span: Span, def_id: DefId, substs: &SubstsRef<'tcx>) {
+        let (bounds, spans) = self.instantiate_bounds(span, def_id, &substs);
+
+        for (i, mut obligation) in traits::predicates_for_generics(
+            traits::ObligationCause::new(span, self.body_id, traits::ItemObligation(def_id)),
+            self.param_env,
+            bounds,
+        )
+        .enumerate()
+        {
+            // This makes the error point at the bound, but we want to point at the argument
+            if let Some(span) = spans.get(i) {
+                obligation.cause.make_mut().code = traits::BindingObligation(def_id, *span);
+            }
+            self.register_predicate(obligation);
+        }
+    }
+
+    /// Resolves `typ` by a single level if `typ` is a type variable.
+    /// If no resolution is possible, then an error is reported.
+    /// Numeric inference variables may be left unresolved.
+    pub fn structurally_resolved_type(&self, sp: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+        let ty = self.resolve_vars_with_obligations(ty);
+        if !ty.is_ty_var() {
+            ty
+        } else {
+            if !self.is_tainted_by_errors() {
+                self.emit_inference_failure_err((**self).body_id, sp, ty.into(), E0282)
+                    .note("type must be known at this point")
+                    .emit();
+            }
+            let err = self.tcx.ty_error();
+            self.demand_suptype(sp, err, ty);
+            err
+        }
+    }
+
+    pub(in super::super) fn with_breakable_ctxt<F: FnOnce() -> R, R>(
+        &self,
+        id: hir::HirId,
+        ctxt: BreakableCtxt<'tcx>,
+        f: F,
+    ) -> (BreakableCtxt<'tcx>, R) {
+        let index;
+        {
+            let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+            index = enclosing_breakables.stack.len();
+            enclosing_breakables.by_id.insert(id, index);
+            enclosing_breakables.stack.push(ctxt);
+        }
+        let result = f();
+        let ctxt = {
+            let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+            debug_assert!(enclosing_breakables.stack.len() == index + 1);
+            enclosing_breakables.by_id.remove(&id).expect("missing breakable context");
+            enclosing_breakables.stack.pop().expect("missing breakable context")
+        };
+        (ctxt, result)
+    }
+
+    /// Instantiate a QueryResponse in a probe context, without a
+    /// good ObligationCause.
+    pub(in super::super) fn probe_instantiate_query_response(
+        &self,
+        span: Span,
+        original_values: &OriginalQueryValues<'tcx>,
+        query_result: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
+    ) -> InferResult<'tcx, Ty<'tcx>> {
+        self.instantiate_query_response_and_region_obligations(
+            &traits::ObligationCause::misc(span, self.body_id),
+            self.param_env,
+            original_values,
+            query_result,
+        )
+    }
+
+    /// Returns `true` if an expression is contained inside the LHS of an assignment expression.
+    pub(in super::super) fn expr_in_place(&self, mut expr_id: hir::HirId) -> bool {
+        let mut contained_in_place = false;
+
+        while let hir::Node::Expr(parent_expr) =
+            self.tcx.hir().get(self.tcx.hir().get_parent_node(expr_id))
+        {
+            match &parent_expr.kind {
+                hir::ExprKind::Assign(lhs, ..) | hir::ExprKind::AssignOp(_, lhs, ..) => {
+                    if lhs.hir_id == expr_id {
+                        contained_in_place = true;
+                        break;
+                    }
+                }
+                _ => (),
+            }
+            expr_id = parent_expr.hir_id;
+        }
+
+        contained_in_place
+    }
+}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
new file mode 100644
index 0000000..a820661
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
@@ -0,0 +1,1005 @@
+use crate::astconv::AstConv;
+use crate::check::coercion::CoerceMany;
+use crate::check::method::MethodCallee;
+use crate::check::Expectation::*;
+use crate::check::TupleArgumentsFlag::*;
+use crate::check::{
+    potentially_plural_count, struct_span_err, BreakableCtxt, Diverges, Expectation, FnCtxt,
+    LocalTy, Needs, TupleArgumentsFlag,
+};
+
+use rustc_ast as ast;
+use rustc_errors::{Applicability, DiagnosticBuilder, DiagnosticId};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::{ExprKind, Node, QPath};
+use rustc_middle::ty::adjustment::AllowTwoPhase;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::{self, Ty};
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::{self, MultiSpan, Span};
+use rustc_trait_selection::traits::{self, ObligationCauseCode, StatementAsExpression};
+
+use std::mem::replace;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    pub(in super::super) fn check_casts(&self) {
+        let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
+        for cast in deferred_cast_checks.drain(..) {
+            cast.check(self);
+        }
+    }
+
+    pub(in super::super) fn check_method_argument_types(
+        &self,
+        sp: Span,
+        expr: &'tcx hir::Expr<'tcx>,
+        method: Result<MethodCallee<'tcx>, ()>,
+        args_no_rcvr: &'tcx [hir::Expr<'tcx>],
+        tuple_arguments: TupleArgumentsFlag,
+        expected: Expectation<'tcx>,
+    ) -> Ty<'tcx> {
+        let has_error = match method {
+            Ok(method) => method.substs.references_error() || method.sig.references_error(),
+            Err(_) => true,
+        };
+        if has_error {
+            let err_inputs = self.err_args(args_no_rcvr.len());
+
+            let err_inputs = match tuple_arguments {
+                DontTupleArguments => err_inputs,
+                TupleArguments => vec![self.tcx.intern_tup(&err_inputs[..])],
+            };
+
+            self.check_argument_types(
+                sp,
+                expr,
+                &err_inputs[..],
+                &[],
+                args_no_rcvr,
+                false,
+                tuple_arguments,
+                None,
+            );
+            return self.tcx.ty_error();
+        }
+
+        let method = method.unwrap();
+        // HACK(eddyb) ignore self in the definition (see above).
+        let expected_arg_tys = self.expected_inputs_for_expected_output(
+            sp,
+            expected,
+            method.sig.output(),
+            &method.sig.inputs()[1..],
+        );
+        self.check_argument_types(
+            sp,
+            expr,
+            &method.sig.inputs()[1..],
+            &expected_arg_tys[..],
+            args_no_rcvr,
+            method.sig.c_variadic,
+            tuple_arguments,
+            Some(method.def_id),
+        );
+        method.sig.output()
+    }
+
+    /// Generic function that factors out common logic from function calls,
+    /// method calls and overloaded operators.
+    pub(in super::super) fn check_argument_types(
+        &self,
+        sp: Span,
+        expr: &'tcx hir::Expr<'tcx>,
+        fn_inputs: &[Ty<'tcx>],
+        expected_arg_tys: &[Ty<'tcx>],
+        args: &'tcx [hir::Expr<'tcx>],
+        c_variadic: bool,
+        tuple_arguments: TupleArgumentsFlag,
+        def_id: Option<DefId>,
+    ) {
+        let tcx = self.tcx;
+        // Grab the argument types, supplying fresh type variables
+        // if the wrong number of arguments were supplied
+        let supplied_arg_count = if tuple_arguments == DontTupleArguments { args.len() } else { 1 };
+
+        // All the input types from the fn signature must outlive the call
+        // so as to validate implied bounds.
+        for (&fn_input_ty, arg_expr) in fn_inputs.iter().zip(args.iter()) {
+            self.register_wf_obligation(fn_input_ty.into(), arg_expr.span, traits::MiscObligation);
+        }
+
+        let expected_arg_count = fn_inputs.len();
+
+        let param_count_error = |expected_count: usize,
+                                 arg_count: usize,
+                                 error_code: &str,
+                                 c_variadic: bool,
+                                 sugg_unit: bool| {
+            let (span, start_span, args) = match &expr.kind {
+                hir::ExprKind::Call(hir::Expr { span, .. }, args) => (*span, *span, &args[..]),
+                hir::ExprKind::MethodCall(path_segment, span, args, _) => (
+                    *span,
+                    // `sp` doesn't point at the whole `foo.bar()`, only at `bar`.
+                    path_segment
+                        .args
+                        .and_then(|args| args.args.iter().last())
+                        // Account for `foo.bar::<T>()`.
+                        .map(|arg| {
+                            // Skip the closing `>`.
+                            tcx.sess
+                                .source_map()
+                                .next_point(tcx.sess.source_map().next_point(arg.span()))
+                        })
+                        .unwrap_or(*span),
+                    &args[1..], // Skip the receiver.
+                ),
+                k => span_bug!(sp, "checking argument types on a non-call: `{:?}`", k),
+            };
+            let arg_spans = if args.is_empty() {
+                // foo()
+                // ^^^-- supplied 0 arguments
+                // |
+                // expected 2 arguments
+                vec![tcx.sess.source_map().next_point(start_span).with_hi(sp.hi())]
+            } else {
+                // foo(1, 2, 3)
+                // ^^^ -  -  - supplied 3 arguments
+                // |
+                // expected 2 arguments
+                args.iter().map(|arg| arg.span).collect::<Vec<Span>>()
+            };
+
+            let mut err = tcx.sess.struct_span_err_with_code(
+                span,
+                &format!(
+                    "this function takes {}{} but {} {} supplied",
+                    if c_variadic { "at least " } else { "" },
+                    potentially_plural_count(expected_count, "argument"),
+                    potentially_plural_count(arg_count, "argument"),
+                    if arg_count == 1 { "was" } else { "were" }
+                ),
+                DiagnosticId::Error(error_code.to_owned()),
+            );
+            let label = format!("supplied {}", potentially_plural_count(arg_count, "argument"));
+            for (i, span) in arg_spans.into_iter().enumerate() {
+                err.span_label(
+                    span,
+                    if arg_count == 0 || i + 1 == arg_count { &label } else { "" },
+                );
+            }
+
+            if let Some(def_id) = def_id {
+                if let Some(node) = tcx.hir().get_if_local(def_id) {
+                    let mut spans: MultiSpan = node
+                        .ident()
+                        .map(|ident| ident.span)
+                        .unwrap_or_else(|| tcx.hir().span(node.hir_id().unwrap()))
+                        .into();
+
+                    if let Some(id) = node.body_id() {
+                        let body = tcx.hir().body(id);
+                        for param in body.params {
+                            spans.push_span_label(param.span, String::new());
+                        }
+                    }
+
+                    let def_kind = tcx.def_kind(def_id);
+                    err.span_note(spans, &format!("{} defined here", def_kind.descr(def_id)));
+                }
+            }
+
+            if sugg_unit {
+                let sugg_span = tcx.sess.source_map().end_point(expr.span);
+                // remove closing `)` from the span
+                let sugg_span = sugg_span.shrink_to_lo();
+                err.span_suggestion(
+                    sugg_span,
+                    "expected the unit value `()`; create it with empty parentheses",
+                    String::from("()"),
+                    Applicability::MachineApplicable,
+                );
+            } else {
+                err.span_label(
+                    span,
+                    format!(
+                        "expected {}{}",
+                        if c_variadic { "at least " } else { "" },
+                        potentially_plural_count(expected_count, "argument")
+                    ),
+                );
+            }
+            err.emit();
+        };
+
+        let mut expected_arg_tys = expected_arg_tys.to_vec();
+
+        let formal_tys = if tuple_arguments == TupleArguments {
+            let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]);
+            match tuple_type.kind() {
+                ty::Tuple(arg_types) if arg_types.len() != args.len() => {
+                    param_count_error(arg_types.len(), args.len(), "E0057", false, false);
+                    expected_arg_tys = vec![];
+                    self.err_args(args.len())
+                }
+                ty::Tuple(arg_types) => {
+                    expected_arg_tys = match expected_arg_tys.get(0) {
+                        Some(&ty) => match ty.kind() {
+                            ty::Tuple(ref tys) => tys.iter().map(|k| k.expect_ty()).collect(),
+                            _ => vec![],
+                        },
+                        None => vec![],
+                    };
+                    arg_types.iter().map(|k| k.expect_ty()).collect()
+                }
+                _ => {
+                    struct_span_err!(
+                        tcx.sess,
+                        sp,
+                        E0059,
+                        "cannot use call notation; the first type parameter \
+                         for the function trait is neither a tuple nor unit"
+                    )
+                    .emit();
+                    expected_arg_tys = vec![];
+                    self.err_args(args.len())
+                }
+            }
+        } else if expected_arg_count == supplied_arg_count {
+            fn_inputs.to_vec()
+        } else if c_variadic {
+            if supplied_arg_count >= expected_arg_count {
+                fn_inputs.to_vec()
+            } else {
+                param_count_error(expected_arg_count, supplied_arg_count, "E0060", true, false);
+                expected_arg_tys = vec![];
+                self.err_args(supplied_arg_count)
+            }
+        } else {
+            // is the missing argument of type `()`?
+            let sugg_unit = if expected_arg_tys.len() == 1 && supplied_arg_count == 0 {
+                self.resolve_vars_if_possible(&expected_arg_tys[0]).is_unit()
+            } else if fn_inputs.len() == 1 && supplied_arg_count == 0 {
+                self.resolve_vars_if_possible(&fn_inputs[0]).is_unit()
+            } else {
+                false
+            };
+            param_count_error(expected_arg_count, supplied_arg_count, "E0061", false, sugg_unit);
+
+            expected_arg_tys = vec![];
+            self.err_args(supplied_arg_count)
+        };
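+        // For illustration (a hedged example): given `fn takes_unit(x: ()) {}`, the call
+        // `takes_unit()` goes through the `sugg_unit` path above and gets a machine-applicable
+        // suggestion to write `takes_unit(())`.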
+
+        debug!(
+            "check_argument_types: formal_tys={:?}",
+            formal_tys.iter().map(|t| self.ty_to_string(*t)).collect::<Vec<String>>()
+        );
+
+        // If there is no expectation, expect formal_tys.
+        let expected_arg_tys =
+            if !expected_arg_tys.is_empty() { expected_arg_tys } else { formal_tys.clone() };
+
+        let mut final_arg_types: Vec<(usize, Ty<'_>, Ty<'_>)> = vec![];
+
+        // Check the arguments.
+        // We do this in a pretty awful way: first we type-check any arguments
+        // that are not closures, then we type-check the closures. This is so
+        // that we have more information about the types of arguments when we
+        // type-check the functions. This isn't really the right way to do this.
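+        // For illustration (a hedged example of why the ordering matters): in
+        //
+        //     fn map_val<T, U>(x: T, f: impl Fn(T) -> U) -> U { f(x) }
+        //     map_val(1u8, |n| n + 1);
+        //
+        // checking `1u8` in the first pass pins `T` to `u8`, so the closure parameter `n` can
+        // be inferred as `u8` when the closure is checked in the second pass.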
+        for &check_closures in &[false, true] {
+            debug!("check_closures={}", check_closures);
+
+            // More awful hacks: before we check argument types, try to do
+            // an "opportunistic" trait resolution of any trait bounds on
+            // the call. This helps coercions.
+            if check_closures {
+                self.select_obligations_where_possible(false, |errors| {
+                    self.point_at_type_arg_instead_of_call_if_possible(errors, expr);
+                    self.point_at_arg_instead_of_call_if_possible(
+                        errors,
+                        &final_arg_types[..],
+                        sp,
+                        &args,
+                    );
+                })
+            }
+
+            // For C-variadic functions, we don't have a declared type for all of
+            // the arguments, hence we only do our usual type checking with
+            // the arguments whose types we do know.
+            let t = if c_variadic {
+                expected_arg_count
+            } else if tuple_arguments == TupleArguments {
+                args.len()
+            } else {
+                supplied_arg_count
+            };
+            for (i, arg) in args.iter().take(t).enumerate() {
+                // Warn only for the first loop (the "no closures" one).
+                // Closure arguments themselves can't be diverging, but
+                // a previous argument can, e.g., `foo(panic!(), || {})`.
+                if !check_closures {
+                    self.warn_if_unreachable(arg.hir_id, arg.span, "expression");
+                }
+
+                let is_closure = match arg.kind {
+                    ExprKind::Closure(..) => true,
+                    _ => false,
+                };
+
+                if is_closure != check_closures {
+                    continue;
+                }
+
+                debug!("checking the argument");
+                let formal_ty = formal_tys[i];
+
+                // The special-cased logic below has three functions:
+                // 1. Provide as good of an expected type as possible.
+                let expected = Expectation::rvalue_hint(self, expected_arg_tys[i]);
+
+                let checked_ty = self.check_expr_with_expectation(&arg, expected);
+
+                // 2. Coerce to the most detailed type that could be coerced
+                //    to, which is `expected_ty` if `rvalue_hint` returns an
+                //    `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
+                let coerce_ty = expected.only_has_type(self).unwrap_or(formal_ty);
+                // We're processing function arguments so we definitely want to use
+                // two-phase borrows.
+                self.demand_coerce(&arg, checked_ty, coerce_ty, None, AllowTwoPhase::Yes);
+                final_arg_types.push((i, checked_ty, coerce_ty));
+
+                // 3. Relate the expected type and the formal one,
+                //    if the expected type was used for the coercion.
+                self.demand_suptype(arg.span, formal_ty, coerce_ty);
+            }
+        }
+
+        // We also need to make sure we at least write the ty of the other
+        // arguments which we skipped above.
+        if c_variadic {
+            fn variadic_error<'tcx>(s: &Session, span: Span, t: Ty<'tcx>, cast_ty: &str) {
+                use crate::structured_errors::{StructuredDiagnostic, VariadicError};
+                VariadicError::new(s, span, t, cast_ty).diagnostic().emit();
+            }
+
+            for arg in args.iter().skip(expected_arg_count) {
+                let arg_ty = self.check_expr(&arg);
+
+                // There are a few types which get autopromoted when passed via varargs
+                // in C, but we just error out instead and require explicit casts.
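+                // For illustration (a hedged example, assuming an `extern "C"` binding such as
+                // `fn printf(fmt: *const c_char, ...) -> c_int`): passing an `f32` as a variadic
+                // argument is rejected here with a request to cast it to `c_double` first.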
+                let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
+                match arg_ty.kind() {
+                    ty::Float(ast::FloatTy::F32) => {
+                        variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
+                    }
+                    ty::Int(ast::IntTy::I8 | ast::IntTy::I16) | ty::Bool => {
+                        variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
+                    }
+                    ty::Uint(ast::UintTy::U8 | ast::UintTy::U16) => {
+                        variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
+                    }
+                    ty::FnDef(..) => {
+                        let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
+                        let ptr_ty = self.resolve_vars_if_possible(&ptr_ty);
+                        variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
+                    }
+                    _ => {}
+                }
+            }
+        }
+    }
+
+    // AST fragment checking
+    pub(in super::super) fn check_lit(
+        &self,
+        lit: &hir::Lit,
+        expected: Expectation<'tcx>,
+    ) -> Ty<'tcx> {
+        let tcx = self.tcx;
+
+        match lit.node {
+            ast::LitKind::Str(..) => tcx.mk_static_str(),
+            ast::LitKind::ByteStr(ref v) => {
+                tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.u8, v.len() as u64))
+            }
+            ast::LitKind::Byte(_) => tcx.types.u8,
+            ast::LitKind::Char(_) => tcx.types.char,
+            ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(t),
+            ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(t),
+            ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
+                let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
+                    ty::Int(_) | ty::Uint(_) => Some(ty),
+                    ty::Char => Some(tcx.types.u8),
+                    ty::RawPtr(..) => Some(tcx.types.usize),
+                    ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
+                    _ => None,
+                });
+                opt_ty.unwrap_or_else(|| self.next_int_var())
+            }
+            ast::LitKind::Float(_, ast::LitFloatType::Suffixed(t)) => tcx.mk_mach_float(t),
+            ast::LitKind::Float(_, ast::LitFloatType::Unsuffixed) => {
+                let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
+                    ty::Float(_) => Some(ty),
+                    _ => None,
+                });
+                opt_ty.unwrap_or_else(|| self.next_float_var())
+            }
+            ast::LitKind::Bool(_) => tcx.types.bool,
+            ast::LitKind::Err(_) => tcx.ty_error(),
+        }
+    }
+
+    pub fn check_struct_path(
+        &self,
+        qpath: &QPath<'_>,
+        hir_id: hir::HirId,
+    ) -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> {
+        let path_span = qpath.qself_span();
+        let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id);
+        let variant = match def {
+            Res::Err => {
+                self.set_tainted_by_errors();
+                return None;
+            }
+            Res::Def(DefKind::Variant, _) => match ty.kind() {
+                ty::Adt(adt, substs) => Some((adt.variant_of_res(def), adt.did, substs)),
+                _ => bug!("unexpected type: {:?}", ty),
+            },
+            Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
+            | Res::SelfTy(..) => match ty.kind() {
+                ty::Adt(adt, substs) if !adt.is_enum() => {
+                    Some((adt.non_enum_variant(), adt.did, substs))
+                }
+                _ => None,
+            },
+            _ => bug!("unexpected definition: {:?}", def),
+        };
+
+        if let Some((variant, did, substs)) = variant {
+            debug!("check_struct_path: did={:?} substs={:?}", did, substs);
+            self.write_user_type_annotation_from_substs(hir_id, did, substs, None);
+
+            // Check bounds on type arguments used in the path.
+            let (bounds, _) = self.instantiate_bounds(path_span, did, substs);
+            let cause =
+                traits::ObligationCause::new(path_span, self.body_id, traits::ItemObligation(did));
+            self.add_obligations_for_parameters(cause, bounds);
+
+            Some((variant, ty))
+        } else {
+            struct_span_err!(
+                self.tcx.sess,
+                path_span,
+                E0071,
+                "expected struct, variant or union type, found {}",
+                ty.sort_string(self.tcx)
+            )
+            .span_label(path_span, "not a struct")
+            .emit();
+            None
+        }
+    }
+
+    pub fn check_decl_initializer(
+        &self,
+        local: &'tcx hir::Local<'tcx>,
+        init: &'tcx hir::Expr<'tcx>,
+    ) -> Ty<'tcx> {
+        // FIXME(tschottdorf): `contains_explicit_ref_binding()` must be removed
+        // for #42640 (default match binding modes).
+        //
+        // See #44848.
+        let ref_bindings = local.pat.contains_explicit_ref_binding();
+
+        let local_ty = self.local_ty(init.span, local.hir_id).revealed_ty;
+        if let Some(m) = ref_bindings {
+            // Somewhat subtle: if we have a `ref` binding in the pattern,
+            // we want to avoid introducing coercions for the RHS. This is
+            // both because it helps preserve sanity and, in the case of
+            // ref mut, for soundness (issue #23116). In particular, in
+            // the latter case, we need to be clear that the type of the
+            // referent for the reference that results is *equal to* the
+            // type of the place it is referencing, and not some
+            // supertype thereof.
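+            // For illustration (a hedged example, not from the original comment): with
+            //
+            //     let ref mut m = init;
+            //
+            // `m` borrows the value produced by `init` directly, so the referent type must be
+            // exactly `init`'s type; coercing the RHS here could leave `*m` typed as a
+            // supertype of the value it actually points at.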
+            let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m));
+            self.demand_eqtype(init.span, local_ty, init_ty);
+            init_ty
+        } else {
+            self.check_expr_coercable_to_type(init, local_ty, None)
+        }
+    }
+
+    /// Type check a `let` statement.
+    pub fn check_decl_local(&self, local: &'tcx hir::Local<'tcx>) {
+        // Determine and write the type which we'll check the pattern against.
+        let ty = self.local_ty(local.span, local.hir_id).decl_ty;
+        self.write_ty(local.hir_id, ty);
+
+        // Type check the initializer.
+        if let Some(ref init) = local.init {
+            let init_ty = self.check_decl_initializer(local, &init);
+            self.overwrite_local_ty_if_err(local, ty, init_ty);
+        }
+
+        // Does the expected pattern type originate from an expression and what is the span?
+        let (origin_expr, ty_span) = match (local.ty, local.init) {
+            (Some(ty), _) => (false, Some(ty.span)), // Bias towards the explicit user type.
+            (_, Some(init)) => (true, Some(init.span)), // No explicit type; so use the scrutinee.
+            _ => (false, None), // We have `let $pat;`, so the expected type is unconstrained.
+        };
+
+        // Type check the pattern. Override if necessary to avoid knock-on errors.
+        self.check_pat_top(&local.pat, ty, ty_span, origin_expr);
+        let pat_ty = self.node_ty(local.pat.hir_id);
+        self.overwrite_local_ty_if_err(local, ty, pat_ty);
+    }
+
+    pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>) {
+        // Don't do all the complex logic below for `DeclItem`.
+        match stmt.kind {
+            hir::StmtKind::Item(..) => return,
+            hir::StmtKind::Local(..) | hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
+        }
+
+        self.warn_if_unreachable(stmt.hir_id, stmt.span, "statement");
+
+        // Hide the outer diverging and `has_errors` flags.
+        let old_diverges = self.diverges.replace(Diverges::Maybe);
+        let old_has_errors = self.has_errors.replace(false);
+
+        match stmt.kind {
+            hir::StmtKind::Local(ref l) => {
+                self.check_decl_local(&l);
+            }
+            // Ignore for now.
+            hir::StmtKind::Item(_) => {}
+            hir::StmtKind::Expr(ref expr) => {
+                // Check with expected type of `()`.
+                self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit(), |err| {
+                    self.suggest_semicolon_at_end(expr.span, err);
+                });
+            }
+            hir::StmtKind::Semi(ref expr) => {
+                self.check_expr(&expr);
+            }
+        }
+
+        // Combine the diverging and `has_error` flags.
+        self.diverges.set(self.diverges.get() | old_diverges);
+        self.has_errors.set(self.has_errors.get() | old_has_errors);
+    }
+
+    pub fn check_block_no_value(&self, blk: &'tcx hir::Block<'tcx>) {
+        let unit = self.tcx.mk_unit();
+        let ty = self.check_block_with_expected(blk, ExpectHasType(unit));
+
+        // if the block produces a `!` value, that can always be
+        // (effectively) coerced to unit.
+        if !ty.is_never() {
+            self.demand_suptype(blk.span, unit, ty);
+        }
+    }
+
+    pub(in super::super) fn check_block_with_expected(
+        &self,
+        blk: &'tcx hir::Block<'tcx>,
+        expected: Expectation<'tcx>,
+    ) -> Ty<'tcx> {
+        let prev = {
+            let mut fcx_ps = self.ps.borrow_mut();
+            let unsafety_state = fcx_ps.recurse(blk);
+            replace(&mut *fcx_ps, unsafety_state)
+        };
+
+        // In some cases, blocks have just one exit, but other blocks
+        // can be targeted by multiple breaks. This can happen both
+        // with labeled blocks as well as when we desugar
+        // a `try { ... }` expression.
+        //
+        // Example 1:
+        //
+        //    'a: { if true { break 'a Err(()); } Ok(()) }
+        //
+        // Here we would wind up with two coercions, one from
+        // `Err(())` and the other from the tail expression
+        // `Ok(())`. If the tail expression is omitted, that's a
+        // "forced unit" -- unless the block diverges, in which
+        // case we can ignore the tail expression (e.g., `'a: {
+        // break 'a 22; }` would not force the type of the block
+        // to be `()`).
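+        // Example 2 (a rough sketch only; the exact `try` desugaring is elided):
+        //
+        //    try { x?; y }
+        //
+        // Each `?` can exit the block early with an error value, so the block
+        // collects one coercion per early exit in addition to the coercion from
+        // the tail expression `y`.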
+        let tail_expr = blk.expr.as_ref();
+        let coerce_to_ty = expected.coercion_target_type(self, blk.span);
+        let coerce = if blk.targeted_by_break {
+            CoerceMany::new(coerce_to_ty)
+        } else {
+            let tail_expr: &[&hir::Expr<'_>] = match tail_expr {
+                Some(e) => slice::from_ref(e),
+                None => &[],
+            };
+            CoerceMany::with_coercion_sites(coerce_to_ty, tail_expr)
+        };
+
+        let prev_diverges = self.diverges.get();
+        let ctxt = BreakableCtxt { coerce: Some(coerce), may_break: false };
+
+        let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || {
+            for s in blk.stmts {
+                self.check_stmt(s);
+            }
+
+            // check the tail expression **without** holding the
+            // `enclosing_breakables` lock below.
+            let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected));
+
+            let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+            let ctxt = enclosing_breakables.find_breakable(blk.hir_id);
+            let coerce = ctxt.coerce.as_mut().unwrap();
+            if let Some(tail_expr_ty) = tail_expr_ty {
+                let tail_expr = tail_expr.unwrap();
+                let span = self.get_expr_coercion_span(tail_expr);
+                let cause = self.cause(span, ObligationCauseCode::BlockTailExpression(blk.hir_id));
+                coerce.coerce(self, &cause, tail_expr, tail_expr_ty);
+            } else {
+                // Subtle: if there is no explicit tail expression,
+                // that is typically equivalent to a tail expression
+                // of `()` -- except if the block diverges. In that
+                // case, there is no value supplied from the tail
+                // expression (assuming there are no other breaks,
+                // this implies that the type of the block will be
+                // `!`).
+                //
+                // #41425 -- label the implicit `()` as being the
+                // "found type" here, rather than the "expected type".
+                if !self.diverges.get().is_always() {
+                    // #50009 -- Do not point at the entire fn block span, point at the return type
+                    // span, as it is the cause of the requirement, and
+                    // `consider_hint_about_removing_semicolon` will point at the last expression
+                    // if it were a relevant part of the error. This improves usability in editors
+                    // that highlight errors inline.
+                    let mut sp = blk.span;
+                    let mut fn_span = None;
+                    if let Some((decl, ident)) = self.get_parent_fn_decl(blk.hir_id) {
+                        let ret_sp = decl.output.span();
+                        if let Some(block_sp) = self.parent_item_span(blk.hir_id) {
+                            // HACK: in some cases (`ui/liveness/liveness-issue-2163.rs`) the
+                            // output would otherwise be incorrect and even misleading. Make sure
+                            // the span we're aiming at corresponds to a `fn` body.
+                            if block_sp == blk.span {
+                                sp = ret_sp;
+                                fn_span = Some(ident.span);
+                            }
+                        }
+                    }
+                    coerce.coerce_forced_unit(
+                        self,
+                        &self.misc(sp),
+                        &mut |err| {
+                            if let Some(expected_ty) = expected.only_has_type(self) {
+                                self.consider_hint_about_removing_semicolon(blk, expected_ty, err);
+                            }
+                            if let Some(fn_span) = fn_span {
+                                err.span_label(
+                                    fn_span,
+                                    "implicitly returns `()` as its body has no tail or `return` \
+                                     expression",
+                                );
+                            }
+                        },
+                        false,
+                    );
+                }
+            }
+        });
+
+        if ctxt.may_break {
+            // If we can break from the block, then the block's exit is always reachable
+            // (... as long as the entry is reachable) - regardless of the tail of the block.
+            self.diverges.set(prev_diverges);
+        }
+
+        let mut ty = ctxt.coerce.unwrap().complete(self);
+
+        if self.has_errors.get() || ty.references_error() {
+            ty = self.tcx.ty_error()
+        }
+
+        self.write_ty(blk.hir_id, ty);
+
+        *self.ps.borrow_mut() = prev;
+        ty
+    }
+
+    pub(in super::super) fn check_rustc_args_require_const(
+        &self,
+        def_id: DefId,
+        hir_id: hir::HirId,
+        span: Span,
+    ) {
+        // We're only interested in functions tagged with
+        // #[rustc_args_required_const], so ignore anything that's not.
+        if !self.tcx.has_attr(def_id, sym::rustc_args_required_const) {
+            return;
+        }
+
+        // If our calling expression is indeed the function itself, we're good!
+        // If not, generate an error that this can only be called directly.
+        if let Node::Expr(expr) = self.tcx.hir().get(self.tcx.hir().get_parent_node(hir_id)) {
+            if let ExprKind::Call(ref callee, ..) = expr.kind {
+                if callee.hir_id == hir_id {
+                    return;
+                }
+            }
+        }
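+        // For illustration (a hedged sketch; the attribute form is recalled from memory, not
+        // taken from this file): given
+        //
+        //     #[rustc_args_required_const(0)]
+        //     fn requires_const(imm: i32) { /* ... */ }
+        //
+        // a direct call `requires_const(3)` returns above, while taking the function as a
+        // value, e.g. `let f = requires_const;`, falls through to the error below.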
+
+        self.tcx.sess.span_err(
+            span,
+            "this function can only be invoked directly, not through a function pointer",
+        );
+    }
+
+    /// A common error is to add an extra semicolon:
+    ///
+    /// ```
+    /// fn foo() -> usize {
+    ///     22;
+    /// }
+    /// ```
+    ///
+    /// This routine checks if the final statement in a block is an
+    /// expression with an explicit semicolon whose type is compatible
+    /// with `expected_ty`. If so, it suggests removing the semicolon.
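+    ///
+    /// Removing the semicolon turns `22;` into the tail expression `22`, so the block's type
+    /// becomes `usize` and matches the declared return type (an illustrative fix, not part of
+    /// the original comment):
+    ///
+    /// ```
+    /// fn foo() -> usize {
+    ///     22
+    /// }
+    /// ```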
+    fn consider_hint_about_removing_semicolon(
+        &self,
+        blk: &'tcx hir::Block<'tcx>,
+        expected_ty: Ty<'tcx>,
+        err: &mut DiagnosticBuilder<'_>,
+    ) {
+        if let Some((span_semi, boxed)) = self.could_remove_semicolon(blk, expected_ty) {
+            if let StatementAsExpression::NeedsBoxing = boxed {
+                err.span_suggestion_verbose(
+                    span_semi,
+                    "consider removing this semicolon and boxing the expression",
+                    String::new(),
+                    Applicability::HasPlaceholders,
+                );
+            } else {
+                err.span_suggestion_short(
+                    span_semi,
+                    "consider removing this semicolon",
+                    String::new(),
+                    Applicability::MachineApplicable,
+                );
+            }
+        }
+    }
+
+    fn parent_item_span(&self, id: hir::HirId) -> Option<Span> {
+        let node = self.tcx.hir().get(self.tcx.hir().get_parent_item(id));
+        match node {
+            Node::Item(&hir::Item { kind: hir::ItemKind::Fn(_, _, body_id), .. })
+            | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(_, body_id), .. }) => {
+                let body = self.tcx.hir().body(body_id);
+                if let ExprKind::Block(block, _) = &body.value.kind {
+                    return Some(block.span);
+                }
+            }
+            _ => {}
+        }
+        None
+    }
+
+    /// Given a function block's `HirId`, returns its `FnDecl` if it exists, or `None` otherwise.
+    fn get_parent_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident)> {
+        let parent = self.tcx.hir().get(self.tcx.hir().get_parent_item(blk_id));
+        self.get_node_fn_decl(parent).map(|(fn_decl, ident, _)| (fn_decl, ident))
+    }
+
+    /// If `expr` is a `match` expression that has only one non-`!` arm, use that arm's tail
+    /// expression's `Span`, otherwise return `expr.span`. This is done to give better errors
+    /// when given code like the following:
+    /// ```text
+    /// if false { return 0i32; } else { 1u32 }
+    /// //                               ^^^^ point at this instead of the whole `if` expression
+    /// ```
+    fn get_expr_coercion_span(&self, expr: &hir::Expr<'_>) -> rustc_span::Span {
+        if let hir::ExprKind::Match(_, arms, _) = &expr.kind {
+            let arm_spans: Vec<Span> = arms
+                .iter()
+                .filter_map(|arm| {
+                    self.in_progress_typeck_results
+                        .and_then(|typeck_results| {
+                            typeck_results.borrow().node_type_opt(arm.body.hir_id)
+                        })
+                        .and_then(|arm_ty| {
+                            if arm_ty.is_never() {
+                                None
+                            } else {
+                                Some(match &arm.body.kind {
+                                    // Point at the tail expression when possible.
+                                    hir::ExprKind::Block(block, _) => {
+                                        block.expr.as_ref().map(|e| e.span).unwrap_or(block.span)
+                                    }
+                                    _ => arm.body.span,
+                                })
+                            }
+                        })
+                })
+                .collect();
+            if arm_spans.len() == 1 {
+                return arm_spans[0];
+            }
+        }
+        expr.span
+    }
+
+    fn overwrite_local_ty_if_err(
+        &self,
+        local: &'tcx hir::Local<'tcx>,
+        decl_ty: Ty<'tcx>,
+        ty: Ty<'tcx>,
+    ) {
+        if ty.references_error() {
+            // Override the types everywhere with `err()` to avoid knock-on errors.
+            self.write_ty(local.hir_id, ty);
+            self.write_ty(local.pat.hir_id, ty);
+            let local_ty = LocalTy { decl_ty, revealed_ty: ty };
+            self.locals.borrow_mut().insert(local.hir_id, local_ty);
+            self.locals.borrow_mut().insert(local.pat.hir_id, local_ty);
+        }
+    }
+
+    // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary.
+    // The newly resolved definition is written into `type_dependent_defs`.
+    fn finish_resolving_struct_path(
+        &self,
+        qpath: &QPath<'_>,
+        path_span: Span,
+        hir_id: hir::HirId,
+    ) -> (Res, Ty<'tcx>) {
+        match *qpath {
+            QPath::Resolved(ref maybe_qself, ref path) => {
+                let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
+                let ty = AstConv::res_to_ty(self, self_ty, path, true);
+                (path.res, ty)
+            }
+            QPath::TypeRelative(ref qself, ref segment) => {
+                let ty = self.to_ty(qself);
+
+                let res = if let hir::TyKind::Path(QPath::Resolved(_, ref path)) = qself.kind {
+                    path.res
+                } else {
+                    Res::Err
+                };
+                let result =
+                    AstConv::associated_path_to_ty(self, hir_id, path_span, ty, res, segment, true);
+                let ty = result.map(|(ty, _, _)| ty).unwrap_or_else(|_| self.tcx().ty_error());
+                let result = result.map(|(_, kind, def_id)| (kind, def_id));
+
+                // Write back the new resolution.
+                self.write_resolution(hir_id, result);
+
+                (result.map(|(kind, def_id)| Res::Def(kind, def_id)).unwrap_or(Res::Err), ty)
+            }
+            QPath::LangItem(lang_item, span) => {
+                self.resolve_lang_item_path(lang_item, span, hir_id)
+            }
+        }
+    }
+
+    /// Given a vec of evaluated `FulfillmentError`s and the argument expressions of an `fn` call,
+    /// we walk the checked and coerced types for each argument to see if any of the
+    /// `FulfillmentError`s reference a type argument. We also walk the checked type because the
+    /// coerced type may not be easily comparable with the predicate type (because of the
+    /// coercion). If the types match for either the checked or the coerced type, and there is
+    /// only *one* argument that matches, we point at that argument's expression span instead of
+    /// the `fn` call path span.
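+    ///
+    /// For illustration (a hedged example, not from the original comment): with
+    /// `fn is_send<T: Send>(_: T) {}` and a call `is_send(rc_value)` where `rc_value: Rc<u32>`,
+    /// the failed `Send` obligation ends up pointing at `rc_value` rather than at the call
+    /// path `is_send`.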
+    fn point_at_arg_instead_of_call_if_possible(
+        &self,
+        errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+        final_arg_types: &[(usize, Ty<'tcx>, Ty<'tcx>)],
+        call_sp: Span,
+        args: &'tcx [hir::Expr<'tcx>],
+    ) {
+        // We *do not* do this for desugared call spans, to keep good diagnostics for calls
+        // involving the `?` operator.
+        if call_sp.desugaring_kind().is_some() {
+            return;
+        }
+
+        for error in errors {
+            // Only if the cause is somewhere inside the expression do we want to try to point at
+            // the arg. Otherwise, the cause is somewhere else and we should not change anything,
+            // because we could clobber the correct span.
+            if !call_sp.contains(error.obligation.cause.span) {
+                continue;
+            }
+
+            if let ty::PredicateAtom::Trait(predicate, _) =
+                error.obligation.predicate.skip_binders()
+            {
+                // Collect the argument position for all arguments that could have caused this
+                // `FulfillmentError`.
+                let mut referenced_in = final_arg_types
+                    .iter()
+                    .map(|&(i, checked_ty, _)| (i, checked_ty))
+                    .chain(final_arg_types.iter().map(|&(i, _, coerced_ty)| (i, coerced_ty)))
+                    .flat_map(|(i, ty)| {
+                        let ty = self.resolve_vars_if_possible(&ty);
+                        // We walk the argument type because the argument's type could have
+                        // been `Option<T>`, but the `FulfillmentError` references `T`.
+                        if ty.walk().any(|arg| arg == predicate.self_ty().into()) {
+                            Some(i)
+                        } else {
+                            None
+                        }
+                    })
+                    .collect::<Vec<usize>>();
+
+                // Both checked and coerced types could have matched, thus we need to remove
+                // duplicates.
+
+                // These are primitive `usize` indices, so an unstable sort is fine here.
+                referenced_in.sort_unstable();
+                referenced_in.dedup();
+
+                if let (Some(ref_in), None) = (referenced_in.pop(), referenced_in.pop()) {
+                    // We make sure that only *one* argument matches the obligation failure,
+                    // and point the obligation's span at that argument's expression.
+                    error.obligation.cause.make_mut().span = args[ref_in].span;
+                    error.points_at_arg_span = true;
+                }
+            }
+        }
+    }
+
+    /// Given a vec of evaluated `FulfillmentError`s and an `fn` call expression, we walk the
+    /// `PathSegment`s and resolve their type parameters to see if any of the `FulfillmentError`s
+    /// were caused by them. If they were, we point at the corresponding type argument's span
+    /// instead of the `fn` call path span.
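+    ///
+    /// For illustration (a hedged example): in `is_send::<Rc<u32>>();` the failed `Send`
+    /// obligation is pointed at the `Rc<u32>` type argument instead of at the call path.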
+    fn point_at_type_arg_instead_of_call_if_possible(
+        &self,
+        errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+        call_expr: &'tcx hir::Expr<'tcx>,
+    ) {
+        if let hir::ExprKind::Call(path, _) = &call_expr.kind {
+            if let hir::ExprKind::Path(qpath) = &path.kind {
+                if let hir::QPath::Resolved(_, path) = &qpath {
+                    for error in errors {
+                        if let ty::PredicateAtom::Trait(predicate, _) =
+                            error.obligation.predicate.skip_binders()
+                        {
+                            // If any of the type arguments in this path segment caused the
+                            // `FulfillmentError`, point at its span (#61860).
+                            for arg in path
+                                .segments
+                                .iter()
+                                .filter_map(|seg| seg.args.as_ref())
+                                .flat_map(|a| a.args.iter())
+                            {
+                                if let hir::GenericArg::Type(hir_ty) = &arg {
+                                    if let hir::TyKind::Path(hir::QPath::TypeRelative(..)) =
+                                        &hir_ty.kind
+                                    {
+                                        // Avoid ICE with associated types. As this is best
+                                        // effort only, it's ok to ignore the case. It
+                                        // would trigger in `is_send::<T::AssocType>();`
+                                        // from `typeck-default-trait-impl-assoc-type.rs`.
+                                    } else {
+                                        let ty = AstConv::ast_ty_to_ty(self, hir_ty);
+                                        let ty = self.resolve_vars_if_possible(&ty);
+                                        if ty == predicate.self_ty() {
+                                            error.obligation.cause.make_mut().span = hir_ty.span;
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
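Similarly, a rough sketch of the case `point_at_type_arg_instead_of_call_if_possible` targets (cf. #61860); `is_send` is an invented helper and the snippet is expected to be rejected:

    use std::rc::Rc;

    fn is_send<T: Send>() {}

    fn main() {
        // `Rc<()>` is not `Send`; with the code above, the error should point
        // at the `Rc<()>` type argument instead of the call path.
        is_send::<Rc<()>>();
    }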
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
new file mode 100644
index 0000000..72c3b23
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
@@ -0,0 +1,295 @@
+mod _impl;
+mod checks;
+mod suggestions;
+
+pub use _impl::*;
+pub use checks::*;
+pub use suggestions::*;
+
+use crate::astconv::AstConv;
+use crate::check::coercion::DynamicCoerceMany;
+use crate::check::{Diverges, EnclosingBreakables, Inherited, UnsafetyState};
+
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
+use rustc_middle::hir::map::blocks::FnLikeNode;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::{self, Const, Ty, TyCtxt};
+use rustc_session::Session;
+use rustc_span::{self, Span};
+use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode};
+
+use std::cell::{Cell, RefCell};
+use std::ops::Deref;
+
+pub struct FnCtxt<'a, 'tcx> {
+    pub(super) body_id: hir::HirId,
+
+    /// The parameter environment used for proving trait obligations
+    /// in this function. This can change when we descend into
+    /// closures (as they bring new things into scope), hence it is
+    /// not part of `Inherited` (as of the time of this writing,
+    /// closures do not yet change the environment, but they will
+    /// eventually).
+    pub(super) param_env: ty::ParamEnv<'tcx>,
+
+    /// Number of errors that had been reported when we started
+    /// checking this function. On exit, if we find that *more* errors
+    /// have been reported, we will skip regionck and other work that
+    /// expects the types within the function to be consistent.
+    // FIXME(matthewjasper) This should not exist, and it's not correct
+    // if type checking is run in parallel.
+    err_count_on_creation: usize,
+
+    /// If `Some`, this stores coercion information for returned
+    /// expressions. If `None`, this is in a context where return is
+    /// inappropriate, such as a const expression.
+    ///
+    /// This is a `RefCell<DynamicCoerceMany>`, which means that we
+    /// can track all the return expressions and then use them to
+    /// compute a useful coercion from the set, similar to a match
+    /// expression or other branching context. You can use methods
+    /// like `expected_ty` to access the declared return type (if
+    /// any).
+    pub(super) ret_coercion: Option<RefCell<DynamicCoerceMany<'tcx>>>,
+
+    pub(super) ret_coercion_impl_trait: Option<Ty<'tcx>>,
+
+    pub(super) ret_type_span: Option<Span>,
+
+    /// Used exclusively to reduce the cost of the advanced evaluation performed
+    /// for more helpful diagnostics.
+    pub(super) in_tail_expr: bool,
+
+    /// First span of a return site that we find. Used in error messages.
+    pub(super) ret_coercion_span: RefCell<Option<Span>>,
+
+    pub(super) resume_yield_tys: Option<(Ty<'tcx>, Ty<'tcx>)>,
+
+    pub(super) ps: RefCell<UnsafetyState>,
+
+    /// Whether the last checked node generates a divergence (e.g.,
+    /// `return` will set this to `Always`). In general, when entering
+    /// an expression or other node in the tree, the initial value
+    /// indicates whether prior parts of the containing expression may
+    /// have diverged. It is then typically set to `Maybe` (and the
+    /// old value remembered) for processing the subparts of the
+    /// current expression. As each subpart is processed, they may set
+    /// the flag to `Always`, etc. Finally, at the end, we take the
+    /// result and "union" it with the original value, so that when we
+    /// return the flag indicates if any subpart of the parent
+    /// expression (up to and including this part) has diverged. So,
+    /// if you read it after evaluating a subexpression `X`, the value
+    /// you get indicates whether any subexpression evaluated up to and
+    /// including `X` diverged.
+    ///
+    /// We currently use this flag only for diagnostic purposes:
+    ///
+    /// - To warn about unreachable code: if, after processing a
+    ///   sub-expression but before we have applied the effects of the
+    ///   current node, we see that the flag is set to `Always`, we
+    ///   can issue a warning. This corresponds to something like
+    ///   `foo(return)`; we warn on the `foo()` expression. (We then
+    ///   update the flag to `WarnedAlways` to suppress duplicate
+    ///   reports.) Similarly, if we traverse to a fresh statement (or
+    ///   tail expression) from an `Always` setting, we will issue a
+    ///   warning. This corresponds to something like `{return;
+    ///   foo();}` or `{return; 22}`, where we would warn on the
+    ///   `foo()` or `22`.
+    ///
+    /// An expression represents dead code if, after checking it,
+    /// the diverges flag is set to something other than `Maybe`.
+    pub(super) diverges: Cell<Diverges>,
+
+    /// Whether any child nodes have any type errors.
+    pub(super) has_errors: Cell<bool>,
+
+    pub(super) enclosing_breakables: RefCell<EnclosingBreakables<'tcx>>,
+
+    pub(super) inh: &'a Inherited<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    pub fn new(
+        inh: &'a Inherited<'a, 'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+        body_id: hir::HirId,
+    ) -> FnCtxt<'a, 'tcx> {
+        FnCtxt {
+            body_id,
+            param_env,
+            err_count_on_creation: inh.tcx.sess.err_count(),
+            ret_coercion: None,
+            ret_coercion_impl_trait: None,
+            ret_type_span: None,
+            in_tail_expr: false,
+            ret_coercion_span: RefCell::new(None),
+            resume_yield_tys: None,
+            ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, hir::CRATE_HIR_ID)),
+            diverges: Cell::new(Diverges::Maybe),
+            has_errors: Cell::new(false),
+            enclosing_breakables: RefCell::new(EnclosingBreakables {
+                stack: Vec::new(),
+                by_id: Default::default(),
+            }),
+            inh,
+        }
+    }
+
+    pub fn cause(&self, span: Span, code: ObligationCauseCode<'tcx>) -> ObligationCause<'tcx> {
+        ObligationCause::new(span, self.body_id, code)
+    }
+
+    pub fn misc(&self, span: Span) -> ObligationCause<'tcx> {
+        self.cause(span, ObligationCauseCode::MiscObligation)
+    }
+
+    pub fn sess(&self) -> &Session {
+        &self.tcx.sess
+    }
+
+    pub fn errors_reported_since_creation(&self) -> bool {
+        self.tcx.sess.err_count() > self.err_count_on_creation
+    }
+}
+
+impl<'a, 'tcx> Deref for FnCtxt<'a, 'tcx> {
+    type Target = Inherited<'a, 'tcx>;
+    fn deref(&self) -> &Self::Target {
+        &self.inh
+    }
+}
+
+impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn item_def_id(&self) -> Option<DefId> {
+        None
+    }
+
+    fn default_constness_for_trait_bounds(&self) -> hir::Constness {
+        // FIXME: refactor this into a method
+        let node = self.tcx.hir().get(self.body_id);
+        if let Some(fn_like) = FnLikeNode::from_node(node) {
+            fn_like.constness()
+        } else {
+            hir::Constness::NotConst
+        }
+    }
+
+    fn get_type_parameter_bounds(&self, _: Span, def_id: DefId) -> ty::GenericPredicates<'tcx> {
+        let tcx = self.tcx;
+        let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+        let item_id = tcx.hir().ty_param_owner(hir_id);
+        let item_def_id = tcx.hir().local_def_id(item_id);
+        let generics = tcx.generics_of(item_def_id);
+        let index = generics.param_def_id_to_index[&def_id];
+        ty::GenericPredicates {
+            parent: None,
+            predicates: tcx.arena.alloc_from_iter(
+                self.param_env.caller_bounds().iter().filter_map(|predicate| {
+                    match predicate.skip_binders() {
+                        ty::PredicateAtom::Trait(data, _) if data.self_ty().is_param(index) => {
+                            // HACK(eddyb) should get the original `Span`.
+                            let span = tcx.def_span(def_id);
+                            Some((predicate, span))
+                        }
+                        _ => None,
+                    }
+                }),
+            ),
+        }
+    }
+
+    fn re_infer(&self, def: Option<&ty::GenericParamDef>, span: Span) -> Option<ty::Region<'tcx>> {
+        let v = match def {
+            Some(def) => infer::EarlyBoundRegion(span, def.name),
+            None => infer::MiscVariable(span),
+        };
+        Some(self.next_region_var(v))
+    }
+
+    fn allow_ty_infer(&self) -> bool {
+        true
+    }
+
+    fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
+        if let Some(param) = param {
+            if let GenericArgKind::Type(ty) = self.var_for_def(span, param).unpack() {
+                return ty;
+            }
+            unreachable!()
+        } else {
+            self.next_ty_var(TypeVariableOrigin {
+                kind: TypeVariableOriginKind::TypeInference,
+                span,
+            })
+        }
+    }
+
+    fn ct_infer(
+        &self,
+        ty: Ty<'tcx>,
+        param: Option<&ty::GenericParamDef>,
+        span: Span,
+    ) -> &'tcx Const<'tcx> {
+        if let Some(param) = param {
+            if let GenericArgKind::Const(ct) = self.var_for_def(span, param).unpack() {
+                return ct;
+            }
+            unreachable!()
+        } else {
+            self.next_const_var(
+                ty,
+                ConstVariableOrigin { kind: ConstVariableOriginKind::ConstInference, span },
+            )
+        }
+    }
+
+    fn projected_ty_from_poly_trait_ref(
+        &self,
+        span: Span,
+        item_def_id: DefId,
+        item_segment: &hir::PathSegment<'_>,
+        poly_trait_ref: ty::PolyTraitRef<'tcx>,
+    ) -> Ty<'tcx> {
+        let (trait_ref, _) = self.replace_bound_vars_with_fresh_vars(
+            span,
+            infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id),
+            &poly_trait_ref,
+        );
+
+        let item_substs = <dyn AstConv<'tcx>>::create_substs_for_associated_item(
+            self,
+            self.tcx,
+            span,
+            item_def_id,
+            item_segment,
+            trait_ref.substs,
+        );
+
+        self.tcx().mk_projection(item_def_id, item_substs)
+    }
+
+    fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+        if ty.has_escaping_bound_vars() {
+            ty // FIXME: normalization and escaping regions
+        } else {
+            self.normalize_associated_types_in(span, &ty)
+        }
+    }
+
+    fn set_tainted_by_errors(&self) {
+        self.infcx.set_tainted_by_errors()
+    }
+
+    fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) {
+        self.write_ty(hir_id, ty)
+    }
+}
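A minimal sketch of the surface syntax that flows through `ty_infer` above: each `_` written by the user becomes a fresh inference variable that unification later resolves (ordinary stable Rust, shown only for orientation):

    fn main() {
        // Both `_` placeholders are lowered via `ty_infer` into inference
        // variables; unification later pins them to `i32` and `Vec<i32>`.
        let x: _ = 1i32;
        let v: Vec<_> = vec![x];
        assert_eq!(v[0], 1);
    }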
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
new file mode 100644
index 0000000..a8ad9f4
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
@@ -0,0 +1,445 @@
+use super::FnCtxt;
+use crate::astconv::AstConv;
+
+use rustc_ast::util::parser::ExprPrecedence;
+use rustc_span::{self, Span};
+
+use rustc_errors::{Applicability, DiagnosticBuilder};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{ExprKind, ItemKind, Node};
+use rustc_infer::infer;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::symbol::kw;
+
+use std::iter;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    pub(in super::super) fn suggest_semicolon_at_end(
+        &self,
+        span: Span,
+        err: &mut DiagnosticBuilder<'_>,
+    ) {
+        err.span_suggestion_short(
+            span.shrink_to_hi(),
+            "consider using a semicolon here",
+            ";".to_string(),
+            Applicability::MachineApplicable,
+        );
+    }
+
+    /// On implicit return expressions with mismatched types, provides the following suggestions:
+    ///
+    /// - Points out the method's return type as the reason for the expected type.
+    /// - Possible missing semicolon.
+    /// - Possible missing return type if the return type is the default, and not `fn main()`.
+    pub fn suggest_mismatched_types_on_tail(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        expr: &'tcx hir::Expr<'tcx>,
+        expected: Ty<'tcx>,
+        found: Ty<'tcx>,
+        cause_span: Span,
+        blk_id: hir::HirId,
+    ) -> bool {
+        let expr = expr.peel_drop_temps();
+        self.suggest_missing_semicolon(err, expr, expected, cause_span);
+        let mut pointing_at_return_type = false;
+        if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
+            pointing_at_return_type =
+                self.suggest_missing_return_type(err, &fn_decl, expected, found, can_suggest);
+        }
+        pointing_at_return_type
+    }
+
+    /// When encountering an fn-like ctor that needs to unify with a value, check whether calling
+    /// the ctor would successfully solve the type mismatch and if so, suggest it:
+    /// ```
+    /// fn foo(x: usize) -> usize { x }
+    /// let x: usize = foo;  // suggest calling the `foo` function: `foo(42)`
+    /// ```
+    fn suggest_fn_call(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        expr: &hir::Expr<'_>,
+        expected: Ty<'tcx>,
+        found: Ty<'tcx>,
+    ) -> bool {
+        let hir = self.tcx.hir();
+        let (def_id, sig) = match *found.kind() {
+            ty::FnDef(def_id, _) => (def_id, found.fn_sig(self.tcx)),
+            ty::Closure(def_id, substs) => (def_id, substs.as_closure().sig()),
+            _ => return false,
+        };
+
+        let sig = self.replace_bound_vars_with_fresh_vars(expr.span, infer::FnCall, &sig).0;
+        let sig = self.normalize_associated_types_in(expr.span, &sig);
+        if self.can_coerce(sig.output(), expected) {
+            let (mut sugg_call, applicability) = if sig.inputs().is_empty() {
+                (String::new(), Applicability::MachineApplicable)
+            } else {
+                ("...".to_string(), Applicability::HasPlaceholders)
+            };
+            let mut msg = "call this function";
+            match hir.get_if_local(def_id) {
+                Some(
+                    Node::Item(hir::Item { kind: ItemKind::Fn(.., body_id), .. })
+                    | Node::ImplItem(hir::ImplItem {
+                        kind: hir::ImplItemKind::Fn(_, body_id), ..
+                    })
+                    | Node::TraitItem(hir::TraitItem {
+                        kind: hir::TraitItemKind::Fn(.., hir::TraitFn::Provided(body_id)),
+                        ..
+                    }),
+                ) => {
+                    let body = hir.body(*body_id);
+                    sugg_call = body
+                        .params
+                        .iter()
+                        .map(|param| match &param.pat.kind {
+                            hir::PatKind::Binding(_, _, ident, None)
+                                if ident.name != kw::SelfLower =>
+                            {
+                                ident.to_string()
+                            }
+                            _ => "_".to_string(),
+                        })
+                        .collect::<Vec<_>>()
+                        .join(", ");
+                }
+                Some(Node::Expr(hir::Expr {
+                    kind: ExprKind::Closure(_, _, body_id, _, _),
+                    span: full_closure_span,
+                    ..
+                })) => {
+                    if *full_closure_span == expr.span {
+                        return false;
+                    }
+                    msg = "call this closure";
+                    let body = hir.body(*body_id);
+                    sugg_call = body
+                        .params
+                        .iter()
+                        .map(|param| match &param.pat.kind {
+                            hir::PatKind::Binding(_, _, ident, None)
+                                if ident.name != kw::SelfLower =>
+                            {
+                                ident.to_string()
+                            }
+                            _ => "_".to_string(),
+                        })
+                        .collect::<Vec<_>>()
+                        .join(", ");
+                }
+                Some(Node::Ctor(hir::VariantData::Tuple(fields, _))) => {
+                    sugg_call = fields.iter().map(|_| "_").collect::<Vec<_>>().join(", ");
+                    match def_id.as_local().map(|def_id| hir.def_kind(def_id)) {
+                        Some(DefKind::Ctor(hir::def::CtorOf::Variant, _)) => {
+                            msg = "instantiate this tuple variant";
+                        }
+                        Some(DefKind::Ctor(CtorOf::Struct, _)) => {
+                            msg = "instantiate this tuple struct";
+                        }
+                        _ => {}
+                    }
+                }
+                Some(Node::ForeignItem(hir::ForeignItem {
+                    kind: hir::ForeignItemKind::Fn(_, idents, _),
+                    ..
+                })) => {
+                    sugg_call = idents
+                        .iter()
+                        .map(|ident| {
+                            if ident.name != kw::SelfLower {
+                                ident.to_string()
+                            } else {
+                                "_".to_string()
+                            }
+                        })
+                        .collect::<Vec<_>>()
+                        .join(", ")
+                }
+                Some(Node::TraitItem(hir::TraitItem {
+                    kind: hir::TraitItemKind::Fn(.., hir::TraitFn::Required(idents)),
+                    ..
+                })) => {
+                    sugg_call = idents
+                        .iter()
+                        .map(|ident| {
+                            if ident.name != kw::SelfLower {
+                                ident.to_string()
+                            } else {
+                                "_".to_string()
+                            }
+                        })
+                        .collect::<Vec<_>>()
+                        .join(", ")
+                }
+                _ => {}
+            }
+            err.span_suggestion_verbose(
+                expr.span.shrink_to_hi(),
+                &format!("use parentheses to {}", msg),
+                format!("({})", sugg_call),
+                applicability,
+            );
+            return true;
+        }
+        false
+    }
+
+    pub fn suggest_deref_ref_or_into(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        expr: &hir::Expr<'_>,
+        expected: Ty<'tcx>,
+        found: Ty<'tcx>,
+        expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+    ) {
+        if let Some((sp, msg, suggestion, applicability)) = self.check_ref(expr, found, expected) {
+            err.span_suggestion(sp, msg, suggestion, applicability);
+        } else if let (ty::FnDef(def_id, ..), true) =
+            (&found.kind(), self.suggest_fn_call(err, expr, expected, found))
+        {
+            if let Some(sp) = self.tcx.hir().span_if_local(*def_id) {
+                let sp = self.sess().source_map().guess_head_span(sp);
+                err.span_label(sp, &format!("{} defined here", found));
+            }
+        } else if !self.check_for_cast(err, expr, found, expected, expected_ty_expr) {
+            let is_struct_pat_shorthand_field =
+                self.is_hir_id_from_struct_pattern_shorthand_field(expr.hir_id, expr.span);
+            let methods = self.get_conversion_methods(expr.span, expected, found, expr.hir_id);
+            if let Ok(expr_text) = self.sess().source_map().span_to_snippet(expr.span) {
+                let mut suggestions = iter::repeat(&expr_text)
+                    .zip(methods.iter())
+                    .filter_map(|(receiver, method)| {
+                        let method_call = format!(".{}()", method.ident);
+                        if receiver.ends_with(&method_call) {
+                            None // do not suggest code that is already there (#53348)
+                        } else {
+                            let method_call_list = [".to_vec()", ".to_string()"];
+                            let sugg = if receiver.ends_with(".clone()")
+                                && method_call_list.contains(&method_call.as_str())
+                            {
+                                let max_len = receiver.rfind('.').unwrap();
+                                format!("{}{}", &receiver[..max_len], method_call)
+                            } else {
+                                if expr.precedence().order() < ExprPrecedence::MethodCall.order() {
+                                    format!("({}){}", receiver, method_call)
+                                } else {
+                                    format!("{}{}", receiver, method_call)
+                                }
+                            };
+                            Some(if is_struct_pat_shorthand_field {
+                                format!("{}: {}", receiver, sugg)
+                            } else {
+                                sugg
+                            })
+                        }
+                    })
+                    .peekable();
+                if suggestions.peek().is_some() {
+                    err.span_suggestions(
+                        expr.span,
+                        "try using a conversion method",
+                        suggestions,
+                        Applicability::MaybeIncorrect,
+                    );
+                }
+            }
+        }
+    }
+
+    /// When encountering an expected boxed value that is allocated on the stack, suggest
+    /// allocating it on the heap by calling `Box::new()`.
+    pub(in super::super) fn suggest_boxing_when_appropriate(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        expr: &hir::Expr<'_>,
+        expected: Ty<'tcx>,
+        found: Ty<'tcx>,
+    ) {
+        if self.tcx.hir().is_inside_const_context(expr.hir_id) {
+            // Do not suggest `Box::new` in const context.
+            return;
+        }
+        if !expected.is_box() || found.is_box() {
+            return;
+        }
+        let boxed_found = self.tcx.mk_box(found);
+        if let (true, Ok(snippet)) = (
+            self.can_coerce(boxed_found, expected),
+            self.sess().source_map().span_to_snippet(expr.span),
+        ) {
+            err.span_suggestion(
+                expr.span,
+                "store this in the heap by calling `Box::new`",
+                format!("Box::new({})", snippet),
+                Applicability::MachineApplicable,
+            );
+            err.note(
+                "for more on the distinction between the stack and the heap, read \
+                 https://doc.rust-lang.org/book/ch15-01-box.html, \
+                 https://doc.rust-lang.org/rust-by-example/std/box.html, and \
+                 https://doc.rust-lang.org/std/boxed/index.html",
+            );
+        }
+    }
+
+    /// When encountering an `impl Future` where `BoxFuture` is expected, suggest `Box::pin`.
+    pub(in super::super) fn suggest_calling_boxed_future_when_appropriate(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        expr: &hir::Expr<'_>,
+        expected: Ty<'tcx>,
+        found: Ty<'tcx>,
+    ) -> bool {
+        // Handle #68197.
+
+        if self.tcx.hir().is_inside_const_context(expr.hir_id) {
+            // Do not suggest `Box::new` in const context.
+            return false;
+        }
+        let pin_did = self.tcx.lang_items().pin_type();
+        match expected.kind() {
+            ty::Adt(def, _) if Some(def.did) != pin_did => return false,
+            // This guards the `unwrap` and `mk_box` below.
+            _ if pin_did.is_none() || self.tcx.lang_items().owned_box().is_none() => return false,
+            _ => {}
+        }
+        let boxed_found = self.tcx.mk_box(found);
+        let new_found = self.tcx.mk_lang_item(boxed_found, LangItem::Pin).unwrap();
+        if let (true, Ok(snippet)) = (
+            self.can_coerce(new_found, expected),
+            self.sess().source_map().span_to_snippet(expr.span),
+        ) {
+            match found.kind() {
+                ty::Adt(def, _) if def.is_box() => {
+                    err.help("use `Box::pin`");
+                }
+                _ => {
+                    err.span_suggestion(
+                        expr.span,
+                        "you need to pin and box this expression",
+                        format!("Box::pin({})", snippet),
+                        Applicability::MachineApplicable,
+                    );
+                }
+            }
+            true
+        } else {
+            false
+        }
+    }
+
+    /// A common error is to forget to add a semicolon at the end of a block, e.g.,
+    ///
+    /// ```
+    /// fn foo() {
+    ///     bar_that_returns_u32()
+    /// }
+    /// ```
+    ///
+    /// This routine checks if the return expression in a block would make sense on its own as a
+    /// statement and the return type has been left as default or has been specified as `()`. If so,
+    /// it suggests adding a semicolon.
+    fn suggest_missing_semicolon(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        expression: &'tcx hir::Expr<'tcx>,
+        expected: Ty<'tcx>,
+        cause_span: Span,
+    ) {
+        if expected.is_unit() {
+            // `BlockTailExpression` is only relevant if the tail expr would be
+            // useful on its own.
+            match expression.kind {
+                ExprKind::Call(..)
+                | ExprKind::MethodCall(..)
+                | ExprKind::Loop(..)
+                | ExprKind::Match(..)
+                | ExprKind::Block(..) => {
+                    err.span_suggestion(
+                        cause_span.shrink_to_hi(),
+                        "try adding a semicolon",
+                        ";".to_string(),
+                        Applicability::MachineApplicable,
+                    );
+                }
+                _ => (),
+            }
+        }
+    }
+
+    /// A possible error is to forget to add a return type that is needed:
+    ///
+    /// ```
+    /// fn foo() {
+    ///     bar_that_returns_u32()
+    /// }
+    /// ```
+    ///
+    /// This routine checks that the return type has been left as the default, that the method
+    /// is not part of an `impl` block, and that it isn't the `main` method. If so, it suggests
+    /// setting the return type.
+    pub(in super::super) fn suggest_missing_return_type(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        fn_decl: &hir::FnDecl<'_>,
+        expected: Ty<'tcx>,
+        found: Ty<'tcx>,
+        can_suggest: bool,
+    ) -> bool {
+        // Only suggest changing the return type for methods that
+        // haven't set a return type at all (and aren't `fn main()` or an impl).
+        match (&fn_decl.output, found.is_suggestable(), can_suggest, expected.is_unit()) {
+            (&hir::FnRetTy::DefaultReturn(span), true, true, true) => {
+                err.span_suggestion(
+                    span,
+                    "try adding a return type",
+                    format!("-> {} ", self.resolve_vars_with_obligations(found)),
+                    Applicability::MachineApplicable,
+                );
+                true
+            }
+            (&hir::FnRetTy::DefaultReturn(span), false, true, true) => {
+                err.span_label(span, "possibly return type missing here?");
+                true
+            }
+            (&hir::FnRetTy::DefaultReturn(span), _, false, true) => {
+                // `fn main()` must return `()`, do not suggest changing return type
+                err.span_label(span, "expected `()` because of default return type");
+                true
+            }
+            // expectation was caused by something else, not the default return
+            (&hir::FnRetTy::DefaultReturn(_), _, _, false) => false,
+            (&hir::FnRetTy::Return(ref ty), _, _, _) => {
+                // Only point to return type if the expected type is the return type, as if they
+                // are not, the expectation must have been caused by something else.
+                debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.kind);
+                let sp = ty.span;
+                let ty = AstConv::ast_ty_to_ty(self, ty);
+                debug!("suggest_missing_return_type: return type {:?}", ty);
+                debug!("suggest_missing_return_type: expected type {:?}", expected);
+                if ty.kind() == expected.kind() {
+                    err.span_label(sp, format!("expected `{}` because of return type", expected));
+                    return true;
+                }
+                false
+            }
+        }
+    }
+
+    pub(in super::super) fn suggest_missing_parentheses(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        expr: &hir::Expr<'_>,
+    ) {
+        let sp = self.tcx.sess.source_map().start_point(expr.span);
+        if let Some(sp) = self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp) {
+            // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`
+            self.tcx.sess.parse_sess.expr_parentheses_needed(err, *sp, None);
+        }
+    }
+}
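For `suggest_calling_boxed_future_when_appropriate`, a hedged reduction of the #68197 scenario (the snippet intentionally fails to compile so the suggestion can fire):

    use std::future::Future;
    use std::pin::Pin;

    fn make() -> Pin<Box<dyn Future<Output = u32>>> {
        // mismatched types: the helper above suggests wrapping this
        // expression as `Box::pin(async { 42 })`.
        async { 42 }
    }

    fn main() {}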
diff --git a/compiler/rustc_typeck/src/check/gather_locals.rs b/compiler/rustc_typeck/src/check/gather_locals.rs
index 1d505cf..af55238 100644
--- a/compiler/rustc_typeck/src/check/gather_locals.rs
+++ b/compiler/rustc_typeck/src/check/gather_locals.rs
@@ -6,15 +6,20 @@
 use rustc_middle::ty::Ty;
 use rustc_span::Span;
 use rustc_trait_selection::traits;
+use std::mem;
 
 pub(super) struct GatherLocalsVisitor<'a, 'tcx> {
     fcx: &'a FnCtxt<'a, 'tcx>,
     parent_id: hir::HirId,
+    // Parameters are special cases of patterns, but we want to handle them as
+    // *distinct* cases, so track when we are hitting a pattern *within* an fn
+    // parameter.
+    outermost_fn_param_pat: bool,
 }
 
 impl<'a, 'tcx> GatherLocalsVisitor<'a, 'tcx> {
     pub(super) fn new(fcx: &'a FnCtxt<'a, 'tcx>, parent_id: hir::HirId) -> Self {
-        Self { fcx, parent_id }
+        Self { fcx, parent_id, outermost_fn_param_pat: false }
     }
 
     fn assign(&mut self, span: Span, nid: hir::HirId, ty_opt: Option<LocalTy<'tcx>>) -> Ty<'tcx> {
@@ -88,13 +93,29 @@
         intravisit::walk_local(self, local);
     }
 
+    fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+        let old_outermost_fn_param_pat = mem::replace(&mut self.outermost_fn_param_pat, true);
+        intravisit::walk_param(self, param);
+        self.outermost_fn_param_pat = old_outermost_fn_param_pat;
+    }
+
     // Add pattern bindings.
     fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) {
         if let PatKind::Binding(_, _, ident, _) = p.kind {
             let var_ty = self.assign(p.span, p.hir_id, None);
 
-            if !self.fcx.tcx.features().unsized_locals {
-                self.fcx.require_type_is_sized(var_ty, p.span, traits::VariableType(p.hir_id));
+            if self.outermost_fn_param_pat {
+                if !self.fcx.tcx.features().unsized_fn_params {
+                    self.fcx.require_type_is_sized(
+                        var_ty,
+                        p.span,
+                        traits::SizedArgumentType(Some(p.span)),
+                    );
+                }
+            } else {
+                if !self.fcx.tcx.features().unsized_locals {
+                    self.fcx.require_type_is_sized(var_ty, p.span, traits::VariableType(p.hir_id));
+                }
             }
 
             debug!(
@@ -104,7 +125,9 @@
                 var_ty
             );
         }
+        let old_outermost_fn_param_pat = mem::replace(&mut self.outermost_fn_param_pat, false);
         intravisit::walk_pat(self, p);
+        self.outermost_fn_param_pat = old_outermost_fn_param_pat;
     }
 
     // Don't descend into the bodies of nested closures.
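A hedged sketch of the distinction the new `outermost_fn_param_pat` flag draws: unsized by-value *parameters* are gated behind `unsized_fn_params`, separately from unsized *locals* (nightly-only; shown under the assumption that the feature gate behaves as in this import):

    #![feature(unsized_fn_params)]

    use std::fmt::Display;

    // Accepted with `unsized_fn_params`; without it, the parameter pattern
    // would be reported via `SizedArgumentType`, as in the visitor above.
    fn print_it(x: dyn Display) {
        println!("{}", x);
    }

    fn main() {
        let b: Box<dyn Display> = Box::new(42);
        print_it(*b);
    }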
diff --git a/compiler/rustc_typeck/src/check/generator_interior.rs b/compiler/rustc_typeck/src/check/generator_interior.rs
index 93fdf93..293a995 100644
--- a/compiler/rustc_typeck/src/check/generator_interior.rs
+++ b/compiler/rustc_typeck/src/check/generator_interior.rs
@@ -8,11 +8,13 @@
 use rustc_hir as hir;
 use rustc_hir::def::{CtorKind, DefKind, Res};
 use rustc_hir::def_id::DefId;
+use rustc_hir::hir_id::HirIdSet;
 use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
-use rustc_hir::{Expr, ExprKind, Pat, PatKind};
+use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, Pat, PatKind};
 use rustc_middle::middle::region::{self, YieldData};
 use rustc_middle::ty::{self, Ty};
 use rustc_span::Span;
+use smallvec::SmallVec;
 
 struct InteriorVisitor<'a, 'tcx> {
     fcx: &'a FnCtxt<'a, 'tcx>,
@@ -21,6 +23,13 @@
     expr_count: usize,
     kind: hir::GeneratorKind,
     prev_unresolved_span: Option<Span>,
+    /// Match arm guards have temporary borrows from the pattern bindings.
+    /// If there is a yield point in a guard that refers to such bindings,
+    /// those borrows can span across the yield point.
+    /// We therefore need to track these borrows and record them even though
+    /// they may come after the yield point in the post-order traversal.
+    guard_bindings: SmallVec<[SmallVec<[HirId; 4]>; 1]>,
+    guard_bindings_set: HirIdSet,
 }
 
 impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> {
@@ -30,6 +39,7 @@
         scope: Option<region::Scope>,
         expr: Option<&'tcx Expr<'tcx>>,
         source_span: Span,
+        guard_borrowing_from_pattern: bool,
     ) {
         use rustc_span::DUMMY_SP;
 
@@ -53,7 +63,12 @@
                         yield_data.expr_and_pat_count, self.expr_count, source_span
                     );
 
-                    if yield_data.expr_and_pat_count >= self.expr_count {
+                    // If the borrow happens in a match guard, it must be
+                    // recorded regardless, because it does live across this
+                    // yield point.
+                    if guard_borrowing_from_pattern
+                        || yield_data.expr_and_pat_count >= self.expr_count
+                    {
                         Some(yield_data)
                     } else {
                         None
@@ -134,6 +149,8 @@
         expr_count: 0,
         kind,
         prev_unresolved_span: None,
+        guard_bindings: <_>::default(),
+        guard_bindings_set: <_>::default(),
     };
     intravisit::walk_body(&mut visitor, body);
 
@@ -169,8 +186,9 @@
                 // which means that none of the regions inside relate to any other, even if
                 // typeck had previously found constraints that would cause them to be related.
                 let folded = fcx.tcx.fold_regions(&erased, &mut false, |_, current_depth| {
+                    let r = fcx.tcx.mk_region(ty::ReLateBound(current_depth, ty::BrAnon(counter)));
                     counter += 1;
-                    fcx.tcx.mk_region(ty::ReLateBound(current_depth, ty::BrAnon(counter)))
+                    r
                 });
 
                 cause.ty = folded;
@@ -210,6 +228,35 @@
         NestedVisitorMap::None
     }
 
+    fn visit_arm(&mut self, arm: &'tcx Arm<'tcx>) {
+        let Arm { guard, pat, body, .. } = arm;
+        self.visit_pat(pat);
+        if let Some(ref g) = guard {
+            self.guard_bindings.push(<_>::default());
+            ArmPatCollector {
+                guard_bindings_set: &mut self.guard_bindings_set,
+                guard_bindings: self
+                    .guard_bindings
+                    .last_mut()
+                    .expect("should have pushed at least one earlier"),
+            }
+            .visit_pat(pat);
+
+            match g {
+                Guard::If(ref e) => {
+                    self.visit_expr(e);
+                }
+            }
+
+            let mut scope_var_ids =
+                self.guard_bindings.pop().expect("should have pushed at least one earlier");
+            for var_id in scope_var_ids.drain(..) {
+                self.guard_bindings_set.remove(&var_id);
+            }
+        }
+        self.visit_expr(body);
+    }
+
     fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
         intravisit::walk_pat(self, pat);
 
@@ -218,11 +265,12 @@
         if let PatKind::Binding(..) = pat.kind {
             let scope = self.region_scope_tree.var_scope(pat.hir_id.local_id);
             let ty = self.fcx.typeck_results.borrow().pat_ty(pat);
-            self.record(ty, Some(scope), None, pat.span);
+            self.record(ty, Some(scope), None, pat.span, false);
         }
     }
 
     fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+        let mut guard_borrowing_from_pattern = false;
         match &expr.kind {
             ExprKind::Call(callee, args) => match &callee.kind {
                 ExprKind::Path(qpath) => {
@@ -249,6 +297,16 @@
                 }
                 _ => intravisit::walk_expr(self, expr),
             },
+            ExprKind::Path(qpath) => {
+                intravisit::walk_expr(self, expr);
+                let res = self.fcx.typeck_results.borrow().qpath_res(qpath, expr.hir_id);
+                match res {
+                    Res::Local(id) if self.guard_bindings_set.contains(&id) => {
+                        guard_borrowing_from_pattern = true;
+                    }
+                    _ => {}
+                }
+            }
             _ => intravisit::walk_expr(self, expr),
         }
 
@@ -259,7 +317,7 @@
         // If there are adjustments, then record the final type --
         // this is the actual value that is being produced.
         if let Some(adjusted_ty) = self.fcx.typeck_results.borrow().expr_ty_adjusted_opt(expr) {
-            self.record(adjusted_ty, scope, Some(expr), expr.span);
+            self.record(adjusted_ty, scope, Some(expr), expr.span, guard_borrowing_from_pattern);
         }
 
         // Also record the unadjusted type (which is the only type if
@@ -267,10 +325,10 @@
         // unadjusted value is sometimes a "temporary" that would wind
         // up in a MIR temporary.
         //
-        // As an example, consider an expression like `vec![].push()`.
+        // As an example, consider an expression like `vec![].push(x)`.
         // Here, the `vec![]` would wind up MIR stored into a
         // temporary variable `t` which we can borrow to invoke
-        // `<Vec<_>>::push(&mut t)`.
+        // `<Vec<_>>::push(&mut t, x)`.
         //
         // Note that an expression can have many adjustments, and we
         // are just ignoring those intermediate types. This is because
@@ -287,9 +345,42 @@
         // The type table might not have information for this expression
         // if it is in a malformed scope. (#66387)
         if let Some(ty) = self.fcx.typeck_results.borrow().expr_ty_opt(expr) {
-            self.record(ty, scope, Some(expr), expr.span);
+            if guard_borrowing_from_pattern {
+                // Match guards create references to all the bindings in the pattern that are used
+                // in the guard, e.g. `y if is_even(y) => ...` becomes `is_even(*r_y)` where `r_y`
+                // is a reference to `y`, so we must record a reference to the type of the binding.
+                let tcx = self.fcx.tcx;
+                let ref_ty = tcx.mk_ref(
+                    // Use `ReErased` as `resolve_interior` is going to replace all the regions anyway.
+                    tcx.mk_region(ty::RegionKind::ReErased),
+                    ty::TypeAndMut { ty, mutbl: hir::Mutability::Not },
+                );
+                self.record(ref_ty, scope, Some(expr), expr.span, guard_borrowing_from_pattern);
+            }
+            self.record(ty, scope, Some(expr), expr.span, guard_borrowing_from_pattern);
         } else {
             self.fcx.tcx.sess.delay_span_bug(expr.span, "no type for node");
         }
     }
 }
+
+struct ArmPatCollector<'a> {
+    guard_bindings_set: &'a mut HirIdSet,
+    guard_bindings: &'a mut SmallVec<[HirId; 4]>,
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for ArmPatCollector<'a> {
+    type Map = intravisit::ErasedMap<'tcx>;
+
+    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
+        NestedVisitorMap::None
+    }
+
+    fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
+        intravisit::walk_pat(self, pat);
+        if let PatKind::Binding(_, id, ..) = pat.kind {
+            self.guard_bindings.push(id);
+            self.guard_bindings_set.insert(id);
+        }
+    }
+}
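A rough, hedged illustration of the guard-binding situation this visitor now tracks: the guard reads the binding `x` through an implicit reference, and in an async/generator body such guard references have to be accounted for when computing which types live across yield points (ordinary compiling code, shown only for orientation):

    async fn classify(n: i32) -> &'static str {
        match n {
            // The guard `x > 0` uses the binding `x`; the visitor above tracks
            // guard bindings like this so that the borrows created for the
            // guard are reflected in the generator interior.
            x if x > 0 => {
                async {}.await;
                "positive"
            }
            _ => "non-positive",
        }
    }

    fn main() {}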
diff --git a/compiler/rustc_typeck/src/check/intrinsic.rs b/compiler/rustc_typeck/src/check/intrinsic.rs
index 2ee867c..f40a250 100644
--- a/compiler/rustc_typeck/src/check/intrinsic.rs
+++ b/compiler/rustc_typeck/src/check/intrinsic.rs
@@ -328,14 +328,14 @@
 
             kw::Try => {
                 let mut_u8 = tcx.mk_mut_ptr(tcx.types.u8);
-                let try_fn_ty = ty::Binder::bind(tcx.mk_fn_sig(
+                let try_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
                     iter::once(mut_u8),
                     tcx.mk_unit(),
                     false,
                     hir::Unsafety::Normal,
                     Abi::Rust,
                 ));
-                let catch_fn_ty = ty::Binder::bind(tcx.mk_fn_sig(
+                let catch_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
                     [mut_u8, mut_u8].iter().cloned(),
                     tcx.mk_unit(),
                     false,
diff --git a/compiler/rustc_typeck/src/check/method/probe.rs b/compiler/rustc_typeck/src/check/method/probe.rs
index c1ba292..713b24e 100644
--- a/compiler/rustc_typeck/src/check/method/probe.rs
+++ b/compiler/rustc_typeck/src/check/method/probe.rs
@@ -244,7 +244,7 @@
                 ProbeScope::AllTraits,
                 |probe_cx| Ok(probe_cx.candidate_method_names()),
             )
-            .unwrap_or(vec![]);
+            .unwrap_or_default();
         method_names
             .iter()
             .flat_map(|&method_name| {
@@ -796,29 +796,29 @@
         // FIXME: do we want to commit to this behavior for param bounds?
         debug!("assemble_inherent_candidates_from_param(param_ty={:?})", param_ty);
 
-        let bounds =
-            self.param_env.caller_bounds().iter().map(ty::Predicate::skip_binders).filter_map(
-                |predicate| match predicate {
-                    ty::PredicateAtom::Trait(trait_predicate, _) => {
-                        match trait_predicate.trait_ref.self_ty().kind() {
-                            ty::Param(ref p) if *p == param_ty => {
-                                Some(ty::Binder::bind(trait_predicate.trait_ref))
-                            }
-                            _ => None,
+        let bounds = self.param_env.caller_bounds().iter().filter_map(|predicate| {
+            let bound_predicate = predicate.bound_atom();
+            match bound_predicate.skip_binder() {
+                ty::PredicateAtom::Trait(trait_predicate, _) => {
+                    match *trait_predicate.trait_ref.self_ty().kind() {
+                        ty::Param(p) if p == param_ty => {
+                            Some(bound_predicate.rebind(trait_predicate.trait_ref))
                         }
+                        _ => None,
                     }
-                    ty::PredicateAtom::Subtype(..)
-                    | ty::PredicateAtom::Projection(..)
-                    | ty::PredicateAtom::RegionOutlives(..)
-                    | ty::PredicateAtom::WellFormed(..)
-                    | ty::PredicateAtom::ObjectSafe(..)
-                    | ty::PredicateAtom::ClosureKind(..)
-                    | ty::PredicateAtom::TypeOutlives(..)
-                    | ty::PredicateAtom::ConstEvaluatable(..)
-                    | ty::PredicateAtom::ConstEquate(..)
-                    | ty::PredicateAtom::TypeWellFormedFromEnv(..) => None,
-                },
-            );
+                }
+                ty::PredicateAtom::Subtype(..)
+                | ty::PredicateAtom::Projection(..)
+                | ty::PredicateAtom::RegionOutlives(..)
+                | ty::PredicateAtom::WellFormed(..)
+                | ty::PredicateAtom::ObjectSafe(..)
+                | ty::PredicateAtom::ClosureKind(..)
+                | ty::PredicateAtom::TypeOutlives(..)
+                | ty::PredicateAtom::ConstEvaluatable(..)
+                | ty::PredicateAtom::ConstEquate(..)
+                | ty::PredicateAtom::TypeWellFormedFromEnv(..) => None,
+            }
+        });
 
         self.elaborate_bounds(bounds, |this, poly_trait_ref, item| {
             let trait_ref = this.erase_late_bound_regions(&poly_trait_ref);
diff --git a/compiler/rustc_typeck/src/check/method/suggest.rs b/compiler/rustc_typeck/src/check/method/suggest.rs
index e33a4e9..46afe48 100644
--- a/compiler/rustc_typeck/src/check/method/suggest.rs
+++ b/compiler/rustc_typeck/src/check/method/suggest.rs
@@ -21,7 +21,6 @@
 use rustc_span::{source_map, FileName, Span};
 use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
 use rustc_trait_selection::traits::Obligation;
-use rustc_trait_selection::traits::SelectionContext;
 
 use std::cmp::Ordering;
 
@@ -637,9 +636,10 @@
                         }
                     };
                     let mut format_pred = |pred: ty::Predicate<'tcx>| {
-                        match pred.skip_binders() {
+                        let bound_predicate = pred.bound_atom();
+                        match bound_predicate.skip_binder() {
                             ty::PredicateAtom::Projection(pred) => {
-                                let pred = ty::Binder::bind(pred);
+                                let pred = bound_predicate.rebind(pred);
                                 // `<Foo as Iterator>::Item = String`.
                                 let trait_ref =
                                     pred.skip_binder().projection_ty.trait_ref(self.tcx);
@@ -658,8 +658,7 @@
                                 Some((obligation, trait_ref.self_ty()))
                             }
                             ty::PredicateAtom::Trait(poly_trait_ref, _) => {
-                                let poly_trait_ref = ty::Binder::bind(poly_trait_ref);
-                                let p = poly_trait_ref.skip_binder().trait_ref;
+                                let p = poly_trait_ref.trait_ref;
                                 let self_ty = p.self_ty();
                                 let path = p.print_only_trait_path();
                                 let obligation = format!("{}: {}", self_ty, path);
@@ -870,46 +869,19 @@
         call: &hir::Expr<'_>,
         span: Span,
     ) {
-        if let ty::Opaque(def_id, _) = *ty.kind() {
-            let future_trait = self.tcx.require_lang_item(LangItem::Future, None);
-            // Future::Output
-            let item_def_id = self
-                .tcx
-                .associated_items(future_trait)
-                .in_definition_order()
-                .next()
-                .unwrap()
-                .def_id;
-
-            let projection_ty = self.tcx.projection_ty_from_predicates((def_id, item_def_id));
-            let cause = self.misc(span);
-            let mut selcx = SelectionContext::new(&self.infcx);
-            let mut obligations = vec![];
-            if let Some(projection_ty) = projection_ty {
-                let normalized_ty = rustc_trait_selection::traits::normalize_projection_type(
-                    &mut selcx,
-                    self.param_env,
-                    projection_ty,
-                    cause,
-                    0,
-                    &mut obligations,
-                );
-                debug!(
-                    "suggest_await_before_method: normalized_ty={:?}, ty_kind={:?}",
-                    self.resolve_vars_if_possible(&normalized_ty),
-                    normalized_ty.kind(),
-                );
-                let method_exists = self.method_exists(item_name, normalized_ty, call.hir_id, true);
-                debug!("suggest_await_before_method: is_method_exist={}", method_exists);
-                if method_exists {
-                    err.span_suggestion_verbose(
-                        span.shrink_to_lo(),
-                        "consider awaiting before this method call",
-                        "await.".to_string(),
-                        Applicability::MaybeIncorrect,
-                    );
-                }
-            }
+        let output_ty = match self.infcx.get_impl_future_output_ty(ty) {
+            Some(output_ty) => self.resolve_vars_if_possible(&output_ty),
+            _ => return,
+        };
+        let method_exists = self.method_exists(item_name, output_ty, call.hir_id, true);
+        debug!("suggest_await_before_method: is_method_exist={}", method_exists);
+        if method_exists {
+            err.span_suggestion_verbose(
+                span.shrink_to_lo(),
+                "consider `await`ing on the `Future` and calling the method on its `Output`",
+                "await.".to_string(),
+                Applicability::MaybeIncorrect,
+            );
         }
     }
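A hypothetical snippet for the rewritten `suggest_await_before_method` path (`get_len` and `use_it` are invented names; the code intentionally fails to compile so the `.await` suggestion applies):

    async fn get_len() -> usize {
        3
    }

    async fn use_it() -> bool {
        // error: no method named `is_power_of_two` found for the opaque
        // future type; the suggestion inserts `await.` so the method is
        // called on the future's `Output` instead.
        get_len().is_power_of_two()
    }

    fn main() {}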
 
diff --git a/compiler/rustc_typeck/src/check/mod.rs b/compiler/rustc_typeck/src/check/mod.rs
index 97172d3..169ad0d 100644
--- a/compiler/rustc_typeck/src/check/mod.rs
+++ b/compiler/rustc_typeck/src/check/mod.rs
@@ -96,7 +96,7 @@
 pub use check::{check_item_type, check_wf_new};
 pub use diverges::Diverges;
 pub use expectation::Expectation;
-pub use fn_ctxt::FnCtxt;
+pub use fn_ctxt::*;
 pub use inherited::{Inherited, InheritedBuilder};
 
 use crate::astconv::AstConv;
@@ -111,6 +111,7 @@
 use rustc_hir::{HirIdMap, Node};
 use rustc_index::bit_set::BitSet;
 use rustc_index::vec::Idx;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
 use rustc_middle::ty::fold::{TypeFoldable, TypeFolder};
 use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::subst::GenericArgKind;
@@ -264,7 +265,7 @@
 }
 
 fn adt_destructor(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ty::Destructor> {
-    tcx.calculate_dtor(def_id, &mut dropck::check_drop_impl)
+    tcx.calculate_dtor(def_id, dropck::check_drop_impl)
 }
 
 /// If this `DefId` is a "primary tables entry", returns
@@ -528,7 +529,20 @@
                     hir::TyKind::Infer => Some(AstConv::ast_ty_to_ty(&fcx, ty)),
                     _ => None,
                 })
-                .unwrap_or_else(fallback);
+                .unwrap_or_else(|| match tcx.hir().get(id) {
+                    Node::AnonConst(_) => match tcx.hir().get(tcx.hir().get_parent_node(id)) {
+                        Node::Expr(&hir::Expr {
+                            kind: hir::ExprKind::ConstBlock(ref anon_const),
+                            ..
+                        }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin {
+                            kind: TypeVariableOriginKind::TypeInference,
+                            span,
+                        }),
+                        _ => fallback(),
+                    },
+                    _ => fallback(),
+                });
+
             let expected_type = fcx.normalize_associated_types_in(body.value.span, &expected_type);
             fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized);
 
@@ -850,7 +864,8 @@
     let mut projections = vec![];
     for (predicate, _) in predicates.predicates {
         debug!("predicate {:?}", predicate);
-        match predicate.skip_binders() {
+        let bound_predicate = predicate.bound_atom();
+        match bound_predicate.skip_binder() {
             ty::PredicateAtom::Trait(trait_predicate, _) => {
                 let entry = types.entry(trait_predicate.self_ty()).or_default();
                 let def_id = trait_predicate.def_id();
@@ -861,7 +876,7 @@
                 }
             }
             ty::PredicateAtom::Projection(projection_pred) => {
-                projections.push(ty::Binder::bind(projection_pred));
+                projections.push(bound_predicate.rebind(projection_pred));
             }
             _ => {}
         }
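Relating to the `ConstBlock` fallback added earlier in this file: a hedged sketch of the surface feature it supports, inline const blocks, whose anonymous const has no written type and therefore starts from a fresh inference variable (nightly `inline_const`; shown only for orientation):

    #![feature(inline_const)]

    fn main() {
        // The anonymous const behind `const { ... }` has no ascribed type, so
        // type checking seeds it with an inference variable and later unifies
        // it with `u8` from the binding's annotation.
        let x: u8 = const { 40 + 2 };
        assert_eq!(x, 42);
    }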
diff --git a/compiler/rustc_typeck/src/check/op.rs b/compiler/rustc_typeck/src/check/op.rs
index 66975f3..247b525 100644
--- a/compiler/rustc_typeck/src/check/op.rs
+++ b/compiler/rustc_typeck/src/check/op.rs
@@ -2,6 +2,7 @@
 
 use super::method::MethodCallee;
 use super::FnCtxt;
+use rustc_ast as ast;
 use rustc_errors::{self, struct_span_err, Applicability, DiagnosticBuilder};
 use rustc_hir as hir;
 use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
@@ -13,10 +14,13 @@
 use rustc_middle::ty::{
     self, suggest_constraining_type_param, Ty, TyCtxt, TypeFoldable, TypeVisitor,
 };
+use rustc_span::source_map::Spanned;
 use rustc_span::symbol::{sym, Ident};
 use rustc_span::Span;
 use rustc_trait_selection::infer::InferCtxtExt;
 
+use std::ops::ControlFlow;
+
 impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     /// Checks a `a <op>= b`
     pub fn check_binop_assign(
@@ -300,7 +304,7 @@
                                 true,
                             ),
                             hir::BinOpKind::Mul => (
-                                format!("cannot multiply `{}` to `{}`", rhs_ty, lhs_ty),
+                                format!("cannot multiply `{}` by `{}`", lhs_ty, rhs_ty),
                                 Some("std::ops::Mul"),
                                 true,
                             ),
@@ -673,6 +677,29 @@
                     match actual.kind() {
                         Uint(_) if op == hir::UnOp::UnNeg => {
                             err.note("unsigned values cannot be negated");
+
+                            if let hir::ExprKind::Unary(
+                                _,
+                                hir::Expr {
+                                    kind:
+                                        hir::ExprKind::Lit(Spanned {
+                                            node: ast::LitKind::Int(1, _),
+                                            ..
+                                        }),
+                                    ..
+                                },
+                            ) = ex.kind
+                            {
+                                err.span_suggestion(
+                                    ex.span,
+                                    &format!(
+                                        "you may have meant the maximum value of `{}`",
+                                        actual
+                                    ),
+                                    format!("{}::MAX", actual),
+                                    Applicability::MaybeIncorrect,
+                                );
+                            }
                         }
                         Str | Never | Char | Tuple(_) | Array(_, _) => {}
                         Ref(_, ref lty, _) if *lty.kind() == Str => {}
@@ -956,7 +983,7 @@
 struct TypeParamVisitor<'tcx>(Vec<Ty<'tcx>>);
 
 impl<'tcx> TypeVisitor<'tcx> for TypeParamVisitor<'tcx> {
-    fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> {
         if let ty::Param(_) = ty.kind() {
             self.0.push(ty);
         }
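A quick hedged example of the unsigned-negation suggestion added above in this file (the snippet intentionally fails to compile):

    fn main() {
        // error: cannot apply unary operator `-` to type `u32`; besides the
        // "unsigned values cannot be negated" note, the new suggestion offers
        // `u32::MAX`, since `-1` on an unsigned type usually means "all bits set".
        let x: u32 = -1;
        println!("{}", x);
    }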
diff --git a/compiler/rustc_typeck/src/check/pat.rs b/compiler/rustc_typeck/src/check/pat.rs
index 3e431a9..fa7898f 100644
--- a/compiler/rustc_typeck/src/check/pat.rs
+++ b/compiler/rustc_typeck/src/check/pat.rs
@@ -149,6 +149,7 @@
     ///
     /// Outside of this module, `check_pat_top` should always be used.
     /// Conversely, inside this module, `check_pat_top` should never be used.
+    #[instrument(skip(self, ti))]
     fn check_pat(
         &self,
         pat: &'tcx Pat<'tcx>,
@@ -156,8 +157,6 @@
         def_bm: BindingMode,
         ti: TopInfo<'tcx>,
     ) {
-        debug!("check_pat(pat={:?},expected={:?},def_bm={:?})", pat, expected, def_bm);
-
         let path_res = match &pat.kind {
             PatKind::Path(qpath) => Some(self.resolve_ty_and_res_ufcs(qpath, pat.hir_id, pat.span)),
             _ => None,
@@ -270,6 +269,11 @@
     ///
     /// When the pattern is a path pattern, `opt_path_res` must be `Some(res)`.
     fn calc_adjust_mode(&self, pat: &'tcx Pat<'tcx>, opt_path_res: Option<Res>) -> AdjustMode {
+        // When we perform destructuring assignment, we disable default match bindings, which are
+        // unintuitive in this context.
+        if !pat.default_binding_modes {
+            return AdjustMode::Reset;
+        }
         match &pat.kind {
             // Type checking these product-like types successfully always requires
             // that the expected type be of those types and not reference types.
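The `AdjustMode::Reset` early return above is what keeps destructuring assignment free of default match binding modes. A minimal sketch, assuming the nightly `destructuring_assignment` feature gate that was current at the time:

```rust
// Nightly-only at this point: destructuring assignment.
#![feature(destructuring_assignment)]

fn main() {
    let (mut a, mut b) = (1, 2);
    // The left-hand side is checked as a pattern, but with
    // `default_binding_modes` disabled no implicit `ref`/`ref mut`
    // bindings are introduced (the `AdjustMode::Reset` path above).
    (b, a) = (a, b);
    assert_eq!((a, b), (2, 1));
}
```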
@@ -393,6 +397,11 @@
             if let ty::Ref(_, inner_ty, _) = expected.kind() {
                 if matches!(inner_ty.kind(), ty::Slice(_)) {
                     let tcx = self.tcx;
+                    trace!(?lt.hir_id.local_id, "polymorphic byte string lit");
+                    self.typeck_results
+                        .borrow_mut()
+                        .treat_byte_string_as_slice
+                        .insert(lt.hir_id.local_id);
                     pat_ty = tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_slice(tcx.types.u8));
                 }
             }
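The `treat_byte_string_as_slice` entry recorded above marks byte string literal patterns whose expected type is a slice rather than an array. A user-level sketch of the pattern shape this supports:

```rust
fn is_hello(bytes: &[u8]) -> bool {
    // `b"hello"` as a pattern would normally have type `&[u8; 5]`; because the
    // scrutinee is `&[u8]`, the literal is typed as `&'static [u8]` instead.
    match bytes {
        b"hello" => true,
        _ => false,
    }
}

fn main() {
    assert!(is_hello(b"hello"));
    assert!(!is_hello(b"bye"));
}
```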
@@ -1381,7 +1390,7 @@
     /// Returns a diagnostic reporting a struct pattern which is missing an `..` due to
     /// inaccessible fields.
     ///
-    /// ```ignore (diagnostic)
+    /// ```text
     /// error: pattern requires `..` due to inaccessible fields
     ///   --> src/main.rs:10:9
     ///    |
@@ -1431,7 +1440,7 @@
 
     /// Returns a diagnostic reporting a struct pattern which does not mention some fields.
     ///
-    /// ```ignore (diagnostic)
+    /// ```text
     /// error[E0027]: pattern does not mention field `you_cant_use_this_field`
     ///   --> src/main.rs:15:9
     ///    |
@@ -1500,7 +1509,7 @@
         err.span_suggestion(
             sp,
             &format!(
-                "if you don't care about {} missing field{}, you can explicitely ignore {}",
+                "if you don't care about {} missing field{}, you can explicitly ignore {}",
                 if len == 1 { "this" } else { "these" },
                 if len == 1 { "" } else { "s" },
                 if len == 1 { "it" } else { "them" },
diff --git a/compiler/rustc_typeck/src/check/regionck.rs b/compiler/rustc_typeck/src/check/regionck.rs
index ba0f225..7b31b9f 100644
--- a/compiler/rustc_typeck/src/check/regionck.rs
+++ b/compiler/rustc_typeck/src/check/regionck.rs
@@ -577,7 +577,7 @@
     fn link_pattern(&self, discr_cmt: PlaceWithHirId<'tcx>, root_pat: &hir::Pat<'_>) {
         debug!("link_pattern(discr_cmt={:?}, root_pat={:?})", discr_cmt, root_pat);
         ignore_err!(self.with_mc(|mc| {
-            mc.cat_pattern(discr_cmt, root_pat, |sub_cmt, hir::Pat { kind, span, hir_id }| {
+            mc.cat_pattern(discr_cmt, root_pat, |sub_cmt, hir::Pat { kind, span, hir_id, .. }| {
                 // `ref x` pattern
                 if let PatKind::Binding(..) = kind {
                     if let Some(ty::BindByReference(mutbl)) =
diff --git a/compiler/rustc_typeck/src/check/upvar.rs b/compiler/rustc_typeck/src/check/upvar.rs
index 2c3be0d..e9dfef7 100644
--- a/compiler/rustc_typeck/src/check/upvar.rs
+++ b/compiler/rustc_typeck/src/check/upvar.rs
@@ -202,9 +202,11 @@
             "analyze_closure: id={:?} substs={:?} final_upvar_tys={:?}",
             closure_hir_id, substs, final_upvar_tys
         );
-        for (upvar_ty, final_upvar_ty) in substs.upvar_tys().zip(final_upvar_tys) {
-            self.demand_suptype(span, upvar_ty, final_upvar_ty);
-        }
+
+        // Build a tuple (U0..Un) of the final upvar types U0..Un
+        // and unify the upvar tuple type in the closure with it:
+        let final_tupled_upvars_type = self.tcx.mk_tup(final_upvar_tys.iter());
+        self.demand_suptype(span, substs.tupled_upvars_ty(), final_tupled_upvars_type);
 
         // If we also inferred the closure kind here,
         // process any deferred resolutions.
@@ -277,11 +279,12 @@
     fn adjust_upvar_borrow_kind_for_consume(
         &mut self,
         place_with_id: &PlaceWithHirId<'tcx>,
+        diag_expr_id: hir::HirId,
         mode: euv::ConsumeMode,
     ) {
         debug!(
-            "adjust_upvar_borrow_kind_for_consume(place_with_id={:?}, mode={:?})",
-            place_with_id, mode
+            "adjust_upvar_borrow_kind_for_consume(place_with_id={:?}, diag_expr_id={:?}, mode={:?})",
+            place_with_id, diag_expr_id, mode
         );
 
         // we only care about moves
@@ -301,7 +304,7 @@
 
         debug!("adjust_upvar_borrow_kind_for_consume: upvar={:?}", upvar_id);
 
-        let usage_span = tcx.hir().span(place_with_id.hir_id);
+        let usage_span = tcx.hir().span(diag_expr_id);
 
         // To move out of an upvar, this must be a FnOnce closure
         self.adjust_closure_kind(
@@ -311,14 +314,7 @@
             var_name(tcx, upvar_id.var_path.hir_id),
         );
 
-        // In a case like `let pat = upvar`, don't use the span
-        // of the pattern, as this just looks confusing.
-        let by_value_span = match tcx.hir().get(place_with_id.hir_id) {
-            hir::Node::Pat(_) => None,
-            _ => Some(usage_span),
-        };
-
-        let new_capture = ty::UpvarCapture::ByValue(by_value_span);
+        let new_capture = ty::UpvarCapture::ByValue(Some(usage_span));
         match self.adjust_upvar_captures.entry(upvar_id) {
             Entry::Occupied(mut e) => {
                 match e.get() {
@@ -343,8 +339,15 @@
     /// Indicates that `place_with_id` is being directly mutated (e.g., assigned
     /// to). If the place is based on a by-ref upvar, this implies that
     /// the upvar must be borrowed using an `&mut` borrow.
-    fn adjust_upvar_borrow_kind_for_mut(&mut self, place_with_id: &PlaceWithHirId<'tcx>) {
-        debug!("adjust_upvar_borrow_kind_for_mut(place_with_id={:?})", place_with_id);
+    fn adjust_upvar_borrow_kind_for_mut(
+        &mut self,
+        place_with_id: &PlaceWithHirId<'tcx>,
+        diag_expr_id: hir::HirId,
+    ) {
+        debug!(
+            "adjust_upvar_borrow_kind_for_mut(place_with_id={:?}, diag_expr_id={:?})",
+            place_with_id, diag_expr_id
+        );
 
         if let PlaceBase::Upvar(upvar_id) = place_with_id.place.base {
             let mut borrow_kind = ty::MutBorrow;
@@ -360,16 +363,19 @@
                     _ => (),
                 }
             }
-            self.adjust_upvar_deref(
-                upvar_id,
-                self.fcx.tcx.hir().span(place_with_id.hir_id),
-                borrow_kind,
-            );
+            self.adjust_upvar_deref(upvar_id, self.fcx.tcx.hir().span(diag_expr_id), borrow_kind);
         }
     }
 
-    fn adjust_upvar_borrow_kind_for_unique(&mut self, place_with_id: &PlaceWithHirId<'tcx>) {
-        debug!("adjust_upvar_borrow_kind_for_unique(place_with_id={:?})", place_with_id);
+    fn adjust_upvar_borrow_kind_for_unique(
+        &mut self,
+        place_with_id: &PlaceWithHirId<'tcx>,
+        diag_expr_id: hir::HirId,
+    ) {
+        debug!(
+            "adjust_upvar_borrow_kind_for_unique(place_with_id={:?}, diag_expr_id={:?})",
+            place_with_id, diag_expr_id
+        );
 
         if let PlaceBase::Upvar(upvar_id) = place_with_id.place.base {
             if place_with_id.place.deref_tys().any(ty::TyS::is_unsafe_ptr) {
@@ -379,7 +385,7 @@
             // for a borrowed pointer to be unique, its base must be unique
             self.adjust_upvar_deref(
                 upvar_id,
-                self.fcx.tcx.hir().span(place_with_id.hir_id),
+                self.fcx.tcx.hir().span(diag_expr_id),
                 ty::UniqueImmBorrow,
             );
         }
@@ -498,29 +504,44 @@
 }
 
 impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> {
-    fn consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, mode: euv::ConsumeMode) {
-        debug!("consume(place_with_id={:?},mode={:?})", place_with_id, mode);
-        self.adjust_upvar_borrow_kind_for_consume(place_with_id, mode);
+    fn consume(
+        &mut self,
+        place_with_id: &PlaceWithHirId<'tcx>,
+        diag_expr_id: hir::HirId,
+        mode: euv::ConsumeMode,
+    ) {
+        debug!(
+            "consume(place_with_id={:?}, diag_expr_id={:?}, mode={:?})",
+            place_with_id, diag_expr_id, mode
+        );
+        self.adjust_upvar_borrow_kind_for_consume(&place_with_id, diag_expr_id, mode);
     }
 
-    fn borrow(&mut self, place_with_id: &PlaceWithHirId<'tcx>, bk: ty::BorrowKind) {
-        debug!("borrow(place_with_id={:?}, bk={:?})", place_with_id, bk);
+    fn borrow(
+        &mut self,
+        place_with_id: &PlaceWithHirId<'tcx>,
+        diag_expr_id: hir::HirId,
+        bk: ty::BorrowKind,
+    ) {
+        debug!(
+            "borrow(place_with_id={:?}, diag_expr_id={:?}, bk={:?})",
+            place_with_id, diag_expr_id, bk
+        );
 
         match bk {
             ty::ImmBorrow => {}
             ty::UniqueImmBorrow => {
-                self.adjust_upvar_borrow_kind_for_unique(place_with_id);
+                self.adjust_upvar_borrow_kind_for_unique(&place_with_id, diag_expr_id);
             }
             ty::MutBorrow => {
-                self.adjust_upvar_borrow_kind_for_mut(place_with_id);
+                self.adjust_upvar_borrow_kind_for_mut(&place_with_id, diag_expr_id);
             }
         }
     }
 
-    fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>) {
-        debug!("mutate(assignee_place={:?})", assignee_place);
-
-        self.adjust_upvar_borrow_kind_for_mut(assignee_place);
+    fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+        debug!("mutate(assignee_place={:?}, diag_expr_id={:?})", assignee_place, diag_expr_id);
+        self.adjust_upvar_borrow_kind_for_mut(assignee_place, diag_expr_id);
     }
 }
 
diff --git a/compiler/rustc_typeck/src/check/wfcheck.rs b/compiler/rustc_typeck/src/check/wfcheck.rs
index 5203f3f..1e27357 100644
--- a/compiler/rustc_typeck/src/check/wfcheck.rs
+++ b/compiler/rustc_typeck/src/check/wfcheck.rs
@@ -24,6 +24,8 @@
 use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
 use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode};
 
+use std::ops::ControlFlow;
+
 /// Helper type of a temporary returned by `.for_item(...)`.
 /// This is necessary because we can't write the following bound:
 ///
@@ -420,6 +422,9 @@
                 check_method_receiver(fcx, hir_sig, &item, self_ty);
             }
             ty::AssocKind::Type => {
+                if let ty::AssocItemContainer::TraitContainer(_) = item.container {
+                    check_associated_type_bounds(fcx, item, span)
+                }
                 if item.defaultness.has_value() {
                     let ty = fcx.tcx.type_of(item.def_id);
                     let ty = fcx.normalize_associated_types_in(span, &ty);
@@ -571,7 +576,6 @@
 
     for_item(tcx, item).with_fcx(|fcx, _| {
         check_where_clauses(tcx, fcx, item.span, trait_def_id.to_def_id(), None);
-        check_associated_type_defaults(fcx, trait_def_id.to_def_id());
 
         vec![]
     });
@@ -581,96 +585,26 @@
 ///
 /// Assuming the defaults are used, check that all predicates (bounds on the
 /// assoc type and where clauses on the trait) hold.
-fn check_associated_type_defaults(fcx: &FnCtxt<'_, '_>, trait_def_id: DefId) {
+fn check_associated_type_bounds(fcx: &FnCtxt<'_, '_>, item: &ty::AssocItem, span: Span) {
     let tcx = fcx.tcx;
-    let substs = InternalSubsts::identity_for_item(tcx, trait_def_id);
 
-    // For all assoc. types with defaults, build a map from
-    // `<Self as Trait<...>>::Assoc` to the default type.
-    let map = tcx
-        .associated_items(trait_def_id)
-        .in_definition_order()
-        .filter_map(|item| {
-            if item.kind == ty::AssocKind::Type && item.defaultness.has_value() {
-                // `<Self as Trait<...>>::Assoc`
-                let proj = ty::ProjectionTy { substs, item_def_id: item.def_id };
-                let default_ty = tcx.type_of(item.def_id);
-                debug!("assoc. type default mapping: {} -> {}", proj, default_ty);
-                Some((proj, default_ty))
-            } else {
-                None
-            }
-        })
-        .collect::<FxHashMap<_, _>>();
+    let bounds = tcx.explicit_item_bounds(item.def_id);
 
-    /// Replaces projections of associated types with their default types.
-    ///
-    /// This does a "shallow substitution", meaning that defaults that refer to
-    /// other defaulted assoc. types will still refer to the projection
-    /// afterwards, not to the other default. For example:
-    ///
-    /// ```compile_fail
-    /// trait Tr {
-    ///     type A: Clone = Vec<Self::B>;
-    ///     type B = u8;
-    /// }
-    /// ```
-    ///
-    /// This will end up replacing the bound `Self::A: Clone` with
-    /// `Vec<Self::B>: Clone`, not with `Vec<u8>: Clone`. If we did a deep
-    /// substitution and ended up with the latter, the trait would be accepted.
-    /// If an `impl` then replaced `B` with something that isn't `Clone`,
-    /// suddenly the default for `A` is no longer valid. The shallow
-    /// substitution forces the trait to add a `B: Clone` bound to be accepted,
-    /// which means that an `impl` can replace any default without breaking
-    /// others.
-    ///
-    /// Note that this isn't needed for soundness: The defaults would still be
-    /// checked in any impl that doesn't override them.
-    struct DefaultNormalizer<'tcx> {
-        tcx: TyCtxt<'tcx>,
-        map: FxHashMap<ty::ProjectionTy<'tcx>, Ty<'tcx>>,
-    }
+    debug!("check_associated_type_bounds: bounds={:?}", bounds);
+    let wf_obligations = bounds.iter().flat_map(|&(bound, bound_span)| {
+        let normalized_bound = fcx.normalize_associated_types_in(span, &bound);
+        traits::wf::predicate_obligations(
+            fcx,
+            fcx.param_env,
+            fcx.body_id,
+            normalized_bound,
+            bound_span,
+        )
+    });
 
-    impl<'tcx> ty::fold::TypeFolder<'tcx> for DefaultNormalizer<'tcx> {
-        fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
-            self.tcx
-        }
-
-        fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-            match t.kind() {
-                ty::Projection(proj_ty) => {
-                    if let Some(default) = self.map.get(&proj_ty) {
-                        default
-                    } else {
-                        t.super_fold_with(self)
-                    }
-                }
-                _ => t.super_fold_with(self),
-            }
-        }
-    }
-
-    // Now take all predicates defined on the trait, replace any mention of
-    // the assoc. types with their default, and prove them.
-    // We only consider predicates that directly mention the assoc. type.
-    let mut norm = DefaultNormalizer { tcx, map };
-    let predicates = fcx.tcx.predicates_of(trait_def_id);
-    for &(orig_pred, span) in predicates.predicates.iter() {
-        let pred = orig_pred.fold_with(&mut norm);
-        if pred != orig_pred {
-            // Mentions one of the defaulted assoc. types
-            debug!("default suitability check: proving predicate: {} -> {}", orig_pred, pred);
-            let pred = fcx.normalize_associated_types_in(span, &pred);
-            let cause = traits::ObligationCause::new(
-                span,
-                fcx.body_id,
-                traits::ItemObligation(trait_def_id),
-            );
-            let obligation = traits::Obligation::new(cause, fcx.param_env, pred);
-
-            fcx.register_predicate(obligation);
-        }
+    for obligation in wf_obligations {
+        debug!("next obligation cause: {:?}", obligation.cause);
+        fcx.register_predicate(obligation);
     }
 }
 
@@ -866,18 +800,18 @@
                 params: FxHashSet<u32>,
             }
             impl<'tcx> ty::fold::TypeVisitor<'tcx> for CountParams {
-                fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+                fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
                     if let ty::Param(param) = t.kind() {
                         self.params.insert(param.index);
                     }
                     t.super_visit_with(self)
                 }
 
-                fn visit_region(&mut self, _: ty::Region<'tcx>) -> bool {
-                    true
+                fn visit_region(&mut self, _: ty::Region<'tcx>) -> ControlFlow<()> {
+                    ControlFlow::BREAK
                 }
 
-                fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+                fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> {
                     if let ty::ConstKind::Param(param) = c.val {
                         self.params.insert(param.index);
                     }
@@ -885,7 +819,7 @@
                 }
             }
             let mut param_count = CountParams::default();
-            let has_region = pred.visit_with(&mut param_count);
+            let has_region = pred.visit_with(&mut param_count).is_break();
             let substituted_pred = pred.subst(fcx.tcx, substs);
             // Don't check non-defaulted params, dependent defaults (including lifetimes)
             // or preds with multiple params.
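These visitor methods now return `ControlFlow<()>` instead of `bool`: `Break` stops the traversal (the old `true`) and `Continue` keeps walking (the old `false`), so the call site above asks `.is_break()`. A minimal stand-alone sketch of the same convention (the compiler itself uses the then-unstable `CONTINUE`/`BREAK` constants):

```rust
use std::ops::ControlFlow;

// Return `Break` to stop early, `Continue(())` to keep scanning.
fn first_even(xs: &[i32]) -> ControlFlow<i32> {
    for &x in xs {
        if x % 2 == 0 {
            return ControlFlow::Break(x);
        }
    }
    ControlFlow::Continue(())
}

fn main() {
    assert_eq!(first_even(&[1, 3, 4]), ControlFlow::Break(4));
    assert!(first_even(&[1, 3, 5]).is_continue());
}
```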
@@ -1493,7 +1427,7 @@
             .collect()
     }
 
-    fn impl_implied_bounds(&self, impl_def_id: DefId, span: Span) -> Vec<Ty<'tcx>> {
+    pub(super) fn impl_implied_bounds(&self, impl_def_id: DefId, span: Span) -> Vec<Ty<'tcx>> {
         match self.tcx.impl_trait_ref(impl_def_id) {
             Some(ref trait_ref) => {
                 // Trait impl: take implied bounds from all types that
diff --git a/compiler/rustc_typeck/src/check/writeback.rs b/compiler/rustc_typeck/src/check/writeback.rs
index 5363702..9c22459 100644
--- a/compiler/rustc_typeck/src/check/writeback.rs
+++ b/compiler/rustc_typeck/src/check/writeback.rs
@@ -70,6 +70,9 @@
         debug!("used_trait_imports({:?}) = {:?}", item_def_id, used_trait_imports);
         wbcx.typeck_results.used_trait_imports = used_trait_imports;
 
+        wbcx.typeck_results.treat_byte_string_as_slice =
+            mem::take(&mut self.typeck_results.borrow_mut().treat_byte_string_as_slice);
+
         wbcx.typeck_results.closure_captures =
             mem::take(&mut self.typeck_results.borrow_mut().closure_captures);
 
diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
index be77d04..ce157f8 100644
--- a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
+++ b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
@@ -2,8 +2,9 @@
 use rustc_hir as hir;
 use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
 use rustc_hir::itemlikevisit::ItemLikeVisitor;
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{self, TyCtxt};
 use rustc_trait_selection::traits::{self, SkipLeakCheck};
+use smallvec::SmallVec;
 
 pub fn crate_inherent_impls_overlap_check(tcx: TyCtxt<'_>, crate_num: CrateNum) {
     assert_eq!(crate_num, LOCAL_CRATE);
@@ -18,9 +19,18 @@
 impl InherentOverlapChecker<'tcx> {
     /// Checks whether any associated items in impls 1 and 2 share the same identifier and
     /// namespace.
-    fn impls_have_common_items(&self, impl1: DefId, impl2: DefId) -> bool {
-        let impl_items1 = self.tcx.associated_items(impl1);
-        let impl_items2 = self.tcx.associated_items(impl2);
+    fn impls_have_common_items(
+        &self,
+        impl_items1: &ty::AssociatedItems<'_>,
+        impl_items2: &ty::AssociatedItems<'_>,
+    ) -> bool {
+        let mut impl_items1 = &impl_items1;
+        let mut impl_items2 = &impl_items2;
+
+        // Performance optimization: iterate over the smaller list
+        if impl_items1.len() > impl_items2.len() {
+            std::mem::swap(&mut impl_items1, &mut impl_items2);
+        }
 
         for item1 in impl_items1.in_definition_order() {
             let collision = impl_items2.filter_by_name_unhygienic(item1.ident.name).any(|item2| {
@@ -113,9 +123,20 @@
                 let ty_def_id = self.tcx.hir().local_def_id(item.hir_id);
                 let impls = self.tcx.inherent_impls(ty_def_id);
 
-                for (i, &impl1_def_id) in impls.iter().enumerate() {
-                    for &impl2_def_id in &impls[(i + 1)..] {
-                        if self.impls_have_common_items(impl1_def_id, impl2_def_id) {
+                // If there is only one inherent impl block,
+                // there is nothing to check it against for overlap
+                if impls.len() <= 1 {
+                    return;
+                }
+
+                let impls_items = impls
+                    .iter()
+                    .map(|impl_def_id| (impl_def_id, self.tcx.associated_items(*impl_def_id)))
+                    .collect::<SmallVec<[_; 8]>>();
+
+                for (i, &(&impl1_def_id, impl_items1)) in impls_items.iter().enumerate() {
+                    for &(&impl2_def_id, impl_items2) in &impls_items[(i + 1)..] {
+                        if self.impls_have_common_items(impl_items1, impl_items2) {
                             self.check_for_overlapping_inherent_impls(impl1_def_id, impl2_def_id);
                         }
                     }
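The rewrite above precomputes `associated_items` once per impl and always iterates the shorter item list, probing the longer one by name. A small stand-alone sketch of that idea (not compiler code; `HashSet` stands in for the name-indexed lookup):

```rust
use std::collections::HashSet;

fn have_common_items(a: &[&str], b: &[&str]) -> bool {
    // Walk the smaller list and probe the larger one, so the cost is
    // O(min(m, n)) probes plus one pass to build the index.
    let (small, large) = if a.len() <= b.len() { (a, b) } else { (b, a) };
    let index: HashSet<&str> = large.iter().copied().collect();
    small.iter().any(|name| index.contains(name))
}

fn main() {
    assert!(have_common_items(&["new", "len"], &["len", "push", "pop"]));
    assert!(!have_common_items(&["new"], &["len", "push"]));
}
```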
diff --git a/compiler/rustc_typeck/src/coherence/orphan.rs b/compiler/rustc_typeck/src/coherence/orphan.rs
index 917fc56..b200996 100644
--- a/compiler/rustc_typeck/src/coherence/orphan.rs
+++ b/compiler/rustc_typeck/src/coherence/orphan.rs
@@ -110,7 +110,7 @@
                             )
                             .note(
                                 "implementing a foreign trait is only possible if at \
-                                    least one of the types for which is it implemented is local, \
+                                    least one of the types for which it is implemented is local, \
                                     and no uncovered type parameters appear before that first \
                                     local type",
                             )
@@ -135,7 +135,7 @@
                                 local type",
                                 param_ty,
                             )).note("implementing a foreign trait is only possible if at \
-                                    least one of the types for which is it implemented is local"
+                                    least one of the types for which it is implemented is local"
                             ).note("only traits defined in the current crate can be \
                                     implemented for a type parameter"
                             ).emit();
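For reference, a minimal example of the rule the reworded note describes: a foreign trait may only be implemented when at least one of the types it is implemented for is local (sketch using `std::fmt::Display` as the foreign trait):

```rust
use std::fmt;

// Allowed: `Display` is foreign, but `Local` is defined in this crate.
struct Local;

impl fmt::Display for Local {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "local")
    }
}

// By contrast, `impl fmt::Display for u32` in this crate would be rejected,
// with the note fixed above attached to the error.

fn main() {
    println!("{}", Local);
}
```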
diff --git a/compiler/rustc_typeck/src/collect.rs b/compiler/rustc_typeck/src/collect.rs
index 9aca112..b431de9 100644
--- a/compiler/rustc_typeck/src/collect.rs
+++ b/compiler/rustc_typeck/src/collect.rs
@@ -21,8 +21,8 @@
 use crate::errors;
 use crate::middle::resolve_lifetime as rl;
 use rustc_ast as ast;
-use rustc_ast::MetaItemKind;
-use rustc_attr::{list_contains_name, InlineAttr, OptimizeAttr};
+use rustc_ast::{MetaItemKind, NestedMetaItem};
+use rustc_attr::{list_contains_name, InlineAttr, InstructionSetAttr, OptimizeAttr};
 use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
 use rustc_errors::{struct_span_err, Applicability};
@@ -40,7 +40,7 @@
 use rustc_middle::ty::subst::InternalSubsts;
 use rustc_middle::ty::util::Discr;
 use rustc_middle::ty::util::IntTypeExt;
-use rustc_middle::ty::{self, AdtKind, Const, ToPolyTraitRef, Ty, TyCtxt};
+use rustc_middle::ty::{self, AdtKind, Const, DefIdTree, ToPolyTraitRef, Ty, TyCtxt};
 use rustc_middle::ty::{ReprOptions, ToPredicate, WithConstness};
 use rustc_session::config::SanitizerSet;
 use rustc_session::lint;
@@ -50,6 +50,9 @@
 use rustc_target::spec::abi;
 use rustc_trait_selection::traits::error_reporting::suggestions::NextTypeParamName;
 
+use std::ops::ControlFlow;
+
+mod item_bounds;
 mod type_of;
 
 struct OnlySelfBounds(bool);
@@ -68,12 +71,15 @@
     *providers = Providers {
         opt_const_param_of: type_of::opt_const_param_of,
         type_of: type_of::type_of,
+        item_bounds: item_bounds::item_bounds,
+        explicit_item_bounds: item_bounds::explicit_item_bounds,
         generics_of,
         predicates_of,
         predicates_defined_on,
         projection_ty_from_predicates,
         explicit_predicates_of,
         super_predicates_of,
+        trait_explicit_predicates_and_bounds,
         type_param_predicates,
         trait_def,
         adt_def,
@@ -222,6 +228,7 @@
                 hir::GenericParamKind::Const { .. } => {
                     let def_id = self.tcx.hir().local_def_id(param.hir_id);
                     self.tcx.ensure().type_of(def_id);
+                    // FIXME(const_generics:defaults)
                 }
             }
         }
@@ -699,6 +706,7 @@
         hir::ItemKind::OpaqueTy(..) => {
             tcx.ensure().generics_of(def_id);
             tcx.ensure().predicates_of(def_id);
+            tcx.ensure().explicit_item_bounds(def_id);
         }
         hir::ItemKind::TyAlias(..)
         | hir::ItemKind::Static(..)
@@ -707,8 +715,10 @@
             tcx.ensure().generics_of(def_id);
             tcx.ensure().type_of(def_id);
             tcx.ensure().predicates_of(def_id);
-            if let hir::ItemKind::Fn(..) = it.kind {
-                tcx.ensure().fn_sig(def_id);
+            match it.kind {
+                hir::ItemKind::Fn(..) => tcx.ensure().fn_sig(def_id),
+                hir::ItemKind::OpaqueTy(..) => tcx.ensure().item_bounds(def_id),
+                _ => (),
             }
         }
     }
@@ -729,15 +739,25 @@
             tcx.ensure().type_of(def_id);
         }
 
-        hir::TraitItemKind::Const(..) | hir::TraitItemKind::Type(_, Some(_)) => {
+        hir::TraitItemKind::Const(..) => {
             tcx.ensure().type_of(def_id);
-            // Account for `const C: _;` and `type T = _;`.
+            // Account for `const C: _;`.
+            let mut visitor = PlaceholderHirTyCollector::default();
+            visitor.visit_trait_item(trait_item);
+            placeholder_type_error(tcx, None, &[], visitor.0, false);
+        }
+
+        hir::TraitItemKind::Type(_, Some(_)) => {
+            tcx.ensure().item_bounds(def_id);
+            tcx.ensure().type_of(def_id);
+            // Account for `type T = _;`.
             let mut visitor = PlaceholderHirTyCollector::default();
             visitor.visit_trait_item(trait_item);
             placeholder_type_error(tcx, None, &[], visitor.0, false);
         }
 
         hir::TraitItemKind::Type(_, None) => {
+            tcx.ensure().item_bounds(def_id);
             // #74612: Visit and try to find bad placeholders
             // even if there is no concrete type.
             let mut visitor = PlaceholderHirTyCollector::default();
@@ -833,7 +853,6 @@
     parent_did: LocalDefId,
 ) -> ty::VariantDef {
     let mut seen_fields: FxHashMap<Ident, Span> = Default::default();
-    let hir_id = tcx.hir().local_def_id_to_hir_id(variant_did.unwrap_or(parent_did));
     let fields = def
         .fields()
         .iter()
@@ -850,11 +869,7 @@
                 seen_fields.insert(f.ident.normalize_to_macros_2_0(), f.span);
             }
 
-            ty::FieldDef {
-                did: fid.to_def_id(),
-                ident: f.ident,
-                vis: ty::Visibility::from_hir(&f.vis, hir_id, tcx),
-            }
+            ty::FieldDef { did: fid.to_def_id(), ident: f.ident, vis: tcx.visibility(fid) }
         })
         .collect();
     let recovered = match def {
@@ -1715,7 +1730,7 @@
 
 /// Returns a list of user-specified type predicates for the definition with ID `def_id`.
 /// N.B., this does not include any implied/inferred constraints.
-fn explicit_predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
+fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
     use rustc_hir::*;
 
     debug!("explicit_predicates_of(def_id={:?})", def_id);
@@ -1725,7 +1740,6 @@
 
     let mut is_trait = None;
     let mut is_default_impl_trait = None;
-    let mut is_trait_associated_type = None;
 
     let icx = ItemCtxt::new(tcx, def_id);
     let constness = icx.default_constness_for_trait_bounds();
@@ -1738,12 +1752,7 @@
     let mut predicates: FxIndexSet<(ty::Predicate<'_>, Span)> = FxIndexSet::default();
 
     let ast_generics = match node {
-        Node::TraitItem(item) => {
-            if let hir::TraitItemKind::Type(bounds, _) = item.kind {
-                is_trait_associated_type = Some((bounds, item.span));
-            }
-            &item.generics
-        }
+        Node::TraitItem(item) => &item.generics,
 
         Node::ImplItem(item) => &item.generics,
 
@@ -1761,44 +1770,38 @@
                 | ItemKind::Struct(_, ref generics)
                 | ItemKind::Union(_, ref generics) => generics,
 
-                ItemKind::Trait(_, _, ref generics, .., items) => {
-                    is_trait = Some((ty::TraitRef::identity(tcx, def_id), items));
+                ItemKind::Trait(_, _, ref generics, ..) => {
+                    is_trait = Some(ty::TraitRef::identity(tcx, def_id));
                     generics
                 }
                 ItemKind::TraitAlias(ref generics, _) => {
-                    is_trait = Some((ty::TraitRef::identity(tcx, def_id), &[]));
+                    is_trait = Some(ty::TraitRef::identity(tcx, def_id));
                     generics
                 }
                 ItemKind::OpaqueTy(OpaqueTy {
-                    ref bounds,
+                    bounds: _,
                     impl_trait_fn,
                     ref generics,
                     origin: _,
                 }) => {
-                    let bounds_predicates = ty::print::with_no_queries(|| {
-                        let substs = InternalSubsts::identity_for_item(tcx, def_id);
-                        let opaque_ty = tcx.mk_opaque(def_id, substs);
-
-                        // Collect the bounds, i.e., the `A + B + 'c` in `impl A + B + 'c`.
-                        let bounds = AstConv::compute_bounds(
-                            &icx,
-                            opaque_ty,
-                            bounds,
-                            SizedByDefault::Yes,
-                            tcx.def_span(def_id),
-                        );
-
-                        bounds.predicates(tcx, opaque_ty)
-                    });
                     if impl_trait_fn.is_some() {
-                        // opaque types
-                        return ty::GenericPredicates {
-                            parent: None,
-                            predicates: tcx.arena.alloc_from_iter(bounds_predicates),
-                        };
+                        // return-position impl trait
+                        //
+                        // We don't inherit predicates from the parent here:
+                        // If we have, say `fn f<'a, T: 'a>() -> impl Sized {}`
+                        // then the return type is `f::<'static, T>::{{opaque}}`.
+                        //
+                        // If we inherited the predicates of `f` then we would
+                        // require that `T: 'static` to show that the return
+                        // type is well-formed.
+                        //
+                        // The only way to have something with this opaque type
+                        // is from the return type of the containing function,
+                        // which will ensure that the function's predicates
+                        // hold.
+                        return ty::GenericPredicates { parent: None, predicates: &[] };
                     } else {
-                        // named opaque types
-                        predicates.extend(bounds_predicates);
+                        // type-alias impl trait
                         generics
                     }
                 }
@@ -1824,7 +1827,7 @@
     // and the explicit where-clauses, but to get the full set of predicates
     // on a trait we need to add in the supertrait bounds and bounds found on
     // associated types.
-    if let Some((_trait_ref, _)) = is_trait {
+    if let Some(_trait_ref) = is_trait {
         predicates.extend(tcx.super_predicates_of(def_id).predicates.iter().cloned());
     }
 
@@ -1991,24 +1994,6 @@
         }
     }
 
-    // Add predicates from associated type bounds (`type X: Bound`)
-    if tcx.features().generic_associated_types {
-        // New behavior: bounds declared on associate type are predicates of that
-        // associated type. Not the default because it needs more testing.
-        if let Some((bounds, span)) = is_trait_associated_type {
-            let projection_ty =
-                tcx.mk_projection(def_id, InternalSubsts::identity_for_item(tcx, def_id));
-
-            predicates.extend(associated_item_bounds(tcx, def_id, bounds, projection_ty, span))
-        }
-    } else if let Some((self_trait_ref, trait_items)) = is_trait {
-        // Current behavior: bounds declared on associate type are predicates
-        // of its parent trait.
-        predicates.extend(trait_items.iter().flat_map(|trait_item_ref| {
-            trait_associated_item_predicates(tcx, def_id, self_trait_ref, trait_item_ref)
-        }))
-    }
-
     if tcx.features().const_evaluatable_checked {
         predicates.extend(const_evaluatable_predicates_of(tcx, def_id.expect_local()));
     }
@@ -2077,14 +2062,14 @@
             }
 
             impl<'a, 'tcx> TypeVisitor<'tcx> for TyAliasVisitor<'a, 'tcx> {
-                fn visit_const(&mut self, ct: &'tcx Const<'tcx>) -> bool {
+                fn visit_const(&mut self, ct: &'tcx Const<'tcx>) -> ControlFlow<()> {
                     if let ty::ConstKind::Unevaluated(def, substs, None) = ct.val {
                         self.preds.insert((
                             ty::PredicateAtom::ConstEvaluatable(def, substs).to_predicate(self.tcx),
                             self.span,
                         ));
                     }
-                    false
+                    ControlFlow::CONTINUE
                 }
             }
 
@@ -2107,29 +2092,92 @@
     if let hir::Node::Item(item) = node {
         if let hir::ItemKind::Impl { ref of_trait, ref self_ty, .. } = item.kind {
             if let Some(of_trait) = of_trait {
-                warn!("const_evaluatable_predicates_of({:?}): visit impl trait_ref", def_id);
+                debug!("const_evaluatable_predicates_of({:?}): visit impl trait_ref", def_id);
                 collector.visit_trait_ref(of_trait);
             }
 
-            warn!("const_evaluatable_predicates_of({:?}): visit_self_ty", def_id);
+            debug!("const_evaluatable_predicates_of({:?}): visit_self_ty", def_id);
             collector.visit_ty(self_ty);
         }
     }
 
     if let Some(generics) = node.generics() {
-        warn!("const_evaluatable_predicates_of({:?}): visit_generics", def_id);
+        debug!("const_evaluatable_predicates_of({:?}): visit_generics", def_id);
         collector.visit_generics(generics);
     }
 
     if let Some(fn_sig) = tcx.hir().fn_sig_by_hir_id(hir_id) {
-        warn!("const_evaluatable_predicates_of({:?}): visit_fn_decl", def_id);
+        debug!("const_evaluatable_predicates_of({:?}): visit_fn_decl", def_id);
         collector.visit_fn_decl(fn_sig.decl);
     }
-    warn!("const_evaluatable_predicates_of({:?}) = {:?}", def_id, collector.preds);
+    debug!("const_evaluatable_predicates_of({:?}) = {:?}", def_id, collector.preds);
 
     collector.preds
 }
 
+fn trait_explicit_predicates_and_bounds(
+    tcx: TyCtxt<'_>,
+    def_id: LocalDefId,
+) -> ty::GenericPredicates<'_> {
+    assert_eq!(tcx.def_kind(def_id), DefKind::Trait);
+    gather_explicit_predicates_of(tcx, def_id.to_def_id())
+}
+
+fn explicit_predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
+    if let DefKind::Trait = tcx.def_kind(def_id) {
+        // Remove bounds on associated types from the predicates, they will be
+        // returned by `explicit_item_bounds`.
+        let predicates_and_bounds = tcx.trait_explicit_predicates_and_bounds(def_id.expect_local());
+        let trait_identity_substs = InternalSubsts::identity_for_item(tcx, def_id);
+
+        let is_assoc_item_ty = |ty: Ty<'_>| {
+            // For a predicate from a where clause to become a bound on an
+            // associated type:
+            // * It must use the identity substs of the item.
+            //     * Since any generic parameters on the item are not in scope,
+            //       this means that the item is not a GAT, and its identity
+            //       substs are the same as the trait's.
+            // * It must be an associated type for this trait (*not* a
+            //   supertrait).
+            if let ty::Projection(projection) = ty.kind() {
+                if projection.substs == trait_identity_substs
+                    && tcx.associated_item(projection.item_def_id).container.id() == def_id
+                {
+                    true
+                } else {
+                    false
+                }
+            } else {
+                false
+            }
+        };
+
+        let predicates: Vec<_> = predicates_and_bounds
+            .predicates
+            .iter()
+            .copied()
+            .filter(|(pred, _)| match pred.skip_binders() {
+                ty::PredicateAtom::Trait(tr, _) => !is_assoc_item_ty(tr.self_ty()),
+                ty::PredicateAtom::Projection(proj) => {
+                    !is_assoc_item_ty(proj.projection_ty.self_ty())
+                }
+                ty::PredicateAtom::TypeOutlives(outlives) => !is_assoc_item_ty(outlives.0),
+                _ => true,
+            })
+            .collect();
+        if predicates.len() == predicates_and_bounds.predicates.len() {
+            predicates_and_bounds
+        } else {
+            ty::GenericPredicates {
+                parent: predicates_and_bounds.parent,
+                predicates: tcx.arena.alloc_slice(&predicates),
+            }
+        }
+    } else {
+        gather_explicit_predicates_of(tcx, def_id)
+    }
+}
+
 fn projection_ty_from_predicates(
     tcx: TyCtxt<'tcx>,
     key: (
@@ -2152,55 +2200,6 @@
     projection_ty
 }
 
-fn trait_associated_item_predicates(
-    tcx: TyCtxt<'tcx>,
-    def_id: DefId,
-    self_trait_ref: ty::TraitRef<'tcx>,
-    trait_item_ref: &hir::TraitItemRef,
-) -> Vec<(ty::Predicate<'tcx>, Span)> {
-    let trait_item = tcx.hir().trait_item(trait_item_ref.id);
-    let item_def_id = tcx.hir().local_def_id(trait_item_ref.id.hir_id);
-    let bounds = match trait_item.kind {
-        hir::TraitItemKind::Type(ref bounds, _) => bounds,
-        _ => return Vec::new(),
-    };
-
-    if !tcx.generics_of(item_def_id).params.is_empty() {
-        // For GATs the substs provided to the mk_projection call below are
-        // wrong. We should emit a feature gate error if we get here so skip
-        // this type.
-        tcx.sess.delay_span_bug(trait_item.span, "gats used without feature gate");
-        return Vec::new();
-    }
-
-    let assoc_ty = tcx.mk_projection(
-        tcx.hir().local_def_id(trait_item.hir_id).to_def_id(),
-        self_trait_ref.substs,
-    );
-
-    associated_item_bounds(tcx, def_id, bounds, assoc_ty, trait_item.span)
-}
-
-fn associated_item_bounds(
-    tcx: TyCtxt<'tcx>,
-    def_id: DefId,
-    bounds: &'tcx [hir::GenericBound<'tcx>],
-    projection_ty: Ty<'tcx>,
-    span: Span,
-) -> Vec<(ty::Predicate<'tcx>, Span)> {
-    let bounds = AstConv::compute_bounds(
-        &ItemCtxt::new(tcx, def_id),
-        projection_ty,
-        bounds,
-        SizedByDefault::Yes,
-        span,
-    );
-
-    let predicates = bounds.predicates(tcx, projection_ty);
-
-    predicates
-}
-
 /// Converts a specific `GenericBound` from the AST into a set of
 /// predicates that apply to the self type. A vector is returned
 /// because this can be anywhere from zero predicates (`T: ?Sized` adds no
@@ -2412,6 +2411,7 @@
                 Some(sym::movbe_target_feature) => rust_features.movbe_target_feature,
                 Some(sym::rtm_target_feature) => rust_features.rtm_target_feature,
                 Some(sym::f16c_target_feature) => rust_features.f16c_target_feature,
+                Some(sym::ermsb_target_feature) => rust_features.ermsb_target_feature,
                 Some(name) => bug!("unknown target feature gate {}", name),
                 None => true,
             };
@@ -2553,7 +2553,7 @@
                 )
                 .emit();
             }
-            if !tcx.sess.target.target.llvm_target.contains("thumbv8m") {
+            if !tcx.sess.target.llvm_target.contains("thumbv8m") {
                 struct_span_err!(tcx.sess, attr.span, E0775, "`#[cmse_nonsecure_entry]` is only valid for targets with the TrustZone-M extension")
                     .emit();
             }
@@ -2645,6 +2645,75 @@
                     }
                 }
             }
+        } else if tcx.sess.check_name(attr, sym::instruction_set) {
+            codegen_fn_attrs.instruction_set = match attr.meta().map(|i| i.kind) {
+                Some(MetaItemKind::List(ref items)) => match items.as_slice() {
+                    [NestedMetaItem::MetaItem(set)] => {
+                        let segments =
+                            set.path.segments.iter().map(|x| x.ident.name).collect::<Vec<_>>();
+                        match segments.as_slice() {
+                            [sym::arm, sym::a32] | [sym::arm, sym::t32] => {
+                                if !tcx.sess.target.has_thumb_interworking {
+                                    struct_span_err!(
+                                        tcx.sess.diagnostic(),
+                                        attr.span,
+                                        E0779,
+                                        "target does not support `#[instruction_set]`"
+                                    )
+                                    .emit();
+                                    None
+                                } else if segments[1] == sym::a32 {
+                                    Some(InstructionSetAttr::ArmA32)
+                                } else if segments[1] == sym::t32 {
+                                    Some(InstructionSetAttr::ArmT32)
+                                } else {
+                                    unreachable!()
+                                }
+                            }
+                            _ => {
+                                struct_span_err!(
+                                    tcx.sess.diagnostic(),
+                                    attr.span,
+                                    E0779,
+                                    "invalid instruction set specified",
+                                )
+                                .emit();
+                                None
+                            }
+                        }
+                    }
+                    [] => {
+                        struct_span_err!(
+                            tcx.sess.diagnostic(),
+                            attr.span,
+                            E0778,
+                            "`#[instruction_set]` requires an argument"
+                        )
+                        .emit();
+                        None
+                    }
+                    _ => {
+                        struct_span_err!(
+                            tcx.sess.diagnostic(),
+                            attr.span,
+                            E0779,
+                            "cannot specify more than one instruction set"
+                        )
+                        .emit();
+                        None
+                    }
+                },
+                _ => {
+                    struct_span_err!(
+                        tcx.sess.diagnostic(),
+                        attr.span,
+                        E0778,
+                        "must specify an instruction set"
+                    )
+                    .emit();
+                    None
+                }
+            };
         }
     }
 
@@ -2720,6 +2789,14 @@
         }
     });
 
+    // #73631: closures inherit `#[target_feature]` annotations
+    if tcx.features().target_feature_11 && tcx.is_closure(id) {
+        let owner_id = tcx.parent(id).expect("closure should have a parent");
+        codegen_fn_attrs
+            .target_features
+            .extend(tcx.codegen_fn_attrs(owner_id).target_features.iter().copied())
+    }
+
     // If a function uses #[target_feature] it can't be inlined into general
     // purpose functions as they wouldn't have the right target features
     // enabled. For that reason we also forbid #[inline(always)] as it can't be
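The new `#[instruction_set]` handling above accepts exactly one of `arm::a32` or `arm::t32` and requires a target with Thumb interworking (errors E0778/E0779 otherwise). A sketch of the attribute surface; the `isa_attribute` feature-gate name is an assumption, and the code only makes sense for an ARM-family target:

```rust
// Nightly-only sketch; meaningful only on ARM targets with Thumb interworking.
#![feature(isa_attribute)]

#[instruction_set(arm::a32)]
fn in_arm_mode() {}

#[instruction_set(arm::t32)]
fn in_thumb_mode() {}

fn main() {
    in_arm_mode();
    in_thumb_mode();
}
```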
diff --git a/compiler/rustc_typeck/src/collect/item_bounds.rs b/compiler/rustc_typeck/src/collect/item_bounds.rs
new file mode 100644
index 0000000..e596dd1
--- /dev/null
+++ b/compiler/rustc_typeck/src/collect/item_bounds.rs
@@ -0,0 +1,111 @@
+use super::ItemCtxt;
+use crate::astconv::{AstConv, SizedByDefault};
+use rustc_hir as hir;
+use rustc_infer::traits::util;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::def_id::DefId;
+use rustc_span::Span;
+
+/// For associated types we include both bounds written on the type
+/// (`type X: Trait`) and predicates from the trait: `where Self::X: Trait`.
+///
+/// Note that this filtering is done with the items identity substs to
+/// simplify checking that these bounds are met in impls. This means that
+/// a bound such as `for<'b> <Self as X<'b>>::U: Clone` can't be used, as in
+/// `hr-associated-type-bound-1.rs`.
+fn associated_type_bounds<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    assoc_item_def_id: DefId,
+    bounds: &'tcx [hir::GenericBound<'tcx>],
+    span: Span,
+) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+    let item_ty = tcx.mk_projection(
+        assoc_item_def_id,
+        InternalSubsts::identity_for_item(tcx, assoc_item_def_id),
+    );
+
+    let bounds = AstConv::compute_bounds(
+        &ItemCtxt::new(tcx, assoc_item_def_id),
+        item_ty,
+        bounds,
+        SizedByDefault::Yes,
+        span,
+    );
+
+    let trait_def_id = tcx.associated_item(assoc_item_def_id).container.id();
+    let trait_predicates = tcx.trait_explicit_predicates_and_bounds(trait_def_id.expect_local());
+
+    let bounds_from_parent =
+        trait_predicates.predicates.iter().copied().filter(|(pred, _)| match pred.skip_binders() {
+            ty::PredicateAtom::Trait(tr, _) => tr.self_ty() == item_ty,
+            ty::PredicateAtom::Projection(proj) => proj.projection_ty.self_ty() == item_ty,
+            ty::PredicateAtom::TypeOutlives(outlives) => outlives.0 == item_ty,
+            _ => false,
+        });
+
+    let all_bounds = tcx
+        .arena
+        .alloc_from_iter(bounds.predicates(tcx, item_ty).into_iter().chain(bounds_from_parent));
+    debug!("associated_type_bounds({}) = {:?}", tcx.def_path_str(assoc_item_def_id), all_bounds);
+    all_bounds
+}
+
+/// Opaque types don't inherit bounds from their parent: for return-position
+/// impl trait it isn't possible to write a suitable predicate on the
+/// containing function, and for type-alias impl trait we don't have a
+/// backwards-compatibility issue.
+fn opaque_type_bounds<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    opaque_def_id: DefId,
+    bounds: &'tcx [hir::GenericBound<'tcx>],
+    span: Span,
+) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+    ty::print::with_no_queries(|| {
+        let item_ty =
+            tcx.mk_opaque(opaque_def_id, InternalSubsts::identity_for_item(tcx, opaque_def_id));
+
+        let bounds = AstConv::compute_bounds(
+            &ItemCtxt::new(tcx, opaque_def_id),
+            item_ty,
+            bounds,
+            SizedByDefault::Yes,
+            span,
+        )
+        .predicates(tcx, item_ty);
+
+        debug!("opaque_type_bounds({}) = {:?}", tcx.def_path_str(opaque_def_id), bounds);
+
+        tcx.arena.alloc_slice(&bounds)
+    })
+}
+
+pub(super) fn explicit_item_bounds(
+    tcx: TyCtxt<'_>,
+    def_id: DefId,
+) -> &'_ [(ty::Predicate<'_>, Span)] {
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+    match tcx.hir().get(hir_id) {
+        hir::Node::TraitItem(hir::TraitItem {
+            kind: hir::TraitItemKind::Type(bounds, _),
+            span,
+            ..
+        }) => associated_type_bounds(tcx, def_id, bounds, *span),
+        hir::Node::Item(hir::Item {
+            kind: hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, .. }),
+            span,
+            ..
+        }) => opaque_type_bounds(tcx, def_id, bounds, *span),
+        _ => bug!("item_bounds called on {:?}", def_id),
+    }
+}
+
+pub(super) fn item_bounds(tcx: TyCtxt<'_>, def_id: DefId) -> &'_ ty::List<ty::Predicate<'_>> {
+    tcx.mk_predicates(
+        util::elaborate_predicates(
+            tcx,
+            tcx.explicit_item_bounds(def_id).iter().map(|&(bound, _span)| bound),
+        )
+        .map(|obligation| obligation.predicate),
+    )
+}
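To make the two bound sources concrete, a small user-level sketch of what `explicit_item_bounds` collects for an associated type: the bound written on the type itself plus any trait where-clause whose self type is that projection (with identity substs):

```rust
use std::fmt::Debug;

// Both `Clone` (written on the associated type) and `Debug` (a where clause
// whose self type is `Self::Assoc`) become item bounds of `Assoc`, so an impl
// must choose a type satisfying both.
trait Tr
where
    Self::Assoc: Debug,
{
    type Assoc: Clone;
}

impl Tr for u8 {
    type Assoc = Vec<u32>;
}

fn main() {
    let v: <u8 as Tr>::Assoc = vec![1, 2, 3];
    println!("{:?}", v.clone());
}
```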
diff --git a/compiler/rustc_typeck/src/collect/type_of.rs b/compiler/rustc_typeck/src/collect/type_of.rs
index f6dca4a..61d1efc 100644
--- a/compiler/rustc_typeck/src/collect/type_of.rs
+++ b/compiler/rustc_typeck/src/collect/type_of.rs
@@ -118,12 +118,16 @@
                         tcx.sess.delay_span_bug(tcx.def_span(def_id), "anon const with Res::Err");
                         return None;
                     }
-                    _ => span_bug!(
-                        DUMMY_SP,
-                        "unexpected anon const res {:?} in path: {:?}",
-                        res,
-                        path,
-                    ),
+                    _ => {
+                        // If the user tries to specify generics on a type that does not take them,
+                        // e.g. `usize<T>`, we may hit this branch, in which case we treat it as if
+                        // no arguments have been passed. An error should already have been emitted.
+                        tcx.sess.delay_span_bug(
+                            tcx.def_span(def_id),
+                            &format!("unexpected anon const res {:?} in path: {:?}", res, path),
+                        );
+                        return None;
+                    }
                 };
 
                 generics
@@ -315,6 +319,12 @@
                     tcx.types.usize
                 }
 
+                Node::Expr(&Expr { kind: ExprKind::ConstBlock(ref anon_const), .. })
+                    if anon_const.hir_id == hir_id =>
+                {
+                    tcx.typeck(def_id).node_type(anon_const.hir_id)
+                }
+
                 Node::Variant(Variant { disr_expr: Some(ref e), .. }) if e.hir_id == hir_id => tcx
                     .adt_def(tcx.hir().get_parent_did(hir_id).to_def_id())
                     .repr
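The new `ExprKind::ConstBlock` arm reads the type of an inline `const` block's anonymous constant from the enclosing body's typeck results. A minimal sketch of such a block; the `inline_const` feature-gate name is an assumption (the feature was nightly-only at the time):

```rust
#![feature(inline_const)]

fn main() {
    // The anonymous const inside `const { ... }` is the node whose type the
    // new `type_of` arm looks up via the enclosing body's typeck results.
    let n = const { 40 + 2 };
    assert_eq!(n, 42);
}
```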
diff --git a/compiler/rustc_typeck/src/constrained_generic_params.rs b/compiler/rustc_typeck/src/constrained_generic_params.rs
index 09b5a9b..bae5bde 100644
--- a/compiler/rustc_typeck/src/constrained_generic_params.rs
+++ b/compiler/rustc_typeck/src/constrained_generic_params.rs
@@ -2,6 +2,7 @@
 use rustc_middle::ty::fold::{TypeFoldable, TypeVisitor};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::source_map::Span;
+use std::ops::ControlFlow;
 
 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
 pub struct Parameter(pub u32);
@@ -56,11 +57,11 @@
 }
 
 impl<'tcx> TypeVisitor<'tcx> for ParameterCollector {
-    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> {
         match *t.kind() {
             ty::Projection(..) | ty::Opaque(..) if !self.include_nonconstraining => {
                 // projections are not injective
-                return false;
+                return ControlFlow::CONTINUE;
             }
             ty::Param(data) => {
                 self.parameters.push(Parameter::from(data));
@@ -71,14 +72,14 @@
         t.super_visit_with(self)
     }
 
-    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> {
         if let ty::ReEarlyBound(data) = *r {
             self.parameters.push(Parameter::from(data));
         }
-        false
+        ControlFlow::CONTINUE
     }
 
-    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> {
         match c.val {
             ty::ConstKind::Unevaluated(..) if !self.include_nonconstraining => {
                 // Constant expressions are not injective
diff --git a/compiler/rustc_typeck/src/expr_use_visitor.rs b/compiler/rustc_typeck/src/expr_use_visitor.rs
index e16f26c..57bd89b 100644
--- a/compiler/rustc_typeck/src/expr_use_visitor.rs
+++ b/compiler/rustc_typeck/src/expr_use_visitor.rs
@@ -27,14 +27,31 @@
 /// employing the ExprUseVisitor.
 pub trait Delegate<'tcx> {
     // The value found at `place` is either copied or moved, depending
-    // on mode.
-    fn consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, mode: ConsumeMode);
+    // on `mode`, where `diag_expr_id` is the id used for diagnostics for `place`.
+    //
+    // The parameter `diag_expr_id` indicates the HIR id that ought to be used for
+    // diagnostics. Around pattern matching such as `let pat = expr`, the diagnostic
+    // id will be the id of the expression `expr` but the place itself will have
+    // the id of the binding in the pattern `pat`.
+    fn consume(
+        &mut self,
+        place_with_id: &PlaceWithHirId<'tcx>,
+        diag_expr_id: hir::HirId,
+        mode: ConsumeMode,
+    );
 
     // The value found at `place` is being borrowed with kind `bk`.
-    fn borrow(&mut self, place_with_id: &PlaceWithHirId<'tcx>, bk: ty::BorrowKind);
+    // `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
+    fn borrow(
+        &mut self,
+        place_with_id: &PlaceWithHirId<'tcx>,
+        diag_expr_id: hir::HirId,
+        bk: ty::BorrowKind,
+    );
 
-    // The path at `place_with_id` is being assigned to.
-    fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>);
+    // The path at `assignee_place` is being assigned to.
+    // `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
+    fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId);
 }
 
 #[derive(Copy, Clone, PartialEq, Debug)]
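A user-level sketch of why the extra `diag_expr_id` matters: for `let pat = upvar` inside a closure, the place is tied to the binding in the pattern, but diagnostics read better when they point at the right-hand side expression, which is the id the new parameter carries:

```rust
fn main() {
    let s = String::from("captured");
    let f = move || {
        // The move of `s` happens here; the place refers to the binding
        // `taken`, while `diag_expr_id` lets capture diagnostics point at the
        // expression `s` on the right-hand side instead.
        let taken = s;
        taken.len()
    };
    assert_eq!(f(), 8);
}
```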
@@ -116,11 +133,11 @@
         self.mc.tcx()
     }
 
-    fn delegate_consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>) {
+    fn delegate_consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
         debug!("delegate_consume(place_with_id={:?})", place_with_id);
 
         let mode = copy_or_move(&self.mc, place_with_id);
-        self.delegate.consume(place_with_id, mode);
+        self.delegate.consume(place_with_id, diag_expr_id, mode);
     }
 
     fn consume_exprs(&mut self, exprs: &[hir::Expr<'_>]) {
@@ -133,13 +150,13 @@
         debug!("consume_expr(expr={:?})", expr);
 
         let place_with_id = return_if_err!(self.mc.cat_expr(expr));
-        self.delegate_consume(&place_with_id);
+        self.delegate_consume(&place_with_id, place_with_id.hir_id);
         self.walk_expr(expr);
     }
 
     fn mutate_expr(&mut self, expr: &hir::Expr<'_>) {
         let place_with_id = return_if_err!(self.mc.cat_expr(expr));
-        self.delegate.mutate(&place_with_id);
+        self.delegate.mutate(&place_with_id, place_with_id.hir_id);
         self.walk_expr(expr);
     }
 
@@ -147,7 +164,7 @@
         debug!("borrow_expr(expr={:?}, bk={:?})", expr, bk);
 
         let place_with_id = return_if_err!(self.mc.cat_expr(expr));
-        self.delegate.borrow(&place_with_id, bk);
+        self.delegate.borrow(&place_with_id, place_with_id.hir_id, bk);
 
         self.walk_expr(expr)
     }
@@ -258,7 +275,10 @@
                 self.consume_exprs(&ia.inputs_exprs);
             }
 
-            hir::ExprKind::Continue(..) | hir::ExprKind::Lit(..) | hir::ExprKind::Err => {}
+            hir::ExprKind::Continue(..)
+            | hir::ExprKind::Lit(..)
+            | hir::ExprKind::ConstBlock(..)
+            | hir::ExprKind::Err => {}
 
             hir::ExprKind::Loop(ref blk, _, _) => {
                 self.walk_block(blk);
@@ -401,7 +421,7 @@
                             with_field.ty(self.tcx(), substs),
                             ProjectionKind::Field(f_index as u32, VariantIdx::new(0)),
                         );
-                        self.delegate_consume(&field_place);
+                        self.delegate_consume(&field_place, field_place.hir_id);
                     }
                 }
             }
@@ -433,7 +453,7 @@
                 adjustment::Adjust::NeverToAny | adjustment::Adjust::Pointer(_) => {
                     // Creating a closure/fn-pointer or unsizing consumes
                     // the input and stores it into the resulting rvalue.
-                    self.delegate_consume(&place_with_id);
+                    self.delegate_consume(&place_with_id, place_with_id.hir_id);
                 }
 
                 adjustment::Adjust::Deref(None) => {}
@@ -445,7 +465,7 @@
                 // this is an autoref of `x`.
                 adjustment::Adjust::Deref(Some(ref deref)) => {
                     let bk = ty::BorrowKind::from_mutbl(deref.mutbl);
-                    self.delegate.borrow(&place_with_id, bk);
+                    self.delegate.borrow(&place_with_id, place_with_id.hir_id, bk);
                 }
 
                 adjustment::Adjust::Borrow(ref autoref) => {
@@ -473,13 +493,17 @@
 
         match *autoref {
             adjustment::AutoBorrow::Ref(_, m) => {
-                self.delegate.borrow(base_place, ty::BorrowKind::from_mutbl(m.into()));
+                self.delegate.borrow(
+                    base_place,
+                    base_place.hir_id,
+                    ty::BorrowKind::from_mutbl(m.into()),
+                );
             }
 
             adjustment::AutoBorrow::RawPtr(m) => {
                 debug!("walk_autoref: expr.hir_id={} base_place={:?}", expr.hir_id, base_place);
 
-                self.delegate.borrow(base_place, ty::BorrowKind::from_mutbl(m));
+                self.delegate.borrow(base_place, base_place.hir_id, ty::BorrowKind::from_mutbl(m));
             }
         }
     }
@@ -522,19 +546,22 @@
                     // binding being produced.
                     let def = Res::Local(canonical_id);
                     if let Ok(ref binding_place) = mc.cat_res(pat.hir_id, pat.span, pat_ty, def) {
-                        delegate.mutate(binding_place);
+                        delegate.mutate(binding_place, binding_place.hir_id);
                     }
 
                     // It is also a borrow or copy/move of the value being matched.
+                    // For a pattern like `let pat = upvar`, don't use the span
+                    // of the pattern, as this just looks confusing; instead, use
+                    // the span of the discriminant.
                     match bm {
                         ty::BindByReference(m) => {
                             let bk = ty::BorrowKind::from_mutbl(m);
-                            delegate.borrow(place, bk);
+                            delegate.borrow(place, discr_place.hir_id, bk);
                         }
                         ty::BindByValue(..) => {
-                            let mode = copy_or_move(mc, place);
+                            let mode = copy_or_move(mc, &place);
                             debug!("walk_pat binding consuming pat");
-                            delegate.consume(place, mode);
+                            delegate.consume(place, discr_place.hir_id, mode);
                         }
                     }
                 }
@@ -561,10 +588,14 @@
                 match upvar_capture {
                     ty::UpvarCapture::ByValue(_) => {
                         let mode = copy_or_move(&self.mc, &captured_place);
-                        self.delegate.consume(&captured_place, mode);
+                        self.delegate.consume(&captured_place, captured_place.hir_id, mode);
                     }
                     ty::UpvarCapture::ByRef(upvar_borrow) => {
-                        self.delegate.borrow(&captured_place, upvar_borrow.kind);
+                        self.delegate.borrow(
+                            &captured_place,
+                            captured_place.hir_id,
+                            upvar_borrow.kind,
+                        );
                     }
                 }
             }
diff --git a/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs b/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs
index 60b9467..4cf3efc 100644
--- a/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs
+++ b/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs
@@ -337,6 +337,7 @@
             infcx,
             tcx.param_env(impl1_def_id),
             tcx.hir().local_def_id_to_hir_id(impl1_def_id),
+            0,
             arg,
             span,
         ) {
diff --git a/compiler/rustc_typeck/src/lib.rs b/compiler/rustc_typeck/src/lib.rs
index 21fb92e..3090409 100644
--- a/compiler/rustc_typeck/src/lib.rs
+++ b/compiler/rustc_typeck/src/lib.rs
@@ -56,6 +56,7 @@
 */
 
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(array_value_iter)]
 #![feature(bool_to_option)]
 #![feature(box_syntax)]
 #![feature(crate_visibility_modifier)]
@@ -65,6 +66,7 @@
 #![feature(try_blocks)]
 #![feature(never_type)]
 #![feature(slice_partition_dedup)]
+#![feature(control_flow_enum)]
 #![recursion_limit = "256"]
 
 #[macro_use]
@@ -316,7 +318,7 @@
                 }
             }
 
-            let se_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
+            let se_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
                 [tcx.types.isize, tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8))].iter().cloned(),
                 tcx.types.isize,
                 false,
diff --git a/compiler/rustc_typeck/src/mem_categorization.rs b/compiler/rustc_typeck/src/mem_categorization.rs
index 04ead74..f6ac7aa 100644
--- a/compiler/rustc_typeck/src/mem_categorization.rs
+++ b/compiler/rustc_typeck/src/mem_categorization.rs
@@ -370,6 +370,7 @@
             | hir::ExprKind::Loop(..)
             | hir::ExprKind::Match(..)
             | hir::ExprKind::Lit(..)
+            | hir::ExprKind::ConstBlock(..)
             | hir::ExprKind::Break(..)
             | hir::ExprKind::Continue(..)
             | hir::ExprKind::Struct(..)
diff --git a/compiler/rustc_typeck/src/variance/mod.rs b/compiler/rustc_typeck/src/variance/mod.rs
index a893f69..1565efb 100644
--- a/compiler/rustc_typeck/src/variance/mod.rs
+++ b/compiler/rustc_typeck/src/variance/mod.rs
@@ -4,7 +4,7 @@
 //! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/variance.html
 
 use hir::Node;
-use rustc_arena::TypedArena;
+use rustc_arena::DroplessArena;
 use rustc_hir as hir;
 use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
 use rustc_middle::ty::query::Providers;
@@ -32,8 +32,8 @@
 
 fn crate_variances(tcx: TyCtxt<'_>, crate_num: CrateNum) -> CrateVariancesMap<'_> {
     assert_eq!(crate_num, LOCAL_CRATE);
-    let mut arena = TypedArena::default();
-    let terms_cx = terms::determine_parameters_to_be_inferred(tcx, &mut arena);
+    let arena = DroplessArena::default();
+    let terms_cx = terms::determine_parameters_to_be_inferred(tcx, &arena);
     let constraints_cx = constraints::add_constraints_from_crate(terms_cx);
     solve::solve_constraints(constraints_cx)
 }
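The switch from `TypedArena<VarianceTerm>` to `DroplessArena` is also why the `&mut` on the arena parameter disappears: `DroplessArena::alloc` takes `&self` and can store values of any type that needs no `Drop`. A hedged sketch of that usage, assuming the in-tree `rustc_arena` crate is importable (it is not published on crates.io under that name):

    use rustc_arena::DroplessArena;

    // Values stored in a DroplessArena must not need Drop; a plain
    // tuple struct like this qualifies.
    struct VarianceTerm(u32);

    fn build<'a>(arena: &'a DroplessArena) -> &'a VarianceTerm {
        // alloc takes &self, so callers only need a shared borrow of the arena.
        arena.alloc(VarianceTerm(0))
    }

    fn main() {
        let arena = DroplessArena::default();
        let term = build(&arena);
        assert_eq!(term.0, 0);
    }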
diff --git a/compiler/rustc_typeck/src/variance/terms.rs b/compiler/rustc_typeck/src/variance/terms.rs
index f61a783..81c858c 100644
--- a/compiler/rustc_typeck/src/variance/terms.rs
+++ b/compiler/rustc_typeck/src/variance/terms.rs
@@ -9,7 +9,7 @@
 // `InferredIndex` is a newtype'd int representing the index of such
 // a variable.
 
-use rustc_arena::TypedArena;
+use rustc_arena::DroplessArena;
 use rustc_hir as hir;
 use rustc_hir::itemlikevisit::ItemLikeVisitor;
 use rustc_hir::HirIdMap;
@@ -47,7 +47,7 @@
 
 pub struct TermsContext<'a, 'tcx> {
     pub tcx: TyCtxt<'tcx>,
-    pub arena: &'a TypedArena<VarianceTerm<'a>>,
+    pub arena: &'a DroplessArena,
 
     // For marker types, UnsafeCell, and other lang items where
     // variance is hardcoded, records the item-id and the hardcoded
@@ -64,7 +64,7 @@
 
 pub fn determine_parameters_to_be_inferred<'a, 'tcx>(
     tcx: TyCtxt<'tcx>,
-    arena: &'a mut TypedArena<VarianceTerm<'a>>,
+    arena: &'a DroplessArena,
 ) -> TermsContext<'a, 'tcx> {
     let mut terms_cx = TermsContext {
         tcx,