Importing rustc-1.53.0

Bug: 194400612
Change-Id: Id2f38eeabc8325fff960e46b89b1cc7216f5227c
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index d9393ff..fba5667 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -41,12 +41,23 @@
 }
 
 pub trait ArgAttributesExt {
-    fn apply_attrs_to_llfn(&self, idx: AttributePlace, llfn: &Value);
-    fn apply_attrs_to_callsite(&self, idx: AttributePlace, callsite: &Value);
+    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
+    fn apply_attrs_to_callsite(
+        &self,
+        idx: AttributePlace,
+        cx: &CodegenCx<'_, '_>,
+        callsite: &Value,
+    );
+}
+
+fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
+    // While #84958 has been fixed, mutable noalias is not enabled by default
+    // in Rust 1.53 out of an abundance of caution.
+    cx.tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(false)
 }
 
 impl ArgAttributesExt for ArgAttributes {
-    fn apply_attrs_to_llfn(&self, idx: AttributePlace, llfn: &Value) {
+    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
         let mut regular = self.regular;
         unsafe {
             let deref = self.pointee_size.bytes();
@@ -62,6 +73,9 @@
                 llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
             }
             regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
+            if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
+                llvm::Attribute::NoAlias.apply_llfn(idx, llfn);
+            }
             match self.arg_ext {
                 ArgExtension::None => {}
                 ArgExtension::Zext => {
@@ -74,7 +88,12 @@
         }
     }
 
-    fn apply_attrs_to_callsite(&self, idx: AttributePlace, callsite: &Value) {
+    fn apply_attrs_to_callsite(
+        &self,
+        idx: AttributePlace,
+        cx: &CodegenCx<'_, '_>,
+        callsite: &Value,
+    ) {
         let mut regular = self.regular;
         unsafe {
             let deref = self.pointee_size.bytes();
@@ -98,6 +117,9 @@
                 );
             }
             regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
+            if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
+                llvm::Attribute::NoAlias.apply_callsite(idx, callsite);
+            }
             match self.arg_ext {
                 ArgExtension::None => {}
                 ArgExtension::Zext => {
@@ -419,13 +441,13 @@
 
         let mut i = 0;
         let mut apply = |attrs: &ArgAttributes| {
-            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), llfn);
+            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
             i += 1;
             i - 1
         };
         match self.ret.mode {
             PassMode::Direct(ref attrs) => {
-                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, llfn);
+                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
             PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                 assert!(!on_stack);
@@ -480,18 +502,18 @@
         // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite.
 
         let mut i = 0;
-        let mut apply = |attrs: &ArgAttributes| {
-            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), callsite);
+        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
+            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
             i += 1;
             i - 1
         };
         match self.ret.mode {
             PassMode::Direct(ref attrs) => {
-                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, callsite);
+                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, &bx.cx, callsite);
             }
             PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                 assert!(!on_stack);
-                let i = apply(attrs);
+                let i = apply(bx.cx, attrs);
                 unsafe {
                     llvm::LLVMRustAddStructRetCallSiteAttr(
                         callsite,
@@ -517,12 +539,12 @@
         }
         for arg in &self.args {
             if arg.pad.is_some() {
-                apply(&ArgAttributes::new());
+                apply(bx.cx, &ArgAttributes::new());
             }
             match arg.mode {
                 PassMode::Ignore => {}
                 PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
-                    let i = apply(attrs);
+                    let i = apply(bx.cx, attrs);
                     unsafe {
                         llvm::LLVMRustAddByValCallSiteAttr(
                             callsite,
@@ -533,22 +555,22 @@
                 }
                 PassMode::Direct(ref attrs)
                 | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
-                    apply(attrs);
+                    apply(bx.cx, attrs);
                 }
                 PassMode::Indirect {
                     ref attrs,
                     extra_attrs: Some(ref extra_attrs),
                     on_stack: _,
                 } => {
-                    apply(attrs);
-                    apply(extra_attrs);
+                    apply(bx.cx, attrs);
+                    apply(bx.cx, extra_attrs);
                 }
                 PassMode::Pair(ref a, ref b) => {
-                    apply(a);
-                    apply(b);
+                    apply(bx.cx, a);
+                    apply(bx.cx, b);
                 }
                 PassMode::Cast(_) => {
-                    apply(&ArgAttributes::new());
+                    apply(bx.cx, &ArgAttributes::new());
                 }
             }
         }
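
Note on the hunk above: threading `CodegenCx` through `apply_attrs_to_llfn`/`apply_attrs_to_callsite` exists solely so the `-Z mutable-noalias` flag can gate LLVM's `noalias` attribute on `&mut` arguments. A minimal sketch of what that attribute asserts at the Rust level (the function below is illustrative, not part of the patch):

    // With `-Z mutable-noalias=yes`, a `&mut i32` parameter is emitted with
    // LLVM `noalias`: no other pointer in the call accesses its memory.
    // Rust's borrow rules already guarantee this, so `*y` may be cached.
    fn add_twice(x: &mut i32, y: &i32) -> i32 {
        *x += *y;
        *x += *y;
        *x
    }

    fn main() {
        let (mut a, b) = (1, 2);
        assert_eq!(add_twice(&mut a, &b), 5);
    }
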
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index e7d359c..84b091d 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -14,7 +14,7 @@
 use rustc_hir as hir;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::{bug, span_bug};
-use rustc_span::{Pos, Span};
+use rustc_span::{Pos, Span, Symbol};
 use rustc_target::abi::*;
 use rustc_target::asm::*;
 
@@ -125,15 +125,39 @@
 
         // Collect the types of output operands
         let mut constraints = vec![];
+        let mut clobbers = vec![];
         let mut output_types = vec![];
         let mut op_idx = FxHashMap::default();
         for (idx, op) in operands.iter().enumerate() {
             match *op {
                 InlineAsmOperandRef::Out { reg, late, place } => {
+                    let is_target_supported = |reg_class: InlineAsmRegClass| {
+                        for &(_, feature) in reg_class.supported_types(asm_arch) {
+                            if let Some(feature) = feature {
+                                if self.tcx.sess.target_features.contains(&Symbol::intern(feature))
+                                {
+                                    return true;
+                                }
+                            } else {
+                                // Register class is unconditionally supported
+                                return true;
+                            }
+                        }
+                        false
+                    };
+
                     let mut layout = None;
                     let ty = if let Some(ref place) = place {
                         layout = Some(&place.layout);
                         llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
+                    } else if !is_target_supported(reg.reg_class()) {
+                        // We turn discarded outputs into clobber constraints
+                        // if the target feature needed by the register class is
+                        // disabled. This is necessary; otherwise LLVM will try
+                        // to actually allocate a register for the dummy output.
+                        assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_)));
+                        clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
+                        continue;
                     } else {
                         // If the output is discarded, we don't really care what
                         // type is used. We're just using this to tell LLVM to
@@ -244,6 +268,7 @@
             }
         }
 
+        constraints.append(&mut clobbers);
         if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
             match asm_arch {
                 InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
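
The clobber fallback above only fires for explicit registers (`InlineAsmRegOrRegClass::Reg`) whose class requires a disabled target feature. A hedged sketch on era-appropriate nightly (`#![feature(asm)]`); the register and feature flags are illustrative — in practice this arises in builds such as x86-64 kernels compiled with SSE disabled:

    #![feature(asm)] // unstable asm! of this era; stabilized later in 1.59

    // With `-C target-feature=-sse,-sse2`, the register class backing `xmm0`
    // is unavailable, so this discarded output is lowered to the clobber
    // constraint "~{xmm0}" instead of a dummy output register allocation.
    pub fn clobber_xmm0() {
        unsafe {
            asm!("", out("xmm0") _);
        }
    }
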
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 64ebe58..ede38b7 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -11,9 +11,10 @@
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::{self, TyCtxt};
-use rustc_session::config::{OptLevel, SanitizerSet};
+use rustc_session::config::OptLevel;
 use rustc_session::Session;
-use rustc_target::spec::StackProbeType;
+use rustc_target::spec::abi::Abi;
+use rustc_target::spec::{SanitizerSet, StackProbeType};
 
 use crate::attributes;
 use crate::llvm::AttributePlace::Function;
@@ -254,6 +255,7 @@
         attributes::emit_uwtable(llfn, true);
     }
 
+    // FIXME: none of these three functions interact with source level attributes.
     set_frame_pointer_elimination(cx, llfn);
     set_instrument_function(cx, llfn);
     set_probestack(cx, llfn);
@@ -289,7 +291,7 @@
     // The target doesn't care; the subtarget reads our attribute.
     apply_tune_cpu_attr(cx, llfn);
 
-    let function_features = codegen_fn_attrs
+    let mut function_features = codegen_fn_attrs
         .target_features
         .iter()
         .map(|f| {
@@ -301,23 +303,10 @@
             InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
         }))
         .collect::<Vec<String>>();
-    if !function_features.is_empty() {
-        let mut global_features = llvm_util::llvm_global_features(cx.tcx.sess);
-        global_features.extend(function_features.into_iter());
-        let features = global_features.join(",");
-        let val = CString::new(features).unwrap();
-        llvm::AddFunctionAttrStringValue(
-            llfn,
-            llvm::AttributePlace::Function,
-            cstr!("target-features"),
-            &val,
-        );
-    }
 
-    // Note that currently the `wasm-import-module` doesn't do anything, but
-    // eventually LLVM 7 should read this and ferry the appropriate import
-    // module to the output file.
-    if cx.tcx.sess.target.arch == "wasm32" {
+    if cx.tcx.sess.target.is_like_wasm {
+        // If this function is an import from the environment but the wasm
+        // import has a specific module/name, apply them here.
         if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
             llvm::AddFunctionAttrStringValue(
                 llfn,
@@ -336,6 +325,30 @@
                 &name,
             );
         }
+
+        // The `"wasm"` abi on wasm targets automatically enables the
+        // `+multivalue` feature because the purpose of the wasm abi is to match
+        // the WebAssembly specification, which has this feature. This won't be
+        // needed when LLVM enables this `multivalue` feature by default.
+        if !cx.tcx.is_closure(instance.def_id()) {
+            let abi = cx.tcx.fn_sig(instance.def_id()).abi();
+            if abi == Abi::Wasm {
+                function_features.push("+multivalue".to_string());
+            }
+        }
+    }
+
+    if !function_features.is_empty() {
+        let mut global_features = llvm_util::llvm_global_features(cx.tcx.sess);
+        global_features.extend(function_features.into_iter());
+        let features = global_features.join(",");
+        let val = CString::new(features).unwrap();
+        llvm::AddFunctionAttrStringValue(
+            llfn,
+            llvm::AttributePlace::Function,
+            cstr!("target-features"),
+            &val,
+        );
     }
 }
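
A hedged sketch of the `+multivalue` case above, assuming a `wasm32` target and the nightly feature gate for the unstable `"wasm"` ABI:

    #![feature(wasm_abi)] // the unstable ABI this hunk special-cases

    // On wasm32, this function's "target-features" attribute gains
    // "+multivalue", so the pair can be returned in two WebAssembly return
    // slots rather than through a pointer into linear memory.
    pub extern "wasm" fn swap(a: i32, b: i32) -> (i32, i32) {
        (b, a)
    }
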
 
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 5effe68..4226ed7 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -24,6 +24,7 @@
 use std::ffi::{CStr, CString};
 use std::fs::File;
 use std::io;
+use std::iter;
 use std::path::Path;
 use std::ptr;
 use std::slice;
@@ -916,9 +917,7 @@
         modules: &[llvm::ThinLTOModule],
         names: &[CString],
     ) -> Self {
-        let keys = modules
-            .iter()
-            .zip(names.iter())
+        let keys = iter::zip(modules, names)
             .map(|(module, name)| {
                 let key = build_string(|rust_str| unsafe {
                     llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
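
`iter::zip` (unstable as `iter_zip` at the time of this import, stabilized in later releases) is behaviorally identical to chaining `.iter().zip(...)`; a minimal equivalence check:

    use std::iter;

    fn main() {
        let modules = ["mod_a", "mod_b"];
        let names = ["one", "two"];
        // Same pairs as `modules.iter().zip(names.iter())`, but flatter.
        for (module, name) in iter::zip(&modules, &names) {
            println!("{}: {}", module, name);
        }
    }
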
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 388dd7c..b628ae3 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -23,11 +23,11 @@
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::bug;
 use rustc_middle::ty::TyCtxt;
-use rustc_session::config::{self, Lto, OutputType, Passes, SanitizerSet, SwitchWithOptPath};
+use rustc_session::config::{self, Lto, OutputType, Passes, SwitchWithOptPath};
 use rustc_session::Session;
 use rustc_span::symbol::sym;
 use rustc_span::InnerSpan;
-use rustc_target::spec::{CodeModel, RelocModel, SplitDebuginfo};
+use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo};
 use tracing::debug;
 
 use libc::{c_char, c_int, c_uint, c_void, size_t};
@@ -170,10 +170,7 @@
     // On the wasm target once the `atomics` feature is enabled that means that
     // we're no longer single-threaded, or otherwise we don't want LLVM to
     // lower atomic operations to single-threaded operations.
-    if singlethread
-        && sess.target.llvm_target.contains("wasm32")
-        && sess.target_features.contains(&sym::atomics)
-    {
+    if singlethread && sess.target.is_like_wasm && sess.target_features.contains(&sym::atomics) {
         singlethread = false;
     }
 
@@ -548,6 +545,15 @@
                     llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
                     continue;
                 }
+                if pass_name == "insert-gcov-profiling" || pass_name == "instrprof" {
+                    // Instrumentation must be inserted before optimization,
+                    // otherwise LLVM may optimize some functions away which
+                    // breaks llvm-cov.
+                    //
+                    // This mirrors what Clang does in lib/CodeGen/BackendUtil.cpp.
+                    llvm::LLVMRustAddPass(mpm, find_pass(pass_name).unwrap());
+                    continue;
+                }
 
                 if let Some(pass) = find_pass(pass_name) {
                     extra_passes.push(pass);
@@ -1041,7 +1047,7 @@
     // thresholds copied from clang.
     match (opt_level, opt_size, inline_threshold) {
         (.., Some(t)) => {
-            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32);
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t);
         }
         (llvm::CodeGenOptLevel::Aggressive, ..) => {
             llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275);
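
The `-C passes` handling above now routes instrumentation passes to the module pass manager before optimization runs. A self-contained sketch of that dispatch rule — the enum and function are illustrative stand-ins, not rustc APIs:

    #[derive(Debug, PartialEq)]
    enum Pm {
        Function, // early per-function passes (e.g. "lint")
        Module,   // must run before optimization
        Extra,    // queued with the remaining user passes
    }

    fn dispatch(pass: &str) -> Pm {
        match pass {
            "lint" => Pm::Function,
            // Instrumentation before optimization, or llvm-cov may lose
            // functions that the optimizer deletes.
            "insert-gcov-profiling" | "instrprof" => Pm::Module,
            _ => Pm::Extra,
        }
    }

    fn main() {
        assert_eq!(dispatch("instrprof"), Pm::Module);
        assert_eq!(dispatch("licm"), Pm::Extra);
    }
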
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index d5be313..6f6c649 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -32,8 +32,9 @@
 use rustc_middle::middle::exported_symbols;
 use rustc_middle::mir::mono::{Linkage, Visibility};
 use rustc_middle::ty::TyCtxt;
-use rustc_session::config::{DebugInfo, SanitizerSet};
+use rustc_session::config::DebugInfo;
 use rustc_span::symbol::Symbol;
+use rustc_target::spec::SanitizerSet;
 
 use std::ffi::CString;
 use std::time::Instant;
@@ -143,7 +144,7 @@
 
             // Finalize code coverage by injecting the coverage map. Note, the coverage map will
             // also be added to the `llvm.used` variable, created next.
-            if cx.sess().opts.debugging_opts.instrument_coverage {
+            if cx.sess().instrument_coverage() {
                 cx.coverageinfo_finalize();
             }
 
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index f4852c9..053cda1 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -2,6 +2,7 @@
 use crate::context::CodegenCx;
 use crate::llvm::{self, BasicBlock, False};
 use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
+use crate::llvm_util;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
@@ -16,11 +17,12 @@
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::{sym, Span};
+use rustc_span::Span;
 use rustc_target::abi::{self, Align, Size};
 use rustc_target::spec::{HasTargetSpec, Target};
 use std::borrow::Cow;
 use std::ffi::CStr;
+use std::iter;
 use std::ops::{Deref, Range};
 use std::ptr;
 use tracing::debug;
@@ -260,7 +262,7 @@
     fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         unsafe {
             let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            llvm::LLVMRustSetFastMath(instr);
             instr
         }
     }
@@ -268,7 +270,7 @@
     fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         unsafe {
             let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            llvm::LLVMRustSetFastMath(instr);
             instr
         }
     }
@@ -276,7 +278,7 @@
     fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         unsafe {
             let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            llvm::LLVMRustSetFastMath(instr);
             instr
         }
     }
@@ -284,7 +286,7 @@
     fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         unsafe {
             let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            llvm::LLVMRustSetFastMath(instr);
             instr
         }
     }
@@ -292,7 +294,7 @@
     fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         unsafe {
             let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            llvm::LLVMRustSetFastMath(instr);
             instr
         }
     }
@@ -668,81 +670,47 @@
     }
 
     fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
-        // WebAssembly has saturating floating point to integer casts if the
-        // `nontrapping-fptoint` target feature is activated. We'll use those if
-        // they are available.
-        if self.sess().target.arch == "wasm32"
-            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
-        {
+        if llvm_util::get_version() >= (12, 0, 0) && !self.fptoint_sat_broken_in_llvm() {
             let src_ty = self.cx.val_ty(val);
             let float_width = self.cx.float_width(src_ty);
             let int_width = self.cx.int_width(dest_ty);
-            let name = match (int_width, float_width) {
-                (32, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f32"),
-                (32, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f64"),
-                (64, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f32"),
-                (64, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f64"),
-                _ => None,
-            };
-            if let Some(name) = name {
-                let intrinsic = self.get_intrinsic(name);
-                return Some(self.call(intrinsic, &[val], None));
-            }
+            let name = format!("llvm.fptoui.sat.i{}.f{}", int_width, float_width);
+            let intrinsic = self.get_intrinsic(&name);
+            return Some(self.call(intrinsic, &[val], None));
         }
+
         None
     }
 
     fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
-        // WebAssembly has saturating floating point to integer casts if the
-        // `nontrapping-fptoint` target feature is activated. We'll use those if
-        // they are available.
-        if self.sess().target.arch == "wasm32"
-            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
-        {
+        if llvm_util::get_version() >= (12, 0, 0) && !self.fptoint_sat_broken_in_llvm() {
             let src_ty = self.cx.val_ty(val);
             let float_width = self.cx.float_width(src_ty);
             let int_width = self.cx.int_width(dest_ty);
-            let name = match (int_width, float_width) {
-                (32, 32) => Some("llvm.wasm.trunc.saturate.signed.i32.f32"),
-                (32, 64) => Some("llvm.wasm.trunc.saturate.signed.i32.f64"),
-                (64, 32) => Some("llvm.wasm.trunc.saturate.signed.i64.f32"),
-                (64, 64) => Some("llvm.wasm.trunc.saturate.signed.i64.f64"),
-                _ => None,
-            };
-            if let Some(name) = name {
-                let intrinsic = self.get_intrinsic(name);
-                return Some(self.call(intrinsic, &[val], None));
-            }
+            let name = format!("llvm.fptosi.sat.i{}.f{}", int_width, float_width);
+            let intrinsic = self.get_intrinsic(&name);
+            return Some(self.call(intrinsic, &[val], None));
         }
+
         None
     }
 
-    fn fptosui_may_trap(&self, val: &'ll Value, dest_ty: &'ll Type) -> bool {
-        // Most of the time we'll be generating the `fptosi` or `fptoui`
-        // instruction for floating-point-to-integer conversions. These
-        // instructions by definition in LLVM do not trap. For the WebAssembly
-        // target, however, we'll lower in some cases to intrinsic calls instead
-        // which may trap. If we detect that this is a situation where we'll be
-        // using the intrinsics then we report that the call map trap, which
-        // callers might need to handle.
-        if !self.wasm_and_missing_nontrapping_fptoint() {
-            return false;
-        }
-        let src_ty = self.cx.val_ty(val);
-        let float_width = self.cx.float_width(src_ty);
-        let int_width = self.cx.int_width(dest_ty);
-        matches!((int_width, float_width), (32, 32) | (32, 64) | (64, 32) | (64, 64))
-    }
-
     fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
-        // When we can, use the native wasm intrinsics which have tighter
-        // codegen. Note that this has a semantic difference in that the
-        // intrinsic can trap whereas `fptoui` never traps. That difference,
-        // however, is handled by `fptosui_may_trap` above.
+        // On WebAssembly the `fptoui` and `fptosi` instructions currently have
+        // poor codegen. The reason for this is that the corresponding wasm
+        // instructions, `i32.trunc_f32_s` for example, will trap when the float
+        // is out-of-bounds, infinity, or nan. This means that LLVM
+        // automatically inserts control flow around `fptoui` and `fptosi`
+        // because the LLVM instruction `fptoui` is defined as producing a
+        // poison value, not having UB on out-of-bounds values.
         //
-        // Note that we skip the wasm intrinsics for vector types where `fptoui`
-        // must be used instead.
-        if self.wasm_and_missing_nontrapping_fptoint() {
+        // This method, however, is only used with non-saturating casts that
+        // have UB on out-of-bounds values. This means that it's ok if we use
+        // the raw wasm instruction since out-of-bounds values can do whatever
+        // we like. To ensure that LLVM picks the right instruction we choose
+        // the raw wasm intrinsic functions which avoid LLVM inserting all the
+        // other control flow automatically.
+        if self.sess().target.arch == "wasm32" {
             let src_ty = self.cx.val_ty(val);
             if self.cx.type_kind(src_ty) != TypeKind::Vector {
                 let float_width = self.cx.float_width(src_ty);
@@ -764,7 +732,8 @@
     }
 
     fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
-        if self.wasm_and_missing_nontrapping_fptoint() {
+        // see `fptoui` above for why wasm is different here
+        if self.sess().target.arch == "wasm32" {
             let src_ty = self.cx.val_ty(val);
             if self.cx.type_kind(src_ty) != TypeKind::Vector {
                 let float_width = self.cx.float_width(src_ty);
@@ -1241,14 +1210,14 @@
     pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
         unsafe {
             let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
-            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            llvm::LLVMRustSetFastMath(instr);
             instr
         }
     }
     pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
         unsafe {
             let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
-            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            llvm::LLVMRustSetFastMath(instr);
             instr
         }
     }
@@ -1281,7 +1250,7 @@
         unsafe {
             let instr =
                 llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
-            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            llvm::LLVMRustSetFastMath(instr);
             instr
         }
     }
@@ -1289,7 +1258,7 @@
         unsafe {
             let instr =
                 llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
-            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            llvm::LLVMRustSetFastMath(instr);
             instr
         }
     }
@@ -1352,18 +1321,14 @@
 
         let param_tys = self.cx.func_params_types(fn_ty);
 
-        let all_args_match = param_tys
-            .iter()
-            .zip(args.iter().map(|&v| self.val_ty(v)))
+        let all_args_match = iter::zip(&param_tys, args.iter().map(|&v| self.val_ty(v)))
             .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
 
         if all_args_match {
             return Cow::Borrowed(args);
         }
 
-        let casted_args: Vec<_> = param_tys
-            .into_iter()
-            .zip(args.iter())
+        let casted_args: Vec<_> = iter::zip(param_tys, args)
             .enumerate()
             .map(|(i, (expected_ty, &actual_val))| {
                 let actual_ty = self.val_ty(actual_val);
@@ -1423,8 +1388,11 @@
         }
     }
 
-    fn wasm_and_missing_nontrapping_fptoint(&self) -> bool {
-        self.sess().target.arch == "wasm32"
-            && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
+    fn fptoint_sat_broken_in_llvm(&self) -> bool {
+        match self.tcx.sess.target.arch.as_str() {
+            // FIXME - https://bugs.llvm.org/show_bug.cgi?id=50083
+            "riscv64" => llvm_util::get_version() < (13, 0, 0),
+            _ => false,
+        }
     }
 }
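
The `llvm.fptoui.sat.*`/`llvm.fptosi.sat.*` intrinsics used above (generic in LLVM 12+) implement the saturating semantics that Rust `as` casts have guaranteed since 1.45; a runnable refresher:

    fn main() {
        assert_eq!(300.0f32 as u8, 255); // clamps to u8::MAX
        assert_eq!(-1.0f32 as u8, 0);    // clamps to u8::MIN
        assert_eq!(f32::NAN as i32, 0);  // NaN saturates to 0
    }
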
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 367c1f4..b26969a 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -14,6 +14,7 @@
 
 use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
 use rustc_middle::ty::{self, Instance, TypeFoldable};
+use rustc_target::spec::RelocModel;
 
 /// Codegens a reference to a fn/method item, monomorphizing and
 /// inlining as it goes.
@@ -170,17 +171,19 @@
                     }
                 }
             }
-        }
 
-        // MinGW: For backward compatibility we rely on the linker to decide whether it
-        // should use dllimport for functions.
-        if cx.use_dll_storage_attrs
-            && tcx.is_dllimport_foreign_item(instance_def_id)
-            && tcx.sess.target.env != "gnu"
-        {
-            unsafe {
+            // MinGW: For backward compatibility we rely on the linker to decide whether it
+            // should use dllimport for functions.
+            if cx.use_dll_storage_attrs
+                && tcx.is_dllimport_foreign_item(instance_def_id)
+                && tcx.sess.target.env != "gnu"
+            {
                 llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
             }
+
+            if cx.tcx.sess.relocation_model() == RelocModel::Static {
+                llvm::LLVMRustSetDSOLocal(llfn, true);
+            }
         }
 
         llfn
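
Setting `dso_local` under the static relocation model lets codegen reference a declared symbol directly rather than through GOT/PLT indirection. A hedged illustration — any function declaration works; `strlen` is just a convenient stand-in (compile with `-C relocation-model=static` to hit the new branch):

    extern "C" {
        // An environment import whose declaration becomes dso_local.
        fn strlen(s: *const u8) -> usize;
    }

    fn main() {
        let n = unsafe { strlen(b"hello\0".as_ptr()) };
        assert_eq!(n, 5);
    }
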
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 21473f3..f5c54b1 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -79,7 +79,7 @@
     pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
     pub isize_ty: &'ll Type,
 
-    pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'tcx>>,
+    pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
     pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>,
 
     eh_personality: Cell<Option<&'ll Value>>,
@@ -101,10 +101,6 @@
     }
 }
 
-fn strip_x86_address_spaces(data_layout: String) -> String {
-    data_layout.replace("-p270:32:32-p271:32:32-p272:64:64-", "-")
-}
-
 fn strip_powerpc64_vectors(data_layout: String) -> String {
     data_layout.replace("-v256:256:256-v512:512:512", "")
 }
@@ -119,11 +115,6 @@
     let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
 
     let mut target_data_layout = sess.target.data_layout.clone();
-    if llvm_util::get_version() < (10, 0, 0)
-        && (sess.target.arch == "x86" || sess.target.arch == "x86_64")
-    {
-        target_data_layout = strip_x86_address_spaces(target_data_layout);
-    }
     if llvm_util::get_version() < (12, 0, 0) && sess.target.arch == "powerpc64" {
         target_data_layout = strip_powerpc64_vectors(target_data_layout);
     }
@@ -280,7 +271,7 @@
 
         let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
 
-        let coverage_cx = if tcx.sess.opts.debugging_opts.instrument_coverage {
+        let coverage_cx = if tcx.sess.instrument_coverage() {
             let covctx = coverageinfo::CrateCoverageContext::new();
             Some(covctx)
         } else {
@@ -331,7 +322,7 @@
     }
 
     #[inline]
-    pub fn coverage_context(&'a self) -> Option<&'a coverageinfo::CrateCoverageContext<'tcx>> {
+    pub fn coverage_context(&'a self) -> Option<&'a coverageinfo::CrateCoverageContext<'ll, 'tcx>> {
         self.coverage_cx.as_ref()
     }
 }
@@ -512,14 +503,6 @@
         let t_f32 = self.type_f32();
         let t_f64 = self.type_f64();
 
-        ifn!("llvm.wasm.trunc.saturate.unsigned.i32.f32", fn(t_f32) -> t_i32);
-        ifn!("llvm.wasm.trunc.saturate.unsigned.i32.f64", fn(t_f64) -> t_i32);
-        ifn!("llvm.wasm.trunc.saturate.unsigned.i64.f32", fn(t_f32) -> t_i64);
-        ifn!("llvm.wasm.trunc.saturate.unsigned.i64.f64", fn(t_f64) -> t_i64);
-        ifn!("llvm.wasm.trunc.saturate.signed.i32.f32", fn(t_f32) -> t_i32);
-        ifn!("llvm.wasm.trunc.saturate.signed.i32.f64", fn(t_f64) -> t_i32);
-        ifn!("llvm.wasm.trunc.saturate.signed.i64.f32", fn(t_f32) -> t_i64);
-        ifn!("llvm.wasm.trunc.saturate.signed.i64.f64", fn(t_f64) -> t_i64);
         ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
         ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
         ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
@@ -529,6 +512,28 @@
         ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
         ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
 
+        ifn!("llvm.fptosi.sat.i8.f32", fn(t_f32) -> t_i8);
+        ifn!("llvm.fptosi.sat.i16.f32", fn(t_f32) -> t_i16);
+        ifn!("llvm.fptosi.sat.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.fptosi.sat.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.fptosi.sat.i128.f32", fn(t_f32) -> t_i128);
+        ifn!("llvm.fptosi.sat.i8.f64", fn(t_f64) -> t_i8);
+        ifn!("llvm.fptosi.sat.i16.f64", fn(t_f64) -> t_i16);
+        ifn!("llvm.fptosi.sat.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.fptosi.sat.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.fptosi.sat.i128.f64", fn(t_f64) -> t_i128);
+
+        ifn!("llvm.fptoui.sat.i8.f32", fn(t_f32) -> t_i8);
+        ifn!("llvm.fptoui.sat.i16.f32", fn(t_f32) -> t_i16);
+        ifn!("llvm.fptoui.sat.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.fptoui.sat.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.fptoui.sat.i128.f32", fn(t_f32) -> t_i128);
+        ifn!("llvm.fptoui.sat.i8.f64", fn(t_f64) -> t_i8);
+        ifn!("llvm.fptoui.sat.i16.f64", fn(t_f64) -> t_i16);
+        ifn!("llvm.fptoui.sat.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.fptoui.sat.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.fptoui.sat.i128.f64", fn(t_f64) -> t_i128);
+
         ifn!("llvm.trap", fn() -> void);
         ifn!("llvm.debugtrap", fn() -> void);
         ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
@@ -712,7 +717,7 @@
         ifn!("llvm.va_end", fn(i8p) -> void);
         ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
 
-        if self.sess().opts.debugging_opts.instrument_coverage {
+        if self.sess().instrument_coverage() {
             ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
         }
 
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 352638a..1faaa7e 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -3,13 +3,13 @@
 use crate::llvm;
 
 use llvm::coverageinfo::CounterMappingRegion;
-use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression, FunctionCoverage};
-use rustc_codegen_ssa::traits::ConstMethods;
+use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
+use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods};
 use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
 use rustc_hir::def_id::{DefId, DefIdSet, LOCAL_CRATE};
 use rustc_llvm::RustString;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::coverage::CodeRegion;
-use rustc_middle::ty::{Instance, TyCtxt};
 use rustc_span::Symbol;
 
 use std::ffi::CString;
@@ -20,16 +20,17 @@
 ///
 /// This Coverage Map complies with Coverage Mapping Format version 4 (zero-based encoded as 3),
 /// as defined at [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/11.0-2020-10-12/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format)
-/// and published in Rust's current (November 2020) fork of LLVM. This version is supported by the
-/// LLVM coverage tools (`llvm-profdata` and `llvm-cov`) bundled with Rust's fork of LLVM.
+/// and published in Rust's November 2020 fork of LLVM. This version is supported by the LLVM
+/// coverage tools (`llvm-profdata` and `llvm-cov`) bundled with Rust's fork of LLVM.
 ///
 /// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with
-/// version 3. Clang's implementation of Coverage Map generation was referenced when implementing
-/// this Rust version, and though the format documentation is very explicit and detailed, some
-/// undocumented details in Clang's implementation (that may or may not be important) were also
-/// replicated for Rust's Coverage Map.
+/// the same version. Clang's implementation of Coverage Map generation was referenced when
+/// implementing this Rust version, and though the format documentation is very explicit and
+/// detailed, some undocumented details in Clang's implementation (that may or may not be important)
+/// were also replicated for Rust's Coverage Map.
 pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
     let tcx = cx.tcx;
+
     // Ensure LLVM supports Coverage Map Version 4 (encoded as a zero-based value: 3).
     // If not, the LLVM Version must be less than 11.
     let version = coverageinfo::mapping_version();
@@ -39,17 +40,24 @@
 
     debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name());
 
-    let mut function_coverage_map = match cx.coverage_context() {
+    // In order to show that unused functions have coverage counts of zero (0), LLVM requires the
+    // functions exist. Generate synthetic functions with a (required) single counter, and add the
+    // MIR `Coverage` code regions to the `function_coverage_map`, before calling
+    // `ctx.take_function_coverage_map()`.
+    if !tcx.sess.instrument_coverage_except_unused_functions() {
+        add_unused_functions(cx);
+    }
+
+    let function_coverage_map = match cx.coverage_context() {
         Some(ctx) => ctx.take_function_coverage_map(),
         None => return,
     };
+
     if function_coverage_map.is_empty() {
         // This module has no functions with coverage instrumentation
         return;
     }
 
-    add_unreachable_coverage(tcx, &mut function_coverage_map);
-
     let mut mapgen = CoverageMapGenerator::new();
 
     // Encode coverage mappings and generate function records
@@ -57,7 +65,8 @@
     for (instance, function_coverage) in function_coverage_map {
         debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
         let mangled_function_name = tcx.symbol_name(instance).to_string();
-        let function_source_hash = function_coverage.source_hash();
+        let source_hash = function_coverage.source_hash();
+        let is_used = function_coverage.is_used();
         let (expressions, counter_regions) =
             function_coverage.get_expressions_and_counter_regions();
 
@@ -69,7 +78,7 @@
             "Every `FunctionCoverage` should have at least one counter"
         );
 
-        function_data.push((mangled_function_name, function_source_hash, coverage_mapping_buffer));
+        function_data.push((mangled_function_name, source_hash, is_used, coverage_mapping_buffer));
     }
 
     // Encode all filenames referenced by counters/expressions in this module
@@ -84,13 +93,14 @@
     // Generate the LLVM IR representation of the coverage map and store it in a well-known global
     let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
 
-    for (mangled_function_name, function_source_hash, coverage_mapping_buffer) in function_data {
+    for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
         save_function_record(
             cx,
             mangled_function_name,
-            function_source_hash,
+            source_hash,
             filenames_ref,
             coverage_mapping_buffer,
+            is_used,
         );
     }
 
@@ -201,9 +211,10 @@
 fn save_function_record(
     cx: &CodegenCx<'ll, 'tcx>,
     mangled_function_name: String,
-    function_source_hash: u64,
+    source_hash: u64,
     filenames_ref: u64,
     coverage_mapping_buffer: Vec<u8>,
+    is_used: bool,
 ) {
     // Concatenate the encoded coverage mappings
     let coverage_mapping_size = coverage_mapping_buffer.len();
@@ -212,128 +223,124 @@
     let func_name_hash = coverageinfo::hash_str(&mangled_function_name);
     let func_name_hash_val = cx.const_u64(func_name_hash);
     let coverage_mapping_size_val = cx.const_u32(coverage_mapping_size as u32);
-    let func_hash_val = cx.const_u64(function_source_hash);
+    let source_hash_val = cx.const_u64(source_hash);
     let filenames_ref_val = cx.const_u64(filenames_ref);
     let func_record_val = cx.const_struct(
         &[
             func_name_hash_val,
             coverage_mapping_size_val,
-            func_hash_val,
+            source_hash_val,
             filenames_ref_val,
             coverage_mapping_val,
         ],
         /*packed=*/ true,
     );
 
-    // At the present time, the coverage map for Rust assumes every instrumented function `is_used`.
-    // Note that Clang marks functions as "unused" in `CodeGenPGO::emitEmptyCounterMapping`. (See:
-    // https://github.com/rust-lang/llvm-project/blob/de02a75e398415bad4df27b4547c25b896c8bf3b/clang%2Flib%2FCodeGen%2FCodeGenPGO.cpp#L877-L878
-    // for example.)
-    //
-    // It's not yet clear if or how this may be applied to Rust in the future, but the `is_used`
-    // argument is available and handled similarly.
-    let is_used = true;
     coverageinfo::save_func_record_to_mod(cx, func_name_hash, func_record_val, is_used);
 }
 
 /// When finalizing the coverage map, `FunctionCoverage` only has the `CodeRegion`s and counters for
 /// the functions that went through codegen; such as public functions and "used" functions
 /// (functions referenced by other "used" or public items). Any other functions considered unused,
-/// or "Unreachable" were still parsed and processed through the MIR stage.
+/// or "Unreachable", were still parsed and processed through the MIR stage, but were not
+/// codegenned. (Note that `-Clink-dead-code` can force some unused code to be codegenned, but
+/// that flag is known to cause other errors, when combined with `-Z instrument-coverage`; and
+/// `-Clink-dead-code` will not generate code for unused generic functions.)
 ///
-/// We can find the unreachable functions by the set difference of all MIR `DefId`s (`tcx` query
-/// `mir_keys`) minus the codegenned `DefId`s (`tcx` query `collect_and_partition_mono_items`).
+/// We can find the unused functions (including generic functions) by the set difference of all MIR
+/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`tcx` query
+/// `collect_and_partition_mono_items`).
 ///
 /// *HOWEVER* the codegenned `DefId`s are partitioned across multiple `CodegenUnit`s (CGUs), and
 /// this function is processing a `function_coverage_map` for the functions (`Instance`/`DefId`)
-/// allocated to only one of those CGUs. We must NOT inject any "Unreachable" functions's
-/// `CodeRegion`s more than once, so we have to pick which CGU's `function_coverage_map` to add
-/// each "Unreachable" function to.
-///
-/// Some constraints:
-///
-/// 1. The file name of an "Unreachable" function must match the file name of the existing
-///    codegenned (covered) function to which the unreachable code regions will be added.
-/// 2. The function to which the unreachable code regions will be added must not be a generic
-///    function (must not have type parameters) because the coverage tools will get confused
-///    if the codegenned function has more than one instantiation and additional `CodeRegion`s
-///    attached to only one of those instantiations.
-fn add_unreachable_coverage<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    function_coverage_map: &mut FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>>,
-) {
+/// allocated to only one of those CGUs. We must NOT inject any unused functions' `CodeRegion`s
+/// more than once, so we have to pick a single CGU's `function_coverage_map` into which the
+/// unused function will be inserted.
+fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+    let tcx = cx.tcx;
+
     // FIXME(#79622): Can this solution be simplified and/or improved? Are there other sources
     // of compiler state data that might help (or better sources that could be exposed, but
     // aren't yet)?
 
-    // Note: If the crate *only* defines generic functions, there are no codegenerated non-generic
-    // functions to add any unreachable code to. In this case, the unreachable code regions will
-    // have no coverage, instead of having coverage with zero executions.
-    //
-    // This is probably still an improvement over Clang, which does not generate any coverage
-    // for uninstantiated template functions.
+    let ignore_unused_generics = tcx.sess.instrument_coverage_except_unused_generics();
 
-    let has_non_generic_def_ids =
-        function_coverage_map.keys().any(|instance| instance.def.attrs(tcx).len() == 0);
-
-    if !has_non_generic_def_ids {
-        // There are no non-generic functions to add unreachable `CodeRegion`s to
-        return;
-    }
-
-    let all_def_ids: DefIdSet =
-        tcx.mir_keys(LOCAL_CRATE).iter().map(|local_def_id| local_def_id.to_def_id()).collect();
+    let all_def_ids: DefIdSet = tcx
+        .mir_keys(LOCAL_CRATE)
+        .iter()
+        .filter_map(|local_def_id| {
+            let def_id = local_def_id.to_def_id();
+            if ignore_unused_generics && tcx.generics_of(def_id).requires_monomorphization(tcx) {
+                return None;
+            }
+            Some(local_def_id.to_def_id())
+        })
+        .collect();
 
     let codegenned_def_ids = tcx.codegened_and_inlined_items(LOCAL_CRATE);
 
-    let mut unreachable_def_ids_by_file: FxHashMap<Symbol, Vec<DefId>> = FxHashMap::default();
+    let mut unused_def_ids_by_file: FxHashMap<Symbol, Vec<DefId>> = FxHashMap::default();
     for &non_codegenned_def_id in all_def_ids.difference(codegenned_def_ids) {
-        // Make sure the non-codegenned (unreachable) function has a file_name
+        let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
+        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
+            continue;
+        }
+        // Make sure the non-codegenned (unused) function has a file_name
         if let Some(non_codegenned_file_name) = tcx.covered_file_name(non_codegenned_def_id) {
-            let def_ids = unreachable_def_ids_by_file
-                .entry(*non_codegenned_file_name)
-                .or_insert_with(Vec::new);
+            let def_ids =
+                unused_def_ids_by_file.entry(*non_codegenned_file_name).or_insert_with(Vec::new);
             def_ids.push(non_codegenned_def_id);
         }
     }
 
-    if unreachable_def_ids_by_file.is_empty() {
-        // There are no unreachable functions with file names to add (in any CGU)
+    if unused_def_ids_by_file.is_empty() {
+        // There are no unused functions with file names to add (in any CGU)
         return;
     }
 
-    // Since there may be multiple `CodegenUnit`s, some codegenned_def_ids may be codegenned in a
-    // different CGU, and will be added to the function_coverage_map for each CGU. Determine which
-    // function_coverage_map has the responsibility for publishing unreachable coverage
-    // based on file name:
+    // Each `CodegenUnit` (CGU) has its own function_coverage_map, and generates a specific binary
+    // with its own coverage map.
     //
-    // For each covered file name, sort ONLY the non-generic codegenned_def_ids, and if
-    // covered_def_ids.contains(the first def_id) for a given file_name, add the unreachable code
-    // region in this function_coverage_map. Otherwise, ignore it and assume another CGU's
-    // function_coverage_map will be adding it (because it will be first for one, and only one,
-    // of them).
+    // Each covered function `Instance` can be included in only one coverage map, produced from a
+    // specific function_coverage_map, from a specific CGU.
+    //
+    // Since unused functions did not generate code, they are not associated with any CGU yet.
+    //
+    // To avoid injecting the unused functions in multiple coverage maps (for multiple CGUs),
+    // determine which function_coverage_map has the responsibility for publishing unreachable
+    // coverage, based on file name: For each unused function, find the CGU that generates the
+    // first function (based on sorted `DefId`) from the same file.
+    //
+    // Add a new `FunctionCoverage` to the `function_coverage_map`, with unreachable code regions
+    // for each region in its MIR.
+
+    // Convert the `HashSet` of `codegenned_def_ids` to a sortable vector, and sort them.
     let mut sorted_codegenned_def_ids: Vec<DefId> =
         codegenned_def_ids.iter().map(|def_id| *def_id).collect();
     sorted_codegenned_def_ids.sort_unstable();
 
     let mut first_covered_def_id_by_file: FxHashMap<Symbol, DefId> = FxHashMap::default();
     for &def_id in sorted_codegenned_def_ids.iter() {
-        // Only consider non-generic functions, to potentially add unreachable code regions
-        if tcx.generics_of(def_id).count() == 0 {
-            if let Some(covered_file_name) = tcx.covered_file_name(def_id) {
-                // Only add files known to have unreachable functions
-                if unreachable_def_ids_by_file.contains_key(covered_file_name) {
-                    first_covered_def_id_by_file.entry(*covered_file_name).or_insert(def_id);
-                }
+        if let Some(covered_file_name) = tcx.covered_file_name(def_id) {
+            // Only add files known to have unused functions
+            if unused_def_ids_by_file.contains_key(covered_file_name) {
+                first_covered_def_id_by_file.entry(*covered_file_name).or_insert(def_id);
             }
         }
     }
 
     // Get the set of def_ids with coverage regions, known by *this* CoverageContext.
-    let cgu_covered_def_ids: DefIdSet =
-        function_coverage_map.keys().map(|instance| instance.def.def_id()).collect();
+    let cgu_covered_def_ids: DefIdSet = match cx.coverage_context() {
+        Some(ctx) => ctx
+            .function_coverage_map
+            .borrow()
+            .keys()
+            .map(|&instance| instance.def.def_id())
+            .collect(),
+        None => return,
+    };
 
-    let mut cgu_covered_files: FxHashSet<Symbol> = first_covered_def_id_by_file
+    let cgu_covered_files: FxHashSet<Symbol> = first_covered_def_id_by_file
         .iter()
         .filter_map(
             |(&file_name, def_id)| {
@@ -342,49 +349,13 @@
         )
         .collect();
 
-    // Find the first covered, non-generic function (instance) for each cgu_covered_file. Take the
-    // unreachable code regions for that file, and add them to the function.
-    //
-    // There are three `for` loops here, but (a) the lists have already been reduced to the minimum
-    // required values, the lists are further reduced (by `remove()` calls) when elements are no
-    // longer needed, and there are several opportunities to branch out of loops early.
-    for (instance, function_coverage) in function_coverage_map.iter_mut() {
-        if instance.def.attrs(tcx).len() > 0 {
-            continue;
-        }
-        // The covered function is not generic...
-        let covered_def_id = instance.def.def_id();
-        if let Some(covered_file_name) = tcx.covered_file_name(covered_def_id) {
-            if !cgu_covered_files.remove(&covered_file_name) {
-                continue;
-            }
-            // The covered function's file is one of the files with unreachable code regions, so
-            // all of the unreachable code regions for this file will be added to this function.
-            for def_id in
-                unreachable_def_ids_by_file.remove(&covered_file_name).into_iter().flatten()
-            {
-                // Note, this loop adds an unreachable code regions for each MIR-derived region.
-                // Alternatively, we could add a single code region for the maximum span of all
-                // code regions here.
-                //
-                // Observed downsides of this approach are:
-                //
-                // 1. The coverage results will appear inconsistent compared with the same (or
-                //    similar) code in a function that is reached.
-                // 2. If the function is unreachable from one crate but reachable when compiling
-                //    another referencing crate (such as a cross-crate reference to a
-                //    generic function or inlined function), actual coverage regions overlaid
-                //    on a single larger code span of `Zero` coverage can appear confusing or
-                //    wrong. Chaning the unreachable coverage from a `code_region` to a
-                //    `gap_region` can help, but still can look odd with `0` line counts for
-                //    lines between executed (> 0) lines (such as for blank lines or comments).
-                for &region in tcx.covered_code_regions(def_id) {
-                    function_coverage.add_unreachable_region(region.clone());
-                }
-            }
-            if cgu_covered_files.is_empty() {
-                break;
-            }
+    // For each file for which this CGU is responsible for adding unused function coverage,
+    // get the `def_id`s for each unused function (if any), define a synthetic function with a
+    // single LLVM coverage counter, and add the function's coverage `CodeRegion`s to the
+    // function_coverage_map.
+    for covered_file_name in cgu_covered_files {
+        for def_id in unused_def_ids_by_file.remove(&covered_file_name).into_iter().flatten() {
+            cx.define_unused_fn(def_id);
         }
     }
 }
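
The rewrite above replaces "attach unreachable regions to some existing codegenned function" with "synthesize a weak, never-called function per unused `DefId`". A hedged example of the source shape this affects, assuming a build with `-Z instrument-coverage`:

    // `never_called` is not codegenned normally; the new path declares a
    // synthetic weak function for it with one never-incremented counter,
    // so llvm-cov reports its lines with a count of 0 instead of omitting
    // them from the report.
    #[allow(dead_code)]
    fn never_called() -> u32 {
        42
    }

    fn main() {
        println!("only main runs");
    }
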
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index e47b8fd..afc2bdb 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -1,5 +1,6 @@
 use crate::llvm;
 
+use crate::abi::{Abi, FnAbi};
 use crate::builder::Builder;
 use crate::common::CodegenCx;
 
@@ -7,33 +8,47 @@
 use llvm::coverageinfo::CounterMappingRegion;
 use rustc_codegen_ssa::coverageinfo::map::{CounterExpression, FunctionCoverage};
 use rustc_codegen_ssa::traits::{
-    BaseTypeMethods, CoverageInfoBuilderMethods, CoverageInfoMethods, MiscMethods, StaticMethods,
+    BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, CoverageInfoMethods,
+    MiscMethods, StaticMethods,
 };
 use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
 use rustc_llvm::RustString;
+use rustc_middle::bug;
 use rustc_middle::mir::coverage::{
     CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op,
 };
+use rustc_middle::ty;
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_middle::ty::subst::InternalSubsts;
 use rustc_middle::ty::Instance;
 
 use std::cell::RefCell;
 use std::ffi::CString;
 
+use std::iter;
 use tracing::debug;
 
 pub mod mapgen;
 
+const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
+
 const VAR_ALIGN_BYTES: usize = 8;
 
 /// A context object for maintaining all state needed by the coverageinfo module.
-pub struct CrateCoverageContext<'tcx> {
+pub struct CrateCoverageContext<'ll, 'tcx> {
     // Coverage data for each instrumented function identified by DefId.
     pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>>>,
+    pub(crate) pgo_func_name_var_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>,
 }
 
-impl<'tcx> CrateCoverageContext<'tcx> {
+impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
     pub fn new() -> Self {
-        Self { function_coverage_map: Default::default() }
+        Self {
+            function_coverage_map: Default::default(),
+            pgo_func_name_var_map: Default::default(),
+        }
     }
 
     pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>> {
@@ -41,23 +56,47 @@
     }
 }
 
-impl CoverageInfoMethods for CodegenCx<'ll, 'tcx> {
+impl CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn coverageinfo_finalize(&self) {
         mapgen::finalize(self)
     }
+
+    fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
+        if let Some(coverage_context) = self.coverage_context() {
+            debug!("getting pgo_func_name_var for instance={:?}", instance);
+            let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut();
+            pgo_func_name_var_map
+                .entry(instance)
+                .or_insert_with(|| create_pgo_func_name_var(self, instance))
+        } else {
+            bug!("Could not get the `coverage_context`");
+        }
+    }
+
+    /// Functions with MIR-based coverage are normally codegenned _only_ if
+    /// called. LLVM coverage tools typically expect every function to be
+    /// defined (even if unused), with at least one call to LLVM intrinsic
+    /// `instrprof.increment`.
+    ///
+    /// Codegen a small function that will never be called, with one counter
+    /// that will never be incremented.
+    ///
+    /// For used/called functions, the coverageinfo was already added to the
+    /// `function_coverage_map` (keyed by function `Instance`) during codegen.
+    /// But in this case, since the unused function was _not_ previously
+    /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
+    /// them. The first `CodeRegion` is used to add a single counter, with the
+    /// same counter ID used in the injected `instrprof.increment` intrinsic
+    /// call. Since the function is never called, all other `CodeRegion`s can be
+    /// added as `unreachable_region`s.
+    fn define_unused_fn(&self, def_id: DefId) {
+        let instance = declare_unused_fn(self, &def_id);
+        codegen_unused_fn_and_counter(self, instance);
+        add_unused_function_coverage(self, instance, def_id);
+    }
 }
 
 impl CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
-    /// Calls llvm::createPGOFuncNameVar() with the given function instance's mangled function name.
-    /// The LLVM API returns an llvm::GlobalVariable containing the function name, with the specific
-    /// variable name and linkage required by LLVM InstrProf source-based coverage instrumentation.
-    fn create_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value {
-        let llfn = self.cx.get_fn(instance);
-        let mangled_fn_name = CString::new(self.tcx.symbol_name(instance).name)
-            .expect("error converting function name to C string");
-        unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
-    }
-
     fn set_function_source_hash(
         &mut self,
         instance: Instance<'tcx>,
@@ -145,6 +184,100 @@
     }
 }
 
+fn declare_unused_fn(cx: &CodegenCx<'ll, 'tcx>, def_id: &DefId) -> Instance<'tcx> {
+    let tcx = cx.tcx;
+
+    let instance = Instance::new(
+        *def_id,
+        InternalSubsts::for_item(tcx, *def_id, |param, _| {
+            if let ty::GenericParamDefKind::Lifetime = param.kind {
+                tcx.lifetimes.re_erased.into()
+            } else {
+                tcx.mk_param_from_def(param)
+            }
+        }),
+    );
+
+    let llfn = cx.declare_fn(
+        &tcx.symbol_name(instance).name,
+        &FnAbi::of_fn_ptr(
+            cx,
+            ty::Binder::dummy(tcx.mk_fn_sig(
+                iter::once(tcx.mk_unit()),
+                tcx.mk_unit(),
+                false,
+                hir::Unsafety::Unsafe,
+                Abi::Rust,
+            )),
+            &[],
+        ),
+    );
+
+    llvm::set_linkage(llfn, llvm::Linkage::WeakAnyLinkage);
+    llvm::set_visibility(llfn, llvm::Visibility::Hidden);
+
+    assert!(cx.instances.borrow_mut().insert(instance, llfn).is_none());
+
+    instance
+}
+
+fn codegen_unused_fn_and_counter(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) {
+    let llfn = cx.get_fn(instance);
+    let mut bx = Builder::new_block(cx, llfn, "unused_function");
+    let fn_name = bx.get_pgo_func_name_var(instance);
+    let hash = bx.const_u64(0);
+    let num_counters = bx.const_u32(1);
+    let index = bx.const_u32(u32::from(UNUSED_FUNCTION_COUNTER_ID));
+    debug!(
+        "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?},
+            index={:?}) for unused function: {:?}",
+        fn_name, hash, num_counters, index, instance
+    );
+    bx.instrprof_increment(fn_name, hash, num_counters, index);
+    bx.ret_void();
+}
+
+fn add_unused_function_coverage(
+    cx: &CodegenCx<'ll, 'tcx>,
+    instance: Instance<'tcx>,
+    def_id: DefId,
+) {
+    let tcx = cx.tcx;
+
+    let mut function_coverage = FunctionCoverage::unused(tcx, instance);
+    for (index, &code_region) in tcx.covered_code_regions(def_id).iter().enumerate() {
+        if index == 0 {
+            // Insert at least one real counter so the LLVM CoverageMappingReader will find expected
+            // definitions.
+            function_coverage.add_counter(UNUSED_FUNCTION_COUNTER_ID, code_region.clone());
+        } else {
+            function_coverage.add_unreachable_region(code_region.clone());
+        }
+    }
+
+    if let Some(coverage_context) = cx.coverage_context() {
+        coverage_context.function_coverage_map.borrow_mut().insert(instance, function_coverage);
+    } else {
+        bug!("Could not get the `coverage_context`");
+    }
+}
+
+/// Calls llvm::createPGOFuncNameVar() with the given function instance's
+/// mangled function name. The LLVM API returns an llvm::GlobalVariable
+/// containing the function name, with the specific variable name and linkage
+/// required by LLVM InstrProf source-based coverage instrumentation. Use
+/// `bx.get_pgo_func_name_var()` to ensure the variable is only created once per
+/// `Instance`.
+fn create_pgo_func_name_var(
+    cx: &CodegenCx<'ll, 'tcx>,
+    instance: Instance<'tcx>,
+) -> &'ll llvm::Value {
+    let mangled_fn_name = CString::new(cx.tcx.symbol_name(instance).name)
+        .expect("error converting function name to C string");
+    let llfn = cx.get_fn(instance);
+    unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
+}
+
 pub(crate) fn write_filenames_section_to_buffer<'a>(
     filenames: impl IntoIterator<Item = &'a CString>,
     buffer: &RustString,
@@ -177,6 +310,7 @@
         );
     }
 }
+
 pub(crate) fn hash_str(strval: &str) -> u64 {
     let strval = CString::new(strval).expect("null error converting hashable str to C string");
     unsafe { llvm::LLVMRustCoverageHashCString(strval.as_ptr()) }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/doc.md b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
new file mode 100644
index 0000000..f983d09
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
@@ -0,0 +1,180 @@
+# Debug Info Module
+
+This module serves the purpose of generating debug symbols. We use LLVM's
+[source level debugging](https://llvm.org/docs/SourceLevelDebugging.html)
+features for generating the debug information. The general principle is
+this:
+
+Given the right metadata in the LLVM IR, the LLVM code generator is able to
+create DWARF debug symbols for the given code. The
+[metadata](https://llvm.org/docs/LangRef.html#metadata-type) is structured
+much like DWARF *debugging information entries* (DIE), representing type
+information such as datatype layout, function signatures, block layout,
+variable location and scope information, etc. It is the purpose of this
+module to generate correct metadata and insert it into the LLVM IR.
+
+As the exact format of metadata trees may change between different LLVM
+versions, we now use LLVM
+[DIBuilder](https://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html)
+to create metadata where possible. This will hopefully ease the adaptation of
+this module to future LLVM versions.
+
+The public API of the module is a set of functions that will insert the
+correct metadata into the LLVM IR when called with the right parameters.
+The module is thus driven from an outside client with functions like
+`debuginfo::create_local_var_metadata(bx: block, local: &ast::local)`.
+
+Internally the module will try to reuse already created metadata by
+utilizing a cache. The way to get a shared metadata node when needed is
+thus to just call the corresponding function in this module:
+
+    let file_metadata = file_metadata(cx, file);
+
+The function will take care of probing the cache for an existing node for
+that exact file path.
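+
+This caching is plain memoization. A minimal, self-contained sketch of the
+pattern (using `String` as a stand-in for the real LLVM metadata node type;
+the names below are illustrative, not this module's actual API):
+
+```
+use std::cell::RefCell;
+use std::collections::HashMap;
+
+struct Cache {
+    created_files: RefCell<HashMap<String, String>>,
+}
+
+impl Cache {
+    fn file_metadata(&self, path: &str) -> String {
+        if let Some(node) = self.created_files.borrow().get(path) {
+            return node.clone();
+        }
+        // Not cached yet: build the node once and remember it.
+        let node = format!("DIFile({})", path);
+        self.created_files.borrow_mut().insert(path.to_string(), node.clone());
+        node
+    }
+}
+```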
+
+All private state used by the module is stored within either the
+CrateDebugContext struct (owned by the CodegenCx) or the
+FunctionDebugContext (owned by the FunctionCx).
+
+This file consists of three conceptual sections:
+1. The public interface of the module
+2. Module-internal metadata creation functions
+3. Minor utility functions
+
+
+## Recursive Types
+
+Some kinds of types, such as structs and enums, can be recursive. That means
+that the type definition of some type X refers to some other type which in
+turn (transitively) refers to X. This introduces cycles into the type
+referral graph. A naive algorithm doing an on-demand, depth-first traversal
+of this graph when describing types can get trapped in an endless loop
+when it reaches such a cycle.
+
+For example, the following simple type for a singly-linked list...
+
+```
+struct List {
+    value: i32,
+    tail: Option<Box<List>>,
+}
+```
+
+will generate the following callstack with a naive DFS algorithm:
+
+```
+describe(t = List)
+  describe(t = i32)
+  describe(t = Option<Box<List>>)
+    describe(t = Box<List>)
+      describe(t = List) // at the beginning again...
+      ...
+```
+
+To break cycles like these, we use "forward declarations". That is, when
+the algorithm encounters a possibly recursive type (any struct or enum), it
+immediately creates a type description node and inserts it into the cache
+*before* describing the members of the type. This type description is just
+a stub (as type members are not described and added to it yet) but it
+allows the algorithm to already refer to the type. After the stub is
+inserted into the cache, the algorithm continues as before. If it now
+encounters a recursive reference, it will hit the cache and will not try to
+describe the type anew.
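+
+A self-contained sketch of this stub-first strategy (strings stand in for
+real type description nodes; `describe` and `Node` are illustrative names,
+not this module's actual API):
+
+```
+use std::collections::HashMap;
+
+enum Node {
+    Stub,                   // forward declaration; members not yet known
+    Described(Vec<String>), // fully described type
+}
+
+fn describe(ty: &str, members: &HashMap<&str, Vec<&str>>, cache: &mut HashMap<String, Node>) {
+    if cache.contains_key(ty) {
+        return; // cache hit: either fully described or a stub mid-cycle
+    }
+    // Insert the stub *before* recursing, so cycles terminate here.
+    cache.insert(ty.to_string(), Node::Stub);
+    let described: Vec<String> = members
+        .get(ty)
+        .map(|ms| {
+            ms.iter()
+                .map(|m| {
+                    describe(m, members, cache); // safe even if `m` refers back to `ty`
+                    m.to_string()
+                })
+                .collect()
+        })
+        .unwrap_or_default();
+    cache.insert(ty.to_string(), Node::Described(described));
+}
+```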
+
+This behavior is encapsulated in the 'RecursiveTypeDescription' enum,
+which represents a kind of continuation, storing all state needed to
+continue traversal at the type members after the type has been registered
+with the cache. (This implementation approach might be a tad over-engineered
+and may change in the future.)
+
+
+## Source Locations and Line Information
+
+In addition to data type descriptions, the debugging information must also
+allow mapping machine code locations back to source code locations in order
+to be useful. This functionality is also handled in this module. The
+following functions control source mappings:
+
++ `set_source_location()`
++ `clear_source_location()`
++ `start_emitting_source_locations()`
+
+`set_source_location()` sets the current source location. All IR
+instructions created after a call to this function will be linked to the
+given source location, until another location is specified with
+`set_source_location()` or the source location is cleared with
+`clear_source_location()`. In the latter case, subsequent IR instructions
+will not be linked to any source location. As you can see, this is a
+stateful API (mimicking the one in LLVM), so be careful with source
+locations set by previous calls. It's probably best to not rely on any
+specific state being present at a given point in code.
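+
+To make the statefulness concrete, here is a toy model of the API (a
+stand-alone sketch, not the real builder; the instruction strings and the
+`SourceLocation` type are made up for illustration):
+
+```
+#[derive(Clone, Copy, Debug)]
+struct SourceLocation { line: u32, col: u32 }
+
+#[derive(Default)]
+struct Builder {
+    current_loc: Option<SourceLocation>,
+    emitted: Vec<(&'static str, Option<SourceLocation>)>,
+}
+
+impl Builder {
+    fn set_source_location(&mut self, loc: SourceLocation) { self.current_loc = Some(loc); }
+    fn clear_source_location(&mut self) { self.current_loc = None; }
+    fn emit(&mut self, inst: &'static str) {
+        // Every instruction is tagged with whatever location is current.
+        self.emitted.push((inst, self.current_loc));
+    }
+}
+
+let mut bx = Builder::default();
+bx.emit("alloca");                                          // no location yet
+bx.set_source_location(SourceLocation { line: 3, col: 5 });
+bx.emit("load");                                            // linked to 3:5
+bx.clear_source_location();
+bx.emit("store");                                           // no location again
+```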
+
+One topic that deserves some extra attention is *function prologues*. At
+the beginning of a function's machine code there are typically a few
+instructions for loading argument values into allocas and checking if
+there's enough stack space for the function to execute. This *prologue* is
+not visible in the source code and LLVM puts a special PROLOGUE END marker
+into the line table at the first non-prologue instruction of the function.
+In order to find out where the prologue ends, LLVM looks for the first
+instruction in the function body that is linked to a source location. So,
+when generating prologue instructions we have to make sure that we don't
+emit source location information until the 'real' function body begins. For
+this reason, source location emission is disabled by default for any new
+function being codegened and is only activated after a call to the third
+function from the list above, `start_emitting_source_locations()`. This
+function should be called right before regularly starting to codegen the
+top-level block of the given function.
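+
+With the toy model above, LLVM's prologue-end detection amounts to finding
+the first instruction that carries a location (a sketch of the idea, not
+LLVM's actual algorithm):
+
+```
+fn prologue_end(instrs: &[(&str, Option<(u32, u32)>)]) -> Option<usize> {
+    // The first instruction linked to a source location marks PROLOGUE END.
+    instrs.iter().position(|(_inst, loc)| loc.is_some())
+}
+
+assert_eq!(prologue_end(&[("alloca", None), ("load", Some((3, 5)))]), Some(1));
+```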
+
+There is one exception to the above rule: `llvm.dbg.declare` instructions
+must be linked to the source location of the variable being declared. For
+function parameters, these `llvm.dbg.declare` instructions typically occur
+in the middle of the prologue; however, they are ignored by LLVM's prologue
+detection. The `create_argument_metadata()` and related functions take care
+of linking the `llvm.dbg.declare` instructions to the correct source
+locations even while source location emission is still disabled, so there
+is no need to do anything special with source location handling here.
+
+## Unique Type Identification
+
+In order for link-time optimization to work properly, LLVM needs a unique
+type identifier that tells it across compilation units which types are the
+same as others. This type identifier is created by
+`TypeMap::get_unique_type_id_of_type()` using the following algorithm:
+
+1. Primitive types have their name as ID
+
+2. Structs, enums and traits have a multipart identifier
+
+  1. The first part is the SVH (strict version hash) of the crate they
+     were originally defined in
+
+  2. The second part is the ast::NodeId of the definition in their
+     original crate
+
+  3. The final part is a concatenation of the type IDs of their concrete
+     type arguments if they are generic types.
+
+3. Tuple-, pointer-, and function types are structurally identified, which
+   means that they are equivalent if their component types are equivalent
+   (i.e., `(i32, i32)` is the same regardless of which crate it is used in).
+
+This algorithm also provides a stable ID for types that are defined in one
+crate but instantiated from metadata within another crate. We just have to
+take care to always map crate and `NodeId`s back to the original crate
+context.
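+
+As a hedged illustration of the multipart scheme (the real encoding inside
+`TypeMap::get_unique_type_id_of_type()` differs; this only shows the shape):
+
+```
+fn unique_type_id(crate_svh: &str, node_id: u32, type_arg_ids: &[&str]) -> String {
+    // SVH of the defining crate + NodeId + concatenated type argument IDs.
+    format!("{}/{}<{}>", crate_svh, node_id, type_arg_ids.join(","))
+}
+
+// e.g. a generic struct instantiated with `i32`, defined in crate SVH "a1b2c3":
+assert_eq!(unique_type_id("a1b2c3", 42, &["i32"]), "a1b2c3/42<i32>");
+```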
+
+As a side effect, these unique type IDs also help to solve a problem arising
+from lifetime parameters. Since lifetime parameters are completely omitted
+in debuginfo, more than one `Ty` instance may map to the same debuginfo
+type metadata, that is, some struct `Struct<'a>` may have N instantiations
+with different concrete substitutions for `'a`, and thus there will be N
+`Ty` instances for the type `Struct<'a>` even though it is not generic
+otherwise. Unfortunately, this means that we cannot use `ty::type_id()` as a
+cheap identifier for type metadata -- we have done this in the past, but it
+led to unnecessary metadata duplication in the best case and LLVM
+assertions in the worst. However, the unique type ID as described above
+*can* be used as an identifier. Since it is comparatively expensive to
+construct, though, `ty::type_id()` is still used additionally as an
+optimization for cases where the exact same type has been seen before
+(which is most of the time).
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs b/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
deleted file mode 100644
index 10dd590..0000000
--- a/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
+++ /dev/null
@@ -1,179 +0,0 @@
-//! # Debug Info Module
-//!
-//! This module serves the purpose of generating debug symbols. We use LLVM's
-//! [source level debugging](https://llvm.org/docs/SourceLevelDebugging.html)
-//! features for generating the debug information. The general principle is
-//! this:
-//!
-//! Given the right metadata in the LLVM IR, the LLVM code generator is able to
-//! create DWARF debug symbols for the given code. The
-//! [metadata](https://llvm.org/docs/LangRef.html#metadata-type) is structured
-//! much like DWARF *debugging information entries* (DIE), representing type
-//! information such as datatype layout, function signatures, block layout,
-//! variable location and scope information, etc. It is the purpose of this
-//! module to generate correct metadata and insert it into the LLVM IR.
-//!
-//! As the exact format of metadata trees may change between different LLVM
-//! versions, we now use LLVM
-//! [DIBuilder](https://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html)
-//! to create metadata where possible. This will hopefully ease the adaption of
-//! this module to future LLVM versions.
-//!
-//! The public API of the module is a set of functions that will insert the
-//! correct metadata into the LLVM IR when called with the right parameters.
-//! The module is thus driven from an outside client with functions like
-//! `debuginfo::create_local_var_metadata(bx: block, local: &ast::local)`.
-//!
-//! Internally the module will try to reuse already created metadata by
-//! utilizing a cache. The way to get a shared metadata node when needed is
-//! thus to just call the corresponding function in this module:
-//!
-//!     let file_metadata = file_metadata(cx, file);
-//!
-//! The function will take care of probing the cache for an existing node for
-//! that exact file path.
-//!
-//! All private state used by the module is stored within either the
-//! CrateDebugContext struct (owned by the CodegenCx) or the
-//! FunctionDebugContext (owned by the FunctionCx).
-//!
-//! This file consists of three conceptual sections:
-//! 1. The public interface of the module
-//! 2. Module-internal metadata creation functions
-//! 3. Minor utility functions
-//!
-//!
-//! ## Recursive Types
-//!
-//! Some kinds of types, such as structs and enums can be recursive. That means
-//! that the type definition of some type X refers to some other type which in
-//! turn (transitively) refers to X. This introduces cycles into the type
-//! referral graph. A naive algorithm doing an on-demand, depth-first traversal
-//! of this graph when describing types, can get trapped in an endless loop
-//! when it reaches such a cycle.
-//!
-//! For example, the following simple type for a singly-linked list...
-//!
-//! ```
-//! struct List {
-//!     value: i32,
-//!     tail: Option<Box<List>>,
-//! }
-//! ```
-//!
-//! will generate the following callstack with a naive DFS algorithm:
-//!
-//! ```
-//! describe(t = List)
-//!   describe(t = i32)
-//!   describe(t = Option<Box<List>>)
-//!     describe(t = Box<List>)
-//!       describe(t = List) // at the beginning again...
-//!       ...
-//! ```
-//!
-//! To break cycles like these, we use "forward declarations". That is, when
-//! the algorithm encounters a possibly recursive type (any struct or enum), it
-//! immediately creates a type description node and inserts it into the cache
-//! *before* describing the members of the type. This type description is just
-//! a stub (as type members are not described and added to it yet) but it
-//! allows the algorithm to already refer to the type. After the stub is
-//! inserted into the cache, the algorithm continues as before. If it now
-//! encounters a recursive reference, it will hit the cache and does not try to
-//! describe the type anew.
-//!
-//! This behavior is encapsulated in the 'RecursiveTypeDescription' enum,
-//! which represents a kind of continuation, storing all state needed to
-//! continue traversal at the type members after the type has been registered
-//! with the cache. (This implementation approach might be a tad over-
-//! engineered and may change in the future)
-//!
-//!
-//! ## Source Locations and Line Information
-//!
-//! In addition to data type descriptions the debugging information must also
-//! allow to map machine code locations back to source code locations in order
-//! to be useful. This functionality is also handled in this module. The
-//! following functions allow to control source mappings:
-//!
-//! + set_source_location()
-//! + clear_source_location()
-//! + start_emitting_source_locations()
-//!
-//! `set_source_location()` allows to set the current source location. All IR
-//! instructions created after a call to this function will be linked to the
-//! given source location, until another location is specified with
-//! `set_source_location()` or the source location is cleared with
-//! `clear_source_location()`. In the later case, subsequent IR instruction
-//! will not be linked to any source location. As you can see, this is a
-//! stateful API (mimicking the one in LLVM), so be careful with source
-//! locations set by previous calls. It's probably best to not rely on any
-//! specific state being present at a given point in code.
-//!
-//! One topic that deserves some extra attention is *function prologues*. At
-//! the beginning of a function's machine code there are typically a few
-//! instructions for loading argument values into allocas and checking if
-//! there's enough stack space for the function to execute. This *prologue* is
-//! not visible in the source code and LLVM puts a special PROLOGUE END marker
-//! into the line table at the first non-prologue instruction of the function.
-//! In order to find out where the prologue ends, LLVM looks for the first
-//! instruction in the function body that is linked to a source location. So,
-//! when generating prologue instructions we have to make sure that we don't
-//! emit source location information until the 'real' function body begins. For
-//! this reason, source location emission is disabled by default for any new
-//! function being codegened and is only activated after a call to the third
-//! function from the list above, `start_emitting_source_locations()`. This
-//! function should be called right before regularly starting to codegen the
-//! top-level block of the given function.
-//!
-//! There is one exception to the above rule: `llvm.dbg.declare` instruction
-//! must be linked to the source location of the variable being declared. For
-//! function parameters these `llvm.dbg.declare` instructions typically occur
-//! in the middle of the prologue, however, they are ignored by LLVM's prologue
-//! detection. The `create_argument_metadata()` and related functions take care
-//! of linking the `llvm.dbg.declare` instructions to the correct source
-//! locations even while source location emission is still disabled, so there
-//! is no need to do anything special with source location handling here.
-//!
-//! ## Unique Type Identification
-//!
-//! In order for link-time optimization to work properly, LLVM needs a unique
-//! type identifier that tells it across compilation units which types are the
-//! same as others. This type identifier is created by
-//! `TypeMap::get_unique_type_id_of_type()` using the following algorithm:
-//!
-//! (1) Primitive types have their name as ID
-//! (2) Structs, enums and traits have a multipart identifier
-//!
-//!     (1) The first part is the SVH (strict version hash) of the crate they
-//!          were originally defined in
-//!
-//!     (2) The second part is the ast::NodeId of the definition in their
-//!          original crate
-//!
-//!     (3) The final part is a concatenation of the type IDs of their concrete
-//!          type arguments if they are generic types.
-//!
-//! (3) Tuple-, pointer and function types are structurally identified, which
-//!     means that they are equivalent if their component types are equivalent
-//!     (i.e., (i32, i32) is the same regardless in which crate it is used).
-//!
-//! This algorithm also provides a stable ID for types that are defined in one
-//! crate but instantiated from metadata within another crate. We just have to
-//! take care to always map crate and `NodeId`s back to the original crate
-//! context.
-//!
-//! As a side-effect these unique type IDs also help to solve a problem arising
-//! from lifetime parameters. Since lifetime parameters are completely omitted
-//! in debuginfo, more than one `Ty` instance may map to the same debuginfo
-//! type metadata, that is, some struct `Struct<'a>` may have N instantiations
-//! with different concrete substitutions for `'a`, and thus there will be N
-//! `Ty` instances for the type `Struct<'a>` even though it is not generic
-//! otherwise. Unfortunately this means that we cannot use `ty::type_id()` as
-//! cheap identifier for type metadata -- we have done this in the past, but it
-//! led to unnecessary metadata duplication in the best case and LLVM
-//! assertions in the worst. However, the unique type ID as described above
-//! *can* be used as identifier. Since it is comparatively expensive to
-//! construct, though, `ty::type_id()` is still used additionally as an
-//! optimization for cases where the exact same type has been seen before
-//! (which is most of the time).
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index d5b32e5..e6fa852 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -1083,9 +1083,9 @@
             );
         }
 
-        // Insert `llvm.ident` metadata on the wasm32 targets since that will
+        // Insert `llvm.ident` metadata on the wasm targets since that will
         // get hooked up to the "producer" sections `processed-by` information.
-        if tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
+        if tcx.sess.target.is_like_wasm {
             let name_metadata = llvm::LLVMMDStringInContext(
                 debug_context.llcontext,
                 rustc_producer.as_ptr().cast(),
@@ -1962,9 +1962,7 @@
 
     let discriminant_type_metadata = |discr: Primitive| {
         let enumerators_metadata: Vec<_> = match enum_type.kind() {
-            ty::Adt(def, _) => def
-                .discriminants(tcx)
-                .zip(&def.variants)
+            ty::Adt(def, _) => iter::zip(def.discriminants(tcx), &def.variants)
                 .map(|((_, discr), v)| {
                     let name = v.ident.as_str();
                     let is_unsigned = match discr.ty.kind() {
@@ -2336,9 +2334,7 @@
         if substs.types().next().is_some() {
             let generics = cx.tcx.generics_of(def.did);
             let names = get_parameter_names(cx, generics);
-            let template_params: Vec<_> = substs
-                .iter()
-                .zip(names)
+            let template_params: Vec<_> = iter::zip(substs, names)
                 .filter_map(|(kind, name)| {
                     if let GenericArgKind::Type(ty) = kind.unpack() {
                         let actual_type =
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 440e4d5..b928e90 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -1,5 +1,4 @@
-// See doc.rs for documentation.
-mod doc;
+#![doc = include_str!("doc.md")]
 
 use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
 
@@ -38,6 +37,7 @@
 use libc::c_uint;
 use smallvec::SmallVec;
 use std::cell::RefCell;
+use std::iter;
 use tracing::debug;
 
 mod create_scope_map;
@@ -344,7 +344,7 @@
             spflags |= DISPFlags::SPFlagOptimized;
         }
         if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) {
-            if id.to_def_id() == def_id {
+            if id == def_id {
                 spflags |= DISPFlags::SPFlagMainSubprogram;
             }
         }
@@ -449,9 +449,7 @@
             // Again, only create type information if full debuginfo is enabled
             let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
                 let names = get_parameter_names(cx, generics);
-                substs
-                    .iter()
-                    .zip(names)
+                iter::zip(substs, names)
                     .filter_map(|(kind, name)| {
                         if let GenericArgKind::Type(ty) = kind.unpack() {
                             let actual_type =
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index af366f9..fc6c1ab 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -1053,46 +1053,48 @@
         let vec_ty = bx.type_vector(elem_ty, in_len);
 
         let (intr_name, fn_ty) = match name {
-            sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
-            sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
-            sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
-            sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
-            sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
             sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
-            sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
             sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
             sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
             sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
             sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
             sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
             sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
-            sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
+            sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
             _ => return_error!("unrecognized intrinsic `{}`", name),
         };
-
         let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
         let f = bx.declare_cfn(&llvm_name, llvm::UnnamedAddr::No, fn_ty);
         let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
-        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
         Ok(c)
     }
 
     if std::matches!(
         name,
-        sym::simd_fsqrt
-            | sym::simd_fsin
-            | sym::simd_fcos
+        sym::simd_ceil
             | sym::simd_fabs
-            | sym::simd_floor
-            | sym::simd_ceil
-            | sym::simd_fexp
+            | sym::simd_fcos
             | sym::simd_fexp2
+            | sym::simd_fexp
             | sym::simd_flog10
             | sym::simd_flog2
             | sym::simd_flog
-            | sym::simd_fpowi
-            | sym::simd_fpow
+            | sym::simd_floor
             | sym::simd_fma
+            | sym::simd_fpow
+            | sym::simd_fpowi
+            | sym::simd_fsin
+            | sym::simd_fsqrt
+            | sym::simd_round
+            | sym::simd_trunc
     ) {
         return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
     }
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index d11c159..5ca4b22 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -8,10 +8,12 @@
 #![feature(bool_to_option)]
 #![feature(const_cstr_unchecked)]
 #![feature(crate_visibility_modifier)]
+#![feature(extended_key_value_attributes)]
 #![feature(extern_types)]
 #![feature(in_band_lifetimes)]
+#![feature(iter_zip)]
 #![feature(nll)]
-#![feature(or_patterns)]
+#![cfg_attr(bootstrap, feature(or_patterns))]
 #![recursion_limit = "256"]
 
 use back::write::{create_informational_target_machine, create_target_machine};
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 82cd1be..32b1526 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -190,33 +190,6 @@
     RealPredicateTrue = 15,
 }
 
-impl RealPredicate {
-    pub fn from_generic(realpred: rustc_codegen_ssa::common::RealPredicate) -> Self {
-        match realpred {
-            rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => {
-                RealPredicate::RealPredicateFalse
-            }
-            rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ,
-            rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT,
-            rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE,
-            rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT,
-            rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE,
-            rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE,
-            rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD,
-            rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO,
-            rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ,
-            rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT,
-            rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE,
-            rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT,
-            rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE,
-            rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE,
-            rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => {
-                RealPredicate::RealPredicateTrue
-            }
-        }
-    }
-}
-
 /// LLVMTypeKind
 #[derive(Copy, Clone, PartialEq, Debug)]
 #[repr(C)]
@@ -711,7 +684,7 @@
     }
 
     impl CounterMappingRegion {
-        pub fn code_region(
+        crate fn code_region(
             counter: coverage_map::Counter,
             file_id: u32,
             start_line: u32,
@@ -731,7 +704,10 @@
             }
         }
 
-        pub fn expansion_region(
+        // This function might be used in the future; the LLVM API is still evolving, as is coverage
+        // support.
+        #[allow(dead_code)]
+        crate fn expansion_region(
             file_id: u32,
             expanded_file_id: u32,
             start_line: u32,
@@ -751,7 +727,10 @@
             }
         }
 
-        pub fn skipped_region(
+        // This function might be used in the future; the LLVM API is still evolving, as is coverage
+        // support.
+        #[allow(dead_code)]
+        crate fn skipped_region(
             file_id: u32,
             start_line: u32,
             start_col: u32,
@@ -770,7 +749,10 @@
             }
         }
 
-        pub fn gap_region(
+        // This function might be used in the future; the LLVM API is still evolving, as is coverage
+        // support.
+        #[allow(dead_code)]
+        crate fn gap_region(
             counter: coverage_map::Counter,
             file_id: u32,
             start_line: u32,
@@ -1031,6 +1013,7 @@
     pub fn LLVMSetSection(Global: &Value, Section: *const c_char);
     pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility;
     pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility);
+    pub fn LLVMRustSetDSOLocal(Global: &Value, is_dso_local: bool);
     pub fn LLVMGetAlignment(Global: &Value) -> c_uint;
     pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint);
     pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass);
@@ -1371,7 +1354,7 @@
     pub fn LLVMBuildNeg(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
     pub fn LLVMBuildFNeg(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
     pub fn LLVMBuildNot(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
-    pub fn LLVMRustSetHasUnsafeAlgebra(Instr: &Value);
+    pub fn LLVMRustSetFastMath(Instr: &Value);
 
     // Memory
     pub fn LLVMBuildAlloca(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
@@ -2145,7 +2128,13 @@
     pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
 
     pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine);
-    pub fn LLVMRustPrintTargetFeatures(T: &TargetMachine);
+    pub fn LLVMRustGetTargetFeaturesCount(T: &TargetMachine) -> size_t;
+    pub fn LLVMRustGetTargetFeature(
+        T: &TargetMachine,
+        Index: size_t,
+        Feature: &mut *const c_char,
+        Desc: &mut *const c_char,
+    );
 
     pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
     pub fn LLVMRustCreateTargetMachine(
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index c7dff41..97684ca 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -1,5 +1,5 @@
 use crate::back::write::create_informational_target_machine;
-use crate::llvm;
+use crate::{llvm, llvm_util};
 use libc::c_int;
 use rustc_codegen_ssa::target_features::supported_target_features;
 use rustc_data_structures::fx::FxHashSet;
@@ -10,6 +10,7 @@
 use rustc_target::spec::{MergeFunctions, PanicStrategy};
 use std::ffi::{CStr, CString};
 
+use std::ptr;
 use std::slice;
 use std::str;
 use std::sync::atomic::{AtomicBool, Ordering};
@@ -83,6 +84,17 @@
         if !sess.opts.debugging_opts.no_generate_arange_section {
             add("-generate-arange-section", false);
         }
+
+        // FIXME(nagisa): disable the machine outliner by default in LLVM versions 11 and up,
+        // where it was introduced.
+        //
+        // This should remain in place until https://reviews.llvm.org/D103167 is fixed. If LLVM
+        // has been upgraded since, consider adjusting the version check below to contain an upper
+        // bound.
+        if llvm_util::get_version() >= (11, 0, 0) {
+            add("-enable-machine-outliner=never", false);
+        }
+
         match sess.opts.debugging_opts.merge_functions.unwrap_or(sess.target.merge_functions) {
             MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
             MergeFunctions::Aliases => {
@@ -98,6 +110,9 @@
         // during inlining. Unfortunately these may block other optimizations.
         add("-preserve-alignment-assumptions-during-inlining=false", false);
 
+        // Use non-zero `import-instr-limit` multiplier for cold callsites.
+        add("-import-cold-multiplier=0.1", false);
+
         for arg in sess_args {
             add(&(*arg), true);
         }
@@ -189,15 +204,77 @@
     }
 }
 
+fn llvm_target_features(tm: &llvm::TargetMachine) -> Vec<(&str, &str)> {
+    let len = unsafe { llvm::LLVMRustGetTargetFeaturesCount(tm) };
+    let mut ret = Vec::with_capacity(len);
+    for i in 0..len {
+        unsafe {
+            let mut feature = ptr::null();
+            let mut desc = ptr::null();
+            llvm::LLVMRustGetTargetFeature(tm, i, &mut feature, &mut desc);
+            if feature.is_null() || desc.is_null() {
+                bug!("LLVM returned a `null` target feature string");
+            }
+            let feature = CStr::from_ptr(feature).to_str().unwrap_or_else(|e| {
+                bug!("LLVM returned a non-utf8 feature string: {}", e);
+            });
+            let desc = CStr::from_ptr(desc).to_str().unwrap_or_else(|e| {
+                bug!("LLVM returned a non-utf8 feature string: {}", e);
+            });
+            ret.push((feature, desc));
+        }
+    }
+    ret
+}
+
+fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) {
+    let mut target_features = llvm_target_features(tm);
+    let mut rustc_target_features = supported_target_features(sess)
+        .iter()
+        .filter_map(|(feature, _gate)| {
+            let llvm_feature = to_llvm_feature(sess, *feature);
+            // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings.
+            target_features.binary_search_by_key(&llvm_feature, |(f, _d)| *f).ok().map(|index| {
+                let (_f, desc) = target_features.remove(index);
+                (*feature, desc)
+            })
+        })
+        .collect::<Vec<_>>();
+    rustc_target_features.extend_from_slice(&[(
+        "crt-static",
+        "Enables C Run-time Libraries to be statically linked",
+    )]);
+    let max_feature_len = target_features
+        .iter()
+        .chain(rustc_target_features.iter())
+        .map(|(feature, _desc)| feature.len())
+        .max()
+        .unwrap_or(0);
+
+    println!("Features supported by rustc for this target:");
+    for (feature, desc) in &rustc_target_features {
+        println!("    {1:0$} - {2}.", max_feature_len, feature, desc);
+    }
+    println!("\nCode-generation features supported by LLVM for this target:");
+    for (feature, desc) in &target_features {
+        println!("    {1:0$} - {2}.", max_feature_len, feature, desc);
+    }
+    if target_features.is_empty() {
+        println!("    Target features listing is not supported by this LLVM version.");
+    }
+    println!("\nUse +feature to enable a feature, or -feature to disable it.");
+    println!("For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
+    println!("Code-generation features cannot be used in cfg or #[target_feature],");
+    println!("and may be renamed or removed in a future version of LLVM or rustc.\n");
+}
+
 pub(crate) fn print(req: PrintRequest, sess: &Session) {
     require_inited();
     let tm = create_informational_target_machine(sess);
-    unsafe {
-        match req {
-            PrintRequest::TargetCPUs => llvm::LLVMRustPrintTargetCPUs(tm),
-            PrintRequest::TargetFeatures => llvm::LLVMRustPrintTargetFeatures(tm),
-            _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
-        }
+    match req {
+        PrintRequest::TargetCPUs => unsafe { llvm::LLVMRustPrintTargetCPUs(tm) },
+        PrintRequest::TargetFeatures => print_target_features(sess, tm),
+        _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
     }
 }
 
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index 992e83d..fc1f364 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -10,7 +10,9 @@
 use rustc_middle::mir::mono::{Linkage, Visibility};
 use rustc_middle::ty::layout::FnAbiExt;
 use rustc_middle::ty::{self, Instance, TypeFoldable};
+use rustc_session::config::CrateType;
 use rustc_target::abi::LayoutOf;
+use rustc_target::spec::RelocModel;
 use tracing::debug;
 
 impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> {
@@ -35,6 +37,9 @@
         unsafe {
             llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage));
             llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility));
+            if self.should_assume_dso_local(linkage, visibility) {
+                llvm::LLVMRustSetDSOLocal(g, true);
+            }
         }
 
         self.instances.borrow_mut().insert(instance, g);
@@ -79,6 +84,42 @@
 
         attributes::from_fn_attrs(self, lldecl, instance);
 
+        unsafe {
+            if self.should_assume_dso_local(linkage, visibility) {
+                llvm::LLVMRustSetDSOLocal(lldecl, true);
+            }
+        }
+
         self.instances.borrow_mut().insert(instance, lldecl);
     }
 }
+
+impl CodegenCx<'ll, 'tcx> {
+    /// Whether a definition (NB: not declaration!) can be assumed to be local to a group of
+    /// libraries that form a single DSO or executable.
+    pub(crate) unsafe fn should_assume_dso_local(
+        &self,
+        linkage: Linkage,
+        visibility: Visibility,
+    ) -> bool {
+        if matches!(linkage, Linkage::Internal | Linkage::Private) {
+            return true;
+        }
+
+        if visibility != Visibility::Default && linkage != Linkage::ExternalWeak {
+            return true;
+        }
+
+        // Static relocation model should force copy relocations everywhere.
+        if self.tcx.sess.relocation_model() == RelocModel::Static {
+            return true;
+        }
+
+        // Symbols from executables can't really be imported any further.
+        if self.tcx.sess.crate_types().iter().all(|ty| *ty == CrateType::Executable) {
+            return true;
+        }
+
+        false
+    }
+}