Importing rustc-1.62.0

Test: ./build.py --lto=thin
Bug: 236723597
Change-Id: I3e25aff564eae13de6385b71ef3b45646a405916
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index b14a4f2..b9baa87 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -510,9 +510,9 @@
             // If the value is a boolean, the range is 0..2 and that ultimately
             // becomes 0..0 when the type becomes i1, which would be rejected
             // by the LLVM verifier.
-            if let Int(..) = scalar.value {
+            if let Int(..) = scalar.primitive() {
                 if !scalar.is_bool() && !scalar.is_always_valid(bx) {
-                    bx.range_metadata(callsite, scalar.valid_range);
+                    bx.range_metadata(callsite, scalar.valid_range(bx));
                 }
             }
         }
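
The guard above skips `bool` because LLVM interprets range metadata at the final integer width. A standalone sketch (toy helper, not rustc's code; assumes `bits < 128`) of why the `0..2` bound degenerates once the type becomes `i1`:

```rust
/// Truncate a range bound to `bits` bits (toy version; assumes bits < 128).
fn truncate(value: u128, bits: u32) -> u128 {
    value & ((1u128 << bits) - 1)
}

fn main() {
    // A bool's valid range is [0, 2). At width i1 the bound 2 truncates
    // to 0, leaving the degenerate range [0, 0), which the LLVM verifier
    // rejects -- hence the `!scalar.is_bool()` check above.
    assert_eq!(truncate(2, 1), 0);
}
```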
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 96c7d88..e994001 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -290,6 +290,11 @@
         }
         attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });
 
+        // Switch to the 'normal' basic block if we did an `invoke` instead of a `call`
+        if let Some((dest, _, _)) = dest_catch_funclet {
+            self.switch_to_block(dest);
+        }
+
         // Write results to outputs
         for (idx, op) in operands.iter().enumerate() {
             if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
@@ -307,11 +312,11 @@
     }
 }
 
-impl AsmMethods for CodegenCx<'_, '_> {
+impl<'tcx> AsmMethods<'tcx> for CodegenCx<'_, 'tcx> {
     fn codegen_global_asm(
         &self,
         template: &[InlineAsmTemplatePiece],
-        operands: &[GlobalAsmOperandRef],
+        operands: &[GlobalAsmOperandRef<'tcx>],
         options: InlineAsmOptions,
         _line_spans: &[Span],
     ) {
@@ -337,6 +342,29 @@
                             // here unlike normal inline assembly.
                             template_str.push_str(string);
                         }
+                        GlobalAsmOperandRef::SymFn { instance } => {
+                            let llval = self.get_fn(instance);
+                            self.add_compiler_used_global(llval);
+                            let symbol = llvm::build_string(|s| unsafe {
+                                llvm::LLVMRustGetMangledName(llval, s);
+                            })
+                            .expect("symbol is not valid UTF-8");
+                            template_str.push_str(&symbol);
+                        }
+                        GlobalAsmOperandRef::SymStatic { def_id } => {
+                            let llval = self
+                                .renamed_statics
+                                .borrow()
+                                .get(&def_id)
+                                .copied()
+                                .unwrap_or_else(|| self.get_static(def_id));
+                            self.add_compiler_used_global(llval);
+                            let symbol = llvm::build_string(|s| unsafe {
+                                llvm::LLVMRustGetMangledName(llval, s);
+                            })
+                            .expect("symbol is not valid UTF-8");
+                            template_str.push_str(&symbol);
+                        }
                     }
                 }
             }
@@ -574,7 +602,9 @@
             InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
             InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
             InlineAsmRegClass::X86(
-                X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg,
+                X86InlineAsmRegClass::x87_reg
+                | X86InlineAsmRegClass::mmx_reg
+                | X86InlineAsmRegClass::kreg0,
             ) => unreachable!("clobber-only"),
             InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
             InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
@@ -659,7 +689,11 @@
             _ => unreachable!(),
         },
         InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
+        InlineAsmRegClass::X86(
+            X86InlineAsmRegClass::x87_reg
+            | X86InlineAsmRegClass::mmx_reg
+            | X86InlineAsmRegClass::kreg0,
+        ) => {
             unreachable!("clobber-only")
         }
         InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
@@ -729,7 +763,11 @@
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
         InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
+        InlineAsmRegClass::X86(
+            X86InlineAsmRegClass::x87_reg
+            | X86InlineAsmRegClass::mmx_reg
+            | X86InlineAsmRegClass::kreg0,
+        ) => {
             unreachable!("clobber-only")
         }
         InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
@@ -753,7 +791,7 @@
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
 fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
-    match scalar.value {
+    match scalar.primitive() {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
         Primitive::Int(Integer::I32, _) => cx.type_i32(),
@@ -774,7 +812,7 @@
 ) -> &'ll Value {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
             } else {
@@ -785,7 +823,7 @@
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
             let count = 16 / layout.size.bytes();
             let vec_ty = bx.cx.type_vector(elem_ty, count);
-            if let Primitive::Pointer = s.value {
+            if let Primitive::Pointer = s.primitive() {
                 value = bx.ptrtoint(value, bx.cx.type_isize());
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -800,7 +838,7 @@
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
@@ -812,7 +850,7 @@
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
             } else {
                 value
@@ -826,19 +864,21 @@
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
-            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
-            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
-            _ => value,
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
+                _ => value,
+            }
+        }
         _ => value,
     }
 }
@@ -852,7 +892,7 @@
 ) -> &'ll Value {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
@@ -860,7 +900,7 @@
         }
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
             value = bx.extract_element(value, bx.const_i32(0));
-            if let Primitive::Pointer = s.value {
+            if let Primitive::Pointer = s.primitive() {
                 value = bx.inttoptr(value, layout.llvm_type(bx.cx));
             }
             value
@@ -875,7 +915,7 @@
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
@@ -887,7 +927,7 @@
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
             } else {
                 value
@@ -901,20 +941,22 @@
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
-            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
-            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
-            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
-            _ => value,
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
+                Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
+                _ => value,
+            }
+        }
         _ => value,
     }
 }
@@ -927,7 +969,7 @@
 ) -> &'ll Type {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
@@ -946,7 +988,7 @@
             cx.type_vector(elem_ty, count * 2)
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             cx.type_i64()
         }
@@ -958,7 +1000,7 @@
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
             } else {
                 layout.llvm_type(cx)
@@ -972,19 +1014,21 @@
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
-            Primitive::F32 => cx.type_i32(),
-            Primitive::F64 => cx.type_i64(),
-            _ => layout.llvm_type(cx),
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
+                Primitive::F32 => cx.type_i32(),
+                Primitive::F64 => cx.type_i64(),
+                _ => layout.llvm_type(cx),
+            }
+        }
         _ => layout.llvm_type(cx),
     }
 }
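
The new `SymStatic` arm consults `renamed_statics` before falling back to `get_static`, because `codegen_static` may have recreated a static under a fresh global and cleared the old symbol name (see the consts.rs and context.rs hunks below). A toy model of that lookup-with-fallback, using plain std types rather than rustc's:

```rust
use std::cell::RefCell;
use std::collections::HashMap;

// Toy model (plain std types, not rustc's): a side table maps a static's
// id to its replacement global, falling back to the cached original.
struct Cx {
    renamed_statics: RefCell<HashMap<u32, &'static str>>,
}

impl Cx {
    fn global_for(&self, def_id: u32) -> &'static str {
        self.renamed_statics
            .borrow()
            .get(&def_id)
            .copied()
            .unwrap_or("old_unnamed_global") // stand-in for `get_static`
    }
}

fn main() {
    let cx = Cx { renamed_statics: RefCell::new(HashMap::new()) };
    assert_eq!(cx.global_for(7), "old_unnamed_global");
    // `codegen_static` recreated the global under a new name...
    cx.renamed_statics.borrow_mut().insert(7, "new_global");
    // ...so later lookups (e.g. from `global_asm!`) see the replacement.
    assert_eq!(cx.global_for(7), "new_global");
}
```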
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index c098ce3..9394d60 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -6,6 +6,7 @@
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_session::config::OptLevel;
+use rustc_span::symbol::sym;
 use rustc_target::spec::abi::Abi;
 use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
 use smallvec::SmallVec;
@@ -329,9 +330,7 @@
     ) {
         let span = cx
             .tcx
-            .get_attrs(instance.def_id())
-            .iter()
-            .find(|a| a.has_name(rustc_span::sym::target_feature))
+            .get_attr(instance.def_id(), sym::target_feature)
             .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span);
         let msg = format!(
             "the target features {} must all be either enabled or disabled together",
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index f814cc9..8f6438e 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -152,8 +152,10 @@
         };
 
         let target = &self.config.sess.target;
-        let mingw_gnu_toolchain =
-            target.vendor == "pc" && target.os == "windows" && target.env == "gnu";
+        let mingw_gnu_toolchain = target.vendor == "pc"
+            && target.os == "windows"
+            && target.env == "gnu"
+            && target.abi.is_empty();
 
         let import_name_and_ordinal_vector: Vec<(String, Option<u16>)> = dll_imports
             .iter()
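
The extra `abi.is_empty()` test distinguishes the classic `*-pc-windows-gnu` MinGW targets from the newer `windows-gnullvm` ones, which share vendor/os/env but carry a non-empty `abi` (assumed here to be `"llvm"` purely for illustration). A standalone sketch:

```rust
// Illustrative only: minimal stand-in for the target-spec fields tested above.
struct Target {
    vendor: String,
    os: String,
    env: String,
    abi: String,
}

fn is_mingw_gnu_toolchain(t: &Target) -> bool {
    t.vendor == "pc" && t.os == "windows" && t.env == "gnu" && t.abi.is_empty()
}

fn main() {
    let gnu = Target {
        vendor: "pc".into(),
        os: "windows".into(),
        env: "gnu".into(),
        abi: String::new(),
    };
    let gnullvm = Target {
        vendor: "pc".into(),
        os: "windows".into(),
        env: "gnu".into(),
        abi: "llvm".into(), // assumed value, for illustration
    };
    assert!(is_mingw_gnu_toolchain(&gnu));
    assert!(!is_mingw_gnu_toolchain(&gnullvm));
}
```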
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 0f5b1c0..d0724ba 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -6,9 +6,7 @@
 use crate::{llvm_util, LlvmCodegenBackend, ModuleLlvm};
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
 use rustc_codegen_ssa::back::symbol_export;
-use rustc_codegen_ssa::back::write::{
-    CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig,
-};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
 use rustc_data_structures::fx::FxHashMap;
@@ -16,7 +14,7 @@
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::bug;
 use rustc_middle::dep_graph::WorkProduct;
-use rustc_middle::middle::exported_symbols::SymbolExportLevel;
+use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
 use rustc_session::cgu_reuse_tracker::CguReuse;
 use rustc_session::config::{self, CrateType, Lto};
 use tracing::{debug, info};
@@ -55,8 +53,8 @@
         Lto::No => panic!("didn't request LTO but we're doing LTO"),
     };
 
-    let symbol_filter = &|&(ref name, level): &(String, SymbolExportLevel)| {
-        if level.is_below_threshold(export_threshold) {
+    let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
+        if info.level.is_below_threshold(export_threshold) || info.used {
             Some(CString::new(name.as_str()).unwrap())
         } else {
             None
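
The updated filter keeps `#[used]` symbols alive through LTO internalization even when their export level is above the threshold. A simplified model, with booleans standing in for the real `SymbolExportLevel` check:

```rust
// Sketch only: `below_threshold` stands in for
// `info.level.is_below_threshold(export_threshold)`.
struct SymbolExportInfo {
    below_threshold: bool,
    used: bool,
}

fn keep(name: &str, info: &SymbolExportInfo) -> Option<String> {
    (info.below_threshold || info.used).then(|| name.to_string())
}

fn main() {
    let public = SymbolExportInfo { below_threshold: true, used: false };
    let marked_used = SymbolExportInfo { below_threshold: false, used: true };
    let internal = SymbolExportInfo { below_threshold: false, used: false };
    assert!(keep("exported_fn", &public).is_some());
    assert!(keep("kept_static", &marked_used).is_some()); // the new `info.used` bit
    assert!(keep("helper", &internal).is_none());
}
```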
@@ -313,7 +311,9 @@
         for (bc_decoded, name) in serialized_modules {
             let _timer = cgcx
                 .prof
-                .generic_activity_with_arg("LLVM_fat_lto_link_module", format!("{:?}", name));
+                .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
+                    recorder.record_arg(format!("{:?}", name))
+                });
             info!("linking {:?}", name);
             let data = bc_decoded.data();
             linker.add(data).map_err(|()| {
@@ -351,7 +351,7 @@
         }
     }
 
-    Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: serialized_bitcode })
+    Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode })
 }
 
 crate struct Linker<'a>(&'a mut llvm::Linker<'a>);
@@ -394,15 +394,15 @@
 ///
 /// At a high level Thin LTO looks like:
 ///
-///     1. Prepare a "summary" of each LLVM module in question which describes
-///        the values inside, cost of the values, etc.
-///     2. Merge the summaries of all modules in question into one "index"
-///     3. Perform some global analysis on this index
-///     4. For each module, use the index and analysis calculated previously to
-///        perform local transformations on the module, for example inlining
-///        small functions from other modules.
-///     5. Run thin-specific optimization passes over each module, and then code
-///        generate everything at the end.
+///    1. Prepare a "summary" of each LLVM module in question which describes
+///       the values inside, cost of the values, etc.
+///    2. Merge the summaries of all modules in question into one "index"
+///    3. Perform some global analysis on this index
+///    4. For each module, use the index and analysis calculated previously to
+///       perform local transformations on the module, for example inlining
+///       small functions from other modules.
+///    5. Run thin-specific optimization passes over each module, and then code
+///       generate everything at the end.
 ///
 /// The summary for each module is intended to be quite cheap, and the global
 /// index is relatively quite cheap to create as well. As a result, the goal of
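
A toy rendition of steps 1-3 of that outline, using plain std maps where LLVM keeps real summary data (illustrative only, not LLVM's API):

```rust
use std::collections::HashMap;

// Summaries are cheap per-module digests; the merged index is what
// cross-module decisions (step 3) consult.
struct Summary {
    fn_sizes: HashMap<String, usize>,
}

// Step 1: summarize each module independently (and in parallel, in LLVM).
fn summarize(module: &[(&str, usize)]) -> Summary {
    Summary { fn_sizes: module.iter().map(|&(f, s)| (f.to_string(), s)).collect() }
}

// Step 2: merge all summaries into one index.
fn merge(summaries: &[Summary]) -> HashMap<String, usize> {
    let mut index = HashMap::new();
    for s in summaries {
        for (f, &size) in &s.fn_sizes {
            index.insert(f.clone(), size);
        }
    }
    index
}

fn main() {
    let modules = [vec![("tiny_helper", 8)], vec![("big_fn", 4096)]];
    let summaries: Vec<Summary> = modules.iter().map(|m| summarize(m)).collect();
    let index = merge(&summaries);
    // Steps 4-5: each module consults the index on its own, e.g. to decide
    // that `tiny_helper` is cheap enough to import and inline.
    assert!(index["tiny_helper"] < 32);
}
```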
@@ -576,11 +576,11 @@
 pub(crate) fn run_pass_manager(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
     diag_handler: &Handler,
-    module: &ModuleCodegen<ModuleLlvm>,
-    config: &ModuleConfig,
+    module: &mut ModuleCodegen<ModuleLlvm>,
     thin: bool,
 ) -> Result<(), FatalError> {
     let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &*module.name);
+    let config = cgcx.config(module.kind);
 
     // Now we have one massive module inside of llmod. Time to run the
     // LTO-specific optimization passes that LLVM provides.
@@ -623,7 +623,7 @@
             if thin {
                 llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
             } else {
-                llvm::LLVMPassManagerBuilderPopulateLTOPassManager(
+                llvm::LLVMRustPassManagerBuilderPopulateLTOPassManager(
                     b, pm, /* Internalize = */ False, /* RunInliner = */ True,
                 );
             }
@@ -724,7 +724,7 @@
 }
 
 pub unsafe fn optimize_thin_module(
-    thin_module: &mut ThinModule<LlvmCodegenBackend>,
+    thin_module: ThinModule<LlvmCodegenBackend>,
     cgcx: &CodegenContext<LlvmCodegenBackend>,
 ) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
     let diag_handler = cgcx.create_diag_handler();
@@ -741,7 +741,7 @@
     // that LLVM Context and Module.
     let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
     let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _;
-    let module = ModuleCodegen {
+    let mut module = ModuleCodegen {
         module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
         name: thin_module.name().to_string(),
         kind: ModuleKind::Regular,
@@ -857,8 +857,7 @@
         // little differently.
         {
             info!("running thin lto passes over {}", module.name);
-            let config = cgcx.config(module.kind);
-            run_pass_manager(cgcx, &diag_handler, &module, config, true)?;
+            run_pass_manager(cgcx, &diag_handler, &mut module, true)?;
             save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index c18719d..99e3053 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -523,6 +523,12 @@
     let module_name = module.name.clone();
     let module_name = Some(&module_name[..]);
 
+    if let Some(false) = config.new_llvm_pass_manager && llvm_util::get_version() >= (15, 0, 0) {
+        diag_handler.warn(
+            "ignoring `-Z new-llvm-pass-manager=no`, which is no longer supported with LLVM 15",
+        );
+    }
+
     if config.emit_no_opt_bc {
         let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
         let out = path_to_c_string(&out);
@@ -628,8 +634,8 @@
                         extra_passes.as_ptr(),
                         extra_passes.len() as size_t,
                     );
-                    llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
-                    llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
+                    llvm::LLVMRustPassManagerBuilderPopulateFunctionPassManager(b, fpm);
+                    llvm::LLVMRustPassManagerBuilderPopulateModulePassManager(b, mpm);
                 });
 
                 have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
@@ -721,8 +727,7 @@
 
     let mut linker = Linker::new(first.module_llvm.llmod());
     for module in elements {
-        let _timer =
-            cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name));
+        let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name);
         let buffer = ModuleBuffer::new(module.module_llvm.llmod());
         linker.add(buffer.data()).map_err(|()| {
             let msg = format!("failed to serialize module {:?}", module.name);
@@ -1086,7 +1091,7 @@
     // Create the PassManagerBuilder for LLVM. We configure it with
     // reasonable defaults and prepare it to actually populate the pass
     // manager.
-    let builder = llvm::LLVMPassManagerBuilderCreate();
+    let builder = llvm::LLVMRustPassManagerBuilderCreate();
     let opt_size = config.opt_size.map_or(llvm::CodeGenOptSizeNone, |x| to_llvm_opt_settings(x).1);
     let inline_threshold = config.inline_threshold;
     let pgo_gen_path = get_pgo_gen_path(config);
@@ -1103,14 +1108,9 @@
         pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
         pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
         pgo_sample_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+        opt_size as c_int,
     );
 
-    llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32);
-
-    if opt_size != llvm::CodeGenOptSizeNone {
-        llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1);
-    }
-
     llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
 
     // Here we match what clang does (kinda). For O0 we only inline
@@ -1119,16 +1119,16 @@
     // thresholds copied from clang.
     match (opt_level, opt_size, inline_threshold) {
         (.., Some(t)) => {
-            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t);
+            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, t);
         }
         (llvm::CodeGenOptLevel::Aggressive, ..) => {
-            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275);
+            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 275);
         }
         (_, llvm::CodeGenOptSizeDefault, _) => {
-            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75);
+            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 75);
         }
         (_, llvm::CodeGenOptSizeAggressive, _) => {
-            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25);
+            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 25);
         }
         (llvm::CodeGenOptLevel::None, ..) => {
             llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
@@ -1137,12 +1137,12 @@
             llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
         }
         (llvm::CodeGenOptLevel::Default, ..) => {
-            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);
+            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 225);
         }
     }
 
     f(builder);
-    llvm::LLVMPassManagerBuilderDispose(builder);
+    llvm::LLVMRustPassManagerBuilderDispose(builder);
 }
 
 // Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
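
The inliner-threshold match above mirrors clang's tiers. A compact stand-in, with plain integers substituted for `CodeGenOptLevel` and `CodeGenOptSize`; the threshold values are the ones in the match arms above:

```rust
// Numeric stand-ins (assumption): opt_level 0-3 for CodeGenOptLevel,
// opt_size 0 = none, 1 = -Os, 2 = -Oz.
fn inline_threshold(opt_level: u32, opt_size: u32, explicit: Option<u32>) -> Option<u32> {
    match (opt_level, opt_size, explicit) {
        (.., Some(t)) => Some(t),  // an explicit threshold always wins
        (3, ..) => Some(275),      // Aggressive (-O3)
        (_, 1, _) => Some(75),     // size: default (-Os)
        (_, 2, _) => Some(25),     // size: aggressive (-Oz)
        (0 | 1, ..) => None,       // O0/O1: only the always-inline pass runs
        _ => Some(225),            // Default (-O2)
    }
}

fn main() {
    assert_eq!(inline_threshold(2, 0, None), Some(225));
    assert_eq!(inline_threshold(3, 0, Some(500)), Some(500));
    assert_eq!(inline_threshold(0, 0, None), None);
}
```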
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index dd3ada4..86f92dc 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -74,10 +74,11 @@
 
     fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm> {
         let cgu = tcx.codegen_unit(cgu_name);
-        let _prof_timer = tcx.prof.generic_activity_with_args(
-            "codegen_module",
-            &[cgu_name.to_string(), cgu.size_estimate().to_string()],
-        );
+        let _prof_timer =
+            tcx.prof.generic_activity_with_arg_recorder("codegen_module", |recorder| {
+                recorder.record_arg(cgu_name.to_string());
+                recorder.record_arg(cgu.size_estimate().to_string());
+            });
         // Instantiate monomorphizations without filling out definitions yet...
         let llvm_module = ModuleLlvm::new(tcx, cgu_name.as_str());
         {
@@ -99,15 +100,6 @@
                 attributes::apply_to_llfn(entry, llvm::AttributePlace::Function, &attrs);
             }
 
-            // Run replace-all-uses-with for statics that need it
-            for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
-                unsafe {
-                    let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
-                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
-                    llvm::LLVMDeleteGlobal(old_g);
-                }
-            }
-
             // Finalize code coverage by injecting the coverage map. Note, the coverage map will
             // also be added to the `llvm.compiler.used` variable, created next.
             if cx.sess().instrument_coverage() {
@@ -122,6 +114,16 @@
                 cx.create_compiler_used_variable()
             }
 
+            // Run replace-all-uses-with for statics that need it. This must
+            // happen after the llvm.used variables are created.
+            for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
+                unsafe {
+                    let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
+                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+                    llvm::LLVMDeleteGlobal(old_g);
+                }
+            }
+
             // Finalize debuginfo
             if cx.sess().opts.debuginfo != DebugInfo::None {
                 cx.debuginfo_finalize();
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 1bbfc13..88b8795 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -484,14 +484,14 @@
                 bx.noundef_metadata(load);
             }
 
-            match scalar.value {
+            match scalar.primitive() {
                 abi::Int(..) => {
                     if !scalar.is_always_valid(bx) {
-                        bx.range_metadata(load, scalar.valid_range);
+                        bx.range_metadata(load, scalar.valid_range(bx));
                     }
                 }
                 abi::Pointer => {
-                    if !scalar.valid_range.contains(0) {
+                    if !scalar.valid_range(bx).contains(0) {
                         bx.nonnull_metadata(load);
                     }
 
@@ -525,7 +525,7 @@
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
         } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
-            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
+            let b_offset = a.size(self).align_to(b.align(self).abi);
             let pair_ty = place.layout.llvm_type(self);
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
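
The `valid_range(bx).contains(0)` check above is what decides whether a loaded pointer gets `nonnull` metadata. A self-contained model of the inclusive, wrap-around semantics of `rustc_target`'s `WrappingRange` (a sketch, not the real API):

```rust
// `start > end` means the range wraps through the type's maximum value.
struct WrappingRange {
    start: u128,
    end: u128,
}

impl WrappingRange {
    fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }
}

fn main() {
    // A `NonZeroU8`-like scalar has range 1..=255: zero is excluded, so
    // a load of it can be marked `nonnull`.
    let non_zero = WrappingRange { start: 1, end: 255 };
    assert!(!non_zero.contains(0));
    // A wrapped range such as 255..=0 (for u8) does contain zero.
    let wrapped = WrappingRange { start: 255, end: 0 };
    assert!(wrapped.contains(0));
}
```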
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index a85b2e6..b69d7a0 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -221,16 +221,16 @@
     }
 
     fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
-        let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
+        let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
         match cv {
             Scalar::Int(ScalarInt::ZST) => {
-                assert_eq!(0, layout.value.size(self).bytes());
+                assert_eq!(0, layout.size(self).bytes());
                 self.const_undef(self.type_ix(0))
             }
             Scalar::Int(int) => {
-                let data = int.assert_bits(layout.value.size(self));
+                let data = int.assert_bits(layout.size(self));
                 let llval = self.const_uint_big(self.type_ix(bitsize), data);
-                if layout.value == Pointer {
+                if layout.primitive() == Pointer {
                     unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)
@@ -269,7 +269,7 @@
                         1,
                     )
                 };
-                if layout.value != Pointer {
+                if layout.primitive() != Pointer {
                     unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 413ef0b..4d3f3f3 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -109,7 +109,10 @@
                 Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
                 &cx.tcx,
             ),
-            Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
+            Scalar::Initialized {
+                value: Primitive::Pointer,
+                valid_range: WrappingRange::full(dl.pointer_size),
+            },
             cx.type_i8p_ext(address_space),
         ));
         next_offset = offset + pointer_size;
@@ -409,6 +412,13 @@
                 llvm::LLVMRustSetLinkage(new_g, linkage);
                 llvm::LLVMRustSetVisibility(new_g, visibility);
 
+                // The old global has had its name removed but is returned by
+                // get_static since it is in the instance cache. Provide an
+                // alternative lookup that points to the new global so that
+                // global_asm! can compute the correct mangled symbol name
+                // for the global.
+                self.renamed_statics.borrow_mut().insert(def_id, new_g);
+
                 // To avoid breaking any invariants, we leave around the old
                 // global for the moment; we'll replace all references to it
                 // with the new global later. (See base::codegen_backend.)
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 98cf873..d296ee3 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -14,6 +14,7 @@
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::DefId;
 use rustc_middle::mir::mono::CodegenUnit;
 use rustc_middle::ty::layout::{
     FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers,
@@ -105,6 +106,12 @@
 
     /// A counter that is used for generating local symbol names
     local_gen_sym_counter: Cell<usize>,
+
+    /// `codegen_static` will sometimes create a second global variable with a
+    /// different type and clear the symbol name of the original global.
+    /// `global_asm!` needs to be able to find this new global so that it can
+    /// compute the correct mangled symbol name to insert into the asm.
+    pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
 }
 
 pub struct TypeLowering<'ll> {
@@ -436,6 +443,7 @@
             rust_try_fn: Cell::new(None),
             intrinsics: Default::default(),
             local_gen_sym_counter: Cell::new(0),
+            renamed_statics: Default::default(),
         }
     }
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index 76caa3c..99e4ded 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -20,7 +20,6 @@
     cx: &CodegenCx<'ll, 'tcx>,
     instance: Instance<'tcx>,
     mir: &Body<'tcx>,
-    fn_dbg_scope: &'ll DIScope,
     debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
 ) {
     // Find all scopes with variables defined in them.
@@ -38,40 +37,41 @@
         // Nothing to emit, of course.
         None
     };
-
+    let mut instantiated = BitSet::new_empty(mir.source_scopes.len());
     // Instantiate all scopes.
     for idx in 0..mir.source_scopes.len() {
         let scope = SourceScope::new(idx);
-        make_mir_scope(cx, instance, mir, fn_dbg_scope, &variables, debug_context, scope);
+        make_mir_scope(cx, instance, mir, &variables, debug_context, &mut instantiated, scope);
     }
+    assert!(instantiated.count() == mir.source_scopes.len());
 }
 
 fn make_mir_scope<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     instance: Instance<'tcx>,
     mir: &Body<'tcx>,
-    fn_dbg_scope: &'ll DIScope,
     variables: &Option<BitSet<SourceScope>>,
     debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+    instantiated: &mut BitSet<SourceScope>,
     scope: SourceScope,
 ) {
-    if debug_context.scopes[scope].dbg_scope.is_some() {
+    if instantiated.contains(scope) {
         return;
     }
 
     let scope_data = &mir.source_scopes[scope];
     let parent_scope = if let Some(parent) = scope_data.parent_scope {
-        make_mir_scope(cx, instance, mir, fn_dbg_scope, variables, debug_context, parent);
+        make_mir_scope(cx, instance, mir, variables, debug_context, instantiated, parent);
         debug_context.scopes[parent]
     } else {
         // The root is the function itself.
         let loc = cx.lookup_debug_loc(mir.span.lo());
         debug_context.scopes[scope] = DebugScope {
-            dbg_scope: Some(fn_dbg_scope),
-            inlined_at: None,
             file_start_pos: loc.file.start_pos,
             file_end_pos: loc.file.end_pos,
+            ..debug_context.scopes[scope]
         };
+        instantiated.insert(scope);
         return;
     };
 
@@ -79,6 +79,7 @@
         // Do not create a DIScope if there are no variables defined in this
         // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
         debug_context.scopes[scope] = parent_scope;
+        instantiated.insert(scope);
         return;
     }
 
@@ -100,7 +101,7 @@
         None => unsafe {
             llvm::LLVMRustDIBuilderCreateLexicalBlock(
                 DIB(cx),
-                parent_scope.dbg_scope.unwrap(),
+                parent_scope.dbg_scope,
                 file_metadata,
                 loc.line,
                 loc.col,
@@ -116,9 +117,10 @@
     });
 
     debug_context.scopes[scope] = DebugScope {
-        dbg_scope: Some(dbg_scope),
+        dbg_scope,
         inlined_at: inlined_at.or(parent_scope.inlined_at),
         file_start_pos: loc.file.start_pos,
         file_end_pos: loc.file.end_pos,
     };
+    instantiated.insert(scope);
 }
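
This rewrite swaps the old `Option<&DIScope>` "not yet built" sentinel for an explicit `instantiated` set, then asserts every scope was built exactly where expected. The shape of that pattern, reduced to std types:

```rust
// Sketch: recurse parent-first, track completion in an explicit set
// (here a Vec<bool> standing in for the BitSet), and verify coverage.
fn instantiate(scope: usize, parents: &[Option<usize>], done: &mut Vec<bool>) {
    if done[scope] {
        return; // already instantiated, like the `instantiated.contains` check
    }
    if let Some(parent) = parents[scope] {
        instantiate(parent, parents, done);
    }
    done[scope] = true;
}

fn main() {
    // Scope 0 is the root; 1 and 2 hang off it; 3 hangs off 2.
    let parents = [None, Some(0), Some(0), Some(2)];
    let mut done = vec![false; parents.len()];
    for scope in 0..parents.len() {
        instantiate(scope, &parents, &mut done);
    }
    // Mirrors `assert!(instantiated.count() == mir.source_scopes.len())`.
    assert!(done.iter().all(|&d| d));
}
```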
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/doc.md b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
index 5a8976c..aaec4e6 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
@@ -27,9 +27,9 @@
 Internally the module will try to reuse already created metadata by
 utilizing a cache. The way to get a shared metadata node when needed is
 thus to just call the corresponding function in this module:
-
-    let file_metadata = file_metadata(cx, file);
-
+```ignore (illustrative)
+let file_metadata = file_metadata(cx, file);
+```
 The function will take care of probing the cache for an existing node for
 that exact file path.
 
@@ -63,7 +63,7 @@
 
 will generate the following callstack with a naive DFS algorithm:
 
-```
+```ignore (illustrative)
 describe(t = List)
   describe(t = i32)
   describe(t = Option<Box<List>>)
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 74e1947..f2cf3b1 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -437,11 +437,9 @@
 
     let DINodeCreationResult { di_node, already_stored_in_typemap } = match *t.kind() {
         ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
-            DINodeCreationResult::new(build_basic_type_di_node(cx, t), false)
+            build_basic_type_di_node(cx, t)
         }
-        ty::Tuple(elements) if elements.is_empty() => {
-            DINodeCreationResult::new(build_basic_type_di_node(cx, t), false)
-        }
+        ty::Tuple(elements) if elements.is_empty() => build_basic_type_di_node(cx, t),
         ty::Array(..) => build_fixed_size_array_di_node(cx, unique_type_id, t),
         ty::Slice(_) | ty::Str => build_slice_type_di_node(cx, t, unique_type_id),
         ty::Dynamic(..) => build_dyn_type_di_node(cx, t, unique_type_id),
@@ -640,7 +638,10 @@
     }
 }
 
-fn build_basic_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
+fn build_basic_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    t: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
     debug!("build_basic_type_di_node: {:?}", t);
 
     // When targeting MSVC, emit MSVC style type names for compatibility with
@@ -649,7 +650,13 @@
 
     let (name, encoding) = match t.kind() {
         ty::Never => ("!", DW_ATE_unsigned),
-        ty::Tuple(elements) if elements.is_empty() => ("()", DW_ATE_unsigned),
+        ty::Tuple(elements) if elements.is_empty() => {
+            if cpp_like_debuginfo {
+                return build_tuple_type_di_node(cx, UniqueTypeId::for_ty(cx.tcx, t));
+            } else {
+                ("()", DW_ATE_unsigned)
+            }
+        }
         ty::Bool => ("bool", DW_ATE_boolean),
         ty::Char => ("char", DW_ATE_UTF),
         ty::Int(int_ty) if cpp_like_debuginfo => (int_ty.msvc_basic_name(), DW_ATE_signed),
@@ -672,14 +679,14 @@
     };
 
     if !cpp_like_debuginfo {
-        return ty_di_node;
+        return DINodeCreationResult::new(ty_di_node, false);
     }
 
     let typedef_name = match t.kind() {
         ty::Int(int_ty) => int_ty.name_str(),
         ty::Uint(uint_ty) => uint_ty.name_str(),
         ty::Float(float_ty) => float_ty.name_str(),
-        _ => return ty_di_node,
+        _ => return DINodeCreationResult::new(ty_di_node, false),
     };
 
     let typedef_di_node = unsafe {
@@ -694,7 +701,7 @@
         )
     };
 
-    typedef_di_node
+    DINodeCreationResult::new(typedef_di_node, false)
 }
 
 fn build_foreign_type_di_node<'ll, 'tcx>(
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index 31bb9ed..d6e2c8c 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -372,7 +372,6 @@
 
     // Build the type node for each field.
     let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_range
-        .clone()
         .map(|variant_index| {
             let variant_struct_type_di_node = super::build_generator_variant_struct_type_di_node(
                 cx,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index 1eafa95..73e01d0 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -118,7 +118,7 @@
 
         Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => {
             // Niche tags are always normalized to unsigned integers of the correct size.
-            match tag.value {
+            match tag.primitive() {
                 Primitive::Int(t, _) => t,
                 Primitive::F32 => Integer::I32,
                 Primitive::F64 => Integer::I64,
@@ -136,7 +136,7 @@
 
         Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
             // Direct tags preserve the sign.
-            tag.value.to_ty(cx.tcx)
+            tag.primitive().to_ty(cx.tcx)
         }
     }
 }
@@ -425,7 +425,7 @@
                 let value = (variant_index.as_u32() as u128)
                     .wrapping_sub(niche_variants.start().as_u32() as u128)
                     .wrapping_add(niche_start);
-                let value = tag.value.size(cx).truncate(value);
+                let value = tag.size(cx).truncate(value);
                 // NOTE(eddyb) do *NOT* remove this assert, until
                 // we pass the full 128-bit value to LLVM, otherwise
                 // truncation will be silent and remain undetected.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 4e6d3f8..6a16455 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -286,9 +286,8 @@
         }
 
         // Initialize fn debug context (including scopes).
-        // FIXME(eddyb) figure out a way to not need `Option` for `dbg_scope`.
         let empty_scope = DebugScope {
-            dbg_scope: None,
+            dbg_scope: self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
             inlined_at: None,
             file_start_pos: BytePos(0),
             file_end_pos: BytePos(0),
@@ -297,13 +296,7 @@
             FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
 
         // Fill in all the scopes, with the information from the MIR body.
-        compute_mir_scopes(
-            self,
-            instance,
-            mir,
-            self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
-            &mut fn_debug_context,
-        );
+        compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
 
         Some(fn_debug_context)
     }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index fe9851c..8f24367 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -46,7 +46,7 @@
 }
 
 pub fn get_namespace_for_item<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
-    item_namespace(cx, cx.tcx.parent(def_id).expect("get_namespace_for_item: missing parent?"))
+    item_namespace(cx, cx.tcx.parent(def_id))
 }
 
 #[derive(Debug, PartialEq, Eq)]
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index f4dc334..4407297 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -134,7 +134,7 @@
             sym::va_arg => {
                 match fn_abi.ret.layout.abi {
                     abi::Abi::Scalar(scalar) => {
-                        match scalar.value {
+                        match scalar.primitive() {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
                                     // `va_arg` should not be called on an integer type
@@ -816,6 +816,7 @@
     span: Span,
 ) -> Result<&'ll Value, ()> {
     // macros for error handling:
+    #[cfg_attr(not(bootstrap), allow(unused_macro_rules))]
     macro_rules! emit_error {
         ($msg: tt) => {
             emit_error!($msg, )
@@ -1144,6 +1145,7 @@
         span: Span,
         args: &[OperandRef<'tcx, &'ll Value>],
     ) -> Result<&'ll Value, ()> {
+        #[cfg_attr(not(bootstrap), allow(unused_macro_rules))]
         macro_rules! emit_error {
             ($msg: tt) => {
                 emit_error!($msg, )
@@ -1839,6 +1841,27 @@
         simd_neg: Int => neg, Float => fneg;
     }
 
+    if name == sym::simd_arith_offset {
+        // This also checks that the first operand is a ptr type.
+        let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
+            span_bug!(span, "must be called with a vector of pointer types as first argument")
+        });
+        let layout = bx.layout_of(pointee.ty);
+        let ptrs = args[0].immediate();
+        // The second argument must be a ptr-sized integer.
+        // (We don't care about the signedness, this is wrapping anyway.)
+        let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
+        if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
+            span_bug!(
+                span,
+                "must be called with a vector of pointer-sized integers as second argument"
+            );
+        }
+        let offsets = args[1].immediate();
+
+        return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
+    }
+
     if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
         let lhs = args[0].immediate();
         let rhs = args[1].immediate();
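
For reference, a scalar, per-lane model of what the single vector GEP emitted for `simd_arith_offset` computes (assumption: each lane behaves like `wrapping_offset` on a raw pointer, scaled by the element size):

```rust
// Per-lane stand-in for the vector operation; not the intrinsic itself.
fn arith_offset<T>(ptr: *const T, offset: isize) -> *const T {
    ptr.wrapping_offset(offset)
}

fn main() {
    let data = [10u32, 20, 30, 40];
    let ptrs = [data.as_ptr(), data.as_ptr().wrapping_add(1)];
    let offsets = [2isize, 2];
    let moved: Vec<*const u32> = ptrs
        .iter()
        .zip(offsets)
        .map(|(&p, o)| arith_offset(p, o))
        .collect();
    // Offsets scale by the element size, exactly like GEP: lane 0 lands
    // on data[2], lane 1 on data[3].
    unsafe {
        assert_eq!(*moved[0], 30);
        assert_eq!(*moved[1], 40);
    }
}
```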
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 3152c50..0bead46 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -5,7 +5,6 @@
 //! This API is completely unstable and subject to change.
 
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
-#![feature(bool_to_option)]
 #![feature(crate_visibility_modifier)]
 #![feature(let_chains)]
 #![feature(let_else)]
@@ -104,19 +103,18 @@
 }
 
 impl ExtraBackendMethods for LlvmCodegenBackend {
-    fn new_metadata(&self, tcx: TyCtxt<'_>, mod_name: &str) -> ModuleLlvm {
-        ModuleLlvm::new_metadata(tcx, mod_name)
-    }
-
     fn codegen_allocator<'tcx>(
         &self,
         tcx: TyCtxt<'tcx>,
-        module_llvm: &mut ModuleLlvm,
         module_name: &str,
         kind: AllocatorKind,
         has_alloc_error_handler: bool,
-    ) {
-        unsafe { allocator::codegen(tcx, module_llvm, module_name, kind, has_alloc_error_handler) }
+    ) -> ModuleLlvm {
+        let mut module_llvm = ModuleLlvm::new_metadata(tcx, module_name);
+        unsafe {
+            allocator::codegen(tcx, &mut module_llvm, module_name, kind, has_alloc_error_handler);
+        }
+        module_llvm
     }
     fn compile_codegen_unit(
         &self,
@@ -210,9 +208,16 @@
     ) -> Result<(), FatalError> {
         back::write::optimize(cgcx, diag_handler, module, config)
     }
+    fn optimize_fat(
+        cgcx: &CodegenContext<Self>,
+        module: &mut ModuleCodegen<Self::Module>,
+    ) -> Result<(), FatalError> {
+        let diag_handler = cgcx.create_diag_handler();
+        back::lto::run_pass_manager(cgcx, &diag_handler, module, false)
+    }
     unsafe fn optimize_thin(
         cgcx: &CodegenContext<Self>,
-        thin: &mut ThinModule<Self>,
+        thin: ThinModule<Self>,
     ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
         back::lto::optimize_thin_module(thin, cgcx)
     }
@@ -230,15 +235,6 @@
     fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
         (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod()))
     }
-    fn run_lto_pass_manager(
-        cgcx: &CodegenContext<Self>,
-        module: &ModuleCodegen<Self::Module>,
-        config: &ModuleConfig,
-        thin: bool,
-    ) -> Result<(), FatalError> {
-        let diag_handler = cgcx.create_diag_handler();
-        back::lto::run_pass_manager(cgcx, &diag_handler, module, config, thin)
-    }
 }
 
 unsafe impl Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 375b992..13baadd 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1825,24 +1825,22 @@
 
     pub fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>);
 
-    pub fn LLVMPassManagerBuilderCreate() -> &'static mut PassManagerBuilder;
-    pub fn LLVMPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder);
-    pub fn LLVMPassManagerBuilderSetSizeLevel(PMB: &PassManagerBuilder, Value: Bool);
-    pub fn LLVMPassManagerBuilderSetDisableUnrollLoops(PMB: &PassManagerBuilder, Value: Bool);
-    pub fn LLVMPassManagerBuilderUseInlinerWithThreshold(
+    pub fn LLVMRustPassManagerBuilderCreate() -> &'static mut PassManagerBuilder;
+    pub fn LLVMRustPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder);
+    pub fn LLVMRustPassManagerBuilderUseInlinerWithThreshold(
         PMB: &PassManagerBuilder,
         threshold: c_uint,
     );
-    pub fn LLVMPassManagerBuilderPopulateModulePassManager(
+    pub fn LLVMRustPassManagerBuilderPopulateModulePassManager(
         PMB: &PassManagerBuilder,
         PM: &PassManager<'_>,
     );
 
-    pub fn LLVMPassManagerBuilderPopulateFunctionPassManager(
+    pub fn LLVMRustPassManagerBuilderPopulateFunctionPassManager(
         PMB: &PassManagerBuilder,
         PM: &PassManager<'_>,
     );
-    pub fn LLVMPassManagerBuilderPopulateLTOPassManager(
+    pub fn LLVMRustPassManagerBuilderPopulateLTOPassManager(
         PMB: &PassManagerBuilder,
         PM: &PassManager<'_>,
         Internalize: Bool,
@@ -2308,6 +2306,7 @@
         PGOGenPath: *const c_char,
         PGOUsePath: *const c_char,
         PGOSampleUsePath: *const c_char,
+        SizeLevel: c_int,
     );
     pub fn LLVMRustAddLibraryInfo<'a>(
         PM: &PassManager<'a>,
@@ -2537,4 +2536,6 @@
         remark_passes_len: usize,
     );
 
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustGetMangledName(V: &Value, out: &RustString);
 }
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index c24e369..7b407c9 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -542,6 +542,11 @@
     // The new pass manager is enabled by default for LLVM >= 13.
     // This matches Clang, which also enables it since Clang 13.
 
+    // Since LLVM 15, the legacy pass manager is no longer supported.
+    if llvm_util::get_version() >= (15, 0, 0) {
+        return true;
+    }
+
     // There are some perf issues with the new pass manager when targeting
     // s390x with LLVM 13, so enable the new pass manager only with LLVM 14.
     // See https://github.com/rust-lang/rust/issues/89609.
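
The gate works because Rust tuples compare lexicographically; a minimal sketch:

```rust
// `(major, minor, patch)` tuples compare lexicographically, so this
// selects LLVM 15 and everything newer.
fn must_use_new_pass_manager(version: (u32, u32, u32)) -> bool {
    version >= (15, 0, 0)
}

fn main() {
    assert!(must_use_new_pass_manager((15, 0, 0)));
    assert!(must_use_new_pass_manager((16, 0, 2)));
    assert!(!must_use_new_pass_manager((14, 0, 6)));
}
```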
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 757aa9a..8628052 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -309,7 +309,7 @@
         scalar: Scalar,
         offset: Size,
     ) -> &'a Type {
-        match scalar.value {
+        match scalar.primitive() {
             Int(i, _) => cx.type_from_integer(i),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
@@ -362,8 +362,7 @@
             return cx.type_i1();
         }
 
-        let offset =
-            if index == 0 { Size::ZERO } else { a.value.size(cx).align_to(b.value.align(cx).abi) };
+        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
         self.scalar_llvm_type_at(cx, scalar, offset)
     }