Importing rustc-1.49.0
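
This imports the upstream rustc 1.49.0 sources. Among other changes, the
allocator shim in compiler/rustc_codegen_llvm/src/allocator.rs now also emits a
`__rust_alloc_error_handler` function that forwards to either the user-defined
or the default OOM handler. As a rough Rust-level sketch of the IR the new code
in the diff below generates (illustrative only, not literal compiler output;
the callee name is produced by `AllocatorKind::fn_name(sym::oom)`, i.e.
`__rg_oom` for a user-defined handler and `__rdl_oom` for the default):

    // Shim emitted when no #[alloc_error_handler] is defined
    // (has_alloc_error_handler == false => AllocatorKind::Default).
    #[no_mangle]
    unsafe extern "C" fn __rust_alloc_error_handler(size: usize, align: usize) -> ! {
        extern "C" {
            // With a user-defined handler this would be `__rg_oom` instead.
            fn __rdl_oom(size: usize, align: usize) -> !;
        }
        // The generated IR marks this call as a tail call and the shim itself
        // as noreturn with hidden visibility, mirroring the LLVM attribute
        // calls in the diff below.
        __rdl_oom(size, align)
    }
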
Bug: 176888219
Change-Id: Ib0805d37e7b485cd420bbff8a8b000cf87e7ede0
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index bc1d9e1..a5ea0b2 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -3,17 +3,23 @@
use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
use crate::llvm::{self, False, True};
use crate::ModuleLlvm;
-pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut ModuleLlvm, kind: AllocatorKind) {
+pub(crate) unsafe fn codegen(
+ tcx: TyCtxt<'_>,
+ mods: &mut ModuleLlvm,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+) {
let llcx = &*mods.llcx;
let llmod = mods.llmod();
- let usize = match &tcx.sess.target.target.target_pointer_width[..] {
- "16" => llvm::LLVMInt16TypeInContext(llcx),
- "32" => llvm::LLVMInt32TypeInContext(llcx),
- "64" => llvm::LLVMInt64TypeInContext(llcx),
+ let usize = match tcx.sess.target.pointer_width {
+ 16 => llvm::LLVMInt16TypeInContext(llcx),
+ 32 => llvm::LLVMInt32TypeInContext(llcx),
+ 64 => llvm::LLVMInt64TypeInContext(llcx),
tws => bug!("Unsupported target word size for int: {}", tws),
};
let i8 = llvm::LLVMInt8TypeInContext(llcx);
@@ -51,7 +57,7 @@
let name = format!("__rust_{}", method.name);
let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
- if tcx.sess.target.target.options.default_hidden_visibility {
+ if tcx.sess.target.default_hidden_visibility {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
if tcx.sess.must_emit_unwind_tables() {
@@ -82,4 +88,41 @@
}
llvm::LLVMDisposeBuilder(llbuilder);
}
+
+ // rust alloc error handler
+ let args = [usize, usize]; // size, align
+
+ let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
+ let name = "__rust_alloc_error_handler".to_string();
+ let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
+ // -> ! DIFlagNoReturn
+ llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
+
+ if tcx.sess.target.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ attributes::emit_uwtable(llfn, true);
+ }
+
+ let kind = if has_alloc_error_handler { AllocatorKind::Global } else { AllocatorKind::Default };
+ let callee = kind.fn_name(sym::oom);
+ let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
+ // -> ! DIFlagNoReturn
+ llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, callee);
+ llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+ let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+ let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+ llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+ .collect::<Vec<_>>();
+ let ret = llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
+ llvm::LLVMSetTailCall(ret, True);
+ llvm::LLVMBuildRetVoid(llbuilder);
+ llvm::LLVMDisposeBuilder(llbuilder);
}
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index f801f84..b5d279e 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -12,8 +12,8 @@
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
-use rustc_middle::span_bug;
use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::{bug, span_bug};
use rustc_span::{Pos, Span};
use rustc_target::abi::*;
use rustc_target::asm::*;
@@ -60,7 +60,7 @@
// Default per-arch clobbers
// Basically what clang does
- let arch_clobbers = match &self.sess().target.target.arch[..] {
+ let arch_clobbers = match &self.sess().target.arch[..] {
"x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
"mips" | "mips64" => vec!["~{$1}"],
_ => Vec::new(),
@@ -259,7 +259,8 @@
InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
InlineAsmArch::Nvptx64 => {}
InlineAsmArch::Hexagon => {}
- InlineAsmArch::Mips => {}
+ InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
+ InlineAsmArch::SpirV => {}
}
}
if !options.contains(InlineAsmOptions::NOMEM) {
@@ -302,13 +303,11 @@
} else if options.contains(InlineAsmOptions::READONLY) {
llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
}
+ } else if options.contains(InlineAsmOptions::NOMEM) {
+ llvm::Attribute::InaccessibleMemOnly
+ .apply_callsite(llvm::AttributePlace::Function, result);
} else {
- if options.contains(InlineAsmOptions::NOMEM) {
- llvm::Attribute::InaccessibleMemOnly
- .apply_callsite(llvm::AttributePlace::Function, result);
- } else {
- // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
- }
+ // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
}
// Write results to outputs
@@ -520,6 +519,9 @@
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
}
.to_string(),
}
@@ -582,6 +584,9 @@
_ => unreachable!(),
},
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
}
}
@@ -621,6 +626,9 @@
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
}
}
@@ -710,6 +718,7 @@
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+ Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
_ => value,
},
_ => value,
@@ -785,6 +794,7 @@
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+ Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
_ => value,
},
_ => value,
@@ -854,6 +864,7 @@
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
Primitive::F32 => cx.type_i32(),
+ Primitive::F64 => cx.type_i64(),
_ => layout.llvm_type(cx),
},
_ => layout.llvm_type(cx),
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 73c3481..87bcce0 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -6,7 +6,7 @@
use rustc_data_structures::const_cstr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::small_c_str::SmallCStr;
-use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::query::Providers;
@@ -18,7 +18,7 @@
use crate::llvm::AttributePlace::Function;
use crate::llvm::{self, Attribute};
use crate::llvm_util;
-pub use rustc_attr::{InlineAttr, OptimizeAttr};
+pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
use crate::context::CodegenCx;
use crate::value::Value;
@@ -31,7 +31,7 @@
Hint => Attribute::InlineHint.apply_llfn(Function, val),
Always => Attribute::AlwaysInline.apply_llfn(Function, val),
Never => {
- if cx.tcx().sess.target.target.arch != "amdgpu" {
+ if cx.tcx().sess.target.arch != "amdgpu" {
Attribute::NoInline.apply_llfn(Function, val);
}
}
@@ -90,9 +90,7 @@
// The function name varies on platforms.
// See test/CodeGen/mcount.c in clang.
- let mcount_name =
- CString::new(cx.sess().target.target.options.target_mcount.as_str().as_bytes())
- .unwrap();
+ let mcount_name = CString::new(cx.sess().target.mcount.as_str().as_bytes()).unwrap();
llvm::AddFunctionAttrStringValue(
llfn,
@@ -106,7 +104,7 @@
fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
// Only use stack probes if the target specification indicates that we
// should be using stack probes
- if !cx.sess().target.target.options.stack_probes {
+ if !cx.sess().target.stack_probes {
return;
}
@@ -175,8 +173,6 @@
.split(',')
.filter(|f| !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)));
sess.target
- .target
- .options
.features
.split(',')
.chain(cmdline)
@@ -194,6 +190,18 @@
);
}
+pub fn apply_tune_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+ if let Some(tune) = llvm_util::tune_cpu(cx.tcx.sess) {
+ let tune_cpu = SmallCStr::new(tune);
+ llvm::AddFunctionAttrStringValue(
+ llfn,
+ llvm::AttributePlace::Function,
+ const_cstr!("tune-cpu"),
+ tune_cpu.as_c_str(),
+ );
+ }
+}
+
/// Sets the `NonLazyBind` LLVM attribute on a given function,
/// assuming the codegen options allow skipping the PLT.
pub fn non_lazy_bind(sess: &Session, llfn: &'ll Value) {
@@ -303,6 +311,9 @@
// Without this, ThinLTO won't inline Rust functions into Clang generated
// functions (because Clang annotates functions this way too).
apply_target_cpu_attr(cx, llfn);
+ // tune-cpu is only conveyed through the attribute for our purpose.
+ // The target doesn't care; the subtarget reads our attribute.
+ apply_tune_cpu_attr(cx, llfn);
let features = llvm_target_features(cx.tcx.sess)
.map(|s| s.to_string())
@@ -310,6 +321,10 @@
let feature = &f.as_str();
format!("+{}", llvm_util::to_llvm_feature(cx.tcx.sess, feature))
}))
+ .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
+ InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
+ InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
+ }))
.collect::<Vec<String>>()
.join(",");
@@ -326,7 +341,7 @@
// Note that currently the `wasm-import-module` doesn't do anything, but
// eventually LLVM 7 should read this and ferry the appropriate import
// module to the output file.
- if cx.tcx.sess.target.target.arch == "wasm32" {
+ if cx.tcx.sess.target.arch == "wasm32" {
if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
llvm::AddFunctionAttrStringValue(
llfn,
@@ -348,25 +363,7 @@
}
}
-pub fn provide(providers: &mut Providers) {
- providers.supported_target_features = |tcx, cnum| {
- assert_eq!(cnum, LOCAL_CRATE);
- if tcx.sess.opts.actually_rustdoc {
- // rustdoc needs to be able to document functions that use all the features, so
- // provide them all.
- llvm_util::all_known_features().map(|(a, b)| (a.to_string(), b)).collect()
- } else {
- llvm_util::supported_target_features(tcx.sess)
- .iter()
- .map(|&(a, b)| (a.to_string(), b))
- .collect()
- }
- };
-
- provide_extern(providers);
-}
-
-pub fn provide_extern(providers: &mut Providers) {
+pub fn provide_both(providers: &mut Providers) {
providers.wasm_import_module_map = |tcx, cnum| {
// Build up a map from DefId to a `NativeLib` structure, where
// `NativeLib` internally contains information about
@@ -379,8 +376,8 @@
.collect::<FxHashMap<_, _>>();
let mut ret = FxHashMap::default();
- for lib in tcx.foreign_modules(cnum).iter() {
- let module = def_id_to_native_lib.get(&lib.def_id).and_then(|s| s.wasm_import_module);
+ for (def_id, lib) in tcx.foreign_modules(cnum).iter() {
+ let module = def_id_to_native_lib.get(&def_id).and_then(|s| s.wasm_import_module);
let module = match module {
Some(s) => s,
None => continue,
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index a115a1e..4e72138 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -206,7 +206,7 @@
}
fn llvm_archive_kind(&self) -> Result<ArchiveKind, &str> {
- let kind = &*self.config.sess.target.target.options.archive_format;
+ let kind = &*self.config.sess.target.archive_format;
kind.parse().map_err(|_| kind)
}
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 4b2d590..64fd1d0 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -2,14 +2,14 @@
self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
};
use crate::llvm::archive_ro::ArchiveRO;
-use crate::llvm::{self, False, True};
+use crate::llvm::{self, build_string, False, True};
use crate::{LlvmCodegenBackend, ModuleLlvm};
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{FatalError, Handler};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::bug;
@@ -22,16 +22,14 @@
use std::ffi::{CStr, CString};
use std::fs::File;
use std::io;
-use std::mem;
use std::path::Path;
use std::ptr;
use std::slice;
use std::sync::Arc;
-/// We keep track of past LTO imports that were used to produce the current set
-/// of compiled object files that we might choose to reuse during this
-/// compilation session.
-pub const THIN_LTO_IMPORTS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-imports.bin";
+/// We keep track of the computed LTO cache keys from the previous
+/// session to determine which CGUs we can reuse.
+pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
match crate_type {
@@ -485,31 +483,31 @@
)
.ok_or_else(|| write::llvm_err(&diag_handler, "failed to prepare thin LTO context"))?;
+ let data = ThinData(data);
+
info!("thin LTO data created");
- let (import_map_path, prev_import_map, curr_import_map) =
- if let Some(ref incr_comp_session_dir) = cgcx.incr_comp_session_dir {
- let path = incr_comp_session_dir.join(THIN_LTO_IMPORTS_INCR_COMP_FILE_NAME);
- // If previous imports have been deleted, or we get an IO error
- // reading the file storing them, then we'll just use `None` as the
- // prev_import_map, which will force the code to be recompiled.
- let prev = if path.exists() {
- ThinLTOImportMaps::load_from_file(&path).ok()
- } else {
- None
- };
- let curr = ThinLTOImportMaps::from_thin_lto_data(data);
- (Some(path), prev, curr)
- } else {
- // If we don't compile incrementally, we don't need to load the
- // import data from LLVM.
- assert!(green_modules.is_empty());
- let curr = ThinLTOImportMaps::default();
- (None, None, curr)
- };
- info!("thin LTO import map loaded");
-
- let data = ThinData(data);
+ let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
+ cgcx.incr_comp_session_dir
+ {
+ let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
+ // If the previous file was deleted, or we get an IO error
+ // reading the file, then we'll just use `None` as the
+ // prev_key_map, which will force the code to be recompiled.
+ let prev =
+ if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
+ let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
+ (Some(path), prev, curr)
+ } else {
+ // If we don't compile incrementally, we don't need to load the
+ // import data from LLVM.
+ assert!(green_modules.is_empty());
+ let curr = ThinLTOKeysMap::default();
+ (None, None, curr)
+ };
+ info!("thin LTO cache key map loaded");
+ info!("prev_key_map: {:#?}", prev_key_map);
+ info!("curr_key_map: {:#?}", curr_key_map);
// Throw our data in an `Arc` as we'll be sharing it across threads. We
// also put all memory referenced by the C++ data (buffers, ids, etc)
@@ -528,60 +526,14 @@
info!("checking which modules can be-reused and which have to be re-optimized.");
for (module_index, module_name) in shared.module_names.iter().enumerate() {
let module_name = module_name_to_str(module_name);
-
- // If (1.) the module hasn't changed, and (2.) none of the modules
- // it imports from have changed, *and* (3.) the import and export
- // sets themselves have not changed from the previous compile when
- // it was last ThinLTO'ed, then we can re-use the post-ThinLTO
- // version of the module. Otherwise, freshly perform LTO
- // optimization.
- //
- // (Note that globally, the export set is just the inverse of the
- // import set.)
- //
- // For further justification of why the above is necessary and sufficient,
- // see the LLVM blog post on ThinLTO:
- //
- // http://blog.llvm.org/2016/06/thinlto-scalable-and-incremental-lto.html
- //
- // which states the following:
- //
- // ```quote
- // any particular ThinLTO backend must be redone iff:
- //
- // 1. The corresponding (primary) module’s bitcode changed
- // 2. The list of imports into or exports from the module changed
- // 3. The bitcode for any module being imported from has changed
- // 4. Any global analysis result affecting either the primary module
- // or anything it imports has changed.
- // ```
- //
- // This strategy means we can always save the computed imports as
- // canon: when we reuse the post-ThinLTO version, condition (3.)
- // ensures that the current import set is the same as the previous
- // one. (And of course, when we don't reuse the post-ThinLTO
- // version, the current import set *is* the correct one, since we
- // are doing the ThinLTO in this current compilation cycle.)
- //
- // For more discussion, see rust-lang/rust#59535 (where the import
- // issue was discovered) and rust-lang/rust#69798 (where the
- // analogous export issue was discovered).
- if let (Some(prev_import_map), true) =
- (prev_import_map.as_ref(), green_modules.contains_key(module_name))
+ if let (Some(prev_key_map), true) =
+ (prev_key_map.as_ref(), green_modules.contains_key(module_name))
{
assert!(cgcx.incr_comp_session_dir.is_some());
- let prev_imports = prev_import_map.imports_of(module_name);
- let curr_imports = curr_import_map.imports_of(module_name);
- let prev_exports = prev_import_map.exports_of(module_name);
- let curr_exports = curr_import_map.exports_of(module_name);
- let imports_all_green = curr_imports
- .iter()
- .all(|imported_module| green_modules.contains_key(imported_module));
- if imports_all_green
- && equivalent_as_sets(prev_imports, curr_imports)
- && equivalent_as_sets(prev_exports, curr_exports)
- {
+ // If a module exists in both the current and the previous session,
+ // and has the same LTO cache key in both sessions, then we can re-use it
+ if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
let work_product = green_modules[module_name].clone();
copy_jobs.push(work_product);
info!(" - {}: re-used", module_name);
@@ -599,10 +551,10 @@
}
// Save the current ThinLTO import information for the next compilation
- // session, overwriting the previous serialized imports (if any).
- if let Some(path) = import_map_path {
- if let Err(err) = curr_import_map.save_to_file(&path) {
- let msg = format!("Error while writing ThinLTO import data: {}", err);
+ // session, overwriting the previous serialized data (if any).
+ if let Some(path) = key_map_path {
+ if let Err(err) = curr_key_map.save_to_file(&path) {
+ let msg = format!("Error while writing ThinLTO key data: {}", err);
return Err(write::llvm_err(&diag_handler, &msg));
}
}
@@ -611,24 +563,6 @@
}
}
-/// Given two slices, each with no repeat elements. returns true if and only if
-/// the two slices have the same contents when considered as sets (i.e. when
-/// element order is disregarded).
-fn equivalent_as_sets(a: &[String], b: &[String]) -> bool {
- // cheap path: unequal lengths means cannot possibly be set equivalent.
- if a.len() != b.len() {
- return false;
- }
- // fast path: before building new things, check if inputs are equivalent as is.
- if a == b {
- return true;
- }
- // slow path: general set comparison.
- let a: FxHashSet<&str> = a.iter().map(|s| s.as_str()).collect();
- let b: FxHashSet<&str> = b.iter().map(|s| s.as_str()).collect();
- a == b
-}
-
pub(crate) fn run_pass_manager(
cgcx: &CodegenContext<LlvmCodegenBackend>,
module: &ModuleCodegen<ModuleLlvm>,
@@ -942,113 +876,56 @@
Ok(module)
}
-/// Summarizes module import/export relationships used by LLVM's ThinLTO pass.
-///
-/// Note that we tend to have two such instances of `ThinLTOImportMaps` in use:
-/// one loaded from a file that represents the relationships used during the
-/// compilation associated with the incremetnal build artifacts we are
-/// attempting to reuse, and another constructed via `from_thin_lto_data`, which
-/// captures the relationships of ThinLTO in the current compilation.
+/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
#[derive(Debug, Default)]
-pub struct ThinLTOImportMaps {
- // key = llvm name of importing module, value = list of modules it imports from
- imports: FxHashMap<String, Vec<String>>,
- // key = llvm name of exporting module, value = list of modules it exports to
- exports: FxHashMap<String, Vec<String>>,
+pub struct ThinLTOKeysMap {
+ // key = llvm name of importing module, value = LLVM cache key
+ keys: FxHashMap<String, String>,
}
-impl ThinLTOImportMaps {
- /// Returns modules imported by `llvm_module_name` during some ThinLTO pass.
- fn imports_of(&self, llvm_module_name: &str) -> &[String] {
- self.imports.get(llvm_module_name).map(|v| &v[..]).unwrap_or(&[])
- }
-
- /// Returns modules exported by `llvm_module_name` during some ThinLTO pass.
- fn exports_of(&self, llvm_module_name: &str) -> &[String] {
- self.exports.get(llvm_module_name).map(|v| &v[..]).unwrap_or(&[])
- }
-
+impl ThinLTOKeysMap {
fn save_to_file(&self, path: &Path) -> io::Result<()> {
use std::io::Write;
let file = File::create(path)?;
let mut writer = io::BufWriter::new(file);
- for (importing_module_name, imported_modules) in &self.imports {
- writeln!(writer, "{}", importing_module_name)?;
- for imported_module in imported_modules {
- writeln!(writer, " {}", imported_module)?;
- }
- writeln!(writer)?;
+ for (module, key) in &self.keys {
+ writeln!(writer, "{} {}", module, key)?;
}
Ok(())
}
- fn load_from_file(path: &Path) -> io::Result<ThinLTOImportMaps> {
+ fn load_from_file(path: &Path) -> io::Result<Self> {
use std::io::BufRead;
- let mut imports = FxHashMap::default();
- let mut exports: FxHashMap<_, Vec<_>> = FxHashMap::default();
- let mut current_module: Option<String> = None;
- let mut current_imports: Vec<String> = vec![];
+ let mut keys = FxHashMap::default();
let file = File::open(path)?;
for line in io::BufReader::new(file).lines() {
let line = line?;
- if line.is_empty() {
- let importing_module = current_module.take().expect("Importing module not set");
- for imported in &current_imports {
- exports.entry(imported.clone()).or_default().push(importing_module.clone());
- }
- imports.insert(importing_module, mem::replace(&mut current_imports, vec![]));
- } else if line.starts_with(' ') {
- // Space marks an imported module
- assert_ne!(current_module, None);
- current_imports.push(line.trim().to_string());
- } else {
- // Otherwise, beginning of a new module (must be start or follow empty line)
- assert_eq!(current_module, None);
- current_module = Some(line.trim().to_string());
- }
+ let mut split = line.split(' ');
+ let module = split.next().unwrap();
+ let key = split.next().unwrap();
+ assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
+ keys.insert(module.to_string(), key.to_string());
}
- Ok(ThinLTOImportMaps { imports, exports })
+ Ok(Self { keys })
}
- /// Loads the ThinLTO import map from ThinLTOData.
- unsafe fn from_thin_lto_data(data: *const llvm::ThinLTOData) -> ThinLTOImportMaps {
- unsafe extern "C" fn imported_module_callback(
- payload: *mut libc::c_void,
- importing_module_name: *const libc::c_char,
- imported_module_name: *const libc::c_char,
- ) {
- let map = &mut *(payload as *mut ThinLTOImportMaps);
- let importing_module_name = CStr::from_ptr(importing_module_name);
- let importing_module_name = module_name_to_str(&importing_module_name);
- let imported_module_name = CStr::from_ptr(imported_module_name);
- let imported_module_name = module_name_to_str(&imported_module_name);
-
- if !map.imports.contains_key(importing_module_name) {
- map.imports.insert(importing_module_name.to_owned(), vec![]);
- }
-
- map.imports
- .get_mut(importing_module_name)
- .unwrap()
- .push(imported_module_name.to_owned());
-
- if !map.exports.contains_key(imported_module_name) {
- map.exports.insert(imported_module_name.to_owned(), vec![]);
- }
-
- map.exports
- .get_mut(imported_module_name)
- .unwrap()
- .push(importing_module_name.to_owned());
- }
-
- let mut map = ThinLTOImportMaps::default();
- llvm::LLVMRustGetThinLTOModuleImports(
- data,
- imported_module_callback,
- &mut map as *mut _ as *mut libc::c_void,
- );
- map
+ fn from_thin_lto_modules(
+ data: &ThinData,
+ modules: &[llvm::ThinLTOModule],
+ names: &[CString],
+ ) -> Self {
+ let keys = modules
+ .iter()
+ .zip(names.iter())
+ .map(|(module, name)| {
+ let key = build_string(|rust_str| unsafe {
+ llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
+ })
+ .expect("Invalid ThinLTO module key");
+ (name.clone().into_string().unwrap(), key)
+ })
+ .collect();
+ Self { keys }
}
}
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index f35c101..e6acb68 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -128,40 +128,39 @@
let (opt_level, _) = to_llvm_opt_settings(optlvl);
let use_softfp = sess.opts.cg.soft_float;
- let ffunction_sections = sess.target.target.options.function_sections;
+ let ffunction_sections =
+ sess.opts.debugging_opts.function_sections.unwrap_or(sess.target.function_sections);
let fdata_sections = ffunction_sections;
let code_model = to_llvm_code_model(sess.code_model());
let features = attributes::llvm_target_features(sess).collect::<Vec<_>>();
- let mut singlethread = sess.target.target.options.singlethread;
+ let mut singlethread = sess.target.singlethread;
// On the wasm target once the `atomics` feature is enabled that means that
// we're no longer single-threaded, or otherwise we don't want LLVM to
// lower atomic operations to single-threaded operations.
if singlethread
- && sess.target.target.llvm_target.contains("wasm32")
+ && sess.target.llvm_target.contains("wasm32")
&& sess.target_features.contains(&sym::atomics)
{
singlethread = false;
}
- let triple = SmallCStr::new(&sess.target.target.llvm_target);
+ let triple = SmallCStr::new(&sess.target.llvm_target);
let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
let features = features.join(",");
let features = CString::new(features).unwrap();
- let abi = SmallCStr::new(&sess.target.target.options.llvm_abiname);
- let trap_unreachable = sess.target.target.options.trap_unreachable;
+ let abi = SmallCStr::new(&sess.target.llvm_abiname);
+ let trap_unreachable = sess.target.trap_unreachable;
let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;
let asm_comments = sess.asm_comments();
- let relax_elf_relocations = sess.target.target.options.relax_elf_relocations;
+ let relax_elf_relocations =
+ sess.opts.debugging_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);
- let use_init_array = !sess
- .opts
- .debugging_opts
- .use_ctors_section
- .unwrap_or(sess.target.target.options.use_ctors_section);
+ let use_init_array =
+ !sess.opts.debugging_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section);
Arc::new(move || {
let tm = unsafe {
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index f35708b..1090d4a 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -60,7 +60,7 @@
unsafe { llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr()) };
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
- let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
+ let section_name = metadata::metadata_section_name(&tcx.sess.target);
let name = SmallCStr::new(section_name);
llvm::LLVMSetSection(llglobal, name.as_ptr());
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 0c172dc..f122fa1 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -16,7 +16,7 @@
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::sym;
+use rustc_span::{sym, Span};
use rustc_target::abi::{self, Align, Size};
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
@@ -56,6 +56,7 @@
type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
+ type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation;
type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
}
@@ -139,6 +140,8 @@
unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
}
+ fn set_span(&mut self, _span: Span) {}
+
fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
unsafe {
llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
@@ -306,8 +309,8 @@
use rustc_middle::ty::{Int, Uint};
let new_kind = match ty.kind() {
- Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.ptr_width)),
- Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.ptr_width)),
+ Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+ Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
t @ (Uint(_) | Int(_)) => t.clone(),
_ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
};
@@ -539,7 +542,7 @@
}
fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
- if self.sess().target.target.arch == "amdgpu" {
+ if self.sess().target.arch == "amdgpu" {
// amdgpu/LLVM does something weird and thinks a i64 value is
// split into a v2i32, halving the bitwidth LLVM expects,
// tripping an assertion. So, for now, just disable this
@@ -669,7 +672,7 @@
// WebAssembly has saturating floating point to integer casts if the
// `nontrapping-fptoint` target feature is activated. We'll use those if
// they are available.
- if self.sess().target.target.arch == "wasm32"
+ if self.sess().target.arch == "wasm32"
&& self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
{
let src_ty = self.cx.val_ty(val);
@@ -694,7 +697,7 @@
// WebAssembly has saturating floating point to integer casts if the
// `nontrapping-fptoint` target feature is activated. We'll use those if
// they are available.
- if self.sess().target.target.arch == "wasm32"
+ if self.sess().target.arch == "wasm32"
&& self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
{
let src_ty = self.cx.val_ty(val);
@@ -729,10 +732,7 @@
let src_ty = self.cx.val_ty(val);
let float_width = self.cx.float_width(src_ty);
let int_width = self.cx.int_width(dest_ty);
- match (int_width, float_width) {
- (32, 32) | (32, 64) | (64, 32) | (64, 64) => true,
- _ => false,
- }
+ matches!((int_width, float_width), (32, 32) | (32, 64) | (64, 32) | (64, 64))
}
fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
@@ -1425,7 +1425,7 @@
}
fn wasm_and_missing_nontrapping_fptoint(&self) -> bool {
- self.sess().target.target.arch == "wasm32"
+ self.sess().target.arch == "wasm32"
&& !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
}
}
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 4afd906..367c1f4 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -176,7 +176,7 @@
// should use dllimport for functions.
if cx.use_dll_storage_attrs
&& tcx.is_dllimport_foreign_item(instance_def_id)
- && tcx.sess.target.target.target_env != "gnu"
+ && tcx.sess.target.env != "gnu"
{
unsafe {
llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 0992410a..34e1b7a 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -12,7 +12,7 @@
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{layout::TyAndLayout, ScalarInt};
use rustc_span::symbol::Symbol;
use rustc_target::abi::{self, AddressSpace, HasDataLayout, LayoutOf, Pointer, Size};
@@ -80,6 +80,7 @@
impl BackendTypes for CodegenCx<'ll, 'tcx> {
type Value = &'ll Value;
+ // FIXME(eddyb) replace this with a `Function` "subclass" of `Value`.
type Function = &'ll Value;
type BasicBlock = &'ll BasicBlock;
@@ -87,6 +88,7 @@
type Funclet = Funclet<'ll>;
type DIScope = &'ll llvm::debuginfo::DIScope;
+ type DILocation = &'ll llvm::debuginfo::DILocation;
type DIVariable = &'ll llvm::debuginfo::DIVariable;
}
@@ -228,12 +230,12 @@
fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
match cv {
- Scalar::Raw { size: 0, .. } => {
+ Scalar::Int(ScalarInt::ZST) => {
assert_eq!(0, layout.value.size(self).bytes());
self.const_undef(self.type_ix(0))
}
- Scalar::Raw { data, size } => {
- assert_eq!(size as u64, layout.value.size(self).bytes());
+ Scalar::Int(int) => {
+ let data = int.assert_bits(layout.value.size(self));
let llval = self.const_uint_big(self.type_ix(bitsize), data);
if layout.value == Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 6d3582d..14dd245 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -19,7 +19,6 @@
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::symbol::sym;
-use rustc_span::Span;
use rustc_target::abi::{AddressSpace, Align, HasDataLayout, LayoutOf, Primitive, Scalar, Size};
use tracing::debug;
@@ -92,7 +91,7 @@
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
// which can force it to be smaller. Rust doesn't support this yet.
- if let Some(min) = cx.sess().target.target.options.min_global_align {
+ if let Some(min) = cx.sess().target.min_global_align {
match Align::from_bits(min) {
Ok(min) => align = align.max(min),
Err(err) => {
@@ -110,7 +109,7 @@
attrs: &CodegenFnAttrs,
ty: Ty<'tcx>,
sym: &str,
- span: Span,
+ span_def_id: DefId,
) -> &'ll Value {
let llty = cx.layout_of(ty).llvm_type(cx);
if let Some(linkage) = attrs.linkage {
@@ -125,7 +124,7 @@
cx.layout_of(mt.ty).llvm_type(cx)
} else {
cx.sess().span_fatal(
- span,
+ cx.tcx.def_span(span_def_id),
"must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
)
};
@@ -143,7 +142,10 @@
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&sym);
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
- cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
+ cx.sess().span_fatal(
+ cx.tcx.def_span(span_def_id),
+ &format!("symbol `{}` is already defined", &sym),
+ )
});
llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
@@ -210,21 +212,21 @@
debug!("get_static: sym={} instance={:?}", sym, instance);
- let g = if let Some(def_id) = def_id.as_local() {
- let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let g = if let Some(local_def_id) = def_id.as_local() {
+ let id = self.tcx.hir().local_def_id_to_hir_id(local_def_id);
let llty = self.layout_of(ty).llvm_type(self);
// FIXME: refactor this to work without accessing the HIR
let (g, attrs) = match self.tcx.hir().get(id) {
- Node::Item(&hir::Item { attrs, span, kind: hir::ItemKind::Static(..), .. }) => {
+ Node::Item(&hir::Item { attrs, kind: hir::ItemKind::Static(..), .. }) => {
if let Some(g) = self.get_declared_value(sym) {
if self.val_ty(g) != self.type_ptr_to(llty) {
- span_bug!(span, "Conflicting types for static");
+ span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
}
}
let g = self.declare_global(sym, llty);
- if !self.tcx.is_reachable_non_generic(def_id) {
+ if !self.tcx.is_reachable_non_generic(local_def_id) {
unsafe {
llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
}
@@ -235,12 +237,11 @@
Node::ForeignItem(&hir::ForeignItem {
ref attrs,
- span,
kind: hir::ForeignItemKind::Static(..),
..
}) => {
- let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
- (check_and_apply_linkage(&self, &fn_attrs, ty, sym, span), &**attrs)
+ let fn_attrs = self.tcx.codegen_fn_attrs(local_def_id);
+ (check_and_apply_linkage(&self, &fn_attrs, ty, sym, def_id), &**attrs)
}
item => bug!("get_static: expected static, found {:?}", item),
@@ -260,8 +261,7 @@
debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
let attrs = self.tcx.codegen_fn_attrs(def_id);
- let span = self.tcx.def_span(def_id);
- let g = check_and_apply_linkage(&self, &attrs, ty, sym, span);
+ let g = check_and_apply_linkage(&self, &attrs, ty, sym, def_id);
// Thread-local statics in some other crate need to *always* be linked
// against in a thread-local fashion, so we need to be sure to apply the
@@ -283,7 +283,7 @@
// argument validation.
debug_assert!(
!(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
- && self.tcx.sess.target.target.options.is_like_windows
+ && self.tcx.sess.target.is_like_windows
&& self.tcx.sess.opts.cg.prefer_dynamic)
);
@@ -397,10 +397,8 @@
// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
- if !is_mutable {
- if self.type_is_freeze(ty) {
- llvm::LLVMSetGlobalConstant(g, llvm::True);
- }
+ if !is_mutable && self.type_is_freeze(ty) {
+ llvm::LLVMSetGlobalConstant(g, llvm::True);
}
debuginfo::create_global_var_metadata(&self, def_id, g);
@@ -437,7 +435,7 @@
// will use load-unaligned instructions instead, and thus avoiding the crash.
//
// We could remove this hack whenever we decide to drop macOS 10.10 support.
- if self.tcx.sess.target.target.options.is_like_osx {
+ if self.tcx.sess.target.is_like_osx {
// The `inspect` method is okay here because we checked relocations, and
// because we are doing this access to inspect the final interpreter state
// (not as part of the interpreter execution).
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 1696f35..b6e922c 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -118,18 +118,18 @@
let mod_name = SmallCStr::new(mod_name);
let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
- let mut target_data_layout = sess.target.target.data_layout.clone();
+ let mut target_data_layout = sess.target.data_layout.clone();
if llvm_util::get_major_version() < 9 {
target_data_layout = strip_function_ptr_alignment(target_data_layout);
}
- if llvm_util::get_major_version() < 10 {
- if sess.target.target.arch == "x86" || sess.target.target.arch == "x86_64" {
- target_data_layout = strip_x86_address_spaces(target_data_layout);
- }
+ if llvm_util::get_major_version() < 10
+ && (sess.target.arch == "x86" || sess.target.arch == "x86_64")
+ {
+ target_data_layout = strip_x86_address_spaces(target_data_layout);
}
// Ensure the data-layout values hardcoded remain the defaults.
- if sess.target.target.options.is_builtin {
+ if sess.target.is_builtin {
let tm = crate::back::write::create_informational_target_machine(tcx.sess);
llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
llvm::LLVMRustDisposeTargetMachine(tm);
@@ -160,7 +160,7 @@
bug!(
"data-layout for builtin `{}` target, `{}`, \
differs from LLVM default, `{}`",
- sess.target.target.llvm_target,
+ sess.target.llvm_target,
target_data_layout,
llvm_data_layout
);
@@ -170,7 +170,7 @@
let data_layout = SmallCStr::new(&target_data_layout);
llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
- let llvm_target = SmallCStr::new(&sess.target.target.llvm_target);
+ let llvm_target = SmallCStr::new(&sess.target.llvm_target);
llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
if sess.relocation_model() == RelocModel::Pic {
@@ -190,7 +190,7 @@
}
// Control Flow Guard is currently only supported by the MSVC linker on Windows.
- if sess.target.target.options.is_like_msvc {
+ if sess.target.is_like_msvc {
match sess.opts.cg.control_flow_guard {
CFGuard::Disabled => {}
CFGuard::NoChecks => {
@@ -265,7 +265,7 @@
// linker will take care of everything. Fixing this problem will likely
// require adding a few attributes to Rust itself (feature gated at the
// start) and then strongly recommending static linkage on Windows!
- let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_windows;
+ let use_dll_storage_attrs = tcx.sess.target.is_like_windows;
let check_overflow = tcx.sess.overflow_checks();
@@ -324,8 +324,8 @@
}
#[inline]
- pub fn coverage_context(&'a self) -> &'a coverageinfo::CrateCoverageContext<'tcx> {
- self.coverage_cx.as_ref().unwrap()
+ pub fn coverage_context(&'a self) -> Option<&'a coverageinfo::CrateCoverageContext<'tcx>> {
+ self.coverage_cx.as_ref()
}
}
@@ -417,7 +417,8 @@
}
fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
- attributes::apply_target_cpu_attr(self, llfn)
+ attributes::apply_target_cpu_attr(self, llfn);
+ attributes::apply_tune_cpu_attr(self, llfn);
}
fn create_used_variable(&self) {
@@ -838,7 +839,7 @@
return eh_catch_typeinfo;
}
let tcx = self.tcx;
- assert!(self.sess().target.target.options.is_like_emscripten);
+ assert!(self.sess().target.is_like_emscripten);
let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
Some(def_id) => self.get_static(def_id),
_ => {
@@ -863,7 +864,7 @@
// user defined names
let mut name = String::with_capacity(prefix.len() + 6);
name.push_str(prefix);
- name.push_str(".");
+ name.push('.');
base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
name
}
@@ -877,7 +878,7 @@
impl HasTargetSpec for CodegenCx<'ll, 'tcx> {
fn target_spec(&self) -> &Target {
- &self.tcx.sess.target.target
+ &self.tcx.sess.target
}
}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index ec6c177..41827a9 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -26,7 +26,10 @@
/// undocumented details in Clang's implementation (that may or may not be important) were also
/// replicated for Rust's Coverage Map.
pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
- let function_coverage_map = cx.coverage_context().take_function_coverage_map();
+ let function_coverage_map = match cx.coverage_context() {
+ Some(ctx) => ctx.take_function_coverage_map(),
+ None => return,
+ };
if function_coverage_map.is_empty() {
// This module has no functions with coverage instrumentation
return;
@@ -126,6 +129,7 @@
let (filenames_index, _) = self.filenames.insert_full(c_filename);
virtual_file_mapping.push(filenames_index as u32);
}
+ debug!("Adding counter {:?} to map for {:?}", counter, region);
mapping_regions.push(CounterMappingRegion::code_region(
counter,
current_file_id,
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 2bd37bf..e21e038 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -12,7 +12,7 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_llvm::RustString;
use rustc_middle::mir::coverage::{
- CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionIndex, Op,
+ CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op,
};
use rustc_middle::ty::Instance;
@@ -27,8 +27,8 @@
/// A context object for maintaining all state needed by the coverageinfo module.
pub struct CrateCoverageContext<'tcx> {
- // Coverage region data for each instrumented function identified by DefId.
- pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage>>,
+ // Coverage data for each instrumented function identified by DefId.
+ pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>>>,
}
impl<'tcx> CrateCoverageContext<'tcx> {
@@ -36,7 +36,7 @@
Self { function_coverage_map: Default::default() }
}
- pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage> {
+ pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>> {
self.function_coverage_map.replace(FxHashMap::default())
}
}
@@ -58,56 +58,90 @@
unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
}
- fn add_counter_region(
+ fn set_function_source_hash(
&mut self,
instance: Instance<'tcx>,
function_source_hash: u64,
- id: CounterValueReference,
- region: CodeRegion,
- ) {
- debug!(
- "adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \
- at {:?}",
- instance, function_source_hash, id, region,
- );
- let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
- coverage_regions
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter(function_source_hash, id, region);
+ ) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+ instance, function_source_hash,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .set_function_source_hash(function_source_hash);
+ true
+ } else {
+ false
+ }
}
- fn add_counter_expression_region(
+ fn add_coverage_counter(
&mut self,
instance: Instance<'tcx>,
- id: InjectedExpressionIndex,
+ id: CounterValueReference,
+ region: CodeRegion,
+ ) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
+ instance, id, region,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .add_counter(id, region);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn add_coverage_counter_expression(
+ &mut self,
+ instance: Instance<'tcx>,
+ id: InjectedExpressionId,
lhs: ExpressionOperandId,
op: Op,
rhs: ExpressionOperandId,
- region: CodeRegion,
- ) {
- debug!(
- "adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \
- at {:?}",
- instance, id, lhs, op, rhs, region,
- );
- let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
- coverage_regions
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter_expression(id, lhs, op, rhs, region);
+ region: Option<CodeRegion>,
+ ) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
+ region: {:?}",
+ instance, id, lhs, op, rhs, region,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .add_counter_expression(id, lhs, op, rhs, region);
+ true
+ } else {
+ false
+ }
}
- fn add_unreachable_region(&mut self, instance: Instance<'tcx>, region: CodeRegion) {
- debug!(
- "adding unreachable code to coverage_regions: instance={:?}, at {:?}",
- instance, region,
- );
- let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
- coverage_regions
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_unreachable_region(region);
+ fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "adding unreachable code to coverage_map: instance={:?}, at {:?}",
+ instance, region,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .add_unreachable_region(region);
+ true
+ } else {
+ false
+ }
}
}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index 7f47b61..6737872 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -3,21 +3,26 @@
use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext};
use rustc_codegen_ssa::traits::*;
+use crate::abi::FnAbi;
use crate::common::CodegenCx;
use crate::llvm;
-use crate::llvm::debuginfo::{DIScope, DISubprogram};
+use crate::llvm::debuginfo::{DILocation, DIScope};
use rustc_middle::mir::{Body, SourceScope};
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_middle::ty::{self, Instance};
use rustc_session::config::DebugInfo;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::Idx;
/// Produces DIScope DIEs for each MIR Scope which has variables defined in it.
+// FIXME(eddyb) almost all of this should be in `rustc_codegen_ssa::mir::debuginfo`.
pub fn compute_mir_scopes(
- cx: &CodegenCx<'ll, '_>,
- mir: &Body<'_>,
- fn_metadata: &'ll DISubprogram,
- debug_context: &mut FunctionDebugContext<&'ll DIScope>,
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+ mir: &Body<'tcx>,
+ fn_dbg_scope: &'ll DIScope,
+ debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
) {
// Find all the scopes with variables defined in them.
let mut has_variables = BitSet::new_empty(mir.source_scopes.len());
@@ -37,58 +42,82 @@
// Instantiate all scopes.
for idx in 0..mir.source_scopes.len() {
let scope = SourceScope::new(idx);
- make_mir_scope(cx, &mir, fn_metadata, &has_variables, debug_context, scope);
+ make_mir_scope(cx, instance, &mir, fn_dbg_scope, &has_variables, debug_context, scope);
}
}
fn make_mir_scope(
- cx: &CodegenCx<'ll, '_>,
- mir: &Body<'_>,
- fn_metadata: &'ll DISubprogram,
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+ mir: &Body<'tcx>,
+ fn_dbg_scope: &'ll DIScope,
has_variables: &BitSet<SourceScope>,
- debug_context: &mut FunctionDebugContext<&'ll DISubprogram>,
+ debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
scope: SourceScope,
) {
- if debug_context.scopes[scope].is_valid() {
+ if debug_context.scopes[scope].dbg_scope.is_some() {
return;
}
let scope_data = &mir.source_scopes[scope];
let parent_scope = if let Some(parent) = scope_data.parent_scope {
- make_mir_scope(cx, mir, fn_metadata, has_variables, debug_context, parent);
+ make_mir_scope(cx, instance, mir, fn_dbg_scope, has_variables, debug_context, parent);
debug_context.scopes[parent]
} else {
// The root is the function itself.
let loc = cx.lookup_debug_loc(mir.span.lo());
debug_context.scopes[scope] = DebugScope {
- scope_metadata: Some(fn_metadata),
+ dbg_scope: Some(fn_dbg_scope),
+ inlined_at: None,
file_start_pos: loc.file.start_pos,
file_end_pos: loc.file.end_pos,
};
return;
};
- if !has_variables.contains(scope) {
- // Do not create a DIScope if there are no variables
- // defined in this MIR Scope, to avoid debuginfo bloat.
+ if !has_variables.contains(scope) && scope_data.inlined.is_none() {
+ // Do not create a DIScope if there are no variables defined in this
+ // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
debug_context.scopes[scope] = parent_scope;
return;
}
let loc = cx.lookup_debug_loc(scope_data.span.lo());
- let file_metadata = file_metadata(cx, &loc.file, debug_context.defining_crate);
+ let file_metadata = file_metadata(cx, &loc.file);
- let scope_metadata = unsafe {
- Some(llvm::LLVMRustDIBuilderCreateLexicalBlock(
- DIB(cx),
- parent_scope.scope_metadata.unwrap(),
- file_metadata,
- loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
- loc.col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
- ))
+ let dbg_scope = match scope_data.inlined {
+ Some((callee, _)) => {
+ // FIXME(eddyb) this would be `self.monomorphize(&callee)`
+ // if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
+ let callee = cx.tcx.subst_and_normalize_erasing_regions(
+ instance.substs,
+ ty::ParamEnv::reveal_all(),
+ &callee,
+ );
+ let callee_fn_abi = FnAbi::of_instance(cx, callee, &[]);
+ cx.dbg_scope_fn(callee, &callee_fn_abi, None)
+ }
+ None => unsafe {
+ llvm::LLVMRustDIBuilderCreateLexicalBlock(
+ DIB(cx),
+ parent_scope.dbg_scope.unwrap(),
+ file_metadata,
+ loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
+ loc.col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
+ )
+ },
};
+
+ let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
+ // FIXME(eddyb) this doesn't account for the macro-related
+ // `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
+ let callsite_scope = parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
+ cx.dbg_loc(callsite_scope, parent_scope.inlined_at, callsite_span)
+ });
+
debug_context.scopes[scope] = DebugScope {
- scope_metadata,
+ dbg_scope: Some(dbg_scope),
+ inlined_at: inlined_at.or(parent_scope.inlined_at),
file_start_pos: loc.file.start_pos,
file_end_pos: loc.file.end_pos,
};
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs b/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
index b3a8fa2..10dd590 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
@@ -28,7 +28,7 @@
//! utilizing a cache. The way to get a shared metadata node when needed is
//! thus to just call the corresponding function in this module:
//!
-//! let file_metadata = file_metadata(crate_context, path);
+//! let file_metadata = file_metadata(cx, file);
//!
//! The function will take care of probing the cache for an existing node for
//! that exact file path.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 29edd660..38f50a6 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -67,5 +67,5 @@
!omit_gdb_pretty_printer_section
&& cx.sess().opts.debuginfo != DebugInfo::None
- && cx.sess().target.target.options.emit_debug_gdb_scripts
+ && cx.sess().target.emit_debug_gdb_scripts
}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 987149c..27b81eb 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -26,10 +26,9 @@
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_fs_util::path_to_c_string;
use rustc_hir::def::CtorKind;
-use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::ich::NodeIdHashingMode;
-use rustc_middle::mir::interpret::truncate;
use rustc_middle::mir::{self, Field, GeneratorLayout};
use rustc_middle::ty::layout::{self, IntegerExt, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::subst::GenericArgKind;
@@ -760,16 +759,12 @@
hex_string
}
-pub fn file_metadata(
- cx: &CodegenCx<'ll, '_>,
- source_file: &SourceFile,
- defining_crate: CrateNum,
-) -> &'ll DIFile {
- debug!("file_metadata: file_name: {}, defining_crate: {}", source_file.name, defining_crate);
+pub fn file_metadata(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> &'ll DIFile {
+ debug!("file_metadata: file_name: {}", source_file.name);
let hash = Some(&source_file.src_hash);
let file_name = Some(source_file.name.to_string());
- let directory = if defining_crate == LOCAL_CRATE {
+ let directory = if source_file.is_real_file() && !source_file.is_imported() {
Some(cx.sess().working_dir.0.to_string_lossy().to_string())
} else {
// If the path comes from an upstream crate we assume it has been made
@@ -805,6 +800,7 @@
let kind = match hash.kind {
rustc_span::SourceFileHashAlgorithm::Md5 => llvm::ChecksumKind::MD5,
rustc_span::SourceFileHashAlgorithm::Sha1 => llvm::ChecksumKind::SHA1,
+ rustc_span::SourceFileHashAlgorithm::Sha256 => llvm::ChecksumKind::SHA256,
};
(kind, hex_encode(hash.hash_bytes()))
}
@@ -874,7 +870,7 @@
// When targeting MSVC, emit MSVC style type names for compatibility with
// .natvis visualizers (and perhaps other existing native debuggers?)
- let msvc_like_names = cx.tcx.sess.target.target.options.is_like_msvc;
+ let msvc_like_names = cx.tcx.sess.target.is_like_msvc;
let (name, encoding) = match t.kind() {
ty::Never => ("!", DW_ATE_unsigned),
@@ -985,7 +981,7 @@
// if multiple object files with the same `DW_AT_name` are linked together.
// As a workaround we generate unique names for each object file. Those do
// not correspond to an actual source file but that should be harmless.
- if tcx.sess.target.target.options.is_like_osx {
+ if tcx.sess.target.is_like_osx {
name_in_debuginfo.push("@");
name_in_debuginfo.push(codegen_unit_name);
}
@@ -1401,7 +1397,7 @@
/// on MSVC we have to use the fallback mode, because LLVM doesn't
/// lower variant parts to PDB.
fn use_enum_fallback(cx: &CodegenCx<'_, '_>) -> bool {
- cx.sess().target.target.options.is_like_msvc
+ cx.sess().target.is_like_msvc
}
// FIXME(eddyb) maybe precompute this? Right now it's computed once
@@ -1696,7 +1692,7 @@
let value = (i.as_u32() as u128)
.wrapping_sub(niche_variants.start().as_u32() as u128)
.wrapping_add(niche_start);
- let value = truncate(value, tag.value.size(cx));
+ let value = tag.value.size(cx).truncate(value);
// NOTE(eddyb) do *NOT* remove this assert, until
// we pass the full 128-bit value to LLVM, otherwise
// truncation will be silent and remain undetected.
@@ -1835,7 +1831,7 @@
if !span.is_dummy() {
let loc = cx.lookup_debug_loc(span.lo());
return Some(SourceInfo {
- file: file_metadata(cx, &loc.file, def_id.krate),
+ file: file_metadata(cx, &loc.file),
line: loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
});
}
@@ -2474,7 +2470,7 @@
let (file_metadata, line_number) = if !span.is_dummy() {
let loc = cx.lookup_debug_loc(span.lo());
- (file_metadata(cx, &loc.file, LOCAL_CRATE), loc.line)
+ (file_metadata(cx, &loc.file), loc.line)
} else {
(unknown_file_metadata(cx), None)
};
@@ -2576,9 +2572,8 @@
pub fn extend_scope_to_file(
cx: &CodegenCx<'ll, '_>,
scope_metadata: &'ll DIScope,
- file: &rustc_span::SourceFile,
- defining_crate: CrateNum,
+ file: &SourceFile,
) -> &'ll DILexicalBlock {
- let file_metadata = file_metadata(cx, &file, defining_crate);
+ let file_metadata = file_metadata(cx, file);
unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlockFile(DIB(cx), scope_metadata, file_metadata) }
}
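Not part of the imported diff: one hunk in metadata.rs above replaces the free `truncate` helper with `tag.value.size(cx).truncate(value)` when encoding a niche discriminant. A standalone sketch of that computation with plain integers (the bit width stands in for `tag.value.size(cx)`):

```rust
// Keep only the low `bits` bits of `value`, like Size::truncate.
fn truncate(value: u128, bits: u32) -> u128 {
    if bits == 128 { value } else { value & ((1u128 << bits) - 1) }
}

// Mirrors the wrapping arithmetic in the hunk above: shift the variant index
// into the niche's value range, then truncate to the tag's size.
fn niche_tag_value(variant_index: u32, niche_variants_start: u32, niche_start: u128, bits: u32) -> u128 {
    let value = (variant_index as u128)
        .wrapping_sub(niche_variants_start as u128)
        .wrapping_add(niche_start);
    truncate(value, bits)
}

fn main() {
    // E.g. an 8-bit tag whose niche starts at 2: the variant one past the
    // start of the niche range is encoded as 3.
    assert_eq!(niche_tag_value(1, 0, 2, 8), 3);
}
```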
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 7cdd366..5065ff0 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -3,7 +3,8 @@
use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
-use self::metadata::{file_metadata, type_metadata, TypeMap, UNKNOWN_LINE_NUMBER};
+use self::metadata::{file_metadata, type_metadata, TypeMap};
+use self::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
use self::namespace::mangled_name_of_instance;
use self::type_names::compute_debuginfo_type_name;
use self::utils::{create_DIArray, is_node_local_to_unit, DIB};
@@ -13,7 +14,8 @@
use crate::common::CodegenCx;
use crate::llvm;
use crate::llvm::debuginfo::{
- DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DISPFlags, DIScope, DIType, DIVariable,
+ DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType,
+ DIVariable,
};
use crate::value::Value;
@@ -21,7 +23,8 @@
use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LOCAL_CRATE};
+use rustc_data_structures::sync::Lrc;
+use rustc_hir::def_id::{DefId, DefIdMap, LOCAL_CRATE};
use rustc_index::vec::IndexVec;
use rustc_middle::mir;
use rustc_middle::ty::layout::HasTyCtxt;
@@ -29,7 +32,7 @@
use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeFoldable};
use rustc_session::config::{self, DebugInfo};
use rustc_span::symbol::Symbol;
-use rustc_span::{self, BytePos, Span};
+use rustc_span::{self, BytePos, Pos, SourceFile, SourceFileAndLine, Span};
use rustc_target::abi::{LayoutOf, Primitive, Size};
use libc::c_uint;
@@ -41,7 +44,6 @@
pub mod gdb;
pub mod metadata;
mod namespace;
-mod source_loc;
mod utils;
pub use self::create_scope_map::compute_mir_scopes;
@@ -120,14 +122,12 @@
// for macOS to understand. For more info see #11352
// This can be overridden using --llvm-opts -dwarf-version,N.
// Android has the same issue (#22398)
- if cx.sess().target.target.options.is_like_osx
- || cx.sess().target.target.options.is_like_android
- {
- llvm::LLVMRustAddModuleFlag(cx.llmod, "Dwarf Version\0".as_ptr().cast(), 2)
+ if let Some(version) = cx.sess().target.dwarf_version {
+ llvm::LLVMRustAddModuleFlag(cx.llmod, "Dwarf Version\0".as_ptr().cast(), version)
}
// Indicate that we want CodeView debug information on MSVC
- if cx.sess().target.target.options.is_like_msvc {
+ if cx.sess().target.is_like_msvc {
llvm::LLVMRustAddModuleFlag(cx.llmod, "CodeView\0".as_ptr().cast(), 1)
}
@@ -143,14 +143,11 @@
fn dbg_var_addr(
&mut self,
dbg_var: &'ll DIVariable,
- scope_metadata: &'ll DIScope,
+ dbg_loc: &'ll DILocation,
variable_alloca: Self::Value,
direct_offset: Size,
indirect_offsets: &[Size],
- span: Span,
) {
- let cx = self.cx();
-
// Convert the direct and indirect offsets to address ops.
// FIXME(eddyb) use `const`s instead of getting the values via FFI,
// the values should match the ones in the DWARF standard anyway.
@@ -170,14 +167,10 @@
}
}
- // FIXME(eddyb) maybe this information could be extracted from `dbg_var`,
- // to avoid having to pass it down in both places?
- // NB: `var` doesn't seem to know about the column, so that's a limitation.
- let dbg_loc = cx.create_debug_loc(scope_metadata, span);
unsafe {
// FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
- DIB(cx),
+ DIB(self.cx()),
variable_alloca,
dbg_var,
addr_ops.as_ptr(),
@@ -188,15 +181,13 @@
}
}
- fn set_source_location(&mut self, scope: &'ll DIScope, span: Span) {
- debug!("set_source_location: {}", self.sess().source_map().span_to_string(span));
-
- let dbg_loc = self.cx().create_debug_loc(scope, span);
-
+ fn set_dbg_loc(&mut self, dbg_loc: &'ll DILocation) {
unsafe {
- llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc);
+ let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
+ llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
}
}
+
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
}
@@ -225,30 +216,95 @@
}
}
+/// A source code location used to generate debug information.
+// FIXME(eddyb) rename this to better indicate it's a duplicate of
+// `rustc_span::Loc` rather than `DILocation`, perhaps by making
+// `lookup_char_pos` return the right information instead.
+pub struct DebugLoc {
+ /// Information about the original source file.
+ pub file: Lrc<SourceFile>,
+ /// The (1-based) line number.
+ pub line: Option<u32>,
+ /// The (1-based) column number.
+ pub col: Option<u32>,
+}
+
+impl CodegenCx<'ll, '_> {
+ /// Looks up debug source information about a `BytePos`.
+ // FIXME(eddyb) rename this to better indicate it's a duplicate of
+ // `lookup_char_pos` rather than `dbg_loc`, perhaps by making
+ // `lookup_char_pos` return the right information instead.
+ pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
+ let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
+ Ok(SourceFileAndLine { sf: file, line }) => {
+ let line_pos = file.line_begin_pos(pos);
+
+ // Use 1-based indexing.
+ let line = (line + 1) as u32;
+ let col = (pos - line_pos).to_u32() + 1;
+
+ (file, Some(line), Some(col))
+ }
+ Err(file) => (file, None, None),
+ };
+
+ // For MSVC, omit the column number.
+ // Otherwise, emit it. This mimics clang behaviour.
+ // See discussion in https://github.com/rust-lang/rust/issues/42921
+ if self.sess().target.is_like_msvc {
+ DebugLoc { file, line, col: None }
+ } else {
+ DebugLoc { file, line, col }
+ }
+ }
+}
+
impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn create_function_debug_context(
&self,
instance: Instance<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
llfn: &'ll Value,
- mir: &mir::Body<'_>,
- ) -> Option<FunctionDebugContext<&'ll DIScope>> {
+ mir: &mir::Body<'tcx>,
+ ) -> Option<FunctionDebugContext<&'ll DIScope, &'ll DILocation>> {
if self.sess().opts.debuginfo == DebugInfo::None {
return None;
}
- let span = mir.span;
+ // Initialize fn debug context (including scopes).
+ // FIXME(eddyb) figure out a way to not need `Option` for `dbg_scope`.
+ let empty_scope = DebugScope {
+ dbg_scope: None,
+ inlined_at: None,
+ file_start_pos: BytePos(0),
+ file_end_pos: BytePos(0),
+ };
+ let mut fn_debug_context =
+ FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
- // This can be the case for functions inlined from another crate
- if span.is_dummy() {
- // FIXME(simulacrum): Probably can't happen; remove.
- return None;
- }
+ // Fill in all the scopes, with the information from the MIR body.
+ compute_mir_scopes(
+ self,
+ instance,
+ mir,
+ self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
+ &mut fn_debug_context,
+ );
+ Some(fn_debug_context)
+ }
+
+ fn dbg_scope_fn(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ maybe_definition_llfn: Option<&'ll Value>,
+ ) -> &'ll DIScope {
let def_id = instance.def_id();
let containing_scope = get_containing_scope(self, instance);
+ let span = self.tcx.def_span(def_id);
let loc = self.lookup_debug_loc(span.lo());
- let file_metadata = file_metadata(self, &loc.file, def_id.krate);
+ let file_metadata = file_metadata(self, &loc.file);
let function_type_metadata = unsafe {
let fn_signature = get_function_signature(self, fn_abi);
@@ -293,8 +349,8 @@
}
}
- let fn_metadata = unsafe {
- llvm::LLVMRustDIBuilderCreateFunction(
+ unsafe {
+ return llvm::LLVMRustDIBuilderCreateFunction(
DIB(self),
containing_scope,
name.as_ptr().cast(),
@@ -307,28 +363,11 @@
scope_line.unwrap_or(UNKNOWN_LINE_NUMBER),
flags,
spflags,
- llfn,
+ maybe_definition_llfn,
template_parameters,
None,
- )
- };
-
- // Initialize fn debug context (including scopes).
- // FIXME(eddyb) figure out a way to not need `Option` for `scope_metadata`.
- let null_scope = DebugScope {
- scope_metadata: None,
- file_start_pos: BytePos(0),
- file_end_pos: BytePos(0),
- };
- let mut fn_debug_context = FunctionDebugContext {
- scopes: IndexVec::from_elem(null_scope, &mir.source_scopes),
- defining_crate: def_id.krate,
- };
-
- // Fill in all the scopes, with the information from the MIR body.
- compute_mir_scopes(self, mir, fn_metadata, &mut fn_debug_context);
-
- return Some(fn_debug_context);
+ );
+ }
fn get_function_signature<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
@@ -348,7 +387,7 @@
});
// Arguments types
- if cx.sess().target.target.options.is_like_msvc {
+ if cx.sess().target.is_like_msvc {
// FIXME(#42800):
// There is a bug in MSDIA that leads to a crash when it encounters
// a fixed-size array of `u8` or something zero-sized in a
@@ -396,7 +435,7 @@
name_to_append_suffix_to.push('<');
for (i, actual_type) in substs.types().enumerate() {
if i != 0 {
- name_to_append_suffix_to.push_str(",");
+ name_to_append_suffix_to.push(',');
}
let actual_type =
@@ -503,6 +542,25 @@
}
}
+ fn dbg_loc(
+ &self,
+ scope: &'ll DIScope,
+ inlined_at: Option<&'ll DILocation>,
+ span: Span,
+ ) -> &'ll DILocation {
+ let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateDebugLocation(
+ utils::debug_context(self).llcontext,
+ line.unwrap_or(UNKNOWN_LINE_NUMBER),
+ col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
+ scope,
+ inlined_at,
+ )
+ }
+ }
+
fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value) {
metadata::create_vtable_metadata(self, ty, vtable)
}
@@ -511,9 +569,8 @@
&self,
scope_metadata: &'ll DIScope,
file: &rustc_span::SourceFile,
- defining_crate: CrateNum,
) -> &'ll DILexicalBlock {
- metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate)
+ metadata::extend_scope_to_file(&self, scope_metadata, file)
}
fn debuginfo_finalize(&self) {
@@ -524,7 +581,6 @@
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
fn create_dbg_var(
&self,
- dbg_context: &FunctionDebugContext<&'ll DIScope>,
variable_name: Symbol,
variable_type: Ty<'tcx>,
scope_metadata: &'ll DIScope,
@@ -532,7 +588,7 @@
span: Span,
) -> &'ll DIVariable {
let loc = self.lookup_debug_loc(span.lo());
- let file_metadata = file_metadata(self, &loc.file, dbg_context.defining_crate);
+ let file_metadata = file_metadata(self, &loc.file);
let type_metadata = type_metadata(self, variable_type, span);
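Not part of the imported diff: the `lookup_debug_loc` body moved into this module converts a byte position into 1-based line and column numbers, and drops the column on MSVC-like targets. A self-contained sketch of that conversion over a plain `&str` instead of the source map (the MSVC flag is passed in explicitly here):

```rust
fn debug_loc(src: &str, byte_pos: usize, is_like_msvc: bool) -> (u32, Option<u32>) {
    let before = &src[..byte_pos];
    // 1-based line: count newlines before the position.
    let line = before.bytes().filter(|&b| b == b'\n').count() as u32 + 1;
    // 1-based column: distance from the start of the current line.
    let line_begin = before.rfind('\n').map_or(0, |i| i + 1);
    let col = (byte_pos - line_begin) as u32 + 1;
    // MSVC-like targets omit the column, as in the code above.
    (line, if is_like_msvc { None } else { Some(col) })
}

fn main() {
    let src = "fn main() {\n    let x = 1;\n}\n";
    let pos = src.find("let").unwrap();
    assert_eq!(debug_loc(src, pos, false), (2, Some(5)));
    assert_eq!(debug_loc(src, pos, true), (2, None));
}
```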
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/source_loc.rs b/compiler/rustc_codegen_llvm/src/debuginfo/source_loc.rs
deleted file mode 100644
index 66ae9d7..0000000
--- a/compiler/rustc_codegen_llvm/src/debuginfo/source_loc.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-use super::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
-use super::utils::debug_context;
-
-use crate::common::CodegenCx;
-use crate::llvm::debuginfo::DIScope;
-use crate::llvm::{self, Value};
-use rustc_codegen_ssa::traits::*;
-
-use rustc_data_structures::sync::Lrc;
-use rustc_span::{BytePos, Pos, SourceFile, SourceFileAndLine, Span};
-
-/// A source code location used to generate debug information.
-pub struct DebugLoc {
- /// Information about the original source file.
- pub file: Lrc<SourceFile>,
- /// The (1-based) line number.
- pub line: Option<u32>,
- /// The (1-based) column number.
- pub col: Option<u32>,
-}
-
-impl CodegenCx<'ll, '_> {
- /// Looks up debug source information about a `BytePos`.
- pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
- let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
- Ok(SourceFileAndLine { sf: file, line }) => {
- let line_pos = file.line_begin_pos(pos);
-
- // Use 1-based indexing.
- let line = (line + 1) as u32;
- let col = (pos - line_pos).to_u32() + 1;
-
- (file, Some(line), Some(col))
- }
- Err(file) => (file, None, None),
- };
-
- // For MSVC, omit the column number.
- // Otherwise, emit it. This mimics clang behaviour.
- // See discussion in https://github.com/rust-lang/rust/issues/42921
- if self.sess().target.target.options.is_like_msvc {
- DebugLoc { file, line, col: None }
- } else {
- DebugLoc { file, line, col }
- }
- }
-
- pub fn create_debug_loc(&self, scope: &'ll DIScope, span: Span) -> &'ll Value {
- let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
-
- unsafe {
- llvm::LLVMRustDIBuilderCreateDebugLocation(
- debug_context(self).llcontext,
- line.unwrap_or(UNKNOWN_LINE_NUMBER),
- col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
- scope,
- None,
- )
- }
- }
-}
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index a3d6882..0591e0a 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -42,7 +42,7 @@
// be merged.
llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global);
- if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
+ if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.disable_redzone) {
llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 7f5b09e..d52b3be 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -334,8 +334,8 @@
self.call(expect, &[cond, self.const_bool(expected)], None)
}
- fn sideeffect(&mut self) {
- if self.tcx.sess.opts.debugging_opts.insert_sideeffect {
+ fn sideeffect(&mut self, unconditional: bool) {
+ if unconditional || self.tcx.sess.opts.debugging_opts.insert_sideeffect {
let fnname = self.get_intrinsic(&("llvm.sideeffect"));
self.call(fnname, &[], None);
}
@@ -367,7 +367,7 @@
bx.store(bx.const_i32(0), dest, ret_align);
} else if wants_msvc_seh(bx.sess()) {
codegen_msvc_try(bx, try_func, data, catch_func, dest);
- } else if bx.sess().target.target.options.is_like_emscripten {
+ } else if bx.sess().target.is_like_emscripten {
codegen_emcc_try(bx, try_func, data, catch_func, dest);
} else {
codegen_gnu_try(bx, try_func, data, catch_func, dest);
@@ -390,7 +390,7 @@
) {
let llfn = get_rust_try_fn(bx, &mut |mut bx| {
bx.set_personality_fn(bx.eh_personality());
- bx.sideeffect();
+ bx.sideeffect(false);
let mut normal = bx.build_sibling_block("normal");
let mut catchswitch = bx.build_sibling_block("catchswitch");
@@ -553,7 +553,7 @@
// call %catch_func(%data, %ptr)
// ret 1
- bx.sideeffect();
+ bx.sideeffect(false);
let mut then = bx.build_sibling_block("then");
let mut catch = bx.build_sibling_block("catch");
@@ -615,7 +615,7 @@
// call %catch_func(%data, %catch_data)
// ret 1
- bx.sideeffect();
+ bx.sideeffect(false);
let mut then = bx.build_sibling_block("then");
let mut catch = bx.build_sibling_block("catch");
@@ -673,17 +673,9 @@
fn gen_fn<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
name: &str,
- inputs: Vec<Ty<'tcx>>,
- output: Ty<'tcx>,
+ rust_fn_sig: ty::PolyFnSig<'tcx>,
codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
- let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
- inputs.into_iter(),
- output,
- false,
- hir::Unsafety::Unsafe,
- Abi::Rust,
- ));
let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
let llfn = cx.declare_fn(name, &fn_abi);
cx.set_frame_pointer_elimination(llfn);
@@ -710,22 +702,31 @@
// Define the type up front for the signature of the rust_try function.
let tcx = cx.tcx;
let i8p = tcx.mk_mut_ptr(tcx.types.i8);
- let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
+ // `unsafe fn(*mut i8) -> ()`
+ let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
iter::once(i8p),
tcx.mk_unit(),
false,
hir::Unsafety::Unsafe,
Abi::Rust,
)));
- let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
+ // `unsafe fn(*mut i8, *mut i8) -> ()`
+ let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
[i8p, i8p].iter().cloned(),
tcx.mk_unit(),
false,
hir::Unsafety::Unsafe,
Abi::Rust,
)));
- let output = tcx.types.i32;
- let rust_try = gen_fn(cx, "__rust_try", vec![try_fn_ty, i8p, catch_fn_ty], output, codegen);
+ // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
+ let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
+ vec![try_fn_ty, i8p, catch_fn_ty].into_iter(),
+ tcx.types.i32,
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ ));
+ let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
cx.rust_try_fn.set(Some(rust_try));
rust_try
}
@@ -793,14 +794,18 @@
require_simd!(arg_tys[1], "argument");
let v_len = arg_tys[1].simd_size(tcx);
require!(
- m_len == v_len,
+ // Allow masks for vectors with fewer than 8 elements to be
+ // represented with a u8 or i8.
+ m_len == v_len || (m_len == 8 && v_len < 8),
"mismatched lengths: mask length `{}` != other vector length `{}`",
m_len,
v_len
);
let i1 = bx.type_i1();
- let i1xn = bx.type_vector(i1, m_len);
- let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
+ let im = bx.type_ix(v_len);
+ let i1xn = bx.type_vector(i1, v_len);
+ let m_im = bx.trunc(args[0].immediate(), im);
+ let m_i1s = bx.bitcast(m_im, i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}
@@ -974,12 +979,14 @@
// Integer vector <i{in_bitwidth} x in_len>:
let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
- ty::Int(i) => {
- (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
- }
- ty::Uint(i) => {
- (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
- }
+ ty::Int(i) => (
+ args[0].immediate(),
+ i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+ ),
+ ty::Uint(i) => (
+ args[0].immediate(),
+ i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+ ),
_ => return_error!(
"vector argument `{}`'s element type `{}`, expected integer element type",
in_ty,
@@ -1718,10 +1725,10 @@
fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
match ty.kind() {
ty::Int(t) => {
- Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), true))
+ Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), true))
}
ty::Uint(t) => {
- Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), false))
+ Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), false))
}
_ => None,
}
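Not part of the imported diff: one hunk in intrinsic.rs above lets an 8-bit mask drive a SIMD select over a vector with fewer than 8 lanes, truncating the mask so only the low `v_len` bits are used. A standalone sketch of that selection on plain slices (lane-to-bit ordering here is an assumption for illustration, not taken from the LLVM lowering):

```rust
fn select_bitmask(mask: u8, if_true: &[i32], if_false: &[i32]) -> Vec<i32> {
    assert_eq!(if_true.len(), if_false.len());
    if_true
        .iter()
        .zip(if_false)
        .enumerate()
        // Only the low `len` bits of the mask matter; higher bits are ignored,
        // like the truncation to `im` in the hunk above.
        .map(|(lane, (&t, &f))| if mask & (1 << lane) != 0 { t } else { f })
        .collect()
}

fn main() {
    // A 4-lane vector controlled by an 8-bit mask; bits above lane 3 are ignored.
    assert_eq!(select_bitmask(0b1111_0101, &[1, 2, 3, 4], &[9, 9, 9, 9]), vec![1, 9, 3, 9]);
}
```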
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index f14493e..5974b59 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -23,18 +23,17 @@
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_ssa::{CodegenResults, CompiledModule};
+use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{ErrorReported, FatalError, Handler};
-use rustc_middle::dep_graph::{DepGraph, WorkProduct};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoaderDyn};
use rustc_middle::ty::{self, TyCtxt};
-use rustc_serialize::json;
-use rustc_session::config::{self, OptLevel, OutputFilenames, PrintRequest};
+use rustc_session::config::{OptLevel, OutputFilenames, PrintRequest};
use rustc_session::Session;
use rustc_span::symbol::Symbol;
use std::any::Any;
use std::ffi::CStr;
-use std::fs;
use std::sync::Arc;
mod back {
@@ -95,8 +94,9 @@
tcx: TyCtxt<'tcx>,
mods: &mut ModuleLlvm,
kind: AllocatorKind,
+ has_alloc_error_handler: bool,
) {
- unsafe { allocator::codegen(tcx, mods, kind) }
+ unsafe { allocator::codegen(tcx, mods, kind, has_alloc_error_handler) }
}
fn compile_codegen_unit(
&self,
@@ -115,6 +115,9 @@
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
llvm_util::target_cpu(sess)
}
+ fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str> {
+ llvm_util::tune_cpu(sess)
+ }
}
impl WriteBackendMethods for LlvmCodegenBackend {
@@ -248,11 +251,11 @@
}
fn provide(&self, providers: &mut ty::query::Providers) {
- attributes::provide(providers);
+ attributes::provide_both(providers);
}
fn provide_extern(&self, providers: &mut ty::query::Providers) {
- attributes::provide_extern(providers);
+ attributes::provide_both(providers);
}
fn codegen_crate<'tcx>(
@@ -273,47 +276,27 @@
&self,
ongoing_codegen: Box<dyn Any>,
sess: &Session,
- dep_graph: &DepGraph,
- ) -> Result<Box<dyn Any>, ErrorReported> {
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
let (codegen_results, work_products) = ongoing_codegen
.downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
.expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
.join(sess);
- if sess.opts.debugging_opts.incremental_info {
- rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results);
- }
- sess.time("serialize_work_products", move || {
- rustc_incremental::save_work_product_index(sess, &dep_graph, work_products)
+ sess.time("llvm_dump_timing_file", || {
+ if sess.opts.debugging_opts.llvm_time_trace {
+ llvm_util::time_trace_profiler_finish("llvm_timings.json");
+ }
});
- sess.compile_status()?;
-
- Ok(Box::new(codegen_results))
+ Ok((codegen_results, work_products))
}
fn link(
&self,
sess: &Session,
- codegen_results: Box<dyn Any>,
+ codegen_results: CodegenResults,
outputs: &OutputFilenames,
) -> Result<(), ErrorReported> {
- let codegen_results = codegen_results
- .downcast::<CodegenResults>()
- .expect("Expected CodegenResults, found Box<Any>");
-
- if sess.opts.debugging_opts.no_link {
- // FIXME: use a binary format to encode the `.rlink` file
- let rlink_data = json::encode(&codegen_results).map_err(|err| {
- sess.fatal(&format!("failed to encode rlink: {}", err));
- })?;
- let rlink_file = outputs.with_extension(config::RLINK_EXT);
- fs::write(&rlink_file, rlink_data).map_err(|err| {
- sess.fatal(&format!("failed to write file {}: {}", rlink_file.display(), err));
- })?;
- return Ok(());
- }
-
// Run the linker on any artifacts that resulted from the LLVM run.
// This should produce either a finished executable or library.
sess.time("link_crate", || {
@@ -330,16 +313,6 @@
);
});
- // Now that we won't touch anything in the incremental compilation directory
- // any more, we can finalize it (which involves renaming it)
- rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash);
-
- sess.time("llvm_dump_timing_file", || {
- if sess.opts.debugging_opts.llvm_time_trace {
- llvm_util::time_trace_profiler_finish("llvm_timings.json");
- }
- });
-
Ok(())
}
}
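Not part of the imported diff: `join_codegen` above recovers the backend's concrete `OngoingCodegen` state from the `Box<dyn Any>` the driver hands back. A minimal sketch of that downcast pattern with placeholder types (not the rustc ones):

```rust
use std::any::Any;

struct OngoingCodegen {
    modules: Vec<String>,
}

fn join_codegen(ongoing: Box<dyn Any>) -> Vec<String> {
    // Recover the concrete type; panics with a message if the driver handed
    // back some other backend's state, as in the `.expect(...)` above.
    let ongoing = *ongoing
        .downcast::<OngoingCodegen>()
        .expect("expected this backend's OngoingCodegen");
    ongoing.modules
}

fn main() {
    let opaque: Box<dyn Any> = Box::new(OngoingCodegen { modules: vec!["cgu0".into()] });
    assert_eq!(join_codegen(opaque), vec!["cgu0".to_string()]);
}
```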
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index af3f3e7..8b15c8b 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -5,8 +5,9 @@
use super::debuginfo::{
DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
- DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DINameSpace, DISPFlags, DIScope,
- DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable, DebugEmissionKind,
+ DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace,
+ DISPFlags, DIScope, DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable,
+ DebugEmissionKind,
};
use libc::{c_char, c_int, c_uint, size_t};
@@ -557,6 +558,7 @@
None,
MD5,
SHA1,
+ SHA256,
}
extern "C" {
@@ -794,6 +796,7 @@
pub struct DIBuilder<'a>(InvariantOpaque<'a>);
pub type DIDescriptor = Metadata;
+ pub type DILocation = Metadata;
pub type DIScope = DIDescriptor;
pub type DIFile = DIScope;
pub type DILexicalBlock = DIScope;
@@ -1854,7 +1857,7 @@
ScopeLine: c_uint,
Flags: DIFlags,
SPFlags: DISPFlags,
- Fn: &'a Value,
+ MaybeFn: Option<&'a Value>,
TParam: &'a DIArray,
Decl: Option<&'a DIDescriptor>,
) -> &'a DISubprogram;
@@ -2005,7 +2008,7 @@
VarInfo: &'a DIVariable,
AddrOps: *const i64,
AddrOpsCount: c_uint,
- DL: &'a Value,
+ DL: &'a DILocation,
InsertAtEnd: &'a BasicBlock,
) -> &'a Value;
@@ -2093,8 +2096,8 @@
Line: c_uint,
Column: c_uint,
Scope: &'a DIScope,
- InlinedAt: Option<&'a Metadata>,
- ) -> &'a Value;
+ InlinedAt: Option<&'a DILocation>,
+ ) -> &'a DILocation;
pub fn LLVMRustDIBuilderCreateOpDeref() -> i64;
pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> i64;
@@ -2362,4 +2365,10 @@
bytecode_len: usize,
) -> bool;
pub fn LLVMRustLinkerFree(linker: &'a mut Linker<'a>);
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustComputeLTOCacheKey(
+ key_out: &RustString,
+ mod_id: *const c_char,
+ data: &ThinLTOData,
+ );
}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index ed9b991..53a404e 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -118,11 +118,6 @@
}
}
-pub fn set_thread_local(global: &'a Value, is_thread_local: bool) {
- unsafe {
- LLVMSetThreadLocal(global, is_thread_local as Bool);
- }
-}
pub fn set_thread_local_mode(global: &'a Value, mode: ThreadLocalMode) {
unsafe {
LLVMSetThreadLocalMode(global, mode);
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 900f2df..ab70f72 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -1,12 +1,12 @@
use crate::back::write::create_informational_target_machine;
use crate::llvm;
use libc::c_int;
+use rustc_codegen_ssa::target_features::supported_target_features;
use rustc_data_structures::fx::FxHashSet;
use rustc_feature::UnstableFeatures;
use rustc_middle::bug;
use rustc_session::config::PrintRequest;
use rustc_session::Session;
-use rustc_span::symbol::sym;
use rustc_span::symbol::Symbol;
use rustc_target::spec::{MergeFunctions, PanicStrategy};
use std::ffi::CString;
@@ -46,7 +46,7 @@
}
unsafe fn configure_llvm(sess: &Session) {
- let n_args = sess.opts.cg.llvm_args.len() + sess.target.target.options.llvm_args.len();
+ let n_args = sess.opts.cg.llvm_args.len() + sess.target.llvm_args.len();
let mut llvm_c_strs = Vec::with_capacity(n_args + 1);
let mut llvm_args = Vec::with_capacity(n_args + 1);
@@ -57,7 +57,7 @@
}
let cg_opts = sess.opts.cg.llvm_args.iter();
- let tg_opts = sess.target.target.options.llvm_args.iter();
+ let tg_opts = sess.target.llvm_args.iter();
let sess_args = cg_opts.chain(tg_opts);
let user_specified_args: FxHashSet<_> =
@@ -84,21 +84,14 @@
if !sess.opts.debugging_opts.no_generate_arange_section {
add("-generate-arange-section", false);
}
- match sess
- .opts
- .debugging_opts
- .merge_functions
- .unwrap_or(sess.target.target.options.merge_functions)
- {
+ match sess.opts.debugging_opts.merge_functions.unwrap_or(sess.target.merge_functions) {
MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
MergeFunctions::Aliases => {
add("-mergefunc-use-aliases", false);
}
}
- if sess.target.target.target_os == "emscripten"
- && sess.panic_strategy() == PanicStrategy::Unwind
- {
+ if sess.target.os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind {
add("-enable-emscripten-cxx-exceptions", false);
}
@@ -122,7 +115,7 @@
llvm::LLVMInitializePasses();
- ::rustc_llvm::initialize_available_targets();
+ rustc_llvm::initialize_available_targets();
llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
}
@@ -139,142 +132,8 @@
// WARNING: the features after applying `to_llvm_feature` must be known
// to LLVM or the feature detection code will walk past the end of the feature
// array, leading to crashes.
-
-const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("aclass", Some(sym::arm_target_feature)),
- ("mclass", Some(sym::arm_target_feature)),
- ("rclass", Some(sym::arm_target_feature)),
- ("dsp", Some(sym::arm_target_feature)),
- ("neon", Some(sym::arm_target_feature)),
- ("crc", Some(sym::arm_target_feature)),
- ("crypto", Some(sym::arm_target_feature)),
- ("v5te", Some(sym::arm_target_feature)),
- ("v6", Some(sym::arm_target_feature)),
- ("v6k", Some(sym::arm_target_feature)),
- ("v6t2", Some(sym::arm_target_feature)),
- ("v7", Some(sym::arm_target_feature)),
- ("v8", Some(sym::arm_target_feature)),
- ("vfp2", Some(sym::arm_target_feature)),
- ("vfp3", Some(sym::arm_target_feature)),
- ("vfp4", Some(sym::arm_target_feature)),
- // This is needed for inline assembly, but shouldn't be stabilized as-is
- // since it should be enabled per-function using #[instruction_set], not
- // #[target_feature].
- ("thumb-mode", Some(sym::arm_target_feature)),
-];
-
-const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("fp", Some(sym::aarch64_target_feature)),
- ("neon", Some(sym::aarch64_target_feature)),
- ("sve", Some(sym::aarch64_target_feature)),
- ("crc", Some(sym::aarch64_target_feature)),
- ("crypto", Some(sym::aarch64_target_feature)),
- ("ras", Some(sym::aarch64_target_feature)),
- ("lse", Some(sym::aarch64_target_feature)),
- ("rdm", Some(sym::aarch64_target_feature)),
- ("fp16", Some(sym::aarch64_target_feature)),
- ("rcpc", Some(sym::aarch64_target_feature)),
- ("dotprod", Some(sym::aarch64_target_feature)),
- ("tme", Some(sym::aarch64_target_feature)),
- ("v8.1a", Some(sym::aarch64_target_feature)),
- ("v8.2a", Some(sym::aarch64_target_feature)),
- ("v8.3a", Some(sym::aarch64_target_feature)),
-];
-
-const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("adx", Some(sym::adx_target_feature)),
- ("aes", None),
- ("avx", None),
- ("avx2", None),
- ("avx512bw", Some(sym::avx512_target_feature)),
- ("avx512cd", Some(sym::avx512_target_feature)),
- ("avx512dq", Some(sym::avx512_target_feature)),
- ("avx512er", Some(sym::avx512_target_feature)),
- ("avx512f", Some(sym::avx512_target_feature)),
- ("avx512ifma", Some(sym::avx512_target_feature)),
- ("avx512pf", Some(sym::avx512_target_feature)),
- ("avx512vbmi", Some(sym::avx512_target_feature)),
- ("avx512vl", Some(sym::avx512_target_feature)),
- ("avx512vpopcntdq", Some(sym::avx512_target_feature)),
- ("bmi1", None),
- ("bmi2", None),
- ("cmpxchg16b", Some(sym::cmpxchg16b_target_feature)),
- ("f16c", Some(sym::f16c_target_feature)),
- ("fma", None),
- ("fxsr", None),
- ("lzcnt", None),
- ("movbe", Some(sym::movbe_target_feature)),
- ("pclmulqdq", None),
- ("popcnt", None),
- ("rdrand", None),
- ("rdseed", None),
- ("rtm", Some(sym::rtm_target_feature)),
- ("sha", None),
- ("sse", None),
- ("sse2", None),
- ("sse3", None),
- ("sse4.1", None),
- ("sse4.2", None),
- ("sse4a", Some(sym::sse4a_target_feature)),
- ("ssse3", None),
- ("tbm", Some(sym::tbm_target_feature)),
- ("xsave", None),
- ("xsavec", None),
- ("xsaveopt", None),
- ("xsaves", None),
-];
-
-const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("hvx", Some(sym::hexagon_target_feature)),
- ("hvx-length128b", Some(sym::hexagon_target_feature)),
-];
-
-const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("altivec", Some(sym::powerpc_target_feature)),
- ("power8-altivec", Some(sym::powerpc_target_feature)),
- ("power9-altivec", Some(sym::powerpc_target_feature)),
- ("power8-vector", Some(sym::powerpc_target_feature)),
- ("power9-vector", Some(sym::powerpc_target_feature)),
- ("vsx", Some(sym::powerpc_target_feature)),
-];
-
-const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
- &[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];
-
-const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("m", Some(sym::riscv_target_feature)),
- ("a", Some(sym::riscv_target_feature)),
- ("c", Some(sym::riscv_target_feature)),
- ("f", Some(sym::riscv_target_feature)),
- ("d", Some(sym::riscv_target_feature)),
- ("e", Some(sym::riscv_target_feature)),
-];
-
-const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("simd128", Some(sym::wasm_target_feature)),
- ("atomics", Some(sym::wasm_target_feature)),
- ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
-];
-
-/// When rustdoc is running, provide a list of all known features so that all their respective
-/// primitives may be documented.
-///
-/// IMPORTANT: If you're adding another feature list above, make sure to add it to this iterator!
-pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol>)> {
- std::iter::empty()
- .chain(ARM_ALLOWED_FEATURES.iter())
- .chain(AARCH64_ALLOWED_FEATURES.iter())
- .chain(X86_ALLOWED_FEATURES.iter())
- .chain(HEXAGON_ALLOWED_FEATURES.iter())
- .chain(POWERPC_ALLOWED_FEATURES.iter())
- .chain(MIPS_ALLOWED_FEATURES.iter())
- .chain(RISCV_ALLOWED_FEATURES.iter())
- .chain(WASM_ALLOWED_FEATURES.iter())
- .cloned()
-}
-
pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str {
- let arch = if sess.target.target.arch == "x86_64" { "x86" } else { &*sess.target.target.arch };
+ let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
match (arch, s) {
("x86", "pclmulqdq") => "pclmul",
("x86", "rdrand") => "rdrnd",
@@ -306,20 +165,6 @@
.collect()
}
-pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Option<Symbol>)] {
- match &*sess.target.target.arch {
- "arm" => ARM_ALLOWED_FEATURES,
- "aarch64" => AARCH64_ALLOWED_FEATURES,
- "x86" | "x86_64" => X86_ALLOWED_FEATURES,
- "hexagon" => HEXAGON_ALLOWED_FEATURES,
- "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
- "powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
- "riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
- "wasm32" => WASM_ALLOWED_FEATURES,
- _ => &[],
- }
-}
-
pub fn print_version() {
// Can be called without initializing LLVM
unsafe {
@@ -350,11 +195,7 @@
}
}
-pub fn target_cpu(sess: &Session) -> &str {
- let name = match sess.opts.cg.target_cpu {
- Some(ref s) => &**s,
- None => &*sess.target.target.options.cpu,
- };
+fn handle_native(name: &str) -> &str {
if name != "native" {
return name;
}
@@ -365,3 +206,19 @@
str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
}
}
+
+pub fn target_cpu(sess: &Session) -> &str {
+ let name = match sess.opts.cg.target_cpu {
+ Some(ref s) => &**s,
+ None => &*sess.target.cpu,
+ };
+
+ handle_native(name)
+}
+
+pub fn tune_cpu(sess: &Session) -> Option<&str> {
+ match sess.opts.debugging_opts.tune_cpu {
+ Some(ref s) => Some(handle_native(&**s)),
+ None => None,
+ }
+}
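Not part of the imported diff: the llvm_util.rs hunk above factors the "native" handling out of `target_cpu` so `tune_cpu` can share it. A sketch of the resolution order (explicit flag wins, otherwise the target default, and "native" is resolved against the host, stubbed here since the real code asks LLVM):

```rust
fn handle_native(name: &str) -> String {
    if name != "native" {
        return name.to_string();
    }
    // Placeholder: rustc queries LLVM for the host CPU name here.
    "host-cpu".to_string()
}

fn target_cpu(cli_cpu: Option<&str>, target_default: &str) -> String {
    handle_native(cli_cpu.unwrap_or(target_default))
}

fn main() {
    assert_eq!(target_cpu(Some("skylake"), "x86-64"), "skylake");
    assert_eq!(target_cpu(None, "x86-64"), "x86-64");
    assert_eq!(target_cpu(Some("native"), "x86-64"), "host-cpu");
}
```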
diff --git a/compiler/rustc_codegen_llvm/src/metadata.rs b/compiler/rustc_codegen_llvm/src/metadata.rs
index 9036428..3912d6a 100644
--- a/compiler/rustc_codegen_llvm/src/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/metadata.rs
@@ -104,7 +104,7 @@
// As a result, we choose a slightly shorter name! As to why
// `.note.rustc` works on MinGW, that's another good question...
- if target.options.is_like_osx { "__DATA,.rustc" } else { ".rustc" }
+ if target.is_like_osx { "__DATA,.rustc" } else { ".rustc" }
}
fn read_metadata_section_name(_target: &Target) -> &'static str {
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 22ed4dd..3fc56ee 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -52,7 +52,7 @@
let next = bx.inbounds_gep(addr, &[full_direct_size]);
bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
- if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.target.target_endian == "big" {
+ if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.endian == "big" {
let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
(bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
@@ -105,7 +105,7 @@
let mut end = bx.build_sibling_block("va_arg.end");
let zero = bx.const_i32(0);
let offset_align = Align::from_bytes(4).unwrap();
- assert!(&*bx.tcx().sess.target.target.target_endian == "little");
+ assert!(&*bx.tcx().sess.target.endian == "little");
let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
let (reg_off, reg_top_index, slot_size) = if gr_type {
@@ -171,28 +171,26 @@
) -> &'ll Value {
// Determine the va_arg implementation to use. The LLVM va_arg instruction
// is lacking in some instances, so we should only use it as a fallback.
- let target = &bx.cx.tcx.sess.target.target;
- let arch = &bx.cx.tcx.sess.target.target.arch;
- match (&**arch, target.options.is_like_windows) {
+ let target = &bx.cx.tcx.sess.target;
+ let arch = &bx.cx.tcx.sess.target.arch;
+ match &**arch {
// Windows x86
- ("x86", true) => {
+ "x86" if target.is_like_windows => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
}
// Generic x86
- ("x86", _) => {
- emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true)
- }
+ "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
// Windows AArch64
- ("aarch64", true) => {
+ "aarch64" if target.is_like_windows => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
}
- // iOS AArch64
- ("aarch64", _) if target.target_os == "ios" => {
+ // macOS / iOS AArch64
+ "aarch64" if target.is_like_osx => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
}
- ("aarch64", _) => emit_aapcs_va_arg(bx, addr, target_ty),
+ "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
// Windows x86_64
- ("x86_64", true) => {
+ "x86_64" if target.is_like_windows => {
let target_ty_size = bx.cx.size_of(target_ty).bytes();
let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)