Importing rustc-1.48.0
Bug: 173721343
Change-Id: Ie8184d9a685086ca8a77266d6c608843f40dc9e1
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
new file mode 100644
index 0000000..7857ccb
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -0,0 +1,502 @@
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm::{self, AttributePlace};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::MemFlags;
+use rustc_middle::bug;
+pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::ArgAbi;
+pub use rustc_target::abi::call::*;
+use rustc_target::abi::{self, HasDataLayout, Int, LayoutOf};
+pub use rustc_target::spec::abi::Abi;
+
+use libc::c_uint;
+
+macro_rules! for_each_kind {
+ ($flags: ident, $f: ident, $($kind: ident),+) => ({
+ $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
+ })
+}
+
+trait ArgAttributeExt {
+ fn for_each_kind<F>(&self, f: F)
+ where
+ F: FnMut(llvm::Attribute);
+}
+
+impl ArgAttributeExt for ArgAttribute {
+ fn for_each_kind<F>(&self, mut f: F)
+ where
+ F: FnMut(llvm::Attribute),
+ {
+ for_each_kind!(self, f, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
+ }
+}
+
+pub trait ArgAttributesExt {
+ fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>);
+ fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>);
+}
+
+impl ArgAttributesExt for ArgAttributes {
+ fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>) {
+ let mut regular = self.regular;
+ unsafe {
+ let deref = self.pointee_size.bytes();
+ if deref != 0 {
+ if regular.contains(ArgAttribute::NonNull) {
+ llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
+ } else {
+ llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
+ }
+ regular -= ArgAttribute::NonNull;
+ }
+ if let Some(align) = self.pointee_align {
+ llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
+ }
+ if regular.contains(ArgAttribute::ByVal) {
+ llvm::LLVMRustAddByValAttr(llfn, idx.as_uint(), ty.unwrap());
+ }
+ regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
+ }
+ }
+
+ fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>) {
+ let mut regular = self.regular;
+ unsafe {
+ let deref = self.pointee_size.bytes();
+ if deref != 0 {
+ if regular.contains(ArgAttribute::NonNull) {
+ llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
+ } else {
+ llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(
+ callsite,
+ idx.as_uint(),
+ deref,
+ );
+ }
+ regular -= ArgAttribute::NonNull;
+ }
+ if let Some(align) = self.pointee_align {
+ llvm::LLVMRustAddAlignmentCallSiteAttr(
+ callsite,
+ idx.as_uint(),
+ align.bytes() as u32,
+ );
+ }
+ if regular.contains(ArgAttribute::ByVal) {
+ llvm::LLVMRustAddByValCallSiteAttr(callsite, idx.as_uint(), ty.unwrap());
+ }
+ regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
+ }
+ }
+}
+
+pub trait LlvmType {
+ fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
+}
+
+impl LlvmType for Reg {
+ fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
+ match self.kind {
+ RegKind::Integer => cx.type_ix(self.size.bits()),
+ RegKind::Float => match self.size.bits() {
+ 32 => cx.type_f32(),
+ 64 => cx.type_f64(),
+ _ => bug!("unsupported float: {:?}", self),
+ },
+ RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
+ }
+ }
+}
+
+impl LlvmType for CastTarget {
+ fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
+ let rest_ll_unit = self.rest.unit.llvm_type(cx);
+ let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ } else {
+ (
+ self.rest.total.bytes() / self.rest.unit.size.bytes(),
+ self.rest.total.bytes() % self.rest.unit.size.bytes(),
+ )
+ };
+
+ if self.prefix.iter().all(|x| x.is_none()) {
+ // Simplify to a single unit when there is no prefix and size <= unit size
+ if self.rest.total <= self.rest.unit.size {
+ return rest_ll_unit;
+ }
+
+ // Simplify to array when all chunks are the same size and type
+ if rem_bytes == 0 {
+ return cx.type_array(rest_ll_unit, rest_count);
+ }
+ }
+
+ // Create list of fields in the main structure
+ let mut args: Vec<_> = self
+ .prefix
+ .iter()
+ .flat_map(|option_kind| {
+ option_kind.map(|kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx))
+ })
+ .chain((0..rest_count).map(|_| rest_ll_unit))
+ .collect();
+
+ // Append final integer
+ if rem_bytes != 0 {
+ // Only integers can be really split further.
+ assert_eq!(self.rest.unit.kind, RegKind::Integer);
+ args.push(cx.type_ix(rem_bytes * 8));
+ }
+
+ cx.type_struct(&args, false)
+ }
+}
+
+pub trait ArgAbiExt<'ll, 'tcx> {
+ fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+ fn store(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ val: &'ll Value,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ );
+ fn store_fn_arg(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ );
+}
+
+impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+ /// Gets the LLVM type for a place of the original Rust type of
+ /// this argument/return, i.e., the result of `type_of::type_of`.
+ fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+ self.layout.llvm_type(cx)
+ }
+
+ /// Stores a direct/indirect value described by this ArgAbi into a
+ /// place for the original Rust type of this argument/return.
+ /// Can be used for both storing formal arguments into Rust variables
+ /// or results of call/invoke instructions into their destinations.
+ fn store(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ val: &'ll Value,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ ) {
+ if self.is_ignore() {
+ return;
+ }
+ if self.is_sized_indirect() {
+ OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+ } else if self.is_unsized_indirect() {
+ bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+ } else if let PassMode::Cast(cast) = self.mode {
+ // FIXME(eddyb): Figure out when the simpler Store is safe, clang
+ // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+ let can_store_through_cast_ptr = false;
+ if can_store_through_cast_ptr {
+ let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
+ let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+ bx.store(val, cast_dst, self.layout.align.abi);
+ } else {
+ // The actual return type is a struct, but the ABI
+ // adaptation code has cast it into some scalar type. The
+ // code that follows is the only reliable way I have
+ // found to do a transform like i64 -> {i32,i32}.
+ // Basically we dump the data onto the stack then memcpy it.
+ //
+ // Other approaches I tried:
+ // - Casting rust ret pointer to the foreign type and using Store
+ // is (a) unsafe if size of foreign type > size of rust type and
+ // (b) runs afoul of strict aliasing rules, yielding invalid
+ // assembly under -O (specifically, the store gets removed).
+ // - Truncating foreign type to correct integral type and then
+ // bitcasting to the struct type yields invalid cast errors.
+
+ // We instead thus allocate some scratch space...
+ let scratch_size = cast.size(bx);
+ let scratch_align = cast.align(bx);
+ let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+ bx.lifetime_start(llscratch, scratch_size);
+
+ // ... where we first store the value...
+ bx.store(val, llscratch, scratch_align);
+
+ // ... and then memcpy it to the intended destination.
+ bx.memcpy(
+ dst.llval,
+ self.layout.align.abi,
+ llscratch,
+ scratch_align,
+ bx.const_usize(self.layout.size.bytes()),
+ MemFlags::empty(),
+ );
+
+ bx.lifetime_end(llscratch, scratch_size);
+ }
+ } else {
+ OperandValue::Immediate(val).store(bx, dst);
+ }
+ }
+
+ fn store_fn_arg(
+ &self,
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ ) {
+ let mut next = || {
+ let val = llvm::get_param(bx.llfn(), *idx as c_uint);
+ *idx += 1;
+ val
+ };
+ match self.mode {
+ PassMode::Ignore => {}
+ PassMode::Pair(..) => {
+ OperandValue::Pair(next(), next()).store(bx, dst);
+ }
+ PassMode::Indirect(_, Some(_)) => {
+ OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+ }
+ PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
+ let next_arg = next();
+ self.store(bx, next_arg, dst);
+ }
+ }
+ }
+}
+
+impl ArgAbiMethods<'tcx> for Builder<'a, 'll, 'tcx> {
+ fn store_fn_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, Self::Value>,
+ ) {
+ arg_abi.store_fn_arg(self, idx, dst)
+ }
+ fn store_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ val: &'ll Value,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ ) {
+ arg_abi.store(self, val, dst)
+ }
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+ arg_abi.memory_ty(self)
+ }
+}
+
+pub trait FnAbiLlvmExt<'tcx> {
+ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+ fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+ fn llvm_cconv(&self) -> llvm::CallConv;
+ fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
+ fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
+}
+
+impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
+ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+ let args_capacity: usize = self.args.iter().map(|arg|
+ if arg.pad.is_some() { 1 } else { 0 } +
+ if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
+ ).sum();
+ let mut llargument_tys = Vec::with_capacity(
+ if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity,
+ );
+
+ let llreturn_ty = match self.ret.mode {
+ PassMode::Ignore => cx.type_void(),
+ PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
+ PassMode::Cast(cast) => cast.llvm_type(cx),
+ PassMode::Indirect(..) => {
+ llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+ cx.type_void()
+ }
+ };
+
+ for arg in &self.args {
+ // add padding
+ if let Some(ty) = arg.pad {
+ llargument_tys.push(ty.llvm_type(cx));
+ }
+
+ let llarg_ty = match arg.mode {
+ PassMode::Ignore => continue,
+ PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
+ PassMode::Pair(..) => {
+ llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
+ llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
+ continue;
+ }
+ PassMode::Indirect(_, Some(_)) => {
+ let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
+ let ptr_layout = cx.layout_of(ptr_ty);
+ llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
+ llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
+ continue;
+ }
+ PassMode::Cast(cast) => cast.llvm_type(cx),
+ PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
+ };
+ llargument_tys.push(llarg_ty);
+ }
+
+ if self.c_variadic {
+ cx.type_variadic_func(&llargument_tys, llreturn_ty)
+ } else {
+ cx.type_func(&llargument_tys, llreturn_ty)
+ }
+ }
+
+ fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+ unsafe {
+ llvm::LLVMPointerType(
+ self.llvm_type(cx),
+ cx.data_layout().instruction_address_space.0 as c_uint,
+ )
+ }
+ }
+
+ fn llvm_cconv(&self) -> llvm::CallConv {
+ match self.conv {
+ Conv::C | Conv::Rust => llvm::CCallConv,
+ Conv::AmdGpuKernel => llvm::AmdGpuKernel,
+ Conv::AvrInterrupt => llvm::AvrInterrupt,
+ Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
+ Conv::ArmAapcs => llvm::ArmAapcsCallConv,
+ Conv::Msp430Intr => llvm::Msp430Intr,
+ Conv::PtxKernel => llvm::PtxKernel,
+ Conv::X86Fastcall => llvm::X86FastcallCallConv,
+ Conv::X86Intr => llvm::X86_Intr,
+ Conv::X86Stdcall => llvm::X86StdcallCallConv,
+ Conv::X86ThisCall => llvm::X86_ThisCall,
+ Conv::X86VectorCall => llvm::X86_VectorCall,
+ Conv::X86_64SysV => llvm::X86_64_SysV,
+ Conv::X86_64Win64 => llvm::X86_64_Win64,
+ }
+ }
+
+ fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
+ // FIXME(eddyb) can this also be applied to callsites?
+ if self.ret.layout.abi.is_uninhabited() {
+ llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
+ }
+
+ // FIXME(eddyb, wesleywiser): apply this to callsites as well?
+ if !self.can_unwind {
+ llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
+ }
+
+ let mut i = 0;
+ let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
+ attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty);
+ i += 1;
+ };
+ match self.ret.mode {
+ PassMode::Direct(ref attrs) => {
+ attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None);
+ }
+ PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(cx))),
+ _ => {}
+ }
+ for arg in &self.args {
+ if arg.pad.is_some() {
+ apply(&ArgAttributes::new(), None);
+ }
+ match arg.mode {
+ PassMode::Ignore => {}
+ PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
+ apply(attrs, Some(arg.layout.llvm_type(cx)))
+ }
+ PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
+ apply(attrs, None);
+ apply(extra_attrs, None);
+ }
+ PassMode::Pair(ref a, ref b) => {
+ apply(a, None);
+ apply(b, None);
+ }
+ PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
+ }
+ }
+ }
+
+ fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
+ // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite.
+
+ let mut i = 0;
+ let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
+ attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty);
+ i += 1;
+ };
+ match self.ret.mode {
+ PassMode::Direct(ref attrs) => {
+ attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None);
+ }
+ PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(bx))),
+ _ => {}
+ }
+ if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
+ // If the value is a boolean, the range is 0..2 and that ultimately
+ // become 0..0 when the type becomes i1, which would be rejected
+ // by the LLVM verifier.
+ if let Int(..) = scalar.value {
+ if !scalar.is_bool() {
+ let range = scalar.valid_range_exclusive(bx);
+ if range.start != range.end {
+ bx.range_metadata(callsite, range);
+ }
+ }
+ }
+ }
+ for arg in &self.args {
+ if arg.pad.is_some() {
+ apply(&ArgAttributes::new(), None);
+ }
+ match arg.mode {
+ PassMode::Ignore => {}
+ PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
+ apply(attrs, Some(arg.layout.llvm_type(bx)))
+ }
+ PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
+ apply(attrs, None);
+ apply(extra_attrs, None);
+ }
+ PassMode::Pair(ref a, ref b) => {
+ apply(a, None);
+ apply(b, None);
+ }
+ PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
+ }
+ }
+
+ let cconv = self.llvm_cconv();
+ if cconv != llvm::CCallConv {
+ llvm::SetInstructionCallConv(callsite, cconv);
+ }
+ }
+}
+
+impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
+ fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
+ fn_abi.apply_attrs_callsite(self, callsite)
+ }
+
+ fn get_param(&self, index: usize) -> Self::Value {
+ llvm::get_param(self.llfn(), index as c_uint)
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
new file mode 100644
index 0000000..bc1d9e1
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -0,0 +1,85 @@
+use crate::attributes;
+use libc::c_uint;
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+
+use crate::llvm::{self, False, True};
+use crate::ModuleLlvm;
+
+pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut ModuleLlvm, kind: AllocatorKind) {
+ let llcx = &*mods.llcx;
+ let llmod = mods.llmod();
+ let usize = match &tcx.sess.target.target.target_pointer_width[..] {
+ "16" => llvm::LLVMInt16TypeInContext(llcx),
+ "32" => llvm::LLVMInt32TypeInContext(llcx),
+ "64" => llvm::LLVMInt64TypeInContext(llcx),
+ tws => bug!("Unsupported target word size for int: {}", tws),
+ };
+ let i8 = llvm::LLVMInt8TypeInContext(llcx);
+ let i8p = llvm::LLVMPointerType(i8, 0);
+ let void = llvm::LLVMVoidTypeInContext(llcx);
+
+ for method in ALLOCATOR_METHODS {
+ let mut args = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ args.push(usize); // size
+ args.push(usize); // align
+ }
+ AllocatorTy::Ptr => args.push(i8p),
+ AllocatorTy::Usize => args.push(usize),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(i8p),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+ let ty = llvm::LLVMFunctionType(
+ output.unwrap_or(void),
+ args.as_ptr(),
+ args.len() as c_uint,
+ False,
+ );
+ let name = format!("__rust_{}", method.name);
+ let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
+
+ if tcx.sess.target.target.options.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ attributes::emit_uwtable(llfn, true);
+ }
+
+ let callee = kind.fn_name(method.name);
+ let callee =
+ llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
+ llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+ let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+ let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+ llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+ .collect::<Vec<_>>();
+ let ret =
+ llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
+ llvm::LLVMSetTailCall(ret, True);
+ if output.is_some() {
+ llvm::LLVMBuildRet(llbuilder, ret);
+ } else {
+ llvm::LLVMBuildRetVoid(llbuilder);
+ }
+ llvm::LLVMDisposeBuilder(llbuilder);
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
new file mode 100644
index 0000000..f801f84
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -0,0 +1,861 @@
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_ast::LlvmAsmDialect;
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_middle::span_bug;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_span::{Pos, Span};
+use rustc_target::abi::*;
+use rustc_target::asm::*;
+
+use libc::{c_char, c_uint};
+use tracing::debug;
+
+impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
+ fn codegen_llvm_inline_asm(
+ &mut self,
+ ia: &hir::LlvmInlineAsmInner,
+ outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
+ mut inputs: Vec<&'ll Value>,
+ span: Span,
+ ) -> bool {
+ let mut ext_constraints = vec![];
+ let mut output_types = vec![];
+
+ // Prepare the output operands
+ let mut indirect_outputs = vec![];
+ for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
+ if out.is_rw {
+ let operand = self.load_operand(place);
+ if let OperandValue::Immediate(_) = operand.val {
+ inputs.push(operand.immediate());
+ }
+ ext_constraints.push(i.to_string());
+ }
+ if out.is_indirect {
+ let operand = self.load_operand(place);
+ if let OperandValue::Immediate(_) = operand.val {
+ indirect_outputs.push(operand.immediate());
+ }
+ } else {
+ output_types.push(place.layout.llvm_type(self.cx));
+ }
+ }
+ if !indirect_outputs.is_empty() {
+ indirect_outputs.extend_from_slice(&inputs);
+ inputs = indirect_outputs;
+ }
+
+ let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));
+
+ // Default per-arch clobbers
+ // Basically what clang does
+ let arch_clobbers = match &self.sess().target.target.arch[..] {
+ "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
+ "mips" | "mips64" => vec!["~{$1}"],
+ _ => Vec::new(),
+ };
+
+ let all_constraints = ia
+ .outputs
+ .iter()
+ .map(|out| out.constraint.to_string())
+ .chain(ia.inputs.iter().map(|s| s.to_string()))
+ .chain(ext_constraints)
+ .chain(clobbers)
+ .chain(arch_clobbers.iter().map(|s| (*s).to_string()))
+ .collect::<Vec<String>>()
+ .join(",");
+
+ debug!("Asm Constraints: {}", &all_constraints);
+
+ // Depending on how many outputs we have, the return type is different
+ let num_outputs = output_types.len();
+ let output_type = match num_outputs {
+ 0 => self.type_void(),
+ 1 => output_types[0],
+ _ => self.type_struct(&output_types, false),
+ };
+
+ let asm = ia.asm.as_str();
+ let r = inline_asm_call(
+ self,
+ &asm,
+ &all_constraints,
+ &inputs,
+ output_type,
+ ia.volatile,
+ ia.alignstack,
+ ia.dialect,
+ &[span],
+ );
+ if r.is_none() {
+ return false;
+ }
+ let r = r.unwrap();
+
+ // Again, based on how many outputs we have
+ let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
+ for (i, (_, &place)) in outputs.enumerate() {
+ let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
+ OperandValue::Immediate(v).store(self, place);
+ }
+
+ true
+ }
+
+ fn codegen_inline_asm(
+ &mut self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperandRef<'tcx, Self>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ ) {
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+ // Collect the types of output operands
+ let mut constraints = vec![];
+ let mut output_types = vec![];
+ let mut op_idx = FxHashMap::default();
+ for (idx, op) in operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::Out { reg, late, place } => {
+ let mut layout = None;
+ let ty = if let Some(ref place) = place {
+ layout = Some(&place.layout);
+ llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
+ } else {
+ // If the output is discarded, we don't really care what
+ // type is used. We're just using this to tell LLVM to
+ // reserve the register.
+ dummy_output_type(self.cx, reg.reg_class())
+ };
+ output_types.push(ty);
+ op_idx.insert(idx, constraints.len());
+ let prefix = if late { "=" } else { "=&" };
+ constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
+ }
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+ let layout = if let Some(ref out_place) = out_place {
+ &out_place.layout
+ } else {
+ // LLVM required tied operands to have the same type,
+ // so we just use the type of the input.
+ &in_value.layout
+ };
+ let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
+ output_types.push(ty);
+ op_idx.insert(idx, constraints.len());
+ let prefix = if late { "=" } else { "=&" };
+ constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
+ }
+ _ => {}
+ }
+ }
+
+ // Collect input operands
+ let mut inputs = vec![];
+ for (idx, op) in operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::In { reg, value } => {
+ let llval =
+ llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
+ inputs.push(llval);
+ op_idx.insert(idx, constraints.len());
+ constraints.push(reg_to_llvm(reg, Some(&value.layout)));
+ }
+ InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
+ let value = llvm_fixup_input(
+ self,
+ in_value.immediate(),
+ reg.reg_class(),
+ &in_value.layout,
+ );
+ inputs.push(value);
+ constraints.push(format!("{}", op_idx[&idx]));
+ }
+ InlineAsmOperandRef::SymFn { instance } => {
+ inputs.push(self.cx.get_fn(instance));
+ op_idx.insert(idx, constraints.len());
+ constraints.push("s".to_string());
+ }
+ InlineAsmOperandRef::SymStatic { def_id } => {
+ inputs.push(self.cx.get_static(def_id));
+ op_idx.insert(idx, constraints.len());
+ constraints.push("s".to_string());
+ }
+ _ => {}
+ }
+ }
+
+ // Build the template string
+ let mut template_str = String::new();
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => {
+ if s.contains('$') {
+ for c in s.chars() {
+ if c == '$' {
+ template_str.push_str("$$");
+ } else {
+ template_str.push(c);
+ }
+ }
+ } else {
+ template_str.push_str(s)
+ }
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+ match operands[operand_idx] {
+ InlineAsmOperandRef::In { reg, .. }
+ | InlineAsmOperandRef::Out { reg, .. }
+ | InlineAsmOperandRef::InOut { reg, .. } => {
+ let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
+ if let Some(modifier) = modifier {
+ template_str.push_str(&format!(
+ "${{{}:{}}}",
+ op_idx[&operand_idx], modifier
+ ));
+ } else {
+ template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
+ }
+ }
+ InlineAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the template
+ template_str.push_str(string);
+ }
+ InlineAsmOperandRef::SymFn { .. }
+ | InlineAsmOperandRef::SymStatic { .. } => {
+ // Only emit the raw symbol name
+ template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
+ }
+ }
+ }
+ }
+ }
+
+ if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
+ match asm_arch {
+ InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
+ constraints.push("~{cc}".to_string());
+ }
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ constraints.extend_from_slice(&[
+ "~{dirflag}".to_string(),
+ "~{fpsr}".to_string(),
+ "~{flags}".to_string(),
+ ]);
+ }
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
+ InlineAsmArch::Nvptx64 => {}
+ InlineAsmArch::Hexagon => {}
+ InlineAsmArch::Mips => {}
+ }
+ }
+ if !options.contains(InlineAsmOptions::NOMEM) {
+ // This is actually ignored by LLVM, but it's probably best to keep
+ // it just in case. LLVM instead uses the ReadOnly/ReadNone
+ // attributes on the call instruction to optimize.
+ constraints.push("~{memory}".to_string());
+ }
+ let volatile = !options.contains(InlineAsmOptions::PURE);
+ let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
+ let output_type = match &output_types[..] {
+ [] => self.type_void(),
+ [ty] => ty,
+ tys => self.type_struct(&tys, false),
+ };
+ let dialect = match asm_arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64
+ if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
+ {
+ LlvmAsmDialect::Intel
+ }
+ _ => LlvmAsmDialect::Att,
+ };
+ let result = inline_asm_call(
+ self,
+ &template_str,
+ &constraints.join(","),
+ &inputs,
+ output_type,
+ volatile,
+ alignstack,
+ dialect,
+ line_spans,
+ )
+ .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
+
+ if options.contains(InlineAsmOptions::PURE) {
+ if options.contains(InlineAsmOptions::NOMEM) {
+ llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
+ } else if options.contains(InlineAsmOptions::READONLY) {
+ llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
+ }
+ } else {
+ if options.contains(InlineAsmOptions::NOMEM) {
+ llvm::Attribute::InaccessibleMemOnly
+ .apply_callsite(llvm::AttributePlace::Function, result);
+ } else {
+ // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
+ }
+ }
+
+ // Write results to outputs
+ for (idx, op) in operands.iter().enumerate() {
+ if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
+ | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
+ {
+ let value = if output_types.len() == 1 {
+ result
+ } else {
+ self.extract_value(result, op_idx[&idx] as u64)
+ };
+ let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
+ OperandValue::Immediate(value).store(self, place);
+ }
+ }
+ }
+}
+
+impl AsmMethods for CodegenCx<'ll, 'tcx> {
+ fn codegen_global_asm(&self, ga: &hir::GlobalAsm) {
+ let asm = ga.asm.as_str();
+ unsafe {
+ llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr().cast(), asm.len());
+ }
+ }
+}
+
+fn inline_asm_call(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ asm: &str,
+ cons: &str,
+ inputs: &[&'ll Value],
+ output: &'ll llvm::Type,
+ volatile: bool,
+ alignstack: bool,
+ dia: LlvmAsmDialect,
+ line_spans: &[Span],
+) -> Option<&'ll Value> {
+ let volatile = if volatile { llvm::True } else { llvm::False };
+ let alignstack = if alignstack { llvm::True } else { llvm::False };
+
+ let argtys = inputs
+ .iter()
+ .map(|v| {
+ debug!("Asm Input Type: {:?}", *v);
+ bx.cx.val_ty(*v)
+ })
+ .collect::<Vec<_>>();
+
+ debug!("Asm Output Type: {:?}", output);
+ let fty = bx.cx.type_func(&argtys[..], output);
+ unsafe {
+ // Ask LLVM to verify that the constraints are well-formed.
+ let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
+ debug!("constraint verification result: {:?}", constraints_ok);
+ if constraints_ok {
+ let v = llvm::LLVMRustInlineAsm(
+ fty,
+ asm.as_ptr().cast(),
+ asm.len(),
+ cons.as_ptr().cast(),
+ cons.len(),
+ volatile,
+ alignstack,
+ llvm::AsmDialect::from_generic(dia),
+ );
+ let call = bx.call(v, inputs, None);
+
+ // Store mark in a metadata node so we can map LLVM errors
+ // back to source locations. See #17552.
+ let key = "srcloc";
+ let kind = llvm::LLVMGetMDKindIDInContext(
+ bx.llcx,
+ key.as_ptr() as *const c_char,
+ key.len() as c_uint,
+ );
+
+ // srcloc contains one integer for each line of assembly code.
+ // Unfortunately this isn't enough to encode a full span so instead
+ // we just encode the start position of each line.
+ // FIXME: Figure out a way to pass the entire line spans.
+ let mut srcloc = vec![];
+ if dia == LlvmAsmDialect::Intel && line_spans.len() > 1 {
+ // LLVM inserts an extra line to add the ".intel_syntax", so add
+ // a dummy srcloc entry for it.
+ //
+ // Don't do this if we only have 1 line span since that may be
+ // due to the asm template string coming from a macro. LLVM will
+ // default to the first srcloc for lines that don't have an
+ // associated srcloc.
+ srcloc.push(bx.const_i32(0));
+ }
+ srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
+ let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
+ llvm::LLVMSetMetadata(call, kind, md);
+
+ Some(call)
+ } else {
+ // LLVM has detected an issue with our constraints, bail out
+ None
+ }
+ }
+}
+
+/// If the register is an xmm/ymm/zmm register then return its index.
+fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
+ match reg {
+ InlineAsmReg::X86(reg)
+ if reg as u32 >= X86InlineAsmReg::xmm0 as u32
+ && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
+ {
+ Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
+ }
+ InlineAsmReg::X86(reg)
+ if reg as u32 >= X86InlineAsmReg::ymm0 as u32
+ && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
+ {
+ Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
+ }
+ InlineAsmReg::X86(reg)
+ if reg as u32 >= X86InlineAsmReg::zmm0 as u32
+ && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
+ {
+ Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
+ }
+ _ => None,
+ }
+}
+
+/// If the register is an AArch64 vector register then return its index.
+fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
+ match reg {
+ InlineAsmReg::AArch64(reg)
+ if reg as u32 >= AArch64InlineAsmReg::v0 as u32
+ && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
+ {
+ Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
+ }
+ _ => None,
+ }
+}
+
+/// Converts a register class to an LLVM constraint code.
+///
+/// Returns the string placed in the LLVM inline-asm constraint list:
+/// either an explicit register name in braces (`"{rax}"`, `"{xmm3}"`) or a
+/// single-letter register-class constraint (`"r"`, `"x"`, ...).
+///
+/// NOTE(review): `'tcx` is not declared on this fn — presumably the crate
+/// enables `in_band_lifetimes`; confirm at the crate root.
+fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>) -> String {
+ match reg {
+ // For vector registers LLVM wants the register name to match the type size.
+ InlineAsmRegOrRegClass::Reg(reg) => {
+ if let Some(idx) = xmm_reg_index(reg) {
+ // Pick the x86 name prefix from the operand size: 64 bytes -> zmm,
+ // 32 bytes -> ymm, anything else -> xmm.
+ let class = if let Some(layout) = layout {
+ match layout.size.bytes() {
+ 64 => 'z',
+ 32 => 'y',
+ _ => 'x',
+ }
+ } else {
+ // We use f32 as the type for discarded outputs
+ 'x'
+ };
+ format!("{{{}mm{}}}", class, idx)
+ } else if let Some(idx) = a64_vreg_index(reg) {
+ let class = if let Some(layout) = layout {
+ match layout.size.bytes() {
+ 16 => 'q',
+ 8 => 'd',
+ 4 => 's',
+ 2 => 'h',
+ 1 => 'd', // We fixup i8 to i8x8 (see llvm_fixup_input), which is 8 bytes -> d-reg
+ _ => unreachable!(),
+ }
+ } else {
+ // We use i64x2 as the type for discarded outputs
+ 'q'
+ };
+ format!("{{{}{}}}", class, idx)
+ } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
+ // LLVM doesn't recognize x30
+ "{lr}".to_string()
+ } else {
+ format!("{{{}}}", reg.name())
+ }
+ }
+ // Register classes map straight to LLVM single-letter constraint codes.
+ InlineAsmRegOrRegClass::RegClass(reg) => match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => "l",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
+ }
+ .to_string(),
+ }
+}
+
+/// Converts a modifier into LLVM's equivalent modifier.
+///
+/// `modifier` is the (optional) template modifier written in the `asm!`
+/// source; the return value is the (optional) modifier to emit in the LLVM
+/// asm template string. `None` means the operand is emitted without a
+/// modifier.
+fn modifier_to_llvm(
+ arch: InlineAsmArch,
+ reg: InlineAsmRegClass,
+ modifier: Option<char>,
+) -> Option<char> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ // 'v' is the default vector form; dropping it lets LLVM pick it.
+ if modifier == Some('v') { None } else { modifier }
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => None,
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ if modifier.is_none() {
+ Some('q')
+ } else {
+ modifier
+ }
+ }
+ InlineAsmRegClass::Hexagon(_) => None,
+ InlineAsmRegClass::Mips(_) => None,
+ InlineAsmRegClass::Nvptx(_) => None,
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+ | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
+ // Rust width modifiers (l/h/x/e/r) map to LLVM's b/h/w/k/q; the
+ // default register width is 64-bit on x86-64 and 32-bit elsewhere.
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+ None if arch == InlineAsmArch::X86_64 => Some('q'),
+ None => Some('k'),
+ Some('l') => Some('b'),
+ Some('h') => Some('h'),
+ Some('x') => Some('w'),
+ Some('e') => Some('k'),
+ Some('r') => Some('q'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
+ // SSE/AVX width selection: x -> xmm, t -> ymm, g -> zmm in LLVM terms.
+ InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+ (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
+ (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
+ (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
+ (_, Some('x')) => Some('x'),
+ (_, Some('y')) => Some('t'),
+ (_, Some('z')) => Some('g'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+ }
+}
+
+/// Type to use for outputs that are discarded. It doesn't really matter what
+/// the type is, as long as it is valid for the constraint code.
+///
+/// Each arm must stay in sync with the constraint produced by `reg_to_llvm`
+/// for the same register class (e.g. f32 is valid for x86 "x" constraints).
+fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll Type {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ cx.type_vector(cx.type_i64(), 2)
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ cx.type_vector(cx.type_i64(), 2)
+ }
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+ }
+}
+
+/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
+/// the equivalent integer type.
+///
+/// Only the primitives that can appear as inline-asm operands are handled;
+/// anything else (e.g. 128-bit integers) hits the `unreachable!`.
+fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
+ match scalar.value {
+ Primitive::Int(Integer::I8, _) => cx.type_i8(),
+ Primitive::Int(Integer::I16, _) => cx.type_i16(),
+ Primitive::Int(Integer::I32, _) => cx.type_i32(),
+ Primitive::Int(Integer::I64, _) => cx.type_i64(),
+ Primitive::F32 => cx.type_f32(),
+ Primitive::F64 => cx.type_f64(),
+ // Pointers are passed to LLVM as pointer-sized integers.
+ Primitive::Pointer => cx.type_isize(),
+ _ => unreachable!(),
+ }
+}
+
+/// Fix up an input value to work around LLVM bugs.
+///
+/// Values headed for certain register classes are rewrapped so that LLVM
+/// accepts the constraint: scalars become 1-element vectors, pointers become
+/// integers, floats/ints are bitcast to the width the register expects, and
+/// small MIPS integers are zero-extended. `llvm_fixup_output` applies the
+/// inverse transformation, and `llvm_fixup_output_type` describes the
+/// resulting LLVM type.
+fn llvm_fixup_input(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ mut value: &'ll Value,
+ reg: InlineAsmRegClass,
+ layout: &TyAndLayout<'tcx>,
+) -> &'ll Value {
+ match (reg, &layout.abi) {
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ // Wrap an i8 into lane 0 of an <8 x i8> vector (matches the 'd'
+ // register name chosen in reg_to_llvm for 1-byte operands).
+ if let Primitive::Int(Integer::I8, _) = s.value {
+ let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
+ bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+ // Place the scalar in lane 0 of a full 16-byte vector; pointers are
+ // first converted to integers.
+ let elem_ty = llvm_asm_scalar_type(bx.cx, s);
+ let count = 16 / layout.size.bytes();
+ let vec_ty = bx.cx.type_vector(elem_ty, count);
+ if let Primitive::Pointer = s.value {
+ value = bx.ptrtoint(value, bx.cx.type_isize());
+ }
+ bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
+ }
+ (
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+ Abi::Vector { element, count },
+ ) if layout.size.bytes() == 8 => {
+ // Widen an 8-byte vector to 16 bytes by shuffling in undef lanes.
+ let elem_ty = llvm_asm_scalar_type(bx.cx, element);
+ let vec_ty = bx.cx.type_vector(elem_ty, *count);
+ let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
+ bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
+ }
+ (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ if s.value == Primitive::F64 =>
+ {
+ bx.bitcast(value, bx.cx.type_i64())
+ }
+ (
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+ Abi::Vector { .. },
+ ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
+ (
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I32, _) = s.value {
+ bx.bitcast(value, bx.cx.type_f32())
+ } else {
+ value
+ }
+ }
+ (
+ InlineAsmRegClass::Arm(
+ ArmInlineAsmRegClass::dreg
+ | ArmInlineAsmRegClass::dreg_low8
+ | ArmInlineAsmRegClass::dreg_low16,
+ ),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I64, _) = s.value {
+ bx.bitcast(value, bx.cx.type_f64())
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
+ // MIPS only supports register-length arithmetics.
+ Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
+ Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+ _ => value,
+ },
+ _ => value,
+ }
+}
+
+/// Fix up an output value to work around LLVM bugs.
+///
+/// This is the exact inverse of `llvm_fixup_input`: lane-0 extraction undoes
+/// the vector wrapping, inttoptr undoes ptrtoint, bitcasts go back to the
+/// source type, and MIPS values are truncated back to their declared width.
+/// Each match arm here must mirror the corresponding arm there.
+fn llvm_fixup_output(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ mut value: &'ll Value,
+ reg: InlineAsmRegClass,
+ layout: &TyAndLayout<'tcx>,
+) -> &'ll Value {
+ match (reg, &layout.abi) {
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ if let Primitive::Int(Integer::I8, _) = s.value {
+ bx.extract_element(value, bx.const_i32(0))
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+ value = bx.extract_element(value, bx.const_i32(0));
+ if let Primitive::Pointer = s.value {
+ value = bx.inttoptr(value, layout.llvm_type(bx.cx));
+ }
+ value
+ }
+ (
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+ Abi::Vector { element, count },
+ ) if layout.size.bytes() == 8 => {
+ // Narrow the 16-byte register value back to the original 8-byte
+ // vector by keeping only the first `count` lanes.
+ let elem_ty = llvm_asm_scalar_type(bx.cx, element);
+ let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
+ let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
+ bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
+ }
+ (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ if s.value == Primitive::F64 =>
+ {
+ bx.bitcast(value, bx.cx.type_f64())
+ }
+ (
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+ Abi::Vector { .. },
+ ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
+ (
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I32, _) = s.value {
+ bx.bitcast(value, bx.cx.type_i32())
+ } else {
+ value
+ }
+ }
+ (
+ InlineAsmRegClass::Arm(
+ ArmInlineAsmRegClass::dreg
+ | ArmInlineAsmRegClass::dreg_low8
+ | ArmInlineAsmRegClass::dreg_low16,
+ ),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I64, _) = s.value {
+ bx.bitcast(value, bx.cx.type_i64())
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
+ // MIPS only supports register-length arithmetics.
+ Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
+ Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
+ Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+ _ => value,
+ },
+ _ => value,
+ }
+}
+
+/// Output type to use for llvm_fixup_output.
+///
+/// Returns the LLVM type the asm output has *before* `llvm_fixup_output`
+/// converts it back; arms must stay in sync with `llvm_fixup_input` /
+/// `llvm_fixup_output`.
+fn llvm_fixup_output_type(
+ cx: &CodegenCx<'ll, 'tcx>,
+ reg: InlineAsmRegClass,
+ layout: &TyAndLayout<'tcx>,
+) -> &'ll Type {
+ match (reg, &layout.abi) {
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ if let Primitive::Int(Integer::I8, _) = s.value {
+ cx.type_vector(cx.type_i8(), 8)
+ } else {
+ layout.llvm_type(cx)
+ }
+ }
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+ let elem_ty = llvm_asm_scalar_type(cx, s);
+ let count = 16 / layout.size.bytes();
+ cx.type_vector(elem_ty, count)
+ }
+ (
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+ Abi::Vector { element, count },
+ ) if layout.size.bytes() == 8 => {
+ let elem_ty = llvm_asm_scalar_type(cx, element);
+ cx.type_vector(elem_ty, count * 2)
+ }
+ (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ if s.value == Primitive::F64 =>
+ {
+ cx.type_i64()
+ }
+ (
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+ Abi::Vector { .. },
+ ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
+ (
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I32, _) = s.value {
+ cx.type_f32()
+ } else {
+ layout.llvm_type(cx)
+ }
+ }
+ (
+ InlineAsmRegClass::Arm(
+ ArmInlineAsmRegClass::dreg
+ | ArmInlineAsmRegClass::dreg_low8
+ | ArmInlineAsmRegClass::dreg_low16,
+ ),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I64, _) = s.value {
+ cx.type_f64()
+ } else {
+ layout.llvm_type(cx)
+ }
+ }
+ (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
+ // MIPS only supports register-length arithmetics.
+ Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
+ Primitive::F32 => cx.type_i32(),
+ _ => layout.llvm_type(cx),
+ },
+ _ => layout.llvm_type(cx),
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
new file mode 100644
index 0000000..73c3481
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -0,0 +1,400 @@
+//! Set and unset common attributes on LLVM values.
+
+use std::ffi::CString;
+
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::const_cstr;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::config::{OptLevel, SanitizerSet};
+use rustc_session::Session;
+
+use crate::attributes;
+use crate::llvm::AttributePlace::Function;
+use crate::llvm::{self, Attribute};
+use crate::llvm_util;
+pub use rustc_attr::{InlineAttr, OptimizeAttr};
+
+use crate::context::CodegenCx;
+use crate::value::Value;
+
+/// Mark LLVM function to use provided inline heuristic.
+///
+/// `InlineAttr::None` clears all three inline attributes so a later call
+/// can re-apply a different hint without stale state.
+#[inline]
+fn inline(cx: &CodegenCx<'ll, '_>, val: &'ll Value, inline: InlineAttr) {
+ use self::InlineAttr::*;
+ match inline {
+ Hint => Attribute::InlineHint.apply_llfn(Function, val),
+ Always => Attribute::AlwaysInline.apply_llfn(Function, val),
+ Never => {
+ // NOTE(review): NoInline is skipped on amdgpu — presumably a
+ // backend limitation; confirm before relying on #[inline(never)]
+ // there.
+ if cx.tcx().sess.target.target.arch != "amdgpu" {
+ Attribute::NoInline.apply_llfn(Function, val);
+ }
+ }
+ None => {
+ Attribute::InlineHint.unapply_llfn(Function, val);
+ Attribute::AlwaysInline.unapply_llfn(Function, val);
+ Attribute::NoInline.unapply_llfn(Function, val);
+ }
+ };
+}
+
+/// Apply LLVM sanitize attributes.
+///
+/// `no_sanitize` is the set the function opted out of (e.g. via
+/// #[no_sanitize]); the `-` below is set difference, leaving only the
+/// sanitizers that are both globally enabled and not suppressed here.
+#[inline]
+pub fn sanitize(cx: &CodegenCx<'ll, '_>, no_sanitize: SanitizerSet, llfn: &'ll Value) {
+ let enabled = cx.tcx.sess.opts.debugging_opts.sanitizer - no_sanitize;
+ if enabled.contains(SanitizerSet::ADDRESS) {
+ llvm::Attribute::SanitizeAddress.apply_llfn(Function, llfn);
+ }
+ if enabled.contains(SanitizerSet::MEMORY) {
+ llvm::Attribute::SanitizeMemory.apply_llfn(Function, llfn);
+ }
+ if enabled.contains(SanitizerSet::THREAD) {
+ llvm::Attribute::SanitizeThread.apply_llfn(Function, llfn);
+ }
+}
+
+/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
+///
+/// `toggle_llfn` both sets (emit = true) and clears (emit = false) the
+/// `uwtable` attribute, so this can undo a previous call.
+#[inline]
+pub fn emit_uwtable(val: &'ll Value, emit: bool) {
+ Attribute::UWTable.toggle_llfn(Function, val, emit);
+}
+
+/// Tell LLVM if this function should be 'naked', i.e., skip the epilogue and prologue.
+/// Driven by #[naked] via CodegenFnAttrFlags::NAKED in `from_fn_attrs`.
+#[inline]
+fn naked(val: &'ll Value, is_naked: bool) {
+ Attribute::Naked.toggle_llfn(Function, val, is_naked);
+}
+
+/// Set the frame-pointer policy for `llfn`.
+///
+/// NOTE(review): despite the name, this function *prevents* frame-pointer
+/// elimination: when the session requires frame pointers it sets
+/// "frame-pointer"="all"; otherwise it adds nothing (LLVM's default applies).
+pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+ if cx.sess().must_not_eliminate_frame_pointers() {
+ llvm::AddFunctionAttrStringValue(
+ llfn,
+ llvm::AttributePlace::Function,
+ const_cstr!("frame-pointer"),
+ const_cstr!("all"),
+ );
+ }
+}
+
+/// Tell LLVM what instrument function to insert.
+///
+/// When -Z instrument-mcount is active, attaches the target-specific mcount
+/// symbol so the `post-inline-ee-instrument` pass inserts a profiling call
+/// at function entry.
+#[inline]
+fn set_instrument_function(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+ if cx.sess().instrument_mcount() {
+ // Similar to `clang -pg` behavior. Handled by the
+ // `post-inline-ee-instrument` LLVM pass.
+
+ // The function name varies on platforms.
+ // See test/CodeGen/mcount.c in clang.
+ let mcount_name =
+ CString::new(cx.sess().target.target.options.target_mcount.as_str().as_bytes())
+ .unwrap();
+
+ llvm::AddFunctionAttrStringValue(
+ llfn,
+ llvm::AttributePlace::Function,
+ const_cstr!("instrument-function-entry-inlined"),
+ &mcount_name,
+ );
+ }
+}
+
+/// Attach the "probe-stack" attribute so LLVM emits stack-probe calls,
+/// unless the target or an incompatible instrumentation option rules it out.
+fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+ // Only use stack probes if the target specification indicates that we
+ // should be using stack probes
+ if !cx.sess().target.target.options.stack_probes {
+ return;
+ }
+
+ // Currently stack probes seem somewhat incompatible with the address
+ // sanitizer and thread sanitizer. With asan we're already protected from
+ // stack overflow anyway so we don't really need stack probes regardless.
+ if cx
+ .sess()
+ .opts
+ .debugging_opts
+ .sanitizer
+ .intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
+ {
+ return;
+ }
+
+ // probestack doesn't play nice either with `-C profile-generate`.
+ if cx.sess().opts.cg.profile_generate.enabled() {
+ return;
+ }
+
+ // probestack doesn't play nice either with gcov profiling.
+ if cx.sess().opts.debugging_opts.profile {
+ return;
+ }
+
+ // FIXME(richkadel): Make sure probestack plays nice with `-Z instrument-coverage`
+ // or disable it if not, similar to above early exits.
+
+ // Flag our internal `__rust_probestack` function as the stack probe symbol.
+ // This is defined in the `compiler-builtins` crate for each architecture.
+ llvm::AddFunctionAttrStringValue(
+ llfn,
+ llvm::AttributePlace::Function,
+ const_cstr!("probe-stack"),
+ const_cstr!("__rust_probestack"),
+ );
+}
+
+/// Translate ARM FP feature names across the LLVM 9 rename.
+///
+/// LLVM 9 renamed fp-only-sp/d16 to (inverted) fp64/d32 flags. Depending on
+/// the LLVM major version linked in, map the other spelling to the one the
+/// backend understands; unknown features pass through unchanged.
+fn translate_obsolete_target_features(feature: &str) -> &str {
+ const LLVM9_FEATURE_CHANGES: &[(&str, &str)] =
+ &[("+fp-only-sp", "-fp64"), ("-fp-only-sp", "+fp64"), ("+d16", "-d32"), ("-d16", "+d32")];
+ if llvm_util::get_major_version() >= 9 {
+ // New LLVM: translate old spellings forward.
+ for &(old, new) in LLVM9_FEATURE_CHANGES {
+ if feature == old {
+ return new;
+ }
+ }
+ } else {
+ // Old LLVM: translate new spellings back.
+ for &(old, new) in LLVM9_FEATURE_CHANGES {
+ if feature == new {
+ return old;
+ }
+ }
+ }
+ feature
+}
+
+/// Iterate over the target features to pass to LLVM: the target spec's
+/// baseline features followed by `-C target-feature` from the command line,
+/// with empty entries dropped and obsolete names translated.
+pub fn llvm_target_features(sess: &Session) -> impl Iterator<Item = &str> {
+ // Features rustc handles itself and must not forward to LLVM.
+ const RUSTC_SPECIFIC_FEATURES: &[&str] = &["crt-static"];
+
+ // NOTE(review): `f.contains(s)` is a substring test, so e.g.
+ // "+crt-static" is filtered but so would any feature merely containing
+ // the string — confirm this looseness is intentional.
+ let cmdline = sess
+ .opts
+ .cg
+ .target_feature
+ .split(',')
+ .filter(|f| !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)));
+ sess.target
+ .target
+ .options
+ .features
+ .split(',')
+ .chain(cmdline)
+ .filter(|l| !l.is_empty())
+ .map(translate_obsolete_target_features)
+}
+
+/// Annotate `llfn` with the "target-cpu" string attribute for the session's
+/// target CPU (see the ThinLTO note in `from_fn_attrs` for why this is
+/// applied to every function).
+pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+ let target_cpu = SmallCStr::new(llvm_util::target_cpu(cx.tcx.sess));
+ llvm::AddFunctionAttrStringValue(
+ llfn,
+ llvm::AttributePlace::Function,
+ const_cstr!("target-cpu"),
+ target_cpu.as_c_str(),
+ );
+}
+
+/// Sets the `NonLazyBind` LLVM attribute on a given function,
+/// assuming the codegen options allow skipping the PLT.
+pub fn non_lazy_bind(sess: &Session, llfn: &'ll Value) {
+ // Don't generate calls through PLT if it's not necessary
+ if !sess.needs_plt() {
+ Attribute::NonLazyBind.apply_llfn(Function, llfn);
+ }
+}
+
+/// Apply the session-wide optimization level as LLVM attributes on `llfn`.
+///
+/// Each arm explicitly clears the attributes it does not want so repeated
+/// calls (or per-function overrides in `from_fn_attrs`) leave no stale
+/// state. The catch-all covers the remaining speed-optimizing levels, which
+/// need no size attributes.
+pub(crate) fn default_optimisation_attrs(sess: &Session, llfn: &'ll Value) {
+ match sess.opts.optimize {
+ OptLevel::Size => {
+ llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+ }
+ OptLevel::SizeMin => {
+ llvm::Attribute::MinSize.apply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+ }
+ OptLevel::No => {
+ llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+ }
+ _ => {}
+ }
+}
+
+/// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`)
+/// attributes.
+///
+/// Ordering matters: per-function #[optimize] / #[inline] overrides are
+/// applied after the session defaults, and the codegen_fn_attrs flags below
+/// each map one Rust attribute to one LLVM attribute.
+pub fn from_fn_attrs(cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value, instance: ty::Instance<'tcx>) {
+ let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
+
+ match codegen_fn_attrs.optimize {
+ OptimizeAttr::None => {
+ default_optimisation_attrs(cx.tcx.sess, llfn);
+ }
+ OptimizeAttr::Speed => {
+ llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+ }
+ OptimizeAttr::Size => {
+ llvm::Attribute::MinSize.apply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
+ llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+ }
+ }
+
+ // FIXME(eddyb) consolidate these two `inline` calls (and avoid overwrites).
+ if instance.def.requires_inline(cx.tcx) {
+ inline(cx, llfn, attributes::InlineAttr::Hint);
+ }
+
+ // The explicit #[inline] attribute (if any) wins over the hint above.
+ inline(cx, llfn, codegen_fn_attrs.inline.clone());
+
+ // The `uwtable` attribute according to LLVM is:
+ //
+ // This attribute indicates that the ABI being targeted requires that an
+ // unwind table entry be produced for this function even if we can show
+ // that no exceptions passes by it. This is normally the case for the
+ // ELF x86-64 abi, but it can be disabled for some compilation units.
+ //
+ // Typically when we're compiling with `-C panic=abort` (which implies this
+ // `no_landing_pads` check) we don't need `uwtable` because we can't
+ // generate any exceptions! On Windows, however, exceptions include other
+ // events such as illegal instructions, segfaults, etc. This means that on
+ // Windows we end up still needing the `uwtable` attribute even if the `-C
+ // panic=abort` flag is passed.
+ //
+ // You can also find more info on why Windows always requires uwtables here:
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
+ if cx.sess().must_emit_unwind_tables() {
+ attributes::emit_uwtable(llfn, true);
+ }
+
+ set_frame_pointer_elimination(cx, llfn);
+ set_instrument_function(cx, llfn);
+ set_probestack(cx, llfn);
+
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
+ Attribute::Cold.apply_llfn(Function, llfn);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_RETURNS_TWICE) {
+ Attribute::ReturnsTwice.apply_llfn(Function, llfn);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
+ Attribute::ReadOnly.apply_llfn(Function, llfn);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
+ Attribute::ReadNone.apply_llfn(Function, llfn);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ naked(llfn, true);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
+ // Allocators return fresh memory, so the result never aliases.
+ Attribute::NoAlias.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
+ llvm::AddFunctionAttrString(llfn, Function, const_cstr!("cmse_nonsecure_entry"));
+ }
+ sanitize(cx, codegen_fn_attrs.no_sanitize, llfn);
+
+ // Always annotate functions with the target-cpu they are compiled for.
+ // Without this, ThinLTO won't inline Rust functions into Clang generated
+ // functions (because Clang annotates functions this way too).
+ apply_target_cpu_attr(cx, llfn);
+
+ // Combine session-level features with per-function #[target_feature]s
+ // into one comma-separated "target-features" attribute string.
+ let features = llvm_target_features(cx.tcx.sess)
+ .map(|s| s.to_string())
+ .chain(codegen_fn_attrs.target_features.iter().map(|f| {
+ let feature = &f.as_str();
+ format!("+{}", llvm_util::to_llvm_feature(cx.tcx.sess, feature))
+ }))
+ .collect::<Vec<String>>()
+ .join(",");
+
+ if !features.is_empty() {
+ let val = CString::new(features).unwrap();
+ llvm::AddFunctionAttrStringValue(
+ llfn,
+ llvm::AttributePlace::Function,
+ const_cstr!("target-features"),
+ &val,
+ );
+ }
+
+ // Note that currently the `wasm-import-module` doesn't do anything, but
+ // eventually LLVM 7 should read this and ferry the appropriate import
+ // module to the output file.
+ if cx.tcx.sess.target.target.arch == "wasm32" {
+ if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
+ llvm::AddFunctionAttrStringValue(
+ llfn,
+ llvm::AttributePlace::Function,
+ const_cstr!("wasm-import-module"),
+ &module,
+ );
+
+ // The import name defaults to the item name unless overridden
+ // by #[link_name].
+ let name =
+ codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
+ let name = CString::new(&name.as_str()[..]).unwrap();
+ llvm::AddFunctionAttrStringValue(
+ llfn,
+ llvm::AttributePlace::Function,
+ const_cstr!("wasm-import-name"),
+ &name,
+ );
+ }
+ }
+}
+
+/// Install this backend's query providers for the local crate.
+pub fn provide(providers: &mut Providers) {
+ providers.supported_target_features = |tcx, cnum| {
+ // This provider is only valid for the crate being compiled.
+ assert_eq!(cnum, LOCAL_CRATE);
+ if tcx.sess.opts.actually_rustdoc {
+ // rustdoc needs to be able to document functions that use all the features, so
+ // provide them all.
+ llvm_util::all_known_features().map(|(a, b)| (a.to_string(), b)).collect()
+ } else {
+ llvm_util::supported_target_features(tcx.sess)
+ .iter()
+ .map(|&(a, b)| (a.to_string(), b))
+ .collect()
+ }
+ };
+
+ provide_extern(providers);
+}
+
+/// Install query providers that also apply to extern (non-local) crates.
+pub fn provide_extern(providers: &mut Providers) {
+ // Maps each foreign item's DefId to the wasm import module declared for
+ // its containing foreign module, if any.
+ providers.wasm_import_module_map = |tcx, cnum| {
+ // Build up a map from DefId to a `NativeLib` structure, where
+ // `NativeLib` internally contains information about
+ // `#[link(wasm_import_module = "...")]` for example.
+ let native_libs = tcx.native_libraries(cnum);
+
+ let def_id_to_native_lib = native_libs
+ .iter()
+ .filter_map(|lib| lib.foreign_module.map(|id| (id, lib)))
+ .collect::<FxHashMap<_, _>>();
+
+ let mut ret = FxHashMap::default();
+ for lib in tcx.foreign_modules(cnum).iter() {
+ let module = def_id_to_native_lib.get(&lib.def_id).and_then(|s| s.wasm_import_module);
+ // Foreign modules without a wasm_import_module contribute nothing.
+ let module = match module {
+ Some(s) => s,
+ None => continue,
+ };
+ ret.extend(lib.foreign_items.iter().map(|id| {
+ assert_eq!(id.krate, cnum);
+ (*id, module.to_string())
+ }));
+ }
+
+ ret
+ };
+}
+
+/// Look up the wasm import module for `id`, as a C string ready to be
+/// attached as an LLVM string attribute (see `from_fn_attrs`).
+fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<CString> {
+ tcx.wasm_import_module_map(id.krate).get(&id).map(|s| CString::new(&s[..]).unwrap())
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
new file mode 100644
index 0000000..a115a1e
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -0,0 +1,316 @@
+//! A helper class for dealing with static archives
+
+use std::ffi::{CStr, CString};
+use std::io;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::ptr;
+use std::str;
+
+use crate::llvm::archive_ro::{ArchiveRO, Child};
+use crate::llvm::{self, ArchiveKind};
+use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
+use rustc_codegen_ssa::{looks_like_rust_object_file, METADATA_FILENAME};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+
+struct ArchiveConfig<'a> {
+ pub sess: &'a Session,
+ pub dst: PathBuf,
+ pub src: Option<PathBuf>,
+ pub lib_search_paths: Vec<PathBuf>,
+}
+
+/// Helper for adding many files to an archive.
+#[must_use = "must call build() to finish building the archive"]
+pub struct LlvmArchiveBuilder<'a> {
+ config: ArchiveConfig<'a>,
+ removals: Vec<String>,
+ additions: Vec<Addition>,
+ should_update_symbols: bool,
+ src_archive: Option<Option<ArchiveRO>>,
+}
+
+enum Addition {
+ File { path: PathBuf, name_in_archive: String },
+ Archive { path: PathBuf, archive: ArchiveRO, skip: Box<dyn FnMut(&str) -> bool> },
+}
+
+impl Addition {
+ fn path(&self) -> &Path {
+ match self {
+ Addition::File { path, .. } | Addition::Archive { path, .. } => path,
+ }
+ }
+}
+
+fn is_relevant_child(c: &Child<'_>) -> bool {
+ match c.name() {
+ Some(name) => !name.contains("SYMDEF"),
+ None => false,
+ }
+}
+
+fn archive_config<'a>(sess: &'a Session, output: &Path, input: Option<&Path>) -> ArchiveConfig<'a> {
+ use rustc_codegen_ssa::back::link::archive_search_paths;
+ ArchiveConfig {
+ sess,
+ dst: output.to_path_buf(),
+ src: input.map(|p| p.to_path_buf()),
+ lib_search_paths: archive_search_paths(sess),
+ }
+}
+
+impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
+ /// Creates a new static archive, ready for modifying the archive specified
+ /// by `config`.
+ fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> LlvmArchiveBuilder<'a> {
+ let config = archive_config(sess, output, input);
+ LlvmArchiveBuilder {
+ config,
+ removals: Vec::new(),
+ additions: Vec::new(),
+ should_update_symbols: false,
+ src_archive: None,
+ }
+ }
+
+ /// Removes a file from this archive
+ fn remove_file(&mut self, file: &str) {
+ self.removals.push(file.to_string());
+ }
+
+ /// Lists all files in an archive
+ fn src_files(&mut self) -> Vec<String> {
+ if self.src_archive().is_none() {
+ return Vec::new();
+ }
+
+ let archive = self.src_archive.as_ref().unwrap().as_ref().unwrap();
+
+ archive
+ .iter()
+ .filter_map(|child| child.ok())
+ .filter(is_relevant_child)
+ .filter_map(|child| child.name())
+ .filter(|name| !self.removals.iter().any(|x| x == name))
+ .map(|name| name.to_owned())
+ .collect()
+ }
+
+ /// Adds all of the contents of a native library to this archive. This will
+ /// search in the relevant locations for a library named `name`.
+ fn add_native_library(&mut self, name: Symbol) {
+ let location = find_library(name, &self.config.lib_search_paths, self.config.sess);
+ self.add_archive(&location, |_| false).unwrap_or_else(|e| {
+ self.config.sess.fatal(&format!(
+ "failed to add native library {}: {}",
+ location.to_string_lossy(),
+ e
+ ));
+ });
+ }
+
+ /// Adds all of the contents of the rlib at the specified path to this
+ /// archive.
+ ///
+ /// This ignores adding the bytecode from the rlib, and if LTO is enabled
+ /// then the object file also isn't added.
+ fn add_rlib(
+ &mut self,
+ rlib: &Path,
+ name: &str,
+ lto: bool,
+ skip_objects: bool,
+ ) -> io::Result<()> {
+ // Ignoring obj file starting with the crate name
+ // as simple comparison is not enough - there
+        // might also be an extra name suffix
+ let obj_start = name.to_owned();
+
+ self.add_archive(rlib, move |fname: &str| {
+ // Ignore metadata files, no matter the name.
+ if fname == METADATA_FILENAME {
+ return true;
+ }
+
+ // Don't include Rust objects if LTO is enabled
+ if lto && looks_like_rust_object_file(fname) {
+ return true;
+ }
+
+ // Otherwise if this is *not* a rust object and we're skipping
+ // objects then skip this file
+ if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
+ return true;
+ }
+
+ // ok, don't skip this
+ false
+ })
+ }
+
+ /// Adds an arbitrary file to this archive
+ fn add_file(&mut self, file: &Path) {
+ let name = file.file_name().unwrap().to_str().unwrap();
+ self.additions
+ .push(Addition::File { path: file.to_path_buf(), name_in_archive: name.to_owned() });
+ }
+
+ /// Indicate that the next call to `build` should update all symbols in
+ /// the archive (equivalent to running 'ar s' over it).
+ fn update_symbols(&mut self) {
+ self.should_update_symbols = true;
+ }
+
+ /// Combine the provided files, rlibs, and native libraries into a single
+ /// `Archive`.
+ fn build(mut self) {
+ let kind = self.llvm_archive_kind().unwrap_or_else(|kind| {
+ self.config.sess.fatal(&format!("Don't know how to build archive of type: {}", kind))
+ });
+
+ if let Err(e) = self.build_with_llvm(kind) {
+ self.config.sess.fatal(&format!("failed to build archive: {}", e));
+ }
+ }
+}
+
+impl<'a> LlvmArchiveBuilder<'a> {
+ fn src_archive(&mut self) -> Option<&ArchiveRO> {
+ if let Some(ref a) = self.src_archive {
+ return a.as_ref();
+ }
+ let src = self.config.src.as_ref()?;
+ self.src_archive = Some(ArchiveRO::open(src).ok());
+ self.src_archive.as_ref().unwrap().as_ref()
+ }
+
+ fn add_archive<F>(&mut self, archive: &Path, skip: F) -> io::Result<()>
+ where
+ F: FnMut(&str) -> bool + 'static,
+ {
+ let archive_ro = match ArchiveRO::open(archive) {
+ Ok(ar) => ar,
+ Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
+ };
+ if self.additions.iter().any(|ar| ar.path() == archive) {
+ return Ok(());
+ }
+ self.additions.push(Addition::Archive {
+ path: archive.to_path_buf(),
+ archive: archive_ro,
+ skip: Box::new(skip),
+ });
+ Ok(())
+ }
+
+ fn llvm_archive_kind(&self) -> Result<ArchiveKind, &str> {
+ let kind = &*self.config.sess.target.target.options.archive_format;
+ kind.parse().map_err(|_| kind)
+ }
+
+ fn build_with_llvm(&mut self, kind: ArchiveKind) -> io::Result<()> {
+ let removals = mem::take(&mut self.removals);
+ let mut additions = mem::take(&mut self.additions);
+ let mut strings = Vec::new();
+ let mut members = Vec::new();
+
+ let dst = CString::new(self.config.dst.to_str().unwrap())?;
+ let should_update_symbols = self.should_update_symbols;
+
+ unsafe {
+ if let Some(archive) = self.src_archive() {
+ for child in archive.iter() {
+ let child = child.map_err(string_to_io_error)?;
+ let child_name = match child.name() {
+ Some(s) => s,
+ None => continue,
+ };
+ if removals.iter().any(|r| r == child_name) {
+ continue;
+ }
+
+ let name = CString::new(child_name)?;
+ members.push(llvm::LLVMRustArchiveMemberNew(
+ ptr::null(),
+ name.as_ptr(),
+ Some(child.raw),
+ ));
+ strings.push(name);
+ }
+ }
+ for addition in &mut additions {
+ match addition {
+ Addition::File { path, name_in_archive } => {
+ let path = CString::new(path.to_str().unwrap())?;
+ let name = CString::new(name_in_archive.clone())?;
+ members.push(llvm::LLVMRustArchiveMemberNew(
+ path.as_ptr(),
+ name.as_ptr(),
+ None,
+ ));
+ strings.push(path);
+ strings.push(name);
+ }
+ Addition::Archive { archive, skip, .. } => {
+ for child in archive.iter() {
+ let child = child.map_err(string_to_io_error)?;
+ if !is_relevant_child(&child) {
+ continue;
+ }
+ let child_name = child.name().unwrap();
+ if skip(child_name) {
+ continue;
+ }
+
+ // It appears that LLVM's archive writer is a little
+ // buggy if the name we pass down isn't just the
+ // filename component, so chop that off here and
+ // pass it in.
+ //
+ // See LLVM bug 25877 for more info.
+ let child_name =
+ Path::new(child_name).file_name().unwrap().to_str().unwrap();
+ let name = CString::new(child_name)?;
+ let m = llvm::LLVMRustArchiveMemberNew(
+ ptr::null(),
+ name.as_ptr(),
+ Some(child.raw),
+ );
+ members.push(m);
+ strings.push(name);
+ }
+ }
+ }
+ }
+
+ let r = llvm::LLVMRustWriteArchive(
+ dst.as_ptr(),
+ members.len() as libc::size_t,
+ members.as_ptr() as *const &_,
+ should_update_symbols,
+ kind,
+ );
+ let ret = if r.into_result().is_err() {
+ let err = llvm::LLVMRustGetLastError();
+ let msg = if err.is_null() {
+ "failed to write archive".into()
+ } else {
+ String::from_utf8_lossy(CStr::from_ptr(err).to_bytes())
+ };
+ Err(io::Error::new(io::ErrorKind::Other, msg))
+ } else {
+ Ok(())
+ };
+ for member in members {
+ llvm::LLVMRustArchiveMemberFree(member);
+ }
+ ret
+ }
+ }
+}
+
+fn string_to_io_error(s: String) -> io::Error {
+ io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
new file mode 100644
index 0000000..4b2d590
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -0,0 +1,1075 @@
+use crate::back::write::{
+ self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
+};
+use crate::llvm::archive_ro::ArchiveRO;
+use crate::llvm::{self, False, True};
+use crate::{LlvmCodegenBackend, ModuleLlvm};
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
+use rustc_codegen_ssa::back::symbol_export;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{FatalError, Handler};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::bug;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::exported_symbols::SymbolExportLevel;
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{self, CrateType, Lto};
+use tracing::{debug, info};
+
+use std::ffi::{CStr, CString};
+use std::fs::File;
+use std::io;
+use std::mem;
+use std::path::Path;
+use std::ptr;
+use std::slice;
+use std::sync::Arc;
+
+/// We keep track of past LTO imports that were used to produce the current set
+/// of compiled object files that we might choose to reuse during this
+/// compilation session.
+pub const THIN_LTO_IMPORTS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-imports.bin";
+
+pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
+ match crate_type {
+ CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => true,
+ CrateType::Dylib | CrateType::Rlib | CrateType::ProcMacro => false,
+ }
+}
+
+fn prepare_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
+ let export_threshold = match cgcx.lto {
+ // We're just doing LTO for our one crate
+ Lto::ThinLocal => SymbolExportLevel::Rust,
+
+ // We're doing LTO for the entire crate graph
+ Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),
+
+ Lto::No => panic!("didn't request LTO but we're doing LTO"),
+ };
+
+ let symbol_filter = &|&(ref name, level): &(String, SymbolExportLevel)| {
+ if level.is_below_threshold(export_threshold) {
+ Some(CString::new(name.as_str()).unwrap())
+ } else {
+ None
+ }
+ };
+ let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ let mut symbols_below_threshold = {
+ let _timer = cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+ exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>()
+ };
+ info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
+
+ // If we're performing LTO for the entire crate graph, then for each of our
+ // upstream dependencies, find the corresponding rlib and load the bitcode
+ // from the archive.
+ //
+ // We save off all the bytecode and LLVM module ids for later processing
+ // with either fat or thin LTO
+ let mut upstream_modules = Vec::new();
+ if cgcx.lto != Lto::ThinLocal {
+ if cgcx.opts.cg.prefer_dynamic {
+ diag_handler
+ .struct_err("cannot prefer dynamic linking when performing LTO")
+ .note(
+ "only 'staticlib', 'bin', and 'cdylib' outputs are \
+ supported with LTO",
+ )
+ .emit();
+ return Err(FatalError);
+ }
+
+ // Make sure we actually can run LTO
+ for crate_type in cgcx.crate_types.iter() {
+ if !crate_type_allows_lto(*crate_type) {
+ let e = diag_handler.fatal(
+ "lto can only be run for executables, cdylibs and \
+ static library outputs",
+ );
+ return Err(e);
+ }
+ }
+
+ for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
+ let exported_symbols =
+ cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ {
+ let _timer =
+ cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+ symbols_below_threshold
+ .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
+ }
+
+ let archive = ArchiveRO::open(&path).expect("wanted an rlib");
+ let obj_files = archive
+ .iter()
+ .filter_map(|child| child.ok().and_then(|c| c.name().map(|name| (name, c))))
+ .filter(|&(name, _)| looks_like_rust_object_file(name));
+ for (name, child) in obj_files {
+ info!("adding bitcode from {}", name);
+ match get_bitcode_slice_from_object_data(child.data()) {
+ Ok(data) => {
+ let module = SerializedModule::FromRlib(data.to_vec());
+ upstream_modules.push((module, CString::new(name).unwrap()));
+ }
+ Err(msg) => return Err(diag_handler.fatal(&msg)),
+ }
+ }
+ }
+ }
+
+ Ok((symbols_below_threshold, upstream_modules))
+}
+
+fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], String> {
+ let mut len = 0;
+ let data =
+ unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
+ if !data.is_null() {
+ assert!(len != 0);
+ let bc = unsafe { slice::from_raw_parts(data, len) };
+
+ // `bc` must be a sub-slice of `obj`.
+ assert!(obj.as_ptr() <= bc.as_ptr());
+ assert!(bc[bc.len()..bc.len()].as_ptr() <= obj[obj.len()..obj.len()].as_ptr());
+
+ Ok(bc)
+ } else {
+ assert!(len == 0);
+ let msg = llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string());
+ Err(format!("failed to get bitcode from object file for LTO ({})", msg))
+ }
+}
+
+/// Performs fat LTO by merging all modules into a single one and returning it
+/// for further optimization.
+pub(crate) fn run_fat(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
+ let symbols_below_threshold =
+ symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+ fat_lto(
+ cgcx,
+ &diag_handler,
+ modules,
+ cached_modules,
+ upstream_modules,
+ &symbols_below_threshold,
+ )
+}
+
+/// Performs thin LTO by performing necessary global analysis and returning two
+/// lists, one of the modules that need optimization and another for modules that
+/// can simply be copied over from the incr. comp. cache.
+pub(crate) fn run_thin(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ modules: Vec<(String, ThinBuffer)>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
+ let symbols_below_threshold =
+ symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+ if cgcx.opts.cg.linker_plugin_lto.enabled() {
+ unreachable!(
+ "We should never reach this case if the LTO step \
+ is deferred to the linker"
+ );
+ }
+ thin_lto(
+ cgcx,
+ &diag_handler,
+ modules,
+ upstream_modules,
+ cached_modules,
+ &symbols_below_threshold,
+ )
+}
+
+pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) {
+ let name = module.name.clone();
+ let buffer = ThinBuffer::new(module.module_llvm.llmod());
+ (name, buffer)
+}
+
+fn fat_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+ mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+ symbols_below_threshold: &[*const libc::c_char],
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+ let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module");
+ info!("going for a fat lto");
+
+ // Sort out all our lists of incoming modules into two lists.
+ //
+    // * `serialized_modules` (also an argument to this function) contains all
+ // modules that are serialized in-memory.
+ // * `in_memory` contains modules which are already parsed and in-memory,
+ // such as from multi-CGU builds.
+ //
+ // All of `cached_modules` (cached from previous incremental builds) can
+ // immediately go onto the `serialized_modules` modules list and then we can
+ // split the `modules` array into these two lists.
+ let mut in_memory = Vec::new();
+ serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
+ info!("pushing cached module {:?}", wp.cgu_name);
+ (buffer, CString::new(wp.cgu_name).unwrap())
+ }));
+ for module in modules {
+ match module {
+ FatLTOInput::InMemory(m) => in_memory.push(m),
+ FatLTOInput::Serialized { name, buffer } => {
+ info!("pushing serialized module {:?}", name);
+ let buffer = SerializedModule::Local(buffer);
+ serialized_modules.push((buffer, CString::new(name).unwrap()));
+ }
+ }
+ }
+
+ // Find the "costliest" module and merge everything into that codegen unit.
+ // All the other modules will be serialized and reparsed into the new
+ // context, so this hopefully avoids serializing and parsing the largest
+ // codegen unit.
+ //
+ // Additionally use a regular module as the base here to ensure that various
+ // file copy operations in the backend work correctly. The only other kind
+ // of module here should be an allocator one, and if your crate is smaller
+ // than the allocator module then the size doesn't really matter anyway.
+ let costliest_module = in_memory
+ .iter()
+ .enumerate()
+ .filter(|&(_, module)| module.kind == ModuleKind::Regular)
+ .map(|(i, module)| {
+ let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
+ (cost, i)
+ })
+ .max();
+
+ // If we found a costliest module, we're good to go. Otherwise all our
+ // inputs were serialized which could happen in the case, for example, that
+ // all our inputs were incrementally reread from the cache and we're just
+ // re-executing the LTO passes. If that's the case deserialize the first
+ // module and create a linker with it.
+ let module: ModuleCodegen<ModuleLlvm> = match costliest_module {
+ Some((_cost, i)) => in_memory.remove(i),
+ None => {
+ assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
+ let (buffer, name) = serialized_modules.remove(0);
+ info!("no in-memory regular modules to choose from, parsing {:?}", name);
+ ModuleCodegen {
+ module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), diag_handler)?,
+ name: name.into_string().unwrap(),
+ kind: ModuleKind::Regular,
+ }
+ }
+ };
+ let mut serialized_bitcode = Vec::new();
+ {
+ let (llcx, llmod) = {
+ let llvm = &module.module_llvm;
+ (&llvm.llcx, llvm.llmod())
+ };
+ info!("using {:?} as a base module", module.name);
+
+ // The linking steps below may produce errors and diagnostics within LLVM
+ // which we'd like to handle and print, so set up our diagnostic handlers
+ // (which get unregistered when they go out of scope below).
+ let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ // For all other modules we codegened we'll need to link them into our own
+ // bitcode. All modules were codegened in their own LLVM context, however,
+ // and we want to move everything to the same LLVM context. Currently the
+        // way we know of to do that is to serialize them to a string and then parse
+ // them later. Not great but hey, that's why it's "fat" LTO, right?
+ for module in in_memory {
+ let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+ let llmod_id = CString::new(&module.name[..]).unwrap();
+ serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
+ }
+ // Sort the modules to ensure we produce deterministic results.
+ serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));
+
+ // For all serialized bitcode files we parse them and link them in as we did
+ // above, this is all mostly handled in C++. Like above, though, we don't
+ // know much about the memory management here so we err on the side of being
+        // safe and persist everything with the original module.
+ let mut linker = Linker::new(llmod);
+ for (bc_decoded, name) in serialized_modules {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_fat_lto_link_module", format!("{:?}", name));
+ info!("linking {:?}", name);
+ let data = bc_decoded.data();
+ linker.add(&data).map_err(|()| {
+ let msg = format!("failed to load bc of {:?}", name);
+ write::llvm_err(&diag_handler, &msg)
+ })?;
+ serialized_bitcode.push(bc_decoded);
+ }
+ drop(linker);
+ save_temp_bitcode(&cgcx, &module, "lto.input");
+
+ // Internalize everything below threshold to help strip out more modules and such.
+ unsafe {
+ let ptr = symbols_below_threshold.as_ptr();
+ llvm::LLVMRustRunRestrictionPass(
+ llmod,
+ ptr as *const *const libc::c_char,
+ symbols_below_threshold.len() as libc::size_t,
+ );
+ save_temp_bitcode(&cgcx, &module, "lto.after-restriction");
+ }
+
+ if cgcx.no_landing_pads {
+ unsafe {
+ llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
+ }
+ save_temp_bitcode(&cgcx, &module, "lto.after-nounwind");
+ }
+ }
+
+ Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: serialized_bitcode })
+}
+
+crate struct Linker<'a>(&'a mut llvm::Linker<'a>);
+
+impl Linker<'a> {
+ crate fn new(llmod: &'a llvm::Module) -> Self {
+ unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
+ }
+
+ crate fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
+ unsafe {
+ if llvm::LLVMRustLinkerAdd(
+ self.0,
+ bytecode.as_ptr() as *const libc::c_char,
+ bytecode.len(),
+ ) {
+ Ok(())
+ } else {
+ Err(())
+ }
+ }
+ }
+}
+
+impl Drop for Linker<'a> {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+/// Prepare "thin" LTO to get run on these modules.
+///
+/// The general structure of ThinLTO is quite different from the structure of
+/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
+/// one giant LLVM module, and then we run more optimization passes over this
+/// big module after internalizing most symbols. Thin LTO, on the other hand,
+/// avoids this large bottleneck through more targeted optimization.
+///
+/// At a high level Thin LTO looks like:
+///
+/// 1. Prepare a "summary" of each LLVM module in question which describes
+/// the values inside, cost of the values, etc.
+/// 2. Merge the summaries of all modules in question into one "index"
+/// 3. Perform some global analysis on this index
+/// 4. For each module, use the index and analysis calculated previously to
+/// perform local transformations on the module, for example inlining
+/// small functions from other modules.
+/// 5. Run thin-specific optimization passes over each module, and then code
+/// generate everything at the end.
+///
+/// The summary for each module is intended to be quite cheap, and the global
+/// index is relatively quite cheap to create as well. As a result, the goal of
+/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
+/// situations. For example one cheap optimization is that we can parallelize
+/// all codegen modules, easily making use of all the cores on a machine.
+///
+/// With all that in mind, the function here is designed at specifically just
+/// calculating the *index* for ThinLTO. This index will then be shared amongst
+/// all of the `LtoModuleCodegen` units returned below and destroyed once
+/// they all go out of scope.
+fn thin_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ modules: Vec<(String, ThinBuffer)>,
+ serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+ symbols_below_threshold: &[*const libc::c_char],
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+ let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
+ unsafe {
+ info!("going for that thin, thin LTO");
+
+ let green_modules: FxHashMap<_, _> =
+ cached_modules.iter().map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone())).collect();
+
+ let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
+ let mut thin_buffers = Vec::with_capacity(modules.len());
+ let mut module_names = Vec::with_capacity(full_scope_len);
+ let mut thin_modules = Vec::with_capacity(full_scope_len);
+
+ for (i, (name, buffer)) in modules.into_iter().enumerate() {
+ info!("local module: {} - {}", i, name);
+ let cname = CString::new(name.clone()).unwrap();
+ thin_modules.push(llvm::ThinLTOModule {
+ identifier: cname.as_ptr(),
+ data: buffer.data().as_ptr(),
+ len: buffer.data().len(),
+ });
+ thin_buffers.push(buffer);
+ module_names.push(cname);
+ }
+
+ // FIXME: All upstream crates are deserialized internally in the
+ // function below to extract their summary and modules. Note that
+ // unlike the loop above we *must* decode and/or read something
+ // here as these are all just serialized files on disk. An
+ // improvement, however, to make here would be to store the
+ // module summary separately from the actual module itself. Right
+        // now this is stored in one large bitcode file, and the entire
+ // file is deflate-compressed. We could try to bypass some of the
+ // decompression by storing the index uncompressed and only
+ // lazily decompressing the bytecode if necessary.
+ //
+ // Note that truly taking advantage of this optimization will
+ // likely be further down the road. We'd have to implement
+ // incremental ThinLTO first where we could actually avoid
+ // looking at upstream modules entirely sometimes (the contents,
+ // we must always unconditionally look at the index).
+ let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());
+
+ let cached_modules =
+ cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));
+
+ for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
+ info!("upstream or cached module {:?}", name);
+ thin_modules.push(llvm::ThinLTOModule {
+ identifier: name.as_ptr(),
+ data: module.data().as_ptr(),
+ len: module.data().len(),
+ });
+ serialized.push(module);
+ module_names.push(name);
+ }
+
+ // Sanity check
+ assert_eq!(thin_modules.len(), module_names.len());
+
+ // Delegate to the C++ bindings to create some data here. Once this is a
+ // tried-and-true interface we may wish to try to upstream some of this
+ // to LLVM itself, right now we reimplement a lot of what they do
+ // upstream...
+ let data = llvm::LLVMRustCreateThinLTOData(
+ thin_modules.as_ptr(),
+ thin_modules.len() as u32,
+ symbols_below_threshold.as_ptr(),
+ symbols_below_threshold.len() as u32,
+ )
+ .ok_or_else(|| write::llvm_err(&diag_handler, "failed to prepare thin LTO context"))?;
+
+ info!("thin LTO data created");
+
+ let (import_map_path, prev_import_map, curr_import_map) =
+ if let Some(ref incr_comp_session_dir) = cgcx.incr_comp_session_dir {
+ let path = incr_comp_session_dir.join(THIN_LTO_IMPORTS_INCR_COMP_FILE_NAME);
+ // If previous imports have been deleted, or we get an IO error
+ // reading the file storing them, then we'll just use `None` as the
+ // prev_import_map, which will force the code to be recompiled.
+ let prev = if path.exists() {
+ ThinLTOImportMaps::load_from_file(&path).ok()
+ } else {
+ None
+ };
+ let curr = ThinLTOImportMaps::from_thin_lto_data(data);
+ (Some(path), prev, curr)
+ } else {
+ // If we don't compile incrementally, we don't need to load the
+ // import data from LLVM.
+ assert!(green_modules.is_empty());
+ let curr = ThinLTOImportMaps::default();
+ (None, None, curr)
+ };
+ info!("thin LTO import map loaded");
+
+ let data = ThinData(data);
+
+ // Throw our data in an `Arc` as we'll be sharing it across threads. We
+ // also put all memory referenced by the C++ data (buffers, ids, etc)
+ // into the arc as well. After this we'll create a thin module
+ // codegen per module in this data.
+ let shared = Arc::new(ThinShared {
+ data,
+ thin_buffers,
+ serialized_modules: serialized,
+ module_names,
+ });
+
+ let mut copy_jobs = vec![];
+ let mut opt_jobs = vec![];
+
+ info!("checking which modules can be-reused and which have to be re-optimized.");
+ for (module_index, module_name) in shared.module_names.iter().enumerate() {
+ let module_name = module_name_to_str(module_name);
+
+ // If (1.) the module hasn't changed, and (2.) none of the modules
+ // it imports from have changed, *and* (3.) the import and export
+ // sets themselves have not changed from the previous compile when
+ // it was last ThinLTO'ed, then we can re-use the post-ThinLTO
+ // version of the module. Otherwise, freshly perform LTO
+ // optimization.
+ //
+ // (Note that globally, the export set is just the inverse of the
+ // import set.)
+ //
+ // For further justification of why the above is necessary and sufficient,
+ // see the LLVM blog post on ThinLTO:
+ //
+ // http://blog.llvm.org/2016/06/thinlto-scalable-and-incremental-lto.html
+ //
+ // which states the following:
+ //
+ // ```quote
+ // any particular ThinLTO backend must be redone iff:
+ //
+ // 1. The corresponding (primary) module’s bitcode changed
+ // 2. The list of imports into or exports from the module changed
+ // 3. The bitcode for any module being imported from has changed
+ // 4. Any global analysis result affecting either the primary module
+ // or anything it imports has changed.
+ // ```
+ //
+ // This strategy means we can always save the computed imports as
+ // canon: when we reuse the post-ThinLTO version, condition (3.)
+ // ensures that the current import set is the same as the previous
+ // one. (And of course, when we don't reuse the post-ThinLTO
+ // version, the current import set *is* the correct one, since we
+ // are doing the ThinLTO in this current compilation cycle.)
+ //
+ // For more discussion, see rust-lang/rust#59535 (where the import
+ // issue was discovered) and rust-lang/rust#69798 (where the
+ // analogous export issue was discovered).
+ if let (Some(prev_import_map), true) =
+ (prev_import_map.as_ref(), green_modules.contains_key(module_name))
+ {
+ assert!(cgcx.incr_comp_session_dir.is_some());
+
+ let prev_imports = prev_import_map.imports_of(module_name);
+ let curr_imports = curr_import_map.imports_of(module_name);
+ let prev_exports = prev_import_map.exports_of(module_name);
+ let curr_exports = curr_import_map.exports_of(module_name);
+ let imports_all_green = curr_imports
+ .iter()
+ .all(|imported_module| green_modules.contains_key(imported_module));
+ if imports_all_green
+ && equivalent_as_sets(prev_imports, curr_imports)
+ && equivalent_as_sets(prev_exports, curr_exports)
+ {
+ let work_product = green_modules[module_name].clone();
+ copy_jobs.push(work_product);
+ info!(" - {}: re-used", module_name);
+ assert!(cgcx.incr_comp_session_dir.is_some());
+ cgcx.cgu_reuse_tracker.set_actual_reuse(module_name, CguReuse::PostLto);
+ continue;
+ }
+ }
+
+ info!(" - {}: re-compiled", module_name);
+ opt_jobs.push(LtoModuleCodegen::Thin(ThinModule {
+ shared: shared.clone(),
+ idx: module_index,
+ }));
+ }
+
+ // Save the current ThinLTO import information for the next compilation
+ // session, overwriting the previous serialized imports (if any).
+ if let Some(path) = import_map_path {
+ if let Err(err) = curr_import_map.save_to_file(&path) {
+ let msg = format!("Error while writing ThinLTO import data: {}", err);
+ return Err(write::llvm_err(&diag_handler, &msg));
+ }
+ }
+
+ Ok((opt_jobs, copy_jobs))
+ }
+}
+
+/// Determines whether two slices (each assumed to be free of duplicates)
+/// contain exactly the same elements once element order is disregarded.
+fn equivalent_as_sets(a: &[String], b: &[String]) -> bool {
+    // Differing lengths can never be set-equivalent, and byte-identical
+    // sequences always are; both checks avoid building the hash sets below.
+    if a.len() != b.len() {
+        return false;
+    }
+    if a == b {
+        return true;
+    }
+    // General case: compare as sets of borrowed strings.
+    let lhs: FxHashSet<&str> = a.iter().map(String::as_str).collect();
+    let rhs: FxHashSet<&str> = b.iter().map(String::as_str).collect();
+    lhs == rhs
+}
+
+/// Runs LLVM's LTO-time optimization pipeline over the single merged module.
+///
+/// `thin` selects the ThinLTO-specific pipeline rather than the full ("fat")
+/// LTO one. When the new LLVM pass manager is enabled this delegates to
+/// `optimize_with_new_llvm_pass_manager`; otherwise a legacy pass manager is
+/// assembled by hand, mirroring LLVM's own `LTOCodeGenerator`.
+pub(crate) fn run_pass_manager(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    module: &ModuleCodegen<ModuleLlvm>,
+    config: &ModuleConfig,
+    thin: bool,
+) {
+    let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &module.name[..]);
+
+    // Now we have one massive module inside of llmod. Time to run the
+    // LTO-specific optimization passes that LLVM provides.
+    //
+    // This code is based off the code found in llvm's LTO code generator:
+    // tools/lto/LTOCodeGenerator.cpp
+    debug!("running the pass manager");
+    unsafe {
+        if write::should_use_new_llvm_pass_manager(config) {
+            let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
+            let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
+            // See comment below for why this is necessary.
+            let opt_level = if let config::OptLevel::No = opt_level {
+                config::OptLevel::Less
+            } else {
+                opt_level
+            };
+            write::optimize_with_new_llvm_pass_manager(cgcx, module, config, opt_level, opt_stage);
+            debug!("lto done");
+            return;
+        }
+
+        let pm = llvm::LLVMCreatePassManager();
+        llvm::LLVMAddAnalysisPasses(module.module_llvm.tm, pm);
+
+        // Verify the IR up front so a broken merged module fails early.
+        if config.verify_llvm_ir {
+            let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
+            llvm::LLVMRustAddPass(pm, pass.unwrap());
+        }
+
+        // When optimizing for LTO we don't actually pass in `-O0`, but we force
+        // it to always happen at least with `-O1`.
+        //
+        // With ThinLTO we mess around a lot with symbol visibility in a way
+        // that will actually cause linking failures if we optimize at O0 which
+        // notable is lacking in dead code elimination. To ensure we at least
+        // get some optimizations and correctly link we forcibly switch to `-O1`
+        // to get dead code elimination.
+        //
+        // Note that in general this shouldn't matter too much as you typically
+        // only turn on ThinLTO when you're compiling with optimizations
+        // otherwise.
+        let opt_level = config
+            .opt_level
+            .map(|x| to_llvm_opt_settings(x).0)
+            .unwrap_or(llvm::CodeGenOptLevel::None);
+        let opt_level = match opt_level {
+            llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less,
+            level => level,
+        };
+        // `False`/`True` are the C-style bool constants from this crate's
+        // `llvm` bindings.
+        with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| {
+            if thin {
+                llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
+            } else {
+                llvm::LLVMPassManagerBuilderPopulateLTOPassManager(
+                    b, pm, /* Internalize = */ False, /* RunInliner = */ True,
+                );
+            }
+        });
+
+        // We always generate bitcode through ThinLTOBuffers,
+        // which do not support anonymous globals
+        if config.bitcode_needed() {
+            let pass = llvm::LLVMRustFindAndCreatePass("name-anon-globals\0".as_ptr().cast());
+            llvm::LLVMRustAddPass(pm, pass.unwrap());
+        }
+
+        // Verify again after the pipeline has run.
+        if config.verify_llvm_ir {
+            let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
+            llvm::LLVMRustAddPass(pm, pass.unwrap());
+        }
+
+        llvm::LLVMRunPassManager(pm, module.module_llvm.llmod());
+
+        llvm::LLVMDisposePassManager(pm);
+    }
+    debug!("lto done");
+}
+
+/// Owned handle to a serialized LLVM module held on the C++ side.
+pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);
+
+// NOTE(review): Send/Sync are asserted on the assumption that the buffer is
+// plain owned bytes with no thread affinity -- confirm against the
+// `LLVMRustModuleBuffer*` implementation on the C++ side.
+unsafe impl Send for ModuleBuffer {}
+unsafe impl Sync for ModuleBuffer {}
+
+impl ModuleBuffer {
+    /// Serializes `m` into a fresh buffer.
+    pub fn new(m: &llvm::Module) -> ModuleBuffer {
+        ModuleBuffer(unsafe { llvm::LLVMRustModuleBufferCreate(m) })
+    }
+}
+
+impl ModuleBufferMethods for ModuleBuffer {
+    /// Returns the raw bytes of the serialized module.
+    fn data(&self) -> &[u8] {
+        unsafe {
+            let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
+            let len = llvm::LLVMRustModuleBufferLen(self.0);
+            slice::from_raw_parts(ptr, len)
+        }
+    }
+}
+
+impl Drop for ModuleBuffer {
+    fn drop(&mut self) {
+        // Frees the C++-side buffer; the reference is never used afterwards.
+        unsafe {
+            llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _));
+        }
+    }
+}
+
+/// Owned handle to LLVM's `ThinLTOData`: the shared whole-program analysis
+/// state consulted by every per-module ThinLTO step.
+pub struct ThinData(&'static mut llvm::ThinLTOData);
+
+// NOTE(review): Send/Sync assume the analysis data is immutable once built --
+// confirm against the C++ side before relying on cross-thread sharing.
+unsafe impl Send for ThinData {}
+unsafe impl Sync for ThinData {}
+
+impl Drop for ThinData {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _));
+        }
+    }
+}
+
+/// Owned handle to an LLVM `ThinLTOBuffer`: a module serialized in the form
+/// ThinLTO expects to deserialize later.
+pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer);
+
+// NOTE(review): Send/Sync assume the buffer is plain owned bytes with no
+// thread affinity -- confirm against the C++ implementation.
+unsafe impl Send for ThinBuffer {}
+unsafe impl Sync for ThinBuffer {}
+
+impl ThinBuffer {
+    /// Serializes `m` into a fresh ThinLTO buffer.
+    pub fn new(m: &llvm::Module) -> ThinBuffer {
+        unsafe {
+            let buffer = llvm::LLVMRustThinLTOBufferCreate(m);
+            ThinBuffer(buffer)
+        }
+    }
+}
+
+impl ThinBufferMethods for ThinBuffer {
+    /// Returns the raw bytes of the serialized module.
+    fn data(&self) -> &[u8] {
+        unsafe {
+            let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
+            let len = llvm::LLVMRustThinLTOBufferLen(self.0);
+            slice::from_raw_parts(ptr, len)
+        }
+    }
+}
+
+impl Drop for ThinBuffer {
+    fn drop(&mut self) {
+        // Frees the C++-side buffer; the reference is never used afterwards.
+        unsafe {
+            llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _));
+        }
+    }
+}
+
+/// Runs the per-module half of ThinLTO for a single serialized module: parse
+/// it into a fresh LLVM context, apply the ThinLTO rename / resolve-weak /
+/// internalize / import steps driven by the shared analysis data, patch up
+/// imported debuginfo, and finally run the ThinLTO optimization pipeline.
+///
+/// # Safety
+/// Calls straight into LLVM. `thin_module` must hold valid bitcode, and the
+/// shared `ThinLTOData` must have been computed for this module set.
+pub unsafe fn optimize_thin_module(
+    thin_module: &mut ThinModule<LlvmCodegenBackend>,
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+    let diag_handler = cgcx.create_diag_handler();
+    let tm = (cgcx.tm_factory.0)().map_err(|e| write::llvm_err(&diag_handler, &e))?;
+
+    // Right now the implementation we've got only works over serialized
+    // modules, so we create a fresh new LLVM context and parse the module
+    // into that context. One day, however, we may do this for upstream
+    // crates but for locally codegened modules we may be able to reuse
+    // that LLVM Context and Module.
+    let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+    let llmod_raw = parse_module(
+        llcx,
+        &thin_module.shared.module_names[thin_module.idx],
+        thin_module.data(),
+        &diag_handler,
+    )? as *const _;
+    let module = ModuleCodegen {
+        module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
+        name: thin_module.name().to_string(),
+        kind: ModuleKind::Regular,
+    };
+    {
+        let target = &*module.module_llvm.tm;
+        let llmod = module.module_llvm.llmod();
+        save_temp_bitcode(&cgcx, &module, "thin-lto-input");
+
+        // Before we do much else find the "main" `DICompileUnit` that we'll be
+        // using below. If we find more than one though then rustc has changed
+        // in a way we're not ready for, so generate an ICE by returning
+        // an error.
+        let mut cu1 = ptr::null_mut();
+        let mut cu2 = ptr::null_mut();
+        llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
+        if !cu2.is_null() {
+            let msg = "multiple source DICompileUnits found";
+            return Err(write::llvm_err(&diag_handler, msg));
+        }
+
+        // Like with "fat" LTO, get some better optimizations if landing pads
+        // are disabled by removing all landing pads.
+        if cgcx.no_landing_pads {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_thin_lto_remove_landing_pads", thin_module.name());
+            llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
+            save_temp_bitcode(&cgcx, &module, "thin-lto-after-nounwind");
+        }
+
+        // Up next comes the per-module local analyses that we do for Thin LTO.
+        // Each of these functions is basically copied from the LLVM
+        // implementation and then tailored to suit this implementation. Ideally
+        // each of these would be supported by upstream LLVM but that's perhaps
+        // a patch for another day!
+        //
+        // You can find some more comments about these functions in the LLVM
+        // bindings we've got (currently `PassWrapper.cpp`)
+        {
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
+                let msg = "failed to prepare thin LTO module";
+                return Err(write::llvm_err(&diag_handler, msg));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
+        }
+
+        {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
+                let msg = "failed to prepare thin LTO module";
+                return Err(write::llvm_err(&diag_handler, msg));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
+        }
+
+        {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
+                let msg = "failed to prepare thin LTO module";
+                return Err(write::llvm_err(&diag_handler, msg));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
+        }
+
+        {
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
+                let msg = "failed to prepare thin LTO module";
+                return Err(write::llvm_err(&diag_handler, msg));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
+        }
+
+        // Ok now this is a bit unfortunate. This is also something you won't
+        // find upstream in LLVM's ThinLTO passes! This is a hack for now to
+        // work around bugs in LLVM.
+        //
+        // First discovered in #45511 it was found that as part of ThinLTO
+        // importing passes LLVM will import `DICompileUnit` metadata
+        // information across modules. This means that we'll be working with one
+        // LLVM module that has multiple `DICompileUnit` instances in it (a
+        // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of
+        // bugs in LLVM's backend which generates invalid DWARF in a situation
+        // like this:
+        //
+        // https://bugs.llvm.org/show_bug.cgi?id=35212
+        // https://bugs.llvm.org/show_bug.cgi?id=35562
+        //
+        // While the first bug there is fixed the second ended up causing #46346
+        // which was basically a resurgence of #45511 after LLVM's bug 35212 was
+        // fixed.
+        //
+        // This function below is a huge hack around this problem. The function
+        // below is defined in `PassWrapper.cpp` and will basically "merge"
+        // all `DICompileUnit` instances in a module. Basically it'll take all
+        // the objects, rewrite all pointers of `DISubprogram` to point to the
+        // first `DICompileUnit`, and then delete all the other units.
+        //
+        // This is probably mangling to the debug info slightly (but hopefully
+        // not too much) but for now at least gets LLVM to emit valid DWARF (or
+        // so it appears). Hopefully we can remove this once upstream bugs are
+        // fixed in LLVM.
+        {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_thin_lto_patch_debuginfo", thin_module.name());
+            llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1);
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-patch");
+        }
+
+        // Alright now that we've done everything related to the ThinLTO
+        // analysis it's time to run some optimizations! Here we use the same
+        // `run_pass_manager` as the "fat" LTO above except that we tell it to
+        // populate a thin-specific pass manager, which presumably LLVM treats a
+        // little differently.
+        {
+            info!("running thin lto passes over {}", module.name);
+            let config = cgcx.config(module.kind);
+            run_pass_manager(cgcx, &module, config, true);
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
+        }
+    }
+    Ok(module)
+}
+
+/// Summarizes module import/export relationships used by LLVM's ThinLTO pass.
+///
+/// Note that we tend to have two such instances of `ThinLTOImportMaps` in use:
+/// one loaded from a file that represents the relationships used during the
+/// compilation associated with the incremental build artifacts we are
+/// attempting to reuse, and another constructed via `from_thin_lto_data`, which
+/// captures the relationships of ThinLTO in the current compilation.
+#[derive(Debug, Default)]
+pub struct ThinLTOImportMaps {
+    // key = llvm name of importing module, value = list of modules it imports from
+    imports: FxHashMap<String, Vec<String>>,
+    // key = llvm name of exporting module, value = list of modules it exports to
+    exports: FxHashMap<String, Vec<String>>,
+}
+
+impl ThinLTOImportMaps {
+ /// Returns modules imported by `llvm_module_name` during some ThinLTO pass.
+ fn imports_of(&self, llvm_module_name: &str) -> &[String] {
+ self.imports.get(llvm_module_name).map(|v| &v[..]).unwrap_or(&[])
+ }
+
+ /// Returns modules exported by `llvm_module_name` during some ThinLTO pass.
+ fn exports_of(&self, llvm_module_name: &str) -> &[String] {
+ self.exports.get(llvm_module_name).map(|v| &v[..]).unwrap_or(&[])
+ }
+
+ fn save_to_file(&self, path: &Path) -> io::Result<()> {
+ use std::io::Write;
+ let file = File::create(path)?;
+ let mut writer = io::BufWriter::new(file);
+ for (importing_module_name, imported_modules) in &self.imports {
+ writeln!(writer, "{}", importing_module_name)?;
+ for imported_module in imported_modules {
+ writeln!(writer, " {}", imported_module)?;
+ }
+ writeln!(writer)?;
+ }
+ Ok(())
+ }
+
+ fn load_from_file(path: &Path) -> io::Result<ThinLTOImportMaps> {
+ use std::io::BufRead;
+ let mut imports = FxHashMap::default();
+ let mut exports: FxHashMap<_, Vec<_>> = FxHashMap::default();
+ let mut current_module: Option<String> = None;
+ let mut current_imports: Vec<String> = vec![];
+ let file = File::open(path)?;
+ for line in io::BufReader::new(file).lines() {
+ let line = line?;
+ if line.is_empty() {
+ let importing_module = current_module.take().expect("Importing module not set");
+ for imported in ¤t_imports {
+ exports.entry(imported.clone()).or_default().push(importing_module.clone());
+ }
+ imports.insert(importing_module, mem::replace(&mut current_imports, vec![]));
+ } else if line.starts_with(' ') {
+ // Space marks an imported module
+ assert_ne!(current_module, None);
+ current_imports.push(line.trim().to_string());
+ } else {
+ // Otherwise, beginning of a new module (must be start or follow empty line)
+ assert_eq!(current_module, None);
+ current_module = Some(line.trim().to_string());
+ }
+ }
+ Ok(ThinLTOImportMaps { imports, exports })
+ }
+
+ /// Loads the ThinLTO import map from ThinLTOData.
+ unsafe fn from_thin_lto_data(data: *const llvm::ThinLTOData) -> ThinLTOImportMaps {
+ unsafe extern "C" fn imported_module_callback(
+ payload: *mut libc::c_void,
+ importing_module_name: *const libc::c_char,
+ imported_module_name: *const libc::c_char,
+ ) {
+ let map = &mut *(payload as *mut ThinLTOImportMaps);
+ let importing_module_name = CStr::from_ptr(importing_module_name);
+ let importing_module_name = module_name_to_str(&importing_module_name);
+ let imported_module_name = CStr::from_ptr(imported_module_name);
+ let imported_module_name = module_name_to_str(&imported_module_name);
+
+ if !map.imports.contains_key(importing_module_name) {
+ map.imports.insert(importing_module_name.to_owned(), vec![]);
+ }
+
+ map.imports
+ .get_mut(importing_module_name)
+ .unwrap()
+ .push(imported_module_name.to_owned());
+
+ if !map.exports.contains_key(imported_module_name) {
+ map.exports.insert(imported_module_name.to_owned(), vec![]);
+ }
+
+ map.exports
+ .get_mut(imported_module_name)
+ .unwrap()
+ .push(importing_module_name.to_owned());
+ }
+
+ let mut map = ThinLTOImportMaps::default();
+ llvm::LLVMRustGetThinLTOModuleImports(
+ data,
+ imported_module_callback,
+ &mut map as *mut _ as *mut libc::c_void,
+ );
+ map
+ }
+}
+
+/// Converts an LLVM module name to UTF-8, ICEing via `bug!` on invalid data:
+/// rustc itself produces these names, so non-UTF-8 indicates a compiler bug.
+fn module_name_to_str(c_str: &CStr) -> &str {
+    match c_str.to_str() {
+        Ok(name) => name,
+        Err(e) => {
+            bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)
+        }
+    }
+}
+
+/// Parses the bitcode in `data` into a module named `name` inside context
+/// `cx`.
+///
+/// The returned module borrows from `cx`. A parse failure is converted into a
+/// fatal error through `diag_handler` (with LLVM's last error appended by
+/// `llvm_err`).
+pub fn parse_module<'a>(
+    cx: &'a llvm::Context,
+    name: &CStr,
+    data: &[u8],
+    diag_handler: &Handler,
+) -> Result<&'a llvm::Module, FatalError> {
+    unsafe {
+        llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()).ok_or_else(
+            || {
+                let msg = "failed to parse bitcode for LTO module";
+                write::llvm_err(&diag_handler, msg)
+            },
+        )
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/profiling.rs b/compiler/rustc_codegen_llvm/src/back/profiling.rs
new file mode 100644
index 0000000..2741f7d
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/profiling.rs
@@ -0,0 +1,58 @@
+use measureme::{event_id::SEPARATOR_BYTE, EventId, StringComponent, StringId};
+use rustc_data_structures::profiling::{SelfProfiler, TimingGuard};
+use std::ffi::{c_void, CStr};
+use std::os::raw::c_char;
+use std::sync::Arc;
+
+/// Builds a measureme `EventId` for an LLVM pass applied to some IR entity.
+///
+/// The IR name may be a parenthesized, comma-separated list (as produced for
+/// `LazyCallGraph::SCC`); each component is demangled individually and joined
+/// with the profiler's separator byte.
+fn llvm_args_to_string_id(profiler: &SelfProfiler, pass_name: &str, ir_name: &str) -> EventId {
+    let pass_name = profiler.get_or_alloc_cached_string(pass_name);
+    let mut components = vec![StringComponent::Ref(pass_name)];
+    // handle that LazyCallGraph::SCC is a comma separated list within parentheses
+    let parentheses: &[_] = &['(', ')'];
+    let trimmed = ir_name.trim_matches(parentheses);
+    for part in trimmed.split(", ") {
+        let demangled_ir_name = rustc_demangle::demangle(part).to_string();
+        let ir_name = profiler.get_or_alloc_cached_string(demangled_ir_name);
+        components.push(StringComponent::Value(SEPARATOR_BYTE));
+        components.push(StringComponent::Ref(ir_name));
+    }
+    EventId::from_label(profiler.alloc_string(components.as_slice()))
+}
+
+/// Adapter that forwards LLVM's pass-timing callbacks into rustc's
+/// self-profiler.
+pub struct LlvmSelfProfiler<'a> {
+    profiler: Arc<SelfProfiler>,
+    // Stack of in-flight pass timings; LLVM's before/after callbacks nest.
+    stack: Vec<TimingGuard<'a>>,
+    // Interned event-kind string used for every recorded LLVM pass.
+    llvm_pass_event_kind: StringId,
+}
+
+impl<'a> LlvmSelfProfiler<'a> {
+    pub fn new(profiler: Arc<SelfProfiler>) -> Self {
+        // Intern the event-kind label once up front.
+        let llvm_pass_event_kind = profiler.alloc_string("LLVM Pass");
+        Self { profiler, stack: Vec::default(), llvm_pass_event_kind }
+    }
+
+    /// Starts timing one pass; matched by a later `after_pass_callback`.
+    fn before_pass_callback(&'a mut self, pass_name: &str, ir_name: &str) {
+        let event_id = llvm_args_to_string_id(&self.profiler, pass_name, ir_name);
+
+        self.stack.push(TimingGuard::start(&self.profiler, self.llvm_pass_event_kind, event_id));
+    }
+
+    /// Ends the most recently started timing (the guard records on drop).
+    fn after_pass_callback(&mut self) {
+        self.stack.pop();
+    }
+}
+
+pub unsafe extern "C" fn selfprofile_before_pass_callback(
+ llvm_self_profiler: *mut c_void,
+ pass_name: *const c_char,
+ ir_name: *const c_char,
+) {
+ let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+ let pass_name = CStr::from_ptr(pass_name).to_str().expect("valid UTF-8");
+ let ir_name = CStr::from_ptr(ir_name).to_str().expect("valid UTF-8");
+ llvm_self_profiler.before_pass_callback(pass_name, ir_name);
+}
+
+pub unsafe extern "C" fn selfprofile_after_pass_callback(llvm_self_profiler: *mut c_void) {
+ let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+ llvm_self_profiler.after_pass_callback();
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
new file mode 100644
index 0000000..f35c101
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -0,0 +1,1067 @@
+use crate::attributes;
+use crate::back::lto::ThinBuffer;
+use crate::back::profiling::{
+ selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
+};
+use crate::base;
+use crate::common;
+use crate::consts;
+use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::LlvmCodegenBackend;
+use crate::ModuleLlvm;
+use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_errors::{FatalError, Handler, Level};
+use rustc_fs_util::{link_or_copy, path_to_c_string};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{self, Lto, OutputType, Passes, SanitizerSet, SwitchWithOptPath};
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::InnerSpan;
+use rustc_target::spec::{CodeModel, RelocModel};
+use tracing::debug;
+
+use libc::{c_char, c_int, c_uint, c_void, size_t};
+use std::ffi::CString;
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+use std::slice;
+use std::str;
+use std::sync::Arc;
+
+pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError {
+ match llvm::last_error() {
+ Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
+ None => handler.fatal(&msg),
+ }
+}
+
+/// Emits module `m` to `output` as `file_type` (assembly or object code)
+/// using the given target machine and codegen pass manager.
+///
+/// Note: the `'ll` lifetime is in-band (declared implicitly by use), tying
+/// the target machine, pass manager, and module to the same LLVM context.
+pub fn write_output_file(
+    handler: &rustc_errors::Handler,
+    target: &'ll llvm::TargetMachine,
+    pm: &llvm::PassManager<'ll>,
+    m: &'ll llvm::Module,
+    output: &Path,
+    file_type: llvm::FileType,
+) -> Result<(), FatalError> {
+    unsafe {
+        let output_c = path_to_c_string(output);
+        let result = llvm::LLVMRustWriteOutputFile(target, pm, m, output_c.as_ptr(), file_type);
+        result.into_result().map_err(|()| {
+            let msg = format!("could not write output to {}", output.display());
+            llvm_err(handler, &msg)
+        })
+    }
+}
+
+/// Creates a target machine used only for querying target information, not
+/// for actual codegen; the optimization level is therefore fixed at `No`.
+pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
+    let factory = target_machine_factory(sess, config::OptLevel::No);
+    factory().unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
+}
+
+/// Creates the target machine used for codegen of the local crate, at the
+/// backend optimization level the session selected.
+pub fn create_target_machine(tcx: TyCtxt<'_>) -> &'static mut llvm::TargetMachine {
+    let factory = target_machine_factory(&tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE));
+    factory().unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
+}
+
+/// Maps rustc's `-C opt-level` onto LLVM's separate speed and size knobs.
+pub fn to_llvm_opt_settings(
+    cfg: config::OptLevel,
+) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
+    use config::OptLevel;
+    // Size-optimized levels still run LLVM's `Default` speed pipeline; only
+    // the size knob distinguishes `-C opt-level=s` from `z`.
+    match cfg {
+        OptLevel::No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
+        OptLevel::Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
+        OptLevel::Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
+        OptLevel::Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
+        OptLevel::Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
+        OptLevel::SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
+    }
+}
+
+/// Maps rustc's `-C opt-level` onto the new pass manager's `-O` levels.
+fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel {
+    use config::OptLevel;
+    match cfg {
+        OptLevel::No => llvm::PassBuilderOptLevel::O0,
+        OptLevel::Less => llvm::PassBuilderOptLevel::O1,
+        OptLevel::Default => llvm::PassBuilderOptLevel::O2,
+        OptLevel::Aggressive => llvm::PassBuilderOptLevel::O3,
+        OptLevel::Size => llvm::PassBuilderOptLevel::Os,
+        OptLevel::SizeMin => llvm::PassBuilderOptLevel::Oz,
+    }
+}
+
+/// Translates the session's relocation model into the LLVM equivalent.
+fn to_llvm_relocation_model(model: RelocModel) -> llvm::RelocModel {
+    match model {
+        RelocModel::Pic => llvm::RelocModel::PIC,
+        RelocModel::Static => llvm::RelocModel::Static,
+        RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic,
+        RelocModel::Ropi => llvm::RelocModel::ROPI,
+        RelocModel::Rwpi => llvm::RelocModel::RWPI,
+        RelocModel::RopiRwpi => llvm::RelocModel::ROPI_RWPI,
+    }
+}
+
+/// Translates the (optional) session code model into the LLVM equivalent;
+/// an unset model maps to `llvm::CodeModel::None` (LLVM's default).
+fn to_llvm_code_model(code_model: Option<CodeModel>) -> llvm::CodeModel {
+    match code_model {
+        None => llvm::CodeModel::None,
+        Some(CodeModel::Tiny) => llvm::CodeModel::Tiny,
+        Some(CodeModel::Small) => llvm::CodeModel::Small,
+        Some(CodeModel::Kernel) => llvm::CodeModel::Kernel,
+        Some(CodeModel::Medium) => llvm::CodeModel::Medium,
+        Some(CodeModel::Large) => llvm::CodeModel::Large,
+    }
+}
+
+/// Returns a thread-safe factory closure that creates fresh LLVM
+/// `TargetMachine`s configured from the session's target, CPU, features, and
+/// codegen options.
+///
+/// All session-derived configuration is captured eagerly (as C strings and
+/// plain values) so the returned closure carries no borrow of `Session` and
+/// can be invoked from worker threads.
+pub fn target_machine_factory(
+    sess: &Session,
+    optlvl: config::OptLevel,
+) -> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
+    let reloc_model = to_llvm_relocation_model(sess.relocation_model());
+
+    let (opt_level, _) = to_llvm_opt_settings(optlvl);
+    let use_softfp = sess.opts.cg.soft_float;
+
+    // Data sections follow the function-sections setting.
+    let ffunction_sections = sess.target.target.options.function_sections;
+    let fdata_sections = ffunction_sections;
+
+    let code_model = to_llvm_code_model(sess.code_model());
+
+    let features = attributes::llvm_target_features(sess).collect::<Vec<_>>();
+    let mut singlethread = sess.target.target.options.singlethread;
+
+    // On the wasm target once the `atomics` feature is enabled that means that
+    // we're no longer single-threaded, or otherwise we don't want LLVM to
+    // lower atomic operations to single-threaded operations.
+    if singlethread
+        && sess.target.target.llvm_target.contains("wasm32")
+        && sess.target_features.contains(&sym::atomics)
+    {
+        singlethread = false;
+    }
+
+    let triple = SmallCStr::new(&sess.target.target.llvm_target);
+    let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
+    let features = features.join(",");
+    let features = CString::new(features).unwrap();
+    let abi = SmallCStr::new(&sess.target.target.options.llvm_abiname);
+    let trap_unreachable = sess.target.target.options.trap_unreachable;
+    let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;
+
+    let asm_comments = sess.asm_comments();
+    let relax_elf_relocations = sess.target.target.options.relax_elf_relocations;
+
+    // `-Z use-ctors-section` overrides the target default.
+    let use_init_array = !sess
+        .opts
+        .debugging_opts
+        .use_ctors_section
+        .unwrap_or(sess.target.target.options.use_ctors_section);
+
+    Arc::new(move || {
+        let tm = unsafe {
+            llvm::LLVMRustCreateTargetMachine(
+                triple.as_ptr(),
+                cpu.as_ptr(),
+                features.as_ptr(),
+                abi.as_ptr(),
+                code_model,
+                reloc_model,
+                opt_level,
+                use_softfp,
+                ffunction_sections,
+                fdata_sections,
+                trap_unreachable,
+                singlethread,
+                asm_comments,
+                emit_stack_size_section,
+                relax_elf_relocations,
+                use_init_array,
+            )
+        };
+
+        tm.ok_or_else(|| {
+            format!("Could not create LLVM TargetMachine for triple: {}", triple.to_str().unwrap())
+        })
+    })
+}
+
+/// Writes the module's current bitcode to a `.bc` temp file tagged with
+/// `name` when `-C save-temps` is enabled; a no-op otherwise. Used to
+/// snapshot intermediate states of the LTO pipeline for debugging.
+pub(crate) fn save_temp_bitcode(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    module: &ModuleCodegen<ModuleLlvm>,
+    name: &str,
+) {
+    if !cgcx.save_temps {
+        return;
+    }
+    unsafe {
+        let ext = format!("{}.bc", name);
+        let cgu = Some(&module.name[..]);
+        let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
+        let cstr = path_to_c_string(&path);
+        let llmod = module.module_llvm.llmod();
+        llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
+    }
+}
+
+/// RAII registration of rustc's LLVM diagnostic and inline-asm handlers for
+/// one LLVM context; the handlers are deregistered and the payload freed on
+/// drop.
+pub struct DiagnosticHandlers<'a> {
+    // Heap-allocated (cgcx, handler) pair handed to LLVM as an opaque payload
+    // for the two callbacks below; owned by this struct, freed in `drop`.
+    data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
+    llcx: &'a llvm::Context,
+}
+
+impl<'a> DiagnosticHandlers<'a> {
+    pub fn new(
+        cgcx: &'a CodegenContext<LlvmCodegenBackend>,
+        handler: &'a Handler,
+        llcx: &'a llvm::Context,
+    ) -> Self {
+        let data = Box::into_raw(Box::new((cgcx, handler)));
+        unsafe {
+            llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
+            llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data.cast());
+        }
+        DiagnosticHandlers { data, llcx }
+    }
+}
+
+impl<'a> Drop for DiagnosticHandlers<'a> {
+    fn drop(&mut self) {
+        use std::ptr::null_mut;
+        unsafe {
+            // Null out the callbacks' payloads before freeing `data`, so a
+            // late-arriving diagnostic sees null and bails (both handlers
+            // check for it).
+            llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
+            llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
+            drop(Box::from_raw(self.data));
+        }
+    }
+}
+
+/// Forwards an inline-asm diagnostic from LLVM to rustc's shared diagnostic
+/// emitter, translating LLVM's severity level and optionally attaching the
+/// post-substitution assembly `source` with its spans.
+fn report_inline_asm(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    msg: String,
+    level: llvm::DiagnosticLevel,
+    mut cookie: c_uint,
+    source: Option<(String, Vec<InnerSpan>)>,
+) {
+    // In LTO build we may get srcloc values from other crates which are invalid
+    // since they use a different source map. To be safe we just suppress these
+    // in LTO builds.
+    if matches!(cgcx.lto, Lto::Fat | Lto::Thin) {
+        cookie = 0;
+    }
+    let level = match level {
+        llvm::DiagnosticLevel::Error => Level::Error,
+        llvm::DiagnosticLevel::Warning => Level::Warning,
+        llvm::DiagnosticLevel::Note | llvm::DiagnosticLevel::Remark => Level::Note,
+    };
+    cgcx.diag_emitter.inline_asm_error(cookie as u32, msg, level, source);
+}
+
+unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void, cookie: c_uint) {
+ if user.is_null() {
+ return;
+ }
+ let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
+
+ // Recover the post-substitution assembly code from LLVM for better
+ // diagnostics.
+ let mut have_source = false;
+ let mut buffer = String::new();
+ let mut level = llvm::DiagnosticLevel::Error;
+ let mut loc = 0;
+ let mut ranges = [0; 8];
+ let mut num_ranges = ranges.len() / 2;
+ let msg = llvm::build_string(|msg| {
+ buffer = llvm::build_string(|buffer| {
+ have_source = llvm::LLVMRustUnpackSMDiagnostic(
+ diag,
+ msg,
+ buffer,
+ &mut level,
+ &mut loc,
+ ranges.as_mut_ptr(),
+ &mut num_ranges,
+ );
+ })
+ .expect("non-UTF8 inline asm");
+ })
+ .expect("non-UTF8 SMDiagnostic");
+
+ let source = have_source.then(|| {
+ let mut spans = vec![InnerSpan::new(loc as usize, loc as usize)];
+ for i in 0..num_ranges {
+ spans.push(InnerSpan::new(ranges[i * 2] as usize, ranges[i * 2 + 1] as usize));
+ }
+ (buffer, spans)
+ });
+
+ report_inline_asm(cgcx, msg, level, cookie, source);
+}
+
+unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
+ if user.is_null() {
+ return;
+ }
+ let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
+
+ match llvm::diagnostic::Diagnostic::unpack(info) {
+ llvm::diagnostic::InlineAsm(inline) => {
+ report_inline_asm(
+ cgcx,
+ llvm::twine_to_string(inline.message),
+ inline.level,
+ inline.cookie,
+ None,
+ );
+ }
+
+ llvm::diagnostic::Optimization(opt) => {
+ let enabled = match cgcx.remark {
+ Passes::All => true,
+ Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
+ };
+
+ if enabled {
+ diag_handler.note_without_error(&format!(
+ "optimization {} for {} at {}:{}:{}: {}",
+ opt.kind.describe(),
+ opt.pass_name,
+ opt.filename,
+ opt.line,
+ opt.column,
+ opt.message
+ ));
+ }
+ }
+ llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
+ let msg = llvm::build_string(|s| {
+ llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+ })
+ .expect("non-UTF8 diagnostic");
+ diag_handler.warn(&msg);
+ }
+ llvm::diagnostic::Unsupported(diagnostic_ref) => {
+ let msg = llvm::build_string(|s| {
+ llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+ })
+ .expect("non-UTF8 diagnostic");
+ diag_handler.err(&msg);
+ }
+ llvm::diagnostic::UnknownDiagnostic(..) => {}
+ }
+}
+
+/// Computes the path template handed to LLVM for `.profraw` output when PGO
+/// instrumentation (`-C profile-generate`) is enabled; `None` otherwise.
+fn get_pgo_gen_path(config: &ModuleConfig) -> Option<CString> {
+    let opt_dir_path = match config.pgo_gen {
+        SwitchWithOptPath::Enabled(ref opt_dir_path) => opt_dir_path,
+        SwitchWithOptPath::Disabled => return None,
+    };
+    // If no directory was supplied, the template is relative to the cwd.
+    let path = match opt_dir_path {
+        Some(dir_path) => dir_path.join("default_%m.profraw"),
+        None => PathBuf::from("default_%m.profraw"),
+    };
+    Some(CString::new(format!("{}", path.display())).unwrap())
+}
+
+/// Returns the profile-data input path for `-C profile-use`, converted to a
+/// C string for LLVM; `None` when PGO use is disabled.
+fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> {
+    let path_buf = config.pgo_use.as_ref()?;
+    Some(CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
+}
+
+/// Whether optimization should be driven through LLVM's new pass manager.
+///
+/// The new pass manager requires LLVM 9 or newer, and even then stays
+/// disabled unless explicitly requested in the module configuration.
+pub(crate) fn should_use_new_llvm_pass_manager(config: &ModuleConfig) -> bool {
+    llvm_util::get_major_version() >= 9 && config.new_llvm_pass_manager
+}
+
+/// Optimizes `module` through LLVM's new pass manager.
+///
+/// Translates rustc's module configuration (opt level, LTO stage, sanitizers,
+/// PGO paths, self-profiling hooks) into the single
+/// `LLVMRustOptimizeWithNewPassManager` FFI call.
+///
+/// # Safety
+/// Calls straight into LLVM; the module and target machine must be live.
+pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    module: &ModuleCodegen<ModuleLlvm>,
+    config: &ModuleConfig,
+    opt_level: config::OptLevel,
+    opt_stage: llvm::OptStage,
+) {
+    let unroll_loops =
+        opt_level != config::OptLevel::Size && opt_level != config::OptLevel::SizeMin;
+    let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed();
+    let pgo_gen_path = get_pgo_gen_path(config);
+    let pgo_use_path = get_pgo_use_path(config);
+    let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
+    // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
+    let sanitizer_options = if !is_lto {
+        Some(llvm::SanitizerOptions {
+            sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
+            sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
+            sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
+            sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
+            sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
+            sanitize_thread: config.sanitizer.contains(SanitizerSet::THREAD),
+        })
+    } else {
+        None
+    };
+
+    // Null when LLVM self-profiling is off; the callbacks below check it.
+    let llvm_selfprofiler = if cgcx.prof.llvm_recording_enabled() {
+        let mut llvm_profiler = LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap());
+        &mut llvm_profiler as *mut _ as *mut c_void
+    } else {
+        std::ptr::null_mut()
+    };
+
+    // FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
+    // We would have to add upstream support for this first, before we can support
+    // config.inline_threshold and our more aggressive default thresholds.
+    // FIXME: NewPM uses an different and more explicit way to textually represent
+    // pass pipelines. It would probably make sense to expose this, but it would
+    // require a different format than the current -C passes.
+    llvm::LLVMRustOptimizeWithNewPassManager(
+        module.module_llvm.llmod(),
+        &*module.module_llvm.tm,
+        to_pass_builder_opt_level(opt_level),
+        opt_stage,
+        config.no_prepopulate_passes,
+        config.verify_llvm_ir,
+        using_thin_buffers,
+        config.merge_functions,
+        unroll_loops,
+        config.vectorize_slp,
+        config.vectorize_loop,
+        config.no_builtins,
+        config.emit_lifetime_markers,
+        sanitizer_options.as_ref(),
+        pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+        pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+        llvm_selfprofiler,
+        selfprofile_before_pass_callback,
+        selfprofile_after_pass_callback,
+    );
+}
+
+// Unsafe due to LLVM calls.
+//
+// Runs the (legacy) LLVM optimization pipeline over `module`: a function pass
+// manager (FPM) followed by a module pass manager (MPM), both populated from
+// LLVM's PassManagerBuilder unless `-C no-prepopulate-passes` is set. When the
+// new pass manager is enabled this delegates to
+// `optimize_with_new_llvm_pass_manager` instead.
+pub(crate) unsafe fn optimize(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    diag_handler: &Handler,
+    module: &ModuleCodegen<ModuleLlvm>,
+    config: &ModuleConfig,
+) -> Result<(), FatalError> {
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &module.name[..]);
+
+    let llmod = module.module_llvm.llmod();
+    let llcx = &*module.module_llvm.llcx;
+    let tm = &*module.module_llvm.tm;
+    // Route LLVM diagnostics for this context to `diag_handler` while this
+    // function runs; the handlers are torn down when `_handlers` drops.
+    let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+    let module_name = module.name.clone();
+    let module_name = Some(&module_name[..]);
+
+    // Dump the not-yet-optimized bitcode to `<name>.no-opt.bc` when requested.
+    if config.emit_no_opt_bc {
+        let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
+        let out = path_to_c_string(&out);
+        llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
+    }
+
+    if let Some(opt_level) = config.opt_level {
+        // The new pass manager replaces the entire legacy FPM/MPM pipeline below.
+        if should_use_new_llvm_pass_manager(config) {
+            let opt_stage = match cgcx.lto {
+                Lto::Fat => llvm::OptStage::PreLinkFatLTO,
+                Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
+                _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
+                _ => llvm::OptStage::PreLinkNoLTO,
+            };
+            optimize_with_new_llvm_pass_manager(cgcx, module, config, opt_level, opt_stage);
+            return Ok(());
+        }
+
+        if cgcx.prof.llvm_recording_enabled() {
+            diag_handler
+                .warn("`-Z self-profile-events = llvm` requires `-Z new-llvm-pass-manager`");
+        }
+
+        // Create the two optimizing pass managers. These mirror what clang
+        // does, and are populated by LLVM's default PassManagerBuilder.
+        // Each manager has a different set of passes, but they also share
+        // some common passes.
+        let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
+        let mpm = llvm::LLVMCreatePassManager();
+
+        {
+            // Looks a pass up by name in LLVM's registry.
+            let find_pass = |pass_name: &str| {
+                let pass_name = SmallCStr::new(pass_name);
+                llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
+            };
+
+            if config.verify_llvm_ir {
+                // Verification should run as the very first pass.
+                llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
+            }
+
+            let mut extra_passes = Vec::new();
+            let mut have_name_anon_globals_pass = false;
+
+            // User-requested passes from `-C passes`.
+            for pass_name in &config.passes {
+                if pass_name == "lint" {
+                    // Linting should also be performed early, directly on the generated IR.
+                    llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
+                    continue;
+                }
+
+                if let Some(pass) = find_pass(pass_name) {
+                    extra_passes.push(pass);
+                } else {
+                    diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
+                }
+
+                if pass_name == "name-anon-globals" {
+                    have_name_anon_globals_pass = true;
+                }
+            }
+
+            add_sanitizer_passes(config, &mut extra_passes);
+
+            // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
+            // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
+            // we'll get errors in LLVM.
+            let using_thin_buffers = config.bitcode_needed();
+            if !config.no_prepopulate_passes {
+                llvm::LLVMAddAnalysisPasses(tm, fpm);
+                llvm::LLVMAddAnalysisPasses(tm, mpm);
+                let opt_level = to_llvm_opt_settings(opt_level).0;
+                let prepare_for_thin_lto = cgcx.lto == Lto::Thin
+                    || cgcx.lto == Lto::ThinLocal
+                    || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
+                with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
+                    // Extra `-C passes` run after the default pipeline.
+                    llvm::LLVMRustAddLastExtensionPasses(
+                        b,
+                        extra_passes.as_ptr(),
+                        extra_passes.len() as size_t,
+                    );
+                    llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
+                    llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
+                });
+
+                // Preparing for ThinLTO already includes NameAnonGlobals.
+                have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
+                if using_thin_buffers && !prepare_for_thin_lto {
+                    llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
+                    have_name_anon_globals_pass = true;
+                }
+            } else {
+                // If we don't use the standard pipeline, directly populate the MPM
+                // with the extra passes.
+                for pass in extra_passes {
+                    llvm::LLVMRustAddPass(mpm, pass);
+                }
+            }
+
+            if using_thin_buffers && !have_name_anon_globals_pass {
+                // As described above, this will probably cause an error in LLVM
+                if config.no_prepopulate_passes {
+                    diag_handler.err(
+                        "The current compilation is going to use thin LTO buffers \
+                         without running LLVM's NameAnonGlobals pass. \
+                         This will likely cause errors in LLVM. Consider adding \
+                         -C passes=name-anon-globals to the compiler command line.",
+                    );
+                } else {
+                    bug!(
+                        "We are using thin LTO buffers without running the NameAnonGlobals pass. \
+                         This will likely cause errors in LLVM and should never happen."
+                    );
+                }
+            }
+        }
+
+        diag_handler.abort_if_errors();
+
+        // Finally, run the actual optimization passes
+        {
+            let _timer = cgcx.prof.extra_verbose_generic_activity(
+                "LLVM_module_optimize_function_passes",
+                &module.name[..],
+            );
+            llvm::LLVMRustRunFunctionPassManager(fpm, llmod);
+        }
+        {
+            let _timer = cgcx.prof.extra_verbose_generic_activity(
+                "LLVM_module_optimize_module_passes",
+                &module.name[..],
+            );
+            llvm::LLVMRunPassManager(mpm, llmod);
+        }
+
+        // Deallocate managers that we're now done with
+        llvm::LLVMDisposePassManager(fpm);
+        llvm::LLVMDisposePassManager(mpm);
+    }
+    Ok(())
+}
+
+/// Appends the sanitizer instrumentation passes enabled in `config` to `passes`.
+unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
+    if config.sanitizer.contains(SanitizerSet::ADDRESS) {
+        let recover = config.sanitizer_recover.contains(SanitizerSet::ADDRESS);
+        // AddressSanitizer instruments via a function pass plus a module pass.
+        passes.push(llvm::LLVMRustCreateAddressSanitizerFunctionPass(recover));
+        passes.push(llvm::LLVMRustCreateModuleAddressSanitizerPass(recover));
+    }
+    if config.sanitizer.contains(SanitizerSet::MEMORY) {
+        let track_origins = config.sanitizer_memory_track_origins as c_int;
+        let recover = config.sanitizer_recover.contains(SanitizerSet::MEMORY);
+        passes.push(llvm::LLVMRustCreateMemorySanitizerPass(track_origins, recover));
+    }
+    if config.sanitizer.contains(SanitizerSet::THREAD) {
+        passes.push(llvm::LLVMRustCreateThreadSanitizerPass());
+    }
+}
+
+/// Links all `modules` into the first one and returns the merged module.
+pub(crate) fn link(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    diag_handler: &Handler,
+    mut modules: Vec<ModuleCodegen<ModuleLlvm>>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+    use super::lto::{Linker, ModuleBuffer};
+    // Sort the modules by name to ensure deterministic behavior.
+    modules.sort_by(|a, b| a.name.cmp(&b.name));
+    let (first, elements) =
+        modules.split_first().expect("Bug! modules must contain at least one module.");
+
+    // Serialize each remaining module to a buffer and link it into the first.
+    let mut linker = Linker::new(first.module_llvm.llmod());
+    for module in elements {
+        let _timer =
+            cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name));
+        let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+        linker.add(&buffer.data()).map_err(|()| {
+            let msg = format!("failed to serialize module {:?}", module.name);
+            llvm_err(&diag_handler, &msg)
+        })?;
+    }
+    // Release the linker (which borrows the first module) before moving the
+    // merged module out of the vector.
+    drop(linker);
+    Ok(modules.remove(0))
+}
+
+/// Translates an optimized LLVM module into the artifacts requested by
+/// `config`: bitcode, embedded bitcode, LLVM IR text, assembly and/or an
+/// object file.
+pub(crate) unsafe fn codegen(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    diag_handler: &Handler,
+    module: ModuleCodegen<ModuleLlvm>,
+    config: &ModuleConfig,
+) -> Result<CompiledModule, FatalError> {
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &module.name[..]);
+    {
+        let llmod = module.module_llvm.llmod();
+        let llcx = &*module.module_llvm.llcx;
+        let tm = &*module.module_llvm.tm;
+        let module_name = module.name.clone();
+        let module_name = Some(&module_name[..]);
+        // Routes LLVM diagnostics to `diag_handler`; explicitly dropped at the
+        // end of this scope.
+        let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+        if cgcx.msvc_imps_needed {
+            create_msvc_imps(cgcx, llcx, llmod);
+        }
+
+        // A codegen-specific pass manager is used to generate object
+        // files for an LLVM module.
+        //
+        // Apparently each of these pass managers is a one-shot kind of
+        // thing, so we create a new one for each type of output. The
+        // pass manager passed to the closure should be ensured to not
+        // escape the closure itself, and the manager should only be
+        // used once.
+        unsafe fn with_codegen<'ll, F, R>(
+            tm: &'ll llvm::TargetMachine,
+            llmod: &'ll llvm::Module,
+            no_builtins: bool,
+            f: F,
+        ) -> R
+        where
+            F: FnOnce(&'ll mut PassManager<'ll>) -> R,
+        {
+            let cpm = llvm::LLVMCreatePassManager();
+            llvm::LLVMAddAnalysisPasses(tm, cpm);
+            llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+            f(cpm)
+        }
+
+        // Two things to note:
+        // - If object files are just LLVM bitcode we write bitcode, copy it to
+        //   the .o file, and delete the bitcode if it wasn't otherwise
+        //   requested.
+        // - If we don't have the integrated assembler then we need to emit
+        //   asm from LLVM and use `gcc` to create the object file.
+
+        let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+        let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
+        if config.bitcode_needed() {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]);
+            // Serialize the module once; the buffer is shared by the `.bc`
+            // file and the embedded-bitcode section below.
+            let thin = ThinBuffer::new(llmod);
+            let data = thin.data();
+
+            if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
+                let _timer = cgcx.prof.generic_activity_with_arg(
+                    "LLVM_module_codegen_emit_bitcode",
+                    &module.name[..],
+                );
+                if let Err(e) = fs::write(&bc_out, data) {
+                    let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
+                    diag_handler.err(&msg);
+                }
+            }
+
+            if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
+                let _timer = cgcx.prof.generic_activity_with_arg(
+                    "LLVM_module_codegen_embed_bitcode",
+                    &module.name[..],
+                );
+                embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
+            }
+        }
+
+        if config.emit_ir {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]);
+            let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
+            let out_c = path_to_c_string(&out);
+
+            // Symbol demangler invoked by LLVM while printing the IR; writes
+            // the demangled form into the provided buffer. Returns 0 when the
+            // input is not valid UTF-8, not a Rust mangled name, or the
+            // output buffer is too small — LLVM then keeps the raw name.
+            extern "C" fn demangle_callback(
+                input_ptr: *const c_char,
+                input_len: size_t,
+                output_ptr: *mut c_char,
+                output_len: size_t,
+            ) -> size_t {
+                let input =
+                    unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };
+
+                let input = match str::from_utf8(input) {
+                    Ok(s) => s,
+                    Err(_) => return 0,
+                };
+
+                let output = unsafe {
+                    slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
+                };
+                let mut cursor = io::Cursor::new(output);
+
+                let demangled = match rustc_demangle::try_demangle(input) {
+                    Ok(d) => d,
+                    Err(_) => return 0,
+                };
+
+                if write!(cursor, "{:#}", demangled).is_err() {
+                    // Possible only if provided buffer is not big enough
+                    return 0;
+                }
+
+                cursor.position() as size_t
+            }
+
+            let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
+            result.into_result().map_err(|()| {
+                let msg = format!("failed to write LLVM IR to {}", out.display());
+                llvm_err(diag_handler, &msg)
+            })?;
+        }
+
+        if config.emit_asm {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
+            let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+
+            // We can't use the same module for asm and object code output,
+            // because that triggers various errors like invalid IR or broken
+            // binaries. So we must clone the module to produce the asm output
+            // if we are also producing object code.
+            let llmod = if let EmitObj::ObjectCode(_) = config.emit_obj {
+                llvm::LLVMCloneModule(llmod)
+            } else {
+                llmod
+            };
+            with_codegen(tm, llmod, config.no_builtins, |cpm| {
+                write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile)
+            })?;
+        }
+
+        match config.emit_obj {
+            EmitObj::ObjectCode(_) => {
+                let _timer = cgcx
+                    .prof
+                    .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
+                with_codegen(tm, llmod, config.no_builtins, |cpm| {
+                    write_output_file(
+                        diag_handler,
+                        tm,
+                        cpm,
+                        llmod,
+                        &obj_out,
+                        llvm::FileType::ObjectFile,
+                    )
+                })?;
+            }
+
+            // "Object code" that is really just the bitcode file under the
+            // object filename.
+            EmitObj::Bitcode => {
+                debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
+                if let Err(e) = link_or_copy(&bc_out, &obj_out) {
+                    diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
+                }
+
+                if !config.emit_bc {
+                    debug!("removing_bitcode {:?}", bc_out);
+                    if let Err(e) = fs::remove_file(&bc_out) {
+                        diag_handler.err(&format!("failed to remove bitcode: {}", e));
+                    }
+                }
+            }
+
+            EmitObj::None => {}
+        }
+
+        drop(handlers);
+    }
+
+    Ok(module.into_compiled_module(
+        config.emit_obj != EmitObj::None,
+        config.emit_bc,
+        &cgcx.output_filenames,
+    ))
+}
+
+/// Embed the bitcode of an LLVM module in the LLVM module itself.
+///
+/// This is done primarily for iOS where it appears to be standard to compile C
+/// code at least with `-fembed-bitcode` which creates two sections in the
+/// executable:
+///
+/// * __LLVM,__bitcode
+/// * __LLVM,__cmdline
+///
+/// It appears *both* of these sections are necessary to get the linker to
+/// recognize what's going on. A suitable cmdline value is taken from the
+/// target spec.
+///
+/// Furthermore debug/O1 builds don't actually embed bitcode but rather just
+/// embed an empty section.
+///
+/// Basically all of this is us attempting to follow in the footsteps of clang
+/// on iOS. See #35968 for lots more info.
+unsafe fn embed_bitcode(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    llcx: &llvm::Context,
+    llmod: &llvm::Module,
+    cmdline: &str,
+    bitcode: &[u8],
+) {
+    // First global: `rustc.embedded.module` carries the bitcode bytes.
+    let llconst = common::bytes_in_context(llcx, bitcode);
+    let llglobal = llvm::LLVMAddGlobal(
+        llmod,
+        common::val_ty(llconst),
+        "rustc.embedded.module\0".as_ptr().cast(),
+    );
+    llvm::LLVMSetInitializer(llglobal, llconst);
+
+    let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
+        || cgcx.opts.target_triple.triple().contains("-darwin")
+        || cgcx.opts.target_triple.triple().contains("-tvos");
+
+    let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
+    llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+    llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+    llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
+
+    // Second global: `rustc.embedded.cmdline` carries the cmdline string.
+    let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
+    let llglobal = llvm::LLVMAddGlobal(
+        llmod,
+        common::val_ty(llconst),
+        "rustc.embedded.cmdline\0".as_ptr().cast(),
+    );
+    llvm::LLVMSetInitializer(llglobal, llconst);
+    let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
+    llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+    llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+
+    // We're adding custom sections to the output object file, but we definitely
+    // do not want these custom sections to make their way into the final linked
+    // executable. The purpose of these custom sections is for tooling
+    // surrounding object files to work with the LLVM IR, if necessary. For
+    // example rustc's own LTO will look for LLVM IR inside of the object file
+    // in these sections by default.
+    //
+    // To handle this is a bit different depending on the object file format
+    // used by the backend, broken down into a few different categories:
+    //
+    // * Mach-O - this is for macOS. Inspecting the source code for the native
+    //   linker here shows that the `.llvmbc` and `.llvmcmd` sections are
+    //   automatically skipped by the linker. In that case there's nothing extra
+    //   that we need to do here.
+    //
+    // * Wasm - the native LLD linker is hard-coded to skip `.llvmbc` and
+    //   `.llvmcmd` sections, so there's nothing extra we need to do.
+    //
+    // * COFF - if we don't do anything the linker will by default copy all
+    //   these sections to the output artifact, not what we want! To subvert
+    //   this we want to flag the sections we inserted here as
+    //   `IMAGE_SCN_LNK_REMOVE`. Unfortunately though LLVM has no native way to
+    //   do this. Thankfully though we can do this with some inline assembly,
+    //   which is easy enough to add via module-level global inline asm.
+    //
+    // * ELF - this is very similar to COFF above. One difference is that these
+    //   sections are removed from the output linked artifact when
+    //   `--gc-sections` is passed, which we pass by default. If that flag isn't
+    //   passed though then these sections will show up in the final output.
+    //   Additionally the flag that we need to set here is `SHF_EXCLUDE`.
+    if is_apple
+        || cgcx.opts.target_triple.triple().starts_with("wasm")
+        || cgcx.opts.target_triple.triple().starts_with("asmjs")
+    {
+        // nothing to do here
+    } else if cgcx.opts.target_triple.triple().contains("windows")
+        || cgcx.opts.target_triple.triple().contains("uefi")
+    {
+        // COFF: "n" marks the sections IMAGE_SCN_LNK_REMOVE.
+        let asm = "
+            .section .llvmbc,\"n\"
+            .section .llvmcmd,\"n\"
+        ";
+        llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+    } else {
+        // ELF: "e" marks the sections SHF_EXCLUDE.
+        let asm = "
+            .section .llvmbc,\"e\"
+            .section .llvmcmd,\"e\"
+        ";
+        llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+    }
+}
+
+/// Creates and configures an LLVM `PassManagerBuilder`, invokes `f` with it,
+/// and disposes of it afterwards.
+pub unsafe fn with_llvm_pmb(
+    llmod: &llvm::Module,
+    config: &ModuleConfig,
+    opt_level: llvm::CodeGenOptLevel,
+    prepare_for_thin_lto: bool,
+    f: &mut dyn FnMut(&llvm::PassManagerBuilder),
+) {
+    use std::ptr;
+
+    // Create the PassManagerBuilder for LLVM. We configure it with
+    // reasonable defaults and prepare it to actually populate the pass
+    // manager.
+    let builder = llvm::LLVMPassManagerBuilderCreate();
+    let opt_size =
+        config.opt_size.map(|x| to_llvm_opt_settings(x).1).unwrap_or(llvm::CodeGenOptSizeNone);
+    let inline_threshold = config.inline_threshold;
+    let pgo_gen_path = get_pgo_gen_path(config);
+    let pgo_use_path = get_pgo_use_path(config);
+
+    llvm::LLVMRustConfigurePassManagerBuilder(
+        builder,
+        opt_level,
+        config.merge_functions,
+        config.vectorize_slp,
+        config.vectorize_loop,
+        prepare_for_thin_lto,
+        pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+        pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+    );
+
+    llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32);
+
+    if opt_size != llvm::CodeGenOptSizeNone {
+        llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1);
+    }
+
+    llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
+
+    // Here we match what clang does (kinda). For O0 we only inline
+    // always-inline functions (but don't add lifetime intrinsics), at O1 we
+    // inline with lifetime intrinsics, and O2+ we add an inliner with a
+    // thresholds copied from clang.
+    //
+    // NOTE: arm order is significant — an explicit `-C inline-threshold`
+    // overrides everything, and the opt-size arms are tested before the
+    // `None`/`Less`/`Default` opt-level arms.
+    match (opt_level, opt_size, inline_threshold) {
+        (.., Some(t)) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32);
+        }
+        (llvm::CodeGenOptLevel::Aggressive, ..) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275);
+        }
+        (_, llvm::CodeGenOptSizeDefault, _) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75);
+        }
+        (_, llvm::CodeGenOptSizeAggressive, _) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25);
+        }
+        (llvm::CodeGenOptLevel::None, ..) => {
+            llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
+        }
+        (llvm::CodeGenOptLevel::Less, ..) => {
+            llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
+        }
+        (llvm::CodeGenOptLevel::Default, ..) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);
+        }
+    }
+
+    f(builder);
+    llvm::LLVMPassManagerBuilderDispose(builder);
+}
+
+// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
+// This is required to satisfy `dllimport` references to static data in .rlibs
+// when using MSVC linker. We do this only for data, as linker can fix up
+// code references on its own.
+// See #26591, #27438
+fn create_msvc_imps(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    llcx: &llvm::Context,
+    llmod: &llvm::Module,
+) {
+    if !cgcx.msvc_imps_needed {
+        return;
+    }
+    // The x86 ABI seems to require that leading underscores are added to symbol
+    // names, so we need an extra underscore on x86. There's also a leading
+    // '\x01' here which disables LLVM's symbol mangling (e.g., no extra
+    // underscores added in front).
+    let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
+
+    unsafe {
+        let i8p_ty = Type::i8p_llcx(llcx);
+        // Only externally-visible, *defined* globals get an `__imp_` alias.
+        let globals = base::iter_globals(llmod)
+            .filter(|&val| {
+                llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
+                    && llvm::LLVMIsDeclaration(val) == 0
+            })
+            .filter_map(|val| {
+                // Exclude some symbols that we know are not Rust symbols.
+                let name = llvm::get_value_name(val);
+                if ignored(name) { None } else { Some((val, name)) }
+            })
+            .map(move |(val, name)| {
+                let mut imp_name = prefix.as_bytes().to_vec();
+                imp_name.extend(name);
+                let imp_name = CString::new(imp_name).unwrap();
+                (imp_name, val)
+            })
+            .collect::<Vec<_>>();
+
+        // Materialize the list before adding globals, since insertion mutates
+        // the module's global list that was being iterated above.
+        for (imp_name, val) in globals {
+            let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
+            llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
+            llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
+        }
+    }
+
+    // Use this function to exclude certain symbols from `__imp` generation.
+    fn ignored(symbol_name: &[u8]) -> bool {
+        // These are symbols generated by LLVM's profiling instrumentation
+        symbol_name.starts_with(b"__llvm_profile_")
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
new file mode 100644
index 0000000..f35708b
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -0,0 +1,205 @@
+//! Codegen the completed AST to the LLVM IR.
+//!
+//! Some functions here, such as codegen_block and codegen_expr, return a value --
+//! the result of the codegen to LLVM -- while others, such as codegen_fn
+//! and mono_item, are called only for the side effect of adding a
+//! particular definition to the LLVM IR output we're producing.
+//!
+//! Hopefully useful general knowledge about codegen:
+//!
+//! * There's no way to find out the `Ty` type of a Value. Doing so
+//! would be "trying to get the eggs out of an omelette" (credit:
+//! pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`,
+//! but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int,
+//! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.
+
+use super::ModuleLlvm;
+
+use crate::attributes;
+use crate::builder::Builder;
+use crate::common;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::metadata;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_middle::dep_graph;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::middle::exported_symbols;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{DebugInfo, SanitizerSet};
+use rustc_span::symbol::Symbol;
+
+use std::ffi::CString;
+use std::time::Instant;
+
+/// Writes the crate metadata — prefixed by the encoding version and
+/// snappy-compressed — into a global placed in the target's metadata section
+/// of `llvm_module`.
+pub fn write_compressed_metadata<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    metadata: &EncodedMetadata,
+    llvm_module: &mut ModuleLlvm,
+) {
+    use snap::write::FrameEncoder;
+    use std::io::Write;
+
+    let (metadata_llcx, metadata_llmod) = (&*llvm_module.llcx, llvm_module.llmod());
+    // The uncompressed version header comes first, then the compressed payload.
+    let mut compressed = tcx.metadata_encoding_version();
+    FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap();
+
+    let llmeta = common::bytes_in_context(metadata_llcx, &compressed);
+    let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false);
+    let name = exported_symbols::metadata_symbol_name(tcx);
+    let buf = CString::new(name).unwrap();
+    let llglobal =
+        unsafe { llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr()) };
+    unsafe {
+        llvm::LLVMSetInitializer(llglobal, llconst);
+        let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
+        let name = SmallCStr::new(section_name);
+        llvm::LLVMSetSection(llglobal, name.as_ptr());
+
+        // Also generate a .section directive to force no
+        // flags, at least for ELF outputs, so that the
+        // metadata doesn't get loaded into memory.
+        let directive = format!(".section {}", section_name);
+        llvm::LLVMSetModuleInlineAsm2(metadata_llmod, directive.as_ptr().cast(), directive.len())
+    }
+}
+
+/// Iterator over a chain of LLVM values (e.g. a module's globals), advanced by
+/// repeatedly calling an LLVM "get next" function on the current value.
+pub struct ValueIter<'ll> {
+    /// Value to be yielded next; `None` once the chain is exhausted.
+    cur: Option<&'ll Value>,
+    /// LLVM successor function, e.g. `LLVMGetNextGlobal`.
+    step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
+}
+
+impl Iterator for ValueIter<'ll> {
+    type Item = &'ll Value;
+
+    fn next(&mut self) -> Option<&'ll Value> {
+        // Yield the current value and advance the cursor via the LLVM
+        // successor function; once `cur` is `None` the iterator stays exhausted.
+        let value = self.cur?;
+        self.cur = unsafe { (self.step)(value) };
+        Some(value)
+    }
+}
+
+/// Returns an iterator over all global values of `llmod`.
+pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
+    unsafe { ValueIter { cur: llvm::LLVMGetFirstGlobal(llmod), step: llvm::LLVMGetNextGlobal } }
+}
+
+/// Codegens the codegen unit `cgu_name` into a fresh LLVM module, returning
+/// the module together with a cost estimate (wall-clock codegen time in
+/// nanoseconds) used to schedule LLVM work across CGUs.
+pub fn compile_codegen_unit(
+    tcx: TyCtxt<'tcx>,
+    cgu_name: Symbol,
+) -> (ModuleCodegen<ModuleLlvm>, u64) {
+    let prof_timer = tcx.prof.generic_activity_with_arg("codegen_module", cgu_name.to_string());
+    let start_time = Instant::now();
+
+    // Run the actual codegen as a dep-graph task so the result is tracked
+    // by incremental compilation.
+    let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+    let (module, _) =
+        tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
+    let time_to_codegen = start_time.elapsed();
+    drop(prof_timer);
+
+    // We assume that the cost to run LLVM on a CGU is proportional to
+    // the time we needed for codegenning it.
+    let cost = time_to_codegen.as_nanos() as u64;
+
+    fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm> {
+        let cgu = tcx.codegen_unit(cgu_name);
+        // Instantiate monomorphizations without filling out definitions yet...
+        let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
+        {
+            let cx = CodegenCx::new(tcx, cgu, &llvm_module);
+            let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
+            for &(mono_item, (linkage, visibility)) in &mono_items {
+                mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+            }
+
+            // ... and now that we have everything pre-defined, fill out those definitions.
+            for &(mono_item, _) in &mono_items {
+                mono_item.define::<Builder<'_, '_, '_>>(&cx);
+            }
+
+            // If this codegen unit contains the main function, also create the
+            // wrapper here
+            if let Some(entry) = maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx) {
+                attributes::sanitize(&cx, SanitizerSet::empty(), entry);
+            }
+
+            // Run replace-all-uses-with for statics that need it
+            for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
+                unsafe {
+                    let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
+                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+                    llvm::LLVMDeleteGlobal(old_g);
+                }
+            }
+
+            // Finalize code coverage by injecting the coverage map. Note, the coverage map will
+            // also be added to the `llvm.used` variable, created next.
+            if cx.sess().opts.debugging_opts.instrument_coverage {
+                cx.coverageinfo_finalize();
+            }
+
+            // Create the llvm.used variable
+            // This variable has type [N x i8*] and is stored in the llvm.metadata section
+            if !cx.used_statics().borrow().is_empty() {
+                cx.create_used_variable()
+            }
+
+            // Finalize debuginfo
+            if cx.sess().opts.debuginfo != DebugInfo::None {
+                cx.debuginfo_finalize();
+            }
+        }
+
+        ModuleCodegen {
+            name: cgu_name.to_string(),
+            module_llvm: llvm_module,
+            kind: ModuleKind::Regular,
+        }
+    }
+
+    (module, cost)
+}
+
+pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
+ let sect = match attrs.link_section {
+ Some(name) => name,
+ None => return,
+ };
+ unsafe {
+ let buf = SmallCStr::new(§.as_str());
+ llvm::LLVMSetSection(llval, buf.as_ptr());
+ }
+}
+
+/// Translates a rustc `Linkage` into the corresponding LLVM linkage.
+pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
+    match linkage {
+        Linkage::External => llvm::Linkage::ExternalLinkage,
+        Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
+        Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
+        Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
+        Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
+        Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
+        Linkage::Appending => llvm::Linkage::AppendingLinkage,
+        Linkage::Internal => llvm::Linkage::InternalLinkage,
+        Linkage::Private => llvm::Linkage::PrivateLinkage,
+        Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
+        Linkage::Common => llvm::Linkage::CommonLinkage,
+    }
+}
+
+/// Translates a rustc `Visibility` into the corresponding LLVM visibility.
+///
+/// (The parameter was misleadingly named `linkage`, a copy-paste from
+/// `linkage_to_llvm` above; Rust arguments are positional so renaming it is
+/// caller-compatible.)
+pub fn visibility_to_llvm(visibility: Visibility) -> llvm::Visibility {
+    match visibility {
+        Visibility::Default => llvm::Visibility::Default,
+        Visibility::Hidden => llvm::Visibility::Hidden,
+        Visibility::Protected => llvm::Visibility::Protected,
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
new file mode 100644
index 0000000..0c172dc
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -0,0 +1,1431 @@
+use crate::common::Funclet;
+use crate::context::CodegenCx;
+use crate::llvm::{self, BasicBlock, False};
+use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use libc::{c_char, c_uint};
+use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::MemFlags;
+use rustc_data_structures::const_cstr;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::sym;
+use rustc_target::abi::{self, Align, Size};
+use rustc_target::spec::{HasTargetSpec, Target};
+use std::borrow::Cow;
+use std::ffi::CStr;
+use std::ops::{Deref, Range};
+use std::ptr;
+use tracing::debug;
+
+// All Builders must have an llfn associated with them
+#[must_use]
+pub struct Builder<'a, 'll, 'tcx> {
+    // Owned LLVM IRBuilder handle; disposed in the `Drop` impl below.
+    pub llbuilder: &'ll mut llvm::Builder<'ll>,
+    // Back-reference to the per-CGU codegen context.
+    pub cx: &'a CodegenCx<'ll, 'tcx>,
+}
+
+// Frees the underlying LLVM IRBuilder when this wrapper goes out of scope.
+impl Drop for Builder<'a, 'll, 'tcx> {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
+        }
+    }
+}
+
+// FIXME(eddyb) use a checked constructor when they become `const fn`.
+const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };
+
+/// Empty string, to be used where LLVM expects an instruction name, indicating
+/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
+// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
+const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();
+
+// The builder reuses the codegen context's associated backend types verbatim.
+impl BackendTypes for Builder<'_, 'll, 'tcx> {
+    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
+    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
+    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
+    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
+    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
+
+    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
+    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
+}
+
+// The following impls simply forward layout, type-context, target, and
+// layout-of queries to the wrapped `CodegenCx`, so a `Builder` can be used
+// anywhere those traits are required.
+impl abi::HasDataLayout for Builder<'_, '_, '_> {
+    fn data_layout(&self) -> &abi::TargetDataLayout {
+        self.cx.data_layout()
+    }
+}
+
+impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.cx.tcx
+    }
+}
+
+impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        self.cx.param_env()
+    }
+}
+
+impl HasTargetSpec for Builder<'_, '_, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.cx.target_spec()
+    }
+}
+
+impl abi::LayoutOf for Builder<'_, '_, 'tcx> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = TyAndLayout<'tcx>;
+
+    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+        self.cx.layout_of(ty)
+    }
+}
+
+// Deref to the context so `self.foo()` can reach `CodegenCx` methods directly.
+impl Deref for Builder<'_, 'll, 'tcx> {
+    type Target = CodegenCx<'ll, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        self.cx
+    }
+}
+
+impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
+    type CodegenCx = CodegenCx<'ll, 'tcx>;
+}
+
+// Generates one trait method per (name, LLVM C-API builder function) pair;
+// each emits a single unnamed value instruction via the builder.
+macro_rules! builder_methods_for_value_instructions {
+    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
+        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
+            unsafe {
+                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
+            }
+        })+
+    }
+}
+
+impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
+    // Appends a fresh basic block named `name` to `llfn` and returns a
+    // builder positioned at its end.
+    fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self {
+        let mut bx = Builder::with_cx(cx);
+        let llbb = unsafe {
+            let name = SmallCStr::new(name);
+            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
+        };
+        bx.position_at_end(llbb);
+        bx
+    }
+
+    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
+        // Create a fresh builder from the crate context.
+        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
+        Builder { llbuilder, cx }
+    }
+
+    // New block in the same function as this builder's current block.
+    fn build_sibling_block(&self, name: &str) -> Self {
+        Builder::new_block(self.cx, self.llfn(), name)
+    }
+
+    // The basic block the builder is currently inserting into.
+    fn llbb(&self) -> &'ll BasicBlock {
+        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
+    }
+
+    fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
+        unsafe {
+            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
+        }
+    }
+
+    // --- Terminator instructions -------------------------------------------
+
+    fn ret_void(&mut self) {
+        unsafe {
+            llvm::LLVMBuildRetVoid(self.llbuilder);
+        }
+    }
+
+    fn ret(&mut self, v: &'ll Value) {
+        unsafe {
+            llvm::LLVMBuildRet(self.llbuilder, v);
+        }
+    }
+
+    fn br(&mut self, dest: &'ll BasicBlock) {
+        unsafe {
+            llvm::LLVMBuildBr(self.llbuilder, dest);
+        }
+    }
+
+    fn cond_br(
+        &mut self,
+        cond: &'ll Value,
+        then_llbb: &'ll BasicBlock,
+        else_llbb: &'ll BasicBlock,
+    ) {
+        unsafe {
+            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
+        }
+    }
+
+    // Builds a `switch` on `v`; each case constant is materialized at the
+    // same LLVM type as `v`.
+    fn switch(
+        &mut self,
+        v: &'ll Value,
+        else_llbb: &'ll BasicBlock,
+        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
+    ) {
+        let switch =
+            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
+        for (on_val, dest) in cases {
+            let on_val = self.const_uint_big(self.val_ty(v), on_val);
+            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
+        }
+    }
+
+    // Call with unwind edge: control continues at `then` on normal return
+    // and at `catch` if the callee unwinds.
+    fn invoke(
+        &mut self,
+        llfn: &'ll Value,
+        args: &[&'ll Value],
+        then: &'ll BasicBlock,
+        catch: &'ll BasicBlock,
+        funclet: Option<&Funclet<'ll>>,
+    ) -> &'ll Value {
+        debug!("invoke {:?} with args ({:?})", llfn, args);
+
+        let args = self.check_call("invoke", llfn, args);
+        let bundle = funclet.map(|funclet| funclet.bundle());
+        let bundle = bundle.as_ref().map(|b| &*b.raw);
+
+        unsafe {
+            llvm::LLVMRustBuildInvoke(
+                self.llbuilder,
+                llfn,
+                args.as_ptr(),
+                args.len() as c_uint,
+                then,
+                catch,
+                bundle,
+                UNNAMED,
+            )
+        }
+    }
+
+    fn unreachable(&mut self) {
+        unsafe {
+            llvm::LLVMBuildUnreachable(self.llbuilder);
+        }
+    }
+
+    // One-line wrappers for the plain arithmetic/bitwise value instructions,
+    // generated by the macro defined above this impl.
+    builder_methods_for_value_instructions! {
+        add(a, b) => LLVMBuildAdd,
+        fadd(a, b) => LLVMBuildFAdd,
+        sub(a, b) => LLVMBuildSub,
+        fsub(a, b) => LLVMBuildFSub,
+        mul(a, b) => LLVMBuildMul,
+        fmul(a, b) => LLVMBuildFMul,
+        udiv(a, b) => LLVMBuildUDiv,
+        exactudiv(a, b) => LLVMBuildExactUDiv,
+        sdiv(a, b) => LLVMBuildSDiv,
+        exactsdiv(a, b) => LLVMBuildExactSDiv,
+        fdiv(a, b) => LLVMBuildFDiv,
+        urem(a, b) => LLVMBuildURem,
+        srem(a, b) => LLVMBuildSRem,
+        frem(a, b) => LLVMBuildFRem,
+        shl(a, b) => LLVMBuildShl,
+        lshr(a, b) => LLVMBuildLShr,
+        ashr(a, b) => LLVMBuildAShr,
+        and(a, b) => LLVMBuildAnd,
+        or(a, b) => LLVMBuildOr,
+        xor(a, b) => LLVMBuildXor,
+        neg(x) => LLVMBuildNeg,
+        fneg(x) => LLVMBuildFNeg,
+        not(x) => LLVMBuildNot,
+        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
+        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
+        unchecked_ssub(x, y) => LLVMBuildNSWSub,
+        unchecked_usub(x, y) => LLVMBuildNUWSub,
+        unchecked_smul(x, y) => LLVMBuildNSWMul,
+        unchecked_umul(x, y) => LLVMBuildNUWMul,
+    }
+
+    // "Fast" float ops: same instruction as the plain version, then the
+    // unsafe-algebra (fast-math) flag is set on the result.
+    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+
+    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+
+    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+
+    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+
+    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+
+    // Overflow-checked add/sub/mul via the `llvm.{s,u}{add,sub,mul}.with.overflow.iN`
+    // intrinsics. Returns `(result, overflow_flag)` as two values extracted
+    // from the intrinsic's result pair.
+    fn checked_binop(
+        &mut self,
+        oop: OverflowOp,
+        ty: Ty<'_>,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> (Self::Value, Self::Value) {
+        use rustc_ast::IntTy::*;
+        use rustc_ast::UintTy::*;
+        use rustc_middle::ty::{Int, Uint};
+
+        // isize/usize are normalized to the target's pointer-width integer
+        // so the intrinsic-name match below only deals with fixed widths.
+        let new_kind = match ty.kind() {
+            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.ptr_width)),
+            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.ptr_width)),
+            t @ (Uint(_) | Int(_)) => t.clone(),
+            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+        };
+
+        let name = match oop {
+            OverflowOp::Add => match new_kind {
+                Int(I8) => "llvm.sadd.with.overflow.i8",
+                Int(I16) => "llvm.sadd.with.overflow.i16",
+                Int(I32) => "llvm.sadd.with.overflow.i32",
+                Int(I64) => "llvm.sadd.with.overflow.i64",
+                Int(I128) => "llvm.sadd.with.overflow.i128",
+
+                Uint(U8) => "llvm.uadd.with.overflow.i8",
+                Uint(U16) => "llvm.uadd.with.overflow.i16",
+                Uint(U32) => "llvm.uadd.with.overflow.i32",
+                Uint(U64) => "llvm.uadd.with.overflow.i64",
+                Uint(U128) => "llvm.uadd.with.overflow.i128",
+
+                _ => unreachable!(),
+            },
+            OverflowOp::Sub => match new_kind {
+                Int(I8) => "llvm.ssub.with.overflow.i8",
+                Int(I16) => "llvm.ssub.with.overflow.i16",
+                Int(I32) => "llvm.ssub.with.overflow.i32",
+                Int(I64) => "llvm.ssub.with.overflow.i64",
+                Int(I128) => "llvm.ssub.with.overflow.i128",
+
+                Uint(U8) => "llvm.usub.with.overflow.i8",
+                Uint(U16) => "llvm.usub.with.overflow.i16",
+                Uint(U32) => "llvm.usub.with.overflow.i32",
+                Uint(U64) => "llvm.usub.with.overflow.i64",
+                Uint(U128) => "llvm.usub.with.overflow.i128",
+
+                _ => unreachable!(),
+            },
+            OverflowOp::Mul => match new_kind {
+                Int(I8) => "llvm.smul.with.overflow.i8",
+                Int(I16) => "llvm.smul.with.overflow.i16",
+                Int(I32) => "llvm.smul.with.overflow.i32",
+                Int(I64) => "llvm.smul.with.overflow.i64",
+                Int(I128) => "llvm.smul.with.overflow.i128",
+
+                Uint(U8) => "llvm.umul.with.overflow.i8",
+                Uint(U16) => "llvm.umul.with.overflow.i16",
+                Uint(U32) => "llvm.umul.with.overflow.i32",
+                Uint(U64) => "llvm.umul.with.overflow.i64",
+                Uint(U128) => "llvm.umul.with.overflow.i128",
+
+                _ => unreachable!(),
+            },
+        };
+
+        let intrinsic = self.get_intrinsic(&name);
+        let res = self.call(intrinsic, &[lhs, rhs], None);
+        // Element 0 is the wrapped result, element 1 is the i1 overflow bit.
+        (self.extract_value(res, 0), self.extract_value(res, 1))
+    }
+
+    // Rust `bool` is stored as i8 in memory but used as i1 in SSA values;
+    // these two methods convert between the representations.
+    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+        if self.cx().val_ty(val) == self.cx().type_i1() {
+            self.zext(val, self.cx().type_i8())
+        } else {
+            val
+        }
+    }
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
+        if scalar.is_bool() {
+            return self.trunc(val, self.cx().type_i1());
+        }
+        val
+    }
+
+    // Static alloca: always emitted in the function's entry block so LLVM's
+    // mem2reg can promote it; a temporary builder is positioned there.
+    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
+        let mut bx = Builder::with_cx(self.cx);
+        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
+        bx.dynamic_alloca(ty, align)
+    }
+
+    // Alloca at the current insertion point (may execute more than once).
+    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
+        unsafe {
+            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
+            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
+            alloca
+        }
+    }
+
+    fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
+        unsafe {
+            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
+            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
+            alloca
+        }
+    }
+
+    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
+        unsafe {
+            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
+            load
+        }
+    }
+
+    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
+        unsafe {
+            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            llvm::LLVMSetVolatile(load, llvm::True);
+            load
+        }
+    }
+
+    fn atomic_load(
+        &mut self,
+        ptr: &'ll Value,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
+        size: Size,
+    ) -> &'ll Value {
+        unsafe {
+            let load = llvm::LLVMRustBuildAtomicLoad(
+                self.llbuilder,
+                ptr,
+                UNNAMED,
+                AtomicOrdering::from_generic(order),
+            );
+            // LLVM requires the alignment of atomic loads to be at least the size of the type.
+            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
+            load
+        }
+    }
+
+    // Loads a place into an operand, choosing the representation from the
+    // layout: ZST (no load), immediate scalar, scalar pair, or by-ref.
+    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
+        debug!("PlaceRef::load: {:?}", place);
+
+        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+
+        if place.layout.is_zst() {
+            return OperandRef::new_zst(self, place.layout);
+        }
+
+        // Attaches `!range` / `!nonnull` metadata derived from the scalar's
+        // valid range so LLVM can optimize on the value's invariants.
+        fn scalar_load_metadata<'a, 'll, 'tcx>(
+            bx: &mut Builder<'a, 'll, 'tcx>,
+            load: &'ll Value,
+            scalar: &abi::Scalar,
+        ) {
+            let vr = scalar.valid_range.clone();
+            match scalar.value {
+                abi::Int(..) => {
+                    let range = scalar.valid_range_exclusive(bx);
+                    if range.start != range.end {
+                        bx.range_metadata(load, range);
+                    }
+                }
+                abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
+                    bx.nonnull_metadata(load);
+                }
+                _ => {}
+            }
+        }
+
+        let val = if let Some(llextra) = place.llextra {
+            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        } else if place.layout.is_llvm_immediate() {
+            let mut const_llval = None;
+            unsafe {
+                // Reading a constant global: use its initializer directly
+                // instead of emitting a load.
+                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
+                        const_llval = llvm::LLVMGetInitializer(global);
+                    }
+                }
+            }
+            let llval = const_llval.unwrap_or_else(|| {
+                let load = self.load(place.llval, place.align);
+                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+                    scalar_load_metadata(self, load, scalar);
+                }
+                load
+            });
+            OperandValue::Immediate(self.to_immediate(llval, place.layout))
+        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
+
+            let mut load = |i, scalar: &abi::Scalar, align| {
+                let llptr = self.struct_gep(place.llval, i as u64);
+                let load = self.load(llptr, align);
+                scalar_load_metadata(self, load, scalar);
+                self.to_immediate_scalar(load, scalar)
+            };
+
+            OperandValue::Pair(
+                load(0, a, place.align),
+                // The second half sits at `b_offset`, which may reduce the
+                // alignment we can assume for it.
+                load(1, b, place.align.restrict_for_offset(b_offset)),
+            )
+        } else {
+            OperandValue::Ref(place.llval, None, place.align)
+        };
+
+        OperandRef { val, layout: place.layout }
+    }
+
+    // Emits a header/body/next loop that stores `cg_elem` into each of the
+    // `count` elements of `dest` (used for `[x; N]` repeat expressions).
+    // Returns the builder positioned in the loop-exit block.
+    fn write_operand_repeatedly(
+        mut self,
+        cg_elem: OperandRef<'tcx, &'ll Value>,
+        count: u64,
+        dest: PlaceRef<'tcx, &'ll Value>,
+    ) -> Self {
+        let zero = self.const_usize(0);
+        let count = self.const_usize(count);
+        let start = dest.project_index(&mut self, zero).llval;
+        let end = dest.project_index(&mut self, count).llval;
+
+        let mut header_bx = self.build_sibling_block("repeat_loop_header");
+        let mut body_bx = self.build_sibling_block("repeat_loop_body");
+        let next_bx = self.build_sibling_block("repeat_loop_next");
+
+        self.br(header_bx.llbb());
+        // `current` is a phi: `start` on entry, the incremented pointer on
+        // each back-edge (added below after the body is built).
+        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
+
+        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
+        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
+
+        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+        cg_elem
+            .val
+            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
+
+        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
+        body_bx.br(header_bx.llbb());
+        header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
+
+        next_bx
+    }
+
+    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
+        if self.sess().target.target.arch == "amdgpu" {
+            // amdgpu/LLVM does something weird and thinks a i64 value is
+            // split into a v2i32, halving the bitwidth LLVM expects,
+            // tripping an assertion. So, for now, just disable this
+            // optimization.
+            return;
+        }
+
+        unsafe {
+            let llty = self.cx.val_ty(load);
+            let v = [
+                self.cx.const_uint_big(llty, range.start),
+                self.cx.const_uint_big(llty, range.end),
+            ];
+
+            llvm::LLVMSetMetadata(
+                load,
+                llvm::MD_range as c_uint,
+                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
+            );
+        }
+    }
+
+    fn nonnull_metadata(&mut self, load: &'ll Value) {
+        unsafe {
+            llvm::LLVMSetMetadata(
+                load,
+                llvm::MD_nonnull as c_uint,
+                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+            );
+        }
+    }
+
+    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
+        self.store_with_flags(val, ptr, align, MemFlags::empty())
+    }
+
+    // Store honoring UNALIGNED / VOLATILE / NONTEMPORAL memory flags.
+    fn store_with_flags(
+        &mut self,
+        val: &'ll Value,
+        ptr: &'ll Value,
+        align: Align,
+        flags: MemFlags,
+    ) -> &'ll Value {
+        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
+        let ptr = self.check_store(val, ptr);
+        unsafe {
+            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
+            let align =
+                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
+            llvm::LLVMSetAlignment(store, align);
+            if flags.contains(MemFlags::VOLATILE) {
+                llvm::LLVMSetVolatile(store, llvm::True);
+            }
+            if flags.contains(MemFlags::NONTEMPORAL) {
+                // According to LLVM [1] building a nontemporal store must
+                // *always* point to a metadata value of the integer 1.
+                //
+                // [1]: http://llvm.org/docs/LangRef.html#store-instruction
+                let one = self.cx.const_i32(1);
+                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
+                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
+            }
+            store
+        }
+    }
+
+    fn atomic_store(
+        &mut self,
+        val: &'ll Value,
+        ptr: &'ll Value,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
+        size: Size,
+    ) {
+        debug!("Store {:?} -> {:?}", val, ptr);
+        let ptr = self.check_store(val, ptr);
+        unsafe {
+            let store = llvm::LLVMRustBuildAtomicStore(
+                self.llbuilder,
+                val,
+                ptr,
+                AtomicOrdering::from_generic(order),
+            );
+            // LLVM requires the alignment of atomic stores to be at least the size of the type.
+            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
+        }
+    }
+
+    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
+        unsafe {
+            llvm::LLVMBuildGEP(
+                self.llbuilder,
+                ptr,
+                indices.as_ptr(),
+                indices.len() as c_uint,
+                UNNAMED,
+            )
+        }
+    }
+
+    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
+        unsafe {
+            llvm::LLVMBuildInBoundsGEP(
+                self.llbuilder,
+                ptr,
+                indices.as_ptr(),
+                indices.len() as c_uint,
+                UNNAMED,
+            )
+        }
+    }
+
+    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
+        // Guard against silent truncation when narrowing u64 -> c_uint.
+        assert_eq!(idx as c_uint as u64, idx);
+        unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) }
+    }
+
+    /* Casts */
+    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    // Returns `Some` only when a target-native saturating conversion is
+    // available; callers fall back to the generic lowering on `None`.
+    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+        // WebAssembly has saturating floating point to integer casts if the
+        // `nontrapping-fptoint` target feature is activated. We'll use those if
+        // they are available.
+        if self.sess().target.target.arch == "wasm32"
+            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
+        {
+            let src_ty = self.cx.val_ty(val);
+            let float_width = self.cx.float_width(src_ty);
+            let int_width = self.cx.int_width(dest_ty);
+            let name = match (int_width, float_width) {
+                (32, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f32"),
+                (32, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f64"),
+                (64, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f32"),
+                (64, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f64"),
+                _ => None,
+            };
+            if let Some(name) = name {
+                let intrinsic = self.get_intrinsic(name);
+                return Some(self.call(intrinsic, &[val], None));
+            }
+        }
+        None
+    }
+
+    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+        // WebAssembly has saturating floating point to integer casts if the
+        // `nontrapping-fptoint` target feature is activated. We'll use those if
+        // they are available.
+        if self.sess().target.target.arch == "wasm32"
+            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
+        {
+            let src_ty = self.cx.val_ty(val);
+            let float_width = self.cx.float_width(src_ty);
+            let int_width = self.cx.int_width(dest_ty);
+            let name = match (int_width, float_width) {
+                (32, 32) => Some("llvm.wasm.trunc.saturate.signed.i32.f32"),
+                (32, 64) => Some("llvm.wasm.trunc.saturate.signed.i32.f64"),
+                (64, 32) => Some("llvm.wasm.trunc.saturate.signed.i64.f32"),
+                (64, 64) => Some("llvm.wasm.trunc.saturate.signed.i64.f64"),
+                _ => None,
+            };
+            if let Some(name) = name {
+                let intrinsic = self.get_intrinsic(name);
+                return Some(self.call(intrinsic, &[val], None));
+            }
+        }
+        None
+    }
+
+    fn fptosui_may_trap(&self, val: &'ll Value, dest_ty: &'ll Type) -> bool {
+        // Most of the time we'll be generating the `fptosi` or `fptoui`
+        // instruction for floating-point-to-integer conversions. These
+        // instructions by definition in LLVM do not trap. For the WebAssembly
+        // target, however, we'll lower in some cases to intrinsic calls instead
+        // which may trap. If we detect that this is a situation where we'll be
+        // using the intrinsics then we report that the call map trap, which
+        // callers might need to handle.
+        if !self.wasm_and_missing_nontrapping_fptoint() {
+            return false;
+        }
+        let src_ty = self.cx.val_ty(val);
+        let float_width = self.cx.float_width(src_ty);
+        let int_width = self.cx.int_width(dest_ty);
+        match (int_width, float_width) {
+            (32, 32) | (32, 64) | (64, 32) | (64, 64) => true,
+            _ => false,
+        }
+    }
+
+    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        // When we can, use the native wasm intrinsics which have tighter
+        // codegen. Note that this has a semantic difference in that the
+        // intrinsic can trap whereas `fptoui` never traps. That difference,
+        // however, is handled by `fptosui_may_trap` above.
+        //
+        // Note that we skip the wasm intrinsics for vector types where `fptoui`
+        // must be used instead.
+        if self.wasm_and_missing_nontrapping_fptoint() {
+            let src_ty = self.cx.val_ty(val);
+            if self.cx.type_kind(src_ty) != TypeKind::Vector {
+                let float_width = self.cx.float_width(src_ty);
+                let int_width = self.cx.int_width(dest_ty);
+                let name = match (int_width, float_width) {
+                    (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
+                    (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
+                    (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
+                    (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
+                    _ => None,
+                };
+                if let Some(name) = name {
+                    let intrinsic = self.get_intrinsic(name);
+                    return self.call(intrinsic, &[val], None);
+                }
+            }
+        }
+        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    // Same wasm special-casing as `fptoui` above, for the signed conversion.
+    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        if self.wasm_and_missing_nontrapping_fptoint() {
+            let src_ty = self.cx.val_ty(val);
+            if self.cx.type_kind(src_ty) != TypeKind::Vector {
+                let float_width = self.cx.float_width(src_ty);
+                let int_width = self.cx.int_width(dest_ty);
+                let name = match (int_width, float_width) {
+                    (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
+                    (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
+                    (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
+                    (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
+                    _ => None,
+                };
+                if let Some(name) = name {
+                    let intrinsic = self.get_intrinsic(name);
+                    return self.call(intrinsic, &[val], None);
+                }
+            }
+        }
+        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
+    }
+
+    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    /* Comparisons */
+    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        // Convert the backend-generic predicate into LLVM's enumeration.
+        let op = llvm::IntPredicate::from_generic(op);
+        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
+    }
+
+    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
+    }
+
+    /* Miscellaneous instructions */
+    fn memcpy(
+        &mut self,
+        dst: &'ll Value,
+        dst_align: Align,
+        src: &'ll Value,
+        src_align: Align,
+        size: &'ll Value,
+        flags: MemFlags,
+    ) {
+        if flags.contains(MemFlags::NONTEMPORAL) {
+            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+            let val = self.load(src, src_align);
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
+            self.store_with_flags(val, ptr, dst_align, flags);
+            return;
+        }
+        let size = self.intcast(size, self.type_isize(), false);
+        let is_volatile = flags.contains(MemFlags::VOLATILE);
+        // The memcpy intrinsic operates on i8 pointers.
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
+        unsafe {
+            llvm::LLVMRustBuildMemCpy(
+                self.llbuilder,
+                dst,
+                dst_align.bytes() as c_uint,
+                src,
+                src_align.bytes() as c_uint,
+                size,
+                is_volatile,
+            );
+        }
+    }
+
+    fn memmove(
+        &mut self,
+        dst: &'ll Value,
+        dst_align: Align,
+        src: &'ll Value,
+        src_align: Align,
+        size: &'ll Value,
+        flags: MemFlags,
+    ) {
+        if flags.contains(MemFlags::NONTEMPORAL) {
+            // HACK(nox): This is inefficient but there is no nontemporal memmove.
+            let val = self.load(src, src_align);
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
+            self.store_with_flags(val, ptr, dst_align, flags);
+            return;
+        }
+        let size = self.intcast(size, self.type_isize(), false);
+        let is_volatile = flags.contains(MemFlags::VOLATILE);
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
+        unsafe {
+            llvm::LLVMRustBuildMemMove(
+                self.llbuilder,
+                dst,
+                dst_align.bytes() as c_uint,
+                src,
+                src_align.bytes() as c_uint,
+                size,
+                is_volatile,
+            );
+        }
+    }
+
+    fn memset(
+        &mut self,
+        ptr: &'ll Value,
+        fill_byte: &'ll Value,
+        size: &'ll Value,
+        align: Align,
+        flags: MemFlags,
+    ) {
+        // NOTE(review): unlike memcpy/memmove above, NONTEMPORAL is not
+        // special-cased here; only VOLATILE is honored.
+        let is_volatile = flags.contains(MemFlags::VOLATILE);
+        let ptr = self.pointercast(ptr, self.type_i8p());
+        unsafe {
+            llvm::LLVMRustBuildMemSet(
+                self.llbuilder,
+                ptr,
+                align.bytes() as c_uint,
+                fill_byte,
+                size,
+                is_volatile,
+            );
+        }
+    }
+
+    fn select(
+        &mut self,
+        cond: &'ll Value,
+        then_val: &'ll Value,
+        else_val: &'ll Value,
+    ) -> &'ll Value {
+        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
+    }
+
+    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
+    }
+
+    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
+    }
+
+    // Broadcasts `elt` to all lanes: insert into lane 0 of an undef vector,
+    // then shuffle with a zero mask to replicate it.
+    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
+        unsafe {
+            let elt_ty = self.cx.val_ty(elt);
+            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
+            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
+            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
+            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
+        }
+    }
+
+    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
+        // Guard against silent truncation when narrowing u64 -> c_uint.
+        assert_eq!(idx as c_uint as u64, idx);
+        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
+    }
+
+    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
+        assert_eq!(idx as c_uint as u64, idx);
+        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
+    }
+
+    // --- Exception handling: landing pads (DWARF EH) and funclets (MSVC EH) --
+
+    fn landing_pad(
+        &mut self,
+        ty: &'ll Type,
+        pers_fn: &'ll Value,
+        num_clauses: usize,
+    ) -> &'ll Value {
+        unsafe {
+            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
+        }
+    }
+
+    fn set_cleanup(&mut self, landing_pad: &'ll Value) {
+        unsafe {
+            llvm::LLVMSetCleanup(landing_pad, llvm::True);
+        }
+    }
+
+    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
+    }
+
+    // The `LLVMRust*` pad/ret builders return `None` when the LLVM build
+    // lacks the corresponding EH support, hence the `expect`s below.
+    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
+        let name = const_cstr!("cleanuppad");
+        let ret = unsafe {
+            llvm::LLVMRustBuildCleanupPad(
+                self.llbuilder,
+                parent,
+                args.len() as c_uint,
+                args.as_ptr(),
+                name.as_ptr(),
+            )
+        };
+        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
+    }
+
+    fn cleanup_ret(
+        &mut self,
+        funclet: &Funclet<'ll>,
+        unwind: Option<&'ll BasicBlock>,
+    ) -> &'ll Value {
+        let ret =
+            unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
+        ret.expect("LLVM does not have support for cleanupret")
+    }
+
+    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
+        let name = const_cstr!("catchpad");
+        let ret = unsafe {
+            llvm::LLVMRustBuildCatchPad(
+                self.llbuilder,
+                parent,
+                args.len() as c_uint,
+                args.as_ptr(),
+                name.as_ptr(),
+            )
+        };
+        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
+    }
+
+    fn catch_switch(
+        &mut self,
+        parent: Option<&'ll Value>,
+        unwind: Option<&'ll BasicBlock>,
+        num_handlers: usize,
+    ) -> &'ll Value {
+        let name = const_cstr!("catchswitch");
+        let ret = unsafe {
+            llvm::LLVMRustBuildCatchSwitch(
+                self.llbuilder,
+                parent,
+                unwind,
+                num_handlers as c_uint,
+                name.as_ptr(),
+            )
+        };
+        ret.expect("LLVM does not have support for catchswitch")
+    }
+
+    fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
+        unsafe {
+            llvm::LLVMRustAddHandler(catch_switch, handler);
+        }
+    }
+
+    fn set_personality_fn(&mut self, personality: &'ll Value) {
+        unsafe {
+            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
+        }
+    }
+
+ // Atomic Operations
+ fn atomic_cmpxchg(
+ &mut self,
+ dst: &'ll Value,
+ cmp: &'ll Value,
+ src: &'ll Value,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
+ failure_order: rustc_codegen_ssa::common::AtomicOrdering,
+ weak: bool,
+ ) -> &'ll Value {
+ let weak = if weak { llvm::True } else { llvm::False };
+ unsafe {
+ llvm::LLVMRustBuildAtomicCmpXchg(
+ self.llbuilder,
+ dst,
+ cmp,
+ src,
+ AtomicOrdering::from_generic(order),
+ AtomicOrdering::from_generic(failure_order),
+ weak,
+ )
+ }
+ }
+    /// Emits an `atomicrmw` instruction applying `op` to the memory at `dst`
+    /// with operand `src` under the given ordering; yields the old value.
+    fn atomic_rmw(
+        &mut self,
+        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
+        dst: &'ll Value,
+        src: &'ll Value,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
+    ) -> &'ll Value {
+        unsafe {
+            llvm::LLVMBuildAtomicRMW(
+                self.llbuilder,
+                AtomicRmwBinOp::from_generic(op),
+                dst,
+                src,
+                AtomicOrdering::from_generic(order),
+                // NOTE(review): presumably the LLVM-C `SingleThread` flag, so
+                // `False` = cross-thread ordering — confirm against the C API.
+                False,
+            )
+        }
+    }
+
+    /// Emits a `fence` instruction with the given memory ordering and
+    /// synchronization scope (both converted from their generic forms).
+    fn atomic_fence(
+        &mut self,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
+        scope: rustc_codegen_ssa::common::SynchronizationScope,
+    ) {
+        unsafe {
+            llvm::LLVMRustBuildAtomicFence(
+                self.llbuilder,
+                AtomicOrdering::from_generic(order),
+                SynchronizationScope::from_generic(scope),
+            );
+        }
+    }
+
+    /// Attaches `!invariant.load` metadata (an empty MD node) to `load`,
+    /// telling LLVM the loaded memory location is never mutated.
+    fn set_invariant_load(&mut self, load: &'ll Value) {
+        unsafe {
+            llvm::LLVMSetMetadata(
+                load,
+                llvm::MD_invariant_load as c_uint,
+                // The metadata payload is an empty node; only its presence matters.
+                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+            );
+        }
+    }
+
+    /// Marks the `size` bytes at `ptr` as starting their lifetime; see
+    /// `call_lifetime_intrinsic` for the zero-size and disabled-marker cases.
+    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
+        self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
+    }
+
+    /// Marks the `size` bytes at `ptr` as ending their lifetime; see
+    /// `call_lifetime_intrinsic` for the zero-size and disabled-marker cases.
+    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
+        self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
+    }
+
+    /// Emits a call to the `llvm.instrprof.increment` intrinsic, bumping
+    /// profiling counter `index` (out of `num_counters`) for the function
+    /// identified by `fn_name` and `hash`.
+    fn instrprof_increment(
+        &mut self,
+        fn_name: &'ll Value,
+        hash: &'ll Value,
+        num_counters: &'ll Value,
+        index: &'ll Value,
+    ) {
+        debug!(
+            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
+            fn_name, hash, num_counters, index
+        );
+
+        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
+        let args = &[fn_name, hash, num_counters, index];
+        // `check_call` bitcasts any argument whose LLVM type disagrees with
+        // the intrinsic's signature.
+        let args = self.check_call("call", llfn, args);
+
+        unsafe {
+            // The counter update's value is unused; the call is emitted only
+            // for its side effect.
+            let _ = llvm::LLVMRustBuildCall(
+                self.llbuilder,
+                llfn,
+                args.as_ptr() as *const &llvm::Value,
+                args.len() as c_uint,
+                None,
+            );
+        }
+    }
+
+    /// Emits a call to `llfn`, bitcasting mismatched arguments as needed and
+    /// attaching the MSVC funclet operand bundle when one is supplied.
+    fn call(
+        &mut self,
+        llfn: &'ll Value,
+        args: &[&'ll Value],
+        funclet: Option<&Funclet<'ll>>,
+    ) -> &'ll Value {
+        debug!("call {:?} with args ({:?})", llfn, args);
+
+        // Adjust argument types to match the callee's LLVM signature.
+        let args = self.check_call("call", llfn, args);
+        let bundle = funclet.map(|f| f.bundle());
+
+        unsafe {
+            llvm::LLVMRustBuildCall(
+                self.llbuilder,
+                llfn,
+                args.as_ptr() as *const &llvm::Value,
+                args.len() as c_uint,
+                bundle.as_ref().map(|b| &*b.raw),
+            )
+        }
+    }
+
+    /// Zero-extends `val` to the wider integer type `dest_ty`.
+    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    /// The codegen context this builder operates within.
+    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
+        self.cx
+    }
+
+    /// Deletes `bb` from its containing function.
+    ///
+    /// # Safety
+    /// NOTE(review): presumably `bb` must have no remaining uses after this
+    /// call — confirm against the LLVM-C `LLVMDeleteBasicBlock` contract.
+    unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
+        llvm::LLVMDeleteBasicBlock(bb);
+    }
+
+    /// Applies the `noinline` attribute to the call site `llret`, preventing
+    /// LLVM from inlining the callee at this particular call.
+    fn do_not_inline(&mut self, llret: &'ll Value) {
+        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
+    }
+}
+
+impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
+    /// Returns the LLVM value backing the static item `def_id`.
+    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
+        // Forward to the `get_static` method of `CodegenCx`
+        self.cx().get_static(def_id)
+    }
+}
+
+impl Builder<'a, 'll, 'tcx> {
+    /// Returns the function that contains the builder's current basic block.
+    pub fn llfn(&self) -> &'ll Value {
+        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
+    }
+
+    /// Moves the insertion point to the start of `llbb`, before any existing
+    /// instructions (the default positioning is at the end of a block).
+    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
+        unsafe {
+            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
+        }
+    }
+
+    /// Emits a floating-point minimum of `lhs` and `rhs`.
+    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
+    }
+
+    /// Emits a floating-point maximum of `lhs` and `rhs`.
+    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
+    }
+
+    /// Emits an `insertelement` instruction: `vec` with lane `idx` replaced
+    /// by `elt`.
+    pub fn insert_element(
+        &mut self,
+        vec: &'ll Value,
+        elt: &'ll Value,
+        idx: &'ll Value,
+    ) -> &'ll Value {
+        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
+    }
+
+    /// Emits a `shufflevector` instruction combining `v1` and `v2` according
+    /// to `mask`.
+    pub fn shuffle_vector(
+        &mut self,
+        v1: &'ll Value,
+        v2: &'ll Value,
+        mask: &'ll Value,
+    ) -> &'ll Value {
+        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
+    }
+
+    /// Horizontal FP-add reduction of `src`, seeded with `acc` (strict order).
+    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
+    }
+    /// Horizontal FP-mul reduction of `src`, seeded with `acc` (strict order).
+    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
+    }
+    /// Like `vector_reduce_fadd`, but marks the result with unsafe-algebra
+    /// (fast-math) flags so LLVM may reassociate the reduction.
+    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+    /// Like `vector_reduce_fmul`, but with unsafe-algebra (fast-math) flags.
+    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+    /// Horizontal integer-add reduction of the vector `src`.
+    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
+    }
+    /// Horizontal integer-mul reduction of the vector `src`.
+    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
+    }
+    /// Horizontal bitwise-AND reduction of the vector `src`.
+    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
+    }
+    /// Horizontal bitwise-OR reduction of the vector `src`.
+    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
+    }
+    /// Horizontal bitwise-XOR reduction of the vector `src`.
+    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
+    }
+    /// Horizontal FP-min reduction; NaNs are not assumed absent.
+    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
+        }
+    }
+    /// Horizontal FP-max reduction; NaNs are not assumed absent.
+    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
+        }
+    }
+    /// FP-min reduction assuming no NaNs, with fast-math flags set.
+    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr =
+                llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+    /// FP-max reduction assuming no NaNs, with fast-math flags set.
+    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr =
+                llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+    /// Horizontal integer-min reduction, signed or unsigned per `is_signed`.
+    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
+    }
+    /// Horizontal integer-max reduction, signed or unsigned per `is_signed`.
+    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
+    }
+
+    /// Appends a clause to an existing `landingpad` instruction.
+    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
+        unsafe {
+            llvm::LLVMAddClause(landing_pad, clause);
+        }
+    }
+
+    /// Emits a `catchret` terminator out of `funclet`, continuing at `unwind`.
+    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
+        let ret =
+            unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
+        ret.expect("LLVM does not have support for catchret")
+    }
+
+    /// Checks that storing `val` through `ptr` is type-correct, inserting a
+    /// pointer bitcast of `ptr` when its pointee type differs from `val`'s.
+    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
+        let dest_ptr_ty = self.cx.val_ty(ptr);
+        let stored_ty = self.cx.val_ty(val);
+        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
+
+        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
+
+        if dest_ptr_ty == stored_ptr_ty {
+            ptr
+        } else {
+            debug!(
+                "type mismatch in store. \
+                    Expected {:?}, got {:?}; inserting bitcast",
+                dest_ptr_ty, stored_ptr_ty
+            );
+            self.bitcast(ptr, stored_ptr_ty)
+        }
+    }
+
+    /// Checks `args` against `llfn`'s LLVM signature, bitcasting any argument
+    /// whose type disagrees; returns the (possibly adjusted) argument list.
+    /// `typ` is only used to label the panic message.
+    fn check_call<'b>(
+        &mut self,
+        typ: &str,
+        llfn: &'ll Value,
+        args: &'b [&'ll Value],
+    ) -> Cow<'b, [&'ll Value]> {
+        let mut fn_ty = self.cx.val_ty(llfn);
+        // Strip off pointers
+        while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
+            fn_ty = self.cx.element_type(fn_ty);
+        }
+
+        assert!(
+            self.cx.type_kind(fn_ty) == TypeKind::Function,
+            "builder::{} not passed a function, but {:?}",
+            typ,
+            fn_ty
+        );
+
+        let param_tys = self.cx.func_params_types(fn_ty);
+
+        let all_args_match = param_tys
+            .iter()
+            .zip(args.iter().map(|&v| self.val_ty(v)))
+            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
+
+        // Fast path: no adjustment needed, hand back the borrowed slice.
+        if all_args_match {
+            return Cow::Borrowed(args);
+        }
+
+        let casted_args: Vec<_> = param_tys
+            .into_iter()
+            .zip(args.iter())
+            .enumerate()
+            .map(|(i, (expected_ty, &actual_val))| {
+                let actual_ty = self.val_ty(actual_val);
+                if expected_ty != actual_ty {
+                    debug!(
+                        "type mismatch in function call of {:?}. \
+                            Expected {:?} for param {}, got {:?}; injecting bitcast",
+                        llfn, expected_ty, i, actual_ty
+                    );
+                    self.bitcast(actual_val, expected_ty)
+                } else {
+                    actual_val
+                }
+            })
+            .collect();
+
+        Cow::Owned(casted_args)
+    }
+
+    /// Emits a `va_arg` instruction reading the next variadic argument of
+    /// type `ty` from the argument list `list`.
+    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
+    }
+
+    /// Shared implementation of `lifetime_start`/`lifetime_end`: calls the
+    /// named lifetime intrinsic on `ptr` (cast to `i8*`) for `size` bytes.
+    /// Does nothing for zero-sized ranges or when the session disables
+    /// lifetime markers.
+    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
+        let size = size.bytes();
+        if size == 0 {
+            return;
+        }
+
+        if !self.cx().sess().emit_lifetime_markers() {
+            return;
+        }
+
+        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
+
+        let ptr = self.pointercast(ptr, self.cx.type_i8p());
+        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
+    }
+
+    /// Builds a `phi` node of type `ty` with one incoming value per
+    /// predecessor block; `vals` and `bbs` must have equal lengths.
+    pub(crate) fn phi(
+        &mut self,
+        ty: &'ll Type,
+        vals: &[&'ll Value],
+        bbs: &[&'ll BasicBlock],
+    ) -> &'ll Value {
+        assert_eq!(vals.len(), bbs.len());
+        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
+        unsafe {
+            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
+            phi
+        }
+    }
+
+    /// Adds a single incoming (`val`, `bb`) edge to an existing `phi` node.
+    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
+        unsafe {
+            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
+        }
+    }
+
+    /// True when targeting wasm32 without the `nontrapping-fptoint` target
+    /// feature enabled.
+    fn wasm_and_missing_nontrapping_fptoint(&self) -> bool {
+        self.sess().target.target.arch == "wasm32"
+            && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
new file mode 100644
index 0000000..4afd906
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -0,0 +1,192 @@
+//! Handles codegen of callees as well as other call-related
+//! things. Callees are a superset of normal rust values and sometimes
+//! have different representations. In particular, top-level fn items
+//! and methods are represented as just a fn ptr and not a full
+//! closure.
+
+use crate::abi::{FnAbi, FnAbiLlvmExt};
+use crate::attributes;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::value::Value;
+use rustc_codegen_ssa::traits::*;
+use tracing::debug;
+
+use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
+use rustc_middle::ty::{self, Instance, TypeFoldable};
+
+/// Codegens a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `cx`: the crate context
+/// - `instance`: the instance to be instantiated
+pub fn get_fn(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value {
+    let tcx = cx.tcx();
+
+    debug!("get_fn(instance={:?})", instance);
+
+    // By this point the instance must be fully monomorphized.
+    assert!(!instance.substs.needs_infer());
+    assert!(!instance.substs.has_escaping_bound_vars());
+
+    // Fast path: we already declared (or defined) this instance.
+    if let Some(&llfn) = cx.instances.borrow().get(&instance) {
+        return llfn;
+    }
+
+    let sym = tcx.symbol_name(instance).name;
+    debug!(
+        "get_fn({:?}: {:?}) => {}",
+        instance,
+        instance.ty(cx.tcx(), ty::ParamEnv::reveal_all()),
+        sym
+    );
+
+    // NOTE(review): `&[]` appears to mean "no extra argument types" —
+    // confirm against `FnAbi::of_instance`.
+    let fn_abi = FnAbi::of_instance(cx, instance, &[]);
+
+    let llfn = if let Some(llfn) = cx.get_declared_value(&sym) {
+        // Create a fn pointer with the new signature.
+        let llptrty = fn_abi.ptr_to_llvm_type(cx);
+
+        // This is subtle and surprising, but sometimes we have to bitcast
+        // the resulting fn pointer. The reason has to do with external
+        // functions. If you have two crates that both bind the same C
+        // library, they may not use precisely the same types: for
+        // example, they will probably each declare their own structs,
+        // which are distinct types from LLVM's point of view (nominal
+        // types).
+        //
+        // Now, if those two crates are linked into an application, and
+        // they contain inlined code, you can wind up with a situation
+        // where both of those functions wind up being loaded into this
+        // application simultaneously. In that case, the same function
+        // (from LLVM's point of view) requires two types. But of course
+        // LLVM won't allow one function to have two types.
+        //
+        // What we currently do, therefore, is declare the function with
+        // one of the two types (whichever happens to come first) and then
+        // bitcast as needed when the function is referenced to make sure
+        // it has the type we expect.
+        //
+        // This can occur on either a crate-local or crate-external
+        // reference. It also occurs when testing libcore and in some
+        // other weird situations. Annoying.
+        if cx.val_ty(llfn) != llptrty {
+            debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
+            cx.const_ptrcast(llfn, llptrty)
+        } else {
+            debug!("get_fn: not casting pointer!");
+            llfn
+        }
+    } else {
+        // First reference to this symbol in the module: declare it fresh.
+        let llfn = cx.declare_fn(&sym, &fn_abi);
+        debug!("get_fn: not casting pointer!");
+
+        attributes::from_fn_attrs(cx, llfn, instance);
+
+        let instance_def_id = instance.def_id();
+
+        // Apply an appropriate linkage/visibility value to our item that we
+        // just declared.
+        //
+        // This is sort of subtle. Inside our codegen unit we started off
+        // compilation by predefining all our own `MonoItem` instances. That
+        // is, everything we're codegenning ourselves is already defined. That
+        // means that anything we're actually codegenning in this codegen unit
+        // will have hit the above branch in `get_declared_value`. As a result,
+        // we're guaranteed here that we're declaring a symbol that won't get
+        // defined, or in other words we're referencing a value from another
+        // codegen unit or even another crate.
+        //
+        // So because this is a foreign value we blanket apply an external
+        // linkage directive because it's coming from a different object file.
+        // The visibility here is where it gets tricky. This symbol could be
+        // referencing some foreign crate or foreign library (an `extern`
+        // block) in which case we want to leave the default visibility. We may
+        // also, though, have multiple codegen units. It could be a
+        // monomorphization, in which case its expected visibility depends on
+        // whether we are sharing generics or not. The important thing here is
+        // that the visibility we apply to the declaration is the same one that
+        // has been applied to the definition (wherever that definition may be).
+        unsafe {
+            llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
+
+            let is_generic = instance.substs.non_erasable_generics().next().is_some();
+
+            if is_generic {
+                // This is a monomorphization. Its expected visibility depends
+                // on whether we are in share-generics mode.
+
+                if cx.tcx.sess.opts.share_generics() {
+                    // We are in share_generics mode.
+
+                    if let Some(instance_def_id) = instance_def_id.as_local() {
+                        // This is a definition from the current crate. If the
+                        // definition is unreachable for downstream crates or
+                        // the current crate does not re-export generics, the
+                        // definition of the instance will have been declared
+                        // as `hidden`.
+                        if cx.tcx.is_unreachable_local_definition(instance_def_id)
+                            || !cx.tcx.local_crate_exports_generics()
+                        {
+                            llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                        }
+                    } else {
+                        // This is a monomorphization of a generic function
+                        // defined in an upstream crate.
+                        if instance.upstream_monomorphization(tcx).is_some() {
+                            // This is instantiated in another crate. It cannot
+                            // be `hidden`.
+                        } else {
+                            // This is a local instantiation of an upstream definition.
+                            // If the current crate does not re-export it
+                            // (because it is a C library or an executable), it
+                            // will have been declared `hidden`.
+                            if !cx.tcx.local_crate_exports_generics() {
+                                llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                            }
+                        }
+                    }
+                } else {
+                    // When not sharing generics, all instances are in the same
+                    // crate and have hidden visibility
+                    llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                }
+            } else {
+                // This is a non-generic function
+                if cx.tcx.is_codegened_item(instance_def_id) {
+                    // This is a function that is instantiated in the local crate
+
+                    if instance_def_id.is_local() {
+                        // This is function that is defined in the local crate.
+                        // If it is not reachable, it is hidden.
+                        if !cx.tcx.is_reachable_non_generic(instance_def_id) {
+                            llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                        }
+                    } else {
+                        // This is a function from an upstream crate that has
+                        // been instantiated here. These are always hidden.
+                        llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                    }
+                }
+            }
+        }
+
+        // MinGW: For backward compatibility we rely on the linker to decide whether it
+        // should use dllimport for functions.
+        if cx.use_dll_storage_attrs
+            && tcx.is_dllimport_foreign_item(instance_def_id)
+            && tcx.sess.target.target.target_env != "gnu"
+        {
+            unsafe {
+                llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
+            }
+        }
+
+        llfn
+    };
+
+    // Cache the result so later requests for the same instance are cheap.
+    cx.instances.borrow_mut().insert(instance, llfn);
+
+    llfn
+}
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
new file mode 100644
index 0000000..0992410a
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -0,0 +1,339 @@
+//! Code that is useful in various codegen modules.
+
+use crate::consts::{self, const_alloc_to_llvm};
+pub use crate::context::CodegenCx;
+use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_ast::Mutability;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_middle::bug;
+use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_span::symbol::Symbol;
+use rustc_target::abi::{self, AddressSpace, HasDataLayout, LayoutOf, Pointer, Size};
+
+use libc::{c_char, c_uint};
+use tracing::debug;
+
+/*
+* A note on nomenclature of linking: "extern", "foreign", and "upcall".
+*
+* An "extern" is an LLVM symbol we wind up emitting an undefined external
+* reference to. This means "we don't have the thing in this compilation unit,
+* please make sure you link it in at runtime". This could be a reference to
+* C code found in a C library, or rust code found in a rust crate.
+*
+* Most "externs" are implicitly declared (automatically) as a result of a
+* user declaring an extern _module_ dependency; this causes the rust driver
+* to locate an extern crate, scan its compilation metadata, and emit extern
+* declarations for any symbols used by the declaring crate.
+*
+* A "foreign" is an extern that references C (or other non-rust ABI) code.
+* There is no metadata to scan for extern references so in these cases either
+* a header-digester like bindgen, or manual function prototypes, have to
+* serve as declarators. So these are usually given explicitly as prototype
+* declarations, in rust code, with ABI attributes on them noting which ABI to
+* link via.
+*
+* An "upcall" is a foreign call generated by the compiler (not corresponding
+* to any user-written call in the code) into the runtime library, to perform
+* some helper task such as bringing a task to life, allocating memory, etc.
+*
+*/
+
+/// A structure representing an active landing pad for the duration of a basic
+/// block.
+///
+/// Each `Block` may contain an instance of this, indicating whether the block
+/// is part of a landing pad or not. This is used to make decisions about
+/// whether to emit `invoke` instructions (e.g., in a landing pad we don't
+/// continue to use `invoke`) and also about various function call metadata.
+///
+/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
+/// just a bunch of `None` instances (not too interesting), but for MSVC
+/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
+/// When inside of a landing pad, each function call in LLVM IR needs to be
+/// annotated with which landing pad it's a part of. This is accomplished via
+/// the `OperandBundleDef` value created for MSVC landing pads.
+pub struct Funclet<'ll> {
+    // The pad instruction this funclet is rooted at.
+    cleanuppad: &'ll Value,
+    // The "funclet" operand bundle attached to calls made inside the funclet.
+    operand: OperandBundleDef<'ll>,
+}
+
+impl Funclet<'ll> {
+    /// Wraps a `cleanuppad`/`catchpad` instruction and builds the "funclet"
+    /// operand bundle that call sites inside this funclet must carry.
+    pub fn new(cleanuppad: &'ll Value) -> Self {
+        Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
+    }
+
+    /// The pad instruction this funclet is rooted at.
+    pub fn cleanuppad(&self) -> &'ll Value {
+        self.cleanuppad
+    }
+
+    /// The operand bundle to attach to calls made inside the funclet.
+    pub fn bundle(&self) -> &OperandBundleDef<'ll> {
+        &self.operand
+    }
+}
+
+impl BackendTypes for CodegenCx<'ll, 'tcx> {
+    // Concrete LLVM representations for the backend-agnostic codegen traits.
+    type Value = &'ll Value;
+    type Function = &'ll Value;
+
+    type BasicBlock = &'ll BasicBlock;
+    type Type = &'ll Type;
+    type Funclet = Funclet<'ll>;
+
+    type DIScope = &'ll llvm::debuginfo::DIScope;
+    type DIVariable = &'ll llvm::debuginfo::DIVariable;
+}
+
+impl CodegenCx<'ll, 'tcx> {
+    /// Constant array of `elts`, each of element type `ty`.
+    pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
+        unsafe { llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint) }
+    }
+
+    /// Constant vector built from `elts`.
+    pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
+        unsafe { llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint) }
+    }
+
+    /// Constant byte string in this context (not null-terminated).
+    pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
+        bytes_in_context(self.llcx, bytes)
+    }
+
+    /// Interns `s` as an internal constant global string, caching the global
+    /// per symbol so repeated requests return the same definition.
+    fn const_cstr(&self, s: Symbol, null_terminated: bool) -> &'ll Value {
+        unsafe {
+            if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) {
+                return llval;
+            }
+
+            let s_str = s.as_str();
+            let sc = llvm::LLVMConstStringInContext(
+                self.llcx,
+                s_str.as_ptr() as *const c_char,
+                s_str.len() as c_uint,
+                // LLVM's flag means "don't null-terminate", hence the negation.
+                !null_terminated as Bool,
+            );
+            let sym = self.generate_local_symbol_name("str");
+            let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(|| {
+                bug!("symbol `{}` is already defined", sym);
+            });
+            llvm::LLVMSetInitializer(g, sc);
+            llvm::LLVMSetGlobalConstant(g, True);
+            llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
+
+            self.const_cstr_cache.borrow_mut().insert(s, g);
+            g
+        }
+    }
+
+    /// Extracts element `idx` from the constant aggregate `v`.
+    pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
+        unsafe {
+            // The FFI call takes a `c_uint` index; make sure it round-trips.
+            assert_eq!(idx as c_uint as u64, idx);
+            let us = &[idx as c_uint];
+            let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
+
+            debug!("const_get_elt(v={:?}, idx={}, r={:?})", v, idx, r);
+
+            r
+        }
+    }
+}
+
+impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    /// All-zeros constant of type `t`.
+    fn const_null(&self, t: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMConstNull(t) }
+    }
+
+    /// `undef` constant of type `t`.
+    fn const_undef(&self, t: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMGetUndef(t) }
+    }
+
+    /// Signed integer constant (LLVM sign-extends to the width of `t`).
+    fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
+        unsafe { llvm::LLVMConstInt(t, i as u64, True) }
+    }
+
+    /// Unsigned integer constant (no sign extension).
+    fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
+        unsafe { llvm::LLVMConstInt(t, i, False) }
+    }
+
+    /// Integer constant wider than 64 bits, handed to LLVM as two 64-bit
+    /// words (low word first).
+    fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
+        unsafe {
+            let words = [u as u64, (u >> 64) as u64];
+            llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
+        }
+    }
+
+    /// `i1` constant for `val`.
+    fn const_bool(&self, val: bool) -> &'ll Value {
+        self.const_uint(self.type_i1(), val as u64)
+    }
+
+    /// `i32` constant for `i`.
+    fn const_i32(&self, i: i32) -> &'ll Value {
+        self.const_int(self.type_i32(), i as i64)
+    }
+
+    /// `i32` constant for the unsigned value `i`.
+    fn const_u32(&self, i: u32) -> &'ll Value {
+        self.const_uint(self.type_i32(), i as u64)
+    }
+
+    /// `i64` constant for `i`.
+    fn const_u64(&self, i: u64) -> &'ll Value {
+        self.const_uint(self.type_i64(), i)
+    }
+
+    /// Pointer-sized constant; asserts `i` fits when the target's pointers
+    /// are narrower than 64 bits.
+    fn const_usize(&self, i: u64) -> &'ll Value {
+        let bit_size = self.data_layout().pointer_size.bits();
+        if bit_size < 64 {
+            // make sure it doesn't overflow
+            assert!(i < (1 << bit_size));
+        }
+
+        self.const_uint(self.isize_ty, i)
+    }
+
+    /// `i8` constant for `i`.
+    fn const_u8(&self, i: u8) -> &'ll Value {
+        self.const_uint(self.type_i8(), i as u64)
+    }
+
+    /// Floating-point constant of type `t` with value `val`.
+    fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value {
+        unsafe { llvm::LLVMConstReal(t, val) }
+    }
+
+    /// Lowers `s` to a `(pointer, length)` pair for a `str` constant.
+    fn const_str(&self, s: Symbol) -> (&'ll Value, &'ll Value) {
+        let len = s.as_str().len();
+        let cs = consts::ptrcast(
+            self.const_cstr(s, false),
+            self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)),
+        );
+        (cs, self.const_usize(len as u64))
+    }
+
+    /// Anonymous constant struct from `elts`; `packed` selects packed layout.
+    fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
+        struct_in_context(self.llcx, elts, packed)
+    }
+
+    /// Zero-extended value of `v` if it is a constant integer, else `None`.
+    fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
+        try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
+    }
+
+    /// Full 128-bit value of `v` if it is a constant integer that the FFI
+    /// helper can extract, else `None`. `sign_ext` requests sign extension.
+    fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
+        try_as_const_integral(v).and_then(|v| unsafe {
+            let (mut lo, mut hi) = (0u64, 0u64);
+            let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
+            success.then_some(hi_lo_to_u128(lo, hi))
+        })
+    }
+
+    /// Lowers an interpreter scalar — raw bits or a pointer into some global
+    /// allocation — to an LLVM constant of type `llty`.
+    fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
+        let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
+        match cv {
+            Scalar::Raw { size: 0, .. } => {
+                assert_eq!(0, layout.value.size(self).bytes());
+                self.const_undef(self.type_ix(0))
+            }
+            Scalar::Raw { data, size } => {
+                assert_eq!(size as u64, layout.value.size(self).bytes());
+                let llval = self.const_uint_big(self.type_ix(bitsize), data);
+                if layout.value == Pointer {
+                    unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
+                } else {
+                    self.const_bitcast(llval, llty)
+                }
+            }
+            Scalar::Ptr(ptr) => {
+                // Resolve the allocation the pointer refers to and obtain a
+                // base address (plus its address space) for it.
+                let (base_addr, base_addr_space) = match self.tcx.global_alloc(ptr.alloc_id) {
+                    GlobalAlloc::Memory(alloc) => {
+                        let init = const_alloc_to_llvm(self, alloc);
+                        let value = match alloc.mutability {
+                            Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+                            _ => self.static_addr_of(init, alloc.align, None),
+                        };
+                        if !self.sess().fewer_names() {
+                            llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes());
+                        }
+                        (value, AddressSpace::DATA)
+                    }
+                    GlobalAlloc::Function(fn_instance) => (
+                        self.get_fn_addr(fn_instance.polymorphize(self.tcx)),
+                        self.data_layout().instruction_address_space,
+                    ),
+                    GlobalAlloc::Static(def_id) => {
+                        assert!(self.tcx.is_static(def_id));
+                        assert!(!self.tcx.is_thread_local_static(def_id));
+                        (self.get_static(def_id), AddressSpace::DATA)
+                    }
+                };
+                // Apply the pointer's byte offset via an in-bounds GEP on i8*.
+                let llval = unsafe {
+                    llvm::LLVMConstInBoundsGEP(
+                        self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
+                        &self.const_usize(ptr.offset.bytes()),
+                        1,
+                    )
+                };
+                if layout.value != Pointer {
+                    unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
+                } else {
+                    self.const_bitcast(llval, llty)
+                }
+            }
+        }
+    }
+
+    /// Materializes `alloc` as a constant global and returns a place pointing
+    /// `offset` bytes into it, typed according to `layout`.
+    fn from_const_alloc(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        alloc: &Allocation,
+        offset: Size,
+    ) -> PlaceRef<'tcx, &'ll Value> {
+        assert_eq!(alloc.align, layout.align.abi);
+        let llty = self.type_ptr_to(layout.llvm_type(self));
+        let llval = if layout.size == Size::ZERO {
+            // ZST: no storage needed — use the alignment as a dangling but
+            // well-aligned address.
+            let llval = self.const_usize(alloc.align.bytes());
+            unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
+        } else {
+            let init = const_alloc_to_llvm(self, alloc);
+            let base_addr = self.static_addr_of(init, alloc.align, None);
+
+            let llval = unsafe {
+                llvm::LLVMConstInBoundsGEP(
+                    self.const_bitcast(base_addr, self.type_i8p()),
+                    &self.const_usize(offset.bytes()),
+                    1,
+                )
+            };
+            self.const_bitcast(llval, llty)
+        };
+        PlaceRef::new_sized(llval, layout)
+    }
+
+    /// Constant pointer cast of `val` to type `ty`.
+    fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        consts::ptrcast(val, ty)
+    }
+}
+
+/// Returns the LLVM type of the value `v`.
+pub fn val_ty(v: &Value) -> &Type {
+    unsafe { llvm::LLVMTypeOf(v) }
+}
+
+/// Builds a constant byte-string value in `llcx` from `bytes`.
+/// The trailing `True` tells LLVM not to append a NUL terminator.
+pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+    unsafe {
+        llvm::LLVMConstStringInContext(
+            llcx,
+            bytes.as_ptr() as *const c_char,
+            bytes.len() as c_uint,
+            True,
+        )
+    }
+}
+
+/// Builds an anonymous constant struct from `elts` in `llcx`; `packed`
+/// selects a packed (unpadded) struct layout.
+pub fn struct_in_context(llcx: &'a llvm::Context, elts: &[&'a Value], packed: bool) -> &'a Value {
+    unsafe {
+        llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), elts.len() as c_uint, packed as Bool)
+    }
+}
+
+/// Reassembles a `u128` from its low and high 64-bit halves.
+#[inline]
+fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
+    u128::from(lo) | (u128::from(hi) << 64)
+}
+
+/// Downcasts `v` to a `ConstantInt` if it is one, else returns `None`.
+fn try_as_const_integral(v: &Value) -> Option<&ConstantInt> {
+    unsafe { llvm::LLVMIsAConstantInt(v) }
+}
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
new file mode 100644
index 0000000..6d3582d
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -0,0 +1,508 @@
+use crate::base;
+use crate::common::CodegenCx;
+use crate::debuginfo;
+use crate::llvm::{self, True};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use libc::c_uint;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::const_cstr;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::Node;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::interpret::{
+ read_target_uint, Allocation, ErrorHandled, GlobalAlloc, Pointer,
+};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::{bug, span_bug};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_target::abi::{AddressSpace, Align, HasDataLayout, LayoutOf, Primitive, Scalar, Size};
+use tracing::debug;
+
+pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
+ let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+ let dl = cx.data_layout();
+ let pointer_size = dl.pointer_size.bytes() as usize;
+
+ let mut next_offset = 0;
+ for &(offset, ((), alloc_id)) in alloc.relocations().iter() {
+ let offset = offset.bytes();
+ assert_eq!(offset as usize as u64, offset);
+ let offset = offset as usize;
+ if offset > next_offset {
+ // This `inspect` is okay since we have checked that it is not within a relocation, it
+ // is within the bounds of the allocation, and it doesn't affect interpreter execution
+ // (we inspect the result after interpreter execution). Any undef byte is replaced with
+ // some arbitrary byte value.
+ //
+ // FIXME: relay undef bytes to codegen as undef const bytes
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
+ llvals.push(cx.const_bytes(bytes));
+ }
+ let ptr_offset = read_target_uint(
+ dl.endian,
+ // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+ // affect interpreter execution (we inspect the result after interpreter execution),
+ // and we properly interpret the relocation as a relocation pointer offset.
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+ )
+ .expect("const_alloc_to_llvm: could not read relocation pointer")
+ as u64;
+
+ let address_space = match cx.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space,
+ GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) => AddressSpace::DATA,
+ };
+
+ llvals.push(cx.scalar_to_backend(
+ Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
+ &Scalar { value: Primitive::Pointer, valid_range: 0..=!0 },
+ cx.type_i8p_ext(address_space),
+ ));
+ next_offset = offset + pointer_size;
+ }
+ if alloc.len() >= next_offset {
+ let range = next_offset..alloc.len();
+ // This `inspect` is okay since we have checked that it is after all relocations, it is
+ // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+ // inspect the result after interpreter execution). Any undef byte is replaced with some
+ // arbitrary byte value.
+ //
+ // FIXME: relay undef bytes to codegen as undef const bytes
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+ llvals.push(cx.const_bytes(bytes));
+ }
+
+ cx.const_struct(&llvals, true)
+}
+
+pub fn codegen_static_initializer(
+ cx: &CodegenCx<'ll, 'tcx>,
+ def_id: DefId,
+) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
+ let alloc = cx.tcx.eval_static_initializer(def_id)?;
+ Ok((const_alloc_to_llvm(cx, alloc), alloc))
+}
+
+fn set_global_alignment(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
+ // The target may require greater alignment for globals than the type does.
+ // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
+ // which can force it to be smaller. Rust doesn't support this yet.
+ if let Some(min) = cx.sess().target.target.options.min_global_align {
+ match Align::from_bits(min) {
+ Ok(min) => align = align.max(min),
+ Err(err) => {
+ cx.sess().err(&format!("invalid minimum global alignment: {}", err));
+ }
+ }
+ }
+ unsafe {
+ llvm::LLVMSetAlignment(gv, align.bytes() as u32);
+ }
+}
+
+fn check_and_apply_linkage(
+ cx: &CodegenCx<'ll, 'tcx>,
+ attrs: &CodegenFnAttrs,
+ ty: Ty<'tcx>,
+ sym: &str,
+ span: Span,
+) -> &'ll Value {
+ let llty = cx.layout_of(ty).llvm_type(cx);
+ if let Some(linkage) = attrs.linkage {
+ debug!("get_static: sym={} linkage={:?}", sym, linkage);
+
+ // If this is a static with a linkage specified, then we need to handle
+ // it a little specially. The typesystem prevents things like &T and
+ // extern "C" fn() from being non-null, so we can't just declare a
+ // static and call it a day. Some linkages (like weak) will make it such
+ // that the static actually has a null value.
+ let llty2 = if let ty::RawPtr(ref mt) = ty.kind() {
+ cx.layout_of(mt.ty).llvm_type(cx)
+ } else {
+ cx.sess().span_fatal(
+ span,
+ "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
+ )
+ };
+ unsafe {
+ // Declare a symbol `foo` with the desired linkage.
+ let g1 = cx.declare_global(&sym, llty2);
+ llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
+
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
+ let mut real_name = "_rust_extern_with_linkage_".to_string();
+ real_name.push_str(&sym);
+ let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
+ cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
+ });
+ llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
+ llvm::LLVMSetInitializer(g2, g1);
+ g2
+ }
+ } else {
+ // Generate an external declaration.
+ // FIXME(nagisa): investigate whether it can be changed into define_global
+ cx.declare_global(&sym, llty)
+ }
+}
+
+pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMConstPointerCast(val, ty) }
+}
+
+impl CodegenCx<'ll, 'tcx> {
+ crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMConstBitCast(val, ty) }
+ }
+
+ crate fn static_addr_of_mut(
+ &self,
+ cv: &'ll Value,
+ align: Align,
+ kind: Option<&str>,
+ ) -> &'ll Value {
+ unsafe {
+ let gv = match kind {
+ Some(kind) if !self.tcx.sess.fewer_names() => {
+ let name = self.generate_local_symbol_name(kind);
+ let gv = self.define_global(&name[..], self.val_ty(cv)).unwrap_or_else(|| {
+ bug!("symbol `{}` is already defined", name);
+ });
+ llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
+ gv
+ }
+ _ => self.define_private_global(self.val_ty(cv)),
+ };
+ llvm::LLVMSetInitializer(gv, cv);
+ set_global_alignment(&self, gv, align);
+ llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
+ gv
+ }
+ }
+
+ crate fn get_static(&self, def_id: DefId) -> &'ll Value {
+ let instance = Instance::mono(self.tcx, def_id);
+ if let Some(&g) = self.instances.borrow().get(&instance) {
+ return g;
+ }
+
+ let defined_in_current_codegen_unit =
+ self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
+ assert!(
+ !defined_in_current_codegen_unit,
+ "consts::get_static() should always hit the cache for \
+ statics defined in the same CGU, but did not for `{:?}`",
+ def_id
+ );
+
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let sym = self.tcx.symbol_name(instance).name;
+
+ debug!("get_static: sym={} instance={:?}", sym, instance);
+
+ let g = if let Some(def_id) = def_id.as_local() {
+ let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let llty = self.layout_of(ty).llvm_type(self);
+ // FIXME: refactor this to work without accessing the HIR
+ let (g, attrs) = match self.tcx.hir().get(id) {
+ Node::Item(&hir::Item { attrs, span, kind: hir::ItemKind::Static(..), .. }) => {
+ if let Some(g) = self.get_declared_value(sym) {
+ if self.val_ty(g) != self.type_ptr_to(llty) {
+ span_bug!(span, "Conflicting types for static");
+ }
+ }
+
+ let g = self.declare_global(sym, llty);
+
+ if !self.tcx.is_reachable_non_generic(def_id) {
+ unsafe {
+ llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
+ }
+ }
+
+ (g, attrs)
+ }
+
+ Node::ForeignItem(&hir::ForeignItem {
+ ref attrs,
+ span,
+ kind: hir::ForeignItemKind::Static(..),
+ ..
+ }) => {
+ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+ (check_and_apply_linkage(&self, &fn_attrs, ty, sym, span), &**attrs)
+ }
+
+ item => bug!("get_static: expected static, found {:?}", item),
+ };
+
+ debug!("get_static: sym={} attrs={:?}", sym, attrs);
+
+ for attr in attrs {
+ if self.tcx.sess.check_name(attr, sym::thread_local) {
+ llvm::set_thread_local_mode(g, self.tls_model);
+ }
+ }
+
+ g
+ } else {
+ // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
+ debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
+
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+ let span = self.tcx.def_span(def_id);
+ let g = check_and_apply_linkage(&self, &attrs, ty, sym, span);
+
+ // Thread-local statics in some other crate need to *always* be linked
+ // against in a thread-local fashion, so we need to be sure to apply the
+ // thread-local attribute locally if it was present remotely. If we
+ // don't do this then linker errors can be generated where the linker
+ // complains that one object file has a thread local version of the
+ // symbol and another one doesn't.
+ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ llvm::set_thread_local_mode(g, self.tls_model);
+ }
+
+ let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
+ // ThinLTO can't handle this workaround in all cases, so we don't
+ // emit the attrs. Instead we make them unnecessary by disallowing
+ // dynamic linking when linker plugin based LTO is enabled.
+ !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();
+
+ // If this assertion triggers, there's something wrong with commandline
+ // argument validation.
+ debug_assert!(
+ !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+ && self.tcx.sess.target.target.options.is_like_windows
+ && self.tcx.sess.opts.cg.prefer_dynamic)
+ );
+
+ if needs_dll_storage_attr {
+ // This item is external but not foreign, i.e., it originates from an external Rust
+ // crate. Since we don't know whether this crate will be linked dynamically or
+ // statically in the final application, we always mark such symbols as 'dllimport'.
+ // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+ // to make things work.
+ //
+ // However, in some scenarios we defer emission of statics to downstream
+ // crates, so there are cases where a static with an upstream DefId
+ // is actually present in the current crate. We can find out via the
+ // is_codegened_item query.
+ if !self.tcx.is_codegened_item(def_id) {
+ unsafe {
+ llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
+ }
+ }
+ }
+ g
+ };
+
+ if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
+ // For foreign (native) libs we know the exact storage type to use.
+ unsafe {
+ llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
+ }
+ }
+
+ self.instances.borrow_mut().insert(instance, g);
+ g
+ }
+}
+
+impl StaticMethods for CodegenCx<'ll, 'tcx> {
+ fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value {
+ if let Some(&gv) = self.const_globals.borrow().get(&cv) {
+ unsafe {
+ // Upgrade the alignment in cases where the same constant is used with different
+ // alignment requirements
+ let llalign = align.bytes() as u32;
+ if llalign > llvm::LLVMGetAlignment(gv) {
+ llvm::LLVMSetAlignment(gv, llalign);
+ }
+ }
+ return gv;
+ }
+ let gv = self.static_addr_of_mut(cv, align, kind);
+ unsafe {
+ llvm::LLVMSetGlobalConstant(gv, True);
+ }
+ self.const_globals.borrow_mut().insert(cv, gv);
+ gv
+ }
+
+ fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
+ unsafe {
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+ let (v, alloc) = match codegen_static_initializer(&self, def_id) {
+ Ok(v) => v,
+ // Error has already been reported
+ Err(_) => return,
+ };
+
+ let g = self.get_static(def_id);
+
+ // boolean SSA values are i1, but they have to be stored in i8 slots,
+ // otherwise some LLVM optimization passes don't work as expected
+ let mut val_llty = self.val_ty(v);
+ let v = if val_llty == self.type_i1() {
+ val_llty = self.type_i8();
+ llvm::LLVMConstZExt(v, val_llty)
+ } else {
+ v
+ };
+
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let llty = self.layout_of(ty).llvm_type(self);
+ let g = if val_llty == llty {
+ g
+ } else {
+ // If we created the global with the wrong type,
+ // correct the type.
+ let name = llvm::get_value_name(g).to_vec();
+ llvm::set_value_name(g, b"");
+
+ let linkage = llvm::LLVMRustGetLinkage(g);
+ let visibility = llvm::LLVMRustGetVisibility(g);
+
+ let new_g = llvm::LLVMRustGetOrInsertGlobal(
+ self.llmod,
+ name.as_ptr().cast(),
+ name.len(),
+ val_llty,
+ );
+
+ llvm::LLVMRustSetLinkage(new_g, linkage);
+ llvm::LLVMRustSetVisibility(new_g, visibility);
+
+ // To avoid breaking any invariants, we leave around the old
+ // global for the moment; we'll replace all references to it
+ // with the new global later. (See base::codegen_backend.)
+ self.statics_to_rauw.borrow_mut().push((g, new_g));
+ new_g
+ };
+ set_global_alignment(&self, g, self.align_of(ty));
+ llvm::LLVMSetInitializer(g, v);
+
+ // As an optimization, all shared statics which do not have interior
+ // mutability are placed into read-only memory.
+ if !is_mutable {
+ if self.type_is_freeze(ty) {
+ llvm::LLVMSetGlobalConstant(g, llvm::True);
+ }
+ }
+
+ debuginfo::create_global_var_metadata(&self, def_id, g);
+
+ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ llvm::set_thread_local_mode(g, self.tls_model);
+
+ // Do not allow LLVM to change the alignment of a TLS on macOS.
+ //
+ // By default a global's alignment can be freely increased.
+ // This allows LLVM to generate more performant instructions
+ // e.g., using load-aligned into a SIMD register.
+ //
+ // However, on macOS 10.10 or below, the dynamic linker does not
+ // respect any alignment given on the TLS (radar 24221680).
+ // This will violate the alignment assumption, and cause segfaults at runtime.
+ //
+ // This bug is very easy to trigger. In `println!` and `panic!`,
+ // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+ // whose values are `mem::replace`d on initialization.
+ // The implementation of `mem::replace` will use SIMD
+ // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+ // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+ // which macOS's dyld disregards, causing crashes
+ // (see issues #51794, #51758, #50867, #48866 and #44056).
+ //
+ // To work around the bug, we trick LLVM into not increasing
+ // the global's alignment by explicitly assigning a section to it
+ // (equivalent to automatically generating a `#[link_section]` attribute).
+ // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+ // of `lib/IR/Globals.cpp` for why this works.
+ //
+ // When the alignment is not increased, the optimized `mem::replace`
+ // will use load-unaligned instructions instead, and thus avoiding the crash.
+ //
+ // We could remove this hack whenever we decide to drop macOS 10.10 support.
+ if self.tcx.sess.target.target.options.is_like_osx {
+ // The `inspect` method is okay here because we checked relocations, and
+ // because we are doing this access to inspect the final interpreter state
+ // (not as part of the interpreter execution).
+ //
+ // FIXME: This check requires that the (arbitrary) value of undefined bytes
+ // happens to be zero. Instead, we should only check the value of defined bytes
+ // and set all undefined bytes to zero if this allocation is headed for the
+ // BSS.
+ let all_bytes_are_zero = alloc.relocations().is_empty()
+ && alloc
+ .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
+ .iter()
+ .all(|&byte| byte == 0);
+
+ let sect_name = if all_bytes_are_zero {
+ const_cstr!("__DATA,__thread_bss")
+ } else {
+ const_cstr!("__DATA,__thread_data")
+ };
+ llvm::LLVMSetSection(g, sect_name.as_ptr());
+ }
+ }
+
+ // Wasm statics with custom link sections get special treatment as they
+ // go into custom sections of the wasm executable.
+ if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
+ if let Some(section) = attrs.link_section {
+ let section = llvm::LLVMMDStringInContext(
+ self.llcx,
+ section.as_str().as_ptr().cast(),
+ section.as_str().len() as c_uint,
+ );
+ assert!(alloc.relocations().is_empty());
+
+ // The `inspect` method is okay here because we checked relocations, and
+ // because we are doing this access to inspect the final interpreter state (not
+ // as part of the interpreter execution).
+ let bytes =
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
+ let alloc = llvm::LLVMMDStringInContext(
+ self.llcx,
+ bytes.as_ptr().cast(),
+ bytes.len() as c_uint,
+ );
+ let data = [section, alloc];
+ let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
+ llvm::LLVMAddNamedMetadataOperand(
+ self.llmod,
+ "wasm.custom_sections\0".as_ptr().cast(),
+ meta,
+ );
+ }
+ } else {
+ base::set_link_section(g, &attrs);
+ }
+
+ if attrs.flags.contains(CodegenFnAttrFlags::USED) {
+ self.add_used_global(g);
+ }
+ }
+ }
+
+ /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+ fn add_used_global(&self, global: &'ll Value) {
+ let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
+ self.used_statics.borrow_mut().push(cast);
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
new file mode 100644
index 0000000..1696f35
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -0,0 +1,913 @@
+use crate::attributes;
+use crate::callee::get_fn;
+use crate::coverageinfo;
+use crate::debuginfo;
+use crate::llvm;
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::base_n;
+use rustc_data_structures::const_cstr;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_middle::bug;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::layout::{HasParamEnv, LayoutError, TyAndLayout};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_session::config::{CFGuard, CrateType, DebugInfo};
+use rustc_session::Session;
+use rustc_span::source_map::{Span, DUMMY_SP};
+use rustc_span::symbol::Symbol;
+use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
+
+use std::cell::{Cell, RefCell};
+use std::ffi::CStr;
+use std::str;
+
+/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
+/// `llvm::Context` so that several compilation units may be optimized in parallel.
+/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
+pub struct CodegenCx<'ll, 'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub check_overflow: bool,
+ pub use_dll_storage_attrs: bool,
+ pub tls_model: llvm::ThreadLocalMode,
+
+ pub llmod: &'ll llvm::Module,
+ pub llcx: &'ll llvm::Context,
+ pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+
+ /// Cache instances of monomorphic and polymorphic items
+ pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
+ /// Cache generated vtables
+ pub vtables:
+ RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>,
+ /// Cache of constant strings,
+ pub const_cstr_cache: RefCell<FxHashMap<Symbol, &'ll Value>>,
+
+ /// Reverse-direction for const ptrs cast from globals.
+ ///
+ /// Key is a Value holding a `*T`,
+ /// Val is a Value holding a `*[T]`.
+ ///
+ /// Needed because LLVM loses pointer->pointee association
+ /// when we ptrcast, and we have to ptrcast during codegen
+ /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
+ /// a pointer to an LLVM array type. Similar for trait objects.
+ pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
+
+ /// Cache of emitted const globals (value -> global)
+ pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
+
+ /// List of globals for static variables which need to be passed to the
+ /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
+ /// (We have to make sure we don't invalidate any Values referring
+ /// to constants.)
+ pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,
+
+ /// Statics that will be placed in the llvm.used variable
+ /// See <http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
+ pub used_statics: RefCell<Vec<&'ll Value>>,
+
+ pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>,
+ pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
+ pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+ pub isize_ty: &'ll Type,
+
+ pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'tcx>>,
+ pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>,
+
+ eh_personality: Cell<Option<&'ll Value>>,
+ eh_catch_typeinfo: Cell<Option<&'ll Value>>,
+ pub rust_try_fn: Cell<Option<&'ll Value>>,
+
+ intrinsics: RefCell<FxHashMap<&'static str, &'ll Value>>,
+
+ /// A counter that is used for generating local symbol names
+ local_gen_sym_counter: Cell<usize>,
+}
+
+fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
+ match tls_model {
+ TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
+ TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic,
+ TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec,
+ TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec,
+ }
+}
+
+fn strip_function_ptr_alignment(data_layout: String) -> String {
+ // FIXME: Make this more general.
+ data_layout.replace("-Fi8-", "-")
+}
+
+fn strip_x86_address_spaces(data_layout: String) -> String {
+ data_layout.replace("-p270:32:32-p271:32:32-p272:64:64-", "-")
+}
+
+pub unsafe fn create_module(
+ tcx: TyCtxt<'_>,
+ llcx: &'ll llvm::Context,
+ mod_name: &str,
+) -> &'ll llvm::Module {
+ let sess = tcx.sess;
+ let mod_name = SmallCStr::new(mod_name);
+ let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
+
+ let mut target_data_layout = sess.target.target.data_layout.clone();
+ if llvm_util::get_major_version() < 9 {
+ target_data_layout = strip_function_ptr_alignment(target_data_layout);
+ }
+ if llvm_util::get_major_version() < 10 {
+ if sess.target.target.arch == "x86" || sess.target.target.arch == "x86_64" {
+ target_data_layout = strip_x86_address_spaces(target_data_layout);
+ }
+ }
+
+ // Ensure the data-layout values hardcoded remain the defaults.
+ if sess.target.target.options.is_builtin {
+ let tm = crate::back::write::create_informational_target_machine(tcx.sess);
+ llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
+ llvm::LLVMRustDisposeTargetMachine(tm);
+
+ let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
+ let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
+ .expect("got a non-UTF8 data-layout from LLVM");
+
+ // Unfortunately LLVM target specs change over time, and right now we
+ // don't have proper support to work with any more than one
+ // `data_layout` than the one that is in the rust-lang/rust repo. If
+ // this compiler is configured against a custom LLVM, we may have a
+ // differing data layout, even though we should update our own to use
+ // that one.
+ //
+ // As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
+ // disable this check entirely as we may be configured with something
+ // that has a different target layout.
+ //
+ // Unsure if this will actually cause breakage when rustc is configured
+ // as such.
+ //
+ // FIXME(#34960)
+ let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
+ let custom_llvm_used = cfg_llvm_root.trim() != "";
+
+ if !custom_llvm_used && target_data_layout != llvm_data_layout {
+ bug!(
+ "data-layout for builtin `{}` target, `{}`, \
+ differs from LLVM default, `{}`",
+ sess.target.target.llvm_target,
+ target_data_layout,
+ llvm_data_layout
+ );
+ }
+ }
+
+ let data_layout = SmallCStr::new(&target_data_layout);
+ llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
+
+ let llvm_target = SmallCStr::new(&sess.target.target.llvm_target);
+ llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
+
+ if sess.relocation_model() == RelocModel::Pic {
+ llvm::LLVMRustSetModulePICLevel(llmod);
+ // PIE is potentially more effective than PIC, but can only be used in executables.
+ // If all our outputs are executables, then we can relax PIC to PIE.
+ if sess.crate_types().iter().all(|ty| *ty == CrateType::Executable) {
+ llvm::LLVMRustSetModulePIELevel(llmod);
+ }
+ }
+
+ // If skipping the PLT is enabled, we need to add some module metadata
+ // to ensure intrinsic calls don't use it.
+ if !sess.needs_plt() {
+ let avoid_plt = "RtLibUseGOT\0".as_ptr().cast();
+ llvm::LLVMRustAddModuleFlag(llmod, avoid_plt, 1);
+ }
+
+ // Control Flow Guard is currently only supported by the MSVC linker on Windows.
+ if sess.target.target.options.is_like_msvc {
+ match sess.opts.cg.control_flow_guard {
+ CFGuard::Disabled => {}
+ CFGuard::NoChecks => {
+ // Set `cfguard=1` module flag to emit metadata only.
+ llvm::LLVMRustAddModuleFlag(llmod, "cfguard\0".as_ptr() as *const _, 1)
+ }
+ CFGuard::Checks => {
+ // Set `cfguard=2` module flag to emit metadata and checks.
+ llvm::LLVMRustAddModuleFlag(llmod, "cfguard\0".as_ptr() as *const _, 2)
+ }
+ }
+ }
+
+ llmod
+}
+
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+ crate fn new(
+ tcx: TyCtxt<'tcx>,
+ codegen_unit: &'tcx CodegenUnit<'tcx>,
+ llvm_module: &'ll crate::ModuleLlvm,
+ ) -> Self {
+ // An interesting part of Windows which MSVC forces our hand on (and
+ // apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
+ // attributes in LLVM IR as well as native dependencies (in C these
+ // correspond to `__declspec(dllimport)`).
+ //
+ // LD (BFD) in MinGW mode can often correctly guess `dllexport` but
+ // relying on that can result in issues like #50176.
+ // LLD won't support that and expects symbols with proper attributes.
+ // Because of that we make MinGW target emit dllexport just like MSVC.
+ // When it comes to dllimport we use it for constants but for functions
+ // rely on the linker to do the right thing. Opposed to dllexport this
+ // task is easy for them (both LD and LLD) and allows us to easily use
+ // symbols from static libraries in shared libraries.
+ //
+ // Whenever a dynamic library is built on Windows it must have its public
+ // interface specified by functions tagged with `dllexport` or otherwise
+ // they're not available to be linked against. This poses a few problems
+ // for the compiler, some of which are somewhat fundamental, but we use
+ // the `use_dll_storage_attrs` variable below to attach the `dllexport`
+ // attribute to all LLVM functions that are exported (e.g., they're
+ // already tagged with external linkage). This is suboptimal for a few
+ // reasons:
+ //
+ // * If an object file will never be included in a dynamic library,
+ // there's no need to attach the dllexport attribute. Most object
+ // files in Rust are not destined to become part of a dll as binaries
+ // are statically linked by default.
+ // * If the compiler is emitting both an rlib and a dylib, the same
+ // source object file is currently used but with MSVC this may be less
+ // feasible. The compiler may be able to get around this, but it may
+ // involve some invasive changes to deal with this.
+ //
+ // The flipside of this situation is that whenever you link to a dll and
+ // you import a function from it, the import should be tagged with
+ // `dllimport`. At this time, however, the compiler does not emit
+ // `dllimport` for any declarations other than constants (where it is
+ // required), which is again suboptimal for even more reasons!
+ //
+ // * Calling a function imported from another dll without using
+ // `dllimport` causes the linker/compiler to have extra overhead (one
+ // `jmp` instruction on x86) when calling the function.
+ // * The same object file may be used in different circumstances, so a
+ // function may be imported from a dll if the object is linked into a
+ // dll, but it may be just linked against if linked into an rlib.
+ // * The compiler has no knowledge about whether native functions should
+ // be tagged dllimport or not.
+ //
+ // For now the compiler takes the perf hit (I do not have any numbers to
+ // this effect) by marking very little as `dllimport` and praying the
+ // linker will take care of everything. Fixing this problem will likely
+ // require adding a few attributes to Rust itself (feature gated at the
+ // start) and then strongly recommending static linkage on Windows!
+ let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_windows;
+
+ let check_overflow = tcx.sess.overflow_checks();
+
+ let tls_model = to_llvm_tls_model(tcx.sess.tls_model());
+
+ let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
+
+ let coverage_cx = if tcx.sess.opts.debugging_opts.instrument_coverage {
+ let covctx = coverageinfo::CrateCoverageContext::new();
+ Some(covctx)
+ } else {
+ None
+ };
+
+ let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
+ let dctx = debuginfo::CrateDebugContext::new(llmod);
+ debuginfo::metadata::compile_unit_metadata(tcx, &codegen_unit.name().as_str(), &dctx);
+ Some(dctx)
+ } else {
+ None
+ };
+
+ let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());
+
+ CodegenCx {
+ tcx,
+ check_overflow,
+ use_dll_storage_attrs,
+ tls_model,
+ llmod,
+ llcx,
+ codegen_unit,
+ instances: Default::default(),
+ vtables: Default::default(),
+ const_cstr_cache: Default::default(),
+ const_unsized: Default::default(),
+ const_globals: Default::default(),
+ statics_to_rauw: RefCell::new(Vec::new()),
+ used_statics: RefCell::new(Vec::new()),
+ lltypes: Default::default(),
+ scalar_lltypes: Default::default(),
+ pointee_infos: Default::default(),
+ isize_ty,
+ coverage_cx,
+ dbg_cx,
+ eh_personality: Cell::new(None),
+ eh_catch_typeinfo: Cell::new(None),
+ rust_try_fn: Cell::new(None),
+ intrinsics: Default::default(),
+ local_gen_sym_counter: Cell::new(0),
+ }
+ }
+
+ crate fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
+ &self.statics_to_rauw
+ }
+
+ #[inline]
+ pub fn coverage_context(&'a self) -> &'a coverageinfo::CrateCoverageContext<'tcx> {
+ self.coverage_cx.as_ref().unwrap()
+ }
+}
+
+impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    // Cache of vtable globals, keyed by (concrete type, optional trait ref).
+    fn vtables(
+        &self,
+    ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>
+    {
+        &self.vtables
+    }
+
+    fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
+        get_fn(self, instance)
+    }
+
+    // NOTE(review): identical to `get_fn` here — both delegate to the free
+    // function `get_fn`.
+    fn get_fn_addr(&self, instance: Instance<'tcx>) -> &'ll Value {
+        get_fn(self, instance)
+    }
+
+    fn eh_personality(&self) -> &'ll Value {
+        // The exception handling personality function.
+        //
+        // If our compilation unit has the `eh_personality` lang item somewhere
+        // within it, then we just need to codegen that. Otherwise, we're
+        // building an rlib which will depend on some upstream implementation of
+        // this function, so we just codegen a generic reference to it. We don't
+        // specify any of the types for the function, we just make it a symbol
+        // that LLVM can later use.
+        //
+        // Note that MSVC is a little special here in that we don't use the
+        // `eh_personality` lang item at all. Currently LLVM has support for
+        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+        // *name of the personality function* to decide what kind of unwind side
+        // tables/landing pads to emit. It looks like Dwarf is used by default,
+        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+        // an "exception", but for MSVC we want to force SEH. This means that we
+        // can't actually have the personality function be our standard
+        // `rust_eh_personality` function, but rather we wired it up to the
+        // CRT's custom personality function, which forces LLVM to consider
+        // landing pads as "landing pads for SEH".
+        //
+        // The result is computed lazily and memoized in `self.eh_personality`.
+        if let Some(llpersonality) = self.eh_personality.get() {
+            return llpersonality;
+        }
+        let tcx = self.tcx;
+        let llfn = match tcx.lang_items().eh_personality() {
+            Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+                ty::Instance::resolve(
+                    tcx,
+                    ty::ParamEnv::reveal_all(),
+                    def_id,
+                    tcx.intern_substs(&[]),
+                )
+                .unwrap()
+                .unwrap(),
+            ),
+            _ => {
+                let name = if wants_msvc_seh(self.sess()) {
+                    "__CxxFrameHandler3"
+                } else {
+                    "rust_eh_personality"
+                };
+                // Declared as `i32 (...)` — the exact signature is irrelevant,
+                // only the symbol name matters (see comment above).
+                let fty = self.type_variadic_func(&[], self.type_i32());
+                self.declare_cfn(name, fty)
+            }
+        };
+        attributes::apply_target_cpu_attr(self, llfn);
+        self.eh_personality.set(Some(llfn));
+        llfn
+    }
+
+    fn sess(&self) -> &Session {
+        &self.tcx.sess
+    }
+
+    fn check_overflow(&self) -> bool {
+        self.check_overflow
+    }
+
+    fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+        self.codegen_unit
+    }
+
+    // Statics that must be appended to the `llvm.used` array (see
+    // `create_used_variable` below).
+    fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
+        &self.used_statics
+    }
+
+    fn set_frame_pointer_elimination(&self, llfn: &'ll Value) {
+        attributes::set_frame_pointer_elimination(self, llfn)
+    }
+
+    fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
+        attributes::apply_target_cpu_attr(self, llfn)
+    }
+
+    // Emits the special `llvm.used` global: an appending-linkage array of
+    // `i8*` in the `llvm.metadata` section containing all `used_statics`.
+    fn create_used_variable(&self) {
+        let name = const_cstr!("llvm.used");
+        let section = const_cstr!("llvm.metadata");
+        let array =
+            self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
+
+        unsafe {
+            let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
+            llvm::LLVMSetInitializer(g, array);
+            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
+            llvm::LLVMSetSection(g, section.as_ptr());
+        }
+    }
+
+    // Declares the C `main` entry point, or returns `None` if the symbol
+    // already exists (which is a user error, see below).
+    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+        if self.get_declared_value("main").is_none() {
+            Some(self.declare_cfn("main", fn_type))
+        } else {
+            // If the symbol already exists, it is an error: for example, the user wrote
+            // #[no_mangle] extern "C" fn main(..) {..}
+            // instead of #[start]
+            None
+        }
+    }
+}
+
+impl CodegenCx<'b, 'tcx> {
+    /// Returns the LLVM declaration for the intrinsic named `key`, declaring
+    /// it on first use and caching it in `self.intrinsics` thereafter.
+    /// Panics (ICE) for names not in the known set of `declare_intrinsic`.
+    crate fn get_intrinsic(&self, key: &str) -> &'b Value {
+        if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
+            return v;
+        }
+
+        self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
+    }
+
+    /// Declares the intrinsic `name` with the given signature (variadic with
+    /// no fixed args when `args` is `None`) and records it in the cache.
+    fn insert_intrinsic(
+        &self,
+        name: &'static str,
+        args: Option<&[&'b llvm::Type]>,
+        ret: &'b llvm::Type,
+    ) -> &'b llvm::Value {
+        let fn_ty = if let Some(args) = args {
+            self.type_func(args, ret)
+        } else {
+            self.type_variadic_func(&[], ret)
+        };
+        let f = self.declare_cfn(name, fn_ty);
+        llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
+        self.intrinsics.borrow_mut().insert(name, f);
+        f
+    }
+
+    /// Declares `key` if it is one of the known LLVM intrinsics listed below;
+    /// returns `None` for any other name.
+    fn declare_intrinsic(&self, key: &str) -> Option<&'b Value> {
+        // `ifn!` compares `key` against one known intrinsic name and, on a
+        // match, declares it with the spelled-out signature and returns early.
+        macro_rules! ifn {
+            ($name:expr, fn() -> $ret:expr) => (
+                if key == $name {
+                    return Some(self.insert_intrinsic($name, Some(&[]), $ret));
+                }
+            );
+            ($name:expr, fn(...) -> $ret:expr) => (
+                if key == $name {
+                    return Some(self.insert_intrinsic($name, None, $ret));
+                }
+            );
+            ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
+                if key == $name {
+                    return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));
+                }
+            );
+        }
+        // Shorthand for an unpacked LLVM struct type of the given field types.
+        macro_rules! mk_struct {
+            ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
+        }
+
+        let i8p = self.type_i8p();
+        let void = self.type_void();
+        let i1 = self.type_i1();
+        let t_i8 = self.type_i8();
+        let t_i16 = self.type_i16();
+        let t_i32 = self.type_i32();
+        let t_i64 = self.type_i64();
+        let t_i128 = self.type_i128();
+        let t_f32 = self.type_f32();
+        let t_f64 = self.type_f64();
+
+        // Declares a local `$id_out` bound to the vector type of `$len`
+        // elements of `$elem_ty`.
+        macro_rules! vector_types {
+            ($id_out:ident: $elem_ty:ident, $len:expr) => {
+                let $id_out = self.type_vector($elem_ty, $len);
+            };
+            ($($id_out:ident: $elem_ty:ident, $len:expr;)*) => {
+                $(vector_types!($id_out: $elem_ty, $len);)*
+            }
+        }
+        vector_types! {
+            t_v2f32: t_f32, 2;
+            t_v4f32: t_f32, 4;
+            t_v8f32: t_f32, 8;
+            t_v16f32: t_f32, 16;
+
+            t_v2f64: t_f64, 2;
+            t_v4f64: t_f64, 4;
+            t_v8f64: t_f64, 8;
+        }
+
+        // WebAssembly float-to-int conversion intrinsics.
+        ifn!("llvm.wasm.trunc.saturate.unsigned.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.saturate.unsigned.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.saturate.unsigned.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.saturate.unsigned.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.saturate.signed.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.saturate.signed.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.saturate.signed.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.saturate.signed.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
+
+        ifn!("llvm.trap", fn() -> void);
+        ifn!("llvm.debugtrap", fn() -> void);
+        ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
+        ifn!("llvm.sideeffect", fn() -> void);
+
+        // Scalar and vector floating-point math intrinsics.
+        ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
+        ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32);
+        ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32);
+        ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32);
+        ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32);
+        ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
+        ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64);
+        ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64);
+        ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64);
+
+        ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32);
+        ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32);
+        ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32);
+        ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32);
+        ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64);
+        ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64);
+        ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
+        ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32);
+        ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32);
+        ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32);
+        ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32);
+        ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
+        ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64);
+        ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64);
+        ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
+
+        ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32);
+        ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32);
+        ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32);
+        ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32);
+        ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64);
+        ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64);
+        ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64);
+
+        ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
+
+        ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
+
+        ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
+
+        // Integer bit-manipulation intrinsics.
+        ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
+        ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
+        ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
+        ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
+        ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);
+
+        ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
+        ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
+        ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
+        ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
+        ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);
+
+        ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
+        ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
+        ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
+        ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
+        ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);
+
+        ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
+        ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
+        ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
+        ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);
+
+        ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
+        ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
+        ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
+        ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
+        ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
+
+        ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+        ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+        ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+        ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+        ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+        ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+        ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+        ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+        ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+
+        // Checked arithmetic intrinsics — each returns `{ result, overflow-bit }`.
+        ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
+        ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
+        ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
+        ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
+        ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
+        ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
+        ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
+        ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
+        ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
+        ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
+        ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
+        ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
+        ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
+        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
+
+        ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
+        ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
+        ifn!("llvm.localescape", fn(...) -> void);
+        ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
+        ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
+
+        ifn!("llvm.assume", fn(i1) -> void);
+        ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
+
+        // variadic intrinsics
+        ifn!("llvm.va_start", fn(i8p) -> void);
+        ifn!("llvm.va_end", fn(i8p) -> void);
+        ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
+
+        // Only declared when the corresponding session options request them.
+        if self.sess().opts.debugging_opts.instrument_coverage {
+            ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
+        }
+
+        if self.sess().opts.debuginfo != DebugInfo::None {
+            ifn!("llvm.dbg.declare", fn(self.type_metadata(), self.type_metadata()) -> void);
+            ifn!("llvm.dbg.value", fn(self.type_metadata(), t_i64, self.type_metadata()) -> void);
+        }
+        // `key` matched none of the known intrinsic names.
+        None
+    }
+
+    /// Returns (declaring and caching on first use) the `i8*`-cast typeinfo
+    /// value used for catching exceptions. Only valid on Emscripten-like
+    /// targets (asserted below).
+    crate fn eh_catch_typeinfo(&self) -> &'b Value {
+        if let Some(eh_catch_typeinfo) = self.eh_catch_typeinfo.get() {
+            return eh_catch_typeinfo;
+        }
+        let tcx = self.tcx;
+        assert!(self.sess().target.target.options.is_like_emscripten);
+        let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
+            Some(def_id) => self.get_static(def_id),
+            _ => {
+                // No lang item in this crate graph slice: declare an extern
+                // global and let some upstream crate define it.
+                let ty = self
+                    .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
+                self.declare_global("rust_eh_catch_typeinfo", ty)
+            }
+        };
+        let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
+        self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
+        eh_catch_typeinfo
+    }
+}
+
+impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
+    /// Generates a new symbol name with the given prefix. This symbol name must
+    /// only be used for definitions with `internal` or `private` linkage.
+    ///
+    /// Each call bumps `local_gen_sym_counter`, so successive names are unique
+    /// within this codegen context.
+    pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+        let idx = self.local_gen_sym_counter.get();
+        self.local_gen_sym_counter.set(idx + 1);
+        // Include a '.' character, so there can be no accidental conflicts with
+        // user defined names
+        let mut name = String::with_capacity(prefix.len() + 6);
+        name.push_str(prefix);
+        // `push` appends a single char without the slice machinery of
+        // `push_str(".")` (clippy: `single_char_add_str`).
+        name.push('.');
+        base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+        name
+    }
+}
+
+// Exposes the target data layout straight from the `TyCtxt`.
+impl HasDataLayout for CodegenCx<'ll, 'tcx> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+// Exposes the target specification from the session.
+impl HasTargetSpec for CodegenCx<'ll, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.tcx.sess.target.target
+    }
+}
+
+// Hands out the (copyable) `TyCtxt` handle this context was built from.
+impl ty::layout::HasTyCtxt<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+}
+
+impl LayoutOf for CodegenCx<'ll, 'tcx> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = TyAndLayout<'tcx>;
+
+    /// Computes the layout of `ty` with no source location available;
+    /// delegates to `spanned_layout_of` with a dummy span.
+    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+        self.spanned_layout_of(ty, DUMMY_SP)
+    }
+
+    /// Computes the layout of `ty`, attributing a size-overflow failure to
+    /// `span` as a fatal user-facing error; any other failure is a compiler bug.
+    fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::TyAndLayout {
+        let query_input = ty::ParamEnv::reveal_all().and(ty);
+        self.tcx.layout_of(query_input).unwrap_or_else(|err| match err {
+            // A type whose size overflows is reported against the given span.
+            LayoutError::SizeOverflow(_) => self.sess().span_fatal(span, &err.to_string()),
+            // Everything else indicates an internal compiler error.
+            _ => bug!("failed to get layout for `{}`: {}", ty, err),
+        })
+    }
+}
+
+// Codegen always operates post-monomorphization, so the fully-revealed
+// param env is used unconditionally.
+impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        ty::ParamEnv::reveal_all()
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
new file mode 100644
index 0000000..ec6c177
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -0,0 +1,229 @@
+use crate::common::CodegenCx;
+use crate::coverageinfo;
+use crate::llvm;
+
+use llvm::coverageinfo::CounterMappingRegion;
+use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_llvm::RustString;
+use rustc_middle::mir::coverage::CodeRegion;
+
+use std::ffi::CString;
+
+use tracing::debug;
+
+/// Generates and exports the Coverage Map.
+///
+/// This Coverage Map complies with Coverage Mapping Format version 3 (zero-based encoded as 2),
+/// as defined at [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/llvmorg-8.0.0/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format)
+/// and published in Rust's current (July 2020) fork of LLVM. This version is supported by the
+/// LLVM coverage tools (`llvm-profdata` and `llvm-cov`) bundled with Rust's fork of LLVM.
+///
+/// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with
+/// version 3. Clang's implementation of Coverage Map generation was referenced when implementing
+/// this Rust version, and though the format documentation is very explicit and detailed, some
+/// undocumented details in Clang's implementation (that may or may not be important) were also
+/// replicated for Rust's Coverage Map.
+pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+    // Note: this drains the per-function coverage data out of the context.
+    let function_coverage_map = cx.coverage_context().take_function_coverage_map();
+    if function_coverage_map.is_empty() {
+        // This module has no functions with coverage instrumentation
+        return;
+    }
+
+    let mut mapgen = CoverageMapGenerator::new();
+
+    // Encode coverage mappings and generate function records
+    let mut function_records = Vec::<&'ll llvm::Value>::new();
+    let coverage_mappings_buffer = llvm::build_byte_buffer(|coverage_mappings_buffer| {
+        for (instance, function_coverage) in function_coverage_map.into_iter() {
+            debug!("Generate coverage map for: {:?}", instance);
+
+            let mangled_function_name = cx.tcx.symbol_name(instance).to_string();
+            let function_source_hash = function_coverage.source_hash();
+            let (expressions, counter_regions) =
+                function_coverage.get_expressions_and_counter_regions();
+
+            // The per-function mapping size is the number of bytes this
+            // iteration appended to the shared buffer.
+            let old_len = coverage_mappings_buffer.len();
+            mapgen.write_coverage_mappings(expressions, counter_regions, coverage_mappings_buffer);
+            let mapping_data_size = coverage_mappings_buffer.len() - old_len;
+            debug_assert!(
+                mapping_data_size > 0,
+                "Every `FunctionCoverage` should have at least one counter"
+            );
+
+            let function_record = mapgen.make_function_record(
+                cx,
+                mangled_function_name,
+                function_source_hash,
+                mapping_data_size,
+            );
+            function_records.push(function_record);
+        }
+    });
+
+    // Encode all filenames referenced by counters/expressions in this module
+    let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
+        coverageinfo::write_filenames_section_to_buffer(&mapgen.filenames, filenames_buffer);
+    });
+
+    // Generate the LLVM IR representation of the coverage map and store it in a well-known global
+    mapgen.save_generated_coverage_map(
+        cx,
+        function_records,
+        filenames_buffer,
+        coverage_mappings_buffer,
+    );
+}
+
+/// Accumulates module-wide state while generating per-function coverage
+/// mappings.
+struct CoverageMapGenerator {
+    // De-duplicated, insertion-ordered set of every filename referenced by a
+    // counter region in this module; virtual file mappings index into it.
+    filenames: FxIndexSet<CString>,
+}
+
+impl CoverageMapGenerator {
+    /// Creates a generator with an empty filenames set.
+    fn new() -> Self {
+        Self { filenames: FxIndexSet::default() }
+    }
+
+    /// Using the `expressions` and `counter_regions` collected for the current function, generate
+    /// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
+    /// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
+    /// the given `coverage_mappings` byte buffer, compliant with the LLVM Coverage Mapping format.
+    fn write_coverage_mappings(
+        &mut self,
+        expressions: Vec<CounterExpression>,
+        counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
+        coverage_mappings_buffer: &RustString,
+    ) {
+        let mut counter_regions = counter_regions.collect::<Vec<_>>();
+        if counter_regions.is_empty() {
+            // Nothing to encode for this function; leave the buffer untouched.
+            return;
+        }
+
+        let mut virtual_file_mapping = Vec::new();
+        let mut mapping_regions = Vec::new();
+        let mut current_file_name = None;
+        let mut current_file_id = 0;
+
+        // Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
+        // by filename and position. Capture any new files to compute the `CounterMappingRegion`s
+        // `file_id` (indexing files referenced by the current function), and construct the
+        // function-specific `virtual_file_mapping` from `file_id` to its index in the module's
+        // `filenames` array.
+        counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
+        for (counter, region) in counter_regions {
+            let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
+            let same_file = current_file_name.as_ref().map_or(false, |p| *p == file_name);
+            if !same_file {
+                // Entering a new file: allocate the next function-local
+                // `file_id` and map it to the module-wide filenames index.
+                if current_file_name.is_some() {
+                    current_file_id += 1;
+                }
+                current_file_name = Some(file_name);
+                let c_filename = CString::new(file_name.to_string())
+                    .expect("null error converting filename to C string");
+                debug!("  file_id: {} = '{:?}'", current_file_id, c_filename);
+                let (filenames_index, _) = self.filenames.insert_full(c_filename);
+                virtual_file_mapping.push(filenames_index as u32);
+            }
+            mapping_regions.push(CounterMappingRegion::code_region(
+                counter,
+                current_file_id,
+                start_line,
+                start_col,
+                end_line,
+                end_col,
+            ));
+        }
+
+        // Encode and append the current function's coverage mapping data
+        coverageinfo::write_mapping_to_buffer(
+            virtual_file_mapping,
+            expressions,
+            mapping_regions,
+            coverage_mappings_buffer,
+        );
+    }
+
+    /// Generate and return the function record `Value`: a packed struct of
+    /// (name hash, mapping data size, source hash).
+    fn make_function_record(
+        &mut self,
+        cx: &CodegenCx<'ll, 'tcx>,
+        mangled_function_name: String,
+        function_source_hash: u64,
+        mapping_data_size: usize,
+    ) -> &'ll llvm::Value {
+        let name_ref = coverageinfo::compute_hash(&mangled_function_name);
+        let name_ref_val = cx.const_u64(name_ref);
+        let mapping_data_size_val = cx.const_u32(mapping_data_size as u32);
+        let func_hash_val = cx.const_u64(function_source_hash);
+        cx.const_struct(
+            &[name_ref_val, mapping_data_size_val, func_hash_val],
+            /*packed=*/ true,
+        )
+    }
+
+    /// Combine the filenames and coverage mappings buffers, construct coverage map header and the
+    /// array of function records, and combine everything into the complete coverage map. Save the
+    /// coverage map data into the LLVM IR as a static global using a specific, well-known section
+    /// and name.
+    fn save_generated_coverage_map(
+        self,
+        cx: &CodegenCx<'ll, 'tcx>,
+        function_records: Vec<&'ll llvm::Value>,
+        filenames_buffer: Vec<u8>,
+        mut coverage_mappings_buffer: Vec<u8>,
+    ) {
+        // Concatenate the encoded filenames and encoded coverage mappings, and add additional zero
+        // bytes as-needed to ensure 8-byte alignment.
+        let mut coverage_size = coverage_mappings_buffer.len();
+        let filenames_size = filenames_buffer.len();
+        let remaining_bytes =
+            (filenames_size + coverage_size) % coverageinfo::COVMAP_VAR_ALIGN_BYTES;
+        if remaining_bytes > 0 {
+            let pad = coverageinfo::COVMAP_VAR_ALIGN_BYTES - remaining_bytes;
+            // Zero-fill in place; avoids allocating a temporary `Vec` (as the
+            // previous `append(&mut [0].repeat(pad))` did) just to append it.
+            coverage_mappings_buffer.resize(coverage_size + pad, 0);
+            coverage_size += pad;
+        }
+        let filenames_and_coverage_mappings = [filenames_buffer, coverage_mappings_buffer].concat();
+        let filenames_and_coverage_mappings_val =
+            cx.const_bytes(&filenames_and_coverage_mappings[..]);
+
+        debug!(
+            "cov map: n_records = {}, filenames_size = {}, coverage_size = {}, 0-based version = {}",
+            function_records.len(),
+            filenames_size,
+            coverage_size,
+            coverageinfo::mapping_version()
+        );
+
+        // Create the coverage data header
+        let n_records_val = cx.const_u32(function_records.len() as u32);
+        let filenames_size_val = cx.const_u32(filenames_size as u32);
+        let coverage_size_val = cx.const_u32(coverage_size as u32);
+        let version_val = cx.const_u32(coverageinfo::mapping_version());
+        let cov_data_header_val = cx.const_struct(
+            &[n_records_val, filenames_size_val, coverage_size_val, version_val],
+            /*packed=*/ false,
+        );
+
+        // Create the function records array; layout must match the packed
+        // struct built in `make_function_record`.
+        let name_ref_from_u64 = cx.type_i64();
+        let mapping_data_size_from_u32 = cx.type_i32();
+        let func_hash_from_u64 = cx.type_i64();
+        let function_record_ty = cx.type_struct(
+            &[name_ref_from_u64, mapping_data_size_from_u32, func_hash_from_u64],
+            /*packed=*/ true,
+        );
+        let function_records_val = cx.const_array(function_record_ty, &function_records[..]);
+
+        // Create the complete LLVM coverage data value to add to the LLVM IR
+        let cov_data_val = cx.const_struct(
+            &[cov_data_header_val, function_records_val, filenames_and_coverage_mappings_val],
+            /*packed=*/ false,
+        );
+
+        // Save the coverage data value to LLVM IR
+        coverageinfo::save_map_to_mod(cx, cov_data_val);
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
new file mode 100644
index 0000000..2bd37bf
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -0,0 +1,179 @@
+use crate::llvm;
+
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+
+use libc::c_uint;
+use llvm::coverageinfo::CounterMappingRegion;
+use rustc_codegen_ssa::coverageinfo::map::{CounterExpression, FunctionCoverage};
+use rustc_codegen_ssa::traits::{
+ BaseTypeMethods, CoverageInfoBuilderMethods, CoverageInfoMethods, MiscMethods, StaticMethods,
+};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_llvm::RustString;
+use rustc_middle::mir::coverage::{
+ CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionIndex, Op,
+};
+use rustc_middle::ty::Instance;
+
+use std::cell::RefCell;
+use std::ffi::CString;
+
+use tracing::debug;
+
+pub mod mapgen;
+
+// Alignment, in bytes, applied to the coverage-map global emitted by
+// `save_map_to_mod` (see `llvm::set_alignment` below).
+const COVMAP_VAR_ALIGN_BYTES: usize = 8;
+
+/// A context object for maintaining all state needed by the coverageinfo module.
+pub struct CrateCoverageContext<'tcx> {
+    // Coverage region data for each instrumented function identified by DefId.
+    pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage>>,
+}
+
+impl<'tcx> CrateCoverageContext<'tcx> {
+    /// Creates a context with an empty function-coverage map.
+    pub fn new() -> Self {
+        Self { function_coverage_map: Default::default() }
+    }
+
+    /// Takes ownership of all per-function coverage data accumulated so far,
+    /// leaving an empty map in its place.
+    pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage> {
+        self.function_coverage_map.replace(FxHashMap::default())
+    }
+}
+
+impl CoverageInfoMethods for CodegenCx<'ll, 'tcx> {
+    /// Generates and saves the crate's coverage map by delegating to
+    /// `mapgen::finalize`.
+    fn coverageinfo_finalize(&self) {
+        mapgen::finalize(self)
+    }
+}
+
+impl CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
+    /// Calls llvm::createPGOFuncNameVar() with the given function instance's mangled function name.
+    /// The LLVM API returns an llvm::GlobalVariable containing the function name, with the specific
+    /// variable name and linkage required by LLVM InstrProf source-based coverage instrumentation.
+    fn create_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value {
+        let llfn = self.cx.get_fn(instance);
+        // `CString::new` only fails on interior NUL bytes; a mangled symbol
+        // name should never contain one.
+        let mangled_fn_name = CString::new(self.tcx.symbol_name(instance).name)
+            .expect("error converting function name to C string");
+        unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
+    }
+
+    /// Records a counter (with the given `id`) covering `region`, under the
+    /// `FunctionCoverage` entry for `instance` (created on first use).
+    fn add_counter_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        function_source_hash: u64,
+        id: CounterValueReference,
+        region: CodeRegion,
+    ) {
+        debug!(
+            "adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \
+            at {:?}",
+            instance, function_source_hash, id, region,
+        );
+        let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
+        coverage_regions
+            .entry(instance)
+            .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+            .add_counter(function_source_hash, id, region);
+    }
+
+    /// Records a counter expression `lhs op rhs` (with the given `id`)
+    /// covering `region`, under the `FunctionCoverage` entry for `instance`
+    /// (created on first use).
+    fn add_counter_expression_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        id: InjectedExpressionIndex,
+        lhs: ExpressionOperandId,
+        op: Op,
+        rhs: ExpressionOperandId,
+        region: CodeRegion,
+    ) {
+        debug!(
+            "adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \
+            at {:?}",
+            instance, id, lhs, op, rhs, region,
+        );
+        let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
+        coverage_regions
+            .entry(instance)
+            .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+            .add_counter_expression(id, lhs, op, rhs, region);
+    }
+
+    /// Records `region` as unreachable code, under the `FunctionCoverage`
+    /// entry for `instance` (created on first use).
+    fn add_unreachable_region(&mut self, instance: Instance<'tcx>, region: CodeRegion) {
+        debug!(
+            "adding unreachable code to coverage_regions: instance={:?}, at {:?}",
+            instance, region,
+        );
+        let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
+        coverage_regions
+            .entry(instance)
+            .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+            .add_unreachable_region(region);
+    }
+}
+
+/// Serializes the given filenames into the coverage filenames section format,
+/// appending the result to `buffer` via the LLVM coverage-writer FFI.
+pub(crate) fn write_filenames_section_to_buffer<'a>(
+    filenames: impl IntoIterator<Item = &'a CString>,
+    buffer: &RustString,
+) {
+    // Collect raw pointers up front; the borrowed `CString`s outlive this
+    // function, so the pointers remain valid for the duration of the FFI call.
+    let c_str_vec = filenames.into_iter().map(|cstring| cstring.as_ptr()).collect::<Vec<_>>();
+    unsafe {
+        llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
+            c_str_vec.as_ptr(),
+            c_str_vec.len(),
+            buffer,
+        );
+    }
+}
+
+/// Serializes one function's coverage mapping data (virtual file mapping,
+/// counter expressions, and mapping regions) into `buffer` via the LLVM
+/// coverage-writer FFI.
+pub(crate) fn write_mapping_to_buffer(
+    virtual_file_mapping: Vec<u32>,
+    expressions: Vec<CounterExpression>,
+    // `mut` because the FFI entry point takes a mutable pointer to the regions.
+    mut mapping_regions: Vec<CounterMappingRegion>,
+    buffer: &RustString,
+) {
+    unsafe {
+        llvm::LLVMRustCoverageWriteMappingToBuffer(
+            virtual_file_mapping.as_ptr(),
+            virtual_file_mapping.len() as c_uint,
+            expressions.as_ptr(),
+            expressions.len() as c_uint,
+            mapping_regions.as_mut_ptr(),
+            mapping_regions.len() as c_uint,
+            buffer,
+        );
+    }
+}
+
+/// Hashes `name` with LLVM's coverage hashing function.
+pub(crate) fn compute_hash(name: &str) -> u64 {
+    // `CString::new` fails only if `name` contains an interior NUL byte.
+    let name = CString::new(name).expect("null error converting hashable name to C string");
+    unsafe { llvm::LLVMRustCoverageComputeHash(name.as_ptr()) }
+}
+
+/// Returns the coverage mapping format version reported by LLVM
+/// (0-based; see the header construction in `mapgen`).
+pub(crate) fn mapping_version() -> u32 {
+    unsafe { llvm::LLVMRustCoverageMappingVersion() }
+}
+
+/// Saves `cov_data_val` into the LLVM module as the coverage-map global, using
+/// the variable name, section name, linkage, and alignment that LLVM's
+/// coverage tooling expects.
+pub(crate) fn save_map_to_mod<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    cov_data_val: &'ll llvm::Value,
+) {
+    // Ask LLVM for the (platform-appropriate) name of the covmap variable...
+    let covmap_var_name = llvm::build_string(|s| unsafe {
+        llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
+    })
+    .expect("Rust Coverage Mapping var name failed UTF-8 conversion");
+    debug!("covmap var name: {:?}", covmap_var_name);
+
+    // ...and for the object-file section the global must be placed in.
+    let covmap_section_name = llvm::build_string(|s| unsafe {
+        llvm::LLVMRustCoverageWriteSectionNameToString(cx.llmod, s);
+    })
+    .expect("Rust Coverage section name failed UTF-8 conversion");
+    debug!("covmap section name: {:?}", covmap_section_name);
+
+    let llglobal = llvm::add_global(cx.llmod, cx.val_ty(cov_data_val), &covmap_var_name);
+    llvm::set_initializer(llglobal, cov_data_val);
+    llvm::set_global_constant(llglobal, true);
+    llvm::set_linkage(llglobal, llvm::Linkage::InternalLinkage);
+    llvm::set_section(llglobal, &covmap_section_name);
+    llvm::set_alignment(llglobal, COVMAP_VAR_ALIGN_BYTES);
+    // Mark the global as "used" so optimizations do not strip it even though
+    // nothing in the module references it.
+    cx.add_used_global(llglobal);
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
new file mode 100644
index 0000000..7f47b61
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -0,0 +1,95 @@
+use super::metadata::{file_metadata, UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
+use super::utils::DIB;
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext};
+use rustc_codegen_ssa::traits::*;
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{DIScope, DISubprogram};
+use rustc_middle::mir::{Body, SourceScope};
+use rustc_session::config::DebugInfo;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::Idx;
+
+/// Produces DIScope DIEs for each MIR Scope which has variables defined in it.
+pub fn compute_mir_scopes(
+ cx: &CodegenCx<'ll, '_>,
+ mir: &Body<'_>,
+ fn_metadata: &'ll DISubprogram,
+ debug_context: &mut FunctionDebugContext<&'ll DIScope>,
+) {
+ // Find all the scopes with variables defined in them.
+ let mut has_variables = BitSet::new_empty(mir.source_scopes.len());
+
+ // Only consider variables when they're going to be emitted.
+ // FIXME(eddyb) don't even allocate `has_variables` otherwise.
+ if cx.sess().opts.debuginfo == DebugInfo::Full {
+ // FIXME(eddyb) take into account that arguments always have debuginfo,
+ // irrespective of their name (assuming full debuginfo is enabled).
+ // NOTE(eddyb) actually, on second thought, those are always in the
+ // function scope, which always exists.
+ for var_debug_info in &mir.var_debug_info {
+ has_variables.insert(var_debug_info.source_info.scope);
+ }
+ }
+
+ // Instantiate all scopes.
+ for idx in 0..mir.source_scopes.len() {
+ let scope = SourceScope::new(idx);
+ make_mir_scope(cx, &mir, fn_metadata, &has_variables, debug_context, scope);
+ }
+}
+
+/// Recursively instantiates the debuginfo scope for `scope` (and, first, for
+/// all of its parents), caching results in `debug_context.scopes`.
+fn make_mir_scope(
+    cx: &CodegenCx<'ll, '_>,
+    mir: &Body<'_>,
+    fn_metadata: &'ll DISubprogram,
+    has_variables: &BitSet<SourceScope>,
+    debug_context: &mut FunctionDebugContext<&'ll DISubprogram>,
+    scope: SourceScope,
+) {
+    // Already instantiated (possibly while recursing for a child scope).
+    if debug_context.scopes[scope].is_valid() {
+        return;
+    }
+
+    let scope_data = &mir.source_scopes[scope];
+    let parent_scope = if let Some(parent) = scope_data.parent_scope {
+        // Ensure the parent's DebugScope exists before reading it.
+        make_mir_scope(cx, mir, fn_metadata, has_variables, debug_context, parent);
+        debug_context.scopes[parent]
+    } else {
+        // The root is the function itself.
+        let loc = cx.lookup_debug_loc(mir.span.lo());
+        debug_context.scopes[scope] = DebugScope {
+            scope_metadata: Some(fn_metadata),
+            file_start_pos: loc.file.start_pos,
+            file_end_pos: loc.file.end_pos,
+        };
+        return;
+    };
+
+    if !has_variables.contains(scope) {
+        // Do not create a DIScope if there are no variables
+        // defined in this MIR Scope, to avoid debuginfo bloat.
+        debug_context.scopes[scope] = parent_scope;
+        return;
+    }
+
+    let loc = cx.lookup_debug_loc(scope_data.span.lo());
+    let file_metadata = file_metadata(cx, &loc.file, debug_context.defining_crate);
+
+    // Emit this scope as a DILexicalBlock nested inside its parent scope.
+    let scope_metadata = unsafe {
+        Some(llvm::LLVMRustDIBuilderCreateLexicalBlock(
+            DIB(cx),
+            parent_scope.scope_metadata.unwrap(),
+            file_metadata,
+            loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
+            loc.col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
+        ))
+    };
+    debug_context.scopes[scope] = DebugScope {
+        scope_metadata,
+        file_start_pos: loc.file.start_pos,
+        file_end_pos: loc.file.end_pos,
+    };
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs b/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
new file mode 100644
index 0000000..b3a8fa2
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/doc.rs
@@ -0,0 +1,179 @@
+//! # Debug Info Module
+//!
+//! This module serves the purpose of generating debug symbols. We use LLVM's
+//! [source level debugging](https://llvm.org/docs/SourceLevelDebugging.html)
+//! features for generating the debug information. The general principle is
+//! this:
+//!
+//! Given the right metadata in the LLVM IR, the LLVM code generator is able to
+//! create DWARF debug symbols for the given code. The
+//! [metadata](https://llvm.org/docs/LangRef.html#metadata-type) is structured
+//! much like DWARF *debugging information entries* (DIE), representing type
+//! information such as datatype layout, function signatures, block layout,
+//! variable location and scope information, etc. It is the purpose of this
+//! module to generate correct metadata and insert it into the LLVM IR.
+//!
+//! As the exact format of metadata trees may change between different LLVM
+//! versions, we now use LLVM
+//! [DIBuilder](https://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html)
+//! to create metadata where possible. This will hopefully ease the adaptation
+//! of this module to future LLVM versions.
+//!
+//! The public API of the module is a set of functions that will insert the
+//! correct metadata into the LLVM IR when called with the right parameters.
+//! The module is thus driven from an outside client with functions like
+//! `debuginfo::create_local_var_metadata(bx: block, local: &ast::local)`.
+//!
+//! Internally the module will try to reuse already created metadata by
+//! utilizing a cache. The way to get a shared metadata node when needed is
+//! thus to just call the corresponding function in this module:
+//!
+//! let file_metadata = file_metadata(crate_context, path);
+//!
+//! The function will take care of probing the cache for an existing node for
+//! that exact file path.
+//!
+//! All private state used by the module is stored within either the
+//! CrateDebugContext struct (owned by the CodegenCx) or the
+//! FunctionDebugContext (owned by the FunctionCx).
+//!
+//! This file consists of three conceptual sections:
+//! 1. The public interface of the module
+//! 2. Module-internal metadata creation functions
+//! 3. Minor utility functions
+//!
+//!
+//! ## Recursive Types
+//!
+//! Some kinds of types, such as structs and enums can be recursive. That means
+//! that the type definition of some type X refers to some other type which in
+//! turn (transitively) refers to X. This introduces cycles into the type
+//! referral graph. A naive algorithm doing an on-demand, depth-first traversal
+//! of this graph when describing types, can get trapped in an endless loop
+//! when it reaches such a cycle.
+//!
+//! For example, the following simple type for a singly-linked list...
+//!
+//! ```
+//! struct List {
+//! value: i32,
+//! tail: Option<Box<List>>,
+//! }
+//! ```
+//!
+//! will generate the following callstack with a naive DFS algorithm:
+//!
+//! ```
+//! describe(t = List)
+//! describe(t = i32)
+//! describe(t = Option<Box<List>>)
+//! describe(t = Box<List>)
+//! describe(t = List) // at the beginning again...
+//! ...
+//! ```
+//!
+//! To break cycles like these, we use "forward declarations". That is, when
+//! the algorithm encounters a possibly recursive type (any struct or enum), it
+//! immediately creates a type description node and inserts it into the cache
+//! *before* describing the members of the type. This type description is just
+//! a stub (as type members are not described and added to it yet) but it
+//! allows the algorithm to already refer to the type. After the stub is
+//! inserted into the cache, the algorithm continues as before. If it now
+//! encounters a recursive reference, it will hit the cache and does not try to
+//! describe the type anew.
+//!
+//! This behavior is encapsulated in the 'RecursiveTypeDescription' enum,
+//! which represents a kind of continuation, storing all state needed to
+//! continue traversal at the type members after the type has been registered
+//! with the cache. (This implementation approach might be a tad over-
+//! engineered and may change in the future)
+//!
+//!
+//! ## Source Locations and Line Information
+//!
+//! In addition to data type descriptions the debugging information must also
+//! allow to map machine code locations back to source code locations in order
+//! to be useful. This functionality is also handled in this module. The
+//! following functions allow to control source mappings:
+//!
+//! + set_source_location()
+//! + clear_source_location()
+//! + start_emitting_source_locations()
+//!
+//! `set_source_location()` allows to set the current source location. All IR
+//! instructions created after a call to this function will be linked to the
+//! given source location, until another location is specified with
+//! `set_source_location()` or the source location is cleared with
+//! `clear_source_location()`. In the latter case, subsequent IR instructions
+//! will not be linked to any source location. As you can see, this is a
+//! stateful API (mimicking the one in LLVM), so be careful with source
+//! locations set by previous calls. It's probably best to not rely on any
+//! specific state being present at a given point in code.
+//!
+//! One topic that deserves some extra attention is *function prologues*. At
+//! the beginning of a function's machine code there are typically a few
+//! instructions for loading argument values into allocas and checking if
+//! there's enough stack space for the function to execute. This *prologue* is
+//! not visible in the source code and LLVM puts a special PROLOGUE END marker
+//! into the line table at the first non-prologue instruction of the function.
+//! In order to find out where the prologue ends, LLVM looks for the first
+//! instruction in the function body that is linked to a source location. So,
+//! when generating prologue instructions we have to make sure that we don't
+//! emit source location information until the 'real' function body begins. For
+//! this reason, source location emission is disabled by default for any new
+//! function being codegened and is only activated after a call to the third
+//! function from the list above, `start_emitting_source_locations()`. This
+//! function should be called right before regularly starting to codegen the
+//! top-level block of the given function.
+//!
+//! There is one exception to the above rule: `llvm.dbg.declare` instruction
+//! must be linked to the source location of the variable being declared. For
+//! function parameters these `llvm.dbg.declare` instructions typically occur
+//! in the middle of the prologue, however, they are ignored by LLVM's prologue
+//! detection. The `create_argument_metadata()` and related functions take care
+//! of linking the `llvm.dbg.declare` instructions to the correct source
+//! locations even while source location emission is still disabled, so there
+//! is no need to do anything special with source location handling here.
+//!
+//! ## Unique Type Identification
+//!
+//! In order for link-time optimization to work properly, LLVM needs a unique
+//! type identifier that tells it across compilation units which types are the
+//! same as others. This type identifier is created by
+//! `TypeMap::get_unique_type_id_of_type()` using the following algorithm:
+//!
+//! (1) Primitive types have their name as ID
+//! (2) Structs, enums and traits have a multipart identifier
+//!
+//! (1) The first part is the SVH (strict version hash) of the crate they
+//! were originally defined in
+//!
+//! (2) The second part is the ast::NodeId of the definition in their
+//! original crate
+//!
+//! (3) The final part is a concatenation of the type IDs of their concrete
+//! type arguments if they are generic types.
+//!
+//! (3) Tuple-, pointer and function types are structurally identified, which
+//! means that they are equivalent if their component types are equivalent
+//! (i.e., (i32, i32) is the same regardless in which crate it is used).
+//!
+//! This algorithm also provides a stable ID for types that are defined in one
+//! crate but instantiated from metadata within another crate. We just have to
+//! take care to always map crate and `NodeId`s back to the original crate
+//! context.
+//!
+//! As a side-effect these unique type IDs also help to solve a problem arising
+//! from lifetime parameters. Since lifetime parameters are completely omitted
+//! in debuginfo, more than one `Ty` instance may map to the same debuginfo
+//! type metadata, that is, some struct `Struct<'a>` may have N instantiations
+//! with different concrete substitutions for `'a`, and thus there will be N
+//! `Ty` instances for the type `Struct<'a>` even though it is not generic
+//! otherwise. Unfortunately this means that we cannot use `ty::type_id()` as
+//! cheap identifier for type metadata -- we have done this in the past, but it
+//! led to unnecessary metadata duplication in the best case and LLVM
+//! assertions in the worst. However, the unique type ID as described above
+//! *can* be used as identifier. Since it is comparatively expensive to
+//! construct, though, `ty::type_id()` is still used additionally as an
+//! optimization for cases where the exact same type has been seen before
+//! (which is most of the time).
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
new file mode 100644
index 0000000..29edd660
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -0,0 +1,71 @@
+// .debug_gdb_scripts binary section.
+
+use crate::llvm;
+
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use crate::value::Value;
+use rustc_codegen_ssa::traits::*;
+use rustc_middle::bug;
+use rustc_session::config::DebugInfo;
+
+use rustc_span::symbol::sym;
+
+/// Inserts a side-effect free instruction sequence that makes sure that the
+/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
+/// Inserts a side-effect free instruction sequence that makes sure that the
+/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
+pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
+    if needs_gdb_debug_scripts_section(bx) {
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
+        // Load just the first byte as that's all that's necessary to force
+        // LLVM to keep around the reference to the global.
+        let indices = [bx.const_i32(0), bx.const_i32(0)];
+        let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
+        let volative_load_instruction = bx.volatile_load(element);
+        unsafe {
+            // Alignment 1 matches the alignment of the global itself (see
+            // `get_or_insert_gdb_debug_scripts_section_global`).
+            llvm::LLVMSetAlignment(volative_load_instruction, 1);
+        }
+    }
+}
+
+/// Allocates the global variable responsible for the .debug_gdb_scripts binary
+/// section.
+pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) -> &'ll Value {
+    // NUL-terminated form for the raw LLVM-C lookup; the plain form (with the
+    // terminator stripped) is used for `define_global` below.
+    let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0";
+    let section_var_name = &c_section_var_name[..c_section_var_name.len() - 1];
+
+    let section_var =
+        unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, c_section_var_name.as_ptr().cast()) };
+
+    // Reuse the global if it already exists; otherwise define it.
+    section_var.unwrap_or_else(|| {
+        let section_name = b".debug_gdb_scripts\0";
+        let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
+
+        unsafe {
+            let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64);
+
+            let section_var = cx
+                .define_global(section_var_name, llvm_type)
+                .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
+            llvm::LLVMSetSection(section_var, section_name.as_ptr().cast());
+            llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
+            llvm::LLVMSetGlobalConstant(section_var, llvm::True);
+            llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);
+            llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
+            // This should make sure that the whole section is not larger than
+            // the string it contains. Otherwise we get a warning from GDB.
+            llvm::LLVMSetAlignment(section_var, 1);
+            section_var
+        }
+    })
+}
+
+/// Returns `true` if the `.debug_gdb_scripts` section should be emitted:
+/// debuginfo is enabled, the target opts in to emitting GDB scripts, and the
+/// crate does not carry the `#[omit_gdb_pretty_printer_section]` attribute.
+pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
+    let omit_gdb_pretty_printer_section = cx
+        .tcx
+        .sess
+        .contains_name(&cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section);
+
+    !omit_gdb_pretty_printer_section
+        && cx.sess().opts.debuginfo != DebugInfo::None
+        && cx.sess().target.target.options.emit_debug_gdb_scripts
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
new file mode 100644
index 0000000..987149c
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -0,0 +1,2584 @@
+use self::EnumTagInfo::*;
+use self::MemberDescriptionFactory::*;
+use self::RecursiveTypeDescription::*;
+
+use super::namespace::mangled_name_of_instance;
+use super::type_names::compute_debuginfo_type_name;
+use super::utils::{
+ create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit, DIB,
+};
+use super::CrateDebugContext;
+
+use crate::abi;
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{
+ DIArray, DICompositeType, DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType,
+ DebugEmissionKind,
+};
+use crate::value::Value;
+
+use rustc_ast as ast;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::const_cstr;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_fs_util::path_to_c_string;
+use rustc_hir::def::CtorKind;
+use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::ich::NodeIdHashingMode;
+use rustc_middle::mir::interpret::truncate;
+use rustc_middle::mir::{self, Field, GeneratorLayout};
+use rustc_middle::ty::layout::{self, IntegerExt, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::{self, AdtKind, GeneratorSubsts, ParamEnv, Ty, TyCtxt};
+use rustc_middle::{bug, span_bug};
+use rustc_session::config::{self, DebugInfo};
+use rustc_span::symbol::{Interner, Symbol};
+use rustc_span::{self, SourceFile, SourceFileHash, Span};
+use rustc_target::abi::{Abi, Align, HasDataLayout, Integer, LayoutOf, TagEncoding};
+use rustc_target::abi::{Int, Pointer, F32, F64};
+use rustc_target::abi::{Primitive, Size, VariantIdx, Variants};
+use tracing::debug;
+
+use libc::{c_longlong, c_uint};
+use std::collections::hash_map::Entry;
+use std::fmt::{self, Write};
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::path::{Path, PathBuf};
+use std::ptr;
+
+// `llvm::Metadata` nodes are opaque to Rust, so equality, hashing, and debug
+// printing are all defined in terms of pointer identity.
+impl PartialEq for llvm::Metadata {
+    fn eq(&self, other: &Self) -> bool {
+        ptr::eq(self, other)
+    }
+}
+
+impl Eq for llvm::Metadata {}
+
+impl Hash for llvm::Metadata {
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        (self as *const Self).hash(hasher);
+    }
+}
+
+impl fmt::Debug for llvm::Metadata {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Formats as the node's address.
+        (self as *const Self).fmt(f)
+    }
+}
+
+// From DWARF 5.
+// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1.
+const DW_LANG_RUST: c_uint = 0x1c;
+// DWARF base-type encodings (DW_ATE_*).
+#[allow(non_upper_case_globals)]
+const DW_ATE_boolean: c_uint = 0x02;
+#[allow(non_upper_case_globals)]
+const DW_ATE_float: c_uint = 0x04;
+#[allow(non_upper_case_globals)]
+const DW_ATE_signed: c_uint = 0x05;
+#[allow(non_upper_case_globals)]
+const DW_ATE_unsigned: c_uint = 0x07;
+#[allow(non_upper_case_globals)]
+const DW_ATE_unsigned_char: c_uint = 0x08;
+
+// A line/column of 0 is passed to LLVM's DIBuilder to indicate an unknown
+// source location.
+pub const UNKNOWN_LINE_NUMBER: c_uint = 0;
+pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
+
+pub const NO_SCOPE_METADATA: Option<&DIScope> = None;
+
+/// An interned string that uniquely identifies a type for debuginfo purposes
+/// (see the "Unique Type Identification" section in `doc.rs`). The wrapped
+/// `Symbol` is a key into `TypeMap::unique_id_interner`.
+#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)]
+pub struct UniqueTypeId(Symbol);
+
+/// The `TypeMap` is where the `CrateDebugContext` holds the type metadata nodes
+/// created so far. The metadata nodes are indexed by `UniqueTypeId`, and, for
+/// faster lookup, also by `Ty`. The `TypeMap` is responsible for creating
+/// `UniqueTypeId`s.
+// `Default` yields a `TypeMap` with all four tables empty.
+#[derive(Default)]
+pub struct TypeMap<'ll, 'tcx> {
+    /// The `UniqueTypeId`s created so far.
+    unique_id_interner: Interner,
+    /// A map from `UniqueTypeId` to debuginfo metadata for that type. This is a 1:1 mapping.
+    unique_id_to_metadata: FxHashMap<UniqueTypeId, &'ll DIType>,
+    /// A map from types to debuginfo metadata. This is an N:1 mapping.
+    type_to_metadata: FxHashMap<Ty<'tcx>, &'ll DIType>,
+    /// A map from types to `UniqueTypeId`. This is an N:1 mapping.
+    type_to_unique_id: FxHashMap<Ty<'tcx>, UniqueTypeId>,
+}
+
+impl TypeMap<'ll, 'tcx> {
+    /// Adds a Ty to metadata mapping to the TypeMap. The method will fail if
+    /// the mapping already exists.
+    fn register_type_with_metadata(&mut self, type_: Ty<'tcx>, metadata: &'ll DIType) {
+        if self.type_to_metadata.insert(type_, metadata).is_some() {
+            bug!("type metadata for `Ty` '{}' is already in the `TypeMap`!", type_);
+        }
+    }
+
+    /// Removes a `Ty`-to-metadata mapping.
+    /// This is useful when computing the metadata for a potentially
+    /// recursive type (e.g., a function pointer of the form:
+    ///
+    /// fn foo() -> impl Copy { foo }
+    ///
+    /// This kind of type cannot be properly represented
+    /// via LLVM debuginfo. As a workaround,
+    /// we register a temporary Ty to metadata mapping
+    /// for the function before we compute its actual metadata.
+    /// If the metadata computation ends up recursing back to the
+    /// original function, it will use the temporary mapping
+    /// for the inner self-reference, preventing us from
+    /// recursing forever.
+    ///
+    /// This function is used to remove the temporary metadata
+    /// mapping after we've computed the actual metadata.
+    fn remove_type(&mut self, type_: Ty<'tcx>) {
+        if self.type_to_metadata.remove(type_).is_none() {
+            bug!("type metadata `Ty` '{}' is not in the `TypeMap`!", type_);
+        }
+    }
+
+    /// Adds a `UniqueTypeId` to metadata mapping to the `TypeMap`. The method will
+    /// fail if the mapping already exists.
+    fn register_unique_id_with_metadata(
+        &mut self,
+        unique_type_id: UniqueTypeId,
+        metadata: &'ll DIType,
+    ) {
+        if self.unique_id_to_metadata.insert(unique_type_id, metadata).is_some() {
+            bug!(
+                "type metadata for unique ID '{}' is already in the `TypeMap`!",
+                self.get_unique_type_id_as_string(unique_type_id)
+            );
+        }
+    }
+
+    /// Looks up the metadata registered for `type_`, if any.
+    fn find_metadata_for_type(&self, type_: Ty<'tcx>) -> Option<&'ll DIType> {
+        self.type_to_metadata.get(&type_).cloned()
+    }
+
+    /// Looks up the metadata registered under `unique_type_id`, if any.
+    fn find_metadata_for_unique_id(&self, unique_type_id: UniqueTypeId) -> Option<&'ll DIType> {
+        self.unique_id_to_metadata.get(&unique_type_id).cloned()
+    }
+
+    /// Gets the string representation of a `UniqueTypeId`. This method will fail if
+    /// the ID is unknown.
+    fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> &str {
+        let UniqueTypeId(interner_key) = unique_type_id;
+        self.unique_id_interner.get(interner_key)
+    }
+
+    /// Gets the `UniqueTypeId` for the given type. If the `UniqueTypeId` for the given
+    /// type has been requested before, this is just a table lookup. Otherwise, an
+    /// ID will be generated and stored for later lookup.
+    fn get_unique_type_id_of_type<'a>(
+        &mut self,
+        cx: &CodegenCx<'a, 'tcx>,
+        type_: Ty<'tcx>,
+    ) -> UniqueTypeId {
+        // Let's see if we already have something in the cache.
+        if let Some(unique_type_id) = self.type_to_unique_id.get(&type_).cloned() {
+            return unique_type_id;
+        }
+        // If not, generate one.
+
+        // The hasher we are using to generate the UniqueTypeId. We want
+        // something that provides more than the 64 bits of the DefaultHasher.
+        let mut hasher = StableHasher::new();
+        let mut hcx = cx.tcx.create_stable_hashing_context();
+        // Regions are erased first: lifetimes are omitted from debuginfo, so
+        // types differing only in lifetimes must map to the same ID.
+        let type_ = cx.tcx.erase_regions(&type_);
+        hcx.while_hashing_spans(false, |hcx| {
+            hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+                type_.hash_stable(hcx, &mut hasher);
+            });
+        });
+        // Render the fingerprint as hex so it can be interned as a string.
+        let unique_type_id = hasher.finish::<Fingerprint>().to_hex();
+
+        let key = self.unique_id_interner.intern(&unique_type_id);
+        self.type_to_unique_id.insert(type_, UniqueTypeId(key));
+
+        UniqueTypeId(key)
+    }
+
+    /// Gets the `UniqueTypeId` for an enum variant. Enum variants are not really
+    /// types of their own, so they need special handling. We still need a
+    /// `UniqueTypeId` for them, since to debuginfo they *are* real types.
+    fn get_unique_type_id_of_enum_variant<'a>(
+        &mut self,
+        cx: &CodegenCx<'a, 'tcx>,
+        enum_type: Ty<'tcx>,
+        variant_name: &str,
+    ) -> UniqueTypeId {
+        // The variant's ID is derived from the enclosing enum's ID plus the
+        // variant name.
+        let enum_type_id = self.get_unique_type_id_of_type(cx, enum_type);
+        let enum_variant_type_id =
+            format!("{}::{}", self.get_unique_type_id_as_string(enum_type_id), variant_name);
+        let interner_key = self.unique_id_interner.intern(&enum_variant_type_id);
+        UniqueTypeId(interner_key)
+    }
+
+    /// Gets the unique type ID string for an enum variant part.
+    /// Variant parts are not types and shouldn't really have their own ID,
+    /// but it makes `set_members_of_composite_type()` simpler.
+    fn get_unique_type_id_str_of_enum_variant_part(
+        &mut self,
+        enum_type_id: UniqueTypeId,
+    ) -> String {
+        format!("{}_variant_part", self.get_unique_type_id_as_string(enum_type_id))
+    }
+}
+
+/// A description of some recursive type. It can either be already finished (as
+/// with `FinalMetadata`) or it is not yet finished, but contains all information
+/// needed to generate the missing parts of the description. See the
+/// documentation section on Recursive Types at the top of this file for more
+/// information.
+enum RecursiveTypeDescription<'ll, 'tcx> {
+    /// A stub has been registered in the `TypeMap`, but the member
+    /// descriptions still need to be created and attached (via `finalize()`).
+    UnfinishedMetadata {
+        unfinished_type: Ty<'tcx>,
+        unique_type_id: UniqueTypeId,
+        metadata_stub: &'ll DICompositeType,
+        member_holding_stub: &'ll DICompositeType,
+        member_description_factory: MemberDescriptionFactory<'ll, 'tcx>,
+    },
+    /// The type description is already complete; nothing left to do.
+    FinalMetadata(&'ll DICompositeType),
+}
+
+/// Registers `metadata_stub` in the `TypeMap` (under both the type and its
+/// unique ID, so recursive references resolve to the stub) and bundles all the
+/// pieces into an `UnfinishedMetadata` for later completion by `finalize()`.
+fn create_and_register_recursive_type_forward_declaration(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unfinished_type: Ty<'tcx>,
+    unique_type_id: UniqueTypeId,
+    metadata_stub: &'ll DICompositeType,
+    member_holding_stub: &'ll DICompositeType,
+    member_description_factory: MemberDescriptionFactory<'ll, 'tcx>,
+) -> RecursiveTypeDescription<'ll, 'tcx> {
+    // Insert the stub into the `TypeMap` in order to allow for recursive references.
+    let mut type_map = debug_context(cx).type_map.borrow_mut();
+    type_map.register_unique_id_with_metadata(unique_type_id, metadata_stub);
+    type_map.register_type_with_metadata(unfinished_type, metadata_stub);
+
+    UnfinishedMetadata {
+        unfinished_type,
+        unique_type_id,
+        metadata_stub,
+        member_holding_stub,
+        member_description_factory,
+    }
+}
+
impl RecursiveTypeDescription<'ll, 'tcx> {
    /// Finishes up the description of the type in question (mostly by providing
    /// descriptions of the fields of the given type) and returns the final type
    /// metadata.
    fn finalize(&self, cx: &CodegenCx<'ll, 'tcx>) -> MetadataCreationResult<'ll> {
        match *self {
            FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false),
            UnfinishedMetadata {
                unfinished_type,
                unique_type_id,
                metadata_stub,
                member_holding_stub,
                ref member_description_factory,
            } => {
                // Make sure that we have a forward declaration of the type in
                // the TypeMap so that recursive references are possible. This
                // will always be the case if the RecursiveTypeDescription has
                // been properly created through the
                // `create_and_register_recursive_type_forward_declaration()`
                // function.
                {
                    // Scoped so the immutable borrow of the TypeMap is dropped
                    // before `create_member_descriptions` may re-borrow it.
                    let type_map = debug_context(cx).type_map.borrow();
                    if type_map.find_metadata_for_unique_id(unique_type_id).is_none()
                        || type_map.find_metadata_for_type(unfinished_type).is_none()
                    {
                        bug!(
                            "Forward declaration of potentially recursive type \
                              '{:?}' was not found in TypeMap!",
                            unfinished_type
                        );
                    }
                }

                // ... then create the member descriptions ...
                let member_descriptions = member_description_factory.create_member_descriptions(cx);

                // ... and attach them to the stub to complete it.
                set_members_of_composite_type(
                    cx,
                    unfinished_type,
                    member_holding_stub,
                    member_descriptions,
                );
                // `true`: the stub was already registered in the TypeMap when
                // the forward declaration was created.
                MetadataCreationResult::new(metadata_stub, true)
            }
        }
    }
}
+
/// Returns from the enclosing function if the type metadata with the given
/// unique ID can be found in the type map.
// This is a macro (not a function) because it early-returns from the *caller*:
// creating metadata for constituent types may recursively re-enter
// `type_metadata()` and complete this very type in the meantime.
macro_rules! return_if_metadata_created_in_meantime {
    ($cx: expr, $unique_type_id: expr) => {
        if let Some(metadata) =
            debug_context($cx).type_map.borrow().find_metadata_for_unique_id($unique_type_id)
        {
            return MetadataCreationResult::new(metadata, true);
        }
    };
}
+
/// Creates debuginfo for an array (`[T; N]`) or slice (`[T]`) type as a DWARF
/// array type with a single subrange.
fn fixed_vec_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    unique_type_id: UniqueTypeId,
    array_or_slice_type: Ty<'tcx>,
    element_type: Ty<'tcx>,
    span: Span,
) -> MetadataCreationResult<'ll> {
    // May recurse back into `type_metadata`, hence the re-check below.
    let element_type_metadata = type_metadata(cx, element_type, span);

    return_if_metadata_created_in_meantime!(cx, unique_type_id);

    let (size, align) = cx.size_and_align_of(array_or_slice_type);

    // DWARF encodes an unknown length (slices) as an upper bound of -1.
    let upper_bound = match array_or_slice_type.kind() {
        ty::Array(_, len) => len.eval_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong,
        _ => -1,
    };

    let subrange =
        unsafe { Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) };

    let subscripts = create_DIArray(DIB(cx), &[subrange]);
    let metadata = unsafe {
        llvm::LLVMRustDIBuilderCreateArrayType(
            DIB(cx),
            size.bits(),
            align.bits() as u32,
            element_type_metadata,
            subscripts,
        )
    };

    MetadataCreationResult::new(metadata, false)
}
+
/// Creates debuginfo for a fat pointer to a slice (`&[T]`, `&str`, `Box<[T]>`,
/// ...), described as a struct with a `data_ptr` and a `length` field.
fn vec_slice_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    slice_ptr_type: Ty<'tcx>,
    element_type: Ty<'tcx>,
    unique_type_id: UniqueTypeId,
    span: Span,
) -> MetadataCreationResult<'ll> {
    // The data pointer is described as `*const T` regardless of the fat
    // pointer's own mutability.
    let data_ptr_type = cx.tcx.mk_imm_ptr(element_type);

    let data_ptr_metadata = type_metadata(cx, data_ptr_type, span);

    // `type_metadata` above may have recursively completed this type already.
    return_if_metadata_created_in_meantime!(cx, unique_type_id);

    let slice_type_name = compute_debuginfo_type_name(cx.tcx, slice_ptr_type, true);

    let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type);
    let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx.types.usize);

    let member_descriptions = vec![
        MemberDescription {
            name: "data_ptr".to_owned(),
            type_metadata: data_ptr_metadata,
            offset: Size::ZERO,
            size: pointer_size,
            align: pointer_align,
            flags: DIFlags::FlagZero,
            discriminant: None,
            source_info: None,
        },
        MemberDescription {
            name: "length".to_owned(),
            type_metadata: type_metadata(cx, cx.tcx.types.usize, span),
            offset: pointer_size,
            size: usize_size,
            align: usize_align,
            flags: DIFlags::FlagZero,
            discriminant: None,
            source_info: None,
        },
    ];

    let file_metadata = unknown_file_metadata(cx);

    let metadata = composite_type_metadata(
        cx,
        slice_ptr_type,
        &slice_type_name[..],
        unique_type_id,
        member_descriptions,
        NO_SCOPE_METADATA,
        file_metadata,
        span,
    );
    MetadataCreationResult::new(metadata, false)
}
+
/// Creates debuginfo for a function signature (used for `fn` pointers and
/// function definitions).
fn subroutine_type_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    unique_type_id: UniqueTypeId,
    signature: ty::PolyFnSig<'tcx>,
    span: Span,
) -> MetadataCreationResult<'ll> {
    let signature =
        cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &signature);

    let signature_metadata: Vec<_> = iter::once(
        // return type
        match signature.output().kind() {
            // A unit return type is represented as "no return type" (None).
            ty::Tuple(ref tys) if tys.is_empty() => None,
            _ => Some(type_metadata(cx, signature.output(), span)),
        },
    )
    .chain(
        // regular arguments
        signature.inputs().iter().map(|argument_type| Some(type_metadata(cx, argument_type, span))),
    )
    .collect();

    // The recursive `type_metadata` calls above may have created this type.
    return_if_metadata_created_in_meantime!(cx, unique_type_id);

    MetadataCreationResult::new(
        unsafe {
            llvm::LLVMRustDIBuilderCreateSubroutineType(
                DIB(cx),
                create_DIArray(DIB(cx), &signature_metadata[..]),
            )
        },
        false,
    )
}
+
// FIXME(1563): This is all a bit of a hack because 'trait pointer' is an ill-
// defined concept. For the case of an actual trait pointer (i.e., `Box<Trait>`,
// `&Trait`), `trait_object_type` should be the whole thing (e.g, `Box<Trait>`) and
// `trait_type` should be the actual trait (e.g., `Trait`). Where the trait is part
// of a DST struct, there is no `trait_object_type` and the results of this
// function will be a little bit weird.
fn trait_pointer_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    trait_type: Ty<'tcx>,
    trait_object_type: Option<Ty<'tcx>>,
    unique_type_id: UniqueTypeId,
) -> &'ll DIType {
    // The implementation provided here is a stub. It makes sure that the trait
    // type is assigned the correct name, size, namespace, and source location.
    // However, it does not describe the trait's methods.

    let containing_scope = match trait_type.kind() {
        ty::Dynamic(ref data, ..) => {
            // Auto-trait-only objects have no principal and thus no namespace.
            data.principal_def_id().map(|did| get_namespace_for_item(cx, did))
        }
        _ => {
            bug!(
                "debuginfo: unexpected trait-object type in \
                 trait_pointer_metadata(): {:?}",
                trait_type
            );
        }
    };

    let trait_object_type = trait_object_type.unwrap_or(trait_type);
    let trait_type_name = compute_debuginfo_type_name(cx.tcx, trait_object_type, false);

    let file_metadata = unknown_file_metadata(cx);

    // Use the layout of `&mut dyn Trait` to get the fat pointer's field
    // offsets, sizes and alignments.
    let layout = cx.layout_of(cx.tcx.mk_mut_ptr(trait_type));

    assert_eq!(abi::FAT_PTR_ADDR, 0);
    assert_eq!(abi::FAT_PTR_EXTRA, 1);

    let data_ptr_field = layout.field(cx, 0);
    let vtable_field = layout.field(cx, 1);
    let member_descriptions = vec![
        MemberDescription {
            // The data pointer is described as `*mut u8` since the pointee's
            // concrete type is unknown behind a trait object.
            name: "pointer".to_owned(),
            type_metadata: type_metadata(
                cx,
                cx.tcx.mk_mut_ptr(cx.tcx.types.u8),
                rustc_span::DUMMY_SP,
            ),
            offset: layout.fields.offset(0),
            size: data_ptr_field.size,
            align: data_ptr_field.align.abi,
            flags: DIFlags::FlagArtificial,
            discriminant: None,
            source_info: None,
        },
        MemberDescription {
            name: "vtable".to_owned(),
            type_metadata: type_metadata(cx, vtable_field.ty, rustc_span::DUMMY_SP),
            offset: layout.fields.offset(1),
            size: vtable_field.size,
            align: vtable_field.align.abi,
            flags: DIFlags::FlagArtificial,
            discriminant: None,
            source_info: None,
        },
    ];

    composite_type_metadata(
        cx,
        trait_object_type,
        &trait_type_name[..],
        unique_type_id,
        member_descriptions,
        containing_scope,
        file_metadata,
        rustc_span::DUMMY_SP,
    )
}
+
/// Returns (creating it on first request) the debuginfo metadata for type `t`.
///
/// This is the central entry point of this module. It is re-entrant: creating
/// metadata for a type's constituents may recursively call back into this
/// function for the same type, which is why results are re-checked against the
/// `TypeMap` at several points.
pub fn type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>, usage_site_span: Span) -> &'ll DIType {
    // Get the unique type ID of this type.
    let unique_type_id = {
        let mut type_map = debug_context(cx).type_map.borrow_mut();
        // First, try to find the type in `TypeMap`. If we have seen it before, we
        // can exit early here.
        match type_map.find_metadata_for_type(t) {
            Some(metadata) => {
                return metadata;
            }
            None => {
                // The Ty is not in the `TypeMap` but maybe we have already seen
                // an equivalent type (e.g., only differing in region arguments).
                // In order to find out, generate the unique type ID and look
                // that up.
                let unique_type_id = type_map.get_unique_type_id_of_type(cx, t);
                match type_map.find_metadata_for_unique_id(unique_type_id) {
                    Some(metadata) => {
                        // There is already an equivalent type in the TypeMap.
                        // Register this Ty as an alias in the cache and
                        // return the cached metadata.
                        type_map.register_type_with_metadata(t, metadata);
                        return metadata;
                    }
                    None => {
                        // There really is no type metadata for this type, so
                        // proceed by creating it.
                        unique_type_id
                    }
                }
            }
        }
    };

    debug!("type_metadata: {:?}", t);

    // Shared handling for all pointer-like types (`&T`, `*T`, `Box<T>`).
    // Returns `Err(metadata)` if the metadata was created by a recursive call
    // in the meantime, signalling the caller to return it immediately.
    let ptr_metadata = |ty: Ty<'tcx>| match *ty.kind() {
        ty::Slice(typ) => Ok(vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span)),
        ty::Str => Ok(vec_slice_metadata(cx, t, cx.tcx.types.u8, unique_type_id, usage_site_span)),
        ty::Dynamic(..) => Ok(MetadataCreationResult::new(
            trait_pointer_metadata(cx, ty, Some(t), unique_type_id),
            false,
        )),
        _ => {
            let pointee_metadata = type_metadata(cx, ty, usage_site_span);

            if let Some(metadata) =
                debug_context(cx).type_map.borrow().find_metadata_for_unique_id(unique_type_id)
            {
                return Err(metadata);
            }

            Ok(MetadataCreationResult::new(pointer_type_metadata(cx, t, pointee_metadata), false))
        }
    };

    let MetadataCreationResult { metadata, already_stored_in_typemap } = match *t.kind() {
        ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
            MetadataCreationResult::new(basic_type_metadata(cx, t), false)
        }
        ty::Tuple(ref elements) if elements.is_empty() => {
            MetadataCreationResult::new(basic_type_metadata(cx, t), false)
        }
        ty::Array(typ, _) | ty::Slice(typ) => {
            fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span)
        }
        ty::Str => fixed_vec_metadata(cx, unique_type_id, t, cx.tcx.types.i8, usage_site_span),
        ty::Dynamic(..) => {
            MetadataCreationResult::new(trait_pointer_metadata(cx, t, None, unique_type_id), false)
        }
        ty::Foreign(..) => {
            MetadataCreationResult::new(foreign_type_metadata(cx, t, unique_type_id), false)
        }
        ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => match ptr_metadata(ty) {
            Ok(res) => res,
            Err(metadata) => return metadata,
        },
        ty::Adt(def, _) if def.is_box() => match ptr_metadata(t.boxed_ty()) {
            Ok(res) => res,
            Err(metadata) => return metadata,
        },
        ty::FnDef(..) | ty::FnPtr(_) => {
            if let Some(metadata) =
                debug_context(cx).type_map.borrow().find_metadata_for_unique_id(unique_type_id)
            {
                return metadata;
            }

            // It's possible to create a self-referential
            // type in Rust by using 'impl trait':
            //
            // fn foo() -> impl Copy { foo }
            //
            // See `TypeMap::remove_type` for more detals
            // about the workaround.

            let temp_type = {
                unsafe {
                    // The choice of type here is pretty arbitrary -
                    // anything reading the debuginfo for a recursive
                    // type is going to see *something* weird - the only
                    // question is what exactly it will see.
                    let name = "<recur_type>";
                    llvm::LLVMRustDIBuilderCreateBasicType(
                        DIB(cx),
                        name.as_ptr().cast(),
                        name.len(),
                        cx.size_of(t).bits(),
                        DW_ATE_unsigned,
                    )
                }
            };

            // Temporarily register the placeholder so recursive lookups during
            // signature metadata creation terminate.
            let type_map = &debug_context(cx).type_map;
            type_map.borrow_mut().register_type_with_metadata(t, temp_type);

            let fn_metadata =
                subroutine_type_metadata(cx, unique_type_id, t.fn_sig(cx.tcx), usage_site_span)
                    .metadata;

            type_map.borrow_mut().remove_type(t);

            // This is actually a function pointer, so wrap it in pointer DI.
            MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false)
        }
        ty::Closure(def_id, substs) => {
            // A closure is described as a struct of its captured upvars.
            let upvar_tys: Vec<_> = substs.as_closure().upvar_tys().collect();
            let containing_scope = get_namespace_for_item(cx, def_id);
            prepare_tuple_metadata(
                cx,
                t,
                &upvar_tys,
                unique_type_id,
                usage_site_span,
                Some(containing_scope),
            )
            .finalize(cx)
        }
        ty::Generator(def_id, substs, _) => {
            // A generator is described like an enum of its suspend states.
            let upvar_tys: Vec<_> = substs
                .as_generator()
                .prefix_tys()
                .map(|t| cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
                .collect();
            prepare_enum_metadata(cx, t, def_id, unique_type_id, usage_site_span, upvar_tys)
                .finalize(cx)
        }
        ty::Adt(def, ..) => match def.adt_kind() {
            AdtKind::Struct => {
                prepare_struct_metadata(cx, t, unique_type_id, usage_site_span).finalize(cx)
            }
            AdtKind::Union => {
                prepare_union_metadata(cx, t, unique_type_id, usage_site_span).finalize(cx)
            }
            AdtKind::Enum => {
                prepare_enum_metadata(cx, t, def.did, unique_type_id, usage_site_span, vec![])
                    .finalize(cx)
            }
        },
        ty::Tuple(ref elements) => {
            let tys: Vec<_> = elements.iter().map(|k| k.expect_ty()).collect();
            prepare_tuple_metadata(cx, t, &tys, unique_type_id, usage_site_span, NO_SCOPE_METADATA)
                .finalize(cx)
        }
        // Type parameters from polymorphized functions.
        ty::Param(_) => MetadataCreationResult::new(param_type_metadata(cx, t), false),
        _ => bug!("debuginfo: unexpected type in type_metadata: {:?}", t),
    };

    // Finally, record the freshly created metadata in the TypeMap, verifying
    // consistency between the Ty-keyed and the unique-ID-keyed entries.
    {
        let mut type_map = debug_context(cx).type_map.borrow_mut();

        if already_stored_in_typemap {
            // Also make sure that we already have a `TypeMap` entry for the unique type ID.
            let metadata_for_uid = match type_map.find_metadata_for_unique_id(unique_type_id) {
                Some(metadata) => metadata,
                None => {
                    span_bug!(
                        usage_site_span,
                        "expected type metadata for unique \
                         type ID '{}' to already be in \
                         the `debuginfo::TypeMap` but it \
                         was not. (Ty = {})",
                        type_map.get_unique_type_id_as_string(unique_type_id),
                        t
                    );
                }
            };

            match type_map.find_metadata_for_type(t) {
                Some(metadata) => {
                    if metadata != metadata_for_uid {
                        span_bug!(
                            usage_site_span,
                            "mismatch between `Ty` and \
                             `UniqueTypeId` maps in \
                             `debuginfo::TypeMap`. \
                             UniqueTypeId={}, Ty={}",
                            type_map.get_unique_type_id_as_string(unique_type_id),
                            t
                        );
                    }
                }
                None => {
                    type_map.register_type_with_metadata(t, metadata);
                }
            }
        } else {
            type_map.register_type_with_metadata(t, metadata);
            type_map.register_unique_id_with_metadata(unique_type_id, metadata);
        }
    }

    metadata
}
+
/// Encodes `data` as a lowercase hexadecimal string, two digits per byte.
fn hex_encode(data: &[u8]) -> String {
    data.iter().map(|byte| format!("{:02x}", byte)).collect()
}
+
+pub fn file_metadata(
+ cx: &CodegenCx<'ll, '_>,
+ source_file: &SourceFile,
+ defining_crate: CrateNum,
+) -> &'ll DIFile {
+ debug!("file_metadata: file_name: {}, defining_crate: {}", source_file.name, defining_crate);
+
+ let hash = Some(&source_file.src_hash);
+ let file_name = Some(source_file.name.to_string());
+ let directory = if defining_crate == LOCAL_CRATE {
+ Some(cx.sess().working_dir.0.to_string_lossy().to_string())
+ } else {
+ // If the path comes from an upstream crate we assume it has been made
+ // independent of the compiler's working directory one way or another.
+ None
+ };
+ file_metadata_raw(cx, file_name, directory, hash)
+}
+
/// Returns the shared `DIFile` placeholder used when no real source file is
/// known (no name, no directory, no hash).
pub fn unknown_file_metadata(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile {
    file_metadata_raw(cx, None, None, None)
}
+
/// Creates (or returns a cached) `DIFile` for the given name/directory pair.
/// Results are memoized in `created_files`, keyed by `(file_name, directory)`;
/// the hash does not participate in the cache key.
fn file_metadata_raw(
    cx: &CodegenCx<'ll, '_>,
    file_name: Option<String>,
    directory: Option<String>,
    hash: Option<&SourceFileHash>,
) -> &'ll DIFile {
    let key = (file_name, directory);

    match debug_context(cx).created_files.borrow_mut().entry(key) {
        Entry::Occupied(o) => o.get(),
        Entry::Vacant(v) => {
            let (file_name, directory) = v.key();
            debug!("file_metadata: file_name: {:?}, directory: {:?}", file_name, directory);

            let file_name = file_name.as_deref().unwrap_or("<unknown>");
            let directory = directory.as_deref().unwrap_or("");

            let (hash_kind, hash_value) = match hash {
                Some(hash) => {
                    let kind = match hash.kind {
                        rustc_span::SourceFileHashAlgorithm::Md5 => llvm::ChecksumKind::MD5,
                        rustc_span::SourceFileHashAlgorithm::Sha1 => llvm::ChecksumKind::SHA1,
                    };
                    (kind, hex_encode(hash.hash_bytes()))
                }
                // No hash available: tell LLVM there is no checksum.
                None => (llvm::ChecksumKind::None, String::new()),
            };

            let file_metadata = unsafe {
                llvm::LLVMRustDIBuilderCreateFile(
                    DIB(cx),
                    file_name.as_ptr().cast(),
                    file_name.len(),
                    directory.as_ptr().cast(),
                    directory.len(),
                    hash_kind,
                    hash_value.as_ptr().cast(),
                    hash_value.len(),
                )
            };

            v.insert(file_metadata);
            file_metadata
        }
    }
}
+
/// Maps a Rust primitive type to the MSVC-style name emitted in debuginfo so
/// that `.natvis` visualizers and native Windows debuggers recognize it.
trait MsvcBasicName {
    fn msvc_basic_name(self) -> &'static str;
}
+
impl MsvcBasicName for ast::IntTy {
    // MSVC-style names for signed integers; `isize` maps to `ptrdiff_t`.
    fn msvc_basic_name(self) -> &'static str {
        match self {
            ast::IntTy::Isize => "ptrdiff_t",
            ast::IntTy::I8 => "__int8",
            ast::IntTy::I16 => "__int16",
            ast::IntTy::I32 => "__int32",
            ast::IntTy::I64 => "__int64",
            ast::IntTy::I128 => "__int128",
        }
    }
}
+
impl MsvcBasicName for ast::UintTy {
    // MSVC-style names for unsigned integers; `usize` maps to `size_t`.
    fn msvc_basic_name(self) -> &'static str {
        match self {
            ast::UintTy::Usize => "size_t",
            ast::UintTy::U8 => "unsigned __int8",
            ast::UintTy::U16 => "unsigned __int16",
            ast::UintTy::U32 => "unsigned __int32",
            ast::UintTy::U64 => "unsigned __int64",
            ast::UintTy::U128 => "unsigned __int128",
        }
    }
}
+
impl MsvcBasicName for ast::FloatTy {
    // MSVC-style names for floats use the C type names.
    fn msvc_basic_name(self) -> &'static str {
        match self {
            ast::FloatTy::F32 => "float",
            ast::FloatTy::F64 => "double",
        }
    }
}
+
+fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
+ debug!("basic_type_metadata: {:?}", t);
+
+ // When targeting MSVC, emit MSVC style type names for compatibility with
+ // .natvis visualizers (and perhaps other existing native debuggers?)
+ let msvc_like_names = cx.tcx.sess.target.target.options.is_like_msvc;
+
+ let (name, encoding) = match t.kind() {
+ ty::Never => ("!", DW_ATE_unsigned),
+ ty::Tuple(ref elements) if elements.is_empty() => ("()", DW_ATE_unsigned),
+ ty::Bool => ("bool", DW_ATE_boolean),
+ ty::Char => ("char", DW_ATE_unsigned_char),
+ ty::Int(int_ty) if msvc_like_names => (int_ty.msvc_basic_name(), DW_ATE_signed),
+ ty::Uint(uint_ty) if msvc_like_names => (uint_ty.msvc_basic_name(), DW_ATE_unsigned),
+ ty::Float(float_ty) if msvc_like_names => (float_ty.msvc_basic_name(), DW_ATE_float),
+ ty::Int(int_ty) => (int_ty.name_str(), DW_ATE_signed),
+ ty::Uint(uint_ty) => (uint_ty.name_str(), DW_ATE_unsigned),
+ ty::Float(float_ty) => (float_ty.name_str(), DW_ATE_float),
+ _ => bug!("debuginfo::basic_type_metadata - `t` is invalid type"),
+ };
+
+ let ty_metadata = unsafe {
+ llvm::LLVMRustDIBuilderCreateBasicType(
+ DIB(cx),
+ name.as_ptr().cast(),
+ name.len(),
+ cx.size_of(t).bits(),
+ encoding,
+ )
+ };
+
+ if !msvc_like_names {
+ return ty_metadata;
+ }
+
+ let typedef_name = match t.kind() {
+ ty::Int(int_ty) => int_ty.name_str(),
+ ty::Uint(uint_ty) => uint_ty.name_str(),
+ ty::Float(float_ty) => float_ty.name_str(),
+ _ => return ty_metadata,
+ };
+
+ let typedef_metadata = unsafe {
+ llvm::LLVMRustDIBuilderCreateTypedef(
+ DIB(cx),
+ ty_metadata,
+ typedef_name.as_ptr().cast(),
+ typedef_name.len(),
+ unknown_file_metadata(cx),
+ 0,
+ None,
+ )
+ };
+
+ typedef_metadata
+}
+
/// Creates debuginfo for an `extern` type (`ty::Foreign`). Since the type is
/// opaque, only a struct stub with the right name is emitted — no members.
fn foreign_type_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    t: Ty<'tcx>,
    unique_type_id: UniqueTypeId,
) -> &'ll DIType {
    debug!("foreign_type_metadata: {:?}", t);

    let name = compute_debuginfo_type_name(cx.tcx, t, false);
    create_struct_stub(cx, t, &name, unique_type_id, NO_SCOPE_METADATA, DIFlags::FlagZero)
}
+
/// Wraps existing pointee metadata in a `DW_TAG_pointer_type` node carrying
/// the pointer type's own name, size and alignment.
fn pointer_type_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    pointer_type: Ty<'tcx>,
    pointee_type_metadata: &'ll DIType,
) -> &'ll DIType {
    let (pointer_size, pointer_align) = cx.size_and_align_of(pointer_type);
    let name = compute_debuginfo_type_name(cx.tcx, pointer_type, false);
    unsafe {
        llvm::LLVMRustDIBuilderCreatePointerType(
            DIB(cx),
            pointee_type_metadata,
            pointer_size.bits(),
            pointer_align.bits() as u32,
            0, // Ignore DWARF address space.
            name.as_ptr().cast(),
            name.len(),
        )
    }
}
+
/// Creates placeholder metadata for an unsubstituted type parameter, which can
/// survive into codegen when polymorphization determines it is unused. It is
/// emitted as a zero-sized basic type named after the parameter.
fn param_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
    debug!("param_type_metadata: {:?}", t);
    let name = format!("{:?}", t);
    unsafe {
        llvm::LLVMRustDIBuilderCreateBasicType(
            DIB(cx),
            name.as_ptr().cast(),
            name.len(),
            Size::ZERO.bits(),
            DW_ATE_unsigned,
        )
    }
}
+
/// Creates the `DICompileUnit` for a codegen unit, plus target-specific
/// side metadata (`llvm.gcov` for `-Zprofile`, `llvm.ident` on wasm32).
pub fn compile_unit_metadata(
    tcx: TyCtxt<'_>,
    codegen_unit_name: &str,
    debug_context: &CrateDebugContext<'ll, '_>,
) -> &'ll DIDescriptor {
    let mut name_in_debuginfo = match tcx.sess.local_crate_source_file {
        Some(ref path) => path.clone(),
        None => PathBuf::from(&*tcx.crate_name(LOCAL_CRATE).as_str()),
    };

    // The OSX linker has an idiosyncrasy where it will ignore some debuginfo
    // if multiple object files with the same `DW_AT_name` are linked together.
    // As a workaround we generate unique names for each object file. Those do
    // not correspond to an actual source file but that should be harmless.
    if tcx.sess.target.target.options.is_like_osx {
        name_in_debuginfo.push("@");
        name_in_debuginfo.push(codegen_unit_name);
    }

    debug!("compile_unit_metadata: {:?}", name_in_debuginfo);
    let rustc_producer =
        format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"),);
    // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
    let producer = format!("clang LLVM ({})", rustc_producer);

    let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
    let work_dir = tcx.sess.working_dir.0.to_string_lossy();
    // `flags` is passed with length 0 below, so only the NUL terminator
    // matters here.
    let flags = "\0";
    let split_name = "";

    // FIXME(#60020):
    //
    //    This should actually be
    //
    //        let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo);
    //
    //    That is, we should set LLVM's emission kind to `LineTablesOnly` if
    //    we are compiling with "limited" debuginfo. However, some of the
    //    existing tools relied on slightly more debuginfo being generated than
    //    would be the case with `LineTablesOnly`, and we did not want to break
    //    these tools in a "drive-by fix", without a good idea or plan about
    //    what limited debuginfo should exactly look like. So for now we keep
    //    the emission kind as `FullDebug`.
    //
    //    See https://github.com/rust-lang/rust/issues/60020 for details.
    let kind = DebugEmissionKind::FullDebug;
    assert!(tcx.sess.opts.debuginfo != DebugInfo::None);

    unsafe {
        let file_metadata = llvm::LLVMRustDIBuilderCreateFile(
            debug_context.builder,
            name_in_debuginfo.as_ptr().cast(),
            name_in_debuginfo.len(),
            work_dir.as_ptr().cast(),
            work_dir.len(),
            llvm::ChecksumKind::None,
            ptr::null(),
            0,
        );

        let unit_metadata = llvm::LLVMRustDIBuilderCreateCompileUnit(
            debug_context.builder,
            DW_LANG_RUST,
            file_metadata,
            producer.as_ptr().cast(),
            producer.len(),
            tcx.sess.opts.optimize != config::OptLevel::No,
            flags.as_ptr().cast(),
            0,
            split_name.as_ptr().cast(),
            split_name.len(),
            kind,
        );

        // For `-Zprofile` (gcov-style coverage), attach an `llvm.gcov` node
        // pointing at the .gcno/.gcda output paths.
        if tcx.sess.opts.debugging_opts.profile {
            let cu_desc_metadata =
                llvm::LLVMRustMetadataAsValue(debug_context.llcontext, unit_metadata);
            let default_gcda_path = &tcx.output_filenames(LOCAL_CRATE).with_extension("gcda");
            let gcda_path =
                tcx.sess.opts.debugging_opts.profile_emit.as_ref().unwrap_or(default_gcda_path);

            let gcov_cu_info = [
                path_to_mdstring(
                    debug_context.llcontext,
                    &tcx.output_filenames(LOCAL_CRATE).with_extension("gcno"),
                ),
                path_to_mdstring(debug_context.llcontext, &gcda_path),
                cu_desc_metadata,
            ];
            let gcov_metadata = llvm::LLVMMDNodeInContext(
                debug_context.llcontext,
                gcov_cu_info.as_ptr(),
                gcov_cu_info.len() as c_uint,
            );

            let llvm_gcov_ident = const_cstr!("llvm.gcov");
            llvm::LLVMAddNamedMetadataOperand(
                debug_context.llmod,
                llvm_gcov_ident.as_ptr(),
                gcov_metadata,
            );
        }

        // Insert `llvm.ident` metadata on the wasm32 targets since that will
        // get hooked up to the "producer" sections `processed-by` information.
        if tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
            let name_metadata = llvm::LLVMMDStringInContext(
                debug_context.llcontext,
                rustc_producer.as_ptr().cast(),
                rustc_producer.as_bytes().len() as c_uint,
            );
            llvm::LLVMAddNamedMetadataOperand(
                debug_context.llmod,
                const_cstr!("llvm.ident").as_ptr(),
                llvm::LLVMMDNodeInContext(debug_context.llcontext, &name_metadata, 1),
            );
        }

        return unit_metadata;
    };

    // Converts a filesystem path into an LLVM metadata string node.
    fn path_to_mdstring(llcx: &'ll llvm::Context, path: &Path) -> &'ll Value {
        let path_str = path_to_c_string(path);
        unsafe {
            llvm::LLVMMDStringInContext(
                llcx,
                path_str.as_ptr(),
                path_str.as_bytes().len() as c_uint,
            )
        }
    }
}
+
/// The result of creating type metadata: the metadata node itself plus whether
/// it was already registered in the `TypeMap` during creation (which happens
/// for recursive types that register a forward declaration first).
struct MetadataCreationResult<'ll> {
    metadata: &'ll DIType,
    already_stored_in_typemap: bool,
}
+
impl MetadataCreationResult<'ll> {
    /// Convenience constructor bundling the metadata with its TypeMap status.
    fn new(metadata: &'ll DIType, already_stored_in_typemap: bool) -> Self {
        MetadataCreationResult { metadata, already_stored_in_typemap }
    }
}
+
/// A file/line location attached to a member description.
#[derive(Debug)]
struct SourceInfo<'ll> {
    file: &'ll DIFile,
    line: u32,
}
+
/// Description of a type member, which can either be a regular field (as in
/// structs or tuples) or an enum variant.
#[derive(Debug)]
struct MemberDescription<'ll> {
    name: String,
    type_metadata: &'ll DIType,
    // Byte offset of the member within the composite type.
    offset: Size,
    size: Size,
    align: Align,
    flags: DIFlags,
    // Only set for enum variants with an explicit discriminant value.
    discriminant: Option<u64>,
    // Declaration location, if known; otherwise an unknown file/line is used.
    source_info: Option<SourceInfo<'ll>>,
}
+
+impl<'ll> MemberDescription<'ll> {
+ fn into_metadata(
+ self,
+ cx: &CodegenCx<'ll, '_>,
+ composite_type_metadata: &'ll DIScope,
+ ) -> &'ll DIType {
+ let (file, line) = self
+ .source_info
+ .map(|info| (info.file, info.line))
+ .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateVariantMemberType(
+ DIB(cx),
+ composite_type_metadata,
+ self.name.as_ptr().cast(),
+ self.name.len(),
+ file,
+ line,
+ self.size.bits(),
+ self.align.bits() as u32,
+ self.offset.bits(),
+ match self.discriminant {
+ None => None,
+ Some(value) => Some(cx.const_u64(value)),
+ },
+ self.flags,
+ self.type_metadata,
+ )
+ }
+ }
+}
+
/// A factory for `MemberDescription`s. It produces a list of member descriptions
/// for some record-like type. `MemberDescriptionFactory`s are used to defer the
/// creation of type member descriptions in order to break cycles arising from
/// recursive type definitions.
enum MemberDescriptionFactory<'ll, 'tcx> {
    StructMDF(StructMemberDescriptionFactory<'tcx>),
    TupleMDF(TupleMemberDescriptionFactory<'tcx>),
    EnumMDF(EnumMemberDescriptionFactory<'ll, 'tcx>),
    UnionMDF(UnionMemberDescriptionFactory<'tcx>),
    VariantMDF(VariantMemberDescriptionFactory<'ll, 'tcx>),
}
+
impl MemberDescriptionFactory<'ll, 'tcx> {
    /// Dispatches to the concrete factory's `create_member_descriptions`.
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> {
        match *self {
            StructMDF(ref this) => this.create_member_descriptions(cx),
            TupleMDF(ref this) => this.create_member_descriptions(cx),
            EnumMDF(ref this) => this.create_member_descriptions(cx),
            UnionMDF(ref this) => this.create_member_descriptions(cx),
            VariantMDF(ref this) => this.create_member_descriptions(cx),
        }
    }
}
+
+//=-----------------------------------------------------------------------------
+// Structs
+//=-----------------------------------------------------------------------------
+
/// Creates `MemberDescription`s for the fields of a struct.
struct StructMemberDescriptionFactory<'tcx> {
    ty: Ty<'tcx>,
    // The struct's single (non-enum) variant, holding its field definitions.
    variant: &'tcx ty::VariantDef,
    span: Span,
}
+
impl<'tcx> StructMemberDescriptionFactory<'tcx> {
    /// Produces one member description per struct field, taking offsets and
    /// sizes from the struct's computed layout.
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> {
        let layout = cx.layout_of(self.ty);
        self.variant
            .fields
            .iter()
            .enumerate()
            .map(|(i, f)| {
                // Tuple structs get synthetic `__0`, `__1`, ... field names.
                let name = if self.variant.ctor_kind == CtorKind::Fn {
                    format!("__{}", i)
                } else {
                    f.ident.to_string()
                };
                let field = layout.field(cx, i);
                MemberDescription {
                    name,
                    type_metadata: type_metadata(cx, field.ty, self.span),
                    offset: layout.fields.offset(i),
                    size: field.size,
                    align: field.align.abi,
                    flags: DIFlags::FlagZero,
                    discriminant: None,
                    source_info: None,
                }
            })
            .collect()
    }
}
+
/// Creates a forward-declared struct stub and a deferred member factory for
/// it, registered in the `TypeMap` to break recursive type cycles.
fn prepare_struct_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    struct_type: Ty<'tcx>,
    unique_type_id: UniqueTypeId,
    span: Span,
) -> RecursiveTypeDescription<'ll, 'tcx> {
    let struct_name = compute_debuginfo_type_name(cx.tcx, struct_type, false);

    let (struct_def_id, variant) = match struct_type.kind() {
        ty::Adt(def, _) => (def.did, def.non_enum_variant()),
        _ => bug!("prepare_struct_metadata on a non-ADT"),
    };

    let containing_scope = get_namespace_for_item(cx, struct_def_id);

    let struct_metadata_stub = create_struct_stub(
        cx,
        struct_type,
        &struct_name,
        unique_type_id,
        Some(containing_scope),
        DIFlags::FlagZero,
    );

    // For structs the stub itself also holds the members.
    create_and_register_recursive_type_forward_declaration(
        cx,
        struct_type,
        unique_type_id,
        struct_metadata_stub,
        struct_metadata_stub,
        StructMDF(StructMemberDescriptionFactory { ty: struct_type, variant, span }),
    )
}
+
+//=-----------------------------------------------------------------------------
+// Tuples
+//=-----------------------------------------------------------------------------
+
/// Creates `MemberDescription`s for the fields of a tuple.
struct TupleMemberDescriptionFactory<'tcx> {
    ty: Ty<'tcx>,
    // Element types in positional order.
    component_types: Vec<Ty<'tcx>>,
    span: Span,
}
+
impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
    /// Produces one member description per tuple element, named `__0`, `__1`,
    /// etc., with offsets from the tuple's layout.
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> {
        let layout = cx.layout_of(self.ty);
        self.component_types
            .iter()
            .enumerate()
            .map(|(i, &component_type)| {
                let (size, align) = cx.size_and_align_of(component_type);
                MemberDescription {
                    name: format!("__{}", i),
                    type_metadata: type_metadata(cx, component_type, self.span),
                    offset: layout.fields.offset(i),
                    size,
                    align,
                    flags: DIFlags::FlagZero,
                    discriminant: None,
                    source_info: None,
                }
            })
            .collect()
    }
}
+
/// Creates a forward-declared stub for a tuple (also used for closures, whose
/// captured upvars are described as a tuple), plus a deferred member factory.
fn prepare_tuple_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    tuple_type: Ty<'tcx>,
    component_types: &[Ty<'tcx>],
    unique_type_id: UniqueTypeId,
    span: Span,
    containing_scope: Option<&'ll DIScope>,
) -> RecursiveTypeDescription<'ll, 'tcx> {
    let tuple_name = compute_debuginfo_type_name(cx.tcx, tuple_type, false);

    let struct_stub = create_struct_stub(
        cx,
        tuple_type,
        &tuple_name[..],
        unique_type_id,
        containing_scope,
        DIFlags::FlagZero,
    );

    create_and_register_recursive_type_forward_declaration(
        cx,
        tuple_type,
        unique_type_id,
        struct_stub,
        struct_stub,
        TupleMDF(TupleMemberDescriptionFactory {
            ty: tuple_type,
            component_types: component_types.to_vec(),
            span,
        }),
    )
}
+
+//=-----------------------------------------------------------------------------
+// Unions
+//=-----------------------------------------------------------------------------
+
/// Creates `MemberDescription`s for the fields of a union.
struct UnionMemberDescriptionFactory<'tcx> {
    layout: TyAndLayout<'tcx>,
    variant: &'tcx ty::VariantDef,
    span: Span,
}
+
impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
    /// Produces one member description per union field. All fields share
    /// offset zero, as union fields overlap.
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> {
        self.variant
            .fields
            .iter()
            .enumerate()
            .map(|(i, f)| {
                let field = self.layout.field(cx, i);
                MemberDescription {
                    name: f.ident.to_string(),
                    type_metadata: type_metadata(cx, field.ty, self.span),
                    offset: Size::ZERO,
                    size: field.size,
                    align: field.align.abi,
                    flags: DIFlags::FlagZero,
                    discriminant: None,
                    source_info: None,
                }
            })
            .collect()
    }
}
+
/// Creates a forward-declared union stub and a deferred member factory for
/// it, registered in the `TypeMap` to break recursive type cycles.
fn prepare_union_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    union_type: Ty<'tcx>,
    unique_type_id: UniqueTypeId,
    span: Span,
) -> RecursiveTypeDescription<'ll, 'tcx> {
    let union_name = compute_debuginfo_type_name(cx.tcx, union_type, false);

    let (union_def_id, variant) = match union_type.kind() {
        ty::Adt(def, _) => (def.did, def.non_enum_variant()),
        _ => bug!("prepare_union_metadata on a non-ADT"),
    };

    let containing_scope = get_namespace_for_item(cx, union_def_id);

    let union_metadata_stub =
        create_union_stub(cx, union_type, &union_name, unique_type_id, containing_scope);

    create_and_register_recursive_type_forward_declaration(
        cx,
        union_type,
        unique_type_id,
        union_metadata_stub,
        union_metadata_stub,
        UnionMDF(UnionMemberDescriptionFactory { layout: cx.layout_of(union_type), variant, span }),
    )
}
+
+//=-----------------------------------------------------------------------------
+// Enums
+//=-----------------------------------------------------------------------------
+
/// DWARF variant support is only available starting in LLVM 8, but
/// on MSVC we have to use the fallback mode, because LLVM doesn't
/// lower variant parts to PDB.
///
/// When this returns `true`, enums are emitted as unions of variant structs
/// instead of DWARF variant parts (see `prepare_enum_metadata`).
fn use_enum_fallback(cx: &CodegenCx<'_, '_>) -> bool {
    cx.sess().target.target.options.is_like_msvc
}
+
// FIXME(eddyb) maybe precompute this? Right now it's computed once
// per generator monomorphization, but it doesn't depend on substs.
/// Returns the generator's layout together with the user-visible name (if
/// any) of each saved local, recovered from the MIR body's var-debuginfo.
fn generator_layout_and_saved_local_names(
    tcx: TyCtxt<'tcx>,
    def_id: DefId,
) -> (&'tcx GeneratorLayout<'tcx>, IndexVec<mir::GeneratorSavedLocal, Option<Symbol>>) {
    let body = tcx.optimized_mir(def_id);
    let generator_layout = body.generator_layout.as_ref().unwrap();
    // One optional name per saved local, defaulting to `None`.
    let mut generator_saved_local_names = IndexVec::from_elem(None, &generator_layout.field_tys);

    // Local `_1` is the generator's state argument.
    let state_arg = mir::Local::new(1);
    for var in &body.var_debug_info {
        if var.place.local != state_arg {
            continue;
        }
        // Only a projection of exactly this shape names a saved local of the
        // generator state; anything else is ignored.
        match var.place.projection[..] {
            [
                // Deref of the `Pin<&mut Self>` state argument.
                mir::ProjectionElem::Field(..),
                mir::ProjectionElem::Deref,

                // Field of a variant of the state.
                mir::ProjectionElem::Downcast(_, variant),
                mir::ProjectionElem::Field(field, _),
            ] => {
                let name = &mut generator_saved_local_names[
                    generator_layout.variant_fields[variant][field]
                ];
                // Keep the first name found for each saved local.
                if name.is_none() {
                    name.replace(var.name);
                }
            }
            _ => {}
        }
    }
    (generator_layout, generator_saved_local_names)
}
+
/// Describes the members of an enum value; an enum is described as a union of
/// structs in DWARF. This `MemberDescriptionFactory` provides the description for
/// the members of this union; so for every variant of the given enum, this
/// factory will produce one `MemberDescription` (all with no name and a fixed
/// offset of zero bytes).
struct EnumMemberDescriptionFactory<'ll, 'tcx> {
    /// The enum (or generator) type being described.
    enum_type: Ty<'tcx>,
    /// Layout of `enum_type`; its `variants` field selects the encoding used.
    layout: TyAndLayout<'tcx>,
    /// Metadata for the discriminant type; populated by `prepare_enum_metadata`
    /// on the fallback path for directly-tagged enums, `None` otherwise.
    tag_type_metadata: Option<&'ll DIType>,
    containing_scope: &'ll DIScope,
    span: Span,
}
+
impl EnumMemberDescriptionFactory<'ll, 'tcx> {
    /// Produces one `MemberDescription` per enum variant (each a struct at
    /// offset zero), dispatching on the layout's variant/tag encoding.
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> {
        // For generators, compute the layout and saved-local names once up
        // front; `variant_info_for` borrows from this.
        let generator_variant_info_data = match *self.enum_type.kind() {
            ty::Generator(def_id, ..) => {
                Some(generator_layout_and_saved_local_names(cx.tcx, def_id))
            }
            _ => None,
        };

        // Maps a variant index to the `VariantInfo` used for naming it.
        let variant_info_for = |index: VariantIdx| match *self.enum_type.kind() {
            ty::Adt(adt, _) => VariantInfo::Adt(&adt.variants[index]),
            ty::Generator(def_id, _, _) => {
                let (generator_layout, generator_saved_local_names) =
                    generator_variant_info_data.as_ref().unwrap();
                VariantInfo::Generator {
                    def_id,
                    generator_layout: *generator_layout,
                    generator_saved_local_names,
                    variant_index: index,
                }
            }
            _ => bug!(),
        };

        // This will always find the metadata in the type map.
        let fallback = use_enum_fallback(cx);
        let self_metadata = if fallback {
            self.containing_scope
        } else {
            type_metadata(cx, self.enum_type, self.span)
        };
        let flags = match self.enum_type.kind() {
            ty::Generator(..) => DIFlags::FlagArtificial,
            _ => DIFlags::FlagZero,
        };

        match self.layout.variants {
            // Only one inhabited variant: describe just that variant's struct.
            Variants::Single { index } => {
                // Uninhabited enums have no members at all.
                if let ty::Adt(adt, _) = self.enum_type.kind() {
                    if adt.variants.is_empty() {
                        return vec![];
                    }
                }

                let variant_info = variant_info_for(index);
                let (variant_type_metadata, member_description_factory) = describe_enum_variant(
                    cx,
                    self.layout,
                    variant_info,
                    NoTag,
                    self_metadata,
                    self.span,
                );

                let member_descriptions = member_description_factory.create_member_descriptions(cx);

                set_members_of_composite_type(
                    cx,
                    self.enum_type,
                    variant_type_metadata,
                    member_descriptions,
                );
                vec![MemberDescription {
                    // Fallback union members are unnamed.
                    name: if fallback { String::new() } else { variant_info.variant_name() },
                    type_metadata: variant_type_metadata,
                    offset: Size::ZERO,
                    size: self.layout.size,
                    align: self.layout.align.abi,
                    flags,
                    discriminant: None,
                    source_info: variant_info.source_info(cx),
                }]
            }
            // A dedicated tag field selects the active variant.
            Variants::Multiple {
                tag_encoding: TagEncoding::Direct,
                tag_field,
                ref variants,
                ..
            } => {
                let tag_info = if fallback {
                    RegularTag {
                        tag_field: Field::from(tag_field),
                        tag_type_metadata: self.tag_type_metadata.unwrap(),
                    }
                } else {
                    // This doesn't matter in this case.
                    NoTag
                };
                variants
                    .iter_enumerated()
                    .map(|(i, _)| {
                        let variant = self.layout.for_variant(cx, i);
                        let variant_info = variant_info_for(i);
                        let (variant_type_metadata, member_desc_factory) = describe_enum_variant(
                            cx,
                            variant,
                            variant_info,
                            tag_info,
                            self_metadata,
                            self.span,
                        );

                        let member_descriptions =
                            member_desc_factory.create_member_descriptions(cx);

                        set_members_of_composite_type(
                            cx,
                            self.enum_type,
                            variant_type_metadata,
                            member_descriptions,
                        );

                        MemberDescription {
                            name: if fallback {
                                String::new()
                            } else {
                                variant_info.variant_name()
                            },
                            type_metadata: variant_type_metadata,
                            offset: Size::ZERO,
                            size: self.layout.size,
                            align: self.layout.align.abi,
                            flags,
                            discriminant: Some(
                                self.layout.ty.discriminant_for_variant(cx.tcx, i).unwrap().val
                                    as u64,
                            ),
                            source_info: variant_info.source_info(cx),
                        }
                    })
                    .collect()
            }
            // The discriminant is encoded in a niche of the dataful variant.
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
                ref tag,
                ref variants,
                tag_field,
            } => {
                if fallback {
                    let variant = self.layout.for_variant(cx, dataful_variant);
                    // Create a description of the non-null variant.
                    let (variant_type_metadata, member_description_factory) = describe_enum_variant(
                        cx,
                        variant,
                        variant_info_for(dataful_variant),
                        OptimizedTag,
                        self.containing_scope,
                        self.span,
                    );

                    let variant_member_descriptions =
                        member_description_factory.create_member_descriptions(cx);

                    set_members_of_composite_type(
                        cx,
                        self.enum_type,
                        variant_type_metadata,
                        variant_member_descriptions,
                    );

                    // Encode the information about the null variant in the union
                    // member's name.
                    let mut name = String::from("RUST$ENCODED$ENUM$");
                    // Right now it's not even going to work for `niche_start > 0`,
                    // and for multiple niche variants it only supports the first.
                    //
                    // Appends a `$`-separated path of field indices leading to
                    // the niche field, recursing into nested layouts.
                    fn compute_field_path<'a, 'tcx>(
                        cx: &CodegenCx<'a, 'tcx>,
                        name: &mut String,
                        layout: TyAndLayout<'tcx>,
                        offset: Size,
                        size: Size,
                    ) {
                        for i in 0..layout.fields.count() {
                            let field_offset = layout.fields.offset(i);
                            if field_offset > offset {
                                continue;
                            }
                            let inner_offset = offset - field_offset;
                            let field = layout.field(cx, i);
                            if inner_offset + size <= field.size {
                                write!(name, "{}$", i).unwrap();
                                compute_field_path(cx, name, field, inner_offset, size);
                            }
                        }
                    }
                    compute_field_path(
                        cx,
                        &mut name,
                        self.layout,
                        self.layout.fields.offset(tag_field),
                        self.layout.field(cx, tag_field).size,
                    );
                    let variant_info = variant_info_for(*niche_variants.start());
                    variant_info.map_struct_name(|variant_name| {
                        name.push_str(variant_name);
                    });

                    // Create the (singleton) list of descriptions of union members.
                    vec![MemberDescription {
                        name,
                        type_metadata: variant_type_metadata,
                        offset: Size::ZERO,
                        size: variant.size,
                        align: variant.align.abi,
                        flags,
                        discriminant: None,
                        source_info: variant_info.source_info(cx),
                    }]
                } else {
                    variants
                        .iter_enumerated()
                        .map(|(i, _)| {
                            let variant = self.layout.for_variant(cx, i);
                            let variant_info = variant_info_for(i);
                            let (variant_type_metadata, member_desc_factory) =
                                describe_enum_variant(
                                    cx,
                                    variant,
                                    variant_info,
                                    OptimizedTag,
                                    self_metadata,
                                    self.span,
                                );

                            let member_descriptions =
                                member_desc_factory.create_member_descriptions(cx);

                            set_members_of_composite_type(
                                cx,
                                self.enum_type,
                                variant_type_metadata,
                                member_descriptions,
                            );

                            // The dataful variant has no discriminant value;
                            // every other variant's value is its offset from
                            // the start of the niche range, plus `niche_start`,
                            // truncated to the tag's size.
                            let niche_value = if i == dataful_variant {
                                None
                            } else {
                                let value = (i.as_u32() as u128)
                                    .wrapping_sub(niche_variants.start().as_u32() as u128)
                                    .wrapping_add(niche_start);
                                let value = truncate(value, tag.value.size(cx));
                                // NOTE(eddyb) do *NOT* remove this assert, until
                                // we pass the full 128-bit value to LLVM, otherwise
                                // truncation will be silent and remain undetected.
                                assert_eq!(value as u64 as u128, value);
                                Some(value as u64)
                            };

                            MemberDescription {
                                name: variant_info.variant_name(),
                                type_metadata: variant_type_metadata,
                                offset: Size::ZERO,
                                size: self.layout.size,
                                align: self.layout.align.abi,
                                flags,
                                discriminant: niche_value,
                                source_info: variant_info.source_info(cx),
                            }
                        })
                        .collect()
                }
            }
        }
    }
}
+
// Creates `MemberDescription`s for the fields of a single enum variant.
struct VariantMemberDescriptionFactory<'ll, 'tcx> {
    /// Cloned from the `layout::Struct` describing the variant.
    offsets: Vec<Size>,
    /// `(field name, field type)` pairs in field order. Under the enum
    /// fallback the first entry is the artificial "RUST$ENUM$DISR" field
    /// (see `describe_enum_variant`).
    args: Vec<(String, Ty<'tcx>)>,
    /// Metadata used for the artificial discriminant field, if present.
    tag_type_metadata: Option<&'ll DIType>,
    span: Span,
}
+
+impl VariantMemberDescriptionFactory<'ll, 'tcx> {
+ fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> {
+ self.args
+ .iter()
+ .enumerate()
+ .map(|(i, &(ref name, ty))| {
+ // Discriminant is always the first field of our variant
+ // when using the enum fallback.
+ let is_artificial_discr = use_enum_fallback(cx) && i == 0;
+ let (size, align) = cx.size_and_align_of(ty);
+ MemberDescription {
+ name: name.to_string(),
+ type_metadata: if is_artificial_discr {
+ self.tag_type_metadata.unwrap_or_else(|| type_metadata(cx, ty, self.span))
+ } else {
+ type_metadata(cx, ty, self.span)
+ },
+ offset: self.offsets[i],
+ size,
+ align,
+ flags: if is_artificial_discr {
+ DIFlags::FlagArtificial
+ } else {
+ DIFlags::FlagZero
+ },
+ discriminant: None,
+ source_info: None,
+ }
+ })
+ .collect()
+ }
+}
+
// FIXME: terminology here should be aligned with `abi::TagEncoding`.
// `OptimizedTag` is `TagEncoding::Niche`, `RegularTag` is `TagEncoding::Direct`.
// `NoTag` should be removed; users should use `Option<EnumTagInfo>` instead.
#[derive(Copy, Clone)]
enum EnumTagInfo<'ll> {
    /// A dedicated discriminant field (`TagEncoding::Direct`).
    RegularTag { tag_field: Field, tag_type_metadata: &'ll DIType },
    /// Discriminant encoded in a niche (`TagEncoding::Niche`).
    OptimizedTag,
    /// No tag information needed (single-variant enums, or cases where the
    /// tag is handled elsewhere).
    NoTag,
}
+
/// Identifies one variant of an enum or generator for debuginfo purposes,
/// carrying what is needed to name the variant and its fields.
#[derive(Copy, Clone)]
enum VariantInfo<'a, 'tcx> {
    Adt(&'tcx ty::VariantDef),
    Generator {
        def_id: DefId,
        generator_layout: &'tcx GeneratorLayout<'tcx>,
        /// User-visible names of saved locals, indexed by `GeneratorSavedLocal`.
        generator_saved_local_names: &'a IndexVec<mir::GeneratorSavedLocal, Option<Symbol>>,
        variant_index: VariantIdx,
    },
}
+
impl<'tcx> VariantInfo<'_, 'tcx> {
    /// Calls `f` with the name used for this variant's struct debuginfo,
    /// borrowing the ADT variant's name where possible.
    fn map_struct_name<R>(&self, f: impl FnOnce(&str) -> R) -> R {
        match self {
            VariantInfo::Adt(variant) => f(&variant.ident.as_str()),
            VariantInfo::Generator { variant_index, .. } => {
                f(&GeneratorSubsts::variant_name(*variant_index))
            }
        }
    }

    /// Name shown for this variant as a union member (see the factory above).
    fn variant_name(&self) -> String {
        match self {
            VariantInfo::Adt(variant) => variant.ident.to_string(),
            VariantInfo::Generator { variant_index, .. } => {
                // Since GDB currently prints out the raw discriminant along
                // with every variant, make each variant name be just the value
                // of the discriminant. The struct name for the variant includes
                // the actual variant description.
                format!("{}", variant_index.as_usize())
            }
        }
    }

    /// Debuginfo name of field `i`; falls back to `__<i>` for tuple-like
    /// fields and for generator saved locals with no user-visible name.
    fn field_name(&self, i: usize) -> String {
        let field_name = match *self {
            // Named fields only exist when the variant is not a fn-style
            // (tuple) constructor.
            VariantInfo::Adt(variant) if variant.ctor_kind != CtorKind::Fn => {
                Some(variant.fields[i].ident.name)
            }
            VariantInfo::Generator {
                generator_layout,
                generator_saved_local_names,
                variant_index,
                ..
            } => {
                generator_saved_local_names
                    [generator_layout.variant_fields[variant_index][i.into()]]
            }
            _ => None,
        };
        field_name.map(|name| name.to_string()).unwrap_or_else(|| format!("__{}", i))
    }

    /// For generator variants, the source location recorded for the variant
    /// in the generator layout (if its span is real); `None` for ADTs.
    fn source_info(&self, cx: &CodegenCx<'ll, 'tcx>) -> Option<SourceInfo<'ll>> {
        match self {
            VariantInfo::Generator { def_id, variant_index, .. } => {
                let span =
                    cx.tcx.generator_layout(*def_id).variant_source_info[*variant_index].span;
                if !span.is_dummy() {
                    let loc = cx.lookup_debug_loc(span.lo());
                    return Some(SourceInfo {
                        file: file_metadata(cx, &loc.file, def_id.krate),
                        line: loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
                    });
                }
            }
            _ => {}
        }
        None
    }

    /// Generator variants are compiler-generated, hence artificial.
    fn is_artificial(&self) -> bool {
        match self {
            VariantInfo::Generator { .. } => true,
            VariantInfo::Adt(..) => false,
        }
    }
}
+
/// Returns a tuple of (1) `type_metadata_stub` of the variant, (2) a
/// `MemberDescriptionFactory` for producing the descriptions of the
/// fields of the variant. This is a rudimentary version of a full
/// `RecursiveTypeDescription`.
fn describe_enum_variant(
    cx: &CodegenCx<'ll, 'tcx>,
    layout: layout::TyAndLayout<'tcx>,
    variant: VariantInfo<'_, 'tcx>,
    discriminant_info: EnumTagInfo<'ll>,
    containing_scope: &'ll DIScope,
    span: Span,
) -> (&'ll DICompositeType, MemberDescriptionFactory<'ll, 'tcx>) {
    // Forward-declare the variant as an empty struct named after the variant.
    let metadata_stub = variant.map_struct_name(|variant_name| {
        let unique_type_id = debug_context(cx)
            .type_map
            .borrow_mut()
            .get_unique_type_id_of_enum_variant(cx, layout.ty, &variant_name);
        create_struct_stub(
            cx,
            layout.ty,
            &variant_name,
            unique_type_id,
            Some(containing_scope),
            // FIXME(tmandry): This doesn't seem to have any effect.
            if variant.is_artificial() { DIFlags::FlagArtificial } else { DIFlags::FlagZero },
        )
    });

    // Build an array of (field name, field type) pairs to be captured in the factory closure.
    let (offsets, args) = if use_enum_fallback(cx) {
        // If this is not a univariant enum, there is also the discriminant field.
        let (discr_offset, discr_arg) = match discriminant_info {
            RegularTag { tag_field, .. } => {
                // We have the layout of an enum variant, we need the layout of the outer enum
                let enum_layout = cx.layout_of(layout.ty);
                let offset = enum_layout.fields.offset(tag_field.as_usize());
                let args =
                    ("RUST$ENUM$DISR".to_owned(), enum_layout.field(cx, tag_field.as_usize()).ty);
                (Some(offset), Some(args))
            }
            _ => (None, None),
        };
        // Prepend the discriminant (when present) so it becomes the first
        // member of the variant struct under the fallback encoding; the
        // variant's own fields follow in order.
        (
            discr_offset
                .into_iter()
                .chain((0..layout.fields.count()).map(|i| layout.fields.offset(i)))
                .collect(),
            discr_arg
                .into_iter()
                .chain(
                    (0..layout.fields.count())
                        .map(|i| (variant.field_name(i), layout.field(cx, i).ty)),
                )
                .collect(),
        )
    } else {
        // Native encoding: just the variant's own fields.
        (
            (0..layout.fields.count()).map(|i| layout.fields.offset(i)).collect(),
            (0..layout.fields.count())
                .map(|i| (variant.field_name(i), layout.field(cx, i).ty))
                .collect(),
        )
    };

    let member_description_factory = VariantMDF(VariantMemberDescriptionFactory {
        offsets,
        args,
        tag_type_metadata: match discriminant_info {
            RegularTag { tag_type_metadata, .. } => Some(tag_type_metadata),
            _ => None,
        },
        span,
    });

    (metadata_stub, member_description_factory)
}
+
/// Prepares (but does not finish) the debuginfo description of an enum or
/// generator type. Members are filled in later via the returned
/// `EnumMemberDescriptionFactory`.
fn prepare_enum_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    enum_type: Ty<'tcx>,
    enum_def_id: DefId,
    unique_type_id: UniqueTypeId,
    span: Span,
    // Types of fields emitted alongside the variant part (used on the
    // native path below); presumably generator prefix fields — confirm at
    // the caller.
    outer_field_tys: Vec<Ty<'tcx>>,
) -> RecursiveTypeDescription<'ll, 'tcx> {
    let tcx = cx.tcx;
    let enum_name = compute_debuginfo_type_name(tcx, enum_type, false);
    // FIXME(tmandry): This doesn't seem to have any effect.
    let enum_flags = match enum_type.kind() {
        ty::Generator(..) => DIFlags::FlagArtificial,
        _ => DIFlags::FlagZero,
    };

    let containing_scope = get_namespace_for_item(cx, enum_def_id);
    // FIXME: This should emit actual file metadata for the enum, but we
    // currently can't get the necessary information when it comes to types
    // imported from other crates. Formerly we violated the ODR when performing
    // LTO because we emitted debuginfo for the same type with varying file
    // metadata, so as a workaround we pretend that the type comes from
    // <unknown>
    let file_metadata = unknown_file_metadata(cx);

    // Builds a DWARF enumeration type listing the discriminant values, cached
    // per `(enum_def_id, discr)` in `created_enum_disr_types`.
    let discriminant_type_metadata = |discr: Primitive| {
        let enumerators_metadata: Vec<_> = match enum_type.kind() {
            ty::Adt(def, _) => def
                .discriminants(tcx)
                .zip(&def.variants)
                .map(|((_, discr), v)| {
                    let name = v.ident.as_str();
                    let is_unsigned = match discr.ty.kind() {
                        ty::Int(_) => false,
                        ty::Uint(_) => true,
                        _ => bug!("non integer discriminant"),
                    };
                    unsafe {
                        Some(llvm::LLVMRustDIBuilderCreateEnumerator(
                            DIB(cx),
                            name.as_ptr().cast(),
                            name.len(),
                            // FIXME: what if enumeration has i128 discriminant?
                            discr.val as i64,
                            is_unsigned,
                        ))
                    }
                })
                .collect(),
            ty::Generator(_, substs, _) => substs
                .as_generator()
                .variant_range(enum_def_id, tcx)
                .map(|variant_index| {
                    debug_assert_eq!(tcx.types.u32, substs.as_generator().discr_ty(tcx));
                    let name = GeneratorSubsts::variant_name(variant_index);
                    unsafe {
                        Some(llvm::LLVMRustDIBuilderCreateEnumerator(
                            DIB(cx),
                            name.as_ptr().cast(),
                            name.len(),
                            // Generators use u32 as discriminant type, verified above.
                            variant_index.as_u32().into(),
                            true, // IsUnsigned
                        ))
                    }
                })
                .collect(),
            _ => bug!(),
        };

        let disr_type_key = (enum_def_id, discr);
        let cached_discriminant_type_metadata =
            debug_context(cx).created_enum_disr_types.borrow().get(&disr_type_key).cloned();
        match cached_discriminant_type_metadata {
            Some(discriminant_type_metadata) => discriminant_type_metadata,
            None => {
                let (discriminant_size, discriminant_align) = (discr.size(cx), discr.align(cx));
                let discriminant_base_type_metadata =
                    type_metadata(cx, discr.to_ty(tcx), rustc_span::DUMMY_SP);

                // `item_name` must outlive the `&str` borrowed from it below.
                let item_name;
                let discriminant_name = match enum_type.kind() {
                    ty::Adt(..) => {
                        item_name = tcx.item_name(enum_def_id).as_str();
                        &*item_name
                    }
                    ty::Generator(..) => enum_name.as_str(),
                    _ => bug!(),
                };

                let discriminant_type_metadata = unsafe {
                    llvm::LLVMRustDIBuilderCreateEnumerationType(
                        DIB(cx),
                        containing_scope,
                        discriminant_name.as_ptr().cast(),
                        discriminant_name.len(),
                        file_metadata,
                        UNKNOWN_LINE_NUMBER,
                        discriminant_size.bits(),
                        discriminant_align.abi.bits() as u32,
                        create_DIArray(DIB(cx), &enumerators_metadata),
                        discriminant_base_type_metadata,
                        true,
                    )
                };

                debug_context(cx)
                    .created_enum_disr_types
                    .borrow_mut()
                    .insert(disr_type_key, discriminant_type_metadata);

                discriminant_type_metadata
            }
        }
    };

    let layout = cx.layout_of(enum_type);

    // An enum represented as just its scalar tag (e.g. a fieldless enum):
    // the enumeration type is all the debuginfo that is needed.
    if let (
        &Abi::Scalar(_),
        &Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. },
    ) = (&layout.abi, &layout.variants)
    {
        return FinalMetadata(discriminant_type_metadata(tag.value));
    }

    // MSVC fallback: emit the enum as a union of per-variant structs instead
    // of a DWARF variant part (see `use_enum_fallback`).
    if use_enum_fallback(cx) {
        let discriminant_type_metadata = match layout.variants {
            Variants::Single { .. }
            | Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, .. } => None,
            Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. } => {
                Some(discriminant_type_metadata(tag.value))
            }
        };

        let enum_metadata = {
            let type_map = debug_context(cx).type_map.borrow();
            let unique_type_id_str = type_map.get_unique_type_id_as_string(unique_type_id);

            unsafe {
                llvm::LLVMRustDIBuilderCreateUnionType(
                    DIB(cx),
                    containing_scope,
                    enum_name.as_ptr().cast(),
                    enum_name.len(),
                    file_metadata,
                    UNKNOWN_LINE_NUMBER,
                    layout.size.bits(),
                    layout.align.abi.bits() as u32,
                    enum_flags,
                    None,
                    0, // RuntimeLang
                    unique_type_id_str.as_ptr().cast(),
                    unique_type_id_str.len(),
                )
            }
        };

        return create_and_register_recursive_type_forward_declaration(
            cx,
            enum_type,
            unique_type_id,
            enum_metadata,
            enum_metadata,
            EnumMDF(EnumMemberDescriptionFactory {
                enum_type,
                layout,
                tag_type_metadata: discriminant_type_metadata,
                containing_scope,
                span,
            }),
        );
    }

    // Native DWARF variant-part encoding from here on.
    let discriminator_name = match enum_type.kind() {
        ty::Generator(..) => "__state",
        _ => "",
    };
    // Member describing the discriminator field of the variant part, if any.
    let discriminator_metadata = match layout.variants {
        // A single-variant enum has no discriminant.
        Variants::Single { .. } => None,

        Variants::Multiple {
            tag_encoding: TagEncoding::Niche { .. }, ref tag, tag_field, ..
        } => {
            // Find the integer type of the correct size.
            let size = tag.value.size(cx);
            let align = tag.value.align(cx);

            let tag_type = match tag.value {
                Int(t, _) => t,
                F32 => Integer::I32,
                F64 => Integer::I64,
                Pointer => cx.data_layout().ptr_sized_integer(),
            }
            .to_ty(cx.tcx, false);

            let tag_metadata = basic_type_metadata(cx, tag_type);
            unsafe {
                Some(llvm::LLVMRustDIBuilderCreateMemberType(
                    DIB(cx),
                    containing_scope,
                    discriminator_name.as_ptr().cast(),
                    discriminator_name.len(),
                    file_metadata,
                    UNKNOWN_LINE_NUMBER,
                    size.bits(),
                    align.abi.bits() as u32,
                    layout.fields.offset(tag_field).bits(),
                    DIFlags::FlagArtificial,
                    tag_metadata,
                ))
            }
        }

        Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, tag_field, .. } => {
            let discr_type = tag.value.to_ty(cx.tcx);
            let (size, align) = cx.size_and_align_of(discr_type);

            let discr_metadata = basic_type_metadata(cx, discr_type);
            unsafe {
                Some(llvm::LLVMRustDIBuilderCreateMemberType(
                    DIB(cx),
                    containing_scope,
                    discriminator_name.as_ptr().cast(),
                    discriminator_name.len(),
                    file_metadata,
                    UNKNOWN_LINE_NUMBER,
                    size.bits(),
                    align.bits() as u32,
                    layout.fields.offset(tag_field).bits(),
                    DIFlags::FlagArtificial,
                    discr_metadata,
                ))
            }
        }
    };

    // Members of the wrapper struct that are not part of the variant part
    // (built from `outer_field_tys`).
    let mut outer_fields = match layout.variants {
        Variants::Single { .. } => vec![],
        Variants::Multiple { .. } => {
            let tuple_mdf = TupleMemberDescriptionFactory {
                ty: enum_type,
                component_types: outer_field_tys,
                span,
            };
            tuple_mdf
                .create_member_descriptions(cx)
                .into_iter()
                .map(|desc| Some(desc.into_metadata(cx, containing_scope)))
                .collect()
        }
    };

    let variant_part_unique_type_id_str = debug_context(cx)
        .type_map
        .borrow_mut()
        .get_unique_type_id_str_of_enum_variant_part(unique_type_id);
    let empty_array = create_DIArray(DIB(cx), &[]);
    let name = "";
    // The variant part itself; its per-variant members are filled in later by
    // the factory below.
    let variant_part = unsafe {
        llvm::LLVMRustDIBuilderCreateVariantPart(
            DIB(cx),
            containing_scope,
            name.as_ptr().cast(),
            name.len(),
            file_metadata,
            UNKNOWN_LINE_NUMBER,
            layout.size.bits(),
            layout.align.abi.bits() as u32,
            enum_flags,
            discriminator_metadata,
            empty_array,
            variant_part_unique_type_id_str.as_ptr().cast(),
            variant_part_unique_type_id_str.len(),
        )
    };
    outer_fields.push(Some(variant_part));

    let struct_wrapper = {
        // The variant part must be wrapped in a struct according to DWARF.
        let type_array = create_DIArray(DIB(cx), &outer_fields);

        let type_map = debug_context(cx).type_map.borrow();
        let unique_type_id_str = type_map.get_unique_type_id_as_string(unique_type_id);

        unsafe {
            llvm::LLVMRustDIBuilderCreateStructType(
                DIB(cx),
                Some(containing_scope),
                enum_name.as_ptr().cast(),
                enum_name.len(),
                file_metadata,
                UNKNOWN_LINE_NUMBER,
                layout.size.bits(),
                layout.align.abi.bits() as u32,
                enum_flags,
                None,
                type_array,
                0,
                None,
                unique_type_id_str.as_ptr().cast(),
                unique_type_id_str.len(),
            )
        }
    };

    create_and_register_recursive_type_forward_declaration(
        cx,
        enum_type,
        unique_type_id,
        struct_wrapper,
        variant_part,
        EnumMDF(EnumMemberDescriptionFactory {
            enum_type,
            layout,
            tag_type_metadata: None,
            containing_scope,
            span,
        }),
    )
}
+
+/// Creates debug information for a composite type, that is, anything that
+/// results in a LLVM struct.
+///
+/// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums.
+fn composite_type_metadata(
+ cx: &CodegenCx<'ll, 'tcx>,
+ composite_type: Ty<'tcx>,
+ composite_type_name: &str,
+ composite_type_unique_id: UniqueTypeId,
+ member_descriptions: Vec<MemberDescription<'ll>>,
+ containing_scope: Option<&'ll DIScope>,
+
+ // Ignore source location information as long as it
+ // can't be reconstructed for non-local crates.
+ _file_metadata: &'ll DIFile,
+ _definition_span: Span,
+) -> &'ll DICompositeType {
+ // Create the (empty) struct metadata node ...
+ let composite_type_metadata = create_struct_stub(
+ cx,
+ composite_type,
+ composite_type_name,
+ composite_type_unique_id,
+ containing_scope,
+ DIFlags::FlagZero,
+ );
+ // ... and immediately create and add the member descriptions.
+ set_members_of_composite_type(cx, composite_type, composite_type_metadata, member_descriptions);
+
+ composite_type_metadata
+}
+
/// Fills in the member list (and type parameters) of a composite type whose
/// empty stub was created earlier via `create_struct_stub`/`create_union_stub`.
fn set_members_of_composite_type(
    cx: &CodegenCx<'ll, 'tcx>,
    composite_type: Ty<'tcx>,
    composite_type_metadata: &'ll DICompositeType,
    member_descriptions: Vec<MemberDescription<'ll>>,
) {
    // In some rare cases LLVM metadata uniquing would lead to an existing type
    // description being used instead of a new one created in
    // create_struct_stub. This would cause a hard to trace assertion in
    // DICompositeType::SetTypeArray(). The following check makes sure that we
    // get a better error message if this should happen again due to some
    // regression.
    //
    // The borrow is scoped so it is released before the FFI call below.
    {
        let mut composite_types_completed =
            debug_context(cx).composite_types_completed.borrow_mut();
        if !composite_types_completed.insert(&composite_type_metadata) {
            bug!(
                "debuginfo::set_members_of_composite_type() - \
                 Already completed forward declaration re-encountered."
            );
        }
    }

    // Lower each description to an LLVM DI member node.
    let member_metadata: Vec<_> = member_descriptions
        .into_iter()
        .map(|desc| Some(desc.into_metadata(cx, composite_type_metadata)))
        .collect();

    let type_params = compute_type_parameters(cx, composite_type);
    unsafe {
        let type_array = create_DIArray(DIB(cx), &member_metadata[..]);
        llvm::LLVMRustDICompositeTypeReplaceArrays(
            DIB(cx),
            composite_type_metadata,
            Some(type_array),
            type_params,
        );
    }
}
+
/// Computes the type parameters for a type, if any, for the given metadata.
///
/// Returns an (always `Some`) array of `DITemplateTypeParameter` nodes; the
/// array is empty for non-generic (or non-ADT) types.
fn compute_type_parameters(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>) -> Option<&'ll DIArray> {
    if let ty::Adt(def, substs) = *ty.kind() {
        if substs.types().next().is_some() {
            let generics = cx.tcx.generics_of(def.did);
            let names = get_parameter_names(cx, generics);
            // One entry per *type* parameter; lifetime and const arguments
            // are dropped by the `filter_map`.
            let template_params: Vec<_> = substs
                .iter()
                .zip(names)
                .filter_map(|(kind, name)| {
                    if let GenericArgKind::Type(ty) = kind.unpack() {
                        let actual_type =
                            cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
                        let actual_type_metadata =
                            type_metadata(cx, actual_type, rustc_span::DUMMY_SP);
                        let name = &name.as_str();
                        Some(unsafe {
                            Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
                                DIB(cx),
                                None,
                                name.as_ptr().cast(),
                                name.len(),
                                actual_type_metadata,
                            ))
                        })
                    } else {
                        None
                    }
                })
                .collect();

            return Some(create_DIArray(DIB(cx), &template_params[..]));
        }
    }
    return Some(create_DIArray(DIB(cx), &[]));

    // Collects parameter names, recursing into parent generics first so the
    // names line up with `substs` order (outermost parameters first).
    fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
        let mut names = generics
            .parent
            .map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
        names.extend(generics.params.iter().map(|param| param.name));
        names
    }
}
+
/// A convenience wrapper around `LLVMRustDIBuilderCreateStructType()`. Does not do
/// any caching, does not add any fields to the struct. This can be done later
/// with `set_members_of_composite_type()`.
fn create_struct_stub(
    cx: &CodegenCx<'ll, 'tcx>,
    struct_type: Ty<'tcx>,
    struct_type_name: &str,
    unique_type_id: UniqueTypeId,
    containing_scope: Option<&'ll DIScope>,
    flags: DIFlags,
) -> &'ll DICompositeType {
    let (struct_size, struct_align) = cx.size_and_align_of(struct_type);

    // Shadow the id with its string form, which is what LLVM wants.
    let type_map = debug_context(cx).type_map.borrow();
    let unique_type_id = type_map.get_unique_type_id_as_string(unique_type_id);

    let metadata_stub = unsafe {
        // `LLVMRustDIBuilderCreateStructType()` wants an empty array. A null
        // pointer will lead to hard to trace and debug LLVM assertions
        // later on in `llvm/lib/IR/Value.cpp`.
        let empty_array = create_DIArray(DIB(cx), &[]);

        llvm::LLVMRustDIBuilderCreateStructType(
            DIB(cx),
            containing_scope,
            struct_type_name.as_ptr().cast(),
            struct_type_name.len(),
            unknown_file_metadata(cx),
            UNKNOWN_LINE_NUMBER,
            struct_size.bits(),
            struct_align.bits() as u32,
            flags,
            None,
            empty_array,
            0,
            None,
            unique_type_id.as_ptr().cast(),
            unique_type_id.len(),
        )
    };

    metadata_stub
}
+
/// Union counterpart of `create_struct_stub`: creates an empty
/// `DICompositeType` for a union, to be filled in later with
/// `set_members_of_composite_type()`. No caching is performed.
fn create_union_stub(
    cx: &CodegenCx<'ll, 'tcx>,
    union_type: Ty<'tcx>,
    union_type_name: &str,
    unique_type_id: UniqueTypeId,
    containing_scope: &'ll DIScope,
) -> &'ll DICompositeType {
    let (union_size, union_align) = cx.size_and_align_of(union_type);

    // Shadow the id with its string form, which is what LLVM wants.
    let type_map = debug_context(cx).type_map.borrow();
    let unique_type_id = type_map.get_unique_type_id_as_string(unique_type_id);

    let metadata_stub = unsafe {
        // `LLVMRustDIBuilderCreateUnionType()` wants an empty array. A null
        // pointer will lead to hard to trace and debug LLVM assertions
        // later on in `llvm/lib/IR/Value.cpp`.
        let empty_array = create_DIArray(DIB(cx), &[]);

        llvm::LLVMRustDIBuilderCreateUnionType(
            DIB(cx),
            containing_scope,
            union_type_name.as_ptr().cast(),
            union_type_name.len(),
            unknown_file_metadata(cx),
            UNKNOWN_LINE_NUMBER,
            union_size.bits(),
            union_align.bits() as u32,
            DIFlags::FlagZero,
            Some(empty_array),
            0, // RuntimeLang
            unique_type_id.as_ptr().cast(),
            unique_type_id.len(),
        )
    };

    metadata_stub
}
+
/// Creates debug information for the given global variable.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_global_var_metadata(cx: &CodegenCx<'ll, '_>, def_id: DefId, global: &'ll Value) {
    // No debuginfo context means debuginfo is disabled entirely.
    if cx.dbg_cx.is_none() {
        return;
    }

    // Only create type information if full debuginfo is enabled
    if cx.sess().opts.debuginfo != DebugInfo::Full {
        return;
    }

    let tcx = cx.tcx;

    // We may want to remove the namespace scope if we're in an extern block (see
    // https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952).
    let var_scope = get_namespace_for_item(cx, def_id);
    let span = tcx.def_span(def_id);

    // Dummy spans (no real source location) fall back to <unknown> file info.
    let (file_metadata, line_number) = if !span.is_dummy() {
        let loc = cx.lookup_debug_loc(span.lo());
        (file_metadata(cx, &loc.file, LOCAL_CRATE), loc.line)
    } else {
        (unknown_file_metadata(cx), None)
    };

    let is_local_to_unit = is_node_local_to_unit(cx, def_id);
    let variable_type = Instance::mono(cx.tcx, def_id).ty(cx.tcx, ty::ParamEnv::reveal_all());
    let type_metadata = type_metadata(cx, variable_type, span);
    let var_name = tcx.item_name(def_id).as_str();
    let linkage_name = mangled_name_of_instance(cx, Instance::mono(tcx, def_id)).name;
    // When empty, linkage_name field is omitted,
    // which is what we want for no_mangle statics
    let linkage_name = if var_name == linkage_name { "" } else { linkage_name };

    let global_align = cx.align_of(variable_type);

    unsafe {
        llvm::LLVMRustDIBuilderCreateStaticVariable(
            DIB(cx),
            Some(var_scope),
            var_name.as_ptr().cast(),
            var_name.len(),
            linkage_name.as_ptr().cast(),
            linkage_name.len(),
            file_metadata,
            line_number.unwrap_or(UNKNOWN_LINE_NUMBER),
            type_metadata,
            is_local_to_unit,
            global,
            None,
            global_align.bytes() as u32,
        );
    }
}
+
/// Creates debug information for the given vtable, which is for the
/// given type.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_vtable_metadata(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>, vtable: &'ll Value) {
    // No debuginfo context means debuginfo is disabled entirely.
    if cx.dbg_cx.is_none() {
        return;
    }

    // Only create type information if full debuginfo is enabled
    if cx.sess().opts.debuginfo != DebugInfo::Full {
        return;
    }

    let type_metadata = type_metadata(cx, ty, rustc_span::DUMMY_SP);

    unsafe {
        // `LLVMRustDIBuilderCreateStructType()` wants an empty array. A null
        // pointer will lead to hard to trace and debug LLVM assertions
        // later on in `llvm/lib/IR/Value.cpp`.
        let empty_array = create_DIArray(DIB(cx), &[]);
        let name = "vtable";

        // Create a new one each time. We don't want metadata caching
        // here, because each vtable will refer to a unique containing
        // type.
        let vtable_type = llvm::LLVMRustDIBuilderCreateStructType(
            DIB(cx),
            NO_SCOPE_METADATA,
            name.as_ptr().cast(),
            name.len(),
            unknown_file_metadata(cx),
            UNKNOWN_LINE_NUMBER,
            Size::ZERO.bits(),
            cx.tcx.data_layout.pointer_align.abi.bits() as u32,
            DIFlags::FlagArtificial,
            None,
            empty_array,
            0,
            Some(type_metadata),
            name.as_ptr().cast(),
            name.len(),
        );

        // The vtable itself is emitted as a static variable of that struct
        // type, with no linkage name.
        let linkage_name = "";
        llvm::LLVMRustDIBuilderCreateStaticVariable(
            DIB(cx),
            NO_SCOPE_METADATA,
            name.as_ptr().cast(),
            name.len(),
            linkage_name.as_ptr().cast(),
            linkage_name.len(),
            unknown_file_metadata(cx),
            UNKNOWN_LINE_NUMBER,
            vtable_type,
            true,
            vtable,
            None,
            0,
        );
    }
}
+
/// Creates an "extension" of an existing `DIScope` into another file.
///
/// Thin wrapper around `LLVMRustDIBuilderCreateLexicalBlockFile`; presumably
/// used for code whose source file differs from its enclosing scope's file
/// (e.g. macro expansion) — confirm at the call sites.
pub fn extend_scope_to_file(
    cx: &CodegenCx<'ll, '_>,
    scope_metadata: &'ll DIScope,
    file: &rustc_span::SourceFile,
    defining_crate: CrateNum,
) -> &'ll DILexicalBlock {
    let file_metadata = file_metadata(cx, &file, defining_crate);
    unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlockFile(DIB(cx), scope_metadata, file_metadata) }
}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
new file mode 100644
index 0000000..7cdd366
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -0,0 +1,563 @@
+// See doc.rs for documentation.
+mod doc;
+
+use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
+
+use self::metadata::{file_metadata, type_metadata, TypeMap, UNKNOWN_LINE_NUMBER};
+use self::namespace::mangled_name_of_instance;
+use self::type_names::compute_debuginfo_type_name;
+use self::utils::{create_DIArray, is_node_local_to_unit, DIB};
+
+use crate::abi::FnAbi;
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{
+ DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DISPFlags, DIScope, DIType, DIVariable,
+};
+use crate::value::Value;
+
+use rustc_codegen_ssa::debuginfo::type_names;
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LOCAL_CRATE};
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeFoldable};
+use rustc_session::config::{self, DebugInfo};
+use rustc_span::symbol::Symbol;
+use rustc_span::{self, BytePos, Span};
+use rustc_target::abi::{LayoutOf, Primitive, Size};
+
+use libc::c_uint;
+use smallvec::SmallVec;
+use std::cell::RefCell;
+use tracing::debug;
+
+mod create_scope_map;
+pub mod gdb;
+pub mod metadata;
+mod namespace;
+mod source_loc;
+mod utils;
+
+pub use self::create_scope_map::compute_mir_scopes;
+pub use self::metadata::create_global_var_metadata;
+pub use self::metadata::extend_scope_to_file;
+
+#[allow(non_upper_case_globals)]
+const DW_TAG_auto_variable: c_uint = 0x100;
+#[allow(non_upper_case_globals)]
+const DW_TAG_arg_variable: c_uint = 0x101;
+
+/// A context object for maintaining all state needed by the debuginfo module.
+pub struct CrateDebugContext<'a, 'tcx> {
+    // LLVM context the module (and hence the DIBuilder) belongs to.
+    llcontext: &'a llvm::Context,
+    // The LLVM module debuginfo is being emitted into.
+    llmod: &'a llvm::Module,
+    // The LLVM DIBuilder; disposed in this type's `Drop` impl.
+    builder: &'a mut DIBuilder<'a>,
+    // Cache of `DIFile` nodes, keyed by (file name, working dir) strings.
+    created_files: RefCell<FxHashMap<(Option<String>, Option<String>), &'a DIFile>>,
+    // Cache of discriminant types for enums, keyed by (enum def, discr repr).
+    created_enum_disr_types: RefCell<FxHashMap<(DefId, Primitive), &'a DIType>>,
+
+    // Cache mapping Rust types to their `DIType` metadata.
+    type_map: RefCell<TypeMap<'a, 'tcx>>,
+    // Cache of namespace scopes created by `namespace::item_namespace`.
+    namespace_map: RefCell<DefIdMap<&'a DIScope>>,
+
+    // This collection is used to assert that composite types (structs, enums,
+    // ...) have their members only set once:
+    composite_types_completed: RefCell<FxHashSet<&'a DIType>>,
+}
+
+impl Drop for CrateDebugContext<'a, 'tcx> {
+    fn drop(&mut self) {
+        // Release the LLVM DIBuilder when the debug context goes away; the
+        // raw-pointer round-trip reborrows `builder` mutably for the FFI call.
+        unsafe {
+            llvm::LLVMRustDIBuilderDispose(&mut *(self.builder as *mut _));
+        }
+    }
+}
+
+impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> {
+    /// Creates a fresh debug context (and LLVM DIBuilder) for `llmod`,
+    /// with all metadata caches empty.
+    pub fn new(llmod: &'a llvm::Module) -> Self {
+        debug!("CrateDebugContext::new");
+        let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) };
+        // DIBuilder inherits context from the module, so we'd better use the same one
+        let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
+        CrateDebugContext {
+            llcontext,
+            llmod,
+            builder,
+            created_files: Default::default(),
+            created_enum_disr_types: Default::default(),
+            type_map: Default::default(),
+            namespace_map: RefCell::new(Default::default()),
+            composite_types_completed: Default::default(),
+        }
+    }
+}
+
+/// Creates any deferred debug metadata nodes
+///
+/// Must run after codegen: it finalizes the DIBuilder and attaches the
+/// module flags (DWARF version, CodeView, debug-info version) that LLVM
+/// and downstream tools expect.
+pub fn finalize(cx: &CodegenCx<'_, '_>) {
+    // Nothing to do when debuginfo was never enabled for this crate.
+    if cx.dbg_cx.is_none() {
+        return;
+    }
+
+    debug!("finalize");
+
+    if gdb::needs_gdb_debug_scripts_section(cx) {
+        // Add a .debug_gdb_scripts section to this compile-unit. This will
+        // cause GDB to try and load the gdb_load_rust_pretty_printers.py file,
+        // which activates the Rust pretty printers for binary this section is
+        // contained in.
+        gdb::get_or_insert_gdb_debug_scripts_section_global(cx);
+    }
+
+    unsafe {
+        llvm::LLVMRustDIBuilderFinalize(DIB(cx));
+        // Debuginfo generation in LLVM by default uses a higher
+        // version of dwarf than macOS currently understands. We can
+        // instruct LLVM to emit an older version of dwarf, however,
+        // for macOS to understand. For more info see #11352
+        // This can be overridden using --llvm-opts -dwarf-version,N.
+        // Android has the same issue (#22398)
+        if cx.sess().target.target.options.is_like_osx
+            || cx.sess().target.target.options.is_like_android
+        {
+            // NB: module-flag names must be NUL-terminated for the FFI call.
+            llvm::LLVMRustAddModuleFlag(cx.llmod, "Dwarf Version\0".as_ptr().cast(), 2)
+        }
+
+        // Indicate that we want CodeView debug information on MSVC
+        if cx.sess().target.target.options.is_like_msvc {
+            llvm::LLVMRustAddModuleFlag(cx.llmod, "CodeView\0".as_ptr().cast(), 1)
+        }
+
+        // Prevent bitcode readers from deleting the debug info.
+        let ptr = "Debug Info Version\0".as_ptr();
+        llvm::LLVMRustAddModuleFlag(cx.llmod, ptr.cast(), llvm::LLVMRustDebugMetadataVersion());
+    };
+}
+
+impl DebugInfoBuilderMethods for Builder<'a, 'll, 'tcx> {
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+    /// Emits an `llvm.dbg.declare` associating `dbg_var` with the location
+    /// `variable_alloca`, offset by `direct_offset` and then dereferenced/
+    /// offset once per entry of `indirect_offsets` (as a DWARF expression).
+    fn dbg_var_addr(
+        &mut self,
+        dbg_var: &'ll DIVariable,
+        scope_metadata: &'ll DIScope,
+        variable_alloca: Self::Value,
+        direct_offset: Size,
+        indirect_offsets: &[Size],
+        span: Span,
+    ) {
+        let cx = self.cx();
+
+        // Convert the direct and indirect offsets to address ops.
+        // FIXME(eddyb) use `const`s instead of getting the values via FFI,
+        // the values should match the ones in the DWARF standard anyway.
+        let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() };
+        let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() };
+        let mut addr_ops = SmallVec::<[_; 8]>::new();
+
+        // A zero offset needs no op at all; only emit plus_uconst when nonzero.
+        if direct_offset.bytes() > 0 {
+            addr_ops.push(op_plus_uconst());
+            addr_ops.push(direct_offset.bytes() as i64);
+        }
+        for &offset in indirect_offsets {
+            // Each indirect step is a deref, optionally followed by an offset.
+            addr_ops.push(op_deref());
+            if offset.bytes() > 0 {
+                addr_ops.push(op_plus_uconst());
+                addr_ops.push(offset.bytes() as i64);
+            }
+        }
+
+        // FIXME(eddyb) maybe this information could be extracted from `dbg_var`,
+        // to avoid having to pass it down in both places?
+        // NB: `var` doesn't seem to know about the column, so that's a limitation.
+        let dbg_loc = cx.create_debug_loc(scope_metadata, span);
+        unsafe {
+            // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
+            llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
+                DIB(cx),
+                variable_alloca,
+                dbg_var,
+                addr_ops.as_ptr(),
+                addr_ops.len() as c_uint,
+                dbg_loc,
+                self.llbb(),
+            );
+        }
+    }
+
+    /// Sets the builder's current debug location to `span` within `scope`;
+    /// subsequent instructions inherit this location.
+    fn set_source_location(&mut self, scope: &'ll DIScope, span: Span) {
+        debug!("set_source_location: {}", self.sess().source_map().span_to_string(span));
+
+        let dbg_loc = self.cx().create_debug_loc(scope, span);
+
+        unsafe {
+            llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc);
+        }
+    }
+    fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
+        gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
+    }
+
+    /// Best-effort naming of an LLVM value for IR readability; silently does
+    /// nothing for globals, already-named values, or under `fewer_names`.
+    fn set_var_name(&mut self, value: &'ll Value, name: &str) {
+        // Avoid wasting time if LLVM value names aren't even enabled.
+        if self.sess().fewer_names() {
+            return;
+        }
+
+        // Only function parameters and instructions are local to a function,
+        // don't change the name of anything else (e.g. globals).
+        let param_or_inst = unsafe {
+            llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some()
+        };
+        if !param_or_inst {
+            return;
+        }
+
+        // Avoid replacing the name if it already exists.
+        // While we could combine the names somehow, it'd
+        // get noisy quick, and the usefulness is dubious.
+        if llvm::get_value_name(value).is_empty() {
+            llvm::set_value_name(value, name.as_bytes());
+        }
+    }
+}
+
+impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    /// Builds the `DISubprogram` for `instance` (attached to `llfn`) and a
+    /// `FunctionDebugContext` whose scopes are filled in from the MIR body.
+    /// Returns `None` when debuginfo is disabled or the MIR span is a dummy.
+    fn create_function_debug_context(
+        &self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        llfn: &'ll Value,
+        mir: &mir::Body<'_>,
+    ) -> Option<FunctionDebugContext<&'ll DIScope>> {
+        if self.sess().opts.debuginfo == DebugInfo::None {
+            return None;
+        }
+
+        let span = mir.span;
+
+        // This can be the case for functions inlined from another crate
+        if span.is_dummy() {
+            // FIXME(simulacrum): Probably can't happen; remove.
+            return None;
+        }
+
+        let def_id = instance.def_id();
+        let containing_scope = get_containing_scope(self, instance);
+        let loc = self.lookup_debug_loc(span.lo());
+        let file_metadata = file_metadata(self, &loc.file, def_id.krate);
+
+        let function_type_metadata = unsafe {
+            let fn_signature = get_function_signature(self, fn_abi);
+            llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
+        };
+
+        // Find the enclosing function, in case this is a closure.
+        let def_key = self.tcx().def_key(def_id);
+        let mut name = def_key.disambiguated_data.data.to_string();
+
+        let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id);
+
+        // Get_template_parameters() will append a `<...>` clause to the function
+        // name if necessary.
+        let generics = self.tcx().generics_of(enclosing_fn_def_id);
+        let substs = instance.substs.truncate_to(self.tcx(), generics);
+        let template_parameters = get_template_parameters(self, &generics, substs, &mut name);
+
+        let linkage_name = &mangled_name_of_instance(self, instance).name;
+        // Omit the linkage_name if it is the same as subprogram name.
+        let linkage_name = if &name == linkage_name { "" } else { linkage_name };
+
+        // FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
+        let scope_line = loc.line;
+
+        let mut flags = DIFlags::FlagPrototyped;
+
+        // A function whose return type is uninhabited can never return.
+        if fn_abi.ret.layout.abi.is_uninhabited() {
+            flags |= DIFlags::FlagNoReturn;
+        }
+
+        let mut spflags = DISPFlags::SPFlagDefinition;
+        if is_node_local_to_unit(self, def_id) {
+            spflags |= DISPFlags::SPFlagLocalToUnit;
+        }
+        if self.sess().opts.optimize != config::OptLevel::No {
+            spflags |= DISPFlags::SPFlagOptimized;
+        }
+        // Mark the crate's entry function as the "main" subprogram.
+        if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) {
+            if id.to_def_id() == def_id {
+                spflags |= DISPFlags::SPFlagMainSubprogram;
+            }
+        }
+
+        let fn_metadata = unsafe {
+            llvm::LLVMRustDIBuilderCreateFunction(
+                DIB(self),
+                containing_scope,
+                name.as_ptr().cast(),
+                name.len(),
+                linkage_name.as_ptr().cast(),
+                linkage_name.len(),
+                file_metadata,
+                loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
+                function_type_metadata,
+                scope_line.unwrap_or(UNKNOWN_LINE_NUMBER),
+                flags,
+                spflags,
+                llfn,
+                template_parameters,
+                None,
+            )
+        };
+
+        // Initialize fn debug context (including scopes).
+        // FIXME(eddyb) figure out a way to not need `Option` for `scope_metadata`.
+        let null_scope = DebugScope {
+            scope_metadata: None,
+            file_start_pos: BytePos(0),
+            file_end_pos: BytePos(0),
+        };
+        let mut fn_debug_context = FunctionDebugContext {
+            scopes: IndexVec::from_elem(null_scope, &mir.source_scopes),
+            defining_crate: def_id.krate,
+        };
+
+        // Fill in all the scopes, with the information from the MIR body.
+        compute_mir_scopes(self, mir, fn_metadata, &mut fn_debug_context);
+
+        return Some(fn_debug_context);
+
+        // Builds the type array for the subroutine type: index 0 is the
+        // return type (or `None` if ignored), followed by argument types.
+        fn get_function_signature<'ll, 'tcx>(
+            cx: &CodegenCx<'ll, 'tcx>,
+            fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        ) -> &'ll DIArray {
+            if cx.sess().opts.debuginfo == DebugInfo::Limited {
+                return create_DIArray(DIB(cx), &[]);
+            }
+
+            let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
+
+            // Return type -- llvm::DIBuilder wants this at index 0
+            signature.push(if fn_abi.ret.is_ignore() {
+                None
+            } else {
+                Some(type_metadata(cx, fn_abi.ret.layout.ty, rustc_span::DUMMY_SP))
+            });
+
+            // Arguments types
+            if cx.sess().target.target.options.is_like_msvc {
+                // FIXME(#42800):
+                // There is a bug in MSDIA that leads to a crash when it encounters
+                // a fixed-size array of `u8` or something zero-sized in a
+                // function-type (see #40477).
+                // As a workaround, we replace those fixed-size arrays with a
+                // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would
+                // appear as `fn foo(a: u8, b: *const u8)` in debuginfo,
+                // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
+                // This transformed type is wrong, but these function types are
+                // already inaccurate due to ABI adjustments (see #42800).
+                signature.extend(fn_abi.args.iter().map(|arg| {
+                    let t = arg.layout.ty;
+                    let t = match t.kind() {
+                        ty::Array(ct, _)
+                            if (*ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() =>
+                        {
+                            cx.tcx.mk_imm_ptr(ct)
+                        }
+                        _ => t,
+                    };
+                    Some(type_metadata(cx, t, rustc_span::DUMMY_SP))
+                }));
+            } else {
+                signature.extend(
+                    fn_abi
+                        .args
+                        .iter()
+                        .map(|arg| Some(type_metadata(cx, arg.layout.ty, rustc_span::DUMMY_SP))),
+                );
+            }
+
+            create_DIArray(DIB(cx), &signature[..])
+        }
+
+        // Appends `<T, U, ...>` to `name_to_append_suffix_to` for generic
+        // functions and, under full debuginfo, returns template-parameter
+        // metadata for each type parameter (empty array otherwise).
+        fn get_template_parameters<'ll, 'tcx>(
+            cx: &CodegenCx<'ll, 'tcx>,
+            generics: &ty::Generics,
+            substs: SubstsRef<'tcx>,
+            name_to_append_suffix_to: &mut String,
+        ) -> &'ll DIArray {
+            // Non-generic (no type substs): nothing to append or describe.
+            if substs.types().next().is_none() {
+                return create_DIArray(DIB(cx), &[]);
+            }
+
+            name_to_append_suffix_to.push('<');
+            for (i, actual_type) in substs.types().enumerate() {
+                if i != 0 {
+                    name_to_append_suffix_to.push_str(",");
+                }
+
+                let actual_type =
+                    cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type);
+                // Add actual type name to <...> clause of function name
+                let actual_type_name = compute_debuginfo_type_name(cx.tcx(), actual_type, true);
+                name_to_append_suffix_to.push_str(&actual_type_name[..]);
+            }
+            name_to_append_suffix_to.push('>');
+
+            // Again, only create type information if full debuginfo is enabled
+            let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
+                let names = get_parameter_names(cx, generics);
+                substs
+                    .iter()
+                    .zip(names)
+                    .filter_map(|(kind, name)| {
+                        // Only type parameters get template metadata; const and
+                        // lifetime parameters are filtered out.
+                        if let GenericArgKind::Type(ty) = kind.unpack() {
+                            let actual_type =
+                                cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
+                            let actual_type_metadata =
+                                type_metadata(cx, actual_type, rustc_span::DUMMY_SP);
+                            let name = name.as_str();
+                            Some(unsafe {
+                                // Inner `Some` matches the `Option<&DIDescriptor>`
+                                // element type expected by `create_DIArray`.
+                                Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
+                                    DIB(cx),
+                                    None,
+                                    name.as_ptr().cast(),
+                                    name.len(),
+                                    actual_type_metadata,
+                                ))
+                            })
+                        } else {
+                            None
+                        }
+                    })
+                    .collect()
+            } else {
+                vec![]
+            };
+
+            create_DIArray(DIB(cx), &template_params[..])
+        }
+
+        // Collects generic-parameter names, parent generics first (recursively),
+        // so they line up positionally with `substs`.
+        fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
+            let mut names = generics
+                .parent
+                .map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
+            names.extend(generics.params.iter().map(|param| param.name));
+            names
+        }
+
+        // Picks the DIScope the subprogram should be nested under: the
+        // self-type for inherent-impl methods (when describable), otherwise
+        // the item's enclosing namespace.
+        fn get_containing_scope<'ll, 'tcx>(
+            cx: &CodegenCx<'ll, 'tcx>,
+            instance: Instance<'tcx>,
+        ) -> &'ll DIScope {
+            // First, let's see if this is a method within an inherent impl. Because
+            // if yes, we want to make the result subroutine DIE a child of the
+            // subroutine's self-type.
+            let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
+                // If the method does *not* belong to a trait, proceed
+                if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
+                    let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
+                        instance.substs,
+                        ty::ParamEnv::reveal_all(),
+                        &cx.tcx.type_of(impl_def_id),
+                    );
+
+                    // Only "class" methods are generally understood by LLVM,
+                    // so avoid methods on other types (e.g., `<*mut T>::null`).
+                    match impl_self_ty.kind() {
+                        ty::Adt(def, ..) if !def.is_box() => {
+                            // Again, only create type information if full debuginfo is enabled
+                            if cx.sess().opts.debuginfo == DebugInfo::Full
+                                && !impl_self_ty.needs_subst()
+                            {
+                                Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP))
+                            } else {
+                                Some(namespace::item_namespace(cx, def.did))
+                            }
+                        }
+                        _ => None,
+                    }
+                } else {
+                    // For trait method impls we still use the "parallel namespace"
+                    // strategy
+                    None
+                }
+            });
+
+            self_type.unwrap_or_else(|| {
+                // Fall back to the namespace of the item's parent in the def tree.
+                namespace::item_namespace(
+                    cx,
+                    DefId {
+                        krate: instance.def_id().krate,
+                        index: cx
+                            .tcx
+                            .def_key(instance.def_id())
+                            .parent
+                            .expect("get_containing_scope: missing parent?"),
+                    },
+                )
+            })
+        }
+    }
+
+    fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value) {
+        metadata::create_vtable_metadata(self, ty, vtable)
+    }
+
+    fn extend_scope_to_file(
+        &self,
+        scope_metadata: &'ll DIScope,
+        file: &rustc_span::SourceFile,
+        defining_crate: CrateNum,
+    ) -> &'ll DILexicalBlock {
+        metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate)
+    }
+
+    fn debuginfo_finalize(&self) {
+        finalize(self)
+    }
+
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+    /// Creates the `DIVariable` describing a local or argument, using the
+    /// DWARF tag that matches `variable_kind`.
+    fn create_dbg_var(
+        &self,
+        dbg_context: &FunctionDebugContext<&'ll DIScope>,
+        variable_name: Symbol,
+        variable_type: Ty<'tcx>,
+        scope_metadata: &'ll DIScope,
+        variable_kind: VariableKind,
+        span: Span,
+    ) -> &'ll DIVariable {
+        let loc = self.lookup_debug_loc(span.lo());
+        let file_metadata = file_metadata(self, &loc.file, dbg_context.defining_crate);
+
+        let type_metadata = type_metadata(self, variable_type, span);
+
+        // Arguments carry their 1-based index and DW_TAG_arg_variable;
+        // plain locals use index 0 and DW_TAG_auto_variable.
+        let (argument_index, dwarf_tag) = match variable_kind {
+            ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
+            LocalVariable => (0, DW_TAG_auto_variable),
+        };
+        let align = self.align_of(variable_type);
+
+        let name = variable_name.as_str();
+        unsafe {
+            llvm::LLVMRustDIBuilderCreateVariable(
+                DIB(self),
+                dwarf_tag,
+                scope_metadata,
+                name.as_ptr().cast(),
+                name.len(),
+                file_metadata,
+                loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
+                type_metadata,
+                true,
+                DIFlags::FlagZero,
+                argument_index,
+                align.bytes() as u32,
+            )
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
new file mode 100644
index 0000000..9945d4f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
@@ -0,0 +1,55 @@
+// Namespace Handling.
+
+use super::utils::{debug_context, DIB};
+use rustc_middle::ty::{self, Instance};
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::DIScope;
+use rustc_hir::def_id::DefId;
+use rustc_hir::definitions::DefPathData;
+
+/// Returns the mangled symbol name for `instance`, as computed (and cached)
+/// by the `symbol_name` query on the type context.
+pub fn mangled_name_of_instance<'a, 'tcx>(
+    cx: &CodegenCx<'a, 'tcx>,
+    instance: Instance<'tcx>,
+) -> ty::SymbolName<'tcx> {
+    cx.tcx.symbol_name(instance)
+}
+
+/// Returns (creating and caching if necessary) the `DIScope` namespace node
+/// for `def_id`, recursively building parent namespaces up to the crate root.
+pub fn item_namespace(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
+    // Fast path: already created for this def-id.
+    if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) {
+        return scope;
+    }
+
+    let def_key = cx.tcx.def_key(def_id);
+    // Recurse to the parent first; the crate root has no parent (`None`).
+    let parent_scope = def_key
+        .parent
+        .map(|parent| item_namespace(cx, DefId { krate: def_id.krate, index: parent }));
+
+    // These two locals own the name data so `namespace_name` can borrow from
+    // whichever branch produced it.
+    let crate_name_as_str;
+    let name_to_string;
+    let namespace_name = match def_key.disambiguated_data.data {
+        DefPathData::CrateRoot => {
+            crate_name_as_str = cx.tcx.crate_name(def_id.krate).as_str();
+            &*crate_name_as_str
+        }
+        data => {
+            name_to_string = data.to_string();
+            &*name_to_string
+        }
+    };
+
+    let scope = unsafe {
+        llvm::LLVMRustDIBuilderCreateNameSpace(
+            DIB(cx),
+            parent_scope,
+            namespace_name.as_ptr().cast(),
+            namespace_name.len(),
+            false, // ExportSymbols (only relevant for C++ anonymous namespaces)
+        )
+    };
+
+    // Memoize so repeated lookups (and recursion from children) are cheap.
+    debug_context(cx).namespace_map.borrow_mut().insert(def_id, scope);
+    scope
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/source_loc.rs b/compiler/rustc_codegen_llvm/src/debuginfo/source_loc.rs
new file mode 100644
index 0000000..66ae9d7
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/source_loc.rs
@@ -0,0 +1,61 @@
+use super::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
+use super::utils::debug_context;
+
+use crate::common::CodegenCx;
+use crate::llvm::debuginfo::DIScope;
+use crate::llvm::{self, Value};
+use rustc_codegen_ssa::traits::*;
+
+use rustc_data_structures::sync::Lrc;
+use rustc_span::{BytePos, Pos, SourceFile, SourceFileAndLine, Span};
+
+/// A source code location used to generate debug information.
+pub struct DebugLoc {
+    /// Information about the original source file.
+    pub file: Lrc<SourceFile>,
+    /// The (1-based) line number. `None` when the position could not be
+    /// resolved to a line within the file.
+    pub line: Option<u32>,
+    /// The (1-based) column number. `None` when unknown, or deliberately
+    /// omitted (e.g. for MSVC targets).
+    pub col: Option<u32>,
+}
+
+impl CodegenCx<'ll, '_> {
+    /// Looks up debug source information about a `BytePos`.
+    pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
+        let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
+            Ok(SourceFileAndLine { sf: file, line }) => {
+                let line_pos = file.line_begin_pos(pos);
+
+                // Use 1-based indexing.
+                let line = (line + 1) as u32;
+                // Column = byte offset from line start, also 1-based.
+                let col = (pos - line_pos).to_u32() + 1;
+
+                (file, Some(line), Some(col))
+            }
+            // Position not on any known line: keep the file, drop line/col.
+            Err(file) => (file, None, None),
+        };
+
+        // For MSVC, omit the column number.
+        // Otherwise, emit it. This mimics clang behaviour.
+        // See discussion in https://github.com/rust-lang/rust/issues/42921
+        if self.sess().target.target.options.is_like_msvc {
+            DebugLoc { file, line, col: None }
+        } else {
+            DebugLoc { file, line, col }
+        }
+    }
+
+    /// Creates an LLVM debug-location value for `span` within `scope`,
+    /// substituting the "unknown" sentinels when line/column are unresolved.
+    pub fn create_debug_loc(&self, scope: &'ll DIScope, span: Span) -> &'ll Value {
+        let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
+
+        unsafe {
+            llvm::LLVMRustDIBuilderCreateDebugLocation(
+                debug_context(self).llcontext,
+                line.unwrap_or(UNKNOWN_LINE_NUMBER),
+                col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
+                scope,
+                None,
+            )
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
new file mode 100644
index 0000000..ee188e6
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -0,0 +1,43 @@
+// Utility Functions.
+
+use super::namespace::item_namespace;
+use super::CrateDebugContext;
+
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::DefIdTree;
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{DIArray, DIBuilder, DIDescriptor, DIScope};
+
+/// Returns whether `def_id` is local to the current compilation unit
+/// (i.e. not reachable from other crates), for the DWARF "local to unit" flag.
+pub fn is_node_local_to_unit(cx: &CodegenCx<'_, '_>, def_id: DefId) -> bool {
+    // The is_local_to_unit flag indicates whether a function is local to the
+    // current compilation unit (i.e., if it is *static* in the C-sense). The
+    // *reachable* set should provide a good approximation of this, as it
+    // contains everything that might leak out of the current crate (by being
+    // externally visible or by being inlined into something externally
+    // visible). It might better to use the `exported_items` set from
+    // `driver::CrateAnalysis` in the future, but (atm) this set is not
+    // available in the codegen pass.
+    !cx.tcx.is_reachable_non_generic(def_id)
+}
+
+/// Wraps `arr` in an LLVM `DIArray` node via the DIBuilder. An empty slice
+/// yields a valid empty array (never a null pointer).
+#[allow(non_snake_case)]
+pub fn create_DIArray(builder: &DIBuilder<'ll>, arr: &[Option<&'ll DIDescriptor>]) -> &'ll DIArray {
+    unsafe { llvm::LLVMRustDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) }
+}
+
+/// Returns the crate's debuginfo context. Panics if debuginfo is disabled
+/// (`dbg_cx` is `None`) -- callers must check before calling.
+#[inline]
+pub fn debug_context(cx: &'a CodegenCx<'ll, 'tcx>) -> &'a CrateDebugContext<'ll, 'tcx> {
+    cx.dbg_cx.as_ref().unwrap()
+}
+
+/// Shorthand accessor for the crate's LLVM `DIBuilder`. Panics if debuginfo
+/// is disabled (`dbg_cx` is `None`).
+#[inline]
+#[allow(non_snake_case)]
+pub fn DIB(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> {
+    cx.dbg_cx.as_ref().unwrap().builder
+}
+
+/// Returns the namespace scope of `def_id`'s *parent* item; panics if the
+/// item has no parent in the definition tree.
+pub fn get_namespace_for_item(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
+    item_namespace(cx, cx.tcx.parent(def_id).expect("get_namespace_for_item: missing parent?"))
+}
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
new file mode 100644
index 0000000..a3d6882
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -0,0 +1,122 @@
+//! Declare various LLVM values.
+//!
+//! Prefer using functions and methods from this module rather than calling LLVM
+//! functions directly. These functions do some additional work to ensure we do
+//! the right thing given the preconceptions of codegen.
+//!
+//! Some useful guidelines:
+//!
+//! * Use declare_* family of methods if you are declaring, but are not
+//! interested in defining the Value they return.
+//! * Use define_* family of methods when you might be defining the Value.
+//! * When in doubt, define.
+
+use crate::abi::{FnAbi, FnAbiLlvmExt};
+use crate::attributes;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm::AttributePlace::Function;
+use crate::type_::Type;
+use crate::value::Value;
+use rustc_codegen_ssa::traits::*;
+use rustc_middle::ty::Ty;
+use tracing::debug;
+
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return existing Value instead.
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return existing Value instead.
+///
+/// Applies the calling convention, unnamed-address, red-zone, optimization,
+/// and lazy-binding attributes shared by all function declarations.
+fn declare_raw_fn(
+    cx: &CodegenCx<'ll, '_>,
+    name: &str,
+    callconv: llvm::CallConv,
+    ty: &'ll Type,
+) -> &'ll Value {
+    debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
+    let llfn = unsafe {
+        llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
+    };
+
+    llvm::SetFunctionCallConv(llfn, callconv);
+    // Function addresses in Rust are never significant, allowing functions to
+    // be merged.
+    llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global);
+
+    // Red zone: explicit -C no-redzone wins; otherwise follow the target spec.
+    if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
+        llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
+    }
+
+    attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
+    attributes::non_lazy_bind(cx.sess(), llfn);
+    llfn
+}
+
+impl CodegenCx<'ll, 'tcx> {
+    /// Declare a global value.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// return its Value instead.
+    pub fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value {
+        debug!("declare_global(name={:?})", name);
+        unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) }
+    }
+
+    /// Declare a C ABI function.
+    ///
+    /// Only use this for foreign function ABIs and glue. For Rust functions use
+    /// `declare_fn` instead.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// update the declaration and return existing Value instead.
+    pub fn declare_cfn(&self, name: &str, fn_type: &'ll Type) -> &'ll Value {
+        declare_raw_fn(self, name, llvm::CCallConv, fn_type)
+    }
+
+    /// Declare a Rust function.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// update the declaration and return existing Value instead.
+    pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Value {
+        debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
+
+        // Calling convention and LLVM type come from the computed ABI; the
+        // ABI also attaches its per-argument attributes to the declaration.
+        let llfn = declare_raw_fn(self, name, fn_abi.llvm_cconv(), fn_abi.llvm_type(self));
+        fn_abi.apply_attrs_llfn(self, llfn);
+        llfn
+    }
+
+    /// Declare a global with an intention to define it.
+    ///
+    /// Use this function when you intend to define a global. This function will
+    /// return `None` if the name already has a definition associated with it. In that
+    /// case an error should be reported to the user, because it usually happens due
+    /// to user’s fault (e.g., misuse of `#[no_mangle]` or `#[export_name]` attributes).
+    pub fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
+        if self.get_defined_value(name).is_some() {
+            None
+        } else {
+            Some(self.declare_global(name, ty))
+        }
+    }
+
+    /// Declare a private global
+    ///
+    /// Use this function when you intend to define a global without a name.
+    pub fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) }
+    }
+
+    /// Gets declared value by name.
+    pub fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
+        debug!("get_declared_value(name={:?})", name);
+        unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) }
+    }
+
+    /// Gets defined or externally defined (AvailableExternally linkage) value by
+    /// name.
+    pub fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
+        self.get_declared_value(name).and_then(|val| {
+            // A pure declaration (no body/initializer) does not count as defined.
+            let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
+            if !declaration { Some(val) } else { None }
+        })
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
new file mode 100644
index 0000000..7f5b09e
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -0,0 +1,1728 @@
+use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::va_arg::emit_va_arg;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
+use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir as hir;
+use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
+use rustc_middle::ty::{self, Ty};
+use rustc_middle::{bug, span_bug};
+use rustc_span::{sym, symbol::kw, Span, Symbol};
+use rustc_target::abi::{self, HasDataLayout, LayoutOf, Primitive};
+use rustc_target::spec::PanicStrategy;
+
+use std::cmp::Ordering;
+use std::iter;
+
+/// Maps a "simple" Rust float intrinsic (e.g. `sqrtf32`) to the corresponding
+/// LLVM intrinsic declaration, or returns `None` if `name` is not one of the
+/// 1:1 float intrinsics handled here.
+fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Value> {
+    let llvm_name = match name {
+        sym::sqrtf32 => "llvm.sqrt.f32",
+        sym::sqrtf64 => "llvm.sqrt.f64",
+        sym::powif32 => "llvm.powi.f32",
+        sym::powif64 => "llvm.powi.f64",
+        sym::sinf32 => "llvm.sin.f32",
+        sym::sinf64 => "llvm.sin.f64",
+        sym::cosf32 => "llvm.cos.f32",
+        sym::cosf64 => "llvm.cos.f64",
+        sym::powf32 => "llvm.pow.f32",
+        sym::powf64 => "llvm.pow.f64",
+        sym::expf32 => "llvm.exp.f32",
+        sym::expf64 => "llvm.exp.f64",
+        sym::exp2f32 => "llvm.exp2.f32",
+        sym::exp2f64 => "llvm.exp2.f64",
+        sym::logf32 => "llvm.log.f32",
+        sym::logf64 => "llvm.log.f64",
+        sym::log10f32 => "llvm.log10.f32",
+        sym::log10f64 => "llvm.log10.f64",
+        sym::log2f32 => "llvm.log2.f32",
+        sym::log2f64 => "llvm.log2.f64",
+        sym::fmaf32 => "llvm.fma.f32",
+        sym::fmaf64 => "llvm.fma.f64",
+        sym::fabsf32 => "llvm.fabs.f32",
+        sym::fabsf64 => "llvm.fabs.f64",
+        sym::minnumf32 => "llvm.minnum.f32",
+        sym::minnumf64 => "llvm.minnum.f64",
+        sym::maxnumf32 => "llvm.maxnum.f32",
+        sym::maxnumf64 => "llvm.maxnum.f64",
+        sym::copysignf32 => "llvm.copysign.f32",
+        sym::copysignf64 => "llvm.copysign.f64",
+        sym::floorf32 => "llvm.floor.f32",
+        sym::floorf64 => "llvm.floor.f64",
+        sym::ceilf32 => "llvm.ceil.f32",
+        sym::ceilf64 => "llvm.ceil.f64",
+        sym::truncf32 => "llvm.trunc.f32",
+        sym::truncf64 => "llvm.trunc.f64",
+        sym::rintf32 => "llvm.rint.f32",
+        sym::rintf64 => "llvm.rint.f64",
+        sym::nearbyintf32 => "llvm.nearbyint.f32",
+        sym::nearbyintf64 => "llvm.nearbyint.f64",
+        sym::roundf32 => "llvm.round.f32",
+        sym::roundf64 => "llvm.round.f64",
+        _ => return None,
+    };
+    Some(cx.get_intrinsic(&llvm_name))
+}
+
+impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
+    /// Codegens a call to a Rust intrinsic: either forwards to a matching LLVM
+    /// intrinsic, emits the instruction sequence inline, or reports an invalid
+    /// monomorphization error. The result (if any) is stored into `llresult`.
+    fn codegen_intrinsic_call(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        args: &[OperandRef<'tcx, &'ll Value>],
+        llresult: &'ll Value,
+        span: Span,
+    ) {
+        let tcx = self.tcx;
+        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+
+        // Intrinsics are always `FnDef` items; anything else is a compiler bug.
+        let (def_id, substs) = match *callee_ty.kind() {
+            ty::FnDef(def_id, substs) => (def_id, substs),
+            _ => bug!("expected fn item type, found {}", callee_ty),
+        };
+
+        let sig = callee_ty.fn_sig(tcx);
+        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
+        let arg_tys = sig.inputs();
+        let ret_ty = sig.output();
+        let name = tcx.item_name(def_id);
+        let name_str = &*name.as_str();
+
+        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
+        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+        // Intrinsics that map 1:1 onto an LLVM intrinsic are handled first.
+        let simple = get_simple_intrinsic(self, name);
+        let llval = match name {
+            _ if simple.is_some() => self.call(
+                simple.unwrap(),
+                &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+                None,
+            ),
+            sym::likely => {
+                let expect = self.get_intrinsic(&("llvm.expect.i1"));
+                self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
+            }
+            sym::unlikely => {
+                let expect = self.get_intrinsic(&("llvm.expect.i1"));
+                self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
+            }
+            kw::Try => {
+                // `try` stores its own result into `llresult`, so return early.
+                try_intrinsic(
+                    self,
+                    args[0].immediate(),
+                    args[1].immediate(),
+                    args[2].immediate(),
+                    llresult,
+                );
+                return;
+            }
+            sym::breakpoint => {
+                let llfn = self.get_intrinsic(&("llvm.debugtrap"));
+                self.call(llfn, &[], None)
+            }
+            sym::va_copy => {
+                let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
+                self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
+            }
+            sym::va_arg => {
+                match fn_abi.ret.layout.abi {
+                    abi::Abi::Scalar(ref scalar) => {
+                        match scalar.value {
+                            Primitive::Int(..) => {
+                                if self.cx().size_of(ret_ty).bytes() < 4 {
+                                    // `va_arg` should not be called on an integer type
+                                    // less than 4 bytes in length. If it is, promote
+                                    // the integer to a `i32` and truncate the result
+                                    // back to the smaller type.
+                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
+                                    self.trunc(promoted_result, llret_ty)
+                                } else {
+                                    emit_va_arg(self, args[0], ret_ty)
+                                }
+                            }
+                            Primitive::F64 | Primitive::Pointer => {
+                                emit_va_arg(self, args[0], ret_ty)
+                            }
+                            // `va_arg` should never be used with the return type f32.
+                            Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
+                        }
+                    }
+                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
+                }
+            }
+
+            sym::volatile_load | sym::unaligned_volatile_load => {
+                let tp_ty = substs.type_at(0);
+                let mut ptr = args[0].immediate();
+                // If the ABI returns via a cast type, load through a pointer to
+                // that cast type instead of the Rust-level type.
+                if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                    ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
+                }
+                let load = self.volatile_load(ptr);
+                // The "unaligned" variant pins alignment to 1 byte.
+                let align = if name == sym::unaligned_volatile_load {
+                    1
+                } else {
+                    self.align_of(tp_ty).bytes() as u32
+                };
+                unsafe {
+                    llvm::LLVMSetAlignment(load, align);
+                }
+                self.to_immediate(load, self.layout_of(tp_ty))
+            }
+            sym::volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.volatile_store(self, dst);
+                return;
+            }
+            sym::unaligned_volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.unaligned_volatile_store(self, dst);
+                return;
+            }
+            sym::prefetch_read_data
+            | sym::prefetch_write_data
+            | sym::prefetch_read_instruction
+            | sym::prefetch_write_instruction => {
+                let expect = self.get_intrinsic(&("llvm.prefetch"));
+                // `llvm.prefetch` takes (ptr, rw, locality, cache_type):
+                // rw: 0 = read, 1 = write; cache_type: 0 = instruction, 1 = data.
+                let (rw, cache_type) = match name {
+                    sym::prefetch_read_data => (0, 1),
+                    sym::prefetch_write_data => (1, 1),
+                    sym::prefetch_read_instruction => (0, 0),
+                    sym::prefetch_write_instruction => (1, 0),
+                    _ => bug!(),
+                };
+                self.call(
+                    expect,
+                    &[
+                        args[0].immediate(),
+                        self.const_i32(rw),
+                        args[1].immediate(),
+                        self.const_i32(cache_type),
+                    ],
+                    None,
+                )
+            }
+            sym::ctlz
+            | sym::ctlz_nonzero
+            | sym::cttz
+            | sym::cttz_nonzero
+            | sym::ctpop
+            | sym::bswap
+            | sym::bitreverse
+            | sym::rotate_left
+            | sym::rotate_right
+            | sym::saturating_add
+            | sym::saturating_sub => {
+                let ty = arg_tys[0];
+                match int_type_width_signed(ty, self) {
+                    Some((width, signed)) => match name {
+                        sym::ctlz | sym::cttz => {
+                            // Second argument `false`: zero input is defined (not poison).
+                            let y = self.const_bool(false);
+                            let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
+                            self.call(llfn, &[args[0].immediate(), y], None)
+                        }
+                        sym::ctlz_nonzero | sym::cttz_nonzero => {
+                            // Second argument `true`: caller guarantees input != 0.
+                            let y = self.const_bool(true);
+                            let llvm_name = &format!("llvm.{}.i{}", &name_str[..4], width);
+                            let llfn = self.get_intrinsic(llvm_name);
+                            self.call(llfn, &[args[0].immediate(), y], None)
+                        }
+                        sym::ctpop => self.call(
+                            self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
+                            &[args[0].immediate()],
+                            None,
+                        ),
+                        sym::bswap => {
+                            if width == 8 {
+                                args[0].immediate() // byte swap a u8/i8 is just a no-op
+                            } else {
+                                self.call(
+                                    self.get_intrinsic(&format!("llvm.bswap.i{}", width)),
+                                    &[args[0].immediate()],
+                                    None,
+                                )
+                            }
+                        }
+                        sym::bitreverse => self.call(
+                            self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
+                            &[args[0].immediate()],
+                            None,
+                        ),
+                        sym::rotate_left | sym::rotate_right => {
+                            let is_left = name == sym::rotate_left;
+                            let val = args[0].immediate();
+                            let raw_shift = args[1].immediate();
+                            // rotate = funnel shift with first two args the same
+                            let llvm_name =
+                                &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
+                            let llfn = self.get_intrinsic(llvm_name);
+                            self.call(llfn, &[val, val, raw_shift], None)
+                        }
+                        sym::saturating_add | sym::saturating_sub => {
+                            let is_add = name == sym::saturating_add;
+                            let lhs = args[0].immediate();
+                            let rhs = args[1].immediate();
+                            // e.g. `llvm.sadd.sat.i32` / `llvm.usub.sat.i64`.
+                            let llvm_name = &format!(
+                                "llvm.{}{}.sat.i{}",
+                                if signed { 's' } else { 'u' },
+                                if is_add { "add" } else { "sub" },
+                                width
+                            );
+                            let llfn = self.get_intrinsic(llvm_name);
+                            self.call(llfn, &[lhs, rhs], None)
+                        }
+                        _ => bug!(),
+                    },
+                    None => {
+                        span_invalid_monomorphization_error(
+                            tcx.sess,
+                            span,
+                            &format!(
+                                "invalid monomorphization of `{}` intrinsic: \
+                                 expected basic integer type, found `{}`",
+                                name, ty
+                            ),
+                        );
+                        return;
+                    }
+                }
+            }
+
+            _ if name_str.starts_with("simd_") => {
+                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                    Ok(llval) => llval,
+                    Err(()) => return,
+                }
+            }
+
+            _ => bug!("unknown intrinsic '{}'", name),
+        };
+
+        // Store the computed value into the destination, honoring any
+        // ABI-mandated cast type for the return value.
+        if !fn_abi.ret.is_ignore() {
+            if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
+                let ptr = self.pointercast(result.llval, ptr_llty);
+                self.store(llval, ptr, result.align);
+            } else {
+                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+                    .val
+                    .store(self, result);
+            }
+        }
+    }
+
+    /// Emits `llvm.trap`, terminating the program.
+    fn abort(&mut self) {
+        let fnname = self.get_intrinsic(&("llvm.trap"));
+        self.call(fnname, &[], None);
+    }
+
+    /// Emits `llvm.assume(val)`, promising the optimizer that `val` is true.
+    fn assume(&mut self, val: Self::Value) {
+        let assume_intrinsic = self.get_intrinsic("llvm.assume");
+        self.call(assume_intrinsic, &[val], None);
+    }
+
+    /// Emits `llvm.expect.i1(cond, expected)` as a branch-prediction hint.
+    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
+        let expect = self.get_intrinsic(&"llvm.expect.i1");
+        self.call(expect, &[cond, self.const_bool(expected)], None)
+    }
+
+    /// Emits `llvm.sideeffect` when `-Z insert-sideeffect` is enabled.
+    fn sideeffect(&mut self) {
+        if self.tcx.sess.opts.debugging_opts.insert_sideeffect {
+            let fnname = self.get_intrinsic(&("llvm.sideeffect"));
+            self.call(fnname, &[], None);
+        }
+    }
+
+    /// Emits `llvm.va_start` on the given `va_list` pointer.
+    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
+        let intrinsic = self.cx().get_intrinsic("llvm.va_start");
+        self.call(intrinsic, &[va_list], None)
+    }
+
+    /// Emits `llvm.va_end` on the given `va_list` pointer.
+    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
+        let intrinsic = self.cx().get_intrinsic("llvm.va_end");
+        self.call(intrinsic, &[va_list], None)
+    }
+}
+
+/// Codegen for the `try` intrinsic: dispatches to the platform-appropriate
+/// exception-catching strategy, or to a plain call when panics abort.
+fn try_intrinsic(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    if bx.sess().panic_strategy() == PanicStrategy::Abort {
+        // With `-C panic=abort` nothing unwinds, so `catch_func` is never called.
+        bx.call(try_func, &[data], None);
+        // Return 0 unconditionally from the intrinsic call;
+        // we can never unwind.
+        let ret_align = bx.tcx().data_layout.i32_align.abi;
+        bx.store(bx.const_i32(0), dest, ret_align);
+    } else if wants_msvc_seh(bx.sess()) {
+        codegen_msvc_try(bx, try_func, data, catch_func, dest);
+    } else if bx.sess().target.target.options.is_like_emscripten {
+        codegen_emcc_try(bx, try_func, data, catch_func, dest);
+    } else {
+        codegen_gnu_try(bx, try_func, data, catch_func, dest);
+    }
+}
+
+// MSVC's definition of the `rust_try` function.
+//
+// This implementation uses the new exception handling instructions in LLVM
+// which have support in LLVM for SEH on MSVC targets. Although these
+// instructions are meant to work for all targets, as of the time of this
+// writing LLVM does not recommend their use, as the old ones are still more
+// optimized.
+fn codegen_msvc_try(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+        bx.set_personality_fn(bx.eh_personality());
+        bx.sideeffect();
+
+        let mut normal = bx.build_sibling_block("normal");
+        let mut catchswitch = bx.build_sibling_block("catchswitch");
+        let mut catchpad_rust = bx.build_sibling_block("catchpad_rust");
+        let mut catchpad_foreign = bx.build_sibling_block("catchpad_foreign");
+        let mut caught = bx.build_sibling_block("caught");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+
+        // We're generating an IR snippet that looks like:
+        //
+        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
+        //      %slot = alloca i8*
+        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
+        //
+        //   normal:
+        //      ret i32 0
+        //
+        //   catchswitch:
+        //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
+        //
+        //   catchpad_rust:
+        //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
+        //      %ptr = load %slot
+        //      call %catch_func(%data, %ptr)
+        //      catchret from %tok to label %caught
+        //
+        //   catchpad_foreign:
+        //      %tok = catchpad within %cs [null, 64, null]
+        //      call %catch_func(%data, null)
+        //      catchret from %tok to label %caught
+        //
+        //   caught:
+        //      ret i32 1
+        //   }
+        //
+        // This structure follows the basic usage of throw/try/catch in LLVM.
+        // For example, compile this C++ snippet to see what LLVM generates:
+        //
+        //      struct rust_panic {
+        //          rust_panic(const rust_panic&);
+        //          ~rust_panic();
+        //
+        //          void* x[2];
+        //      };
+        //
+        //      int __rust_try(
+        //          void (*try_func)(void*),
+        //          void *data,
+        //          void (*catch_func)(void*, void*) noexcept
+        //      ) {
+        //          try {
+        //              try_func(data);
+        //              return 0;
+        //          } catch(rust_panic& a) {
+        //              catch_func(data, &a);
+        //              return 1;
+        //          } catch(...) {
+        //              catch_func(data, NULL);
+        //              return 1;
+        //          }
+        //      }
+        //
+        // More information can be found in libstd's seh.rs implementation.
+        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let slot = bx.alloca(bx.type_i8p(), ptr_align);
+        bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
+
+        normal.ret(bx.const_i32(0));
+
+        let cs = catchswitch.catch_switch(None, None, 2);
+        catchswitch.add_handler(cs, catchpad_rust.llbb());
+        catchswitch.add_handler(cs, catchpad_foreign.llbb());
+
+        // We can't use the TypeDescriptor defined in libpanic_unwind because it
+        // might be in another DLL and the SEH encoding only supports specifying
+        // a TypeDescriptor from the current module.
+        //
+        // However this isn't an issue since the MSVC runtime uses string
+        // comparison on the type name to match TypeDescriptors rather than
+        // pointer equality.
+        //
+        // So instead we generate a new TypeDescriptor in each module that uses
+        // `try` and let the linker merge duplicate definitions in the same
+        // module.
+        //
+        // When modifying, make sure that the type_name string exactly matches
+        // the one used in src/libpanic_unwind/seh.rs.
+        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
+        let type_name = bx.const_bytes(b"rust_panic\0");
+        let type_info =
+            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
+        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
+        unsafe {
+            // Linkonce-ODR + unique comdat lets the linker merge the duplicate
+            // descriptors emitted by every codegen unit that uses `try`.
+            llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
+            llvm::SetUniqueComdat(bx.llmod, tydesc);
+            llvm::LLVMSetInitializer(tydesc, type_info);
+        }
+
+        // The flag value of 8 indicates that we are catching the exception by
+        // reference instead of by value. We can't use catch by value because
+        // that requires copying the exception object, which we don't support
+        // since our exception object effectively contains a Box.
+        //
+        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
+        let flags = bx.const_i32(8);
+        let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
+        let ptr = catchpad_rust.load(slot, ptr_align);
+        catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
+        catchpad_rust.catch_ret(&funclet, caught.llbb());
+
+        // The flag value of 64 indicates a "catch-all".
+        let flags = bx.const_i32(64);
+        let null = bx.const_null(bx.type_i8p());
+        let funclet = catchpad_foreign.catch_pad(cs, &[null, flags, null]);
+        catchpad_foreign.call(catch_func, &[data, null], Some(&funclet));
+        catchpad_foreign.catch_ret(&funclet, caught.llbb());
+
+        caught.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
+// Definition of the standard `try` function for Rust using the GNU-like model
+// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
+// instructions).
+//
+// This codegen is a little surprising because we always call a shim
+// function instead of inlining the call to `invoke` manually here. This is done
+// because in LLVM we're only allowed to have one personality per function
+// definition. The call to the `try` intrinsic is being inlined into the
+// function calling it, and that function may already have other personality
+// functions in play. By calling a shim we're guaranteed that our shim will have
+// the right personality function.
+fn codegen_gnu_try(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+        // Codegens the shims described above:
+        //
+        //   bx:
+        //      invoke %try_func(%data) normal %normal unwind %catch
+        //
+        //   normal:
+        //      ret 0
+        //
+        //   catch:
+        //      (%ptr, _) = landingpad
+        //      call %catch_func(%data, %ptr)
+        //      ret 1
+
+        bx.sideeffect();
+
+        let mut then = bx.build_sibling_block("then");
+        let mut catch = bx.build_sibling_block("catch");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+        bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
+        then.ret(bx.const_i32(0));
+
+        // Type indicator for the exception being thrown.
+        //
+        // The first value in this tuple is a pointer to the exception object
+        // being thrown.  The second value is a "selector" indicating which of
+        // the landing pad clauses the exception's type had been matched to.
+        // rust_try ignores the selector.
+        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
+        // A null type-info clause acts as a catch-all.
+        let tydesc = bx.const_null(bx.type_i8p());
+        catch.add_clause(vals, tydesc);
+        let ptr = catch.extract_value(vals, 0);
+        catch.call(catch_func, &[data, ptr], None);
+        catch.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
+// Variant of codegen_gnu_try used for emscripten where Rust panics are
+// implemented using C++ exceptions. Here we use exceptions of a specific type
+// (`struct rust_panic`) to represent Rust panics.
+fn codegen_emcc_try(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+        // Codegens the shims described above:
+        //
+        //   bx:
+        //      invoke %try_func(%data) normal %normal unwind %catch
+        //
+        //   normal:
+        //      ret 0
+        //
+        //   catch:
+        //      (%ptr, %selector) = landingpad
+        //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
+        //      %is_rust_panic = %selector == %rust_typeid
+        //      %catch_data = alloca { i8*, i8 }
+        //      %catch_data[0] = %ptr
+        //      %catch_data[1] = %is_rust_panic
+        //      call %catch_func(%data, %catch_data)
+        //      ret 1
+
+        bx.sideeffect();
+
+        let mut then = bx.build_sibling_block("then");
+        let mut catch = bx.build_sibling_block("catch");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+        bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
+        then.ret(bx.const_i32(0));
+
+        // Type indicator for the exception being thrown.
+        //
+        // The first value in this tuple is a pointer to the exception object
+        // being thrown.  The second value is a "selector" indicating which of
+        // the landing pad clauses the exception's type had been matched to.
+        let tydesc = bx.eh_catch_typeinfo();
+        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+        // Two clauses: the `rust_panic` type-info and a null catch-all.
+        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 2);
+        catch.add_clause(vals, tydesc);
+        catch.add_clause(vals, bx.const_null(bx.type_i8p()));
+        let ptr = catch.extract_value(vals, 0);
+        let selector = catch.extract_value(vals, 1);
+
+        // Check if the typeid we got is the one for a Rust panic.
+        let llvm_eh_typeid_for = bx.get_intrinsic("llvm.eh.typeid.for");
+        let rust_typeid = catch.call(llvm_eh_typeid_for, &[tydesc], None);
+        let is_rust_panic = catch.icmp(IntPredicate::IntEQ, selector, rust_typeid);
+        let is_rust_panic = catch.zext(is_rust_panic, bx.type_bool());
+
+        // We need to pass two values to catch_func (ptr and is_rust_panic), so
+        // create an alloca and pass a pointer to that.
+        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let i8_align = bx.tcx().data_layout.i8_align.abi;
+        let catch_data =
+            catch.alloca(bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false), ptr_align);
+        let catch_data_0 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
+        catch.store(ptr, catch_data_0, ptr_align);
+        let catch_data_1 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+        catch.store(is_rust_panic, catch_data_1, i8_align);
+        let catch_data = catch.bitcast(catch_data, bx.type_i8p());
+
+        catch.call(catch_func, &[data, catch_data], None);
+        catch.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
+// Helper function to give a Block to a closure to codegen a shim function.
+// This is currently primarily used for the `try` intrinsic functions above.
+//
+// The shim is declared with the given `name`, a Rust-ABI signature built from
+// `inputs` -> `output`, and internal linkage; `codegen` fills in its body.
+fn gen_fn<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    name: &str,
+    inputs: Vec<Ty<'tcx>>,
+    output: Ty<'tcx>,
+    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+) -> &'ll Value {
+    let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
+        inputs.into_iter(),
+        output,
+        false,
+        hir::Unsafety::Unsafe,
+        Abi::Rust,
+    ));
+    let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
+    let llfn = cx.declare_fn(name, &fn_abi);
+    cx.set_frame_pointer_elimination(llfn);
+    cx.apply_target_cpu_attr(llfn);
+    // Internal linkage: the shim is private to this codegen unit, so multiple
+    // units can each emit their own copy without symbol clashes.
+    // FIXME(eddyb) find a nicer way to do this.
+    unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
+    let bx = Builder::new_block(cx, llfn, "entry-block");
+    codegen(bx);
+    llfn
+}
+
+// Helper function used to get a handle to the `__rust_try` function used to
+// catch exceptions.
+//
+// This function is only generated once and is then cached.
+fn get_rust_try_fn<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+) -> &'ll Value {
+    // Fast path: reuse the cached function if it was already generated.
+    if let Some(llfn) = cx.rust_try_fn.get() {
+        return llfn;
+    }
+
+    // Define the type up front for the signature of the rust_try function.
+    // `__rust_try` has signature:
+    //   fn(try_fn: fn(*mut u8), data: *mut u8, catch_fn: fn(*mut u8, *mut u8)) -> i32
+    let tcx = cx.tcx;
+    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+    let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
+        iter::once(i8p),
+        tcx.mk_unit(),
+        false,
+        hir::Unsafety::Unsafe,
+        Abi::Rust,
+    )));
+    let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
+        [i8p, i8p].iter().cloned(),
+        tcx.mk_unit(),
+        false,
+        hir::Unsafety::Unsafe,
+        Abi::Rust,
+    )));
+    let output = tcx.types.i32;
+    let rust_try = gen_fn(cx, "__rust_try", vec![try_fn_ty, i8p, catch_fn_ty], output, codegen);
+    cx.rust_try_fn.set(Some(rust_try));
+    rust_try
+}
+
+fn generic_simd_intrinsic(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ name: Symbol,
+ callee_ty: Ty<'tcx>,
+ args: &[OperandRef<'tcx, &'ll Value>],
+ ret_ty: Ty<'tcx>,
+ llret_ty: &'ll Type,
+ span: Span,
+) -> Result<&'ll Value, ()> {
+ // macros for error handling:
+ macro_rules! emit_error {
+ ($msg: tt) => {
+ emit_error!($msg, )
+ };
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ macro_rules! require {
+ ($cond: expr, $($fmt: tt)*) => {
+ if !$cond {
+ return_error!($($fmt)*);
+ }
+ };
+ }
+
+ macro_rules! require_simd {
+ ($ty: expr, $position: expr) => {
+ require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+ };
+ }
+
+ let tcx = bx.tcx();
+ let sig = tcx
+ .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &callee_ty.fn_sig(tcx));
+ let arg_tys = sig.inputs();
+ let name_str = &*name.as_str();
+
+ if name == sym::simd_select_bitmask {
+ let in_ty = arg_tys[0];
+ let m_len = match in_ty.kind() {
+ // Note that this `.unwrap()` crashes for isize/usize, that's sort
+ // of intentional as there's not currently a use case for that.
+ ty::Int(i) => i.bit_width().unwrap(),
+ ty::Uint(i) => i.bit_width().unwrap(),
+ _ => return_error!("`{}` is not an integral type", in_ty),
+ };
+ require_simd!(arg_tys[1], "argument");
+ let v_len = arg_tys[1].simd_size(tcx);
+ require!(
+ m_len == v_len,
+ "mismatched lengths: mask length `{}` != other vector length `{}`",
+ m_len,
+ v_len
+ );
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, m_len);
+ let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
+ return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
+ }
+
+ // every intrinsic below takes a SIMD vector as its first argument
+ require_simd!(arg_tys[0], "input");
+ let in_ty = arg_tys[0];
+ let in_elem = arg_tys[0].simd_type(tcx);
+ let in_len = arg_tys[0].simd_size(tcx);
+
+ let comparison = match name {
+ sym::simd_eq => Some(hir::BinOpKind::Eq),
+ sym::simd_ne => Some(hir::BinOpKind::Ne),
+ sym::simd_lt => Some(hir::BinOpKind::Lt),
+ sym::simd_le => Some(hir::BinOpKind::Le),
+ sym::simd_gt => Some(hir::BinOpKind::Gt),
+ sym::simd_ge => Some(hir::BinOpKind::Ge),
+ _ => None,
+ };
+
+ if let Some(cmp_op) = comparison {
+ require_simd!(ret_ty, "return");
+
+ let out_len = ret_ty.simd_size(tcx);
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ require!(
+ bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+ "expected return type with integer elements, found `{}` with non-integer `{}`",
+ ret_ty,
+ ret_ty.simd_type(tcx)
+ );
+
+ return Ok(compare_simd_types(
+ bx,
+ args[0].immediate(),
+ args[1].immediate(),
+ in_elem,
+ llret_ty,
+ cmp_op,
+ ));
+ }
+
+ if name_str.starts_with("simd_shuffle") {
+ let n: u64 = name_str["simd_shuffle".len()..].parse().unwrap_or_else(|_| {
+ span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
+ });
+
+ require_simd!(ret_ty, "return");
+
+ let out_len = ret_ty.simd_size(tcx);
+ require!(
+ out_len == n,
+ "expected return type of length {}, found `{}` with length {}",
+ n,
+ ret_ty,
+ out_len
+ );
+ require!(
+ in_elem == ret_ty.simd_type(tcx),
+ "expected return element type `{}` (element of input `{}`), \
+ found `{}` with element type `{}`",
+ in_elem,
+ in_ty,
+ ret_ty,
+ ret_ty.simd_type(tcx)
+ );
+
+ let total_len = u128::from(in_len) * 2;
+
+ let vector = args[2].immediate();
+
+ let indices: Option<Vec<_>> = (0..n)
+ .map(|i| {
+ let arg_idx = i;
+ let val = bx.const_get_elt(vector, i as u64);
+ match bx.const_to_opt_u128(val, true) {
+ None => {
+ emit_error!("shuffle index #{} is not a constant", arg_idx);
+ None
+ }
+ Some(idx) if idx >= total_len => {
+ emit_error!(
+ "shuffle index #{} is out of bounds (limit {})",
+ arg_idx,
+ total_len
+ );
+ None
+ }
+ Some(idx) => Some(bx.const_i32(idx as i32)),
+ }
+ })
+ .collect();
+ let indices = match indices {
+ Some(i) => i,
+ None => return Ok(bx.const_null(llret_ty)),
+ };
+
+ return Ok(bx.shuffle_vector(
+ args[0].immediate(),
+ args[1].immediate(),
+ bx.const_vector(&indices),
+ ));
+ }
+
+ if name == sym::simd_insert {
+ require!(
+ in_elem == arg_tys[2],
+ "expected inserted type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ arg_tys[2]
+ );
+ return Ok(bx.insert_element(
+ args[0].immediate(),
+ args[2].immediate(),
+ args[1].immediate(),
+ ));
+ }
+ if name == sym::simd_extract {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
+ }
+
+ if name == sym::simd_select {
+ let m_elem_ty = in_elem;
+ let m_len = in_len;
+ require_simd!(arg_tys[1], "argument");
+ let v_len = arg_tys[1].simd_size(tcx);
+ require!(
+ m_len == v_len,
+ "mismatched lengths: mask length `{}` != other vector length `{}`",
+ m_len,
+ v_len
+ );
+ match m_elem_ty.kind() {
+ ty::Int(_) => {}
+ _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
+ }
+ // truncate the mask to a vector of i1s
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, m_len as u64);
+ let m_i1s = bx.trunc(args[0].immediate(), i1xn);
+ return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
+ }
+
+ if name == sym::simd_bitmask {
+ // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
+ // vector mask and returns an unsigned integer containing the most
+ // significant bit (MSB) of each lane.
+
+ // If the vector has less than 8 lanes, an u8 is returned with zeroed
+ // trailing bits.
+ let expected_int_bits = in_len.max(8);
+ match ret_ty.kind() {
+ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
+ _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
+ }
+
+ // Integer vector <i{in_bitwidth} x in_len>:
+ let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
+ ty::Int(i) => {
+ (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
+ }
+ ty::Uint(i) => {
+ (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
+ }
+ _ => return_error!(
+ "vector argument `{}`'s element type `{}`, expected integer element type",
+ in_ty,
+ in_elem
+ ),
+ };
+
+ // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
+ let shift_indices =
+ vec![
+ bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
+ in_len as _
+ ];
+ let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
+ // Truncate vector to an <i1 x N>
+ let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
+ // Bitcast <i1 x N> to iN:
+ let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
+ // Zero-extend iN to the bitmask type:
+ return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
+ }
+
+    /// Lowers a "simple" SIMD float intrinsic (`simd_fsqrt`, `simd_fpow`,
+    /// `simd_fma`, ...) to the matching LLVM vector intrinsic
+    /// `llvm.<name>.v<len><elem>` and emits a call to it.
+    ///
+    /// Only `f32` vectors with 2..=16 lanes and `f64` vectors with 2..=8
+    /// lanes are accepted; any other element type or lane count reports an
+    /// invalid-monomorphization error and returns `Err(())`.
+    fn simd_simple_float_intrinsic(
+        name: &str,
+        in_elem: &::rustc_middle::ty::TyS<'_>,
+        in_ty: &::rustc_middle::ty::TyS<'_>,
+        in_len: u64,
+        bx: &mut Builder<'a, 'll, 'tcx>,
+        span: Span,
+        args: &[OperandRef<'tcx, &'ll Value>],
+    ) -> Result<&'ll Value, ()> {
+        // Reports an invalid-monomorphization error for this intrinsic at `span`.
+        macro_rules! emit_error {
+            ($msg: tt) => {
+                emit_error!($msg, )
+            };
+            ($msg: tt, $($fmt: tt)*) => {
+                span_invalid_monomorphization_error(
+                    bx.sess(), span,
+                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+                    name, $($fmt)*));
+            }
+        }
+        // Like `emit_error!`, but also aborts this lowering with `Err(())`.
+        macro_rules! return_error {
+            ($($fmt: tt)*) => {
+                {
+                    emit_error!($($fmt)*);
+                    return Err(());
+                }
+            }
+        }
+        // Validate element type and lane count, and pick the LLVM element suffix.
+        let ety = match in_elem.kind() {
+            ty::Float(f) if f.bit_width() == 32 => {
+                if in_len < 2 || in_len > 16 {
+                    return_error!(
+                        "unsupported floating-point vector `{}` with length `{}` \
+                         out-of-range [2, 16]",
+                        in_ty,
+                        in_len
+                    );
+                }
+                "f32"
+            }
+            ty::Float(f) if f.bit_width() == 64 => {
+                if in_len < 2 || in_len > 8 {
+                    return_error!(
+                        "unsupported floating-point vector `{}` with length `{}` \
+                         out-of-range [2, 8]",
+                        in_ty,
+                        in_len
+                    );
+                }
+                "f64"
+            }
+            ty::Float(f) => {
+                return_error!(
+                    "unsupported element type `{}` of floating-point vector `{}`",
+                    f.name_str(),
+                    in_ty
+                );
+            }
+            _ => {
+                return_error!("`{}` is not a floating-point type", in_ty);
+            }
+        };
+
+        // e.g. `llvm.sqrt.v4f32` for `simd_fsqrt` over a 4-lane f32 vector.
+        let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
+        let intrinsic = bx.get_intrinsic(&llvm_name);
+        let c =
+            bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+        // Tag the call with LLVM's "unsafe algebra" (fast-math) flag, per the
+        // FFI entry point's name.
+        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
+        Ok(c)
+    }
+
+ match name {
+ sym::simd_fsqrt => {
+ return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_fsin => {
+ return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_fcos => {
+ return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_fabs => {
+ return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_floor => {
+ return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_ceil => {
+ return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_fexp => {
+ return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_fexp2 => {
+ return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_flog10 => {
+ return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_flog2 => {
+ return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_flog => {
+ return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_fpowi => {
+ return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_fpow => {
+ return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
+ }
+ sym::simd_fma => {
+ return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
+ }
+ _ => { /* fallthrough */ }
+ }
+
+ // FIXME: use:
+ // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
+ // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
+    /// Builds the LLVM intrinsic type-suffix for a vector of `vec_len`
+    /// elements of `elem_ty` behind `no_pointers` levels of indirection,
+    /// e.g. `v4p0i32` for a 4-lane vector of `*_ i32`.
+    fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
+        // One "p0" per pointer level (address space 0).
+        let ptrs = "p0".repeat(no_pointers);
+        // `i<bits>` for both signed and unsigned integers, `f<bits>` for floats.
+        let (tag, bits) = match *elem_ty.kind() {
+            ty::Int(v) => ('i', v.bit_width().unwrap()),
+            ty::Uint(v) => ('i', v.bit_width().unwrap()),
+            ty::Float(v) => ('f', v.bit_width()),
+            _ => unreachable!(),
+        };
+        format!("v{}{}{}{}", vec_len, ptrs, tag, bits)
+    }
+
+    /// Builds the LLVM type `<vec_len x T>` where `T` is `elem_ty` wrapped in
+    /// `no_pointers` levels of pointer indirection.
+    fn llvm_vector_ty(
+        cx: &CodegenCx<'ll, '_>,
+        elem_ty: Ty<'_>,
+        vec_len: u64,
+        no_pointers: usize,
+    ) -> &'ll Type {
+        // FIXME: use cx.layout_of(ty).llvm_type() ?
+        let scalar = match *elem_ty.kind() {
+            ty::Int(v) => cx.type_int_from_ty(v),
+            ty::Uint(v) => cx.type_uint_from_ty(v),
+            ty::Float(v) => cx.type_float_from_ty(v),
+            _ => unreachable!(),
+        };
+        // Apply one pointer wrapper per indirection level, then vectorize.
+        let wrapped = (0..no_pointers).fold(scalar, |ty, _| cx.type_ptr_to(ty));
+        cx.type_vector(wrapped, vec_len)
+    }
+
+ if name == sym::simd_gather {
+ // simd_gather(values: <N x T>, pointers: <N x *_ T>,
+ // mask: <N x i{M}>) -> <N x T>
+ // * N: number of elements in the input vectors
+ // * T: type of the element to load
+ // * M: any integer width is supported, will be truncated to i1
+
+ // All types must be simd vector types
+ require_simd!(in_ty, "first");
+ require_simd!(arg_tys[1], "second");
+ require_simd!(arg_tys[2], "third");
+ require_simd!(ret_ty, "return");
+
+ // Of the same length:
+ require!(
+ in_len == arg_tys[1].simd_size(tcx),
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "second",
+ in_len,
+ in_ty,
+ arg_tys[1],
+ arg_tys[1].simd_size(tcx)
+ );
+ require!(
+ in_len == arg_tys[2].simd_size(tcx),
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "third",
+ in_len,
+ in_ty,
+ arg_tys[2],
+ arg_tys[2].simd_size(tcx)
+ );
+
+ // The return type must match the first argument type
+ require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
+
+        // Counts the levels of pointer indirection on `t`,
+        // e.g. `*_ *_ u8` -> 2, non-pointer -> 0.
+        fn ptr_count(t: Ty<'_>) -> usize {
+            match t.kind() {
+                ty::RawPtr(p) => 1 + ptr_count(p.ty),
+                _ => 0,
+            }
+        }
+
+        // Peels all pointer indirection off `t`, returning the underlying
+        // non-pointer type.
+        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
+            match t.kind() {
+                ty::RawPtr(p) => non_ptr(p.ty),
+                _ => t,
+            }
+        }
+
+ // The second argument must be a simd vector with an element type that's a pointer
+ // to the element type of the first argument
+ let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind() {
+ ty::RawPtr(p) if p.ty == in_elem => {
+ (ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
+ }
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of second argument `{}` \
+ to be a pointer to the element type `{}` of the first \
+ argument `{}`, found `{}` != `*_ {}`",
+ arg_tys[1].simd_type(tcx),
+ arg_tys[1],
+ in_elem,
+ in_ty,
+ arg_tys[1].simd_type(tcx),
+ in_elem
+ );
+ unreachable!();
+ }
+ };
+ assert!(pointer_count > 0);
+ assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
+ assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
+
+ // The element type of the third argument must be a signed integer type of any width:
+ match arg_tys[2].simd_type(tcx).kind() {
+ ty::Int(_) => (),
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of third argument `{}` \
+ to be a signed integer type",
+ arg_tys[2].simd_type(tcx),
+ arg_tys[2]
+ );
+ }
+ }
+
+ // Alignment of T, must be a constant integer value:
+ let alignment_ty = bx.type_i32();
+ let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
+
+ // Truncate the mask vector to a vector of i1s:
+ let (mask, mask_ty) = {
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len);
+ (bx.trunc(args[2].immediate(), i1xn), i1xn)
+ };
+
+ // Type of the vector of pointers:
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
+ let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
+
+ // Type of the vector of elements:
+ let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
+ let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
+
+ let llvm_intrinsic =
+ format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+ let f = bx.declare_cfn(
+ &llvm_intrinsic,
+ bx.type_func(
+ &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
+ llvm_elem_vec_ty,
+ ),
+ );
+ llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
+ let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
+ return Ok(v);
+ }
+
+ if name == sym::simd_scatter {
+ // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
+ // mask: <N x i{M}>) -> ()
+ // * N: number of elements in the input vectors
+ // * T: type of the element to load
+ // * M: any integer width is supported, will be truncated to i1
+
+ // All types must be simd vector types
+ require_simd!(in_ty, "first");
+ require_simd!(arg_tys[1], "second");
+ require_simd!(arg_tys[2], "third");
+
+ // Of the same length:
+ require!(
+ in_len == arg_tys[1].simd_size(tcx),
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "second",
+ in_len,
+ in_ty,
+ arg_tys[1],
+ arg_tys[1].simd_size(tcx)
+ );
+ require!(
+ in_len == arg_tys[2].simd_size(tcx),
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "third",
+ in_len,
+ in_ty,
+ arg_tys[2],
+ arg_tys[2].simd_size(tcx)
+ );
+
+        // Counts the levels of pointer indirection on `t`,
+        // e.g. `*mut *mut u8` -> 2, non-pointer -> 0.
+        fn ptr_count(t: Ty<'_>) -> usize {
+            match t.kind() {
+                ty::RawPtr(p) => 1 + ptr_count(p.ty),
+                _ => 0,
+            }
+        }
+
+        // Peels all pointer indirection off `t`, returning the underlying
+        // non-pointer type.
+        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
+            match t.kind() {
+                ty::RawPtr(p) => non_ptr(p.ty),
+                _ => t,
+            }
+        }
+
+ // The second argument must be a simd vector with an element type that's a pointer
+ // to the element type of the first argument
+ let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind() {
+ ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
+ (ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
+ }
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of second argument `{}` \
+ to be a pointer to the element type `{}` of the first \
+ argument `{}`, found `{}` != `*mut {}`",
+ arg_tys[1].simd_type(tcx),
+ arg_tys[1],
+ in_elem,
+ in_ty,
+ arg_tys[1].simd_type(tcx),
+ in_elem
+ );
+ unreachable!();
+ }
+ };
+ assert!(pointer_count > 0);
+ assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
+ assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
+
+ // The element type of the third argument must be a signed integer type of any width:
+ match arg_tys[2].simd_type(tcx).kind() {
+ ty::Int(_) => (),
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of third argument `{}` \
+ to be a signed integer type",
+ arg_tys[2].simd_type(tcx),
+ arg_tys[2]
+ );
+ }
+ }
+
+ // Alignment of T, must be a constant integer value:
+ let alignment_ty = bx.type_i32();
+ let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
+
+ // Truncate the mask vector to a vector of i1s:
+ let (mask, mask_ty) = {
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len);
+ (bx.trunc(args[2].immediate(), i1xn), i1xn)
+ };
+
+ let ret_t = bx.type_void();
+
+ // Type of the vector of pointers:
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
+ let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
+
+ // Type of the vector of elements:
+ let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
+ let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
+
+ let llvm_intrinsic =
+ format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+ let f = bx.declare_cfn(
+ &llvm_intrinsic,
+ bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
+ );
+ llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
+ let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
+ return Ok(v);
+ }
+
+ macro_rules! arith_red {
+ ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
+ $identity:expr) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.$integer_reduce(args[0].immediate());
+ if $ordered {
+ // if overflow occurs, the result is the
+ // mathematical result modulo 2^n:
+ Ok(bx.$op(args[1].immediate(), r))
+ } else {
+ Ok(bx.$integer_reduce(args[0].immediate()))
+ }
+ }
+ ty::Float(f) => {
+ let acc = if $ordered {
+ // ordered arithmetic reductions take an accumulator
+ args[1].immediate()
+ } else {
+ // unordered arithmetic reductions use the identity accumulator
+ match f.bit_width() {
+ 32 => bx.const_real(bx.type_f32(), $identity),
+ 64 => bx.const_real(bx.type_f64(), $identity),
+ v => return_error!(
+ r#"
+unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
+ sym::$name,
+ in_ty,
+ in_elem,
+ v,
+ ret_ty
+ ),
+ }
+ };
+ Ok(bx.$float_reduce(acc, args[0].immediate()))
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
+ arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
+ arith_red!(
+ simd_reduce_add_unordered: vector_reduce_add,
+ vector_reduce_fadd_fast,
+ false,
+ add,
+ 0.0
+ );
+ arith_red!(
+ simd_reduce_mul_unordered: vector_reduce_mul,
+ vector_reduce_fmul_fast,
+ false,
+ mul,
+ 1.0
+ );
+
+ macro_rules! minmax_red {
+ ($name:ident: $int_red:ident, $float_red:ident) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
+ ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
+ ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
+ minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
+
+ minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
+ minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
+
+ macro_rules! bitwise_red {
+ ($name:ident : $red:ident, $boolean:expr) => {
+ if name == sym::$name {
+ let input = if !$boolean {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ args[0].immediate()
+ } else {
+ match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {}
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ }
+
+ // boolean reductions operate on vectors of i1s:
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len as u64);
+ bx.trunc(args[0].immediate(), i1xn)
+ };
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.$red(input);
+ Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ bitwise_red!(simd_reduce_and: vector_reduce_and, false);
+ bitwise_red!(simd_reduce_or: vector_reduce_or, false);
+ bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
+ bitwise_red!(simd_reduce_all: vector_reduce_and, true);
+ bitwise_red!(simd_reduce_any: vector_reduce_or, true);
+
+ if name == sym::simd_cast {
+ require_simd!(ret_ty, "return");
+ let out_len = ret_ty.simd_size(tcx);
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ // casting cares about nominal type, not just structural type
+ let out_elem = ret_ty.simd_type(tcx);
+
+ if in_elem == out_elem {
+ return Ok(args[0].immediate());
+ }
+
+ enum Style {
+ Float,
+ Int(/* is signed? */ bool),
+ Unsupported,
+ }
+
+ let (in_style, in_width) = match in_elem.kind() {
+ // vectors of pointer-sized integers should've been
+ // disallowed before here, so this unwrap is safe.
+ ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
+ ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+ let (out_style, out_width) = match out_elem.kind() {
+ ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
+ ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+
+ match (in_style, out_style) {
+ (Style::Int(in_is_signed), Style::Int(_)) => {
+ return Ok(match in_width.cmp(&out_width) {
+ Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
+ Ordering::Equal => args[0].immediate(),
+ Ordering::Less => {
+ if in_is_signed {
+ bx.sext(args[0].immediate(), llret_ty)
+ } else {
+ bx.zext(args[0].immediate(), llret_ty)
+ }
+ }
+ });
+ }
+ (Style::Int(in_is_signed), Style::Float) => {
+ return Ok(if in_is_signed {
+ bx.sitofp(args[0].immediate(), llret_ty)
+ } else {
+ bx.uitofp(args[0].immediate(), llret_ty)
+ });
+ }
+ (Style::Float, Style::Int(out_is_signed)) => {
+ return Ok(if out_is_signed {
+ bx.fptosi(args[0].immediate(), llret_ty)
+ } else {
+ bx.fptoui(args[0].immediate(), llret_ty)
+ });
+ }
+ (Style::Float, Style::Float) => {
+ return Ok(match in_width.cmp(&out_width) {
+ Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
+ Ordering::Equal => args[0].immediate(),
+ Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
+ });
+ }
+ _ => { /* Unsupported. Fallthrough. */ }
+ }
+ require!(
+ false,
+ "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
+ in_ty,
+ in_elem,
+ ret_ty,
+ out_elem
+ );
+ }
+ macro_rules! arith {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+ arith! {
+ simd_add: Uint, Int => add, Float => fadd;
+ simd_sub: Uint, Int => sub, Float => fsub;
+ simd_mul: Uint, Int => mul, Float => fmul;
+ simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+ simd_rem: Uint => urem, Int => srem, Float => frem;
+ simd_shl: Uint, Int => shl;
+ simd_shr: Uint => lshr, Int => ashr;
+ simd_and: Uint, Int => and;
+ simd_or: Uint, Int => or;
+ simd_xor: Uint, Int => xor;
+ simd_fmax: Float => maxnum;
+ simd_fmin: Float => minnum;
+
+ }
+
+ if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
+ let lhs = args[0].immediate();
+ let rhs = args[1].immediate();
+ let is_add = name == sym::simd_saturating_add;
+ let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
+ let (signed, elem_width, elem_ty) = match *in_elem.kind() {
+ ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
+ ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
+ _ => {
+ return_error!(
+ "expected element type `{}` of vector type `{}` \
+ to be a signed or unsigned integer type",
+ arg_tys[0].simd_type(tcx),
+ arg_tys[0]
+ );
+ }
+ };
+ let llvm_intrinsic = &format!(
+ "llvm.{}{}.sat.v{}i{}",
+ if signed { 's' } else { 'u' },
+ if is_add { "add" } else { "sub" },
+ in_len,
+ elem_width
+ );
+ let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
+
+ let f = bx.declare_cfn(&llvm_intrinsic, bx.type_func(&[vec_ty, vec_ty], vec_ty));
+ llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
+ let v = bx.call(f, &[lhs, rhs], None);
+ return Ok(v);
+ }
+
+ span_bug!(span, "unknown SIMD intrinsic");
+}
+
+/// Returns the bit-width of an integer type and whether it is signed, or
+/// `None` if `ty` is not an integer type. `isize`/`usize` (whose
+/// `bit_width()` is `None`) report the target's pointer width.
+// FIXME: there are multiple copies of this helper; investigate reusing one of
+// the already existing ones.
+fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
+    let ptr_width = u64::from(cx.tcx.sess.target.ptr_width);
+    match ty.kind() {
+        ty::Int(t) => Some((t.bit_width().unwrap_or(ptr_width), true)),
+        ty::Uint(t) => Some((t.bit_width().unwrap_or(ptr_width), false)),
+        _ => None,
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
new file mode 100644
index 0000000..f14493e
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -0,0 +1,406 @@
+//! The Rust compiler.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(bool_to_option)]
+#![feature(const_cstr_unchecked)]
+#![feature(crate_visibility_modifier)]
+#![feature(extern_types)]
+#![feature(in_band_lifetimes)]
+#![feature(nll)]
+#![feature(or_patterns)]
+#![recursion_limit = "256"]
+
+use back::write::{create_informational_target_machine, create_target_machine};
+
+pub use llvm_util::target_features;
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::ModuleCodegen;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule};
+use rustc_errors::{ErrorReported, FatalError, Handler};
+use rustc_middle::dep_graph::{DepGraph, WorkProduct};
+use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoaderDyn};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_serialize::json;
+use rustc_session::config::{self, OptLevel, OutputFilenames, PrintRequest};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+
+use std::any::Any;
+use std::ffi::CStr;
+use std::fs;
+use std::sync::Arc;
+
+mod back {
+ pub mod archive;
+ pub mod lto;
+ mod profiling;
+ pub mod write;
+}
+
+mod abi;
+mod allocator;
+mod asm;
+mod attributes;
+mod base;
+mod builder;
+mod callee;
+mod common;
+mod consts;
+mod context;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod intrinsic;
+
+// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912.
+#[path = "llvm/mod.rs"]
+mod llvm_;
+pub mod llvm {
+ pub use super::llvm_::*;
+}
+
+mod llvm_util;
+mod metadata;
+mod mono_item;
+mod type_;
+mod type_of;
+mod va_arg;
+mod value;
+
+/// The LLVM-based codegen backend. It is stateless; the private unit field
+/// only prevents construction outside this crate (use `LlvmCodegenBackend::new`).
+#[derive(Clone)]
+pub struct LlvmCodegenBackend(());
+
+impl ExtraBackendMethods for LlvmCodegenBackend {
+    /// Creates the LLVM module that will hold the encoded crate metadata.
+    fn new_metadata(&self, tcx: TyCtxt<'_>, mod_name: &str) -> ModuleLlvm {
+        ModuleLlvm::new_metadata(tcx, mod_name)
+    }
+
+    /// Writes the compressed crate metadata into `llvm_module`
+    /// (delegates to `base::write_compressed_metadata`).
+    fn write_compressed_metadata<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        metadata: &EncodedMetadata,
+        llvm_module: &mut ModuleLlvm,
+    ) {
+        base::write_compressed_metadata(tcx, metadata, llvm_module)
+    }
+    /// Generates the allocator module contents for `kind`
+    /// (see `allocator::codegen`).
+    fn codegen_allocator<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        mods: &mut ModuleLlvm,
+        kind: AllocatorKind,
+    ) {
+        unsafe { allocator::codegen(tcx, mods, kind) }
+    }
+    /// Compiles one codegen unit. The `u64` appears to be a cost/size
+    /// estimate — NOTE(review): confirm in `base::compile_codegen_unit`.
+    fn compile_codegen_unit(
+        &self,
+        tcx: TyCtxt<'_>,
+        cgu_name: Symbol,
+    ) -> (ModuleCodegen<ModuleLlvm>, u64) {
+        base::compile_codegen_unit(tcx, cgu_name)
+    }
+    /// Returns a thread-safe factory closure producing target machines at
+    /// the given optimization level.
+    fn target_machine_factory(
+        &self,
+        sess: &Session,
+        optlvl: OptLevel,
+    ) -> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
+        back::write::target_machine_factory(sess, optlvl)
+    }
+    /// Returns the target CPU name for this session.
+    fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
+        llvm_util::target_cpu(sess)
+    }
+}
+
+impl WriteBackendMethods for LlvmCodegenBackend {
+    type Module = ModuleLlvm;
+    type ModuleBuffer = back::lto::ModuleBuffer;
+    type Context = llvm::Context;
+    type TargetMachine = &'static mut llvm::TargetMachine;
+    type ThinData = back::lto::ThinData;
+    type ThinBuffer = back::lto::ThinBuffer;
+    /// Asks LLVM to print its accumulated pass timings.
+    fn print_pass_timings(&self) {
+        unsafe {
+            llvm::LLVMRustPrintPassTimings();
+        }
+    }
+    /// Merges several codegened modules into a single one
+    /// (delegates to `back::write::link`).
+    fn run_link(
+        cgcx: &CodegenContext<Self>,
+        diag_handler: &Handler,
+        modules: Vec<ModuleCodegen<Self::Module>>,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+        back::write::link(cgcx, diag_handler, modules)
+    }
+    /// Runs fat LTO over `modules` plus any cached modules from a previous
+    /// incremental build.
+    fn run_fat_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<FatLTOInput<Self>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<LtoModuleCodegen<Self>, FatalError> {
+        back::lto::run_fat(cgcx, modules, cached_modules)
+    }
+    /// Runs ThinLTO over the serialized module buffers.
+    fn run_thin_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<(String, Self::ThinBuffer)>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+        back::lto::run_thin(cgcx, modules, cached_modules)
+    }
+    /// Runs the LLVM optimization pipeline over `module` as configured by
+    /// `config`.
+    unsafe fn optimize(
+        cgcx: &CodegenContext<Self>,
+        diag_handler: &Handler,
+        module: &ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<(), FatalError> {
+        back::write::optimize(cgcx, diag_handler, module, config)
+    }
+    /// Optimizes a single module as part of a ThinLTO run.
+    unsafe fn optimize_thin(
+        cgcx: &CodegenContext<Self>,
+        thin: &mut ThinModule<Self>,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+        back::lto::optimize_thin_module(thin, cgcx)
+    }
+    /// Lowers an optimized module to the session's final output artifacts
+    /// (delegates to `back::write::codegen`).
+    unsafe fn codegen(
+        cgcx: &CodegenContext<Self>,
+        diag_handler: &Handler,
+        module: ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<CompiledModule, FatalError> {
+        back::write::codegen(cgcx, diag_handler, module, config)
+    }
+    /// Serializes a module into a (name, buffer) pair for ThinLTO.
+    fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
+        back::lto::prepare_thin(module)
+    }
+    /// Serializes a module into an in-memory buffer, keyed by its name.
+    fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
+        (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod()))
+    }
+    /// Runs the LTO pass manager; `thin` selects ThinLTO vs fat LTO behavior.
+    fn run_lto_pass_manager(
+        cgcx: &CodegenContext<Self>,
+        module: &ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+        thin: bool,
+    ) {
+        back::lto::run_pass_manager(cgcx, module, config, thin)
+    }
+}
+
+// SAFETY: `LlvmCodegenBackend` carries no data of its own; LLVM state is
+// created per-thread (see `ModuleLlvm`), so the backend handle itself can be
+// sent and shared freely — NOTE(review): presumed from the comment below;
+// confirm no global LLVM state is touched through `&self` methods.
+unsafe impl Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis
+unsafe impl Sync for LlvmCodegenBackend {}
+
+impl LlvmCodegenBackend {
+    /// Constructs the backend, type-erased behind the `CodegenBackend` trait
+    /// object expected by the compiler driver.
+    pub fn new() -> Box<dyn CodegenBackend> {
+        let backend = LlvmCodegenBackend(());
+        Box::new(backend)
+    }
+}
+
+impl CodegenBackend for LlvmCodegenBackend {
+    fn init(&self, sess: &Session) {
+        llvm_util::init(sess); // Make sure llvm is inited
+    }
+
+    /// Handles `--print` requests whose answers are static lists here;
+    /// everything else is forwarded to `llvm_util::print`.
+    fn print(&self, req: PrintRequest, sess: &Session) {
+        match req {
+            PrintRequest::RelocationModels => {
+                println!("Available relocation models:");
+                for name in
+                    &["static", "pic", "dynamic-no-pic", "ropi", "rwpi", "ropi-rwpi", "default"]
+                {
+                    println!(" {}", name);
+                }
+                println!();
+            }
+            PrintRequest::CodeModels => {
+                println!("Available code models:");
+                for name in &["tiny", "small", "kernel", "medium", "large"] {
+                    println!(" {}", name);
+                }
+                println!();
+            }
+            PrintRequest::TlsModels => {
+                println!("Available TLS models:");
+                for name in &["global-dynamic", "local-dynamic", "initial-exec", "local-exec"] {
+                    println!(" {}", name);
+                }
+                println!();
+            }
+            req => llvm_util::print(req, sess),
+        }
+    }
+
+    fn print_passes(&self) {
+        llvm_util::print_passes();
+    }
+
+    fn print_version(&self) {
+        llvm_util::print_version();
+    }
+
+    fn target_features(&self, sess: &Session) -> Vec<Symbol> {
+        target_features(sess)
+    }
+
+    fn metadata_loader(&self) -> Box<MetadataLoaderDyn> {
+        Box::new(metadata::LlvmMetadataLoader)
+    }
+
+    fn provide(&self, providers: &mut ty::query::Providers) {
+        attributes::provide(providers);
+    }
+
+    fn provide_extern(&self, providers: &mut ty::query::Providers) {
+        attributes::provide_extern(providers);
+    }
+
+    /// Starts codegen for the whole crate; the returned value is the
+    /// type-erased `OngoingCodegen` later consumed by `join_codegen`.
+    fn codegen_crate<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        metadata: EncodedMetadata,
+        need_metadata_module: bool,
+    ) -> Box<dyn Any> {
+        Box::new(rustc_codegen_ssa::base::codegen_crate(
+            LlvmCodegenBackend(()),
+            tcx,
+            metadata,
+            need_metadata_module,
+        ))
+    }
+
+    /// Waits for ongoing codegen to finish, saves the incremental work
+    /// product index, and returns the collected `CodegenResults`.
+    fn join_codegen(
+        &self,
+        ongoing_codegen: Box<dyn Any>,
+        sess: &Session,
+        dep_graph: &DepGraph,
+    ) -> Result<Box<dyn Any>, ErrorReported> {
+        let (codegen_results, work_products) = ongoing_codegen
+            .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
+            .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
+            .join(sess);
+        if sess.opts.debugging_opts.incremental_info {
+            rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results);
+        }
+
+        sess.time("serialize_work_products", move || {
+            rustc_incremental::save_work_product_index(sess, &dep_graph, work_products)
+        });
+
+        // Bail out before linking if any errors were reported during codegen.
+        sess.compile_status()?;
+
+        Ok(Box::new(codegen_results))
+    }
+
+    /// Links the compiled modules into the final artifact(s); under
+    /// `-Z no-link` it instead serializes the results to an `.rlink` file.
+    fn link(
+        &self,
+        sess: &Session,
+        codegen_results: Box<dyn Any>,
+        outputs: &OutputFilenames,
+    ) -> Result<(), ErrorReported> {
+        let codegen_results = codegen_results
+            .downcast::<CodegenResults>()
+            .expect("Expected CodegenResults, found Box<Any>");
+
+        if sess.opts.debugging_opts.no_link {
+            // FIXME: use a binary format to encode the `.rlink` file
+            let rlink_data = json::encode(&codegen_results).map_err(|err| {
+                sess.fatal(&format!("failed to encode rlink: {}", err));
+            })?;
+            let rlink_file = outputs.with_extension(config::RLINK_EXT);
+            fs::write(&rlink_file, rlink_data).map_err(|err| {
+                sess.fatal(&format!("failed to write file {}: {}", rlink_file.display(), err));
+            })?;
+            return Ok(());
+        }
+
+        // Run the linker on any artifacts that resulted from the LLVM run.
+        // This should produce either a finished executable or library.
+        sess.time("link_crate", || {
+            use crate::back::archive::LlvmArchiveBuilder;
+            use rustc_codegen_ssa::back::link::link_binary;
+
+            let target_cpu = crate::llvm_util::target_cpu(sess);
+            link_binary::<LlvmArchiveBuilder<'_>>(
+                sess,
+                &codegen_results,
+                outputs,
+                &codegen_results.crate_name.as_str(),
+                target_cpu,
+            );
+        });
+
+        // Now that we won't touch anything in the incremental compilation directory
+        // any more, we can finalize it (which involves renaming it)
+        rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash);
+
+        sess.time("llvm_dump_timing_file", || {
+            if sess.opts.debugging_opts.llvm_time_trace {
+                llvm_util::time_trace_profiler_finish("llvm_timings.json");
+            }
+        });
+
+        Ok(())
+    }
+}
+
+/// Owning wrapper around one LLVM module together with the context and
+/// target machine it lives in.
+pub struct ModuleLlvm {
+    llcx: &'static mut llvm::Context,
+    // Raw pointer because the module is logically tied to `llcx`;
+    // borrow it via `llmod()`.
+    llmod_raw: *const llvm::Module,
+    tm: &'static mut llvm::TargetMachine,
+}
+
+// SAFETY: `ModuleLlvm` exclusively owns its context, module and target
+// machine (created in `new`/`new_metadata`/`parse`, freed in `Drop`) —
+// NOTE(review): presumes these LLVM objects are not aliased elsewhere;
+// confirm against the FFI layer before relying on this.
+unsafe impl Send for ModuleLlvm {}
+unsafe impl Sync for ModuleLlvm {}
+
+impl ModuleLlvm {
+    /// Creates a fresh context + module named `mod_name`, with a full
+    /// target machine derived from `tcx`'s session options.
+    fn new(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
+        unsafe {
+            let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
+            let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
+            ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(tcx) }
+        }
+    }
+
+    /// Like `new`, but with an "informational" target machine, as used for
+    /// the metadata module.
+    fn new_metadata(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
+        unsafe {
+            let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
+            let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
+            ModuleLlvm { llmod_raw, llcx, tm: create_informational_target_machine(tcx.sess) }
+        }
+    }
+
+    /// Deserializes a module from `buffer` into a fresh context
+    /// (used on the LTO path; see `back::lto::parse_module`).
+    fn parse(
+        cgcx: &CodegenContext<LlvmCodegenBackend>,
+        name: &CStr,
+        buffer: &[u8],
+        handler: &Handler,
+    ) -> Result<Self, FatalError> {
+        unsafe {
+            let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+            let llmod_raw = back::lto::parse_module(llcx, name, buffer, handler)?;
+            let tm = match (cgcx.tm_factory.0)() {
+                Ok(m) => m,
+                Err(e) => {
+                    // Target machine creation failed: report it and abort.
+                    handler.struct_err(&e).emit();
+                    return Err(FatalError);
+                }
+            };
+
+            Ok(ModuleLlvm { llmod_raw, llcx, tm })
+        }
+    }
+
+    /// Borrows the underlying LLVM module.
+    fn llmod(&self) -> &llvm::Module {
+        // SAFETY: `llmod_raw` was created from a live module owned by
+        // `self.llcx` and remains valid until `self` is dropped.
+        unsafe { &*self.llmod_raw }
+    }
+}
+
+impl Drop for ModuleLlvm {
+    fn drop(&mut self) {
+        unsafe {
+            // SAFETY: we exclusively own the context and target machine
+            // (`&'static mut` fields created by this type's constructors);
+            // they are freed exactly once here and never touched again.
+            llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
+            llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
new file mode 100644
index 0000000..64db4f7
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
@@ -0,0 +1,105 @@
+//! A wrapper around LLVM's archive (.a) code
+
+use rustc_fs_util::path_to_c_string;
+use std::path::Path;
+use std::slice;
+use std::str;
+
/// Owning, read-only handle to an LLVM `Archive`; destroyed on drop.
pub struct ArchiveRO {
    pub raw: &'static mut super::Archive,
}
+
// SAFETY(review): the archive is exclusively owned through `raw`; assumes
// LLVM's read-only Archive carries no thread-affine state — TODO confirm.
unsafe impl Send for ArchiveRO {}
+
/// Iterator over the members of an opened archive; frees the underlying
/// LLVM iterator on drop.
pub struct Iter<'a> {
    raw: &'a mut super::ArchiveIterator<'a>,
}

/// A single archive member, borrowed from the `Iter` that produced it.
pub struct Child<'a> {
    pub raw: &'a mut super::ArchiveChild<'a>,
}
+
+impl ArchiveRO {
+ /// Opens a static archive for read-only purposes. This is more optimized
+ /// than the `open` method because it uses LLVM's internal `Archive` class
+ /// rather than shelling out to `ar` for everything.
+ ///
+ /// If this archive is used with a mutable method, then an error will be
+ /// raised.
+ pub fn open(dst: &Path) -> Result<ArchiveRO, String> {
+ unsafe {
+ let s = path_to_c_string(dst);
+ let ar = super::LLVMRustOpenArchive(s.as_ptr()).ok_or_else(|| {
+ super::last_error().unwrap_or_else(|| "failed to open archive".to_owned())
+ })?;
+ Ok(ArchiveRO { raw: ar })
+ }
+ }
+
+ pub fn iter(&self) -> Iter<'_> {
+ unsafe { Iter { raw: super::LLVMRustArchiveIteratorNew(self.raw) } }
+ }
+}
+
impl Drop for ArchiveRO {
    fn drop(&mut self) {
        unsafe {
            // SAFETY(review): `raw` was produced by LLVMRustOpenArchive and
            // is destroyed exactly once here.
            super::LLVMRustDestroyArchive(&mut *(self.raw as *mut _));
        }
    }
}
+
impl<'a> Iterator for Iter<'a> {
    type Item = Result<Child<'a>, String>;

    fn next(&mut self) -> Option<Result<Child<'a>, String>> {
        unsafe {
            match super::LLVMRustArchiveIteratorNext(self.raw) {
                Some(raw) => Some(Ok(Child { raw })),
                // A null child is either the end of the archive or an error;
                // `last_error()` distinguishes: `None` from it ends iteration
                // cleanly, `Some(msg)` is surfaced as `Err`.
                None => super::last_error().map(Err),
            }
        }
    }
}
+
impl<'a> Drop for Iter<'a> {
    fn drop(&mut self) {
        unsafe {
            // SAFETY(review): `raw` came from LLVMRustArchiveIteratorNew and
            // is freed exactly once here.
            super::LLVMRustArchiveIteratorFree(&mut *(self.raw as *mut _));
        }
    }
}
+
impl<'a> Child<'a> {
    /// Returns the member's (trimmed) name, or `None` if it has no name or
    /// the name is not valid UTF-8.
    pub fn name(&self) -> Option<&'a str> {
        unsafe {
            let mut name_len = 0;
            let name_ptr = super::LLVMRustArchiveChildName(self.raw, &mut name_len);
            if name_ptr.is_null() {
                None
            } else {
                // SAFETY(review): assumes LLVM returns a pointer/length pair
                // valid for the archive's lifetime 'a — TODO confirm.
                let name = slice::from_raw_parts(name_ptr as *const u8, name_len as usize);
                str::from_utf8(name).ok().map(|s| s.trim())
            }
        }
    }

    /// Returns the member's raw contents.
    ///
    /// # Panics
    /// Panics if LLVM cannot provide the child's data.
    pub fn data(&self) -> &'a [u8] {
        unsafe {
            let mut data_len = 0;
            let data_ptr = super::LLVMRustArchiveChildData(self.raw, &mut data_len);
            if data_ptr.is_null() {
                panic!("failed to read data from archive child");
            }
            slice::from_raw_parts(data_ptr as *const u8, data_len as usize)
        }
    }
}
+
impl<'a> Drop for Child<'a> {
    fn drop(&mut self) {
        unsafe {
            // SAFETY(review): `raw` was handed out by the archive iterator
            // and is freed exactly once here.
            super::LLVMRustArchiveChildFree(&mut *(self.raw as *mut _));
        }
    }
}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
new file mode 100644
index 0000000..ccd3e42
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
@@ -0,0 +1,168 @@
+//! LLVM diagnostic reports.
+
+pub use self::Diagnostic::*;
+pub use self::OptimizationDiagnosticKind::*;
+
+use crate::value::Value;
+use libc::c_uint;
+
+use super::{DiagnosticInfo, Twine};
+
/// The flavours of optimization diagnostic LLVM can emit.
#[derive(Copy, Clone)]
pub enum OptimizationDiagnosticKind {
    OptimizationRemark,
    OptimizationMissed,
    OptimizationAnalysis,
    OptimizationAnalysisFPCommute,
    OptimizationAnalysisAliasing,
    OptimizationFailure,
    OptimizationRemarkOther,
}

impl OptimizationDiagnosticKind {
    /// Returns a short human-readable label for this diagnostic kind.
    pub fn describe(self) -> &'static str {
        match self {
            Self::OptimizationRemark | Self::OptimizationRemarkOther => "remark",
            Self::OptimizationMissed => "missed",
            Self::OptimizationAnalysis => "analysis",
            Self::OptimizationAnalysisFPCommute => "floating-point",
            Self::OptimizationAnalysisAliasing => "aliasing",
            Self::OptimizationFailure => "failure",
        }
    }
}
+
/// Decoded payload of an LLVM optimization remark/analysis diagnostic.
pub struct OptimizationDiagnostic<'ll> {
    pub kind: OptimizationDiagnosticKind,
    pub pass_name: String,
    /// The function the diagnostic applies to.
    pub function: &'ll Value,
    pub line: c_uint,
    pub column: c_uint,
    pub filename: String,
    pub message: String,
}
+
impl OptimizationDiagnostic<'ll> {
    /// Decodes an optimization diagnostic out of a raw `DiagnosticInfo`.
    ///
    /// The single FFI call fills three string out-parameters at once, so the
    /// `build_string` buffers are nested: each closure lends its `RustString`
    /// out-param to the innermost call. The `.ok()`s convert each buffer's
    /// UTF-8 result; a failure in an inner buffer does not abort the others.
    unsafe fn unpack(kind: OptimizationDiagnosticKind, di: &'ll DiagnosticInfo) -> Self {
        let mut function = None;
        let mut line = 0;
        let mut column = 0;

        let mut message = None;
        let mut filename = None;
        let pass_name = super::build_string(|pass_name| {
            message = super::build_string(|message| {
                filename = super::build_string(|filename| {
                    super::LLVMRustUnpackOptimizationDiagnostic(
                        di,
                        pass_name,
                        &mut function,
                        &mut line,
                        &mut column,
                        filename,
                        message,
                    )
                })
                .ok()
            })
            .ok()
        })
        .ok();

        // Substitute a placeholder when LLVM reports no source file.
        let mut filename = filename.unwrap_or_default();
        if filename.is_empty() {
            filename.push_str("<unknown file>");
        }

        OptimizationDiagnostic {
            kind,
            // Pass name and message are required; a missing/non-UTF8 value
            // indicates a broken LLVM and is treated as a bug.
            pass_name: pass_name.expect("got a non-UTF8 pass name from LLVM"),
            function: function.unwrap(),
            line,
            column,
            filename,
            message: message.expect("got a non-UTF8 OptimizationDiagnostic message from LLVM"),
        }
    }
}
+
/// Decoded payload of an LLVM inline-assembly diagnostic.
#[derive(Copy, Clone)]
pub struct InlineAsmDiagnostic<'ll> {
    pub level: super::DiagnosticLevel,
    // Presumably the srcloc cookie tying the diagnostic back to the original
    // asm invocation — TODO confirm against the handler that consumes it.
    pub cookie: c_uint,
    pub message: &'ll Twine,
    pub instruction: Option<&'ll Value>,
}

impl InlineAsmDiagnostic<'ll> {
    /// Decodes an inline-asm diagnostic out of a raw `DiagnosticInfo`.
    unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self {
        let mut cookie = 0;
        let mut message = None;
        let mut instruction = None;
        // Pre-initialized so the out-parameter always holds a defined value.
        let mut level = super::DiagnosticLevel::Error;

        super::LLVMRustUnpackInlineAsmDiagnostic(
            di,
            &mut level,
            &mut cookie,
            &mut message,
            &mut instruction,
        );

        // The message is mandatory; its absence would be an LLVM bug.
        InlineAsmDiagnostic { level, cookie, message: message.unwrap(), instruction }
    }
}
+
/// A decoded LLVM diagnostic. Variants that are fully decoded carry a typed
/// payload; the rest keep the raw `DiagnosticInfo` for the caller to format.
pub enum Diagnostic<'ll> {
    Optimization(OptimizationDiagnostic<'ll>),
    InlineAsm(InlineAsmDiagnostic<'ll>),
    PGO(&'ll DiagnosticInfo),
    Linker(&'ll DiagnosticInfo),
    Unsupported(&'ll DiagnosticInfo),

    /// LLVM has other types that we do not wrap here.
    UnknownDiagnostic(&'ll DiagnosticInfo),
}
+
+impl Diagnostic<'ll> {
+ pub unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self {
+ use super::DiagnosticKind as Dk;
+ let kind = super::LLVMRustGetDiagInfoKind(di);
+
+ match kind {
+ Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpack(di)),
+
+ Dk::OptimizationRemark => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di))
+ }
+ Dk::OptimizationRemarkOther => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationRemarkOther, di))
+ }
+ Dk::OptimizationRemarkMissed => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationMissed, di))
+ }
+
+ Dk::OptimizationRemarkAnalysis => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysis, di))
+ }
+
+ Dk::OptimizationRemarkAnalysisFPCommute => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisFPCommute, di))
+ }
+
+ Dk::OptimizationRemarkAnalysisAliasing => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisAliasing, di))
+ }
+
+ Dk::OptimizationFailure => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationFailure, di))
+ }
+
+ Dk::PGOProfile => PGO(di),
+ Dk::Linker => Linker(di),
+ Dk::Unsupported => Unsupported(di),
+
+ _ => UnknownDiagnostic(di),
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
new file mode 100644
index 0000000..af3f3e7
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -0,0 +1,2365 @@
+#![allow(non_camel_case_types)]
+#![allow(non_upper_case_globals)]
+
+use rustc_codegen_ssa::coverageinfo::map as coverage_map;
+
+use super::debuginfo::{
+ DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
+ DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DINameSpace, DISPFlags, DIScope,
+ DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable, DebugEmissionKind,
+};
+
+use libc::{c_char, c_int, c_uint, size_t};
+use libc::{c_ulonglong, c_void};
+
+use std::marker::PhantomData;
+
+use super::RustString;
+
/// An LLVM C-API boolean: a `c_uint` that is either `True` (1) or `False` (0).
pub type Bool = c_uint;

pub const True: Bool = 1 as Bool;
pub const False: Bool = 0 as Bool;
+
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum LLVMRustResult {
+ Success,
+ Failure,
+}
+// Consts for the LLVM CallConv type, pre-cast to usize.
+
+/// LLVM CallingConv::ID. Should we wrap this?
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[repr(C)]
+pub enum CallConv {
+ CCallConv = 0,
+ FastCallConv = 8,
+ ColdCallConv = 9,
+ X86StdcallCallConv = 64,
+ X86FastcallCallConv = 65,
+ ArmAapcsCallConv = 67,
+ Msp430Intr = 69,
+ X86_ThisCall = 70,
+ PtxKernel = 71,
+ X86_64_SysV = 78,
+ X86_64_Win64 = 79,
+ X86_VectorCall = 80,
+ X86_Intr = 83,
+ AvrNonBlockingInterrupt = 84,
+ AvrInterrupt = 85,
+ AmdGpuKernel = 91,
+}
+
+/// LLVMRustLinkage
+#[derive(PartialEq)]
+#[repr(C)]
+pub enum Linkage {
+ ExternalLinkage = 0,
+ AvailableExternallyLinkage = 1,
+ LinkOnceAnyLinkage = 2,
+ LinkOnceODRLinkage = 3,
+ WeakAnyLinkage = 4,
+ WeakODRLinkage = 5,
+ AppendingLinkage = 6,
+ InternalLinkage = 7,
+ PrivateLinkage = 8,
+ ExternalWeakLinkage = 9,
+ CommonLinkage = 10,
+}
+
+// LLVMRustVisibility
+#[repr(C)]
+pub enum Visibility {
+ Default = 0,
+ Hidden = 1,
+ Protected = 2,
+}
+
+/// LLVMUnnamedAddr
+#[repr(C)]
+pub enum UnnamedAddr {
+ No,
+ Local,
+ Global,
+}
+
+/// LLVMDLLStorageClass
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum DLLStorageClass {
+ #[allow(dead_code)]
+ Default = 0,
+ DllImport = 1, // Function to be imported from DLL.
+ #[allow(dead_code)]
+ DllExport = 2, // Function to be accessible from DLL.
+}
+
+/// Matches LLVMRustAttribute in LLVMWrapper.h
+/// Semantically a subset of the C++ enum llvm::Attribute::AttrKind,
+/// though it is not ABI compatible (since it's a C++ enum)
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub enum Attribute {
+ AlwaysInline = 0,
+ ByVal = 1,
+ Cold = 2,
+ InlineHint = 3,
+ MinSize = 4,
+ Naked = 5,
+ NoAlias = 6,
+ NoCapture = 7,
+ NoInline = 8,
+ NonNull = 9,
+ NoRedZone = 10,
+ NoReturn = 11,
+ NoUnwind = 12,
+ OptimizeForSize = 13,
+ ReadOnly = 14,
+ SExt = 15,
+ StructRet = 16,
+ UWTable = 17,
+ ZExt = 18,
+ InReg = 19,
+ SanitizeThread = 20,
+ SanitizeAddress = 21,
+ SanitizeMemory = 22,
+ NonLazyBind = 23,
+ OptimizeNone = 24,
+ ReturnsTwice = 25,
+ ReadNone = 26,
+ InaccessibleMemOnly = 27,
+}
+
+/// LLVMIntPredicate
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum IntPredicate {
+ IntEQ = 32,
+ IntNE = 33,
+ IntUGT = 34,
+ IntUGE = 35,
+ IntULT = 36,
+ IntULE = 37,
+ IntSGT = 38,
+ IntSGE = 39,
+ IntSLT = 40,
+ IntSLE = 41,
+}
+
+impl IntPredicate {
+ pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self {
+ match intpre {
+ rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ,
+ rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE,
+ rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT,
+ rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE,
+ rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT,
+ rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE,
+ rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT,
+ rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE,
+ rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT,
+ rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE,
+ }
+ }
+}
+
+/// LLVMRealPredicate
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum RealPredicate {
+ RealPredicateFalse = 0,
+ RealOEQ = 1,
+ RealOGT = 2,
+ RealOGE = 3,
+ RealOLT = 4,
+ RealOLE = 5,
+ RealONE = 6,
+ RealORD = 7,
+ RealUNO = 8,
+ RealUEQ = 9,
+ RealUGT = 10,
+ RealUGE = 11,
+ RealULT = 12,
+ RealULE = 13,
+ RealUNE = 14,
+ RealPredicateTrue = 15,
+}
+
+impl RealPredicate {
+ pub fn from_generic(realpred: rustc_codegen_ssa::common::RealPredicate) -> Self {
+ match realpred {
+ rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => {
+ RealPredicate::RealPredicateFalse
+ }
+ rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ,
+ rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT,
+ rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE,
+ rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT,
+ rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE,
+ rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE,
+ rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD,
+ rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO,
+ rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ,
+ rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT,
+ rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE,
+ rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT,
+ rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE,
+ rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE,
+ rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => {
+ RealPredicate::RealPredicateTrue
+ }
+ }
+ }
+}
+
+/// LLVMTypeKind
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[repr(C)]
+pub enum TypeKind {
+ Void = 0,
+ Half = 1,
+ Float = 2,
+ Double = 3,
+ X86_FP80 = 4,
+ FP128 = 5,
+ PPC_FP128 = 6,
+ Label = 7,
+ Integer = 8,
+ Function = 9,
+ Struct = 10,
+ Array = 11,
+ Pointer = 12,
+ Vector = 13,
+ Metadata = 14,
+ X86_MMX = 15,
+ Token = 16,
+ ScalableVector = 17,
+ BFloat = 18,
+}
+
+impl TypeKind {
+ pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind {
+ match self {
+ TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void,
+ TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half,
+ TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float,
+ TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double,
+ TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80,
+ TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128,
+ TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128,
+ TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label,
+ TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer,
+ TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function,
+ TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct,
+ TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array,
+ TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer,
+ TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector,
+ TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata,
+ TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX,
+ TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token,
+ TypeKind::ScalableVector => rustc_codegen_ssa::common::TypeKind::ScalableVector,
+ TypeKind::BFloat => rustc_codegen_ssa::common::TypeKind::BFloat,
+ }
+ }
+}
+
+/// LLVMAtomicRmwBinOp
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum AtomicRmwBinOp {
+ AtomicXchg = 0,
+ AtomicAdd = 1,
+ AtomicSub = 2,
+ AtomicAnd = 3,
+ AtomicNand = 4,
+ AtomicOr = 5,
+ AtomicXor = 6,
+ AtomicMax = 7,
+ AtomicMin = 8,
+ AtomicUMax = 9,
+ AtomicUMin = 10,
+}
+
+impl AtomicRmwBinOp {
+ pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self {
+ match op {
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin,
+ }
+ }
+}
+
+/// LLVMAtomicOrdering
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum AtomicOrdering {
+ #[allow(dead_code)]
+ NotAtomic = 0,
+ Unordered = 1,
+ Monotonic = 2,
+ // Consume = 3, // Not specified yet.
+ Acquire = 4,
+ Release = 5,
+ AcquireRelease = 6,
+ SequentiallyConsistent = 7,
+}
+
+impl AtomicOrdering {
+ pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self {
+ match ao {
+ rustc_codegen_ssa::common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
+ rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered,
+ rustc_codegen_ssa::common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
+ rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire,
+ rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release,
+ rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => {
+ AtomicOrdering::AcquireRelease
+ }
+ rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => {
+ AtomicOrdering::SequentiallyConsistent
+ }
+ }
+ }
+}
+
+/// LLVMRustSynchronizationScope
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum SynchronizationScope {
+ SingleThread,
+ CrossThread,
+}
+
+impl SynchronizationScope {
+ pub fn from_generic(sc: rustc_codegen_ssa::common::SynchronizationScope) -> Self {
+ match sc {
+ rustc_codegen_ssa::common::SynchronizationScope::SingleThread => {
+ SynchronizationScope::SingleThread
+ }
+ rustc_codegen_ssa::common::SynchronizationScope::CrossThread => {
+ SynchronizationScope::CrossThread
+ }
+ }
+ }
+}
+
+/// LLVMRustFileType
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum FileType {
+ AssemblyFile,
+ ObjectFile,
+}
+
+/// LLVMMetadataType
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum MetadataType {
+ MD_dbg = 0,
+ MD_tbaa = 1,
+ MD_prof = 2,
+ MD_fpmath = 3,
+ MD_range = 4,
+ MD_tbaa_struct = 5,
+ MD_invariant_load = 6,
+ MD_alias_scope = 7,
+ MD_noalias = 8,
+ MD_nontemporal = 9,
+ MD_mem_parallel_loop_access = 10,
+ MD_nonnull = 11,
+}
+
+/// LLVMRustAsmDialect
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum AsmDialect {
+ Att,
+ Intel,
+}
+
+impl AsmDialect {
+ pub fn from_generic(asm: rustc_ast::LlvmAsmDialect) -> Self {
+ match asm {
+ rustc_ast::LlvmAsmDialect::Att => AsmDialect::Att,
+ rustc_ast::LlvmAsmDialect::Intel => AsmDialect::Intel,
+ }
+ }
+}
+
+/// LLVMRustCodeGenOptLevel
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum CodeGenOptLevel {
+ None,
+ Less,
+ Default,
+ Aggressive,
+}
+
+/// LLVMRustPassBuilderOptLevel
+#[repr(C)]
+pub enum PassBuilderOptLevel {
+ O0,
+ O1,
+ O2,
+ O3,
+ Os,
+ Oz,
+}
+
+/// LLVMRustOptStage
+#[derive(PartialEq)]
+#[repr(C)]
+pub enum OptStage {
+ PreLinkNoLTO,
+ PreLinkThinLTO,
+ PreLinkFatLTO,
+ ThinLTO,
+ FatLTO,
+}
+
+/// LLVMRustSanitizerOptions
+#[repr(C)]
+pub struct SanitizerOptions {
+ pub sanitize_address: bool,
+ pub sanitize_address_recover: bool,
+ pub sanitize_memory: bool,
+ pub sanitize_memory_recover: bool,
+ pub sanitize_memory_track_origins: c_int,
+ pub sanitize_thread: bool,
+}
+
+/// LLVMRelocMode
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum RelocModel {
+ Static,
+ PIC,
+ DynamicNoPic,
+ ROPI,
+ RWPI,
+ ROPI_RWPI,
+}
+
+/// LLVMRustCodeModel
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum CodeModel {
+ Tiny,
+ Small,
+ Kernel,
+ Medium,
+ Large,
+ None,
+}
+
+/// LLVMRustDiagnosticKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum DiagnosticKind {
+ Other,
+ InlineAsm,
+ StackSize,
+ DebugMetadataVersion,
+ SampleProfile,
+ OptimizationRemark,
+ OptimizationRemarkMissed,
+ OptimizationRemarkAnalysis,
+ OptimizationRemarkAnalysisFPCommute,
+ OptimizationRemarkAnalysisAliasing,
+ OptimizationRemarkOther,
+ OptimizationFailure,
+ PGOProfile,
+ Linker,
+ Unsupported,
+}
+
+/// LLVMRustDiagnosticLevel
+#[derive(Copy, Clone)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum DiagnosticLevel {
+ Error,
+ Warning,
+ Note,
+ Remark,
+}
+
+/// LLVMRustArchiveKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ArchiveKind {
+ K_GNU,
+ K_BSD,
+ K_DARWIN,
+ K_COFF,
+}
+
+/// LLVMRustPassKind
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum PassKind {
+ Other,
+ Function,
+ Module,
+}
+
+/// LLVMRustThinLTOData
+extern "C" {
+ pub type ThinLTOData;
+}
+
+/// LLVMRustThinLTOBuffer
+extern "C" {
+ pub type ThinLTOBuffer;
+}
+
+// LLVMRustModuleNameCallback
+pub type ThinLTOModuleNameCallback =
+ unsafe extern "C" fn(*mut c_void, *const c_char, *const c_char);
+
+/// LLVMRustThinLTOModule
+#[repr(C)]
+pub struct ThinLTOModule {
+ pub identifier: *const c_char,
+ pub data: *const u8,
+ pub len: usize,
+}
+
+/// LLVMThreadLocalMode
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ThreadLocalMode {
+ NotThreadLocal,
+ GeneralDynamic,
+ LocalDynamic,
+ InitialExec,
+ LocalExec,
+}
+
+/// LLVMRustChecksumKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ChecksumKind {
+ None,
+ MD5,
+ SHA1,
+}
+
+extern "C" {
+ type Opaque;
+}
+#[repr(C)]
+struct InvariantOpaque<'a> {
+ _marker: PhantomData<&'a mut &'a ()>,
+ _opaque: Opaque,
+}
+
+// Opaque pointer types
+extern "C" {
+ pub type Module;
+}
+extern "C" {
+ pub type Context;
+}
+extern "C" {
+ pub type Type;
+}
+extern "C" {
+ pub type Value;
+}
+extern "C" {
+ pub type ConstantInt;
+}
+extern "C" {
+ pub type Metadata;
+}
+extern "C" {
+ pub type BasicBlock;
+}
+#[repr(C)]
+pub struct Builder<'a>(InvariantOpaque<'a>);
+extern "C" {
+ pub type MemoryBuffer;
+}
+#[repr(C)]
+pub struct PassManager<'a>(InvariantOpaque<'a>);
+extern "C" {
+ pub type PassManagerBuilder;
+}
+extern "C" {
+ pub type ObjectFile;
+}
+#[repr(C)]
+pub struct SectionIterator<'a>(InvariantOpaque<'a>);
+extern "C" {
+ pub type Pass;
+}
+extern "C" {
+ pub type TargetMachine;
+}
+extern "C" {
+ pub type Archive;
+}
+#[repr(C)]
+pub struct ArchiveIterator<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct ArchiveChild<'a>(InvariantOpaque<'a>);
+extern "C" {
+ pub type Twine;
+}
+extern "C" {
+ pub type DiagnosticInfo;
+}
+extern "C" {
+ pub type SMDiagnostic;
+}
+#[repr(C)]
+pub struct RustArchiveMember<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct OperandBundleDef<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct Linker<'a>(InvariantOpaque<'a>);
+
+pub type DiagnosticHandler = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void);
+pub type InlineAsmDiagHandler = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint);
+
+pub mod coverageinfo {
+ use super::coverage_map;
+
+ /// Aligns with [llvm::coverage::CounterMappingRegion::RegionKind](https://github.com/rust-lang/llvm-project/blob/rustc/10.0-2020-05-05/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L205-L221)
+ #[derive(Copy, Clone, Debug)]
+ #[repr(C)]
+ pub enum RegionKind {
+ /// A CodeRegion associates some code with a counter
+ CodeRegion = 0,
+
+ /// An ExpansionRegion represents a file expansion region that associates
+ /// a source range with the expansion of a virtual source file, such as
+ /// for a macro instantiation or #include file.
+ ExpansionRegion = 1,
+
+ /// A SkippedRegion represents a source range with code that was skipped
+ /// by a preprocessor or similar means.
+ SkippedRegion = 2,
+
+ /// A GapRegion is like a CodeRegion, but its count is only set as the
+ /// line execution count when its the only region in the line.
+ GapRegion = 3,
+ }
+
+ /// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
+ /// coverage map, in accordance with the
+ /// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/llvmorg-8.0.0/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+ /// The struct composes fields representing the `Counter` type and value(s) (injected counter
+ /// ID, or expression type and operands), the source file (an indirect index into a "filenames
+ /// array", encoded separately), and source location (start and end positions of the represented
+ /// code region).
+ ///
+ /// Aligns with [llvm::coverage::CounterMappingRegion](https://github.com/rust-lang/llvm-project/blob/rustc/10.0-2020-05-05/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L223-L226)
+ /// Important: The Rust struct layout (order and types of fields) must match its C++
+ /// counterpart.
+ #[derive(Copy, Clone, Debug)]
+ #[repr(C)]
+ pub struct CounterMappingRegion {
+ /// The counter type and type-dependent counter data, if any.
+ counter: coverage_map::Counter,
+
+ /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
+ /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
+ /// that, in turn, are used to look up the filename for this region.
+ file_id: u32,
+
+ /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
+ /// the mapping regions created as a result of macro expansion, by checking if their file id
+ /// matches the expanded file id.
+ expanded_file_id: u32,
+
+ /// 1-based starting line of the mapping region.
+ start_line: u32,
+
+ /// 1-based starting column of the mapping region.
+ start_col: u32,
+
+ /// 1-based ending line of the mapping region.
+ end_line: u32,
+
+ /// 1-based ending column of the mapping region. If the high bit is set, the current
+ /// mapping region is a gap area.
+ end_col: u32,
+
+ kind: RegionKind,
+ }
+
+ impl CounterMappingRegion {
+ pub fn code_region(
+ counter: coverage_map::Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::CodeRegion,
+ }
+ }
+
+ pub fn expansion_region(
+ file_id: u32,
+ expanded_file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::ExpansionRegion,
+ }
+ }
+
+ pub fn skipped_region(
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::SkippedRegion,
+ }
+ }
+
+ pub fn gap_region(
+ counter: coverage_map::Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col: ((1 as u32) << 31) | end_col,
+ kind: RegionKind::GapRegion,
+ }
+ }
+ }
+}
+
+pub mod debuginfo {
+ use super::{InvariantOpaque, Metadata};
+ use bitflags::bitflags;
+
+ #[repr(C)]
+ pub struct DIBuilder<'a>(InvariantOpaque<'a>);
+
+ pub type DIDescriptor = Metadata;
+ pub type DIScope = DIDescriptor;
+ pub type DIFile = DIScope;
+ pub type DILexicalBlock = DIScope;
+ pub type DISubprogram = DIScope;
+ pub type DINameSpace = DIScope;
+ pub type DIType = DIDescriptor;
+ pub type DIBasicType = DIType;
+ pub type DIDerivedType = DIType;
+ pub type DICompositeType = DIDerivedType;
+ pub type DIVariable = DIDescriptor;
+ pub type DIGlobalVariableExpression = DIDescriptor;
+ pub type DIArray = DIDescriptor;
+ pub type DISubrange = DIDescriptor;
+ pub type DIEnumerator = DIDescriptor;
+ pub type DITemplateTypeParameter = DIDescriptor;
+
+ // These values **must** match with LLVMRustDIFlags!!
+ bitflags! {
+ #[repr(transparent)]
+ #[derive(Default)]
+ pub struct DIFlags: u32 {
+ const FlagZero = 0;
+ const FlagPrivate = 1;
+ const FlagProtected = 2;
+ const FlagPublic = 3;
+ const FlagFwdDecl = (1 << 2);
+ const FlagAppleBlock = (1 << 3);
+ const FlagBlockByrefStruct = (1 << 4);
+ const FlagVirtual = (1 << 5);
+ const FlagArtificial = (1 << 6);
+ const FlagExplicit = (1 << 7);
+ const FlagPrototyped = (1 << 8);
+ const FlagObjcClassComplete = (1 << 9);
+ const FlagObjectPointer = (1 << 10);
+ const FlagVector = (1 << 11);
+ const FlagStaticMember = (1 << 12);
+ const FlagLValueReference = (1 << 13);
+ const FlagRValueReference = (1 << 14);
+ const FlagExternalTypeRef = (1 << 15);
+ const FlagIntroducedVirtual = (1 << 18);
+ const FlagBitField = (1 << 19);
+ const FlagNoReturn = (1 << 20);
+ }
+ }
+
+ // These values **must** match with LLVMRustDISPFlags!!
+ bitflags! {
+ #[repr(transparent)]
+ #[derive(Default)]
+ pub struct DISPFlags: u32 {
+ const SPFlagZero = 0;
+ const SPFlagVirtual = 1;
+ const SPFlagPureVirtual = 2;
+ const SPFlagLocalToUnit = (1 << 2);
+ const SPFlagDefinition = (1 << 3);
+ const SPFlagOptimized = (1 << 4);
+ const SPFlagMainSubprogram = (1 << 5);
+ }
+ }
+
+ /// LLVMRustDebugEmissionKind
+ #[derive(Copy, Clone)]
+ #[repr(C)]
+ pub enum DebugEmissionKind {
+ NoDebug,
+ FullDebug,
+ LineTablesOnly,
+ }
+
+ impl DebugEmissionKind {
+ pub fn from_generic(kind: rustc_session::config::DebugInfo) -> Self {
+ use rustc_session::config::DebugInfo;
+ match kind {
+ DebugInfo::None => DebugEmissionKind::NoDebug,
+ DebugInfo::Limited => DebugEmissionKind::LineTablesOnly,
+ DebugInfo::Full => DebugEmissionKind::FullDebug,
+ }
+ }
+ }
+}
+
+extern "C" {
+ pub type ModuleBuffer;
+}
+
+pub type SelfProfileBeforePassCallback =
+ unsafe extern "C" fn(*mut c_void, *const c_char, *const c_char);
+pub type SelfProfileAfterPassCallback = unsafe extern "C" fn(*mut c_void);
+
+extern "C" {
+ pub fn LLVMRustInstallFatalErrorHandler();
+
+ // Create and destroy contexts.
+ pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context;
+ pub fn LLVMContextDispose(C: &'static mut Context);
+ pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint;
+
+ // Create modules.
+ pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: &Context) -> &Module;
+ pub fn LLVMGetModuleContext(M: &Module) -> &Context;
+ pub fn LLVMCloneModule(M: &Module) -> &Module;
+
+ /// Data layout. See Module::getDataLayout.
+ pub fn LLVMGetDataLayoutStr(M: &Module) -> *const c_char;
+ pub fn LLVMSetDataLayout(M: &Module, Triple: *const c_char);
+
+ /// See Module::setModuleInlineAsm.
+ pub fn LLVMSetModuleInlineAsm2(M: &Module, Asm: *const c_char, AsmLen: size_t);
+ pub fn LLVMRustAppendModuleInlineAsm(M: &Module, Asm: *const c_char, AsmLen: size_t);
+
+ /// See llvm::LLVMTypeKind::getTypeID.
+ pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind;
+
+ // Operations on integer types
+ pub fn LLVMInt1TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt8TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt16TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt32TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt64TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMIntTypeInContext(C: &Context, NumBits: c_uint) -> &Type;
+
+ pub fn LLVMGetIntTypeWidth(IntegerTy: &Type) -> c_uint;
+
+ // Operations on real types
+ pub fn LLVMFloatTypeInContext(C: &Context) -> &Type;
+ pub fn LLVMDoubleTypeInContext(C: &Context) -> &Type;
+
+ // Operations on function types
+ pub fn LLVMFunctionType(
+ ReturnType: &'a Type,
+ ParamTypes: *const &'a Type,
+ ParamCount: c_uint,
+ IsVarArg: Bool,
+ ) -> &'a Type;
+ pub fn LLVMCountParamTypes(FunctionTy: &Type) -> c_uint;
+ pub fn LLVMGetParamTypes(FunctionTy: &'a Type, Dest: *mut &'a Type);
+
+ // Operations on struct types
+ pub fn LLVMStructTypeInContext(
+ C: &'a Context,
+ ElementTypes: *const &'a Type,
+ ElementCount: c_uint,
+ Packed: Bool,
+ ) -> &'a Type;
+
+ // Operations on array, pointer, and vector types (sequence types)
+ pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
+ pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type;
+ pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
+
+ pub fn LLVMGetElementType(Ty: &Type) -> &Type;
+ pub fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint;
+
+ // Operations on other types
+ pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
+ pub fn LLVMRustMetadataTypeInContext(C: &Context) -> &Type;
+
+ // Operations on all values
+ pub fn LLVMTypeOf(Val: &Value) -> &Type;
+ pub fn LLVMGetValueName2(Val: &Value, Length: *mut size_t) -> *const c_char;
+ pub fn LLVMSetValueName2(Val: &Value, Name: *const c_char, NameLen: size_t);
+ pub fn LLVMReplaceAllUsesWith(OldVal: &'a Value, NewVal: &'a Value);
+ pub fn LLVMSetMetadata(Val: &'a Value, KindID: c_uint, Node: &'a Value);
+
+ // Operations on constants of any type
+ pub fn LLVMConstNull(Ty: &Type) -> &Value;
+ pub fn LLVMGetUndef(Ty: &Type) -> &Value;
+
+ // Operations on metadata
+ pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value;
+ pub fn LLVMMDNodeInContext(C: &'a Context, Vals: *const &'a Value, Count: c_uint) -> &'a Value;
+ pub fn LLVMAddNamedMetadataOperand(M: &'a Module, Name: *const c_char, Val: &'a Value);
+
+ // Operations on scalar constants
+ pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value;
+ pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value;
+ pub fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value;
+ pub fn LLVMConstIntGetZExtValue(ConstantVal: &ConstantInt) -> c_ulonglong;
+ pub fn LLVMRustConstInt128Get(
+ ConstantVal: &ConstantInt,
+ SExt: bool,
+ high: &mut u64,
+ low: &mut u64,
+ ) -> bool;
+
+ // Operations on composite constants
+ pub fn LLVMConstStringInContext(
+ C: &Context,
+ Str: *const c_char,
+ Length: c_uint,
+ DontNullTerminate: Bool,
+ ) -> &Value;
+ pub fn LLVMConstStructInContext(
+ C: &'a Context,
+ ConstantVals: *const &'a Value,
+ Count: c_uint,
+ Packed: Bool,
+ ) -> &'a Value;
+
+ pub fn LLVMConstArray(
+ ElementTy: &'a Type,
+ ConstantVals: *const &'a Value,
+ Length: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
+
+ // Constant expressions
+ pub fn LLVMConstInBoundsGEP(
+ ConstantVal: &'a Value,
+ ConstantIndices: *const &'a Value,
+ NumIndices: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMConstZExt(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstPtrToInt(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstIntToPtr(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstBitCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstPointerCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstExtractValue(
+ AggConstant: &Value,
+ IdxList: *const c_uint,
+ NumIdx: c_uint,
+ ) -> &Value;
+
+ // Operations on global variables, functions, and aliases (globals)
+ pub fn LLVMIsDeclaration(Global: &Value) -> Bool;
+ pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage;
+ pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage);
+ pub fn LLVMSetSection(Global: &Value, Section: *const c_char);
+ pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility;
+ pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility);
+ pub fn LLVMGetAlignment(Global: &Value) -> c_uint;
+ pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint);
+ pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass);
+
+ // Operations on global variables
+ pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>;
+ pub fn LLVMAddGlobal(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+ pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>;
+ pub fn LLVMRustGetOrInsertGlobal(
+ M: &'a Module,
+ Name: *const c_char,
+ NameLen: size_t,
+ T: &'a Type,
+ ) -> &'a Value;
+ pub fn LLVMRustInsertPrivateGlobal(M: &'a Module, T: &'a Type) -> &'a Value;
+ pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>;
+ pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>;
+ pub fn LLVMDeleteGlobal(GlobalVar: &Value);
+ pub fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>;
+ pub fn LLVMSetInitializer(GlobalVar: &'a Value, ConstantVal: &'a Value);
+ pub fn LLVMSetThreadLocal(GlobalVar: &Value, IsThreadLocal: Bool);
+ pub fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode);
+ pub fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool;
+ pub fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool);
+ pub fn LLVMRustGetNamedValue(
+ M: &Module,
+ Name: *const c_char,
+ NameLen: size_t,
+ ) -> Option<&Value>;
+ pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
+
+ // Operations on functions
+ pub fn LLVMRustGetOrInsertFunction(
+ M: &'a Module,
+ Name: *const c_char,
+ NameLen: size_t,
+ FunctionTy: &'a Type,
+ ) -> &'a Value;
+ pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
+ pub fn LLVMRustAddAlignmentAttr(Fn: &Value, index: c_uint, bytes: u32);
+ pub fn LLVMRustAddDereferenceableAttr(Fn: &Value, index: c_uint, bytes: u64);
+ pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: &Value, index: c_uint, bytes: u64);
+ pub fn LLVMRustAddByValAttr(Fn: &Value, index: c_uint, ty: &Type);
+ pub fn LLVMRustAddFunctionAttribute(Fn: &Value, index: c_uint, attr: Attribute);
+ pub fn LLVMRustAddFunctionAttrStringValue(
+ Fn: &Value,
+ index: c_uint,
+ Name: *const c_char,
+ Value: *const c_char,
+ );
+ pub fn LLVMRustRemoveFunctionAttributes(Fn: &Value, index: c_uint, attr: Attribute);
+
+ // Operations on parameters
+ pub fn LLVMIsAArgument(Val: &Value) -> Option<&Value>;
+ pub fn LLVMCountParams(Fn: &Value) -> c_uint;
+ pub fn LLVMGetParam(Fn: &Value, Index: c_uint) -> &Value;
+
+ // Operations on basic blocks
+ pub fn LLVMGetBasicBlockParent(BB: &BasicBlock) -> &Value;
+ pub fn LLVMAppendBasicBlockInContext(
+ C: &'a Context,
+ Fn: &'a Value,
+ Name: *const c_char,
+ ) -> &'a BasicBlock;
+ pub fn LLVMDeleteBasicBlock(BB: &BasicBlock);
+
+ // Operations on instructions
+ pub fn LLVMIsAInstruction(Val: &Value) -> Option<&Value>;
+ pub fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock;
+
+ // Operations on call sites
+ pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint);
+ pub fn LLVMRustAddCallSiteAttribute(Instr: &Value, index: c_uint, attr: Attribute);
+ pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: &Value, index: c_uint, bytes: u32);
+ pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
+ pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
+ pub fn LLVMRustAddByValCallSiteAttr(Instr: &Value, index: c_uint, ty: &Type);
+
+ // Operations on load/store instructions (only)
+ pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool);
+
+ // Operations on phi nodes
+ pub fn LLVMAddIncoming(
+ PhiNode: &'a Value,
+ IncomingValues: *const &'a Value,
+ IncomingBlocks: *const &'a BasicBlock,
+ Count: c_uint,
+ );
+
+ // Instruction builders
+ pub fn LLVMCreateBuilderInContext(C: &'a Context) -> &'a mut Builder<'a>;
+ pub fn LLVMPositionBuilderAtEnd(Builder: &Builder<'a>, Block: &'a BasicBlock);
+ pub fn LLVMGetInsertBlock(Builder: &Builder<'a>) -> &'a BasicBlock;
+ pub fn LLVMDisposeBuilder(Builder: &'a mut Builder<'a>);
+
+ // Metadata
+ pub fn LLVMSetCurrentDebugLocation(Builder: &Builder<'a>, L: &'a Value);
+
+ // Terminators
+ pub fn LLVMBuildRetVoid(B: &Builder<'a>) -> &'a Value;
+ pub fn LLVMBuildRet(B: &Builder<'a>, V: &'a Value) -> &'a Value;
+ pub fn LLVMBuildBr(B: &Builder<'a>, Dest: &'a BasicBlock) -> &'a Value;
+ pub fn LLVMBuildCondBr(
+ B: &Builder<'a>,
+ If: &'a Value,
+ Then: &'a BasicBlock,
+ Else: &'a BasicBlock,
+ ) -> &'a Value;
+ pub fn LLVMBuildSwitch(
+ B: &Builder<'a>,
+ V: &'a Value,
+ Else: &'a BasicBlock,
+ NumCases: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildInvoke(
+ B: &Builder<'a>,
+ Fn: &'a Value,
+ Args: *const &'a Value,
+ NumArgs: c_uint,
+ Then: &'a BasicBlock,
+ Catch: &'a BasicBlock,
+ Bundle: Option<&OperandBundleDef<'a>>,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildLandingPad(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ PersFn: &'a Value,
+ NumClauses: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildResume(B: &Builder<'a>, Exn: &'a Value) -> &'a Value;
+ pub fn LLVMBuildUnreachable(B: &Builder<'a>) -> &'a Value;
+
+ pub fn LLVMRustBuildCleanupPad(
+ B: &Builder<'a>,
+ ParentPad: Option<&'a Value>,
+ ArgCnt: c_uint,
+ Args: *const &'a Value,
+ Name: *const c_char,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCleanupRet(
+ B: &Builder<'a>,
+ CleanupPad: &'a Value,
+ UnwindBB: Option<&'a BasicBlock>,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCatchPad(
+ B: &Builder<'a>,
+ ParentPad: &'a Value,
+ ArgCnt: c_uint,
+ Args: *const &'a Value,
+ Name: *const c_char,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCatchRet(
+ B: &Builder<'a>,
+ Pad: &'a Value,
+ BB: &'a BasicBlock,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCatchSwitch(
+ Builder: &Builder<'a>,
+ ParentPad: Option<&'a Value>,
+ BB: Option<&'a BasicBlock>,
+ NumHandlers: c_uint,
+ Name: *const c_char,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustAddHandler(CatchSwitch: &'a Value, Handler: &'a BasicBlock);
+ pub fn LLVMSetPersonalityFn(Func: &'a Value, Pers: &'a Value);
+
+ // Add a case to the switch instruction
+ pub fn LLVMAddCase(Switch: &'a Value, OnVal: &'a Value, Dest: &'a BasicBlock);
+
+ // Add a clause to the landing pad instruction
+ pub fn LLVMAddClause(LandingPad: &'a Value, ClauseVal: &'a Value);
+
+ // Set the cleanup on a landing pad instruction
+ pub fn LLVMSetCleanup(LandingPad: &Value, Val: Bool);
+
+ // Arithmetic
+ pub fn LLVMBuildAdd(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFAdd(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSub(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFSub(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildMul(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFMul(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildUDiv(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExactUDiv(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSDiv(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExactSDiv(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFDiv(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildURem(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSRem(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFRem(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildShl(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildLShr(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildAShr(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNSWAdd(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNUWAdd(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNSWSub(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNUWSub(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNSWMul(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNUWMul(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildAnd(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildOr(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildXor(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNeg(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildFNeg(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildNot(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMRustSetHasUnsafeAlgebra(Instr: &Value);
+
+ // Memory
+ pub fn LLVMBuildAlloca(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildArrayAlloca(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Val: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildLoad(B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char) -> &'a Value;
+
+ pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
+
+ pub fn LLVMBuildGEP(
+ B: &Builder<'a>,
+ Pointer: &'a Value,
+ Indices: *const &'a Value,
+ NumIndices: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildInBoundsGEP(
+ B: &Builder<'a>,
+ Pointer: &'a Value,
+ Indices: *const &'a Value,
+ NumIndices: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildStructGEP(
+ B: &Builder<'a>,
+ Pointer: &'a Value,
+ Idx: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ // Casts
+ pub fn LLVMBuildTrunc(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildZExt(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSExt(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPToUI(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPToSI(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildUIToFP(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSIToFP(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPTrunc(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPExt(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildPtrToInt(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildIntToPtr(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildBitCast(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildPointerCast(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildIntCast(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ IsSized: bool,
+ ) -> &'a Value;
+
+ // Comparisons
+ pub fn LLVMBuildICmp(
+ B: &Builder<'a>,
+ Op: c_uint,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFCmp(
+ B: &Builder<'a>,
+ Op: c_uint,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ // Miscellaneous instructions
+ pub fn LLVMBuildPhi(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+ pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &'a Value;
+ pub fn LLVMRustBuildCall(
+ B: &Builder<'a>,
+ Fn: &'a Value,
+ Args: *const &'a Value,
+ NumArgs: c_uint,
+ Bundle: Option<&OperandBundleDef<'a>>,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildMemCpy(
+ B: &Builder<'a>,
+ Dst: &'a Value,
+ DstAlign: c_uint,
+ Src: &'a Value,
+ SrcAlign: c_uint,
+ Size: &'a Value,
+ IsVolatile: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildMemMove(
+ B: &Builder<'a>,
+ Dst: &'a Value,
+ DstAlign: c_uint,
+ Src: &'a Value,
+ SrcAlign: c_uint,
+ Size: &'a Value,
+ IsVolatile: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildMemSet(
+ B: &Builder<'a>,
+ Dst: &'a Value,
+ DstAlign: c_uint,
+ Val: &'a Value,
+ Size: &'a Value,
+ IsVolatile: bool,
+ ) -> &'a Value;
+ pub fn LLVMBuildSelect(
+ B: &Builder<'a>,
+ If: &'a Value,
+ Then: &'a Value,
+ Else: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildVAArg(
+ B: &Builder<'a>,
+ list: &'a Value,
+ Ty: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExtractElement(
+ B: &Builder<'a>,
+ VecVal: &'a Value,
+ Index: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildInsertElement(
+ B: &Builder<'a>,
+ VecVal: &'a Value,
+ EltVal: &'a Value,
+ Index: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildShuffleVector(
+ B: &Builder<'a>,
+ V1: &'a Value,
+ V2: &'a Value,
+ Mask: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExtractValue(
+ B: &Builder<'a>,
+ AggVal: &'a Value,
+ Index: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildInsertValue(
+ B: &Builder<'a>,
+ AggVal: &'a Value,
+ EltVal: &'a Value,
+ Index: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildVectorReduceFAdd(
+ B: &Builder<'a>,
+ Acc: &'a Value,
+ Src: &'a Value,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceFMul(
+ B: &Builder<'a>,
+ Acc: &'a Value,
+ Src: &'a Value,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceAdd(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceMul(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceAnd(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceOr(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceXor(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceMin(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsSigned: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceMax(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsSigned: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceFMin(B: &Builder<'a>, Src: &'a Value, IsNaN: bool)
+ -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceFMax(B: &Builder<'a>, Src: &'a Value, IsNaN: bool)
+ -> &'a Value;
+
+    pub fn LLVMRustBuildMinNum(B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value) -> &'a Value;
+    pub fn LLVMRustBuildMaxNum(B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value) -> &'a Value;
+
+ // Atomic Operations
+ pub fn LLVMRustBuildAtomicLoad(
+ B: &Builder<'a>,
+ PointerVal: &'a Value,
+ Name: *const c_char,
+ Order: AtomicOrdering,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildAtomicStore(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ Ptr: &'a Value,
+ Order: AtomicOrdering,
+ ) -> &'a Value;
+
+    pub fn LLVMRustBuildAtomicCmpXchg(
+        B: &Builder<'a>,
+        Dst: &'a Value,
+        CMP: &'a Value,
+        Src: &'a Value,
+        Order: AtomicOrdering,
+        FailureOrder: AtomicOrdering,
+        Weak: Bool,
+    ) -> &'a Value;
+
+    pub fn LLVMBuildAtomicRMW(
+        B: &Builder<'a>,
+        Op: AtomicRmwBinOp,
+        Dst: &'a Value,
+        Src: &'a Value,
+        Order: AtomicOrdering,
+        SingleThreaded: Bool,
+    ) -> &'a Value;
+
+ pub fn LLVMRustBuildAtomicFence(
+ B: &Builder<'_>,
+ Order: AtomicOrdering,
+ Scope: SynchronizationScope,
+ );
+
+ /// Writes a module to the specified path. Returns 0 on success.
+ pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int;
+
+ /// Creates a pass manager.
+ pub fn LLVMCreatePassManager() -> &'a mut PassManager<'a>;
+
+ /// Creates a function-by-function pass manager
+ pub fn LLVMCreateFunctionPassManagerForModule(M: &'a Module) -> &'a mut PassManager<'a>;
+
+ /// Disposes a pass manager.
+ pub fn LLVMDisposePassManager(PM: &'a mut PassManager<'a>);
+
+ /// Runs a pass manager on a module.
+ pub fn LLVMRunPassManager(PM: &PassManager<'a>, M: &'a Module) -> Bool;
+
+ pub fn LLVMInitializePasses();
+
+ pub fn LLVMTimeTraceProfilerInitialize();
+
+ pub fn LLVMTimeTraceProfilerFinish(FileName: *const c_char);
+
+ pub fn LLVMAddAnalysisPasses(T: &'a TargetMachine, PM: &PassManager<'a>);
+
+ pub fn LLVMPassManagerBuilderCreate() -> &'static mut PassManagerBuilder;
+ pub fn LLVMPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder);
+ pub fn LLVMPassManagerBuilderSetSizeLevel(PMB: &PassManagerBuilder, Value: Bool);
+ pub fn LLVMPassManagerBuilderSetDisableUnrollLoops(PMB: &PassManagerBuilder, Value: Bool);
+ pub fn LLVMPassManagerBuilderUseInlinerWithThreshold(
+ PMB: &PassManagerBuilder,
+ threshold: c_uint,
+ );
+ pub fn LLVMPassManagerBuilderPopulateModulePassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ );
+
+ pub fn LLVMPassManagerBuilderPopulateFunctionPassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ );
+ pub fn LLVMPassManagerBuilderPopulateLTOPassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ Internalize: Bool,
+ RunInliner: Bool,
+ );
+ pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ );
+
+ // Stuff that's in llvm-wrapper/ because it's not upstream yet.
+
+ /// Opens an object file.
+ pub fn LLVMCreateObjectFile(
+ MemBuf: &'static mut MemoryBuffer,
+ ) -> Option<&'static mut ObjectFile>;
+ /// Closes an object file.
+ pub fn LLVMDisposeObjectFile(ObjFile: &'static mut ObjectFile);
+
+ /// Enumerates the sections in an object file.
+ pub fn LLVMGetSections(ObjFile: &'a ObjectFile) -> &'a mut SectionIterator<'a>;
+ /// Destroys a section iterator.
+ pub fn LLVMDisposeSectionIterator(SI: &'a mut SectionIterator<'a>);
+    /// Returns `true` if the section iterator is at the end of the section
+    /// list.
+    pub fn LLVMIsSectionIteratorAtEnd(ObjFile: &'a ObjectFile, SI: &SectionIterator<'a>) -> Bool;
+ /// Moves the section iterator to point to the next section.
+ pub fn LLVMMoveToNextSection(SI: &SectionIterator<'_>);
+ /// Returns the current section size.
+ pub fn LLVMGetSectionSize(SI: &SectionIterator<'_>) -> c_ulonglong;
+ /// Returns the current section contents as a string buffer.
+ pub fn LLVMGetSectionContents(SI: &SectionIterator<'_>) -> *const c_char;
+
+ /// Reads the given file and returns it as a memory buffer. Use
+ /// LLVMDisposeMemoryBuffer() to get rid of it.
+ pub fn LLVMRustCreateMemoryBufferWithContentsOfFile(
+ Path: *const c_char,
+ ) -> Option<&'static mut MemoryBuffer>;
+
+ pub fn LLVMStartMultithreaded() -> Bool;
+
+ /// Returns a string describing the last error caused by an LLVMRust* call.
+ pub fn LLVMRustGetLastError() -> *const c_char;
+
+ /// Print the pass timings since static dtors aren't picking them up.
+ pub fn LLVMRustPrintPassTimings();
+
+ pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type;
+
+ pub fn LLVMStructSetBody(
+ StructTy: &'a Type,
+ ElementTypes: *const &'a Type,
+ ElementCount: c_uint,
+ Packed: Bool,
+ );
+
+ /// Prepares inline assembly.
+ pub fn LLVMRustInlineAsm(
+ Ty: &Type,
+ AsmString: *const c_char,
+ AsmStringLen: size_t,
+ Constraints: *const c_char,
+ ConstraintsLen: size_t,
+ SideEffects: Bool,
+ AlignStack: Bool,
+ Dialect: AsmDialect,
+ ) -> &Value;
+ pub fn LLVMRustInlineAsmVerify(
+ Ty: &Type,
+ Constraints: *const c_char,
+ ConstraintsLen: size_t,
+ ) -> bool;
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
+ Filenames: *const *const c_char,
+ FilenamesLen: size_t,
+ BufferOut: &RustString,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteMappingToBuffer(
+ VirtualFileMappingIDs: *const c_uint,
+ NumVirtualFileMappingIDs: c_uint,
+ Expressions: *const coverage_map::CounterExpression,
+ NumExpressions: c_uint,
+ MappingRegions: *mut coverageinfo::CounterMappingRegion,
+ NumMappingRegions: c_uint,
+ BufferOut: &RustString,
+ );
+
+ pub fn LLVMRustCoverageCreatePGOFuncNameVar(F: &'a Value, FuncName: *const c_char)
+ -> &'a Value;
+ pub fn LLVMRustCoverageComputeHash(Name: *const c_char) -> u64;
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteSectionNameToString(M: &Module, Str: &RustString);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteMappingVarNameToString(Str: &RustString);
+
+ pub fn LLVMRustCoverageMappingVersion() -> u32;
+ pub fn LLVMRustDebugMetadataVersion() -> u32;
+ pub fn LLVMRustVersionMajor() -> u32;
+ pub fn LLVMRustVersionMinor() -> u32;
+
+ pub fn LLVMRustAddModuleFlag(M: &Module, name: *const c_char, value: u32);
+
+ pub fn LLVMRustMetadataAsValue(C: &'a Context, MD: &'a Metadata) -> &'a Value;
+
+ pub fn LLVMRustDIBuilderCreate(M: &'a Module) -> &'a mut DIBuilder<'a>;
+
+ pub fn LLVMRustDIBuilderDispose(Builder: &'a mut DIBuilder<'a>);
+
+ pub fn LLVMRustDIBuilderFinalize(Builder: &DIBuilder<'_>);
+
+ pub fn LLVMRustDIBuilderCreateCompileUnit(
+ Builder: &DIBuilder<'a>,
+ Lang: c_uint,
+ File: &'a DIFile,
+ Producer: *const c_char,
+ ProducerLen: size_t,
+ isOptimized: bool,
+ Flags: *const c_char,
+ RuntimeVer: c_uint,
+ SplitName: *const c_char,
+ SplitNameLen: size_t,
+ kind: DebugEmissionKind,
+ ) -> &'a DIDescriptor;
+
+ pub fn LLVMRustDIBuilderCreateFile(
+ Builder: &DIBuilder<'a>,
+ Filename: *const c_char,
+ FilenameLen: size_t,
+ Directory: *const c_char,
+ DirectoryLen: size_t,
+ CSKind: ChecksumKind,
+ Checksum: *const c_char,
+ ChecksumLen: size_t,
+ ) -> &'a DIFile;
+
+ pub fn LLVMRustDIBuilderCreateSubroutineType(
+ Builder: &DIBuilder<'a>,
+ ParameterTypes: &'a DIArray,
+ ) -> &'a DICompositeType;
+
+ pub fn LLVMRustDIBuilderCreateFunction(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ LinkageName: *const c_char,
+ LinkageNameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ ScopeLine: c_uint,
+ Flags: DIFlags,
+ SPFlags: DISPFlags,
+ Fn: &'a Value,
+ TParam: &'a DIArray,
+ Decl: Option<&'a DIDescriptor>,
+ ) -> &'a DISubprogram;
+
+ pub fn LLVMRustDIBuilderCreateBasicType(
+ Builder: &DIBuilder<'a>,
+ Name: *const c_char,
+ NameLen: size_t,
+ SizeInBits: u64,
+ Encoding: c_uint,
+ ) -> &'a DIBasicType;
+
+ pub fn LLVMRustDIBuilderCreateTypedef(
+ Builder: &DIBuilder<'a>,
+ Type: &'a DIBasicType,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Scope: Option<&'a DIScope>,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMRustDIBuilderCreatePointerType(
+ Builder: &DIBuilder<'a>,
+ PointeeTy: &'a DIType,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ AddressSpace: c_uint,
+ Name: *const c_char,
+ NameLen: size_t,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMRustDIBuilderCreateStructType(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIDescriptor>,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ DerivedFrom: Option<&'a DIType>,
+ Elements: &'a DIArray,
+ RunTimeLang: c_uint,
+ VTableHolder: Option<&'a DIType>,
+ UniqueId: *const c_char,
+ UniqueIdLen: size_t,
+ ) -> &'a DICompositeType;
+
+ pub fn LLVMRustDIBuilderCreateMemberType(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ OffsetInBits: u64,
+ Flags: DIFlags,
+ Ty: &'a DIType,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMRustDIBuilderCreateVariantMemberType(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ OffsetInBits: u64,
+ Discriminant: Option<&'a Value>,
+ Flags: DIFlags,
+ Ty: &'a DIType,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderCreateLexicalBlock(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ File: &'a DIFile,
+ Line: c_uint,
+ Col: c_uint,
+ ) -> &'a DILexicalBlock;
+
+ pub fn LLVMRustDIBuilderCreateLexicalBlockFile(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ File: &'a DIFile,
+ ) -> &'a DILexicalBlock;
+
+ pub fn LLVMRustDIBuilderCreateStaticVariable(
+ Builder: &DIBuilder<'a>,
+ Context: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ LinkageName: *const c_char,
+ LinkageNameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ isLocalToUnit: bool,
+ Val: &'a Value,
+ Decl: Option<&'a DIDescriptor>,
+ AlignInBits: u32,
+ ) -> &'a DIGlobalVariableExpression;
+
+ pub fn LLVMRustDIBuilderCreateVariable(
+ Builder: &DIBuilder<'a>,
+ Tag: c_uint,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ AlwaysPreserve: bool,
+ Flags: DIFlags,
+ ArgNo: c_uint,
+ AlignInBits: u32,
+ ) -> &'a DIVariable;
+
+ pub fn LLVMRustDIBuilderCreateArrayType(
+ Builder: &DIBuilder<'a>,
+ Size: u64,
+ AlignInBits: u32,
+ Ty: &'a DIType,
+ Subscripts: &'a DIArray,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderGetOrCreateSubrange(
+ Builder: &DIBuilder<'a>,
+ Lo: i64,
+ Count: i64,
+ ) -> &'a DISubrange;
+
+ pub fn LLVMRustDIBuilderGetOrCreateArray(
+ Builder: &DIBuilder<'a>,
+ Ptr: *const Option<&'a DIDescriptor>,
+ Count: c_uint,
+ ) -> &'a DIArray;
+
+ pub fn LLVMRustDIBuilderInsertDeclareAtEnd(
+ Builder: &DIBuilder<'a>,
+ Val: &'a Value,
+ VarInfo: &'a DIVariable,
+ AddrOps: *const i64,
+ AddrOpsCount: c_uint,
+ DL: &'a Value,
+ InsertAtEnd: &'a BasicBlock,
+ ) -> &'a Value;
+
+ pub fn LLVMRustDIBuilderCreateEnumerator(
+ Builder: &DIBuilder<'a>,
+ Name: *const c_char,
+ NameLen: size_t,
+ Value: i64,
+ IsUnsigned: bool,
+ ) -> &'a DIEnumerator;
+
+ pub fn LLVMRustDIBuilderCreateEnumerationType(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Elements: &'a DIArray,
+ ClassType: &'a DIType,
+ IsScoped: bool,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderCreateUnionType(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ Elements: Option<&'a DIArray>,
+ RunTimeLang: c_uint,
+ UniqueId: *const c_char,
+ UniqueIdLen: size_t,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderCreateVariantPart(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ Discriminator: Option<&'a DIDerivedType>,
+ Elements: &'a DIArray,
+ UniqueId: *const c_char,
+ UniqueIdLen: size_t,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr);
+
+ pub fn LLVMRustDIBuilderCreateTemplateTypeParameter(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ Ty: &'a DIType,
+ ) -> &'a DITemplateTypeParameter;
+
+ pub fn LLVMRustDIBuilderCreateNameSpace(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ ExportSymbols: bool,
+ ) -> &'a DINameSpace;
+
+ pub fn LLVMRustDICompositeTypeReplaceArrays(
+ Builder: &DIBuilder<'a>,
+ CompositeType: &'a DIType,
+ Elements: Option<&'a DIArray>,
+ Params: Option<&'a DIArray>,
+ );
+
+ pub fn LLVMRustDIBuilderCreateDebugLocation(
+ Context: &'a Context,
+ Line: c_uint,
+ Column: c_uint,
+ Scope: &'a DIScope,
+ InlinedAt: Option<&'a Metadata>,
+ ) -> &'a Value;
+ pub fn LLVMRustDIBuilderCreateOpDeref() -> i64;
+ pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> i64;
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteTypeToString(Type: &Type, s: &RustString);
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteValueToString(value_ref: &Value, s: &RustString);
+
+ pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>;
+
+ pub fn LLVMRustPassKind(Pass: &Pass) -> PassKind;
+ pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>;
+ pub fn LLVMRustCreateAddressSanitizerFunctionPass(Recover: bool) -> &'static mut Pass;
+ pub fn LLVMRustCreateModuleAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
+ pub fn LLVMRustCreateMemorySanitizerPass(
+ TrackOrigins: c_int,
+ Recover: bool,
+ ) -> &'static mut Pass;
+ pub fn LLVMRustCreateThreadSanitizerPass() -> &'static mut Pass;
+ pub fn LLVMRustAddPass(PM: &PassManager<'_>, Pass: &'static mut Pass);
+ pub fn LLVMRustAddLastExtensionPasses(
+ PMB: &PassManagerBuilder,
+ Passes: *const &'static mut Pass,
+ NumPasses: size_t,
+ );
+
+ pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
+
+ pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine);
+ pub fn LLVMRustPrintTargetFeatures(T: &TargetMachine);
+
+ pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+ pub fn LLVMRustCreateTargetMachine(
+ Triple: *const c_char,
+ CPU: *const c_char,
+ Features: *const c_char,
+ Abi: *const c_char,
+ Model: CodeModel,
+ Reloc: RelocModel,
+ Level: CodeGenOptLevel,
+ UseSoftFP: bool,
+ FunctionSections: bool,
+ DataSections: bool,
+ TrapUnreachable: bool,
+ Singlethread: bool,
+ AsmComments: bool,
+ EmitStackSizeSection: bool,
+ RelaxELFRelocations: bool,
+ UseInitArray: bool,
+ ) -> Option<&'static mut TargetMachine>;
+ pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
+ pub fn LLVMRustAddBuilderLibraryInfo(
+ PMB: &'a PassManagerBuilder,
+ M: &'a Module,
+ DisableSimplifyLibCalls: bool,
+ );
+ pub fn LLVMRustConfigurePassManagerBuilder(
+ PMB: &PassManagerBuilder,
+ OptLevel: CodeGenOptLevel,
+ MergeFunctions: bool,
+ SLPVectorize: bool,
+ LoopVectorize: bool,
+ PrepareForThinLTO: bool,
+ PGOGenPath: *const c_char,
+ PGOUsePath: *const c_char,
+ );
+ pub fn LLVMRustAddLibraryInfo(
+ PM: &PassManager<'a>,
+ M: &'a Module,
+ DisableSimplifyLibCalls: bool,
+ );
+ pub fn LLVMRustRunFunctionPassManager(PM: &PassManager<'a>, M: &'a Module);
+ pub fn LLVMRustWriteOutputFile(
+ T: &'a TargetMachine,
+ PM: &PassManager<'a>,
+ M: &'a Module,
+ Output: *const c_char,
+ FileType: FileType,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustOptimizeWithNewPassManager(
+ M: &'a Module,
+ TM: &'a TargetMachine,
+ OptLevel: PassBuilderOptLevel,
+ OptStage: OptStage,
+ NoPrepopulatePasses: bool,
+ VerifyIR: bool,
+ UseThinLTOBuffers: bool,
+ MergeFunctions: bool,
+ UnrollLoops: bool,
+ SLPVectorize: bool,
+ LoopVectorize: bool,
+ DisableSimplifyLibCalls: bool,
+ EmitLifetimeMarkers: bool,
+ SanitizerOptions: Option<&SanitizerOptions>,
+ PGOGenPath: *const c_char,
+ PGOUsePath: *const c_char,
+ llvm_selfprofiler: *mut c_void,
+ begin_callback: SelfProfileBeforePassCallback,
+ end_callback: SelfProfileAfterPassCallback,
+ );
+ pub fn LLVMRustPrintModule(
+ M: &'a Module,
+ Output: *const c_char,
+ Demangle: extern "C" fn(*const c_char, size_t, *mut c_char, size_t) -> size_t,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char);
+ pub fn LLVMRustPrintPasses();
+ pub fn LLVMRustGetInstructionCount(M: &Module) -> u32;
+ pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char);
+ pub fn LLVMRustAddAlwaysInlinePass(P: &PassManagerBuilder, AddLifetimes: bool);
+ pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t);
+ pub fn LLVMRustMarkAllFunctionsNounwind(M: &Module);
+
+ pub fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>;
+ pub fn LLVMRustArchiveIteratorNew(AR: &'a Archive) -> &'a mut ArchiveIterator<'a>;
+ pub fn LLVMRustArchiveIteratorNext(
+ AIR: &ArchiveIterator<'a>,
+ ) -> Option<&'a mut ArchiveChild<'a>>;
+ pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
+ pub fn LLVMRustArchiveChildData(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
+ pub fn LLVMRustArchiveChildFree(ACR: &'a mut ArchiveChild<'a>);
+ pub fn LLVMRustArchiveIteratorFree(AIR: &'a mut ArchiveIterator<'a>);
+ pub fn LLVMRustDestroyArchive(AR: &'static mut Archive);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustGetSectionName(
+ SI: &SectionIterator<'_>,
+ data: &mut Option<std::ptr::NonNull<c_char>>,
+ ) -> size_t;
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteTwineToString(T: &Twine, s: &RustString);
+
+ pub fn LLVMContextSetDiagnosticHandler(
+ C: &Context,
+ Handler: DiagnosticHandler,
+ DiagnosticContext: *mut c_void,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustUnpackOptimizationDiagnostic(
+ DI: &'a DiagnosticInfo,
+ pass_name_out: &RustString,
+ function_out: &mut Option<&'a Value>,
+ loc_line_out: &mut c_uint,
+ loc_column_out: &mut c_uint,
+ loc_filename_out: &RustString,
+ message_out: &RustString,
+ );
+
+ pub fn LLVMRustUnpackInlineAsmDiagnostic(
+ DI: &'a DiagnosticInfo,
+ level_out: &mut DiagnosticLevel,
+ cookie_out: &mut c_uint,
+ message_out: &mut Option<&'a Twine>,
+ instruction_out: &mut Option<&'a Value>,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteDiagnosticInfoToString(DI: &DiagnosticInfo, s: &RustString);
+ pub fn LLVMRustGetDiagInfoKind(DI: &DiagnosticInfo) -> DiagnosticKind;
+
+ pub fn LLVMRustSetInlineAsmDiagnosticHandler(
+ C: &Context,
+ H: InlineAsmDiagHandler,
+ CX: *mut c_void,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustUnpackSMDiagnostic(
+ d: &SMDiagnostic,
+ message_out: &RustString,
+ buffer_out: &RustString,
+ level_out: &mut DiagnosticLevel,
+ loc_out: &mut c_uint,
+ ranges_out: *mut c_uint,
+ num_ranges: &mut usize,
+ ) -> bool;
+
+ pub fn LLVMRustWriteArchive(
+ Dst: *const c_char,
+ NumMembers: size_t,
+ Members: *const &RustArchiveMember<'_>,
+ WriteSymbtab: bool,
+ Kind: ArchiveKind,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustArchiveMemberNew(
+ Filename: *const c_char,
+ Name: *const c_char,
+ Child: Option<&ArchiveChild<'a>>,
+ ) -> &'a mut RustArchiveMember<'a>;
+ pub fn LLVMRustArchiveMemberFree(Member: &'a mut RustArchiveMember<'a>);
+
+ pub fn LLVMRustSetDataLayoutFromTargetMachine(M: &'a Module, TM: &'a TargetMachine);
+
+ pub fn LLVMRustBuildOperandBundleDef(
+ Name: *const c_char,
+ Inputs: *const &'a Value,
+ NumInputs: c_uint,
+ ) -> &'a mut OperandBundleDef<'a>;
+ pub fn LLVMRustFreeOperandBundleDef(Bundle: &'a mut OperandBundleDef<'a>);
+
+ pub fn LLVMRustPositionBuilderAtStart(B: &Builder<'a>, BB: &'a BasicBlock);
+
+ pub fn LLVMRustSetComdat(M: &'a Module, V: &'a Value, Name: *const c_char, NameLen: size_t);
+ pub fn LLVMRustUnsetComdat(V: &Value);
+ pub fn LLVMRustSetModulePICLevel(M: &Module);
+ pub fn LLVMRustSetModulePIELevel(M: &Module);
+ pub fn LLVMRustModuleBufferCreate(M: &Module) -> &'static mut ModuleBuffer;
+ pub fn LLVMRustModuleBufferPtr(p: &ModuleBuffer) -> *const u8;
+ pub fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize;
+ pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer);
+ pub fn LLVMRustModuleCost(M: &Module) -> u64;
+
+ pub fn LLVMRustThinLTOBufferCreate(M: &Module) -> &'static mut ThinLTOBuffer;
+ pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer);
+ pub fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char;
+ pub fn LLVMRustThinLTOBufferLen(M: &ThinLTOBuffer) -> size_t;
+ pub fn LLVMRustCreateThinLTOData(
+ Modules: *const ThinLTOModule,
+ NumModules: c_uint,
+ PreservedSymbols: *const *const c_char,
+ PreservedSymbolsLen: c_uint,
+ ) -> Option<&'static mut ThinLTOData>;
+ pub fn LLVMRustPrepareThinLTORename(
+ Data: &ThinLTOData,
+ Module: &Module,
+ Target: &TargetMachine,
+ ) -> bool;
+ pub fn LLVMRustPrepareThinLTOResolveWeak(Data: &ThinLTOData, Module: &Module) -> bool;
+ pub fn LLVMRustPrepareThinLTOInternalize(Data: &ThinLTOData, Module: &Module) -> bool;
+ pub fn LLVMRustPrepareThinLTOImport(
+ Data: &ThinLTOData,
+ Module: &Module,
+ Target: &TargetMachine,
+ ) -> bool;
+ pub fn LLVMRustGetThinLTOModuleImports(
+ Data: *const ThinLTOData,
+ ModuleNameCallback: ThinLTOModuleNameCallback,
+ CallbackPayload: *mut c_void,
+ );
+ pub fn LLVMRustFreeThinLTOData(Data: &'static mut ThinLTOData);
+ pub fn LLVMRustParseBitcodeForLTO(
+ Context: &Context,
+ Data: *const u8,
+ len: usize,
+ Identifier: *const c_char,
+ ) -> Option<&Module>;
+ pub fn LLVMRustGetBitcodeSliceFromObjectData(
+ Data: *const u8,
+ len: usize,
+ out_len: &mut usize,
+ ) -> *const u8;
+ pub fn LLVMRustThinLTOGetDICompileUnit(
+ M: &Module,
+ CU1: &mut *mut c_void,
+ CU2: &mut *mut c_void,
+ );
+ pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void);
+
+ pub fn LLVMRustLinkerNew(M: &'a Module) -> &'a mut Linker<'a>;
+ pub fn LLVMRustLinkerAdd(
+ linker: &Linker<'_>,
+ bytecode: *const c_char,
+ bytecode_len: usize,
+ ) -> bool;
+ pub fn LLVMRustLinkerFree(linker: &'a mut Linker<'a>);
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
new file mode 100644
index 0000000..ed9b991
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -0,0 +1,316 @@
+#![allow(non_snake_case)]
+
+pub use self::AtomicRmwBinOp::*;
+pub use self::CallConv::*;
+pub use self::CodeGenOptSize::*;
+pub use self::IntPredicate::*;
+pub use self::Linkage::*;
+pub use self::MetadataType::*;
+pub use self::RealPredicate::*;
+
+use libc::c_uint;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_llvm::RustString;
+use std::cell::RefCell;
+use std::ffi::{CStr, CString};
+use std::str::FromStr;
+use std::string::FromUtf8Error;
+
+pub mod archive_ro;
+pub mod diagnostic;
+mod ffi;
+
+pub use self::ffi::*;
+
+impl LLVMRustResult {
+ pub fn into_result(self) -> Result<(), ()> {
+ match self {
+ LLVMRustResult::Success => Ok(()),
+ LLVMRustResult::Failure => Err(()),
+ }
+ }
+}
+
/// Attaches a string attribute with an associated string value
/// (key/value form) to the function `llfn` at position `idx`.
pub fn AddFunctionAttrStringValue(llfn: &'a Value, idx: AttributePlace, attr: &CStr, value: &CStr) {
    unsafe {
        LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), value.as_ptr())
    }
}

/// Attaches a value-less string attribute to `llfn`; the null pointer is
/// how the same C entry point is told there is no value part.
pub fn AddFunctionAttrString(llfn: &'a Value, idx: AttributePlace, attr: &CStr) {
    unsafe {
        LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), std::ptr::null())
    }
}
+
+#[derive(Copy, Clone)]
+pub enum AttributePlace {
+ ReturnValue,
+ Argument(u32),
+ Function,
+}
+
+impl AttributePlace {
+ pub fn as_uint(self) -> c_uint {
+ match self {
+ AttributePlace::ReturnValue => 0,
+ AttributePlace::Argument(i) => 1 + i,
+ AttributePlace::Function => !0,
+ }
+ }
+}
+
/// Size-optimization level for code generation. `#[repr(C)]` with explicit
/// discriminants: the numeric values must stay in sync with the C++ side.
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum CodeGenOptSize {
    CodeGenOptSizeNone = 0,
    CodeGenOptSizeDefault = 1,
    CodeGenOptSizeAggressive = 2,
}
+
+impl FromStr for ArchiveKind {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "gnu" => Ok(ArchiveKind::K_GNU),
+ "bsd" => Ok(ArchiveKind::K_BSD),
+ "darwin" => Ok(ArchiveKind::K_DARWIN),
+ "coff" => Ok(ArchiveKind::K_COFF),
+ _ => Err(()),
+ }
+ }
+}
+
/// Sets the calling convention on a call/invoke instruction.
pub fn SetInstructionCallConv(instr: &'a Value, cc: CallConv) {
    unsafe {
        LLVMSetInstructionCallConv(instr, cc as c_uint);
    }
}
/// Sets the calling convention on a function.
pub fn SetFunctionCallConv(fn_: &'a Value, cc: CallConv) {
    unsafe {
        LLVMSetFunctionCallConv(fn_, cc as c_uint);
    }
}
+
+// Externally visible symbols that might appear in multiple codegen units need to appear in
+// their own comdat section so that the duplicates can be discarded at link time. This can for
+// example happen for generics when using multiple codegen units. This function simply uses the
+// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the
+// function.
+// For more details on COMDAT sections see e.g., http://www.airs.com/blog/archives/52
pub fn SetUniqueComdat(llmod: &Module, val: &'a Value) {
    unsafe {
        // Use the value's own symbol name as the comdat key so the comdat
        // group is in a 1-to-1 relationship with the value (see the comment
        // above on why externally visible symbols need their own comdat).
        let name = get_value_name(val);
        LLVMRustSetComdat(llmod, val, name.as_ptr().cast(), name.len());
    }
}

/// Removes `val` from whatever comdat group it was placed in.
pub fn UnsetComdat(val: &'a Value) {
    unsafe {
        LLVMRustUnsetComdat(val);
    }
}

/// Sets the unnamed-address property of a global.
pub fn SetUnnamedAddress(global: &'a Value, unnamed: UnnamedAddr) {
    unsafe {
        LLVMSetUnnamedAddress(global, unnamed);
    }
}

/// Marks or unmarks a global as thread-local.
pub fn set_thread_local(global: &'a Value, is_thread_local: bool) {
    unsafe {
        LLVMSetThreadLocal(global, is_thread_local as Bool);
    }
}
/// Sets the thread-local storage model used for a thread-local global.
pub fn set_thread_local_mode(global: &'a Value, mode: ThreadLocalMode) {
    unsafe {
        LLVMSetThreadLocalMode(global, mode);
    }
}
+
impl Attribute {
    /// Adds this attribute to `llfn` at the given place.
    pub fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        unsafe { LLVMRustAddFunctionAttribute(llfn, idx.as_uint(), *self) }
    }

    /// Adds this attribute to a specific call site instead of a function.
    pub fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        unsafe { LLVMRustAddCallSiteAttribute(callsite, idx.as_uint(), *self) }
    }

    /// Removes this attribute from `llfn` at the given place.
    pub fn unapply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        unsafe { LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), *self) }
    }

    /// Adds or removes the attribute depending on `set`.
    pub fn toggle_llfn(&self, idx: AttributePlace, llfn: &Value, set: bool) {
        if set {
            self.apply_llfn(idx, llfn);
        } else {
            self.unapply_llfn(idx, llfn);
        }
    }
}
+
+// Memory-managed interface to object files.
+
/// Owning wrapper around an LLVM object-file handle; disposes it on drop.
pub struct ObjectFile {
    pub llof: &'static mut ffi::ObjectFile,
}

// NOTE(review): the handle is only reachable through this owning wrapper,
// which is presumed safe to move across threads — confirm against the
// C++ side before relying on it.
unsafe impl Send for ObjectFile {}

impl ObjectFile {
    // This will take ownership of llmb
    pub fn new(llmb: &'static mut MemoryBuffer) -> Option<ObjectFile> {
        unsafe {
            // `None` here means LLVM rejected the buffer; propagate that.
            let llof = LLVMCreateObjectFile(llmb)?;
            Some(ObjectFile { llof })
        }
    }
}

impl Drop for ObjectFile {
    fn drop(&mut self) {
        unsafe {
            // Reborrow through a raw pointer so the `&'static mut` can be
            // handed back to LLVM for disposal.
            LLVMDisposeObjectFile(&mut *(self.llof as *mut _));
        }
    }
}
+
+// Memory-managed interface to section iterators.
+
/// Owning wrapper around an LLVM section iterator; disposes it on drop.
pub struct SectionIter<'a> {
    pub llsi: &'a mut SectionIterator<'a>,
}

impl Drop for SectionIter<'a> {
    fn drop(&mut self) {
        unsafe {
            LLVMDisposeSectionIterator(&mut *(self.llsi as *mut _));
        }
    }
}

/// Creates a section iterator over `llof`; the iterator borrows the
/// object file, so the file must outlive the iteration.
pub fn mk_section_iter(llof: &ffi::ObjectFile) -> SectionIter<'_> {
    unsafe { SectionIter { llsi: LLVMGetSections(llof) } }
}
+
/// Places `llglobal` into the named object-file section.
/// Panics if `section_name` contains an interior NUL byte.
pub fn set_section(llglobal: &Value, section_name: &str) {
    let section_name_cstr = CString::new(section_name).expect("unexpected CString error");
    unsafe {
        LLVMSetSection(llglobal, section_name_cstr.as_ptr());
    }
}

/// Declares a new global of type `ty` named `name` in `llmod`.
/// Panics if `name` contains an interior NUL byte.
pub fn add_global<'a>(llmod: &'a Module, ty: &'a Type, name: &str) -> &'a Value {
    let name_cstr = CString::new(name).expect("unexpected CString error");
    unsafe { LLVMAddGlobal(llmod, ty, name_cstr.as_ptr()) }
}

/// Sets the initializer expression of a global.
pub fn set_initializer(llglobal: &Value, constant_val: &Value) {
    unsafe {
        LLVMSetInitializer(llglobal, constant_val);
    }
}

/// Marks a global as constant (immutable after initialization) or not.
pub fn set_global_constant(llglobal: &Value, is_constant: bool) {
    unsafe {
        LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False });
    }
}

/// Sets the linkage of a global value.
pub fn set_linkage(llglobal: &Value, linkage: Linkage) {
    unsafe {
        LLVMRustSetLinkage(llglobal, linkage);
    }
}

/// Sets the alignment (in bytes) of a global value.
pub fn set_alignment(llglobal: &Value, bytes: usize) {
    unsafe {
        ffi::LLVMSetAlignment(llglobal, bytes as c_uint);
    }
}
+
/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun.
pub fn get_param(llfn: &Value, index: c_uint) -> &Value {
    unsafe {
        // Bounds-check up front: out-of-range indices would otherwise be
        // passed straight into LLVM.
        assert!(
            index < LLVMCountParams(llfn),
            "out of bounds argument access: {} out of {} arguments",
            index,
            LLVMCountParams(llfn)
        );
        LLVMGetParam(llfn, index)
    }
}

/// Safe wrapper for `LLVMGetValueName2` into a byte slice
pub fn get_value_name(value: &Value) -> &[u8] {
    unsafe {
        // LLVM returns a pointer plus an out-parameter length; names are
        // not guaranteed to be UTF-8, hence the byte slice.
        let mut len = 0;
        let data = LLVMGetValueName2(value, &mut len);
        std::slice::from_raw_parts(data.cast(), len)
    }
}

/// Safe wrapper for `LLVMSetValueName2` from a byte slice
pub fn set_value_name(value: &Value, name: &[u8]) {
    unsafe {
        let data = name.as_ptr().cast();
        LLVMSetValueName2(value, data, name.len());
    }
}
+
+pub fn build_string(f: impl FnOnce(&RustString)) -> Result<String, FromUtf8Error> {
+ let sr = RustString { bytes: RefCell::new(Vec::new()) };
+ f(&sr);
+ String::from_utf8(sr.bytes.into_inner())
+}
+
/// Collects the raw bytes that `f` writes into a fresh `RustString`.
pub fn build_byte_buffer(f: impl FnOnce(&RustString)) -> Vec<u8> {
    let sr = RustString { bytes: RefCell::new(Vec::new()) };
    f(&sr);
    sr.bytes.into_inner()
}
+
/// Renders an LLVM `Twine` into an owned `String`; panics if LLVM hands
/// back non-UTF-8 bytes.
pub fn twine_to_string(tr: &Twine) -> String {
    unsafe {
        build_string(|s| LLVMRustWriteTwineToString(tr, s)).expect("got a non-UTF8 Twine from LLVM")
    }
}
+
/// Returns the last error recorded by the Rust/LLVM glue layer, if any.
pub fn last_error() -> Option<String> {
    unsafe {
        let cstr = LLVMRustGetLastError();
        if cstr.is_null() {
            None
        } else {
            // Copy the message into an owned Rust string (lossily, since it
            // need not be valid UTF-8), then release the C-side buffer.
            // NOTE(review): presumed to be malloc-allocated on the C side,
            // matching the `libc::free` here — confirm in the C++ glue.
            let err = CStr::from_ptr(cstr).to_bytes();
            let err = String::from_utf8_lossy(err).to_string();
            libc::free(cstr as *mut _);
            Some(err)
        }
    }
}
+
/// Owning wrapper around an LLVM operand-bundle definition; freed on drop.
pub struct OperandBundleDef<'a> {
    pub raw: &'a mut ffi::OperandBundleDef<'a>,
}

impl OperandBundleDef<'a> {
    /// Creates a named operand bundle over the given values.
    /// Panics if `name` contains an interior NUL byte.
    pub fn new(name: &str, vals: &[&'a Value]) -> Self {
        let name = SmallCStr::new(name);
        let def = unsafe {
            LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)
        };
        OperandBundleDef { raw: def }
    }
}

impl Drop for OperandBundleDef<'a> {
    fn drop(&mut self) {
        unsafe {
            // Reborrow via a raw pointer so the `&mut` can be returned to
            // LLVM for deallocation.
            LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _));
        }
    }
}
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
new file mode 100644
index 0000000..900f2df
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -0,0 +1,367 @@
+use crate::back::write::create_informational_target_machine;
+use crate::llvm;
+use libc::c_int;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_feature::UnstableFeatures;
+use rustc_middle::bug;
+use rustc_session::config::PrintRequest;
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::{MergeFunctions, PanicStrategy};
+use std::ffi::CString;
+
+use std::slice;
+use std::str;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Once;
+
+static POISONED: AtomicBool = AtomicBool::new(false);
+static INIT: Once = Once::new();
+
/// Process-global, one-time LLVM initialization. ICEs if LLVM's
/// multithreading support could not be enabled.
pub(crate) fn init(sess: &Session) {
    unsafe {
        // Before we touch LLVM, make sure that multithreading is enabled.
        INIT.call_once(|| {
            if llvm::LLVMStartMultithreaded() != 1 {
                // use an extra bool to make sure that all future usage of LLVM
                // cannot proceed despite the Once not running more than once.
                POISONED.store(true, Ordering::SeqCst);
            }

            configure_llvm(sess);
        });

        if POISONED.load(Ordering::SeqCst) {
            bug!("couldn't enable multi-threaded LLVM");
        }
    }
}
+
/// Asserts that `init` has already run: the closure only executes if the
/// `Once` never fired, turning a missing initialization into an ICE.
fn require_inited() {
    INIT.call_once(|| bug!("llvm is not initialized"));
    if POISONED.load(Ordering::SeqCst) {
        bug!("couldn't enable multi-threaded LLVM");
    }
}
+
+unsafe fn configure_llvm(sess: &Session) {
+ let n_args = sess.opts.cg.llvm_args.len() + sess.target.target.options.llvm_args.len();
+ let mut llvm_c_strs = Vec::with_capacity(n_args + 1);
+ let mut llvm_args = Vec::with_capacity(n_args + 1);
+
+ llvm::LLVMRustInstallFatalErrorHandler();
+
+ fn llvm_arg_to_arg_name(full_arg: &str) -> &str {
+ full_arg.trim().split(|c: char| c == '=' || c.is_whitespace()).next().unwrap_or("")
+ }
+
+ let cg_opts = sess.opts.cg.llvm_args.iter();
+ let tg_opts = sess.target.target.options.llvm_args.iter();
+ let sess_args = cg_opts.chain(tg_opts);
+
+ let user_specified_args: FxHashSet<_> =
+ sess_args.clone().map(|s| llvm_arg_to_arg_name(s)).filter(|s| !s.is_empty()).collect();
+
+ {
+ // This adds the given argument to LLVM. Unless `force` is true
+ // user specified arguments are *not* overridden.
+ let mut add = |arg: &str, force: bool| {
+ if force || !user_specified_args.contains(llvm_arg_to_arg_name(arg)) {
+ let s = CString::new(arg).unwrap();
+ llvm_args.push(s.as_ptr());
+ llvm_c_strs.push(s);
+ }
+ };
+ // Set the llvm "program name" to make usage and invalid argument messages more clear.
+ add("rustc -Cllvm-args=\"...\" with", true);
+ if sess.time_llvm_passes() {
+ add("-time-passes", false);
+ }
+ if sess.print_llvm_passes() {
+ add("-debug-pass=Structure", false);
+ }
+ if !sess.opts.debugging_opts.no_generate_arange_section {
+ add("-generate-arange-section", false);
+ }
+ match sess
+ .opts
+ .debugging_opts
+ .merge_functions
+ .unwrap_or(sess.target.target.options.merge_functions)
+ {
+ MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
+ MergeFunctions::Aliases => {
+ add("-mergefunc-use-aliases", false);
+ }
+ }
+
+ if sess.target.target.target_os == "emscripten"
+ && sess.panic_strategy() == PanicStrategy::Unwind
+ {
+ add("-enable-emscripten-cxx-exceptions", false);
+ }
+
+ // HACK(eddyb) LLVM inserts `llvm.assume` calls to preserve align attributes
+ // during inlining. Unfortunately these may block other optimizations.
+ add("-preserve-alignment-assumptions-during-inlining=false", false);
+
+ for arg in sess_args {
+ add(&(*arg), true);
+ }
+ }
+
+ if sess.opts.debugging_opts.llvm_time_trace && get_major_version() >= 9 {
+ // time-trace is not thread safe and running it in parallel will cause seg faults.
+ if !sess.opts.debugging_opts.no_parallel_llvm {
+ bug!("`-Z llvm-time-trace` requires `-Z no-parallel-llvm")
+ }
+
+ llvm::LLVMTimeTraceProfilerInitialize();
+ }
+
+ llvm::LLVMInitializePasses();
+
+ ::rustc_llvm::initialize_available_targets();
+
+ llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
+}
+
+pub fn time_trace_profiler_finish(file_name: &str) {
+ unsafe {
+ if get_major_version() >= 9 {
+ let file_name = CString::new(file_name).unwrap();
+ llvm::LLVMTimeTraceProfilerFinish(file_name.as_ptr());
+ }
+ }
+}
+
// WARNING: the features after applying `to_llvm_feature` must be known
// to LLVM or the feature detection code will walk past the end of the feature
// array, leading to crashes.

// Each table maps a rustc feature name to the feature gate (if any)
// required to use it on this architecture. `None` means stable.

const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
    ("aclass", Some(sym::arm_target_feature)),
    ("mclass", Some(sym::arm_target_feature)),
    ("rclass", Some(sym::arm_target_feature)),
    ("dsp", Some(sym::arm_target_feature)),
    ("neon", Some(sym::arm_target_feature)),
    ("crc", Some(sym::arm_target_feature)),
    ("crypto", Some(sym::arm_target_feature)),
    ("v5te", Some(sym::arm_target_feature)),
    ("v6", Some(sym::arm_target_feature)),
    ("v6k", Some(sym::arm_target_feature)),
    ("v6t2", Some(sym::arm_target_feature)),
    ("v7", Some(sym::arm_target_feature)),
    ("v8", Some(sym::arm_target_feature)),
    ("vfp2", Some(sym::arm_target_feature)),
    ("vfp3", Some(sym::arm_target_feature)),
    ("vfp4", Some(sym::arm_target_feature)),
    // This is needed for inline assembly, but shouldn't be stabilized as-is
    // since it should be enabled per-function using #[instruction_set], not
    // #[target_feature].
    ("thumb-mode", Some(sym::arm_target_feature)),
];

const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
    ("fp", Some(sym::aarch64_target_feature)),
    ("neon", Some(sym::aarch64_target_feature)),
    ("sve", Some(sym::aarch64_target_feature)),
    ("crc", Some(sym::aarch64_target_feature)),
    ("crypto", Some(sym::aarch64_target_feature)),
    ("ras", Some(sym::aarch64_target_feature)),
    ("lse", Some(sym::aarch64_target_feature)),
    ("rdm", Some(sym::aarch64_target_feature)),
    ("fp16", Some(sym::aarch64_target_feature)),
    ("rcpc", Some(sym::aarch64_target_feature)),
    ("dotprod", Some(sym::aarch64_target_feature)),
    ("tme", Some(sym::aarch64_target_feature)),
    ("v8.1a", Some(sym::aarch64_target_feature)),
    ("v8.2a", Some(sym::aarch64_target_feature)),
    ("v8.3a", Some(sym::aarch64_target_feature)),
];

const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
    ("adx", Some(sym::adx_target_feature)),
    ("aes", None),
    ("avx", None),
    ("avx2", None),
    ("avx512bw", Some(sym::avx512_target_feature)),
    ("avx512cd", Some(sym::avx512_target_feature)),
    ("avx512dq", Some(sym::avx512_target_feature)),
    ("avx512er", Some(sym::avx512_target_feature)),
    ("avx512f", Some(sym::avx512_target_feature)),
    ("avx512ifma", Some(sym::avx512_target_feature)),
    ("avx512pf", Some(sym::avx512_target_feature)),
    ("avx512vbmi", Some(sym::avx512_target_feature)),
    ("avx512vl", Some(sym::avx512_target_feature)),
    ("avx512vpopcntdq", Some(sym::avx512_target_feature)),
    ("bmi1", None),
    ("bmi2", None),
    ("cmpxchg16b", Some(sym::cmpxchg16b_target_feature)),
    ("f16c", Some(sym::f16c_target_feature)),
    ("fma", None),
    ("fxsr", None),
    ("lzcnt", None),
    ("movbe", Some(sym::movbe_target_feature)),
    ("pclmulqdq", None),
    ("popcnt", None),
    ("rdrand", None),
    ("rdseed", None),
    ("rtm", Some(sym::rtm_target_feature)),
    ("sha", None),
    ("sse", None),
    ("sse2", None),
    ("sse3", None),
    ("sse4.1", None),
    ("sse4.2", None),
    ("sse4a", Some(sym::sse4a_target_feature)),
    ("ssse3", None),
    ("tbm", Some(sym::tbm_target_feature)),
    ("xsave", None),
    ("xsavec", None),
    ("xsaveopt", None),
    ("xsaves", None),
];

const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
    ("hvx", Some(sym::hexagon_target_feature)),
    ("hvx-length128b", Some(sym::hexagon_target_feature)),
];

const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
    ("altivec", Some(sym::powerpc_target_feature)),
    ("power8-altivec", Some(sym::powerpc_target_feature)),
    ("power9-altivec", Some(sym::powerpc_target_feature)),
    ("power8-vector", Some(sym::powerpc_target_feature)),
    ("power9-vector", Some(sym::powerpc_target_feature)),
    ("vsx", Some(sym::powerpc_target_feature)),
];

const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
    &[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];

const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
    ("m", Some(sym::riscv_target_feature)),
    ("a", Some(sym::riscv_target_feature)),
    ("c", Some(sym::riscv_target_feature)),
    ("f", Some(sym::riscv_target_feature)),
    ("d", Some(sym::riscv_target_feature)),
    ("e", Some(sym::riscv_target_feature)),
];

const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
    ("simd128", Some(sym::wasm_target_feature)),
    ("atomics", Some(sym::wasm_target_feature)),
    ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
];
+
+/// When rustdoc is running, provide a list of all known features so that all their respective
+/// primitives may be documented.
+///
+/// IMPORTANT: If you're adding another feature list above, make sure to add it to this iterator!
+pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol>)> {
+ std::iter::empty()
+ .chain(ARM_ALLOWED_FEATURES.iter())
+ .chain(AARCH64_ALLOWED_FEATURES.iter())
+ .chain(X86_ALLOWED_FEATURES.iter())
+ .chain(HEXAGON_ALLOWED_FEATURES.iter())
+ .chain(POWERPC_ALLOWED_FEATURES.iter())
+ .chain(MIPS_ALLOWED_FEATURES.iter())
+ .chain(RISCV_ALLOWED_FEATURES.iter())
+ .chain(WASM_ALLOWED_FEATURES.iter())
+ .cloned()
+}
+
+pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str {
+ let arch = if sess.target.target.arch == "x86_64" { "x86" } else { &*sess.target.target.arch };
+ match (arch, s) {
+ ("x86", "pclmulqdq") => "pclmul",
+ ("x86", "rdrand") => "rdrnd",
+ ("x86", "bmi1") => "bmi",
+ ("x86", "cmpxchg16b") => "cx16",
+ ("aarch64", "fp") => "fp-armv8",
+ ("aarch64", "fp16") => "fullfp16",
+ (_, s) => s,
+ }
+}
+
/// Computes the target features to consider enabled: the allow-listed
/// features for this architecture, restricted to ungated features (or all,
/// on nightly) that LLVM's target machine actually reports as known.
pub fn target_features(sess: &Session) -> Vec<Symbol> {
    let target_machine = create_informational_target_machine(sess);
    supported_target_features(sess)
        .iter()
        .filter_map(|&(feature, gate)| {
            // Gated features are only exposed on nightly builds.
            if UnstableFeatures::from_environment().is_nightly_build() || gate.is_none() {
                Some(feature)
            } else {
                None
            }
        })
        .filter(|feature| {
            // Query LLVM using its own spelling of the feature name.
            let llvm_feature = to_llvm_feature(sess, feature);
            let cstr = CString::new(llvm_feature).unwrap();
            unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) }
        })
        .map(|feature| Symbol::intern(feature))
        .collect()
}
+
/// Returns the feature allow-list for the session's target architecture,
/// paired with the feature gate (if any) needed to use each entry.
/// Architectures without a table get an empty list.
pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Option<Symbol>)] {
    match &*sess.target.target.arch {
        "arm" => ARM_ALLOWED_FEATURES,
        "aarch64" => AARCH64_ALLOWED_FEATURES,
        "x86" | "x86_64" => X86_ALLOWED_FEATURES,
        "hexagon" => HEXAGON_ALLOWED_FEATURES,
        "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
        "powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
        "riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
        "wasm32" => WASM_ALLOWED_FEATURES,
        _ => &[],
    }
}
+
/// Prints the linked LLVM's version to stdout.
pub fn print_version() {
    // Can be called without initializing LLVM
    unsafe {
        println!("LLVM version: {}.{}", llvm::LLVMRustVersionMajor(), llvm::LLVMRustVersionMinor());
    }
}

/// Returns the major version number of the linked LLVM.
pub fn get_major_version() -> u32 {
    unsafe { llvm::LLVMRustVersionMajor() }
}

/// Asks LLVM to print its registered passes to stdout.
pub fn print_passes() {
    // Can be called without initializing LLVM
    unsafe {
        llvm::LLVMRustPrintPasses();
    }
}
+
/// Handles the LLVM-backed `--print` requests (target CPUs and target
/// features) by delegating to LLVM; any other request here is a bug.
/// Requires `init` to have run.
pub(crate) fn print(req: PrintRequest, sess: &Session) {
    require_inited();
    let tm = create_informational_target_machine(sess);
    unsafe {
        match req {
            PrintRequest::TargetCPUs => llvm::LLVMRustPrintTargetCPUs(tm),
            PrintRequest::TargetFeatures => llvm::LLVMRustPrintTargetFeatures(tm),
            _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
        }
    }
}
+
+pub fn target_cpu(sess: &Session) -> &str {
+ let name = match sess.opts.cg.target_cpu {
+ Some(ref s) => &**s,
+ None => &*sess.target.target.options.cpu,
+ };
+ if name != "native" {
+ return name;
+ }
+
+ unsafe {
+ let mut len = 0;
+ let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
+ str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/metadata.rs b/compiler/rustc_codegen_llvm/src/metadata.rs
new file mode 100644
index 0000000..9036428
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/metadata.rs
@@ -0,0 +1,112 @@
+use crate::llvm;
+use crate::llvm::archive_ro::ArchiveRO;
+use crate::llvm::{mk_section_iter, False, ObjectFile};
+use rustc_middle::middle::cstore::MetadataLoader;
+use rustc_target::spec::Target;
+
+use rustc_codegen_ssa::METADATA_FILENAME;
+use rustc_data_structures::owning_ref::OwningRef;
+use rustc_data_structures::rustc_erase_owner;
+use tracing::debug;
+
+use rustc_fs_util::path_to_c_string;
+use std::path::Path;
+use std::slice;
+
+pub use rustc_data_structures::sync::MetadataRef;
+
/// Metadata loader backed by LLVM's archive/object-file readers.
pub struct LlvmMetadataLoader;

impl MetadataLoader for LlvmMetadataLoader {
    fn get_rlib_metadata(&self, _: &Target, filename: &Path) -> Result<MetadataRef, String> {
        // Use ArchiveRO for speed here, it's backed by LLVM and uses mmap
        // internally to read the file. We also avoid even using a memcpy by
        // just keeping the archive along while the metadata is in use.
        let archive =
            ArchiveRO::open(filename).map(|ar| OwningRef::new(Box::new(ar))).map_err(|e| {
                debug!("llvm didn't like `{}`: {}", filename.display(), e);
                format!("failed to read rlib metadata in '{}': {}", filename.display(), e)
            })?;
        // Locate the metadata member inside the archive; the OwningRef ties
        // the returned byte slice to the archive's lifetime.
        let buf: OwningRef<_, [u8]> = archive.try_map(|ar| {
            ar.iter()
                .filter_map(|s| s.ok())
                .find(|sect| sect.name() == Some(METADATA_FILENAME))
                .map(|s| s.data())
                .ok_or_else(|| {
                    debug!("didn't find '{}' in the archive", METADATA_FILENAME);
                    format!("failed to read rlib metadata: '{}'", filename.display())
                })
        })?;
        Ok(rustc_erase_owner!(buf))
    }

    fn get_dylib_metadata(&self, target: &Target, filename: &Path) -> Result<MetadataRef, String> {
        unsafe {
            // Memory-map the dylib and parse it as an object file, then pull
            // the metadata section out; the object file is kept alive (via
            // OwningRef) for as long as the metadata is used.
            let buf = path_to_c_string(filename);
            let mb = llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf.as_ptr())
                .ok_or_else(|| format!("error reading library: '{}'", filename.display()))?;
            let of =
                ObjectFile::new(mb).map(|of| OwningRef::new(Box::new(of))).ok_or_else(|| {
                    format!("provided path not an object file: '{}'", filename.display())
                })?;
            let buf = of.try_map(|of| search_meta_section(of, target, filename))?;
            Ok(rustc_erase_owner!(buf))
        }
    }
}
+
/// Walks the sections of `of` looking for the rustc metadata section and
/// returns its contents; the returned slice borrows from the object file.
fn search_meta_section<'a>(
    of: &'a ObjectFile,
    target: &Target,
    filename: &Path,
) -> Result<&'a [u8], String> {
    unsafe {
        let si = mk_section_iter(of.llof);
        while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False {
            // The section name comes back as pointer + length; a null
            // pointer means an unnamed section, treated as "".
            let mut name_buf = None;
            let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
            let name = name_buf.map_or(
                String::new(), // We got a NULL ptr, ignore `name_len`.
                |buf| {
                    String::from_utf8(
                        slice::from_raw_parts(buf.as_ptr() as *const u8, name_len as usize)
                            .to_vec(),
                    )
                    .unwrap()
                },
            );
            debug!("get_metadata_section: name {}", name);
            if read_metadata_section_name(target) == name {
                let cbuf = llvm::LLVMGetSectionContents(si.llsi);
                let csz = llvm::LLVMGetSectionSize(si.llsi) as usize;
                // The buffer is valid while the object file is around
                let buf: &'a [u8] = slice::from_raw_parts(cbuf as *const u8, csz);
                return Ok(buf);
            }
            llvm::LLVMMoveToNextSection(si.llsi);
        }
    }
    Err(format!("metadata not found: '{}'", filename.display()))
}
+
/// The section name under which rustc metadata is *written* into an object
/// file for this target (Mach-O targets need a segment prefix).
pub fn metadata_section_name(target: &Target) -> &'static str {
    // Historical note:
    //
    // When using link.exe it was seen that the section name `.note.rustc`
    // was getting shortened to `.note.ru`, and according to the PE and COFF
    // specification:
    //
    // > Executable images do not use a string table and do not support
    // > section names longer than 8 characters
    //
    // https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
    //
    // As a result, we choose a slightly shorter name! As to why
    // `.note.rustc` works on MinGW, that's another good question...

    if target.options.is_like_osx { "__DATA,.rustc" } else { ".rustc" }
}
+
/// The section name to look for when *reading* metadata back: always the
/// bare `.rustc`, even on OSX where the writer prefixes a segment name
/// (see `metadata_section_name`).
fn read_metadata_section_name(_target: &Target) -> &'static str {
    ".rustc"
}
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
new file mode 100644
index 0000000..992e83d
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -0,0 +1,84 @@
+use crate::abi::FnAbi;
+use crate::attributes;
+use crate::base;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::type_of::LayoutLlvmExt;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+pub use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_middle::ty::{self, Instance, TypeFoldable};
+use rustc_target::abi::LayoutOf;
+use tracing::debug;
+
+impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    /// Predefines (declares, without initializing) the LLVM global for a
+    /// static item: computes its LLVM type from the layout, defines the
+    /// global symbol, applies linkage and visibility, and records the
+    /// mapping in `self.instances` for later codegen to reference.
+    fn predefine_static(
+        &self,
+        def_id: DefId,
+        linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    ) {
+        let instance = Instance::mono(self.tcx, def_id);
+        let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+        let llty = self.layout_of(ty).llvm_type(self);
+
+        // A clashing symbol name is a fatal error reported at the item's span.
+        let g = self.define_global(symbol_name, llty).unwrap_or_else(|| {
+            self.sess().span_fatal(
+                self.tcx.def_span(def_id),
+                &format!("symbol `{}` is already defined", symbol_name),
+            )
+        });
+
+        unsafe {
+            llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage));
+            llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility));
+        }
+
+        self.instances.borrow_mut().insert(instance, g);
+    }
+
+    /// Predefines the LLVM function for `instance`: declares it with its
+    /// computed `FnAbi`, then applies linkage, link section, comdat,
+    /// visibility, and codegen attributes, and records the declaration in
+    /// `self.instances`.
+    fn predefine_fn(
+        &self,
+        instance: Instance<'tcx>,
+        linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    ) {
+        // Monomorphization must be complete: no inference variables remain.
+        assert!(!instance.substs.needs_infer());
+
+        let fn_abi = FnAbi::of_instance(self, instance, &[]);
+        let lldecl = self.declare_fn(symbol_name, &fn_abi);
+        unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
+        let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+        base::set_link_section(lldecl, &attrs);
+        // ODR linkages need the symbol in a comdat group so duplicate
+        // definitions across codegen units / crates can be deduplicated.
+        if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
+            llvm::SetUniqueComdat(self.llmod, lldecl);
+        }
+
+        // If we're compiling the compiler-builtins crate, e.g., the equivalent of
+        // compiler-rt, then we want to implicitly compile everything with hidden
+        // visibility as we're going to link this object all over the place but
+        // don't want the symbols to get exported.
+        if linkage != Linkage::Internal
+            && linkage != Linkage::Private
+            && self.tcx.is_compiler_builtins(LOCAL_CRATE)
+        {
+            unsafe {
+                llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden);
+            }
+        } else {
+            unsafe {
+                llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility));
+            }
+        }
+
+        debug!("predefine_fn: instance = {:?}", instance);
+
+        attributes::from_fn_attrs(self, lldecl, instance);
+
+        self.instances.borrow_mut().insert(instance, lldecl);
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
new file mode 100644
index 0000000..a43724f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -0,0 +1,285 @@
+pub use crate::llvm::Type;
+
+use crate::abi::{FnAbiLlvmExt, LlvmType};
+use crate::common;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm::{Bool, False, True};
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use rustc_ast as ast;
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
+use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+
+use std::fmt;
+use std::ptr;
+
+use libc::c_uint;
+
+// NOTE(review): equality is pointer identity. This assumes LLVM uniques
+// `Type` instances within a context (structurally identical types share a
+// pointer) — confirm against LLVM's type uniquing before relying on it
+// across contexts.
+impl PartialEq for Type {
+    fn eq(&self, other: &Self) -> bool {
+        ptr::eq(self, other)
+    }
+}
+
+/// Debug-formats a type by asking LLVM to print its textual IR form.
+impl fmt::Debug for Type {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(
+            &llvm::build_string(|s| unsafe {
+                llvm::LLVMRustWriteTypeToString(self, s);
+            })
+            .expect("non-UTF8 type description from LLVM"),
+        )
+    }
+}
+
+/// Crate-internal type-construction helpers layered over the LLVM-C API.
+impl CodegenCx<'ll, 'tcx> {
+    /// Creates a new, initially opaque named struct type in this context.
+    crate fn type_named_struct(&self, name: &str) -> &'ll Type {
+        let name = SmallCStr::new(name);
+        unsafe { llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr()) }
+    }
+
+    /// Fills in the body of a named struct created by `type_named_struct`.
+    crate fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) {
+        unsafe { llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) }
+    }
+
+    crate fn type_void(&self) -> &'ll Type {
+        unsafe { llvm::LLVMVoidTypeInContext(self.llcx) }
+    }
+
+    crate fn type_metadata(&self) -> &'ll Type {
+        unsafe { llvm::LLVMRustMetadataTypeInContext(self.llcx) }
+    }
+
+    /// Creates an integer type with the given number of bits, e.g., i24
+    crate fn type_ix(&self, num_bits: u64) -> &'ll Type {
+        unsafe { llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) }
+    }
+
+    crate fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+        unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
+    }
+
+    /// Returns the parameter types of the LLVM function type `ty`.
+    crate fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
+        unsafe {
+            // LLVM writes exactly `LLVMCountParamTypes(ty)` entries into the
+            // buffer, so `set_len` after the call is sound.
+            let n_args = llvm::LLVMCountParamTypes(ty) as usize;
+            let mut args = Vec::with_capacity(n_args);
+            llvm::LLVMGetParamTypes(ty, args.as_mut_ptr());
+            args.set_len(n_args);
+            args
+        }
+    }
+
+    /// `bool` is represented as `i8` in memory (only immediates use `i1`).
+    crate fn type_bool(&self) -> &'ll Type {
+        self.type_i8()
+    }
+
+    crate fn type_int_from_ty(&self, t: ast::IntTy) -> &'ll Type {
+        match t {
+            ast::IntTy::Isize => self.type_isize(),
+            ast::IntTy::I8 => self.type_i8(),
+            ast::IntTy::I16 => self.type_i16(),
+            ast::IntTy::I32 => self.type_i32(),
+            ast::IntTy::I64 => self.type_i64(),
+            ast::IntTy::I128 => self.type_i128(),
+        }
+    }
+
+    crate fn type_uint_from_ty(&self, t: ast::UintTy) -> &'ll Type {
+        // LLVM integer types are signless, so unsigned types map to the
+        // same LLVM types as their signed counterparts.
+        match t {
+            ast::UintTy::Usize => self.type_isize(),
+            ast::UintTy::U8 => self.type_i8(),
+            ast::UintTy::U16 => self.type_i16(),
+            ast::UintTy::U32 => self.type_i32(),
+            ast::UintTy::U64 => self.type_i64(),
+            ast::UintTy::U128 => self.type_i128(),
+        }
+    }
+
+    crate fn type_float_from_ty(&self, t: ast::FloatTy) -> &'ll Type {
+        match t {
+            ast::FloatTy::F32 => self.type_f32(),
+            ast::FloatTy::F64 => self.type_f64(),
+        }
+    }
+
+    /// Picks an integer type suitable as a pointee for a pointer that is
+    /// only known to have alignment `align`.
+    crate fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
+        // FIXME(eddyb) We could find a better approximation if ity.align < align.
+        let ity = Integer::approximate_align(self, align);
+        self.type_from_integer(ity)
+    }
+
+    /// Return a LLVM type that has at most the required alignment,
+    /// and exactly the required size, as a best-effort padding array.
+    crate fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
+        let unit = Integer::approximate_align(self, align);
+        let size = size.bytes();
+        let unit_size = unit.size().bytes();
+        assert_eq!(size % unit_size, 0);
+        self.type_array(self.type_from_integer(unit), size / unit_size)
+    }
+
+    /// Like `type_func`, but the resulting function type is variadic.
+    crate fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
+        unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) }
+    }
+
+    crate fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+        unsafe { llvm::LLVMRustArrayType(ty, len) }
+    }
+}
+
+/// Backend-agnostic type constructors/queries, implemented by delegating
+/// straight to the LLVM-C API on this context.
+impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn type_i1(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt1TypeInContext(self.llcx) }
+    }
+
+    fn type_i8(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt8TypeInContext(self.llcx) }
+    }
+
+    fn type_i16(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt16TypeInContext(self.llcx) }
+    }
+
+    fn type_i32(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt32TypeInContext(self.llcx) }
+    }
+
+    fn type_i64(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt64TypeInContext(self.llcx) }
+    }
+
+    fn type_i128(&self) -> &'ll Type {
+        unsafe { llvm::LLVMIntTypeInContext(self.llcx, 128) }
+    }
+
+    /// Pointer-sized integer type, precomputed when the context was built.
+    fn type_isize(&self) -> &'ll Type {
+        self.isize_ty
+    }
+
+    fn type_f32(&self) -> &'ll Type {
+        unsafe { llvm::LLVMFloatTypeInContext(self.llcx) }
+    }
+
+    fn type_f64(&self) -> &'ll Type {
+        unsafe { llvm::LLVMDoubleTypeInContext(self.llcx) }
+    }
+
+    /// Non-variadic function type (see `type_variadic_func` for the
+    /// variadic flavor).
+    fn type_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
+        unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) }
+    }
+
+    /// Anonymous (unnamed) struct type with the given field types.
+    fn type_struct(&self, els: &[&'ll Type], packed: bool) -> &'ll Type {
+        unsafe {
+            llvm::LLVMStructTypeInContext(
+                self.llcx,
+                els.as_ptr(),
+                els.len() as c_uint,
+                packed as Bool,
+            )
+        }
+    }
+
+    fn type_kind(&self, ty: &'ll Type) -> TypeKind {
+        unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
+    }
+
+    /// Pointer-to-`ty` in the default (data) address space.
+    fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
+        assert_ne!(
+            self.type_kind(ty),
+            TypeKind::Function,
+            "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
+        );
+        ty.ptr_to(AddressSpace::DATA)
+    }
+
+    /// Pointer-to-`ty` in an explicitly chosen address space.
+    fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
+        ty.ptr_to(address_space)
+    }
+
+    fn element_type(&self, ty: &'ll Type) -> &'ll Type {
+        unsafe { llvm::LLVMGetElementType(ty) }
+    }
+
+    fn vector_length(&self, ty: &'ll Type) -> usize {
+        unsafe { llvm::LLVMGetVectorSize(ty) as usize }
+    }
+
+    /// Bit width of a floating-point type; bug on non-float kinds.
+    fn float_width(&self, ty: &'ll Type) -> usize {
+        match self.type_kind(ty) {
+            TypeKind::Float => 32,
+            TypeKind::Double => 64,
+            TypeKind::X86_FP80 => 80,
+            TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
+            _ => bug!("llvm_float_width called on a non-float type"),
+        }
+    }
+
+    fn int_width(&self, ty: &'ll Type) -> u64 {
+        unsafe { llvm::LLVMGetIntTypeWidth(ty) as u64 }
+    }
+
+    fn val_ty(&self, v: &'ll Value) -> &'ll Type {
+        common::val_ty(v)
+    }
+}
+
+/// Context-parameterized helpers for code that has an `llvm::Context` but
+/// no full `CodegenCx` at hand.
+impl Type {
+    pub fn i8_llcx(llcx: &llvm::Context) -> &Type {
+        unsafe { llvm::LLVMInt8TypeInContext(llcx) }
+    }
+
+    // Creates an integer type with the given number of bits, e.g., i24
+    pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type {
+        unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) }
+    }
+
+    /// `i8*` in the default data address space.
+    pub fn i8p_llcx(llcx: &llvm::Context) -> &Type {
+        Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA)
+    }
+
+    fn ptr_to(&self, address_space: AddressSpace) -> &Type {
+        unsafe { llvm::LLVMPointerType(&self, address_space.0) }
+    }
+}
+
+/// Layout-to-LLVM-type queries; thin delegations to the `LayoutLlvmExt`
+/// and `FnAbiLlvmExt`/`LlvmType` extension traits.
+impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
+        layout.llvm_type(self)
+    }
+    fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
+        layout.immediate_llvm_type(self)
+    }
+    fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_llvm_immediate()
+    }
+    fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_llvm_scalar_pair()
+    }
+    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
+        layout.llvm_field_index(index)
+    }
+    fn scalar_pair_element_backend_type(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        index: usize,
+        immediate: bool,
+    ) -> &'ll Type {
+        layout.scalar_pair_element_llvm_type(self, index, immediate)
+    }
+    fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
+        ty.llvm_type(self)
+    }
+    fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+        fn_abi.ptr_to_llvm_type(self)
+    }
+    fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
+        ty.llvm_type(self)
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
new file mode 100644
index 0000000..e0754d2
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -0,0 +1,379 @@
+use crate::abi::FnAbi;
+use crate::common::*;
+use crate::type_::Type;
+use rustc_codegen_ssa::traits::*;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::{FnAbiExt, TyAndLayout};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, Ty, TypeFoldable};
+use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
+use rustc_target::abi::{Int, Pointer, F32, F64};
+use rustc_target::abi::{LayoutOf, PointeeInfo, Scalar, Size, TyAndLayoutMethods, Variants};
+use tracing::debug;
+
+use std::fmt::Write;
+
+/// Computes the LLVM type for `layout` without consulting the per-context
+/// cache (the caller, `LayoutLlvmExt::llvm_type`, handles caching).
+///
+/// For named structs whose fields must be filled in after the (possibly
+/// recursive) type is registered, the opaque named struct is returned and
+/// `(llty, layout)` is stored in `defer` so the caller can set the body
+/// once the cache entry exists.
+fn uncached_llvm_type<'a, 'tcx>(
+    cx: &CodegenCx<'a, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+    defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
+) -> &'a Type {
+    match layout.abi {
+        Abi::Scalar(_) => bug!("handled elsewhere"),
+        Abi::Vector { ref element, count } => {
+            let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
+            return cx.type_vector(element, count);
+        }
+        Abi::ScalarPair(..) => {
+            return cx.type_struct(
+                &[
+                    layout.scalar_pair_element_llvm_type(cx, 0, false),
+                    layout.scalar_pair_element_llvm_type(cx, 1, false),
+                ],
+                false,
+            );
+        }
+        Abi::Uninhabited | Abi::Aggregate { .. } => {}
+    }
+
+    // Pick a (purely cosmetic) name for the LLVM struct, unless the user
+    // asked for fewer names.
+    let name = match layout.ty.kind() {
+        // FIXME(eddyb) producing readable type names for trait objects can result
+        // in problematically distinct types due to HRTB and subtyping (see #47638).
+        // ty::Dynamic(..) |
+        ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
+            if !cx.sess().fewer_names() =>
+        {
+            let mut name = with_no_trimmed_paths(|| layout.ty.to_string());
+            if let (&ty::Adt(def, _), &Variants::Single { index }) =
+                (layout.ty.kind(), &layout.variants)
+            {
+                if def.is_enum() && !def.variants.is_empty() {
+                    write!(&mut name, "::{}", def.variants[index].ident).unwrap();
+                }
+            }
+            if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
+                (layout.ty.kind(), &layout.variants)
+            {
+                write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+            }
+            Some(name)
+        }
+        ty::Adt(..) => {
+            // If `Some` is returned then a named struct is created in LLVM. Name collisions are
+            // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that
+            // can improve perf.
+            Some(String::new())
+        }
+        _ => None,
+    };
+
+    match layout.fields {
+        FieldsShape::Primitive | FieldsShape::Union(_) => {
+            // No addressable fields: represent the whole thing as padding
+            // of the right size and alignment.
+            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+            let packed = false;
+            match name {
+                None => cx.type_struct(&[fill], packed),
+                Some(ref name) => {
+                    let llty = cx.type_named_struct(name);
+                    cx.set_struct_body(llty, &[fill], packed);
+                    llty
+                }
+            }
+        }
+        FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
+        FieldsShape::Arbitrary { .. } => match name {
+            None => {
+                let (llfields, packed) = struct_llfields(cx, layout);
+                cx.type_struct(&llfields, packed)
+            }
+            Some(ref name) => {
+                // Named struct: leave the body for the caller to fill in
+                // after caching, so recursive types terminate.
+                let llty = cx.type_named_struct(name);
+                *defer = Some((llty, layout));
+                llty
+            }
+        },
+    }
+}
+
+/// Builds the LLVM field list for a struct layout, interleaving padding
+/// filler types between fields so each field lands at its computed offset.
+///
+/// Returns `(fields, packed)`: fields in increasing-offset order with a
+/// padding entry before each field (and a trailing one for sized types,
+/// yielding `1 + field_count * 2` entries); `packed` is true when some
+/// field's effective alignment is below its natural alignment.
+fn struct_llfields<'a, 'tcx>(
+    cx: &CodegenCx<'a, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+) -> (Vec<&'a Type>, bool) {
+    debug!("struct_llfields: {:#?}", layout);
+    let field_count = layout.fields.count();
+
+    let mut packed = false;
+    let mut offset = Size::ZERO;
+    let mut prev_effective_align = layout.align.abi;
+    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
+    for i in layout.fields.index_by_increasing_offset() {
+        let target_offset = layout.fields.offset(i as usize);
+        let field = layout.field(cx, i);
+        let effective_field_align =
+            layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
+        packed |= effective_field_align < field.align.abi;
+
+        debug!(
+            "struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
+                effective_field_align: {}",
+            i,
+            field,
+            offset,
+            target_offset,
+            effective_field_align.bytes()
+        );
+        // Emit a filler covering the gap between the end of the previous
+        // field and this field's offset (possibly zero-sized).
+        assert!(target_offset >= offset);
+        let padding = target_offset - offset;
+        let padding_align = prev_effective_align.min(effective_field_align);
+        assert_eq!(offset.align_to(padding_align) + padding, target_offset);
+        result.push(cx.type_padding_filler(padding, padding_align));
+        debug!("    padding before: {:?}", padding);
+
+        result.push(field.llvm_type(cx));
+        offset = target_offset + field.size;
+        prev_effective_align = effective_field_align;
+    }
+    if !layout.is_unsized() && field_count > 0 {
+        if offset > layout.size {
+            bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
+        }
+        // Trailing padding up to the struct's full size.
+        let padding = layout.size - offset;
+        let padding_align = prev_effective_align;
+        assert_eq!(offset.align_to(padding_align) + padding, layout.size);
+        debug!(
+            "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
+            padding, offset, layout.size
+        );
+        result.push(cx.type_padding_filler(padding, padding_align));
+        assert_eq!(result.len(), 1 + field_count * 2);
+    } else {
+        debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
+    }
+
+    (result, packed)
+}
+
+/// Convenience accessors for a type's computed layout.
+impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
+    /// ABI alignment of `ty`.
+    pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
+        self.layout_of(ty).align.abi
+    }
+
+    /// Size of `ty`.
+    pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
+        self.layout_of(ty).size
+    }
+
+    /// Size and ABI alignment of `ty`, computing the layout only once.
+    pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
+        let layout = self.layout_of(ty);
+        (layout.size, layout.align.abi)
+    }
+}
+
+/// Extension trait mapping a `TyAndLayout` to its LLVM representation:
+/// full/immediate types, scalar(-pair) element types, field indices into
+/// the padded LLVM struct, and cached pointee info.
+pub trait LayoutLlvmExt<'tcx> {
+    fn is_llvm_immediate(&self) -> bool;
+    fn is_llvm_scalar_pair(&self) -> bool;
+    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
+    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
+    fn scalar_llvm_type_at<'a>(
+        &self,
+        cx: &CodegenCx<'a, 'tcx>,
+        scalar: &Scalar,
+        offset: Size,
+    ) -> &'a Type;
+    fn scalar_pair_element_llvm_type<'a>(
+        &self,
+        cx: &CodegenCx<'a, 'tcx>,
+        index: usize,
+        immediate: bool,
+    ) -> &'a Type;
+    fn llvm_field_index(&self, index: usize) -> u64;
+    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+}
+
+impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
+    /// Whether values of this layout are passed around as a single LLVM
+    /// immediate (scalars and vectors; ZSTs trivially qualify).
+    fn is_llvm_immediate(&self) -> bool {
+        match self.abi {
+            Abi::Scalar(_) | Abi::Vector { .. } => true,
+            Abi::ScalarPair(..) => false,
+            Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
+        }
+    }
+
+    /// Whether values of this layout are passed as two LLVM immediates.
+    fn is_llvm_scalar_pair(&self) -> bool {
+        match self.abi {
+            Abi::ScalarPair(..) => true,
+            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+        }
+    }
+
+    /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
+    /// The pointee type of the pointer in `PlaceRef` is always this type.
+    /// For sized types, it is also the right LLVM type for an `alloca`
+    /// containing a value of that type, and most immediates (except `bool`).
+    /// Unsized types, however, are represented by a "minimal unit", e.g.
+    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+    /// If the type is an unsized struct, the regular layout is generated,
+    /// with the inner-most trailing unsized field using the "minimal unit"
+    /// of that field's type - this is useful for taking the address of
+    /// that field and ensuring the struct has the right alignment.
+    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+        if let Abi::Scalar(ref scalar) = self.abi {
+            // Use a different cache for scalars because pointers to DSTs
+            // can be either fat or thin (data pointers of fat pointers).
+            if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
+                return llty;
+            }
+            let llty = match *self.ty.kind() {
+                ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+                    cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
+                }
+                ty::Adt(def, _) if def.is_box() => {
+                    cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
+                }
+                ty::FnPtr(sig) => cx.fn_ptr_backend_type(&FnAbi::of_fn_ptr(cx, sig, &[])),
+                _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
+            };
+            cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
+            return llty;
+        }
+
+        // Check the cache.
+        let variant_index = match self.variants {
+            Variants::Single { index } => Some(index),
+            _ => None,
+        };
+        if let Some(&llty) = cx.lltypes.borrow().get(&(self.ty, variant_index)) {
+            return llty;
+        }
+
+        debug!("llvm_type({:#?})", self);
+
+        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
+
+        // Make sure lifetimes are erased, to avoid generating distinct LLVM
+        // types for Rust types that only differ in the choice of lifetimes.
+        let normal_ty = cx.tcx.erase_regions(&self.ty);
+
+        let mut defer = None;
+        let llty = if self.ty != normal_ty {
+            // Region-erased form differs: recurse so both forms share one
+            // cached LLVM type.
+            let mut layout = cx.layout_of(normal_ty);
+            if let Some(v) = variant_index {
+                layout = layout.for_variant(cx, v);
+            }
+            layout.llvm_type(cx)
+        } else {
+            uncached_llvm_type(cx, *self, &mut defer)
+        };
+        debug!("--> mapped {:#?} to llty={:?}", self, llty);
+
+        cx.lltypes.borrow_mut().insert((self.ty, variant_index), llty);
+
+        // Fill in a deferred named-struct body only after the cache entry
+        // exists, so recursive types resolve to the opaque named struct.
+        if let Some((llty, layout)) = defer {
+            let (llfields, packed) = struct_llfields(cx, layout);
+            cx.set_struct_body(llty, &llfields, packed)
+        }
+
+        llty
+    }
+
+    /// Like `llvm_type`, but `bool` becomes `i1` (its immediate form).
+    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+        if let Abi::Scalar(ref scalar) = self.abi {
+            if scalar.is_bool() {
+                return cx.type_i1();
+            }
+        }
+        self.llvm_type(cx)
+    }
+
+    /// LLVM type for a single scalar located at `offset` within `self`
+    /// (the offset is used to look up pointee info for pointers).
+    fn scalar_llvm_type_at<'a>(
+        &self,
+        cx: &CodegenCx<'a, 'tcx>,
+        scalar: &Scalar,
+        offset: Size,
+    ) -> &'a Type {
+        match scalar.value {
+            Int(i, _) => cx.type_from_integer(i),
+            F32 => cx.type_f32(),
+            F64 => cx.type_f64(),
+            Pointer => {
+                // If we know the alignment, pick something better than i8.
+                let (pointee, address_space) =
+                    if let Some(pointee) = self.pointee_info_at(cx, offset) {
+                        (cx.type_pointee_for_align(pointee.align), pointee.address_space)
+                    } else {
+                        (cx.type_i8(), AddressSpace::DATA)
+                    };
+                cx.type_ptr_to_ext(pointee, address_space)
+            }
+        }
+    }
+
+    /// LLVM type of element `index` (0 or 1) of a scalar-pair layout,
+    /// in its immediate form when `immediate` is true.
+    fn scalar_pair_element_llvm_type<'a>(
+        &self,
+        cx: &CodegenCx<'a, 'tcx>,
+        index: usize,
+        immediate: bool,
+    ) -> &'a Type {
+        // HACK(eddyb) special-case fat pointers until LLVM removes
+        // pointee types, to avoid bitcasting every `OperandRef::deref`.
+        match self.ty.kind() {
+            ty::Ref(..) | ty::RawPtr(_) => {
+                return self.field(cx, index).llvm_type(cx);
+            }
+            ty::Adt(def, _) if def.is_box() => {
+                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
+            }
+            _ => {}
+        }
+
+        let (a, b) = match self.abi {
+            Abi::ScalarPair(ref a, ref b) => (a, b),
+            _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
+        };
+        let scalar = [a, b][index];
+
+        // Make sure to return the same type `immediate_llvm_type` would when
+        // dealing with an immediate pair.  This means that `(bool, bool)` is
+        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+        // immediate, just like `bool` is typically `i8` in memory and only `i1`
+        // when immediate.  We need to load/store `bool` as `i8` to avoid
+        // crippling LLVM optimizations or triggering other LLVM  bugs with `i1`.
+        if immediate && scalar.is_bool() {
+            return cx.type_i1();
+        }
+
+        // The second element starts after the first, rounded up to its
+        // own alignment.
+        let offset =
+            if index == 0 { Size::ZERO } else { a.value.size(cx).align_to(b.value.align(cx).abi) };
+        self.scalar_llvm_type_at(cx, scalar, offset)
+    }
+
+    /// Maps a source-order field index to its index in the LLVM struct
+    /// built by `struct_llfields` (fields sit at odd positions, between
+    /// the interleaved padding fillers, in memory order).
+    fn llvm_field_index(&self, index: usize) -> u64 {
+        match self.abi {
+            Abi::Scalar(_) | Abi::ScalarPair(..) => {
+                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
+            }
+            _ => {}
+        }
+        match self.fields {
+            FieldsShape::Primitive | FieldsShape::Union(_) => {
+                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
+            }
+
+            FieldsShape::Array { .. } => index as u64,
+
+            FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
+        }
+    }
+
+    /// Pointee info for a pointer at `offset` within `self`, memoized in
+    /// `cx.pointee_infos`.
+    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
+        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
+            return pointee;
+        }
+
+        let result = Ty::pointee_info_at(*self, cx, offset);
+
+        cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
+        result
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
new file mode 100644
index 0000000..22ed4dd
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -0,0 +1,205 @@
+use crate::builder::Builder;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::{
+ common::IntPredicate,
+ traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
+};
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size};
+
+/// Rounds `addr` up to the next multiple of `align` via the classic
+/// `(addr + align - 1) & -align` integer trick, returning the result cast
+/// to `ptr_ty`.
+///
+/// NOTE(review): `ptr_as_int` is pointer-sized (`type_isize`) while the
+/// added/masked constants are built with `const_i32` — confirm the widths
+/// line up on 64-bit targets that reach this code path.
+fn round_pointer_up_to_alignment(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    addr: &'ll Value,
+    align: Align,
+    ptr_ty: &'ll Type,
+) -> &'ll Value {
+    let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
+    ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
+    ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
+    bx.inttoptr(ptr_as_int, ptr_ty)
+}
+
+/// Emits the common "bump a pointer stored in the va_list" sequence:
+/// loads the current argument pointer from `list`, optionally rounds it
+/// up when the argument needs more than `slot_size` alignment, stores
+/// back the advanced pointer, and returns (pointer to the argument cast
+/// to `llty*`, the alignment it can be loaded with).
+fn emit_direct_ptr_va_arg(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    llty: &'ll Type,
+    size: Size,
+    align: Align,
+    slot_size: Align,
+    allow_higher_align: bool,
+) -> (&'ll Value, Align) {
+    // Treat the va_list as a plain `i8**`, bitcasting if its LLVM type
+    // differs.
+    let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
+    let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
+        bx.bitcast(list.immediate(), va_list_ptr_ty)
+    } else {
+        list.immediate()
+    };
+
+    let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
+    let (addr, addr_align) = if allow_higher_align && align > slot_size {
+        (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
+    } else {
+        (ptr, slot_size)
+    };
+
+    // Advance the stored pointer past this argument (rounded up to a
+    // whole number of slots).
+    let aligned_size = size.align_to(slot_size).bytes() as i32;
+    let full_direct_size = bx.cx().const_i32(aligned_size);
+    let next = bx.inbounds_gep(addr, &[full_direct_size]);
+    bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
+    // On big-endian targets a value smaller than its slot sits at the
+    // high end of the slot, so offset into the slot before casting.
+    if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.target.target_endian == "big" {
+        let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
+        let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
+        (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
+    } else {
+        (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
+    }
+}
+
+/// Emits a pointer-bumping `va_arg` for `target_ty` and loads the value.
+/// With `indirect`, the slot holds a pointer to the argument rather than
+/// the argument itself, so an extra load is emitted.
+fn emit_ptr_va_arg(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+    indirect: bool,
+    slot_size: Align,
+    allow_higher_align: bool,
+) -> &'ll Value {
+    let layout = bx.cx.layout_of(target_ty);
+    let (llty, size, align) = if indirect {
+        // The slot contains `*target_ty`, so use pointer size/align here.
+        (
+            bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
+            bx.cx.data_layout().pointer_size,
+            bx.cx.data_layout().pointer_align,
+        )
+    } else {
+        (layout.llvm_type(bx.cx), layout.size, layout.align)
+    };
+    let (addr, addr_align) =
+        emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
+    if indirect {
+        let tmp_ret = bx.load(addr, addr_align);
+        bx.load(tmp_ret, align.abi)
+    } else {
+        bx.load(addr, addr_align)
+    }
+}
+
+/// Emits the AAPCS64 `va_arg` sequence: tries the register save area
+/// first (GP or vector registers depending on the type), falling back to
+/// the stack when the register offset shows the registers are exhausted.
+/// Builds the `maybe_reg`/`in_reg`/`on_stack`/`end` block diamond and
+/// leaves `*bx` positioned at the `end` block holding the phi'd result.
+fn emit_aapcs_va_arg(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    // Implementation of the AAPCS64 calling convention for va_args see
+    // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
+    let va_list_addr = list.immediate();
+    let layout = bx.cx.layout_of(target_ty);
+
+    let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
+    let mut in_reg = bx.build_sibling_block("va_arg.in_reg");
+    let mut on_stack = bx.build_sibling_block("va_arg.on_stack");
+    let mut end = bx.build_sibling_block("va_arg.end");
+    let zero = bx.const_i32(0);
+    let offset_align = Align::from_bytes(4).unwrap();
+    // This lowering only handles little-endian AArch64.
+    assert!(&*bx.tcx().sess.target.target.target_endian == "little");
+
+    // Pointers and integers use the general-purpose register save area;
+    // everything else uses the vector register save area.
+    let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
+    let (reg_off, reg_top_index, slot_size) = if gr_type {
+        // __gr_offs field (struct index 7); 8-byte GP register slots.
+        let gr_offs = bx.struct_gep(va_list_addr, 7);
+        let nreg = (layout.size.bytes() + 7) / 8;
+        (gr_offs, 3, nreg * 8)
+    } else {
+        // __vr_offs field (struct index 9); 16-byte vector register slots.
+        let vr_off = bx.struct_gep(va_list_addr, 9);
+        let nreg = (layout.size.bytes() + 15) / 16;
+        (vr_off, 5, nreg * 16)
+    };
+
+    // if the offset >= 0 then the value will be on the stack
+    let mut reg_off_v = bx.load(reg_off, offset_align);
+    let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
+    bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());
+
+    // The value at this point might be in a register, but there is a chance that
+    // it could be on the stack so we have to update the offset and then check
+    // the offset again.
+
+    if gr_type && layout.align.abi.bytes() > 8 {
+        // Over-aligned GP arguments: round the register offset up to 16.
+        reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15));
+        reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16));
+    }
+    let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32));
+
+    maybe_reg.store(new_reg_off_v, reg_off, offset_align);
+
+    // Check to see if we have overflowed the registers as a result of this.
+    // If we have then we need to use the stack for this value
+    let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
+    maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
+
+    let top = in_reg.struct_gep(va_list_addr, reg_top_index);
+    let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
+
+    // reg_value = *(@top + reg_off_v);
+    let top = in_reg.gep(top, &[reg_off_v]);
+    let top = in_reg.bitcast(top, bx.cx.type_ptr_to(layout.llvm_type(bx)));
+    let reg_value = in_reg.load(top, layout.align.abi);
+    in_reg.br(&end.llbb());
+
+    // On Stack block
+    let stack_value =
+        emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
+    on_stack.br(&end.llbb());
+
+    let val = end.phi(
+        layout.immediate_llvm_type(bx),
+        &[reg_value, stack_value],
+        &[&in_reg.llbb(), &on_stack.llbb()],
+    );
+
+    *bx = end;
+    val
+}
+
+/// Entry point for lowering `va_arg`: dispatches on target architecture
+/// and OS to a hand-rolled lowering, falling back to LLVM's native
+/// `va_arg` instruction for everything else.
+pub(super) fn emit_va_arg(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    addr: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    // Determine the va_arg implementation to use. The LLVM va_arg instruction
+    // is lacking in some instances, so we should only use it as a fallback.
+    let target = &bx.cx.tcx.sess.target.target;
+    let arch = &bx.cx.tcx.sess.target.target.arch;
+    match (&**arch, target.options.is_like_windows) {
+        // Windows x86
+        ("x86", true) => {
+            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
+        }
+        // Generic x86
+        ("x86", _) => {
+            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true)
+        }
+        // Windows AArch64
+        ("aarch64", true) => {
+            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
+        }
+        // iOS AArch64
+        ("aarch64", _) if target.target_os == "ios" => {
+            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
+        }
+        ("aarch64", _) => emit_aapcs_va_arg(bx, addr, target_ty),
+        // Windows x86_64
+        ("x86_64", true) => {
+            // Win64: values larger than 8 bytes or with non-power-of-two
+            // size are passed by reference.
+            let target_ty_size = bx.cx.size_of(target_ty).bytes();
+            let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
+            emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
+        }
+        // For all other architecture/OS combinations fall back to using
+        // the LLVM va_arg instruction.
+        // https://llvm.org/docs/LangRef.html#va-arg-instruction
+        _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/value.rs b/compiler/rustc_codegen_llvm/src/value.rs
new file mode 100644
index 0000000..1338a22
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/value.rs
@@ -0,0 +1,32 @@
+pub use crate::llvm::Value;
+
+use crate::llvm;
+
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::ptr;
+
+// NOTE(review): equality is pointer identity — assumes each LLVM `Value`
+// has a unique address for the lifetime of the context; confirm before
+// comparing values across modules/contexts.
+impl PartialEq for Value {
+    fn eq(&self, other: &Self) -> bool {
+        ptr::eq(self, other)
+    }
+}
+
+impl Eq for Value {}
+
+/// Hashes by address, consistent with the pointer-identity `PartialEq`.
+impl Hash for Value {
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        (self as *const Self).hash(hasher);
+    }
+}
+
+/// Debug-formats a value by asking LLVM to print its textual IR form.
+impl fmt::Debug for Value {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(
+            &llvm::build_string(|s| unsafe {
+                llvm::LLVMRustWriteValueToString(self, s);
+            })
+            .expect("non-UTF8 value description from LLVM"),
+        )
+    }
+}