Importing rustc-1.52.1
Change-Id: I3598a97301b4b2e71385e5a519f6d2ad946548b6
diff --git a/compiler/rustc/src/main.rs b/compiler/rustc/src/main.rs
index 6bc5aa6..8590289 100644
--- a/compiler/rustc/src/main.rs
+++ b/compiler/rustc/src/main.rs
@@ -4,7 +4,7 @@
// Note that we're pulling in a static copy of jemalloc which means that to
// pull it in we need to actually reference its symbols for it to get
// linked. The two crates we link to here, std and rustc_driver, are both
- // dynamic libraries. That means to pull in jemalloc we need to actually
+ // dynamic libraries. That means to pull in jemalloc we actually need to
// reference allocation symbols one way or another (as this file is the only
// object code in the rustc executable).
#[cfg(feature = "jemalloc-sys")]
@@ -24,6 +24,20 @@
static _F5: unsafe extern "C" fn(*mut c_void, usize) -> *mut c_void = jemalloc_sys::realloc;
#[used]
static _F6: unsafe extern "C" fn(*mut c_void) = jemalloc_sys::free;
+
+ // On OSX, jemalloc doesn't directly override malloc/free, but instead
+ // registers itself with the allocator's zone APIs in a ctor. However,
+ // the linker doesn't seem to consider ctors as "used" when statically
+ // linking, so we need to explicitly depend on the function.
+ #[cfg(target_os = "macos")]
+ {
+ extern "C" {
+ fn _rjem_je_zone_register();
+ }
+
+ #[used]
+ static _F7: unsafe extern "C" fn() = _rjem_je_zone_register;
+ }
}
rustc_driver::set_sigpipe_handler();
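For context, a minimal standalone sketch (not part of the patch) of the `#[used]` linking trick the comments above describe; `my_malloc` and `KEEP_MY_MALLOC` are hypothetical names. Referencing a symbol through a `#[used]` static keeps the defining object alive at link time even though nothing ever calls it.

use std::os::raw::c_void;

extern "C" {
    // Hypothetical allocator entry point standing in for jemalloc's `malloc`.
    fn my_malloc(size: usize) -> *mut c_void;
}

// `#[used]` keeps this static in the emitted object even though it is never
// read, so the reference to `my_malloc` forces that symbol to be linked in.
#[used]
static KEEP_MY_MALLOC: unsafe extern "C" fn(usize) -> *mut c_void = my_malloc;

fn main() {}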
diff --git a/compiler/rustc_apfloat/Cargo.toml b/compiler/rustc_apfloat/Cargo.toml
index 306513f..103e64b 100644
--- a/compiler/rustc_apfloat/Cargo.toml
+++ b/compiler/rustc_apfloat/Cargo.toml
@@ -6,4 +6,4 @@
[dependencies]
bitflags = "1.2.1"
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_arena/Cargo.toml b/compiler/rustc_arena/Cargo.toml
index 29caa85..5d4d475 100644
--- a/compiler/rustc_arena/Cargo.toml
+++ b/compiler/rustc_arena/Cargo.toml
@@ -5,4 +5,5 @@
edition = "2018"
[dependencies]
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+rustc_data_structures = { path = "../rustc_data_structures" }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index 651f4c6..c3198fb 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -14,10 +14,10 @@
#![feature(dropck_eyepatch)]
#![feature(new_uninit)]
#![feature(maybe_uninit_slice)]
-#![cfg_attr(bootstrap, feature(min_const_generics))]
#![feature(min_specialization)]
#![cfg_attr(test, feature(test))]
+use rustc_data_structures::sync;
use smallvec::SmallVec;
use std::alloc::Layout;
@@ -298,22 +298,6 @@
}
}
- /// Clears the arena. Deallocates all but the longest chunk which may be reused.
- pub fn clear(&mut self) {
- unsafe {
- // Clear the last chunk, which is partially filled.
- let mut chunks_borrow = self.chunks.borrow_mut();
- if let Some(mut last_chunk) = chunks_borrow.last_mut() {
- self.clear_last_chunk(&mut last_chunk);
- let len = chunks_borrow.len();
- // If `T` is ZST, code below has no effect.
- for mut chunk in chunks_borrow.drain(..len - 1) {
- chunk.destroy(chunk.entries);
- }
- }
- }
- }
-
// Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
// chunks.
fn clear_last_chunk(&self, last_chunk: &mut TypedArenaChunk<T>) {
@@ -557,8 +541,19 @@
obj: *mut u8,
}
-unsafe fn drop_for_type<T>(to_drop: *mut u8) {
- std::ptr::drop_in_place(to_drop as *mut T)
+// SAFETY: we require `T: Send` before type-erasing into `DropType`.
+#[cfg(parallel_compiler)]
+unsafe impl sync::Send for DropType {}
+
+impl DropType {
+ #[inline]
+ unsafe fn new<T: sync::Send>(obj: *mut T) -> Self {
+ unsafe fn drop_for_type<T>(to_drop: *mut u8) {
+ std::ptr::drop_in_place(to_drop as *mut T)
+ }
+
+ DropType { drop_fn: drop_for_type::<T>, obj: obj as *mut u8 }
+ }
}
impl Drop for DropType {
@@ -568,10 +563,13 @@
}
/// An arena which can be used to allocate any type.
+///
+/// # Safety
+///
/// Allocating in this arena is unsafe since the type system
/// doesn't know which types it contains. In order to
-/// allocate safely, you must store a PhantomData<T>
-/// alongside this arena for each type T you allocate.
+/// allocate safely, you must store a `PhantomData<T>`
+/// alongside this arena for each type `T` you allocate.
#[derive(Default)]
pub struct DropArena {
/// A list of destructors to run when the arena drops.
@@ -583,21 +581,26 @@
impl DropArena {
#[inline]
- pub unsafe fn alloc<T>(&self, object: T) -> &mut T {
+ pub unsafe fn alloc<T>(&self, object: T) -> &mut T
+ where
+ T: sync::Send,
+ {
let mem = self.arena.alloc_raw(Layout::new::<T>()) as *mut T;
// Write into uninitialized memory.
ptr::write(mem, object);
let result = &mut *mem;
// Record the destructor after doing the allocation as that may panic
- // and would cause `object`'s destructor to run twice if it was recorded before
- self.destructors
- .borrow_mut()
- .push(DropType { drop_fn: drop_for_type::<T>, obj: result as *mut T as *mut u8 });
+ // and would cause `object`'s destructor to run twice if it was recorded before.
+ self.destructors.borrow_mut().push(DropType::new(result));
result
}
#[inline]
- pub unsafe fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
+ pub unsafe fn alloc_from_iter<T, I>(&self, iter: I) -> &mut [T]
+ where
+ T: sync::Send,
+ I: IntoIterator<Item = T>,
+ {
let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
if vec.is_empty() {
return &mut [];
@@ -607,19 +610,18 @@
let start_ptr = self.arena.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
let mut destructors = self.destructors.borrow_mut();
- // Reserve space for the destructors so we can't panic while adding them
+ // Reserve space for the destructors so we can't panic while adding them.
destructors.reserve(len);
// Move the content to the arena by copying it and then forgetting
- // the content of the SmallVec
+ // the content of the SmallVec.
vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
mem::forget(vec.drain(..));
// Record the destructors after doing the allocation as that may panic
- // and would cause `object`'s destructor to run twice if it was recorded before
+ // and would cause `object`'s destructor to run twice if it was recorded before.
for i in 0..len {
- destructors
- .push(DropType { drop_fn: drop_for_type::<T>, obj: start_ptr.add(i) as *mut u8 });
+ destructors.push(DropType::new(start_ptr.add(i)));
}
slice::from_raw_parts_mut(start_ptr, len)
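As a side note, a standalone sketch (assumed names, not part of the patch) of the type-erased destructor pattern that `DropType::new` above packages up: a monomorphized drop function is stored as a plain `unsafe fn(*mut u8)` next to the erased object pointer and replayed later.

struct ErasedDrop {
    drop_fn: unsafe fn(*mut u8),
    obj: *mut u8,
}

impl ErasedDrop {
    unsafe fn new<T>(obj: *mut T) -> Self {
        // Instantiated once per `T`; the function pointer erases the type.
        unsafe fn drop_for_type<T>(to_drop: *mut u8) {
            std::ptr::drop_in_place(to_drop as *mut T)
        }
        ErasedDrop { drop_fn: drop_for_type::<T>, obj: obj as *mut u8 }
    }
}

impl Drop for ErasedDrop {
    fn drop(&mut self) {
        // The caller must guarantee `obj` is still valid and not dropped
        // elsewhere, which is what the arena's bookkeeping ensures.
        unsafe { (self.drop_fn)(self.obj) }
    }
}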
diff --git a/compiler/rustc_arena/src/tests.rs b/compiler/rustc_arena/src/tests.rs
index e8a1f2d..911e577 100644
--- a/compiler/rustc_arena/src/tests.rs
+++ b/compiler/rustc_arena/src/tests.rs
@@ -11,6 +11,24 @@
z: i32,
}
+impl<T> TypedArena<T> {
+ /// Clears the arena. Deallocates all but the longest chunk which may be reused.
+ fn clear(&mut self) {
+ unsafe {
+ // Clear the last chunk, which is partially filled.
+ let mut chunks_borrow = self.chunks.borrow_mut();
+ if let Some(mut last_chunk) = chunks_borrow.last_mut() {
+ self.clear_last_chunk(&mut last_chunk);
+ let len = chunks_borrow.len();
+ // If `T` is ZST, code below has no effect.
+ for mut chunk in chunks_borrow.drain(..len - 1) {
+ chunk.destroy(chunk.entries);
+ }
+ }
+ }
+ }
+}
+
#[test]
pub fn test_unused() {
let arena: TypedArena<Point> = TypedArena::default();
diff --git a/compiler/rustc_ast/Cargo.toml b/compiler/rustc_ast/Cargo.toml
index 13e17a8..6b9b9e8 100644
--- a/compiler/rustc_ast/Cargo.toml
+++ b/compiler/rustc_ast/Cargo.toml
@@ -15,5 +15,5 @@
rustc_index = { path = "../rustc_index" }
rustc_lexer = { path = "../rustc_lexer" }
rustc_macros = { path = "../rustc_macros" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
bitflags = "1.2.1"
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index 2ddcb9e..7e82d7f 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -149,9 +149,17 @@
pub fn from_ident(ident: Ident) -> Self {
PathSegment { ident, id: DUMMY_NODE_ID, args: None }
}
+
pub fn path_root(span: Span) -> Self {
PathSegment::from_ident(Ident::new(kw::PathRoot, span))
}
+
+ pub fn span(&self) -> Span {
+ match &self.args {
+ Some(args) => self.ident.span.to(args.span()),
+ None => self.ident.span,
+ }
+ }
}
/// The arguments of a path segment.
@@ -486,8 +494,8 @@
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Crate {
- pub module: Mod,
pub attrs: Vec<Attribute>,
+ pub items: Vec<P<Item>>,
pub span: Span,
/// The order of items in the HIR is unrelated to the order of
/// items in the AST. However, we generate proc macro harnesses
@@ -647,7 +655,7 @@
/// are treated the same as `x: x, y: ref y, z: ref mut z`,
/// except when `is_shorthand` is true.
#[derive(Clone, Encodable, Decodable, Debug)]
-pub struct FieldPat {
+pub struct PatField {
/// The identifier for the field.
pub ident: Ident,
/// The pattern the field is destructured to.
@@ -692,7 +700,7 @@
/// A struct or struct variant pattern (e.g., `Variant {x, y, ..}`).
/// The `bool` is `true` in the presence of a `..`.
- Struct(Path, Vec<FieldPat>, /* recovered */ bool),
+ Struct(Path, Vec<PatField>, /* recovered */ bool),
/// A tuple struct/variant pattern (`Variant(x, y, .., z)`).
TupleStruct(Path, Vec<P<Pat>>),
@@ -915,16 +923,6 @@
}
}
- pub fn tokens_mut(&mut self) -> Option<&mut LazyTokenStream> {
- match self.kind {
- StmtKind::Local(ref mut local) => local.tokens.as_mut(),
- StmtKind::Item(ref mut item) => item.tokens.as_mut(),
- StmtKind::Expr(ref mut expr) | StmtKind::Semi(ref mut expr) => expr.tokens.as_mut(),
- StmtKind::Empty => None,
- StmtKind::MacCall(ref mut mac) => mac.tokens.as_mut(),
- }
- }
-
pub fn has_trailing_semicolon(&self) -> bool {
match &self.kind {
StmtKind::Semi(_) => true,
@@ -1037,9 +1035,9 @@
pub is_placeholder: bool,
}
-/// Access of a named (e.g., `obj.foo`) or unnamed (e.g., `obj.0`) struct field.
+/// A single field in a struct expression, e.g. `x: value` and `y` in `Foo { x: value, y }`.
#[derive(Clone, Encodable, Decodable, Debug)]
-pub struct Field {
+pub struct ExprField {
pub attrs: AttrVec,
pub id: NodeId,
pub span: Span,
@@ -1083,8 +1081,8 @@
}
// `Expr` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
-rustc_data_structures::static_assert_size!(Expr, 120);
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(Expr, 104);
impl Expr {
/// Returns `true` if this expression would be valid somewhere that expects a value;
@@ -1139,6 +1137,14 @@
}
}
+ pub fn peel_parens(&self) -> &Expr {
+ let mut expr = self;
+ while let ExprKind::Paren(inner) = &expr.kind {
+ expr = &inner;
+ }
+ expr
+ }
+
/// Attempts to reparse as `Ty` (for diagnostic purposes).
pub fn to_ty(&self) -> Option<P<Ty>> {
let kind = match &self.kind {
@@ -1247,6 +1253,13 @@
}
#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct StructExpr {
+ pub path: Path,
+ pub fields: Vec<ExprField>,
+ pub rest: StructRest,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
pub enum ExprKind {
/// A `box x` expression.
Box(P<Expr>),
@@ -1371,7 +1384,7 @@
/// A struct literal expression.
///
/// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. rest}`.
- Struct(Path, Vec<Field>, StructRest),
+ Struct(P<StructExpr>),
/// An array literal constructed from one repeated element.
///
@@ -1951,7 +1964,7 @@
}
/// Syntax used to declare a trait object.
-#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug)]
+#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum TraitObjectSyntax {
Dyn,
None,
@@ -1979,7 +1992,7 @@
}
}
-#[derive(Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+#[derive(Clone, PartialEq, PartialOrd, Encodable, Decodable, Debug, Hash, HashStable_Generic)]
pub enum InlineAsmTemplatePiece {
String(String),
Placeholder { operand_idx: usize, modifier: Option<char>, span: Span },
@@ -2067,7 +2080,7 @@
/// Inline assembly dialect.
///
/// E.g., `"intel"` as in `llvm_asm!("mov eax, 2" : "={eax}"(result) : : : "intel")`.
-#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, HashStable_Generic)]
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, Hash, HashStable_Generic)]
pub enum LlvmAsmDialect {
Att,
Intel,
@@ -2299,21 +2312,22 @@
}
}
-/// Module declaration.
-///
-/// E.g., `mod foo;` or `mod foo { .. }`.
+#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug)]
+pub enum Inline {
+ Yes,
+ No,
+}
+
+/// Module item kind.
#[derive(Clone, Encodable, Decodable, Debug)]
-pub struct Mod {
- /// A span from the first token past `{` to the last token until `}`.
- /// For `mod foo;`, the inner span ranges from the first token
- /// to the last token in the external file.
- pub inner: Span,
- /// `unsafe` keyword accepted syntactically for macro DSLs, but not
- /// semantically by Rust.
- pub unsafety: Unsafe,
- pub items: Vec<P<Item>>,
- /// `true` for `mod foo { .. }`; `false` for `mod foo;`.
- pub inline: bool,
+pub enum ModKind {
+ /// Module with inlined definition `mod foo { ... }`,
+ /// or with definition outlined to a separate file `mod foo;` and already loaded from it.
+ /// The inner span is from the first token past `{` to the last token until `}`,
+ /// or from the first to the last token in the loaded file.
+ Loaded(Vec<P<Item>>, Inline, Span),
+ /// Module with definition outlined to a separate file `mod foo;` but not yet loaded from it.
+ Unloaded,
}
/// Foreign module declaration.
@@ -2520,11 +2534,11 @@
}
}
-/// Field of a struct.
+/// Field definition in a struct, variant or union.
///
/// E.g., `bar: usize` as in `struct Foo { bar: usize }`.
#[derive(Clone, Encodable, Decodable, Debug)]
-pub struct StructField {
+pub struct FieldDef {
pub attrs: Vec<Attribute>,
pub id: NodeId,
pub span: Span,
@@ -2541,11 +2555,11 @@
/// Struct variant.
///
/// E.g., `Bar { .. }` as in `enum Foo { Bar { .. } }`.
- Struct(Vec<StructField>, bool),
+ Struct(Vec<FieldDef>, bool),
/// Tuple variant.
///
/// E.g., `Bar(..)` as in `enum Foo { Bar(..) }`.
- Tuple(Vec<StructField>, NodeId),
+ Tuple(Vec<FieldDef>, NodeId),
/// Unit variant.
///
/// E.g., `Bar = ..` as in `enum Foo { Bar = .. }`.
@@ -2554,7 +2568,7 @@
impl VariantData {
/// Return the fields of this variant.
- pub fn fields(&self) -> &[StructField] {
+ pub fn fields(&self) -> &[FieldDef] {
match *self {
VariantData::Struct(ref fields, ..) | VariantData::Tuple(ref fields, _) => fields,
_ => &[],
@@ -2694,7 +2708,7 @@
/// A use declaration item (`use`).
///
/// E.g., `use foo;`, `use foo::bar;` or `use foo::bar as FooBar;`.
- Use(P<UseTree>),
+ Use(UseTree),
/// A static item (`static`).
///
/// E.g., `static FOO: i32 = 42;` or `static FOO: &'static str = "bar";`.
@@ -2710,13 +2724,15 @@
/// A module declaration (`mod`).
///
/// E.g., `mod foo;` or `mod foo { .. }`.
- Mod(Mod),
+ /// `unsafe` keyword on modules is accepted syntactically for macro DSLs, but not
+ /// semantically by Rust.
+ Mod(Unsafe, ModKind),
/// An external module (`extern`).
///
/// E.g., `extern {}` or `extern "C" {}`.
ForeignMod(ForeignMod),
/// Module-level inline assembly (from `global_asm!()`).
- GlobalAsm(P<GlobalAsm>),
+ GlobalAsm(GlobalAsm),
/// A type alias (`type`).
///
/// E.g., `type Foo = Bar<u8>;`.
@@ -2754,7 +2770,7 @@
MacroDef(MacroDef),
}
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ItemKind, 112);
impl ItemKind {
@@ -2828,7 +2844,7 @@
MacCall(MacCall),
}
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(AssocItemKind, 72);
impl AssocItemKind {
@@ -2880,7 +2896,7 @@
MacCall(MacCall),
}
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ForeignItemKind, 72);
impl From<ForeignItemKind> for ItemKind {
@@ -2909,69 +2925,3 @@
}
pub type ForeignItem = Item<ForeignItemKind>;
-
-pub trait HasTokens {
- /// Called by `Parser::collect_tokens` to store the collected
- /// tokens inside an AST node
- fn finalize_tokens(&mut self, tokens: LazyTokenStream);
-}
-
-impl<T: HasTokens + 'static> HasTokens for P<T> {
- fn finalize_tokens(&mut self, tokens: LazyTokenStream) {
- (**self).finalize_tokens(tokens);
- }
-}
-
-impl<T: HasTokens> HasTokens for Option<T> {
- fn finalize_tokens(&mut self, tokens: LazyTokenStream) {
- if let Some(inner) = self {
- inner.finalize_tokens(tokens);
- }
- }
-}
-
-impl HasTokens for Attribute {
- fn finalize_tokens(&mut self, tokens: LazyTokenStream) {
- match &mut self.kind {
- AttrKind::Normal(_, attr_tokens) => {
- if attr_tokens.is_none() {
- *attr_tokens = Some(tokens);
- }
- }
- AttrKind::DocComment(..) => {
- panic!("Called finalize_tokens on doc comment attr {:?}", self)
- }
- }
- }
-}
-
-impl HasTokens for Stmt {
- fn finalize_tokens(&mut self, tokens: LazyTokenStream) {
- let stmt_tokens = match self.kind {
- StmtKind::Local(ref mut local) => &mut local.tokens,
- StmtKind::Item(ref mut item) => &mut item.tokens,
- StmtKind::Expr(ref mut expr) | StmtKind::Semi(ref mut expr) => &mut expr.tokens,
- StmtKind::Empty => return,
- StmtKind::MacCall(ref mut mac) => &mut mac.tokens,
- };
- if stmt_tokens.is_none() {
- *stmt_tokens = Some(tokens);
- }
- }
-}
-
-macro_rules! derive_has_tokens {
- ($($ty:path),*) => { $(
- impl HasTokens for $ty {
- fn finalize_tokens(&mut self, tokens: LazyTokenStream) {
- if self.tokens.is_none() {
- self.tokens = Some(tokens);
- }
- }
- }
- )* }
-}
-
-derive_has_tokens! {
- Item, Expr, Ty, AttrItem, Visibility, Path, Block, Pat
-}
diff --git a/compiler/rustc_ast/src/ast_like.rs b/compiler/rustc_ast/src/ast_like.rs
new file mode 100644
index 0000000..63bc7c4
--- /dev/null
+++ b/compiler/rustc_ast/src/ast_like.rs
@@ -0,0 +1,198 @@
+use super::ptr::P;
+use super::tokenstream::LazyTokenStream;
+use super::{Arm, ExprField, FieldDef, GenericParam, Param, PatField, Variant};
+use super::{AssocItem, Expr, ForeignItem, Item, Local};
+use super::{AttrItem, AttrKind, Block, Pat, Path, Ty, Visibility};
+use super::{AttrVec, Attribute, Stmt, StmtKind};
+
+/// An `AstLike` represents an AST node (or some wrapper around
+/// an AST node) which stores some combination of attributes
+/// and tokens.
+pub trait AstLike: Sized {
+ fn attrs(&self) -> &[Attribute];
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>));
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>>;
+}
+
+impl<T: AstLike + 'static> AstLike for P<T> {
+ fn attrs(&self) -> &[Attribute] {
+ (**self).attrs()
+ }
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ (**self).visit_attrs(f);
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ (**self).tokens_mut()
+ }
+}
+
+fn visit_attrvec(attrs: &mut AttrVec, f: impl FnOnce(&mut Vec<Attribute>)) {
+ crate::mut_visit::visit_clobber(attrs, |attrs| {
+ let mut vec = attrs.into();
+ f(&mut vec);
+ vec.into()
+ });
+}
+
+impl AstLike for StmtKind {
+ fn attrs(&self) -> &[Attribute] {
+ match self {
+ StmtKind::Local(local) => local.attrs(),
+ StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr.attrs(),
+ StmtKind::Item(item) => item.attrs(),
+ StmtKind::Empty => &[],
+ StmtKind::MacCall(mac) => &mac.attrs,
+ }
+ }
+
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ match self {
+ StmtKind::Local(local) => local.visit_attrs(f),
+ StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr.visit_attrs(f),
+ StmtKind::Item(item) => item.visit_attrs(f),
+ StmtKind::Empty => {}
+ StmtKind::MacCall(mac) => visit_attrvec(&mut mac.attrs, f),
+ }
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ Some(match self {
+ StmtKind::Local(local) => &mut local.tokens,
+ StmtKind::Item(item) => &mut item.tokens,
+ StmtKind::Expr(expr) | StmtKind::Semi(expr) => &mut expr.tokens,
+ StmtKind::Empty => return None,
+ StmtKind::MacCall(mac) => &mut mac.tokens,
+ })
+ }
+}
+
+impl AstLike for Stmt {
+ fn attrs(&self) -> &[Attribute] {
+ self.kind.attrs()
+ }
+
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ self.kind.visit_attrs(f);
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ self.kind.tokens_mut()
+ }
+}
+
+impl AstLike for Attribute {
+ fn attrs(&self) -> &[Attribute] {
+ &[]
+ }
+ fn visit_attrs(&mut self, _f: impl FnOnce(&mut Vec<Attribute>)) {}
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ Some(match &mut self.kind {
+ AttrKind::Normal(_, tokens) => tokens,
+ kind @ AttrKind::DocComment(..) => {
+ panic!("Called tokens_mut on doc comment attr {:?}", kind)
+ }
+ })
+ }
+}
+
+impl<T: AstLike> AstLike for Option<T> {
+ fn attrs(&self) -> &[Attribute] {
+ self.as_ref().map(|inner| inner.attrs()).unwrap_or(&[])
+ }
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ if let Some(inner) = self.as_mut() {
+ inner.visit_attrs(f);
+ }
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ self.as_mut().and_then(|inner| inner.tokens_mut())
+ }
+}
+
+/// Helper trait for the macros below. Abstracts over
+/// the two types of attribute fields that AST nodes
+/// may have (`Vec<Attribute>` or `AttrVec`)
+trait VecOrAttrVec {
+ fn visit(&mut self, f: impl FnOnce(&mut Vec<Attribute>));
+}
+
+impl VecOrAttrVec for Vec<Attribute> {
+ fn visit(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ f(self)
+ }
+}
+
+impl VecOrAttrVec for AttrVec {
+ fn visit(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ visit_attrvec(self, f)
+ }
+}
+
+macro_rules! derive_has_tokens_and_attrs {
+ ($($ty:path),*) => { $(
+ impl AstLike for $ty {
+ fn attrs(&self) -> &[Attribute] {
+ &self.attrs
+ }
+
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ VecOrAttrVec::visit(&mut self.attrs, f)
+ }
+
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ Some(&mut self.tokens)
+ }
+ }
+ )* }
+}
+
+macro_rules! derive_has_attrs_no_tokens {
+ ($($ty:path),*) => { $(
+ impl AstLike for $ty {
+ fn attrs(&self) -> &[Attribute] {
+ &self.attrs
+ }
+
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ VecOrAttrVec::visit(&mut self.attrs, f)
+ }
+
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ None
+ }
+ }
+ )* }
+}
+
+macro_rules! derive_has_tokens_no_attrs {
+ ($($ty:path),*) => { $(
+ impl AstLike for $ty {
+ fn attrs(&self) -> &[Attribute] {
+ &[]
+ }
+
+ fn visit_attrs(&mut self, _f: impl FnOnce(&mut Vec<Attribute>)) {}
+
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ Some(&mut self.tokens)
+ }
+ }
+ )* }
+}
+
+// These AST nodes support both inert and active
+// attributes, so they also have tokens.
+derive_has_tokens_and_attrs! {
+ Item, Expr, Local, AssocItem, ForeignItem
+}
+
+// These AST nodes only support inert attributes, so they don't
+// store tokens (since nothing can observe them)
+derive_has_attrs_no_tokens! {
+ FieldDef, Arm, ExprField, PatField, Variant, Param, GenericParam
+}
+
+// These AST nodes don't support attributes, but can
+// be captured by a `macro_rules!` matcher. Therefore,
+// they need to store tokens.
+derive_has_tokens_no_attrs! {
+ Ty, Block, AttrItem, Pat, Path, Visibility
+}
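To illustrate the idea behind `AstLike::visit_attrs`, a simplified standalone sketch (hypothetical types, not part of the patch): callers can edit attributes without caring whether a node stores them as a growable `Vec` or a compact boxed form, with the compact case round-tripping through a `Vec` the same way `visit_attrvec` does above.

type Attr = String;

trait HasAttrs {
    fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attr>));
}

struct ItemLike { attrs: Vec<Attr> }      // growable storage
struct ExprLike { attrs: Box<[Attr]> }    // compact storage

impl HasAttrs for ItemLike {
    fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attr>)) {
        f(&mut self.attrs)
    }
}

impl HasAttrs for ExprLike {
    fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attr>)) {
        // Round-trip through a Vec, mirroring `visit_attrvec`.
        let mut v = std::mem::take(&mut self.attrs).into_vec();
        f(&mut v);
        self.attrs = v.into_boxed_slice();
    }
}

fn prepend(node: &mut impl HasAttrs, attr: Attr) {
    node.visit_attrs(|attrs| attrs.insert(0, attr));
}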
diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
index 4dcbe48..2c5e180 100644
--- a/compiler/rustc_ast/src/attr/mod.rs
+++ b/compiler/rustc_ast/src/attr/mod.rs
@@ -1,17 +1,15 @@
//! Functions dealing with attributes and meta items.
use crate::ast;
-use crate::ast::{AttrId, AttrItem, AttrKind, AttrStyle, AttrVec, Attribute};
-use crate::ast::{Expr, GenericParam, Item, Lit, LitKind, Local, Stmt, StmtKind};
+use crate::ast::{AttrId, AttrItem, AttrKind, AttrStyle, Attribute};
+use crate::ast::{Lit, LitKind};
use crate::ast::{MacArgs, MacDelimiter, MetaItem, MetaItemKind, NestedMetaItem};
use crate::ast::{Path, PathSegment};
-use crate::mut_visit::visit_clobber;
-use crate::ptr::P;
use crate::token::{self, CommentKind, Token};
use crate::tokenstream::{DelimSpan, LazyTokenStream, TokenStream, TokenTree, TreeAndSpacing};
use rustc_index::bit_set::GrowableBitSet;
-use rustc_span::source_map::{BytePos, Spanned};
+use rustc_span::source_map::BytePos;
use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::Span;
@@ -35,10 +33,6 @@
}
}
-pub fn is_known_lint_tool(m_item: Ident) -> bool {
- [sym::clippy, sym::rustc].contains(&m_item.name)
-}
-
impl NestedMetaItem {
/// Returns the `MetaItem` if `self` is a `NestedMetaItem::MetaItem`.
pub fn meta_item(&self) -> Option<&MetaItem> {
@@ -122,6 +116,7 @@
}
impl Attribute {
+ #[inline]
pub fn has_name(&self, name: Symbol) -> bool {
match self.kind {
AttrKind::Normal(ref item, _) => item.path == name,
@@ -617,101 +612,3 @@
MetaItem::from_tokens(tokens).map(NestedMetaItem::MetaItem)
}
}
-
-pub trait HasAttrs: Sized {
- fn attrs(&self) -> &[Attribute];
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>));
-}
-
-impl<T: HasAttrs> HasAttrs for Spanned<T> {
- fn attrs(&self) -> &[Attribute] {
- self.node.attrs()
- }
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- self.node.visit_attrs(f);
- }
-}
-
-impl HasAttrs for Vec<Attribute> {
- fn attrs(&self) -> &[Attribute] {
- self
- }
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- f(self)
- }
-}
-
-impl HasAttrs for AttrVec {
- fn attrs(&self) -> &[Attribute] {
- self
- }
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- visit_clobber(self, |this| {
- let mut vec = this.into();
- f(&mut vec);
- vec.into()
- });
- }
-}
-
-impl<T: HasAttrs + 'static> HasAttrs for P<T> {
- fn attrs(&self) -> &[Attribute] {
- (**self).attrs()
- }
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- (**self).visit_attrs(f);
- }
-}
-
-impl HasAttrs for StmtKind {
- fn attrs(&self) -> &[Attribute] {
- match *self {
- StmtKind::Local(ref local) => local.attrs(),
- StmtKind::Expr(ref expr) | StmtKind::Semi(ref expr) => expr.attrs(),
- StmtKind::Item(ref item) => item.attrs(),
- StmtKind::Empty => &[],
- StmtKind::MacCall(ref mac) => mac.attrs.attrs(),
- }
- }
-
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- match self {
- StmtKind::Local(local) => local.visit_attrs(f),
- StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr.visit_attrs(f),
- StmtKind::Item(item) => item.visit_attrs(f),
- StmtKind::Empty => {}
- StmtKind::MacCall(mac) => {
- mac.attrs.visit_attrs(f);
- }
- }
- }
-}
-
-impl HasAttrs for Stmt {
- fn attrs(&self) -> &[ast::Attribute] {
- self.kind.attrs()
- }
-
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- self.kind.visit_attrs(f);
- }
-}
-
-macro_rules! derive_has_attrs {
- ($($ty:path),*) => { $(
- impl HasAttrs for $ty {
- fn attrs(&self) -> &[Attribute] {
- &self.attrs
- }
-
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- self.attrs.visit_attrs(f);
- }
- }
- )* }
-}
-
-derive_has_attrs! {
- Item, Expr, Local, ast::AssocItem, ast::ForeignItem, ast::StructField, ast::Arm,
- ast::Field, ast::FieldPat, ast::Variant, ast::Param, GenericParam
-}
diff --git a/compiler/rustc_ast/src/lib.rs b/compiler/rustc_ast/src/lib.rs
index ddf52ca..03ec4b8 100644
--- a/compiler/rustc_ast/src/lib.rs
+++ b/compiler/rustc_ast/src/lib.rs
@@ -40,8 +40,8 @@
}
pub mod ast;
+pub mod ast_like;
pub mod attr;
-pub mod crate_disambiguator;
pub mod entry;
pub mod expand;
pub mod mut_visit;
@@ -52,6 +52,7 @@
pub mod visit;
pub use self::ast::*;
+pub use self::ast_like::AstLike;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs
index 024d968..f426f2c 100644
--- a/compiler/rustc_ast/src/mut_visit.rs
+++ b/compiler/rustc_ast/src/mut_visit.rs
@@ -102,8 +102,8 @@
noop_visit_fn_header(header, self);
}
- fn flat_map_struct_field(&mut self, sf: StructField) -> SmallVec<[StructField; 1]> {
- noop_flat_map_struct_field(sf, self)
+ fn flat_map_field_def(&mut self, fd: FieldDef) -> SmallVec<[FieldDef; 1]> {
+ noop_flat_map_field_def(fd, self)
}
fn visit_item_kind(&mut self, i: &mut ItemKind) {
@@ -170,10 +170,6 @@
noop_visit_ty_constraint(t, self);
}
- fn visit_mod(&mut self, m: &mut Mod) {
- noop_visit_mod(m, self);
- }
-
fn visit_foreign_mod(&mut self, nm: &mut ForeignMod) {
noop_visit_foreign_mod(nm, self);
}
@@ -258,8 +254,8 @@
noop_visit_mt(mt, self);
}
- fn flat_map_field(&mut self, f: Field) -> SmallVec<[Field; 1]> {
- noop_flat_map_field(f, self)
+ fn flat_map_expr_field(&mut self, f: ExprField) -> SmallVec<[ExprField; 1]> {
+ noop_flat_map_expr_field(f, self)
}
fn visit_where_clause(&mut self, where_clause: &mut WhereClause) {
@@ -282,8 +278,8 @@
// Do nothing.
}
- fn flat_map_field_pattern(&mut self, fp: FieldPat) -> SmallVec<[FieldPat; 1]> {
- noop_flat_map_field_pattern(fp, self)
+ fn flat_map_pat_field(&mut self, fp: PatField) -> SmallVec<[PatField; 1]> {
+ noop_flat_map_pat_field(fp, self)
}
}
@@ -389,11 +385,11 @@
vis.visit_span(&mut dspan.close);
}
-pub fn noop_flat_map_field_pattern<T: MutVisitor>(
- mut fp: FieldPat,
+pub fn noop_flat_map_pat_field<T: MutVisitor>(
+ mut fp: PatField,
vis: &mut T,
-) -> SmallVec<[FieldPat; 1]> {
- let FieldPat { attrs, id, ident, is_placeholder: _, is_shorthand: _, pat, span } = &mut fp;
+) -> SmallVec<[PatField; 1]> {
+ let PatField { attrs, id, ident, is_placeholder: _, is_shorthand: _, pat, span } = &mut fp;
vis.visit_id(id);
vis.visit_ident(ident);
vis.visit_pat(pat);
@@ -846,10 +842,10 @@
pub fn noop_visit_variant_data<T: MutVisitor>(vdata: &mut VariantData, vis: &mut T) {
match vdata {
VariantData::Struct(fields, ..) => {
- fields.flat_map_in_place(|field| vis.flat_map_struct_field(field));
+ fields.flat_map_in_place(|field| vis.flat_map_field_def(field));
}
VariantData::Tuple(fields, id) => {
- fields.flat_map_in_place(|field| vis.flat_map_struct_field(field));
+ fields.flat_map_in_place(|field| vis.flat_map_field_def(field));
vis.visit_id(id);
}
VariantData::Unit(id) => vis.visit_id(id),
@@ -868,22 +864,25 @@
vis.visit_span(span);
}
-pub fn noop_flat_map_struct_field<T: MutVisitor>(
- mut sf: StructField,
+pub fn noop_flat_map_field_def<T: MutVisitor>(
+ mut fd: FieldDef,
visitor: &mut T,
-) -> SmallVec<[StructField; 1]> {
- let StructField { span, ident, vis, id, ty, attrs, is_placeholder: _ } = &mut sf;
+) -> SmallVec<[FieldDef; 1]> {
+ let FieldDef { span, ident, vis, id, ty, attrs, is_placeholder: _ } = &mut fd;
visitor.visit_span(span);
visit_opt(ident, |ident| visitor.visit_ident(ident));
visitor.visit_vis(vis);
visitor.visit_id(id);
visitor.visit_ty(ty);
visit_attrs(attrs, visitor);
- smallvec![sf]
+ smallvec![fd]
}
-pub fn noop_flat_map_field<T: MutVisitor>(mut f: Field, vis: &mut T) -> SmallVec<[Field; 1]> {
- let Field { ident, expr, span, is_shorthand: _, attrs, id, is_placeholder: _ } = &mut f;
+pub fn noop_flat_map_expr_field<T: MutVisitor>(
+ mut f: ExprField,
+ vis: &mut T,
+) -> SmallVec<[ExprField; 1]> {
+ let ExprField { ident, expr, span, is_shorthand: _, attrs, id, is_placeholder: _ } = &mut f;
vis.visit_ident(ident);
vis.visit_expr(expr);
vis.visit_id(id);
@@ -917,7 +916,13 @@
vis.visit_generics(generics);
visit_opt(body, |body| vis.visit_block(body));
}
- ItemKind::Mod(m) => vis.visit_mod(m),
+ ItemKind::Mod(_unsafety, mod_kind) => match mod_kind {
+ ModKind::Loaded(items, _inline, inner_span) => {
+ vis.visit_span(inner_span);
+ items.flat_map_in_place(|item| vis.flat_map_item(item));
+ }
+ ModKind::Unloaded => {}
+ },
ItemKind::ForeignMod(nm) => vis.visit_foreign_mod(nm),
ItemKind::GlobalAsm(_ga) => {}
ItemKind::TyAlias(box TyAliasKind(_, generics, bounds, ty)) => {
@@ -998,14 +1003,10 @@
vis.visit_asyncness(asyncness);
}
-pub fn noop_visit_mod<T: MutVisitor>(module: &mut Mod, vis: &mut T) {
- let Mod { inner, unsafety: _, items, inline: _ } = module;
- vis.visit_span(inner);
- items.flat_map_in_place(|item| vis.flat_map_item(item));
-}
-
+// FIXME: Avoid visiting the crate as a `Mod` item, flat map only the inner items if possible,
+// or make crate visiting first class if necessary.
pub fn noop_visit_crate<T: MutVisitor>(krate: &mut Crate, vis: &mut T) {
- visit_clobber(krate, |Crate { module, attrs, span, proc_macros }| {
+ visit_clobber(krate, |Crate { attrs, items, span, proc_macros }| {
let item_vis =
Visibility { kind: VisibilityKind::Public, span: span.shrink_to_lo(), tokens: None };
let item = P(Item {
@@ -1014,19 +1015,20 @@
id: DUMMY_NODE_ID,
vis: item_vis,
span,
- kind: ItemKind::Mod(module),
+ kind: ItemKind::Mod(Unsafe::No, ModKind::Loaded(items, Inline::Yes, span)),
tokens: None,
});
let items = vis.flat_map_item(item);
let len = items.len();
if len == 0 {
- let module = Mod { inner: span, unsafety: Unsafe::No, items: vec![], inline: true };
- Crate { module, attrs: vec![], span, proc_macros }
+ Crate { attrs: vec![], items: vec![], span, proc_macros }
} else if len == 1 {
let Item { attrs, span, kind, .. } = items.into_iter().next().unwrap().into_inner();
match kind {
- ItemKind::Mod(module) => Crate { module, attrs, span, proc_macros },
+ ItemKind::Mod(_, ModKind::Loaded(items, ..)) => {
+ Crate { attrs, items, span, proc_macros }
+ }
_ => panic!("visitor converted a module to not a module"),
}
} else {
@@ -1103,7 +1105,7 @@
}
PatKind::Struct(path, fields, _etc) => {
vis.visit_path(path);
- fields.flat_map_in_place(|field| vis.flat_map_field_pattern(field));
+ fields.flat_map_in_place(|field| vis.flat_map_pat_field(field));
}
PatKind::Box(inner) => vis.visit_pat(inner),
PatKind::Ref(inner, _mutbl) => vis.visit_pat(inner),
@@ -1284,10 +1286,11 @@
visit_vec(inputs, |(_c, expr)| vis.visit_expr(expr));
}
ExprKind::MacCall(mac) => vis.visit_mac_call(mac),
- ExprKind::Struct(path, fields, expr) => {
+ ExprKind::Struct(se) => {
+ let StructExpr { path, fields, rest } = se.deref_mut();
vis.visit_path(path);
- fields.flat_map_in_place(|field| vis.flat_map_field(field));
- match expr {
+ fields.flat_map_in_place(|field| vis.flat_map_expr_field(field));
+ match rest {
StructRest::Base(expr) => vis.visit_expr(expr),
StructRest::Rest(_span) => {}
StructRest::None => {}
diff --git a/compiler/rustc_ast/src/node_id.rs b/compiler/rustc_ast/src/node_id.rs
index 6e7d2ba..d20bace 100644
--- a/compiler/rustc_ast/src/node_id.rs
+++ b/compiler/rustc_ast/src/node_id.rs
@@ -2,6 +2,12 @@
use std::fmt;
rustc_index::newtype_index! {
+ /// Identifies an AST node.
+ ///
+ /// This identifies top-level definitions, expressions, and everything in between.
+ /// This is later turned into [`DefId`] and `HirId` for the HIR.
+ ///
+ /// [`DefId`]: rustc_span::def_id::DefId
pub struct NodeId {
DEBUG_FORMAT = "NodeId({})"
}
@@ -9,12 +15,12 @@
rustc_data_structures::define_id_collections!(NodeMap, NodeSet, NodeId);
-/// `NodeId` used to represent the root of the crate.
+/// The [`NodeId`] used to represent the root of the crate.
pub const CRATE_NODE_ID: NodeId = NodeId::from_u32(0);
-/// When parsing and doing expansions, we initially give all AST nodes this AST
-/// node value. Then later, during expansion, we renumber them to have small,
-/// positive ids.
+/// When parsing and at the beginning of doing expansions, we initially give all AST nodes
+/// this dummy AST [`NodeId`]. Then, during a later phase of expansion, we renumber them
+/// to have small, positive IDs.
pub const DUMMY_NODE_ID: NodeId = NodeId::MAX;
impl NodeId {
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs
index 90bfb01..7e58426 100644
--- a/compiler/rustc_ast/src/token.rs
+++ b/compiler/rustc_ast/src/token.rs
@@ -11,11 +11,9 @@
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_macros::HashStable_Generic;
-use rustc_span::hygiene::ExpnKind;
-use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{kw, sym};
use rustc_span::symbol::{Ident, Symbol};
-use rustc_span::{self, edition::Edition, FileName, RealFileName, Span, DUMMY_SP};
+use rustc_span::{self, edition::Edition, Span, DUMMY_SP};
use std::borrow::Cow;
use std::{fmt, mem};
@@ -244,7 +242,7 @@
}
// `TokenKind` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(TokenKind, 16);
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
@@ -682,7 +680,7 @@
}
// `Nonterminal` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Nonterminal, 48);
#[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable)]
@@ -786,79 +784,6 @@
NtTT(tt) => tt.span(),
}
}
-
- /// This nonterminal looks like some specific enums from
- /// `proc-macro-hack` and `procedural-masquerade` crates.
- /// We need to maintain some special pretty-printing behavior for them due to incorrect
- /// asserts in old versions of those crates and their wide use in the ecosystem.
- /// See issue #73345 for more details.
- /// FIXME(#73933): Remove this eventually.
- pub fn pretty_printing_compatibility_hack(&self) -> bool {
- let item = match self {
- NtItem(item) => item,
- NtStmt(stmt) => match &stmt.kind {
- ast::StmtKind::Item(item) => item,
- _ => return false,
- },
- _ => return false,
- };
-
- let name = item.ident.name;
- if name == sym::ProceduralMasqueradeDummyType || name == sym::ProcMacroHack {
- if let ast::ItemKind::Enum(enum_def, _) = &item.kind {
- if let [variant] = &*enum_def.variants {
- return variant.ident.name == sym::Input;
- }
- }
- }
- false
- }
-
- // See issue #74616 for details
- pub fn ident_name_compatibility_hack(
- &self,
- orig_span: Span,
- source_map: &SourceMap,
- ) -> Option<(Ident, bool)> {
- if let NtIdent(ident, is_raw) = self {
- if let ExpnKind::Macro(_, macro_name) = orig_span.ctxt().outer_expn_data().kind {
- let filename = source_map.span_to_filename(orig_span);
- if let FileName::Real(RealFileName::Named(path)) = filename {
- let matches_prefix = |prefix, filename| {
- // Check for a path that ends with 'prefix*/src/<filename>'
- let mut iter = path.components().rev();
- iter.next().and_then(|p| p.as_os_str().to_str()) == Some(filename)
- && iter.next().and_then(|p| p.as_os_str().to_str()) == Some("src")
- && iter
- .next()
- .and_then(|p| p.as_os_str().to_str())
- .map_or(false, |p| p.starts_with(prefix))
- };
-
- if (macro_name == sym::impl_macros
- && matches_prefix("time-macros-impl", "lib.rs"))
- || (macro_name == sym::arrays && matches_prefix("js-sys", "lib.rs"))
- {
- let snippet = source_map.span_to_snippet(orig_span);
- if snippet.as_deref() == Ok("$name") {
- return Some((*ident, *is_raw));
- }
- }
-
- if macro_name == sym::tuple_from_req
- && (matches_prefix("actix-web", "extract.rs")
- || matches_prefix("actori-web", "extract.rs"))
- {
- let snippet = source_map.span_to_snippet(orig_span);
- if snippet.as_deref() == Ok("$T") {
- return Some((*ident, *is_raw));
- }
- }
- }
- }
- }
- None
- }
}
impl PartialEq for Nonterminal {
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index 9ac05f3..c5c3142 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -189,7 +189,7 @@
pub type TreeAndSpacing = (TokenTree, Spacing);
// `TokenStream` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(TokenStream, 8);
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable)]
diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs
index c37d4cd..b1ad29e 100644
--- a/compiler/rustc_ast/src/visit.rs
+++ b/compiler/rustc_ast/src/visit.rs
@@ -74,7 +74,7 @@
/// Each method of the `Visitor` trait is a hook to be potentially
/// overridden. Each method's default implementation recursively visits
/// the substructure of the input via the corresponding `walk` method;
-/// e.g., the `visit_mod` method by default calls `visit::walk_mod`.
+/// e.g., the `visit_item` method by default calls `visit::walk_item`.
///
/// If you want to ensure that your code handles every variant
/// explicitly, you need to override each method. (And you also need
@@ -87,9 +87,6 @@
fn visit_ident(&mut self, ident: Ident) {
walk_ident(self, ident);
}
- fn visit_mod(&mut self, m: &'ast Mod, _s: Span, _attrs: &[Attribute], _n: NodeId) {
- walk_mod(self, m);
- }
fn visit_foreign_item(&mut self, i: &'ast ForeignItem) {
walk_foreign_item(self, i)
}
@@ -154,8 +151,8 @@
fn visit_variant_data(&mut self, s: &'ast VariantData) {
walk_struct_def(self, s)
}
- fn visit_struct_field(&mut self, s: &'ast StructField) {
- walk_struct_field(self, s)
+ fn visit_field_def(&mut self, s: &'ast FieldDef) {
+ walk_field_def(self, s)
}
fn visit_enum_def(
&mut self,
@@ -211,11 +208,11 @@
fn visit_fn_header(&mut self, _header: &'ast FnHeader) {
// Nothing to do
}
- fn visit_field(&mut self, f: &'ast Field) {
- walk_field(self, f)
+ fn visit_expr_field(&mut self, f: &'ast ExprField) {
+ walk_expr_field(self, f)
}
- fn visit_field_pattern(&mut self, fp: &'ast FieldPat) {
- walk_field_pattern(self, fp)
+ fn visit_pat_field(&mut self, fp: &'ast PatField) {
+ walk_pat_field(self, fp)
}
}
@@ -238,14 +235,10 @@
}
pub fn walk_crate<'a, V: Visitor<'a>>(visitor: &mut V, krate: &'a Crate) {
- visitor.visit_mod(&krate.module, krate.span, &krate.attrs, CRATE_NODE_ID);
+ walk_list!(visitor, visit_item, &krate.items);
walk_list!(visitor, visit_attribute, &krate.attrs);
}
-pub fn walk_mod<'a, V: Visitor<'a>>(visitor: &mut V, module: &'a Mod) {
- walk_list!(visitor, visit_item, &module.items);
-}
-
pub fn walk_local<'a, V: Visitor<'a>>(visitor: &mut V, local: &'a Local) {
for attr in local.attrs.iter() {
visitor.visit_attribute(attr);
@@ -297,7 +290,12 @@
let kind = FnKind::Fn(FnCtxt::Free, item.ident, sig, &item.vis, body.as_deref());
visitor.visit_fn(kind, item.span, item.id)
}
- ItemKind::Mod(ref module) => visitor.visit_mod(module, item.span, &item.attrs, item.id),
+ ItemKind::Mod(_unsafety, ref mod_kind) => match mod_kind {
+ ModKind::Loaded(items, _inline, _inner_span) => {
+ walk_list!(visitor, visit_item, items)
+ }
+ ModKind::Unloaded => {}
+ },
ItemKind::ForeignMod(ref foreign_module) => {
walk_list!(visitor, visit_foreign_item, &foreign_module.items);
}
@@ -366,13 +364,13 @@
walk_list!(visitor, visit_attribute, &variant.attrs);
}
-pub fn walk_field<'a, V: Visitor<'a>>(visitor: &mut V, f: &'a Field) {
+pub fn walk_expr_field<'a, V: Visitor<'a>>(visitor: &mut V, f: &'a ExprField) {
visitor.visit_expr(&f.expr);
visitor.visit_ident(f.ident);
walk_list!(visitor, visit_attribute, f.attrs.iter());
}
-pub fn walk_field_pattern<'a, V: Visitor<'a>>(visitor: &mut V, fp: &'a FieldPat) {
+pub fn walk_pat_field<'a, V: Visitor<'a>>(visitor: &mut V, fp: &'a PatField) {
visitor.visit_ident(fp.ident);
visitor.visit_pat(&fp.pat);
walk_list!(visitor, visit_attribute, fp.attrs.iter());
@@ -511,7 +509,7 @@
}
PatKind::Struct(ref path, ref fields, _) => {
visitor.visit_path(path, pattern.id);
- walk_list!(visitor, visit_field_pattern, fields);
+ walk_list!(visitor, visit_pat_field, fields);
}
PatKind::Box(ref subpattern)
| PatKind::Ref(ref subpattern, _)
@@ -670,16 +668,16 @@
}
pub fn walk_struct_def<'a, V: Visitor<'a>>(visitor: &mut V, struct_definition: &'a VariantData) {
- walk_list!(visitor, visit_struct_field, struct_definition.fields());
+ walk_list!(visitor, visit_field_def, struct_definition.fields());
}
-pub fn walk_struct_field<'a, V: Visitor<'a>>(visitor: &mut V, struct_field: &'a StructField) {
- visitor.visit_vis(&struct_field.vis);
- if let Some(ident) = struct_field.ident {
+pub fn walk_field_def<'a, V: Visitor<'a>>(visitor: &mut V, field: &'a FieldDef) {
+ visitor.visit_vis(&field.vis);
+ if let Some(ident) = field.ident {
visitor.visit_ident(ident);
}
- visitor.visit_ty(&struct_field.ty);
- walk_list!(visitor, visit_attribute, &struct_field.attrs);
+ visitor.visit_ty(&field.ty);
+ walk_list!(visitor, visit_attribute, &field.attrs);
}
pub fn walk_block<'a, V: Visitor<'a>>(visitor: &mut V, block: &'a Block) {
@@ -723,10 +721,10 @@
visitor.visit_expr(element);
visitor.visit_anon_const(count)
}
- ExprKind::Struct(ref path, ref fields, ref optional_base) => {
- visitor.visit_path(path, expression.id);
- walk_list!(visitor, visit_field, fields);
- match optional_base {
+ ExprKind::Struct(ref se) => {
+ visitor.visit_path(&se.path, expression.id);
+ walk_list!(visitor, visit_expr_field, &se.fields);
+ match &se.rest {
StructRest::Base(expr) => visitor.visit_expr(expr),
StructRest::Rest(_span) => {}
StructRest::None => {}
diff --git a/compiler/rustc_ast_lowering/Cargo.toml b/compiler/rustc_ast_lowering/Cargo.toml
index 177a906..0cced00 100644
--- a/compiler/rustc_ast_lowering/Cargo.toml
+++ b/compiler/rustc_ast_lowering/Cargo.toml
@@ -19,4 +19,4 @@
rustc_errors = { path = "../rustc_errors" }
rustc_session = { path = "../rustc_session" }
rustc_ast = { path = "../rustc_ast" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs
index 4d6afd2..32fb8d1 100644
--- a/compiler/rustc_ast_lowering/src/expr.rs
+++ b/compiler/rustc_ast_lowering/src/expr.rs
@@ -9,7 +9,9 @@
use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def::Res;
+use rustc_hir::definitions::DefPathData;
use rustc_session::parse::feature_err;
+use rustc_span::hygiene::ExpnId;
use rustc_span::source_map::{respan, DesugaringKind, Span, Spanned};
use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::{hygiene::ForLoopLoc, DUMMY_SP};
@@ -42,8 +44,12 @@
}
ExprKind::Tup(ref elts) => hir::ExprKind::Tup(self.lower_exprs(elts)),
ExprKind::Call(ref f, ref args) => {
- let f = self.lower_expr(f);
- hir::ExprKind::Call(f, self.lower_exprs(args))
+ if let Some(legacy_args) = self.resolver.legacy_const_generic_args(f) {
+ self.lower_legacy_const_generics((**f).clone(), args.clone(), &legacy_args)
+ } else {
+ let f = self.lower_expr(f);
+ hir::ExprKind::Call(f, self.lower_exprs(args))
+ }
}
ExprKind::MethodCall(ref seg, ref args, span) => {
let hir_seg = self.arena.alloc(self.lower_path_segment(
@@ -91,6 +97,23 @@
ExprKind::Let(ref pat, ref scrutinee) => {
self.lower_expr_if_let(e.span, pat, scrutinee, then, else_opt.as_deref())
}
+ ExprKind::Paren(ref paren) => match paren.peel_parens().kind {
+ ExprKind::Let(ref pat, ref scrutinee) => {
+ // A user has written `if (let Some(x) = foo) {`; we want to avoid
+ // confusing them with mentions of nightly features.
+ // If this logic is changed, you will also likely need to touch
+ // `unused::UnusedParens::check_expr`.
+ self.if_let_expr_with_parens(cond, &paren.peel_parens());
+ self.lower_expr_if_let(
+ e.span,
+ pat,
+ scrutinee,
+ then,
+ else_opt.as_deref(),
+ )
+ }
+ _ => self.lower_expr_if(cond, then, else_opt.as_deref()),
+ },
_ => self.lower_expr_if(cond, then, else_opt.as_deref()),
},
ExprKind::While(ref cond, ref body, opt_label) => self
@@ -201,8 +224,8 @@
}
ExprKind::InlineAsm(ref asm) => self.lower_expr_asm(e.span, asm),
ExprKind::LlvmInlineAsm(ref asm) => self.lower_expr_llvm_asm(asm),
- ExprKind::Struct(ref path, ref fields, ref rest) => {
- let rest = match rest {
+ ExprKind::Struct(ref se) => {
+ let rest = match &se.rest {
StructRest::Base(e) => Some(self.lower_expr(e)),
StructRest::Rest(sp) => {
self.sess
@@ -217,11 +240,12 @@
self.arena.alloc(self.lower_qpath(
e.id,
&None,
- path,
+ &se.path,
ParamMode::Optional,
ImplTraitContext::disallowed(),
)),
- self.arena.alloc_from_iter(fields.iter().map(|x| self.lower_field(x))),
+ self.arena
+ .alloc_from_iter(se.fields.iter().map(|x| self.lower_expr_field(x))),
rest,
)
}
@@ -235,9 +259,18 @@
ex.span = e.span;
}
// Merge attributes into the inner expression.
- let mut attrs: Vec<_> = e.attrs.iter().map(|a| self.lower_attr(a)).collect();
- attrs.extend::<Vec<_>>(ex.attrs.into());
- ex.attrs = attrs.into();
+ if !e.attrs.is_empty() {
+ let old_attrs = self.attrs.get(&ex.hir_id).map(|la| *la).unwrap_or(&[]);
+ self.attrs.insert(
+ ex.hir_id,
+ &*self.arena.alloc_from_iter(
+ e.attrs
+ .iter()
+ .map(|a| self.lower_attr(a))
+ .chain(old_attrs.iter().cloned()),
+ ),
+ );
+ }
return ex;
}
@@ -249,20 +282,17 @@
ExprKind::MacCall(_) => panic!("{:?} shouldn't exist here", e.span),
};
- hir::Expr {
- hir_id: self.lower_node_id(e.id),
- kind,
- span: e.span,
- attrs: e.attrs.iter().map(|a| self.lower_attr(a)).collect::<Vec<_>>().into(),
- }
+ let hir_id = self.lower_node_id(e.id);
+ self.lower_attrs(hir_id, &e.attrs);
+ hir::Expr { hir_id, kind, span: e.span }
})
}
fn lower_unop(&mut self, u: UnOp) -> hir::UnOp {
match u {
- UnOp::Deref => hir::UnOp::UnDeref,
- UnOp::Not => hir::UnOp::UnNot,
- UnOp::Neg => hir::UnOp::UnNeg,
+ UnOp::Deref => hir::UnOp::Deref,
+ UnOp::Not => hir::UnOp::Not,
+ UnOp::Neg => hir::UnOp::Neg,
}
}
@@ -292,6 +322,73 @@
}
}
+ fn lower_legacy_const_generics(
+ &mut self,
+ mut f: Expr,
+ args: Vec<AstP<Expr>>,
+ legacy_args_idx: &[usize],
+ ) -> hir::ExprKind<'hir> {
+ let path = match f.kind {
+ ExprKind::Path(None, ref mut path) => path,
+ _ => unreachable!(),
+ };
+
+ // Split the arguments into const generics and normal arguments
+ let mut real_args = vec![];
+ let mut generic_args = vec![];
+ for (idx, arg) in args.into_iter().enumerate() {
+ if legacy_args_idx.contains(&idx) {
+ let parent_def_id = self.current_hir_id_owner.last().unwrap().0;
+ let node_id = self.resolver.next_node_id();
+
+ // Add a definition for the in-band const def.
+ self.resolver.create_def(
+ parent_def_id,
+ node_id,
+ DefPathData::AnonConst,
+ ExpnId::root(),
+ arg.span,
+ );
+
+ let anon_const = AnonConst { id: node_id, value: arg };
+ generic_args.push(AngleBracketedArg::Arg(GenericArg::Const(anon_const)));
+ } else {
+ real_args.push(arg);
+ }
+ }
+
+ // Add generic args to the last element of the path.
+ let last_segment = path.segments.last_mut().unwrap();
+ assert!(last_segment.args.is_none());
+ last_segment.args = Some(AstP(GenericArgs::AngleBracketed(AngleBracketedArgs {
+ span: DUMMY_SP,
+ args: generic_args,
+ })));
+
+ // Now lower everything as normal.
+ let f = self.lower_expr(&f);
+ hir::ExprKind::Call(f, self.lower_exprs(&real_args))
+ }
+
+ fn if_let_expr_with_parens(&mut self, cond: &Expr, paren: &Expr) {
+ let start = cond.span.until(paren.span);
+ let end = paren.span.shrink_to_hi().until(cond.span.shrink_to_hi());
+ self.sess
+ .struct_span_err(
+ vec![start, end],
+ "invalid parentheses around `let` expression in `if let`",
+ )
+ .multipart_suggestion(
+ "`if let` needs to be written without parentheses",
+ vec![(start, String::new()), (end, String::new())],
+ rustc_errors::Applicability::MachineApplicable,
+ )
+ .emit();
+ // Ideally, we'd remove the feature gating of a `let` expression since we are already
+ // complaining about it here, but `feature_gate::check_crate` has already run by now:
+ // self.sess.parse_sess.gated_spans.ungate_last(sym::let_chains, paren.span);
+ }
+
/// Emit an error and lower `ast::ExprKind::Let(pat, scrutinee)` into:
/// ```rust
/// match scrutinee { pats => true, _ => false }
@@ -302,8 +399,10 @@
if self.sess.opts.unstable_features.is_nightly_build() {
self.sess
.struct_span_err(span, "`let` expressions are not supported here")
- .note("only supported directly in conditions of `if`- and `while`-expressions")
- .note("as well as when nested within `&&` and parenthesis in those conditions")
+ .note(
+ "only supported directly without parentheses in conditions of `if`- and \
+ `while`-expressions, as well as in `let` chains within parentheses",
+ )
.emit();
} else {
self.sess
@@ -347,8 +446,9 @@
) -> hir::ExprKind<'hir> {
macro_rules! make_if {
($opt:expr) => {{
+ let cond = self.lower_expr(cond);
let then_expr = self.lower_block_expr(then);
- hir::ExprKind::If(self.lower_expr(cond), self.arena.alloc(then_expr), $opt)
+ hir::ExprKind::If(cond, self.arena.alloc(then_expr), $opt)
}};
}
if let Some(rslt) = else_opt {
@@ -525,14 +625,9 @@
hir::Guard::If(self.lower_expr(cond))
}
});
- hir::Arm {
- hir_id: self.next_id(),
- attrs: self.lower_attrs(&arm.attrs),
- pat,
- guard,
- body: self.lower_expr(&arm.body),
- span: arm.span,
- }
+ let hir_id = self.next_id();
+ self.lower_attrs(hir_id, &arm.attrs);
+ hir::Arm { hir_id, pat, guard, body: self.lower_expr(&arm.body), span: arm.span }
}
/// Lower an `async` construct to a generator that is then wrapped so it implements `Future`.
@@ -576,7 +671,7 @@
Ident::with_dummy_span(sym::_task_context),
hir::BindingAnnotation::Mutable,
);
- let param = hir::Param { attrs: &[], hir_id: self.next_id(), pat, ty_span: span, span };
+ let param = hir::Param { hir_id: self.next_id(), pat, ty_span: span, span };
let params = arena_vec![self; param];
let body_id = self.lower_body(move |this| {
@@ -597,12 +692,8 @@
span,
Some(hir::Movability::Static),
);
- let generator = hir::Expr {
- hir_id: self.lower_node_id(closure_node_id),
- kind: generator_kind,
- span,
- attrs: ThinVec::new(),
- };
+ let generator =
+ hir::Expr { hir_id: self.lower_node_id(closure_node_id), kind: generator_kind, span };
// `future::from_generator`:
let unstable_span =
@@ -756,7 +847,6 @@
hir_id: loop_hir_id,
kind: hir::ExprKind::Loop(loop_block, None, hir::LoopSource::Loop, span),
span,
- attrs: ThinVec::new(),
});
// mut pinned => loop { ... }
@@ -933,7 +1023,7 @@
// Introduce a `let` for destructuring: `let (lhs1, lhs2) = t`.
let destructure_let = self.stmt_let_pat(
- ThinVec::new(),
+ None,
whole_span,
Some(rhs),
pat,
@@ -1021,10 +1111,10 @@
}
}
// Structs.
- ExprKind::Struct(path, fields, rest) => {
- let field_pats = self.arena.alloc_from_iter(fields.iter().map(|f| {
+ ExprKind::Struct(se) => {
+ let field_pats = self.arena.alloc_from_iter(se.fields.iter().map(|f| {
let pat = self.destructure_assign(&f.expr, eq_sign_span, assignments);
- hir::FieldPat {
+ hir::PatField {
hir_id: self.next_id(),
ident: f.ident,
pat,
@@ -1035,11 +1125,11 @@
let qpath = self.lower_qpath(
lhs.id,
&None,
- path,
+ &se.path,
ParamMode::Optional,
ImplTraitContext::disallowed(),
);
- let fields_omitted = match rest {
+ let fields_omitted = match &se.rest {
StructRest::Base(e) => {
self.sess
.struct_span_err(
@@ -1155,7 +1245,7 @@
e1.iter().map(|e| ("start", e)).chain(e2.iter().map(|e| ("end", e))).map(|(s, e)| {
let expr = self.lower_expr(&e);
let ident = Ident::new(Symbol::intern(s), e.span);
- self.field(ident, expr, e.span)
+ self.expr_field(ident, expr, e.span)
}),
);
@@ -1242,84 +1332,83 @@
}
fn lower_expr_asm(&mut self, sp: Span, asm: &InlineAsm) -> hir::ExprKind<'hir> {
- if self.sess.asm_arch.is_none() {
+ // Rustdoc needs to support asm! from foreign architectures: don't try
+ // lowering the register constraints in this case.
+ let asm_arch = if self.sess.opts.actually_rustdoc { None } else { self.sess.asm_arch };
+ if asm_arch.is_none() && !self.sess.opts.actually_rustdoc {
struct_span_err!(self.sess, sp, E0472, "asm! is unsupported on this target").emit();
}
if asm.options.contains(InlineAsmOptions::ATT_SYNTAX)
- && !matches!(
- self.sess.asm_arch,
- Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64)
- )
+ && !matches!(asm_arch, Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64))
+ && !self.sess.opts.actually_rustdoc
{
self.sess
.struct_span_err(sp, "the `att_syntax` option is only supported on x86")
.emit();
}
- // Lower operands to HIR, filter_map skips any operands with invalid
- // register classes.
+ // Lower operands to HIR. We use dummy register classes if an error
+ // occurs during lowering because we still need to be able to produce a
+ // valid HIR.
let sess = self.sess;
let operands: Vec<_> = asm
.operands
.iter()
- .filter_map(|(op, op_sp)| {
- let lower_reg = |reg| {
- Some(match reg {
- InlineAsmRegOrRegClass::Reg(s) => asm::InlineAsmRegOrRegClass::Reg(
+ .map(|(op, op_sp)| {
+ let lower_reg = |reg| match reg {
+ InlineAsmRegOrRegClass::Reg(s) => {
+ asm::InlineAsmRegOrRegClass::Reg(if let Some(asm_arch) = asm_arch {
asm::InlineAsmReg::parse(
- sess.asm_arch?,
+ asm_arch,
|feature| sess.target_features.contains(&Symbol::intern(feature)),
&sess.target,
s,
)
- .map_err(|e| {
+ .unwrap_or_else(|e| {
let msg = format!("invalid register `{}`: {}", s.as_str(), e);
sess.struct_span_err(*op_sp, &msg).emit();
+ asm::InlineAsmReg::Err
})
- .ok()?,
- ),
- InlineAsmRegOrRegClass::RegClass(s) => {
- asm::InlineAsmRegOrRegClass::RegClass(
- asm::InlineAsmRegClass::parse(sess.asm_arch?, s)
- .map_err(|e| {
- let msg = format!(
- "invalid register class `{}`: {}",
- s.as_str(),
- e
- );
- sess.struct_span_err(*op_sp, &msg).emit();
- })
- .ok()?,
- )
- }
- })
+ } else {
+ asm::InlineAsmReg::Err
+ })
+ }
+ InlineAsmRegOrRegClass::RegClass(s) => {
+ asm::InlineAsmRegOrRegClass::RegClass(if let Some(asm_arch) = asm_arch {
+ asm::InlineAsmRegClass::parse(asm_arch, s).unwrap_or_else(|e| {
+ let msg = format!("invalid register class `{}`: {}", s.as_str(), e);
+ sess.struct_span_err(*op_sp, &msg).emit();
+ asm::InlineAsmRegClass::Err
+ })
+ } else {
+ asm::InlineAsmRegClass::Err
+ })
+ }
};
- // lower_reg is executed last because we need to lower all
- // sub-expressions even if we throw them away later.
let op = match *op {
InlineAsmOperand::In { reg, ref expr } => hir::InlineAsmOperand::In {
+ reg: lower_reg(reg),
expr: self.lower_expr_mut(expr),
- reg: lower_reg(reg)?,
},
InlineAsmOperand::Out { reg, late, ref expr } => hir::InlineAsmOperand::Out {
+ reg: lower_reg(reg),
late,
expr: expr.as_ref().map(|expr| self.lower_expr_mut(expr)),
- reg: lower_reg(reg)?,
},
InlineAsmOperand::InOut { reg, late, ref expr } => {
hir::InlineAsmOperand::InOut {
+ reg: lower_reg(reg),
late,
expr: self.lower_expr_mut(expr),
- reg: lower_reg(reg)?,
}
}
InlineAsmOperand::SplitInOut { reg, late, ref in_expr, ref out_expr } => {
hir::InlineAsmOperand::SplitInOut {
+ reg: lower_reg(reg),
late,
in_expr: self.lower_expr_mut(in_expr),
out_expr: out_expr.as_ref().map(|expr| self.lower_expr_mut(expr)),
- reg: lower_reg(reg)?,
}
}
InlineAsmOperand::Const { ref expr } => {
@@ -1329,17 +1418,11 @@
hir::InlineAsmOperand::Sym { expr: self.lower_expr_mut(expr) }
}
};
- Some((op, *op_sp))
+ (op, *op_sp)
})
.collect();
- // Stop if there were any errors when lowering the register classes
- if operands.len() != asm.operands.len() || sess.asm_arch.is_none() {
- return hir::ExprKind::Err;
- }
-
// Validate template modifiers against the register classes for the operands
- let asm_arch = sess.asm_arch.unwrap();
for p in &asm.template {
if let InlineAsmTemplatePiece::Placeholder {
operand_idx,
@@ -1354,7 +1437,10 @@
| hir::InlineAsmOperand::InOut { reg, .. }
| hir::InlineAsmOperand::SplitInOut { reg, .. } => {
let class = reg.reg_class();
- let valid_modifiers = class.valid_modifiers(asm_arch);
+ if class == asm::InlineAsmRegClass::Err {
+ continue;
+ }
+ let valid_modifiers = class.valid_modifiers(asm_arch.unwrap());
if !valid_modifiers.contains(&modifier) {
let mut err = sess.struct_span_err(
placeholder_span,
@@ -1417,7 +1503,10 @@
// features. We check that at least one type is available for
// the current target.
let reg_class = reg.reg_class();
- for &(_, feature) in reg_class.supported_types(asm_arch) {
+ if reg_class == asm::InlineAsmRegClass::Err {
+ continue;
+ }
+ for &(_, feature) in reg_class.supported_types(asm_arch.unwrap()) {
if let Some(feature) = feature {
if self.sess.target_features.contains(&Symbol::intern(feature)) {
required_features.clear();
@@ -1569,8 +1658,8 @@
hir::ExprKind::LlvmInlineAsm(self.arena.alloc(hir_asm))
}
- fn lower_field(&mut self, f: &Field) -> hir::Field<'hir> {
- hir::Field {
+ fn lower_expr_field(&mut self, f: &ExprField) -> hir::ExprField<'hir> {
+ hir::ExprField {
hir_id: self.next_id(),
ident: f.ident,
expr: self.lower_expr(&f.expr),
@@ -1692,7 +1781,7 @@
// `let mut __next`
let next_let = self.stmt_let_pat(
- ThinVec::new(),
+ None,
desugared_span,
None,
next_pat,
@@ -1702,7 +1791,7 @@
// `let <pat> = __next`
let pat = self.lower_pat(pat);
let pat_let = self.stmt_let_pat(
- ThinVec::new(),
+ None,
desugared_span,
Some(next_expr),
pat,
@@ -1726,12 +1815,8 @@
hir::LoopSource::ForLoop,
e.span.with_hi(orig_head_span.hi()),
);
- let loop_expr = self.arena.alloc(hir::Expr {
- hir_id: self.lower_node_id(e.id),
- kind,
- span: e.span,
- attrs: ThinVec::new(),
- });
+ let loop_expr =
+ self.arena.alloc(hir::Expr { hir_id: self.lower_node_id(e.id), kind, span: e.span });
// `mut iter => { ... }`
let iter_arm = self.arm(iter_pat, loop_expr);
@@ -2066,21 +2151,21 @@
kind: hir::ExprKind<'hir>,
attrs: AttrVec,
) -> hir::Expr<'hir> {
- hir::Expr { hir_id: self.next_id(), kind, span, attrs }
+ let hir_id = self.next_id();
+ self.lower_attrs(hir_id, &attrs);
+ hir::Expr { hir_id, kind, span }
}
- fn field(&mut self, ident: Ident, expr: &'hir hir::Expr<'hir>, span: Span) -> hir::Field<'hir> {
- hir::Field { hir_id: self.next_id(), ident, span, expr, is_shorthand: false }
+ fn expr_field(
+ &mut self,
+ ident: Ident,
+ expr: &'hir hir::Expr<'hir>,
+ span: Span,
+ ) -> hir::ExprField<'hir> {
+ hir::ExprField { hir_id: self.next_id(), ident, span, expr, is_shorthand: false }
}
fn arm(&mut self, pat: &'hir hir::Pat<'hir>, expr: &'hir hir::Expr<'hir>) -> hir::Arm<'hir> {
- hir::Arm {
- hir_id: self.next_id(),
- attrs: &[],
- pat,
- guard: None,
- span: expr.span,
- body: expr,
- }
+ hir::Arm { hir_id: self.next_id(), pat, guard: None, span: expr.span, body: expr }
}
}
diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs
index 1efe83c..edd0c5f 100644
--- a/compiler/rustc_ast_lowering/src/item.rs
+++ b/compiler/rustc_ast_lowering/src/item.rs
@@ -15,11 +15,11 @@
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::Span;
use rustc_target::spec::abi;
-
use smallvec::{smallvec, SmallVec};
-use std::collections::BTreeSet;
use tracing::debug;
+use std::mem;
+
pub(super) struct ItemLowerer<'a, 'lowering, 'hir> {
pub(super) lctx: &'a mut LoweringContext<'lowering, 'hir>,
}
@@ -34,32 +34,13 @@
}
impl<'a> Visitor<'a> for ItemLowerer<'a, '_, '_> {
- fn visit_mod(&mut self, m: &'a Mod, _s: Span, _attrs: &[Attribute], n: NodeId) {
- let hir_id = self.lctx.lower_node_id(n);
-
- self.lctx.modules.insert(
- hir_id,
- hir::ModuleItems {
- items: BTreeSet::new(),
- trait_items: BTreeSet::new(),
- impl_items: BTreeSet::new(),
- foreign_items: BTreeSet::new(),
- },
- );
-
- let old = self.lctx.current_module;
- self.lctx.current_module = hir_id;
- visit::walk_mod(self, m);
- self.lctx.current_module = old;
- }
-
fn visit_item(&mut self, item: &'a Item) {
let mut item_hir_id = None;
self.lctx.with_hir_id_owner(item.id, |lctx| {
lctx.without_in_scope_lifetime_defs(|lctx| {
if let Some(hir_item) = lctx.lower_item(item) {
- item_hir_id = Some(hir_item.hir_id);
- lctx.insert_item(hir_item);
+ let id = lctx.insert_item(hir_item);
+ item_hir_id = Some(id);
}
})
});
@@ -67,10 +48,18 @@
if let Some(hir_id) = item_hir_id {
self.lctx.with_parent_item_lifetime_defs(hir_id, |this| {
let this = &mut ItemLowerer { lctx: this };
- if let ItemKind::Impl(box ImplKind { ref of_trait, .. }) = item.kind {
- this.with_trait_impl_ref(of_trait, |this| visit::walk_item(this, item));
- } else {
- visit::walk_item(this, item);
+ match item.kind {
+ ItemKind::Mod(..) => {
+ let def_id = this.lctx.lower_node_id(item.id).expect_owner();
+ let old_current_module =
+ mem::replace(&mut this.lctx.current_module, def_id);
+ visit::walk_item(this, item);
+ this.lctx.current_module = old_current_module;
+ }
+ ItemKind::Impl(box ImplKind { ref of_trait, .. }) => {
+ this.with_trait_impl_ref(of_trait, |this| visit::walk_item(this, item));
+ }
+ _ => visit::walk_item(this, item),
}
});
}
@@ -92,15 +81,15 @@
self.lctx.with_hir_id_owner(item.id, |lctx| match ctxt {
AssocCtxt::Trait => {
let hir_item = lctx.lower_trait_item(item);
- let id = hir::TraitItemId { hir_id: hir_item.hir_id };
+ let id = hir_item.trait_item_id();
lctx.trait_items.insert(id, hir_item);
- lctx.modules.get_mut(&lctx.current_module).unwrap().trait_items.insert(id);
+ lctx.modules.entry(lctx.current_module).or_default().trait_items.insert(id);
}
AssocCtxt::Impl => {
let hir_item = lctx.lower_impl_item(item);
- let id = hir::ImplItemId { hir_id: hir_item.hir_id };
+ let id = hir_item.impl_item_id();
lctx.impl_items.insert(id, hir_item);
- lctx.modules.get_mut(&lctx.current_module).unwrap().impl_items.insert(id);
+ lctx.modules.entry(lctx.current_module).or_default().impl_items.insert(id);
}
});
@@ -111,9 +100,9 @@
self.lctx.allocate_hir_id_counter(item.id);
self.lctx.with_hir_id_owner(item.id, |lctx| {
let hir_item = lctx.lower_foreign_item(item);
- let id = hir::ForeignItemId { hir_id: hir_item.hir_id };
+ let id = hir_item.foreign_item_id();
lctx.foreign_items.insert(id, hir_item);
- lctx.modules.get_mut(&lctx.current_module).unwrap().foreign_items.insert(id);
+ lctx.modules.entry(lctx.current_module).or_default().foreign_items.insert(id);
});
visit::walk_foreign_item(self, item);
@@ -128,14 +117,14 @@
// only used when lowering a child item of a trait or impl.
fn with_parent_item_lifetime_defs<T>(
&mut self,
- parent_hir_id: hir::HirId,
+ parent_hir_id: hir::ItemId,
f: impl FnOnce(&mut LoweringContext<'_, '_>) -> T,
) -> T {
let old_len = self.in_scope_lifetimes.len();
let parent_generics = match self.items.get(&parent_hir_id).unwrap().kind {
hir::ItemKind::Impl(hir::Impl { ref generics, .. })
- | hir::ItemKind::Trait(_, _, ref generics, ..) => &generics.params[..],
+ | hir::ItemKind::Trait(_, _, ref generics, ..) => generics.params,
_ => &[],
};
let lt_def_names = parent_generics.iter().filter_map(|param| match param.kind {
@@ -157,7 +146,7 @@
&mut self,
f: impl FnOnce(&mut LoweringContext<'_, '_>) -> T,
) -> T {
- let old_in_scope_lifetimes = std::mem::replace(&mut self.in_scope_lifetimes, vec![]);
+ let old_in_scope_lifetimes = mem::replace(&mut self.in_scope_lifetimes, vec![]);
// this vector is only used when walking over impl headers,
// input types, and the like, and should not be non-empty in
@@ -172,12 +161,10 @@
res
}
- pub(super) fn lower_mod(&mut self, m: &Mod) -> hir::Mod<'hir> {
+ pub(super) fn lower_mod(&mut self, items: &[P<Item>], inner: Span) -> hir::Mod<'hir> {
hir::Mod {
- inner: m.inner,
- item_ids: self
- .arena
- .alloc_from_iter(m.items.iter().flat_map(|x| self.lower_item_id(x))),
+ inner,
+ item_ids: self.arena.alloc_from_iter(items.iter().flat_map(|x| self.lower_item_id(x))),
}
}
@@ -197,7 +184,9 @@
node_ids
.into_iter()
- .map(|node_id| hir::ItemId { id: self.allocate_hir_id_counter(node_id) })
+ .map(|node_id| hir::ItemId {
+ def_id: self.allocate_hir_id_counter(node_id).expect_owner(),
+ })
.collect()
}
@@ -228,37 +217,41 @@
pub fn lower_item(&mut self, i: &Item) -> Option<hir::Item<'hir>> {
let mut ident = i.ident;
let mut vis = self.lower_visibility(&i.vis, None);
- let attrs = self.lower_attrs(&i.attrs);
if let ItemKind::MacroDef(MacroDef { ref body, macro_rules }) = i.kind {
if !macro_rules || self.sess.contains_name(&i.attrs, sym::macro_export) {
let hir_id = self.lower_node_id(i.id);
+ self.lower_attrs(hir_id, &i.attrs);
let body = P(self.lower_mac_args(body));
self.exported_macros.push(hir::MacroDef {
ident,
vis,
- attrs,
- hir_id,
+ def_id: hir_id.expect_owner(),
span: i.span,
ast: MacroDef { body, macro_rules },
});
} else {
- self.non_exported_macro_attrs.extend(attrs.iter().cloned());
+ for a in i.attrs.iter() {
+ let a = self.lower_attr(a);
+ self.non_exported_macro_attrs.push(a);
+ }
}
return None;
}
- let kind = self.lower_item_kind(i.span, i.id, &mut ident, attrs, &mut vis, &i.kind);
-
- Some(hir::Item { hir_id: self.lower_node_id(i.id), ident, attrs, kind, vis, span: i.span })
+ let hir_id = self.lower_node_id(i.id);
+ let attrs = self.lower_attrs(hir_id, &i.attrs);
+ let kind = self.lower_item_kind(i.span, i.id, hir_id, &mut ident, attrs, &mut vis, &i.kind);
+ Some(hir::Item { def_id: hir_id.expect_owner(), ident, kind, vis, span: i.span })
}
fn lower_item_kind(
&mut self,
span: Span,
id: NodeId,
+ hir_id: hir::HirId,
ident: &mut Ident,
- attrs: &'hir [Attribute],
+ attrs: Option<&'hir [Attribute]>,
vis: &mut hir::Visibility<'hir>,
i: &ItemKind,
) -> hir::ItemKind<'hir> {
@@ -318,13 +311,18 @@
hir::ItemKind::Fn(sig, generics, body_id)
})
}
- ItemKind::Mod(ref m) => hir::ItemKind::Mod(self.lower_mod(m)),
+ ItemKind::Mod(_, ref mod_kind) => match mod_kind {
+ ModKind::Loaded(items, _, inner_span) => {
+ hir::ItemKind::Mod(self.lower_mod(items, *inner_span))
+ }
+ ModKind::Unloaded => panic!("`mod` items should have been loaded by now"),
+ },
ItemKind::ForeignMod(ref fm) => {
if fm.abi.is_none() {
- self.maybe_lint_missing_abi(span, id, abi::Abi::C);
+ self.maybe_lint_missing_abi(span, id, abi::Abi::C { unwind: false });
}
hir::ItemKind::ForeignMod {
- abi: fm.abi.map_or(abi::Abi::C, |abi| self.lower_abi(abi)),
+ abi: fm.abi.map_or(abi::Abi::C { unwind: false }, |abi| self.lower_abi(abi)),
items: self
.arena
.alloc_from_iter(fm.items.iter().map(|x| self.lower_foreign_item_ref(x))),
@@ -344,7 +342,7 @@
ty,
ImplTraitContext::OtherOpaqueTy {
capturable_lifetimes: &mut FxHashSet::default(),
- origin: hir::OpaqueTyOrigin::Misc,
+ origin: hir::OpaqueTyOrigin::TyAlias,
},
);
let generics = self.lower_generics(gen, ImplTraitContext::disallowed());
@@ -364,14 +362,14 @@
self.lower_generics(generics, ImplTraitContext::disallowed()),
),
ItemKind::Struct(ref struct_def, ref generics) => {
- let struct_def = self.lower_variant_data(struct_def);
+ let struct_def = self.lower_variant_data(hir_id, struct_def);
hir::ItemKind::Struct(
struct_def,
self.lower_generics(generics, ImplTraitContext::disallowed()),
)
}
ItemKind::Union(ref vdata, ref generics) => {
- let vdata = self.lower_variant_data(vdata);
+ let vdata = self.lower_variant_data(hir_id, vdata);
hir::ItemKind::Union(
vdata,
self.lower_generics(generics, ImplTraitContext::disallowed()),
@@ -387,8 +385,6 @@
self_ty: ref ty,
items: ref impl_items,
}) => {
- let def_id = self.resolver.local_def_id(id);
-
// Lower the "impl header" first. This ordering is important
// for in-band lifetimes! Consider `'a` here:
//
@@ -402,10 +398,10 @@
// method, it will not be considered an in-band
// lifetime to be added, but rather a reference to a
// parent lifetime.
- let lowered_trait_impl_id = self.lower_node_id(id);
+ let lowered_trait_def_id = self.lower_node_id(id).expect_owner();
let (generics, (trait_ref, lowered_ty)) = self.add_in_band_defs(
ast_generics,
- def_id,
+ lowered_trait_def_id,
AnonymousLifetimeMode::CreateParameter,
|this, _| {
let trait_ref = trait_ref.as_ref().map(|trait_ref| {
@@ -417,7 +413,7 @@
this.trait_impls
.entry(def_id)
.or_default()
- .push(lowered_trait_impl_id);
+ .push(lowered_trait_def_id);
}
}
@@ -506,7 +502,7 @@
id: NodeId,
vis: &mut hir::Visibility<'hir>,
ident: &mut Ident,
- attrs: &'hir [Attribute],
+ attrs: Option<&'hir [Attribute]>,
) -> hir::ItemKind<'hir> {
debug!("lower_use_tree(tree={:?})", tree);
debug!("lower_use_tree: vis = {:?}", vis);
@@ -555,11 +551,13 @@
let path = this.lower_path_extra(res, &path, ParamMode::Explicit, None);
let kind = hir::ItemKind::Use(path, hir::UseKind::Single);
let vis = this.rebuild_vis(&vis);
+ if let Some(attrs) = attrs {
+ this.attrs.insert(new_id, attrs);
+ }
this.insert_item(hir::Item {
- hir_id: new_id,
+ def_id: new_id.expect_owner(),
ident,
- attrs,
kind,
vis,
span,
@@ -627,11 +625,13 @@
let kind =
this.lower_use_tree(use_tree, &prefix, id, &mut vis, &mut ident, attrs);
+ if let Some(attrs) = attrs {
+ this.attrs.insert(new_hir_id, attrs);
+ }
this.insert_item(hir::Item {
- hir_id: new_hir_id,
+ def_id: new_hir_id.expect_owner(),
ident,
- attrs,
kind,
vis,
span: use_tree.span,
@@ -700,11 +700,12 @@
}
fn lower_foreign_item(&mut self, i: &ForeignItem) -> hir::ForeignItem<'hir> {
- let def_id = self.resolver.local_def_id(i.id);
+ let hir_id = self.lower_node_id(i.id);
+ let def_id = hir_id.expect_owner();
+ self.lower_attrs(hir_id, &i.attrs);
hir::ForeignItem {
- hir_id: self.lower_node_id(i.id),
+ def_id,
ident: i.ident,
- attrs: self.lower_attrs(&i.attrs),
kind: match i.kind {
ForeignItemKind::Fn(box FnKind(_, ref sig, ref generics, _)) => {
let fdec = &sig.decl;
@@ -737,7 +738,7 @@
fn lower_foreign_item_ref(&mut self, i: &ForeignItem) -> hir::ForeignItemRef<'hir> {
hir::ForeignItemRef {
- id: hir::ForeignItemId { hir_id: self.lower_node_id(i.id) },
+ id: hir::ForeignItemId { def_id: self.lower_node_id(i.id).expect_owner() },
ident: i.ident,
span: i.span,
vis: self.lower_visibility(&i.vis, Some(i.id)),
@@ -749,33 +750,47 @@
}
fn lower_variant(&mut self, v: &Variant) -> hir::Variant<'hir> {
+ let id = self.lower_node_id(v.id);
+ self.lower_attrs(id, &v.attrs);
hir::Variant {
- attrs: self.lower_attrs(&v.attrs),
- data: self.lower_variant_data(&v.data),
+ id,
+ data: self.lower_variant_data(id, &v.data),
disr_expr: v.disr_expr.as_ref().map(|e| self.lower_anon_const(e)),
- id: self.lower_node_id(v.id),
ident: v.ident,
span: v.span,
}
}
- fn lower_variant_data(&mut self, vdata: &VariantData) -> hir::VariantData<'hir> {
+ fn lower_variant_data(
+ &mut self,
+ parent_id: hir::HirId,
+ vdata: &VariantData,
+ ) -> hir::VariantData<'hir> {
match *vdata {
VariantData::Struct(ref fields, recovered) => hir::VariantData::Struct(
self.arena
- .alloc_from_iter(fields.iter().enumerate().map(|f| self.lower_struct_field(f))),
+ .alloc_from_iter(fields.iter().enumerate().map(|f| self.lower_field_def(f))),
recovered,
),
- VariantData::Tuple(ref fields, id) => hir::VariantData::Tuple(
- self.arena
- .alloc_from_iter(fields.iter().enumerate().map(|f| self.lower_struct_field(f))),
- self.lower_node_id(id),
- ),
- VariantData::Unit(id) => hir::VariantData::Unit(self.lower_node_id(id)),
+ VariantData::Tuple(ref fields, id) => {
+ let ctor_id = self.lower_node_id(id);
+ self.alias_attrs(ctor_id, parent_id);
+ hir::VariantData::Tuple(
+ self.arena.alloc_from_iter(
+ fields.iter().enumerate().map(|f| self.lower_field_def(f)),
+ ),
+ ctor_id,
+ )
+ }
+ VariantData::Unit(id) => {
+ let ctor_id = self.lower_node_id(id);
+ self.alias_attrs(ctor_id, parent_id);
+ hir::VariantData::Unit(ctor_id)
+ }
}
}
- fn lower_struct_field(&mut self, (index, f): (usize, &StructField)) -> hir::StructField<'hir> {
+ fn lower_field_def(&mut self, (index, f): (usize, &FieldDef)) -> hir::FieldDef<'hir> {
let ty = if let TyKind::Path(ref qself, ref path) = f.ty.kind {
let t = self.lower_path_ty(
&f.ty,
@@ -788,9 +803,11 @@
} else {
self.lower_ty(&f.ty, ImplTraitContext::disallowed())
};
- hir::StructField {
+ let hir_id = self.lower_node_id(f.id);
+ self.lower_attrs(hir_id, &f.attrs);
+ hir::FieldDef {
span: f.span,
- hir_id: self.lower_node_id(f.id),
+ hir_id,
ident: match f.ident {
Some(ident) => ident,
// FIXME(jseyfried): positional field hygiene.
@@ -798,12 +815,12 @@
},
vis: self.lower_visibility(&f.vis, None),
ty,
- attrs: self.lower_attrs(&f.attrs),
}
}
fn lower_trait_item(&mut self, i: &AssocItem) -> hir::TraitItem<'hir> {
- let trait_item_def_id = self.resolver.local_def_id(i.id);
+ let hir_id = self.lower_node_id(i.id);
+ let trait_item_def_id = hir_id.expect_owner();
let (generics, kind) = match i.kind {
AssocItemKind::Const(_, ref ty, ref default) => {
@@ -836,14 +853,8 @@
AssocItemKind::MacCall(..) => panic!("macro item shouldn't exist at this point"),
};
- hir::TraitItem {
- hir_id: self.lower_node_id(i.id),
- ident: i.ident,
- attrs: self.lower_attrs(&i.attrs),
- generics,
- kind,
- span: i.span,
- }
+ self.lower_attrs(hir_id, &i.attrs);
+ hir::TraitItem { def_id: trait_item_def_id, ident: i.ident, generics, kind, span: i.span }
}
fn lower_trait_item_ref(&mut self, i: &AssocItem) -> hir::TraitItemRef {
@@ -857,7 +868,7 @@
}
AssocItemKind::MacCall(..) => unimplemented!(),
};
- let id = hir::TraitItemId { hir_id: self.lower_node_id(i.id) };
+ let id = hir::TraitItemId { def_id: self.lower_node_id(i.id).expect_owner() };
let defaultness = hir::Defaultness::Default { has_value: has_default };
hir::TraitItemRef { id, ident: i.ident, span: i.span, defaultness, kind }
}
@@ -907,7 +918,7 @@
ty,
ImplTraitContext::OtherOpaqueTy {
capturable_lifetimes: &mut FxHashSet::default(),
- origin: hir::OpaqueTyOrigin::Misc,
+ origin: hir::OpaqueTyOrigin::TyAlias,
},
);
hir::ImplItemKind::TyAlias(ty)
@@ -921,10 +932,11 @@
// Since `default impl` is not yet implemented, this is always true in impls.
let has_value = true;
let (defaultness, _) = self.lower_defaultness(i.kind.defaultness(), has_value);
+ let hir_id = self.lower_node_id(i.id);
+ self.lower_attrs(hir_id, &i.attrs);
hir::ImplItem {
- hir_id: self.lower_node_id(i.id),
+ def_id: hir_id.expect_owner(),
ident: i.ident,
- attrs: self.lower_attrs(&i.attrs),
generics,
vis: self.lower_visibility(&i.vis, None),
defaultness,
@@ -938,7 +950,7 @@
let has_value = true;
let (defaultness, _) = self.lower_defaultness(i.kind.defaultness(), has_value);
hir::ImplItemRef {
- id: hir::ImplItemId { hir_id: self.lower_node_id(i.id) },
+ id: hir::ImplItemId { def_id: self.lower_node_id(i.id).expect_owner() },
ident: i.ident,
span: i.span,
vis: self.lower_visibility(&i.vis, Some(i.id)),
@@ -1025,9 +1037,10 @@
}
fn lower_param(&mut self, param: &Param) -> hir::Param<'hir> {
+ let hir_id = self.lower_node_id(param.id);
+ self.lower_attrs(hir_id, &param.attrs);
hir::Param {
- attrs: self.lower_attrs(&param.attrs),
- hir_id: self.lower_node_id(param.id),
+ hir_id,
pat: self.lower_pat(&param.pat),
ty_span: param.ty.span,
span: param.span,
@@ -1159,11 +1172,9 @@
//
// If this is the simple case, this parameter will end up being the same as the
// original parameter, but with a different pattern id.
- let mut stmt_attrs = AttrVec::new();
- stmt_attrs.extend(parameter.attrs.iter().cloned());
+ let stmt_attrs = this.attrs.get(&parameter.hir_id).copied();
let (new_parameter_pat, new_parameter_id) = this.pat_ident(desugared_span, ident);
let new_parameter = hir::Param {
- attrs: parameter.attrs,
hir_id: parameter.hir_id,
pat: new_parameter_pat,
ty_span: parameter.ty_span,
@@ -1206,7 +1217,7 @@
);
let move_expr = this.expr_ident(desugared_span, ident, new_parameter_id);
let move_stmt = this.stmt_let_pat(
- AttrVec::new(),
+ None,
desugared_span,
Some(move_expr),
move_pat,
@@ -1323,8 +1334,8 @@
match ext {
Extern::None => abi::Abi::Rust,
Extern::Implicit => {
- self.maybe_lint_missing_abi(span, id, abi::Abi::C);
- abi::Abi::C
+ self.maybe_lint_missing_abi(span, id, abi::Abi::C { unwind: false });
+ abi::Abi::C { unwind: false }
}
Extern::Explicit(abi) => self.lower_abi(abi),
}
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index f076dca..f9872f8 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -12,7 +12,7 @@
//! For the simpler lowering steps, IDs and spans should be preserved. Unlike
//! expansion we do not preserve the process of lowering in the spans, so spans
//! should not be modified here. When creating a new node (as opposed to
-//! 'folding' an existing one), then you create a new ID using `next_id()`.
+//! "folding" an existing one), create a new ID using `next_id()`.
//!
//! You must ensure that IDs are unique. That means that you should only use the
//! ID from an AST node in a single HIR node (you can assume that AST node-IDs
@@ -26,7 +26,7 @@
//! span and spans don't need to be kept in order, etc. Where code is preserved
//! by lowering, it should have the same span as in the AST. Where HIR nodes are
//! new it is probably best to give a span for the whole AST node being lowered.
-//! All nodes should have real spans, don't use dummy spans. Tools are likely to
+//! All nodes should have real spans; don't use dummy spans. Tools are likely to
//! get confused if the spans from leaf AST nodes occur in multiple places
//! in the HIR, especially for multiple identifiers.
@@ -48,7 +48,7 @@
use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Namespace, PartialRes, PerNS, Res};
-use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId, CRATE_DEF_INDEX};
+use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId, CRATE_DEF_ID};
use rustc_hir::definitions::{DefKey, DefPathData, Definitions};
use rustc_hir::intravisit;
use rustc_hir::{ConstArg, GenericArg, ParamName};
@@ -95,11 +95,11 @@
/// librustc_middle is independent of the parser, we use dynamic dispatch here.
nt_to_tokenstream: NtToTokenstream,
- /// Used to allocate HIR nodes
+ /// Used to allocate HIR nodes.
arena: &'hir Arena<'hir>,
/// The items being lowered are collected here.
- items: BTreeMap<hir::HirId, hir::Item<'hir>>,
+ items: BTreeMap<hir::ItemId, hir::Item<'hir>>,
trait_items: BTreeMap<hir::TraitItemId, hir::TraitItem<'hir>>,
impl_items: BTreeMap<hir::ImplItemId, hir::ImplItem<'hir>>,
@@ -108,12 +108,14 @@
exported_macros: Vec<hir::MacroDef<'hir>>,
non_exported_macro_attrs: Vec<ast::Attribute>,
- trait_impls: BTreeMap<DefId, Vec<hir::HirId>>,
+ trait_impls: BTreeMap<DefId, Vec<LocalDefId>>,
- modules: BTreeMap<hir::HirId, hir::ModuleItems>,
+ modules: BTreeMap<LocalDefId, hir::ModuleItems>,
generator_kind: Option<hir::GeneratorKind>,
+ attrs: BTreeMap<hir::HirId, &'hir [Attribute]>,
+
/// When inside an `async` context, this is the `HirId` of the
/// `task_context` local bound to the resume argument of the generator.
task_context: Option<hir::HirId>,
@@ -128,7 +130,7 @@
is_in_trait_impl: bool,
is_in_dyn_type: bool,
- /// What to do when we encounter either an "anonymous lifetime
+ /// What to do when we encounter an "anonymous lifetime
/// reference". The term "anonymous" is meant to encompass both
/// `'_` lifetimes as well as fully elided cases where nothing is
/// written at all (e.g., `&T` or `std::cell::Ref<T>`).
@@ -158,7 +160,7 @@
/// vector.
in_scope_lifetimes: Vec<ParamName>,
- current_module: hir::HirId,
+ current_module: LocalDefId,
type_def_lifetime_params: DefIdMap<usize>,
@@ -175,6 +177,8 @@
fn item_generics_num_lifetimes(&self, def: DefId, sess: &Session) -> usize;
+ fn legacy_const_generic_args(&mut self, expr: &Expr) -> Option<Vec<usize>>;
+
/// Obtains resolution for a `NodeId` with a single resolution.
fn get_partial_res(&mut self, id: NodeId) -> Option<PartialRes>;
@@ -219,7 +223,7 @@
/// equivalent to a fresh universal parameter like `fn foo<T: Debug>(x: T)`.
///
/// Newly generated parameters should be inserted into the given `Vec`.
- Universal(&'b mut Vec<hir::GenericParam<'a>>),
+ Universal(&'b mut Vec<hir::GenericParam<'a>>, LocalDefId),
/// Treat `impl Trait` as shorthand for a new opaque type.
/// Example: `fn foo() -> impl Debug`, where `impl Debug` is conceptually
@@ -236,11 +240,13 @@
OtherOpaqueTy {
/// Set of lifetimes that this opaque type can capture, if it uses
/// them. This includes lifetimes bound since we entered this context.
- /// For example, in
+ /// For example:
///
+ /// ```
/// type A<'b> = impl for<'a> Trait<'a, Out = impl Sized + 'a>;
+ /// ```
///
- /// the inner opaque type captures `'a` because it uses it. It doesn't
+ /// Here the inner opaque type captures `'a` because it uses it. It doesn't
/// need to capture `'b` because it already inherits the lifetime
/// parameter from `A`.
// FIXME(impl_trait): but `required_region_bounds` will ICE later
@@ -272,7 +278,7 @@
fn reborrow<'this>(&'this mut self) -> ImplTraitContext<'this, 'a> {
use self::ImplTraitContext::*;
match self {
- Universal(params) => Universal(params),
+ Universal(params, parent) => Universal(params, *parent),
ReturnPositionOpaqueTy { fn_def_id, origin } => {
ReturnPositionOpaqueTy { fn_def_id: *fn_def_id, origin: *origin }
}
@@ -305,6 +311,7 @@
bodies: BTreeMap::new(),
trait_impls: BTreeMap::new(),
modules: BTreeMap::new(),
+ attrs: BTreeMap::default(),
exported_macros: Vec::new(),
non_exported_macro_attrs: Vec::new(),
catch_scopes: Vec::new(),
@@ -314,8 +321,8 @@
is_in_dyn_type: false,
anonymous_lifetime_mode: AnonymousLifetimeMode::PassThrough,
type_def_lifetime_params: Default::default(),
- current_module: hir::CRATE_HIR_ID,
- current_hir_id_owner: vec![(LocalDefId { local_def_index: CRATE_DEF_INDEX }, 0)],
+ current_module: CRATE_DEF_ID,
+ current_hir_id_owner: vec![(CRATE_DEF_ID, 0)],
item_local_id_counters: Default::default(),
node_id_to_hir_id: IndexVec::new(),
generator_kind: None,
@@ -431,31 +438,6 @@
}
}
-struct ImplTraitTypeIdVisitor<'a> {
- ids: &'a mut SmallVec<[NodeId; 1]>,
-}
-
-impl Visitor<'_> for ImplTraitTypeIdVisitor<'_> {
- fn visit_ty(&mut self, ty: &Ty) {
- match ty.kind {
- TyKind::Typeof(_) | TyKind::BareFn(_) => return,
-
- TyKind::ImplTrait(id, _) => self.ids.push(id),
- _ => {}
- }
- visit::walk_ty(self, ty);
- }
-
- fn visit_path_segment(&mut self, path_span: Span, path_segment: &PathSegment) {
- if let Some(ref p) = path_segment.args {
- if let GenericArgs::Parenthesized(_) = **p {
- return;
- }
- }
- visit::walk_path_segment(self, path_span, path_segment)
- }
-}
-
impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_crate(mut self, c: &Crate) -> hir::Crate<'hir> {
/// Full-crate AST visitor that inserts into a fresh
@@ -468,25 +450,18 @@
}
impl MiscCollector<'_, '_, '_> {
- fn allocate_use_tree_hir_id_counters(&mut self, tree: &UseTree, owner: LocalDefId) {
+ fn allocate_use_tree_hir_id_counters(&mut self, tree: &UseTree) {
match tree.kind {
UseTreeKind::Simple(_, id1, id2) => {
for &id in &[id1, id2] {
- self.lctx.resolver.create_def(
- owner,
- id,
- DefPathData::Misc,
- ExpnId::root(),
- tree.prefix.span,
- );
self.lctx.allocate_hir_id_counter(id);
}
}
UseTreeKind::Glob => (),
UseTreeKind::Nested(ref trees) => {
for &(ref use_tree, id) in trees {
- let hir_id = self.lctx.allocate_hir_id_counter(id);
- self.allocate_use_tree_hir_id_counters(use_tree, hir_id.owner);
+ self.lctx.allocate_hir_id_counter(id);
+ self.allocate_use_tree_hir_id_counters(use_tree);
}
}
}
@@ -495,7 +470,7 @@
impl<'tcx> Visitor<'tcx> for MiscCollector<'tcx, '_, '_> {
fn visit_item(&mut self, item: &'tcx Item) {
- let hir_id = self.lctx.allocate_hir_id_counter(item.id);
+ self.lctx.allocate_hir_id_counter(item.id);
match item.kind {
ItemKind::Struct(_, ref generics)
@@ -514,7 +489,7 @@
self.lctx.type_def_lifetime_params.insert(def_id.to_def_id(), count);
}
ItemKind::Use(ref use_tree) => {
- self.allocate_use_tree_hir_id_counters(use_tree, hir_id.owner);
+ self.allocate_use_tree_hir_id_counters(use_tree);
}
_ => {}
}
@@ -560,8 +535,8 @@
visit::walk_crate(&mut MiscCollector { lctx: &mut self }, c);
visit::walk_crate(&mut item::ItemLowerer { lctx: &mut self }, c);
- let module = self.lower_mod(&c.module);
- let attrs = self.lower_attrs(&c.attrs);
+ let module = self.lower_mod(&c.items, c.span);
+ self.lower_attrs(hir::CRATE_HIR_ID, &c.attrs);
let body_ids = body_ids(&self.bodies);
let proc_macros =
c.proc_macros.iter().map(|id| self.node_id_to_hir_id[*id].unwrap()).collect();
@@ -588,8 +563,16 @@
self.resolver.definitions().init_def_id_to_hir_id_mapping(def_id_to_hir_id);
+ #[cfg(debug_assertions)]
+ for (&id, attrs) in self.attrs.iter() {
+ // Verify that we do not store empty slices in the map.
+ if attrs.is_empty() {
+ panic!("Stored empty attributes for {:?}", id);
+ }
+ }
+
hir::Crate {
- item: hir::CrateItem { module, attrs, span: c.span },
+ item: hir::CrateItem { module, span: c.span },
exported_macros: self.arena.alloc_from_iter(self.exported_macros),
non_exported_macro_attrs: self.arena.alloc_from_iter(self.non_exported_macro_attrs),
items: self.items,
@@ -602,15 +585,15 @@
modules: self.modules,
proc_macros,
trait_map,
+ attrs: self.attrs,
}
}
- fn insert_item(&mut self, item: hir::Item<'hir>) {
- let id = item.hir_id;
- // FIXME: Use `debug_asset-rt`.
- assert_eq!(id.local_id, hir::ItemLocalId::from_u32(0));
+ fn insert_item(&mut self, item: hir::Item<'hir>) -> hir::ItemId {
+ let id = hir::ItemId { def_id: item.def_id };
self.items.insert(id, item);
- self.modules.get_mut(&self.current_module).unwrap().items.insert(id);
+ self.modules.entry(self.current_module).or_default().items.insert(id);
+ id
}
fn allocate_hir_id_counter(&mut self, owner: NodeId) -> hir::HirId {
@@ -831,7 +814,6 @@
hir::GenericParam {
hir_id: self.lower_node_id(node_id),
name: hir_name,
- attrs: &[],
bounds: &[],
span,
pure_wrt_drop: false,
@@ -925,8 +907,13 @@
// `lifetimes_to_define`. If we swapped the order of these two,
// in-band-lifetimes introduced by generics or where-clauses
// wouldn't have been added yet.
- let generics =
- this.lower_generics_mut(generics, ImplTraitContext::Universal(&mut params));
+ let generics = this.lower_generics_mut(
+ generics,
+ ImplTraitContext::Universal(
+ &mut params,
+ this.current_hir_id_owner.last().unwrap().0,
+ ),
+ );
let res = f(this, &mut params);
(params, (generics, res))
})
@@ -964,11 +951,18 @@
ret
}
- fn lower_attrs(&mut self, attrs: &[Attribute]) -> &'hir [Attribute] {
- self.arena.alloc_from_iter(attrs.iter().map(|a| self.lower_attr(a)))
+ fn lower_attrs(&mut self, id: hir::HirId, attrs: &[Attribute]) -> Option<&'hir [Attribute]> {
+ if attrs.is_empty() {
+ None
+ } else {
+ let ret = self.arena.alloc_from_iter(attrs.iter().map(|a| self.lower_attr(a)));
+ debug_assert!(!ret.is_empty());
+ self.attrs.insert(id, ret);
+ Some(ret)
+ }
}
- fn lower_attr(&mut self, attr: &Attribute) -> Attribute {
+ fn lower_attr(&self, attr: &Attribute) -> Attribute {
// Note that we explicitly do not walk the path. Since we don't really
// lower attributes (we use the AST version) there is nowhere to keep
// the `HirId`s. We don't actually need HIR version of attributes anyway.
@@ -988,7 +982,14 @@
Attribute { kind, id: attr.id, style: attr.style, span: attr.span }
}
- fn lower_mac_args(&mut self, args: &MacArgs) -> MacArgs {
+ fn alias_attrs(&mut self, id: hir::HirId, target_id: hir::HirId) {
+ if let Some(&a) = self.attrs.get(&target_id) {
+ debug_assert!(!a.is_empty());
+ self.attrs.insert(id, a);
+ }
+ }
+
+ fn lower_mac_args(&self, args: &MacArgs) -> MacArgs {
match *args {
MacArgs::Empty => MacArgs::Empty,
MacArgs::Delimited(dspan, delim, ref tokens) => {
@@ -1117,6 +1118,7 @@
}
AssocTyConstraintKind::Bound { ref bounds } => {
let mut capturable_lifetimes;
+ let mut parent_def_id = self.current_hir_id_owner.last().unwrap().0;
// Piggy-back on the `impl Trait` context to figure out the correct behavior.
let (desugar_to_impl_trait, itctx) = match itctx {
// We are in the return position:
@@ -1136,7 +1138,10 @@
// so desugar to
//
// fn foo(x: dyn Iterator<Item = impl Debug>)
- ImplTraitContext::Universal(..) if self.is_in_dyn_type => (true, itctx),
+ ImplTraitContext::Universal(_, parent) if self.is_in_dyn_type => {
+ parent_def_id = parent;
+ (true, itctx)
+ }
// In `type Foo = dyn Iterator<Item: Debug>` we desugar to
// `type Foo = dyn Iterator<Item = impl Debug>` but we have to override the
@@ -1170,7 +1175,6 @@
// constructing the HIR for `impl bounds...` and then lowering that.
let impl_trait_node_id = self.resolver.next_node_id();
- let parent_def_id = self.current_hir_id_owner.last().unwrap().0;
self.resolver.create_def(
parent_def_id,
impl_trait_node_id,
@@ -1392,7 +1396,7 @@
if kind != TraitObjectSyntax::Dyn {
self.maybe_lint_bare_trait(t.span, t.id, false);
}
- hir::TyKind::TraitObject(bounds, lifetime_bound)
+ hir::TyKind::TraitObject(bounds, lifetime_bound, kind)
}
TyKind::ImplTrait(def_node_id, ref bounds) => {
let span = t.span;
@@ -1423,7 +1427,7 @@
|this| this.lower_param_bounds(bounds, nested_itctx),
)
}
- ImplTraitContext::Universal(in_band_ty_params) => {
+ ImplTraitContext::Universal(in_band_ty_params, parent_def_id) => {
// Add a definition for the in-band `Param`.
let def_id = self.resolver.local_def_id(def_node_id);
@@ -1432,7 +1436,7 @@
let hir_bounds = self.with_hir_id_owner(def_node_id, |this| {
this.lower_param_bounds(
bounds,
- ImplTraitContext::Universal(in_band_ty_params),
+ ImplTraitContext::Universal(in_band_ty_params, parent_def_id),
)
});
// Set the name to `impl Bound1 + Bound2`.
@@ -1441,7 +1445,6 @@
hir_id: self.lower_node_id(def_node_id),
name: ParamName::Plain(ident),
pure_wrt_drop: false,
- attrs: &[],
bounds: hir_bounds,
span,
kind: hir::GenericParamKind::Type {
@@ -1547,11 +1550,10 @@
};
trace!("lower_opaque_impl_trait: {:#?}", opaque_ty_def_id);
- let opaque_ty_id =
- lctx.generate_opaque_type(opaque_ty_node_id, opaque_ty_item, span, opaque_ty_span);
+ lctx.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span);
// `impl Trait` now just becomes `Foo<'a, 'b, ..>`.
- hir::TyKind::OpaqueDef(hir::ItemId { id: opaque_ty_id }, lifetimes)
+ hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, lifetimes)
})
}
@@ -1559,19 +1561,17 @@
/// returns the lowered node-ID for the opaque type.
fn generate_opaque_type(
&mut self,
- opaque_ty_node_id: NodeId,
+ opaque_ty_id: LocalDefId,
opaque_ty_item: hir::OpaqueTy<'hir>,
span: Span,
opaque_ty_span: Span,
- ) -> hir::HirId {
+ ) {
let opaque_ty_item_kind = hir::ItemKind::OpaqueTy(opaque_ty_item);
- let opaque_ty_id = self.lower_node_id(opaque_ty_node_id);
// Generate an `type Foo = impl Trait;` declaration.
trace!("registering opaque type with id {:#?}", opaque_ty_id);
let opaque_ty_item = hir::Item {
- hir_id: opaque_ty_id,
+ def_id: opaque_ty_id,
ident: Ident::invalid(),
- attrs: Default::default(),
kind: opaque_ty_item_kind,
vis: respan(span.shrink_to_lo(), hir::VisibilityKind::Inherited),
span: opaque_ty_span,
@@ -1581,7 +1581,6 @@
// automatically for all AST items. But this opaque type item
// does not actually exist in the AST.
self.insert_item(opaque_ty_item);
- opaque_ty_id
}
fn lifetimes_from_impl_trait_bounds(
@@ -1733,7 +1732,6 @@
name,
span: lifetime.span,
pure_wrt_drop: false,
- attrs: &[],
bounds: &[],
kind: hir::GenericParamKind::Lifetime { kind },
});
@@ -1766,14 +1764,7 @@
)
}
- fn lower_local(&mut self, l: &Local) -> (hir::Local<'hir>, SmallVec<[NodeId; 1]>) {
- let mut ids = SmallVec::<[NodeId; 1]>::new();
- if self.sess.features_untracked().impl_trait_in_bindings {
- if let Some(ref ty) = l.ty {
- let mut visitor = ImplTraitTypeIdVisitor { ids: &mut ids };
- visitor.visit_ty(ty);
- }
- }
+ fn lower_local(&mut self, l: &Local) -> hir::Local<'hir> {
let ty = l.ty.as_ref().map(|t| {
let mut capturable_lifetimes;
self.lower_ty(
@@ -1790,18 +1781,16 @@
)
});
let init = l.init.as_ref().map(|e| self.lower_expr(e));
- (
- hir::Local {
- hir_id: self.lower_node_id(l.id),
- ty,
- pat: self.lower_pat(&l.pat),
- init,
- span: l.span,
- attrs: l.attrs.iter().map(|a| self.lower_attr(a)).collect::<Vec<_>>().into(),
- source: hir::LocalSource::Normal,
- },
- ids,
- )
+ let hir_id = self.lower_node_id(l.id);
+ self.lower_attrs(hir_id, &l.attrs);
+ hir::Local {
+ hir_id,
+ ty,
+ pat: self.lower_pat(&l.pat),
+ init,
+ span: l.span,
+ source: hir::LocalSource::Normal,
+ }
}
fn lower_fn_params_to_names(&mut self, decl: &FnDecl) -> &'hir [Ident] {
@@ -1868,7 +1857,13 @@
}
this.arena.alloc_from_iter(inputs.iter().map(|param| {
if let Some((_, ibty)) = &mut in_band_ty_params {
- this.lower_ty_direct(&param.ty, ImplTraitContext::Universal(ibty))
+ this.lower_ty_direct(
+ &param.ty,
+ ImplTraitContext::Universal(
+ ibty,
+ this.current_hir_id_owner.last().unwrap().0,
+ ),
+ )
} else {
this.lower_ty_direct(&param.ty, ImplTraitContext::disallowed())
}
@@ -2010,7 +2005,7 @@
// grow.
let input_lifetimes_count = self.in_scope_lifetimes.len() + self.lifetimes_to_define.len();
- let (opaque_ty_id, lifetime_params) = self.with_hir_id_owner(opaque_ty_node_id, |this| {
+ let lifetime_params = self.with_hir_id_owner(opaque_ty_node_id, |this| {
// We have to be careful to get elision right here. The
// idea is that we create a lifetime parameter for each
// lifetime in the return type. So, given a return type
@@ -2061,10 +2056,9 @@
};
trace!("exist ty from async fn def id: {:#?}", opaque_ty_def_id);
- let opaque_ty_id =
- this.generate_opaque_type(opaque_ty_node_id, opaque_ty_item, span, opaque_ty_span);
+ this.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span);
- (opaque_ty_id, lifetime_params)
+ lifetime_params
});
// As documented above on the variable
@@ -2107,12 +2101,13 @@
// Foo = impl Trait` is, internally, created as a child of the
// async fn, so the *type parameters* are inherited. It's
// only the lifetime parameters that we must supply.
- let opaque_ty_ref = hir::TyKind::OpaqueDef(hir::ItemId { id: opaque_ty_id }, generic_args);
+ let opaque_ty_ref =
+ hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, generic_args);
let opaque_ty = self.ty(opaque_ty_span, opaque_ty_ref);
hir::FnRetTy::Return(self.arena.alloc(opaque_ty))
}
- /// Transforms `-> T` into `Future<Output = T>`
+ /// Transforms `-> T` into `Future<Output = T>`.
fn lower_async_fn_output_type_to_future_bound(
&mut self,
output: &FnRetTy,
@@ -2300,12 +2295,13 @@
}
};
+ let hir_id = self.lower_node_id(param.id);
+ self.lower_attrs(hir_id, &param.attrs);
hir::GenericParam {
- hir_id: self.lower_node_id(param.id),
+ hir_id,
name,
span: param.ident.span,
pure_wrt_drop: self.sess.contains_name(&param.attrs, sym::may_dangle),
- attrs: self.lower_attrs(&param.attrs),
bounds: self.arena.alloc_from_iter(bounds),
kind,
}
@@ -2385,26 +2381,12 @@
}
fn lower_block_noalloc(&mut self, b: &Block, targeted_by_break: bool) -> hir::Block<'hir> {
- let mut expr: Option<&'hir _> = None;
-
- let stmts = self.arena.alloc_from_iter(
- b.stmts
- .iter()
- .enumerate()
- .filter_map(|(index, stmt)| {
- if index == b.stmts.len() - 1 {
- if let StmtKind::Expr(ref e) = stmt.kind {
- expr = Some(self.lower_expr(e));
- None
- } else {
- Some(self.lower_stmt(stmt))
- }
- } else {
- Some(self.lower_stmt(stmt))
- }
- })
- .flatten(),
- );
+ let (stmts, expr) = match &*b.stmts {
+ [stmts @ .., Stmt { kind: StmtKind::Expr(e), .. }] => (stmts, Some(&*e)),
+ stmts => (stmts, None),
+ };
+ let stmts = self.arena.alloc_from_iter(stmts.iter().flat_map(|stmt| self.lower_stmt(stmt)));
+ let expr = expr.map(|e| self.lower_expr(e));
let rules = self.lower_block_check_mode(&b.rules);
let hir_id = self.lower_node_id(b.id);
@@ -2426,24 +2408,16 @@
}
fn lower_stmt(&mut self, s: &Stmt) -> SmallVec<[hir::Stmt<'hir>; 1]> {
- let kind = match s.kind {
+ let (hir_id, kind) = match s.kind {
StmtKind::Local(ref l) => {
- let (l, item_ids) = self.lower_local(l);
- let mut ids: SmallVec<[hir::Stmt<'hir>; 1]> = item_ids
- .into_iter()
- .map(|item_id| {
- let item_id = hir::ItemId { id: self.lower_node_id(item_id) };
- self.stmt(s.span, hir::StmtKind::Item(item_id))
- })
- .collect();
- ids.push({
- hir::Stmt {
- hir_id: self.lower_node_id(s.id),
- kind: hir::StmtKind::Local(self.arena.alloc(l)),
- span: s.span,
- }
- });
- return ids;
+ let l = self.lower_local(l);
+ let hir_id = self.lower_node_id(s.id);
+ self.alias_attrs(hir_id, l.hir_id);
+ return smallvec![hir::Stmt {
+ hir_id,
+ kind: hir::StmtKind::Local(self.arena.alloc(l)),
+ span: s.span,
+ }];
}
StmtKind::Item(ref it) => {
// Can only use the ID once.
@@ -2461,12 +2435,22 @@
})
.collect();
}
- StmtKind::Expr(ref e) => hir::StmtKind::Expr(self.lower_expr(e)),
- StmtKind::Semi(ref e) => hir::StmtKind::Semi(self.lower_expr(e)),
+ StmtKind::Expr(ref e) => {
+ let e = self.lower_expr(e);
+ let hir_id = self.lower_node_id(s.id);
+ self.alias_attrs(hir_id, e.hir_id);
+ (hir_id, hir::StmtKind::Expr(e))
+ }
+ StmtKind::Semi(ref e) => {
+ let e = self.lower_expr(e);
+ let hir_id = self.lower_node_id(s.id);
+ self.alias_attrs(hir_id, e.hir_id);
+ (hir_id, hir::StmtKind::Semi(e))
+ }
StmtKind::Empty => return smallvec![],
StmtKind::MacCall(..) => panic!("shouldn't exist here"),
};
- smallvec![hir::Stmt { hir_id: self.lower_node_id(s.id), kind, span: s.span }]
+ smallvec![hir::Stmt { hir_id, kind, span: s.span }]
}
fn lower_block_check_mode(&mut self, b: &BlockCheckMode) -> hir::BlockCheckMode {
@@ -2510,13 +2494,18 @@
fn stmt_let_pat(
&mut self,
- attrs: AttrVec,
+ attrs: Option<&'hir [Attribute]>,
span: Span,
init: Option<&'hir hir::Expr<'hir>>,
pat: &'hir hir::Pat<'hir>,
source: hir::LocalSource,
) -> hir::Stmt<'hir> {
- let local = hir::Local { attrs, hir_id: self.next_id(), init, pat, source, span, ty: None };
+ let hir_id = self.next_id();
+ if let Some(a) = attrs {
+ debug_assert!(!a.is_empty());
+ self.attrs.insert(hir_id, a);
+ }
+ let local = hir::Local { hir_id, init, pat, source, span, ty: None };
self.stmt(span, hir::StmtKind::Local(self.arena.alloc(local)))
}
@@ -2570,8 +2559,8 @@
&mut self,
span: Span,
pat: &'hir hir::Pat<'hir>,
- ) -> &'hir [hir::FieldPat<'hir>] {
- let field = hir::FieldPat {
+ ) -> &'hir [hir::PatField<'hir>] {
+ let field = hir::PatField {
hir_id: self.next_id(),
ident: Ident::new(sym::integer(0), span),
is_shorthand: false,
@@ -2585,7 +2574,7 @@
&mut self,
span: Span,
lang_item: hir::LangItem,
- fields: &'hir [hir::FieldPat<'hir>],
+ fields: &'hir [hir::PatField<'hir>],
) -> &'hir hir::Pat<'hir> {
let qpath = hir::QPath::LangItem(lang_item, span);
self.pat(span, hir::PatKind::Struct(qpath, fields, false))
@@ -2659,6 +2648,7 @@
hir::TyKind::TraitObject(
arena_vec![self; principal],
self.elided_dyn_bound(span),
+ TraitObjectSyntax::None,
)
}
_ => hir::TyKind::Path(hir::QPath::Resolved(None, path)),
diff --git a/compiler/rustc_ast_lowering/src/pat.rs b/compiler/rustc_ast_lowering/src/pat.rs
index e4e7b24..2451409 100644
--- a/compiler/rustc_ast_lowering/src/pat.rs
+++ b/compiler/rustc_ast_lowering/src/pat.rs
@@ -56,7 +56,7 @@
ImplTraitContext::disallowed(),
);
- let fs = self.arena.alloc_from_iter(fields.iter().map(|f| hir::FieldPat {
+ let fs = self.arena.alloc_from_iter(fields.iter().map(|f| hir::PatField {
hir_id: self.next_id(),
ident: f.ident,
pat: self.lower_pat(&f.pat),
diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs
index cb4d5ea..46dac2f 100644
--- a/compiler/rustc_ast_lowering/src/path.rs
+++ b/compiler/rustc_ast_lowering/src/path.rs
@@ -30,6 +30,7 @@
let partial_res =
self.resolver.get_partial_res(id).unwrap_or_else(|| PartialRes::new(Res::Err));
+ let path_span_lo = p.span.shrink_to_lo();
let proj_start = p.segments.len() - partial_res.unresolved_segments();
let path = self.arena.alloc(hir::Path {
res: self.lower_res(partial_res.base_res()),
@@ -108,7 +109,9 @@
)
},
)),
- span: p.span,
+ span: p.segments[..proj_start]
+ .last()
+ .map_or(path_span_lo, |segment| path_span_lo.to(segment.span())),
});
// Simple case, either no projections, or only fully-qualified.
@@ -127,7 +130,7 @@
// e.g., `Vec` in `Vec::new` or `<I as Iterator>::Item` in
// `<I as Iterator>::Item::default`.
let new_id = self.next_id();
- self.arena.alloc(self.ty_path(new_id, p.span, hir::QPath::Resolved(qself, path)))
+ self.arena.alloc(self.ty_path(new_id, path.span, hir::QPath::Resolved(qself, path)))
};
// Anything after the base path are associated "extensions",
@@ -141,7 +144,7 @@
// 3. `<<std::vec::Vec<T>>::IntoIter>::Item`
// * final path is `<<<std::vec::Vec<T>>::IntoIter>::Item>::clone`
for (i, segment) in p.segments.iter().enumerate().skip(proj_start) {
- let segment = self.arena.alloc(self.lower_path_segment(
+ let hir_segment = self.arena.alloc(self.lower_path_segment(
p.span,
segment,
param_mode,
@@ -150,7 +153,7 @@
itctx.reborrow(),
None,
));
- let qpath = hir::QPath::TypeRelative(ty, segment);
+ let qpath = hir::QPath::TypeRelative(ty, hir_segment);
// It's finished, return the extension of the right node type.
if i == p.segments.len() - 1 {
@@ -159,7 +162,7 @@
// Wrap the associated extension in another type node.
let new_id = self.next_id();
- ty = self.arena.alloc(self.ty_path(new_id, p.span, qpath));
+ ty = self.arena.alloc(self.ty_path(new_id, path_span_lo.to(segment.span()), qpath));
}
// We should've returned in the for loop above.
diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs
index 8defd91..563bcda 100644
--- a/compiler/rustc_ast_passes/src/ast_validation.rs
+++ b/compiler/rustc_ast_passes/src/ast_validation.rs
@@ -1054,12 +1054,14 @@
walk_list!(self, visit_attribute, &item.attrs);
return;
}
- ItemKind::Mod(Mod { inline, unsafety, .. }) => {
+ ItemKind::Mod(unsafety, ref mod_kind) => {
if let Unsafe::Yes(span) = unsafety {
self.err_handler().span_err(span, "module cannot be declared unsafe");
}
// Ensure that `path` attributes on modules are recorded as used (cf. issue #35584).
- if !inline && !self.session.contains_name(&item.attrs, sym::path) {
+ if !matches!(mod_kind, ModKind::Loaded(_, Inline::Yes, _))
+ && !self.session.contains_name(&item.attrs, sym::path)
+ {
self.check_mod_file_item_asciionly(item.ident);
}
}
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 6514de2..0ca9c12 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -164,6 +164,38 @@
"C-cmse-nonsecure-call ABI is experimental and subject to change"
);
}
+ "C-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "C-unwind ABI is experimental and subject to change"
+ );
+ }
+ "stdcall-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "stdcall-unwind ABI is experimental and subject to change"
+ );
+ }
+ "system-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "system-unwind ABI is experimental and subject to change"
+ );
+ }
+ "thiscall-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "thiscall-unwind ABI is experimental and subject to change"
+ );
+ }
abi => self
.sess
.parse_sess
@@ -247,7 +279,7 @@
if let ast::TyKind::ImplTrait(..) = ty.kind {
gate_feature_post!(
&self.vis,
- type_alias_impl_trait,
+ min_type_alias_impl_trait,
ty.span,
"`impl Trait` in type aliases is unstable"
);
@@ -638,8 +670,16 @@
}
};
}
- gate_all!(if_let_guard, "`if let` guards are experimental");
- gate_all!(let_chains, "`let` expressions in this position are experimental");
+ gate_all!(
+ if_let_guard,
+ "`if let` guards are experimental",
+ "you can write `if matches!(<expr>, <pattern>)` instead of `if let <pattern> = <expr>`"
+ );
+ gate_all!(
+ let_chains,
+ "`let` expressions in this position are experimental",
+ "you can write `matches!(<expr>, <pattern>)` instead of `let <pattern> = <expr>`"
+ );
gate_all!(
async_closure,
"async closures are unstable",
@@ -665,6 +705,7 @@
// involved, so we only emit errors where there are no other parsing errors.
gate_all!(destructuring_assignment, "destructuring assignments are unstable");
}
+ gate_all!(pub_macro_rules, "`pub` on `macro_rules` items is unstable");
// All uses of `gate_all!` below this point were added in #65742,
// and subsequently disabled (with the non-early gating readded).
diff --git a/compiler/rustc_ast_passes/src/node_count.rs b/compiler/rustc_ast_passes/src/node_count.rs
index 2971fa4..3980e6d 100644
--- a/compiler/rustc_ast_passes/src/node_count.rs
+++ b/compiler/rustc_ast_passes/src/node_count.rs
@@ -20,10 +20,6 @@
self.count += 1;
walk_ident(self, ident);
}
- fn visit_mod(&mut self, m: &Mod, _s: Span, _a: &[Attribute], _n: NodeId) {
- self.count += 1;
- walk_mod(self, m)
- }
fn visit_foreign_item(&mut self, i: &ForeignItem) {
self.count += 1;
walk_foreign_item(self, i)
@@ -92,9 +88,9 @@
self.count += 1;
walk_struct_def(self, s)
}
- fn visit_struct_field(&mut self, s: &StructField) {
+ fn visit_field_def(&mut self, s: &FieldDef) {
self.count += 1;
- walk_struct_field(self, s)
+ walk_field_def(self, s)
}
fn visit_enum_def(
&mut self,
diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs
index 7f4775b..cb6f567 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state.rs
@@ -9,7 +9,7 @@
use rustc_ast::util::comments::{gather_comments, Comment, CommentStyle};
use rustc_ast::util::parser::{self, AssocOp, Fixity};
use rustc_ast::{self as ast, BlockCheckMode, PatKind, RangeEnd, RangeSyntax};
-use rustc_ast::{GenericArg, MacArgs};
+use rustc_ast::{GenericArg, MacArgs, ModKind};
use rustc_ast::{GenericBound, SelfKind, TraitBoundModifier};
use rustc_ast::{InlineAsmOperand, InlineAsmRegOrRegClass};
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
@@ -87,7 +87,6 @@
pub s: pp::Printer,
comments: Option<Comments<'a>>,
ann: &'a (dyn PpAnn + 'a),
- is_expanded: bool,
}
crate const INDENT_UNIT: usize = 4;
@@ -103,12 +102,8 @@
is_expanded: bool,
edition: Edition,
) -> String {
- let mut s = State {
- s: pp::mk_printer(),
- comments: Some(Comments::new(sm, filename, input)),
- ann,
- is_expanded,
- };
+ let mut s =
+ State { s: pp::mk_printer(), comments: Some(Comments::new(sm, filename, input)), ann };
if is_expanded && !krate.attrs.iter().any(|attr| attr.has_name(sym::no_core)) {
// We need to print `#![no_std]` (and its feature gate) so that
@@ -132,7 +127,10 @@
}
}
- s.print_mod(&krate.module, &krate.attrs);
+ s.print_inner_attributes(&krate.attrs);
+ for item in &krate.items {
+ s.print_item(item);
+ }
s.print_remaining_comments();
s.ann.post(&mut s, AnnNode::Crate(krate));
s.s.eof()
@@ -853,7 +851,7 @@
impl<'a> State<'a> {
pub fn new() -> State<'a> {
- State { s: pp::mk_printer(), comments: None, ann: &NoAnn, is_expanded: false }
+ State { s: pp::mk_printer(), comments: None, ann: &NoAnn }
}
// Synthesizes a comment that was not textually present in the original source
@@ -891,13 +889,6 @@
self.commasep_cmnt(b, exprs, |s, e| s.print_expr(e), |e| e.span)
}
- pub fn print_mod(&mut self, _mod: &ast::Mod, attrs: &[ast::Attribute]) {
- self.print_inner_attributes(attrs);
- for item in &_mod.items {
- self.print_item(item);
- }
- }
-
crate fn print_foreign_mod(&mut self, nmod: &ast::ForeignMod, attrs: &[ast::Attribute]) {
self.print_inner_attributes(attrs);
for item in &nmod.items {
@@ -914,6 +905,7 @@
pub fn print_assoc_constraint(&mut self, constraint: &ast::AssocTyConstraint) {
self.print_ident(constraint.ident);
+ constraint.gen_args.as_ref().map(|args| self.print_generic_args(args, false));
self.s.space();
match &constraint.kind {
ast::AssocTyConstraintKind::Equality { ty } => {
@@ -1138,23 +1130,29 @@
let body = body.as_deref();
self.print_fn_full(sig, item.ident, gen, &item.vis, def, body, &item.attrs);
}
- ast::ItemKind::Mod(ref _mod) => {
+ ast::ItemKind::Mod(unsafety, ref mod_kind) => {
self.head(self.to_string(|s| {
s.print_visibility(&item.vis);
- s.print_unsafety(_mod.unsafety);
+ s.print_unsafety(unsafety);
s.word("mod");
}));
self.print_ident(item.ident);
- if _mod.inline || self.is_expanded {
- self.nbsp();
- self.bopen();
- self.print_mod(_mod, &item.attrs);
- self.bclose(item.span);
- } else {
- self.s.word(";");
- self.end(); // end inner head-block
- self.end(); // end outer head-block
+ match mod_kind {
+ ModKind::Loaded(items, ..) => {
+ self.nbsp();
+ self.bopen();
+ self.print_inner_attributes(&item.attrs);
+ for item in items {
+ self.print_item(item);
+ }
+ self.bclose(item.span);
+ }
+ ModKind::Unloaded => {
+ self.s.word(";");
+ self.end(); // end inner head-block
+ self.end(); // end outer head-block
+ }
}
}
ast::ItemKind::ForeignMod(ref nmod) => {
@@ -1311,6 +1309,9 @@
true,
item.span,
);
+ if macro_def.body.need_semicolon() {
+ self.word(";");
+ }
}
}
self.ann.post(self, AnnNode::Item(item))
@@ -1678,7 +1679,7 @@
self.ibox(INDENT_UNIT);
self.s.word("[");
self.print_inner_attributes_inline(attrs);
- self.commasep_exprs(Inconsistent, &exprs[..]);
+ self.commasep_exprs(Inconsistent, exprs);
self.s.word("]");
self.end();
}
@@ -1710,7 +1711,7 @@
fn print_expr_struct(
&mut self,
path: &ast::Path,
- fields: &[ast::Field],
+ fields: &[ast::ExprField],
rest: &ast::StructRest,
attrs: &[ast::Attribute],
) {
@@ -1719,7 +1720,7 @@
self.print_inner_attributes_inline(attrs);
self.commasep_cmnt(
Consistent,
- &fields[..],
+ fields,
|s, field| {
s.print_outer_attributes(&field.attrs);
s.ibox(INDENT_UNIT);
@@ -1754,7 +1755,7 @@
fn print_expr_tup(&mut self, exprs: &[P<ast::Expr>], attrs: &[ast::Attribute]) {
self.popen();
self.print_inner_attributes_inline(attrs);
- self.commasep_exprs(Inconsistent, &exprs[..]);
+ self.commasep_exprs(Inconsistent, exprs);
if exprs.len() == 1 {
self.s.word(",");
}
@@ -1872,8 +1873,8 @@
ast::ExprKind::Repeat(ref element, ref count) => {
self.print_expr_repeat(element, count, attrs);
}
- ast::ExprKind::Struct(ref path, ref fields, ref rest) => {
- self.print_expr_struct(path, &fields[..], rest, attrs);
+ ast::ExprKind::Struct(ref se) => {
+ self.print_expr_struct(&se.path, &se.fields, &se.rest, attrs);
}
ast::ExprKind::Tup(ref exprs) => {
self.print_expr_tup(&exprs[..], attrs);
diff --git a/compiler/rustc_attr/src/builtin.rs b/compiler/rustc_attr/src/builtin.rs
index aca3fbb..e58b266 100644
--- a/compiler/rustc_attr/src/builtin.rs
+++ b/compiler/rustc_attr/src/builtin.rs
@@ -176,7 +176,7 @@
sess: &Session,
attrs: &[Attribute],
item_sp: Span,
-) -> (Option<Stability>, Option<ConstStability>) {
+) -> (Option<(Stability, Span)>, Option<(ConstStability, Span)>) {
find_stability_generic(sess, attrs.iter(), item_sp)
}
@@ -184,15 +184,16 @@
sess: &Session,
attrs_iter: I,
item_sp: Span,
-) -> (Option<Stability>, Option<ConstStability>)
+) -> (Option<(Stability, Span)>, Option<(ConstStability, Span)>)
where
I: Iterator<Item = &'a Attribute>,
{
use StabilityLevel::*;
- let mut stab: Option<Stability> = None;
- let mut const_stab: Option<ConstStability> = None;
+ let mut stab: Option<(Stability, Span)> = None;
+ let mut const_stab: Option<(ConstStability, Span)> = None;
let mut promotable = false;
+
let diagnostic = &sess.parse_sess.span_diagnostic;
'outer: for attr in attrs_iter {
@@ -356,10 +357,12 @@
}
let level = Unstable { reason, issue: issue_num, is_soft };
if sym::unstable == meta_name {
- stab = Some(Stability { level, feature });
+ stab = Some((Stability { level, feature }, attr.span));
} else {
- const_stab =
- Some(ConstStability { level, feature, promotable: false });
+ const_stab = Some((
+ ConstStability { level, feature, promotable: false },
+ attr.span,
+ ));
}
}
(None, _, _) => {
@@ -432,10 +435,12 @@
(Some(feature), Some(since)) => {
let level = Stable { since };
if sym::stable == meta_name {
- stab = Some(Stability { level, feature });
+ stab = Some((Stability { level, feature }, attr.span));
} else {
- const_stab =
- Some(ConstStability { level, feature, promotable: false });
+ const_stab = Some((
+ ConstStability { level, feature, promotable: false },
+ attr.span,
+ ));
}
}
(None, _) => {
@@ -455,7 +460,7 @@
// Merge the const-unstable info into the stability info
if promotable {
- if let Some(ref mut stab) = const_stab {
+ if let Some((ref mut stab, _)) = const_stab {
stab.promotable = promotable;
} else {
struct_span_err!(
@@ -1035,14 +1040,14 @@
pub fn allow_internal_unstable<'a>(
sess: &'a Session,
attrs: &'a [Attribute],
-) -> Option<impl Iterator<Item = Symbol> + 'a> {
+) -> impl Iterator<Item = Symbol> + 'a {
allow_unstable(sess, attrs, sym::allow_internal_unstable)
}
pub fn rustc_allow_const_fn_unstable<'a>(
sess: &'a Session,
attrs: &'a [Attribute],
-) -> Option<impl Iterator<Item = Symbol> + 'a> {
+) -> impl Iterator<Item = Symbol> + 'a {
allow_unstable(sess, attrs, sym::rustc_allow_const_fn_unstable)
}
@@ -1050,7 +1055,7 @@
sess: &'a Session,
attrs: &'a [Attribute],
symbol: Symbol,
-) -> Option<impl Iterator<Item = Symbol> + 'a> {
+) -> impl Iterator<Item = Symbol> + 'a {
let attrs = sess.filter_by_name(attrs, symbol);
let list = attrs
.filter_map(move |attr| {
@@ -1064,7 +1069,7 @@
})
.flatten();
- Some(list.into_iter().filter_map(move |it| {
+ list.into_iter().filter_map(move |it| {
let name = it.ident().map(|ident| ident.name);
if name.is_none() {
sess.diagnostic().span_err(
@@ -1073,5 +1078,5 @@
);
}
name
- }))
+ })
}
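
The return-type change above (dropping the `Option` wrapper around the iterator) mainly simplifies call sites: an empty iterator carries the same information as `None`. A rough, self-contained sketch of the same shape, with invented names unrelated to rustc:

    // Not rustc code: returning the iterator directly instead of wrapping it
    // in Option lets callers drop the unwrap/flatten step.
    fn evens_opt(xs: &[u32]) -> Option<impl Iterator<Item = u32> + '_> {
        Some(xs.iter().copied().filter(|x| x % 2 == 0))
    }

    fn evens(xs: &[u32]) -> impl Iterator<Item = u32> + '_ {
        xs.iter().copied().filter(|x| x % 2 == 0)
    }

    fn main() {
        let xs = [1, 2, 3, 4];
        // Old shape: the caller has to flatten an Option that never adds information.
        let old: Vec<u32> = evens_opt(&xs).into_iter().flatten().collect();
        // New shape: an empty iterator already expresses "nothing allowed".
        let new: Vec<u32> = evens(&xs).collect();
        assert_eq!(old, new);
    }
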
diff --git a/compiler/rustc_builtin_macros/Cargo.toml b/compiler/rustc_builtin_macros/Cargo.toml
index c397a85..962dfba 100644
--- a/compiler/rustc_builtin_macros/Cargo.toml
+++ b/compiler/rustc_builtin_macros/Cargo.toml
@@ -15,10 +15,11 @@
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_feature = { path = "../rustc_feature" }
+rustc_lexer = { path = "../rustc_lexer" }
rustc_parse = { path = "../rustc_parse" }
rustc_target = { path = "../rustc_target" }
rustc_session = { path = "../rustc_session" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_ast = { path = "../rustc_ast" }
rustc_expand = { path = "../rustc_expand" }
rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_builtin_macros/src/asm.rs b/compiler/rustc_builtin_macros/src/asm.rs
index 36cd6c2..8d8b3f4 100644
--- a/compiler/rustc_builtin_macros/src/asm.rs
+++ b/compiler/rustc_builtin_macros/src/asm.rs
@@ -7,7 +7,10 @@
use rustc_expand::base::{self, *};
use rustc_parse::parser::Parser;
use rustc_parse_format as parse;
-use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::{
+ symbol::{kw, sym, Symbol},
+ BytePos,
+};
use rustc_span::{InnerSpan, Span};
struct AsmArgs {
@@ -399,6 +402,8 @@
let mut line_spans = Vec::with_capacity(args.templates.len());
let mut curarg = 0;
+ let default_dialect = ecx.sess.inline_asm_dialect();
+
for template_expr in args.templates.into_iter() {
if !template.is_empty() {
template.push(ast::InlineAsmTemplatePiece::String("\n".to_string()));
@@ -424,6 +429,60 @@
let template_str = &template_str.as_str();
let template_snippet = ecx.source_map().span_to_snippet(template_sp).ok();
+
+ if let Some(snippet) = &template_snippet {
+ let snippet = snippet.trim_matches('"');
+ match default_dialect {
+ ast::LlvmAsmDialect::Intel => {
+ if let Some(span) = check_syntax_directive(snippet, ".intel_syntax") {
+ let span = template_span.from_inner(span);
+ let mut err = ecx.struct_span_err(span, "intel syntax is the default syntax on this target, and trying to use this directive may cause issues");
+ err.span_suggestion(
+ span,
+ "remove this assembler directive",
+ "".to_string(),
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ }
+
+ if let Some(span) = check_syntax_directive(snippet, ".att_syntax") {
+ let span = template_span.from_inner(span);
+ let mut err = ecx.struct_span_err(span, "using the .att_syntax directive may cause issues, use the att_syntax option instead");
+ let asm_end = sp.hi() - BytePos(2);
+ let suggestions = vec![
+ (span, "".to_string()),
+ (
+ Span::new(asm_end, asm_end, sp.ctxt()),
+ ", options(att_syntax)".to_string(),
+ ),
+ ];
+ err.multipart_suggestion(
+ "remove the assembler directive and replace it with options(att_syntax)",
+ suggestions,
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ }
+ }
+ ast::LlvmAsmDialect::Att => {
+ if let Some(span) = check_syntax_directive(snippet, ".att_syntax") {
+ let span = template_span.from_inner(span);
+ let mut err = ecx.struct_span_err(span, "att syntax is the default syntax on this target, and trying to use this directive may cause issues");
+ err.span_suggestion(
+ span,
+ "remove this assembler directive",
+ "".to_string(),
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ }
+
+ // Use of .intel_syntax is ignored
+ }
+ }
+ }
+
let mut parser = parse::Parser::new(
template_str,
str_style,
@@ -631,3 +690,15 @@
}
}
}
+
+fn check_syntax_directive<S: AsRef<str>>(piece: S, syntax: &str) -> Option<InnerSpan> {
+ let piece = piece.as_ref();
+ if let Some(idx) = piece.find(syntax) {
+ let end =
+ idx + &piece[idx..].find(|c| matches!(c, '\n' | ';')).unwrap_or(piece[idx..].len());
+ // Offset by one because these represent the span with the " removed
+ Some(InnerSpan::new(idx + 1, end + 1))
+ } else {
+ None
+ }
+}
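
A standalone sketch of the span arithmetic in `check_syntax_directive`, using plain `(start, end)` byte offsets in place of `InnerSpan`; the `+ 1` models the opening quote stripped from the template snippet (the function name here is invented):

    // Invented stand-in: find a directive and report its range within the snippet,
    // shifted by one to account for the stripped leading quote.
    fn find_directive(piece: &str, directive: &str) -> Option<(usize, usize)> {
        let idx = piece.find(directive)?;
        let end = idx + piece[idx..].find(|c| matches!(c, '\n' | ';')).unwrap_or(piece[idx..].len());
        Some((idx + 1, end + 1))
    }

    fn main() {
        let snippet = ".intel_syntax noprefix\nmov eax, eax";
        // Covers ".intel_syntax noprefix" (22 bytes), reported as 1..23 after the shift.
        assert_eq!(find_directive(snippet, ".intel_syntax"), Some((1, 23)));
        assert_eq!(find_directive(snippet, ".att_syntax"), None);
    }
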
diff --git a/compiler/rustc_builtin_macros/src/cfg_eval.rs b/compiler/rustc_builtin_macros/src/cfg_eval.rs
new file mode 100644
index 0000000..025872d
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/cfg_eval.rs
@@ -0,0 +1,157 @@
+use crate::util::check_builtin_macro_attribute;
+
+use rustc_ast::mut_visit::{self, MutVisitor};
+use rustc_ast::ptr::P;
+use rustc_ast::{self as ast, AstLike};
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_expand::config::StripUnconfigured;
+use rustc_expand::configure;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use smallvec::SmallVec;
+
+crate fn expand(
+ ecx: &mut ExtCtxt<'_>,
+ _span: Span,
+ meta_item: &ast::MetaItem,
+ annotatable: Annotatable,
+) -> Vec<Annotatable> {
+ check_builtin_macro_attribute(ecx, meta_item, sym::cfg_eval);
+ cfg_eval(ecx, annotatable)
+}
+
+crate fn cfg_eval(ecx: &ExtCtxt<'_>, annotatable: Annotatable) -> Vec<Annotatable> {
+ let mut visitor = CfgEval {
+ cfg: StripUnconfigured { sess: ecx.sess, features: ecx.ecfg.features, modified: false },
+ };
+ let mut annotatable = visitor.configure_annotatable(annotatable);
+ if visitor.cfg.modified {
+ // Erase the tokens if cfg-stripping modified the item
+ // This will cause us to synthesize fake tokens
+ // when `nt_to_tokenstream` is called on this item.
+ if let Some(tokens) = annotatable.tokens_mut() {
+ *tokens = None;
+ }
+ }
+ vec![annotatable]
+}
+
+struct CfgEval<'a> {
+ cfg: StripUnconfigured<'a>,
+}
+
+impl CfgEval<'_> {
+ fn configure<T: AstLike>(&mut self, node: T) -> Option<T> {
+ self.cfg.configure(node)
+ }
+
+ fn configure_annotatable(&mut self, annotatable: Annotatable) -> Annotatable {
+ // Since the item itself has already been configured by the InvocationCollector,
+ // we know that fold result vector will contain exactly one element
+ match annotatable {
+ Annotatable::Item(item) => Annotatable::Item(self.flat_map_item(item).pop().unwrap()),
+ Annotatable::TraitItem(item) => {
+ Annotatable::TraitItem(self.flat_map_trait_item(item).pop().unwrap())
+ }
+ Annotatable::ImplItem(item) => {
+ Annotatable::ImplItem(self.flat_map_impl_item(item).pop().unwrap())
+ }
+ Annotatable::ForeignItem(item) => {
+ Annotatable::ForeignItem(self.flat_map_foreign_item(item).pop().unwrap())
+ }
+ Annotatable::Stmt(stmt) => {
+ Annotatable::Stmt(stmt.map(|stmt| self.flat_map_stmt(stmt).pop().unwrap()))
+ }
+ Annotatable::Expr(mut expr) => Annotatable::Expr({
+ self.visit_expr(&mut expr);
+ expr
+ }),
+ Annotatable::Arm(arm) => Annotatable::Arm(self.flat_map_arm(arm).pop().unwrap()),
+ Annotatable::ExprField(field) => {
+ Annotatable::ExprField(self.flat_map_expr_field(field).pop().unwrap())
+ }
+ Annotatable::PatField(fp) => {
+ Annotatable::PatField(self.flat_map_pat_field(fp).pop().unwrap())
+ }
+ Annotatable::GenericParam(param) => {
+ Annotatable::GenericParam(self.flat_map_generic_param(param).pop().unwrap())
+ }
+ Annotatable::Param(param) => {
+ Annotatable::Param(self.flat_map_param(param).pop().unwrap())
+ }
+ Annotatable::FieldDef(sf) => {
+ Annotatable::FieldDef(self.flat_map_field_def(sf).pop().unwrap())
+ }
+ Annotatable::Variant(v) => {
+ Annotatable::Variant(self.flat_map_variant(v).pop().unwrap())
+ }
+ }
+ }
+}
+
+impl MutVisitor for CfgEval<'_> {
+ fn visit_expr(&mut self, expr: &mut P<ast::Expr>) {
+ self.cfg.configure_expr(expr);
+ mut_visit::noop_visit_expr(expr, self);
+ }
+
+ fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
+ let mut expr = configure!(self, expr);
+ mut_visit::noop_visit_expr(&mut expr, self);
+ Some(expr)
+ }
+
+ fn flat_map_generic_param(
+ &mut self,
+ param: ast::GenericParam,
+ ) -> SmallVec<[ast::GenericParam; 1]> {
+ mut_visit::noop_flat_map_generic_param(configure!(self, param), self)
+ }
+
+ fn flat_map_stmt(&mut self, stmt: ast::Stmt) -> SmallVec<[ast::Stmt; 1]> {
+ mut_visit::noop_flat_map_stmt(configure!(self, stmt), self)
+ }
+
+ fn flat_map_item(&mut self, item: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
+ mut_visit::noop_flat_map_item(configure!(self, item), self)
+ }
+
+ fn flat_map_impl_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
+ mut_visit::noop_flat_map_assoc_item(configure!(self, item), self)
+ }
+
+ fn flat_map_trait_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
+ mut_visit::noop_flat_map_assoc_item(configure!(self, item), self)
+ }
+
+ fn flat_map_foreign_item(
+ &mut self,
+ foreign_item: P<ast::ForeignItem>,
+ ) -> SmallVec<[P<ast::ForeignItem>; 1]> {
+ mut_visit::noop_flat_map_foreign_item(configure!(self, foreign_item), self)
+ }
+
+ fn flat_map_arm(&mut self, arm: ast::Arm) -> SmallVec<[ast::Arm; 1]> {
+ mut_visit::noop_flat_map_arm(configure!(self, arm), self)
+ }
+
+ fn flat_map_expr_field(&mut self, field: ast::ExprField) -> SmallVec<[ast::ExprField; 1]> {
+ mut_visit::noop_flat_map_expr_field(configure!(self, field), self)
+ }
+
+ fn flat_map_pat_field(&mut self, fp: ast::PatField) -> SmallVec<[ast::PatField; 1]> {
+ mut_visit::noop_flat_map_pat_field(configure!(self, fp), self)
+ }
+
+ fn flat_map_param(&mut self, p: ast::Param) -> SmallVec<[ast::Param; 1]> {
+ mut_visit::noop_flat_map_param(configure!(self, p), self)
+ }
+
+ fn flat_map_field_def(&mut self, sf: ast::FieldDef) -> SmallVec<[ast::FieldDef; 1]> {
+ mut_visit::noop_flat_map_field_def(configure!(self, sf), self)
+ }
+
+ fn flat_map_variant(&mut self, variant: ast::Variant) -> SmallVec<[ast::Variant; 1]> {
+ mut_visit::noop_flat_map_variant(configure!(self, variant), self)
+ }
+}
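
The visitor above follows a "configure the node, then recurse into what survives" pattern: each node is either kept or stripped by `cfg`, and kept nodes have their children visited under the same rule. A toy, self-contained model of that pattern (none of these types are rustc's):

    // Toy tree: `enabled` stands in for the result of evaluating #[cfg(...)].
    #[derive(Debug)]
    struct Node {
        enabled: bool,
        children: Vec<Node>,
    }

    fn configure(mut node: Node) -> Option<Node> {
        if !node.enabled {
            return None; // analogous to an item stripped by a false #[cfg(...)]
        }
        // Recurse only into nodes that survived configuration.
        node.children = node.children.into_iter().filter_map(configure).collect();
        Some(node)
    }

    fn main() {
        let tree = Node {
            enabled: true,
            children: vec![
                Node { enabled: false, children: vec![] },
                Node { enabled: true, children: vec![] },
            ],
        };
        let configured = configure(tree).unwrap();
        assert_eq!(configured.children.len(), 1);
    }
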
diff --git a/compiler/rustc_builtin_macros/src/derive.rs b/compiler/rustc_builtin_macros/src/derive.rs
new file mode 100644
index 0000000..0da2c1c
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/derive.rs
@@ -0,0 +1,114 @@
+use crate::cfg_eval::cfg_eval;
+
+use rustc_ast::{self as ast, token, ItemKind, MetaItemKind, NestedMetaItem, StmtKind};
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, Indeterminate, MultiItemModifier};
+use rustc_feature::AttributeTemplate;
+use rustc_parse::validate_attr;
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+crate struct Expander;
+
+impl MultiItemModifier for Expander {
+ fn expand(
+ &self,
+ ecx: &mut ExtCtxt<'_>,
+ span: Span,
+ meta_item: &ast::MetaItem,
+ item: Annotatable,
+ ) -> ExpandResult<Vec<Annotatable>, Annotatable> {
+ let sess = ecx.sess;
+ if report_bad_target(sess, &item, span) {
+ // We don't want to pass inappropriate targets to derive macros to avoid
+ // follow up errors, all other errors below are recoverable.
+ return ExpandResult::Ready(vec![item]);
+ }
+
+ let template =
+ AttributeTemplate { list: Some("Trait1, Trait2, ..."), ..Default::default() };
+ let attr = ecx.attribute(meta_item.clone());
+ validate_attr::check_builtin_attribute(&sess.parse_sess, &attr, sym::derive, template);
+
+ let derives: Vec<_> = attr
+ .meta_item_list()
+ .unwrap_or_default()
+ .into_iter()
+ .filter_map(|nested_meta| match nested_meta {
+ NestedMetaItem::MetaItem(meta) => Some(meta),
+ NestedMetaItem::Literal(lit) => {
+ // Reject `#[derive("Debug")]`.
+ report_unexpected_literal(sess, &lit);
+ None
+ }
+ })
+ .map(|meta| {
+ // Reject `#[derive(Debug = "value", Debug(abc))]`, but recover the paths.
+ report_path_args(sess, &meta);
+ meta.path
+ })
+ .collect();
+
+ // FIXME: Try to cache intermediate results to avoid collecting same paths multiple times.
+ match ecx.resolver.resolve_derives(ecx.current_expansion.id, derives, ecx.force_mode) {
+ Ok(()) => ExpandResult::Ready(cfg_eval(ecx, item)),
+ Err(Indeterminate) => ExpandResult::Retry(item),
+ }
+ }
+}
+
+fn report_bad_target(sess: &Session, item: &Annotatable, span: Span) -> bool {
+ let item_kind = match item {
+ Annotatable::Item(item) => Some(&item.kind),
+ Annotatable::Stmt(stmt) => match &stmt.kind {
+ StmtKind::Item(item) => Some(&item.kind),
+ _ => None,
+ },
+ _ => None,
+ };
+
+ let bad_target =
+ !matches!(item_kind, Some(ItemKind::Struct(..) | ItemKind::Enum(..) | ItemKind::Union(..)));
+ if bad_target {
+ struct_span_err!(
+ sess,
+ span,
+ E0774,
+ "`derive` may only be applied to structs, enums and unions",
+ )
+ .emit();
+ }
+ bad_target
+}
+
+fn report_unexpected_literal(sess: &Session, lit: &ast::Lit) {
+ let help_msg = match lit.token.kind {
+ token::Str if rustc_lexer::is_ident(&lit.token.symbol.as_str()) => {
+ format!("try using `#[derive({})]`", lit.token.symbol)
+ }
+ _ => "for example, write `#[derive(Debug)]` for `Debug`".to_string(),
+ };
+ struct_span_err!(sess, lit.span, E0777, "expected path to a trait, found literal",)
+ .help(&help_msg)
+ .emit();
+}
+
+fn report_path_args(sess: &Session, meta: &ast::MetaItem) {
+ let report_error = |title, action| {
+ let span = meta.span.with_lo(meta.path.span.hi());
+ sess.struct_span_err(span, title)
+ .span_suggestion(span, action, String::new(), Applicability::MachineApplicable)
+ .emit();
+ };
+ match meta.kind {
+ MetaItemKind::Word => {}
+ MetaItemKind::List(..) => report_error(
+ "traits in `#[derive(...)]` don't accept arguments",
+ "remove the arguments",
+ ),
+ MetaItemKind::NameValue(..) => {
+ report_error("traits in `#[derive(...)]` don't accept values", "remove the value")
+ }
+ }
+}
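
A toy sketch of the three argument checks performed above on `#[derive(...)]` inputs; the enum and messages are invented stand-ins for rustc's `MetaItem` machinery and diagnostics, not the real ones:

    // Invented meta-item shapes: only bare paths are accepted as derive arguments.
    enum Meta {
        Path(&'static str),
        Literal(&'static str),
        NameValue(&'static str, &'static str),
        List(&'static str, Vec<&'static str>),
    }

    fn check(meta: &Meta) -> Result<&'static str, String> {
        match meta {
            Meta::Path(p) => Ok(*p),
            Meta::Literal(l) => Err(format!("expected path to a trait, found literal {:?}", l)),
            Meta::NameValue(p, _) => Err(format!("`{}` in #[derive(...)] doesn't accept a value", p)),
            Meta::List(p, _) => Err(format!("`{}` in #[derive(...)] doesn't accept arguments", p)),
        }
    }

    fn main() {
        assert!(check(&Meta::Path("Debug")).is_ok());
        assert!(check(&Meta::Literal("Debug")).is_err());       // #[derive("Debug")]
        assert!(check(&Meta::NameValue("Debug", "x")).is_err()); // #[derive(Debug = "x")]
        assert!(check(&Meta::List("Debug", vec!["abc"])).is_err()); // #[derive(Debug(abc))]
    }
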
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
index c1473e2..f84e6e0 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
@@ -47,9 +47,10 @@
span: Span,
self_arg_tags: &[Ident],
) -> P<ast::Expr> {
- let lft = cx.expr_ident(span, self_arg_tags[0]);
+ let lft = cx.expr_addr_of(span, cx.expr_ident(span, self_arg_tags[0]));
let rgt = cx.expr_addr_of(span, cx.expr_ident(span, self_arg_tags[1]));
- cx.expr_method_call(span, lft, Ident::new(sym::cmp, span), vec![rgt])
+ let fn_cmp_path = cx.std_path(&[sym::cmp, sym::Ord, sym::cmp]);
+ cx.expr_call_global(span, fn_cmp_path, vec![lft, rgt])
}
pub fn cs_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> P<Expr> {
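
The `Ord` derive now emits a fully-qualified call through `std::cmp::Ord::cmp` with both discriminant tags passed by reference, instead of a method call on the left tag. A hand-written sketch of the emitted shape, assuming plain `u8` tags:

    use std::cmp::Ordering;

    fn main() {
        let (lhs_tag, rhs_tag) = (1u8, 2u8);
        // Old shape: method call on the tag, receiver taken by value.
        let old: Ordering = lhs_tag.cmp(&rhs_tag);
        // New shape: fully-qualified trait call, both tags borrowed.
        let new: Ordering = std::cmp::Ord::cmp(&lhs_tag, &rhs_tag);
        assert_eq!(old, new);
    }
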
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
index 21174ca..151a919 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
@@ -1,13 +1,11 @@
-pub use OrderingOp::*;
-
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
-use crate::deriving::{path_local, path_std, pathvec_std};
+use crate::deriving::{path_std, pathvec_std};
use rustc_ast::ptr::P;
-use rustc_ast::{self as ast, BinOpKind, Expr, MetaItem};
+use rustc_ast::{Expr, MetaItem};
use rustc_expand::base::{Annotatable, ExtCtxt};
-use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::symbol::{sym, Ident};
use rustc_span::Span;
pub fn expand_deriving_partial_ord(
@@ -17,26 +15,6 @@
item: &Annotatable,
push: &mut dyn FnMut(Annotatable),
) {
- macro_rules! md {
- ($name:expr, $op:expr, $equal:expr) => {{
- let inline = cx.meta_word(span, sym::inline);
- let attrs = vec![cx.attribute(inline)];
- MethodDef {
- name: $name,
- generics: Bounds::empty(),
- explicit_self: borrowed_explicit_self(),
- args: vec![(borrowed_self(), sym::other)],
- ret_ty: Literal(path_local!(bool)),
- attributes: attrs,
- is_unsafe: false,
- unify_fieldless_variants: true,
- combine_substructure: combine_substructure(Box::new(|cx, span, substr| {
- cs_op($op, $equal, cx, span, substr)
- })),
- }
- }};
- }
-
let ordering_ty = Literal(path_std!(cmp::Ordering));
let ret_ty = Literal(Path::new_(
pathvec_std!(option::Option),
@@ -62,21 +40,6 @@
})),
};
- // avoid defining extra methods if we can
- // c-like enums, enums without any fields and structs without fields
- // can safely define only `partial_cmp`.
- let methods = if is_type_without_fields(item) {
- vec![partial_cmp_def]
- } else {
- vec![
- partial_cmp_def,
- md!(sym::lt, true, false),
- md!(sym::le, true, true),
- md!(sym::gt, false, false),
- md!(sym::ge, false, true),
- ]
- };
-
let trait_def = TraitDef {
span,
attributes: vec![],
@@ -85,39 +48,12 @@
generics: Bounds::empty(),
is_unsafe: false,
supports_unions: false,
- methods,
+ methods: vec![partial_cmp_def],
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, item, push)
}
-#[derive(Copy, Clone)]
-pub enum OrderingOp {
- PartialCmpOp,
- LtOp,
- LeOp,
- GtOp,
- GeOp,
-}
-
-pub fn some_ordering_collapsed(
- cx: &mut ExtCtxt<'_>,
- span: Span,
- op: OrderingOp,
- self_arg_tags: &[Ident],
-) -> P<ast::Expr> {
- let lft = cx.expr_ident(span, self_arg_tags[0]);
- let rgt = cx.expr_addr_of(span, cx.expr_ident(span, self_arg_tags[1]));
- let op_sym = match op {
- PartialCmpOp => sym::partial_cmp,
- LtOp => sym::lt,
- LeOp => sym::le,
- GtOp => sym::gt,
- GeOp => sym::ge,
- };
- cx.expr_method_call(span, lft, Ident::new(op_sym, span), vec![rgt])
-}
-
pub fn cs_partial_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> P<Expr> {
let test_id = Ident::new(sym::cmp, span);
let ordering = cx.path_global(span, cx.std_path(&[sym::cmp, sym::Ordering, sym::Equal]));
@@ -171,7 +107,11 @@
if self_args.len() != 2 {
cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
} else {
- some_ordering_collapsed(cx, span, PartialCmpOp, tag_tuple)
+ let lft = cx.expr_addr_of(span, cx.expr_ident(span, tag_tuple[0]));
+ let rgt = cx.expr_addr_of(span, cx.expr_ident(span, tag_tuple[1]));
+ let fn_partial_cmp_path =
+ cx.std_path(&[sym::cmp, sym::PartialOrd, sym::partial_cmp]);
+ cx.expr_call_global(span, fn_partial_cmp_path, vec![lft, rgt])
}
}),
cx,
@@ -179,124 +119,3 @@
substr,
)
}
-
-/// Strict inequality.
-fn cs_op(
- less: bool,
- inclusive: bool,
- cx: &mut ExtCtxt<'_>,
- span: Span,
- substr: &Substructure<'_>,
-) -> P<Expr> {
- let ordering_path = |cx: &mut ExtCtxt<'_>, name: &str| {
- cx.expr_path(
- cx.path_global(span, cx.std_path(&[sym::cmp, sym::Ordering, Symbol::intern(name)])),
- )
- };
-
- let par_cmp = |cx: &mut ExtCtxt<'_>, span, self_f: P<Expr>, other_fs: &[P<Expr>], default| {
- let other_f = match other_fs {
- [o_f] => o_f,
- _ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`"),
- };
-
- // `PartialOrd::partial_cmp(self.fi, other.fi)`
- let cmp_path = cx.expr_path(
- cx.path_global(span, cx.std_path(&[sym::cmp, sym::PartialOrd, sym::partial_cmp])),
- );
- let cmp = cx.expr_call(
- span,
- cmp_path,
- vec![cx.expr_addr_of(span, self_f), cx.expr_addr_of(span, other_f.clone())],
- );
-
- let default = ordering_path(cx, default);
- // `Option::unwrap_or(_, Ordering::Equal)`
- let unwrap_path = cx.expr_path(
- cx.path_global(span, cx.std_path(&[sym::option, sym::Option, sym::unwrap_or])),
- );
- cx.expr_call(span, unwrap_path, vec![cmp, default])
- };
-
- let fold = cs_fold1(
- false, // need foldr
- |cx, span, subexpr, self_f, other_fs| {
- // build up a series of `partial_cmp`s from the inside
- // out (hence foldr) to get lexical ordering, i.e., for op ==
- // `ast::lt`
- //
- // ```
- // Ordering::then_with(
- // Option::unwrap_or(
- // PartialOrd::partial_cmp(self.f1, other.f1), Ordering::Equal)
- // ),
- // Option::unwrap_or(
- // PartialOrd::partial_cmp(self.f2, other.f2), Ordering::Greater)
- // )
- // )
- // == Ordering::Less
- // ```
- //
- // and for op ==
- // `ast::le`
- //
- // ```
- // Ordering::then_with(
- // Option::unwrap_or(
- // PartialOrd::partial_cmp(self.f1, other.f1), Ordering::Equal)
- // ),
- // Option::unwrap_or(
- // PartialOrd::partial_cmp(self.f2, other.f2), Ordering::Greater)
- // )
- // )
- // != Ordering::Greater
- // ```
- //
- // The optimiser should remove the redundancy. We explicitly
- // get use the binops to avoid auto-deref dereferencing too many
- // layers of pointers, if the type includes pointers.
-
- // `Option::unwrap_or(PartialOrd::partial_cmp(self.fi, other.fi), Ordering::Equal)`
- let par_cmp = par_cmp(cx, span, self_f, other_fs, "Equal");
-
- // `Ordering::then_with(Option::unwrap_or(..), ..)`
- let then_with_path = cx.expr_path(
- cx.path_global(span, cx.std_path(&[sym::cmp, sym::Ordering, sym::then_with])),
- );
- cx.expr_call(span, then_with_path, vec![par_cmp, cx.lambda0(span, subexpr)])
- },
- |cx, args| match args {
- Some((span, self_f, other_fs)) => {
- let opposite = if less { "Greater" } else { "Less" };
- par_cmp(cx, span, self_f, other_fs, opposite)
- }
- None => cx.expr_bool(span, inclusive),
- },
- Box::new(|cx, span, (self_args, tag_tuple), _non_self_args| {
- if self_args.len() != 2 {
- cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
- } else {
- let op = match (less, inclusive) {
- (false, false) => GtOp,
- (false, true) => GeOp,
- (true, false) => LtOp,
- (true, true) => LeOp,
- };
- some_ordering_collapsed(cx, span, op, tag_tuple)
- }
- }),
- cx,
- span,
- substr,
- );
-
- match *substr.fields {
- EnumMatching(.., ref all_fields) | Struct(.., ref all_fields) if !all_fields.is_empty() => {
- let ordering = ordering_path(cx, if less ^ inclusive { "Less" } else { "Greater" });
- let comp_op = if inclusive { BinOpKind::Ne } else { BinOpKind::Eq };
-
- cx.expr_binary(span, comp_op, fold, ordering)
- }
- _ => fold,
- }
-}
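
With only `partial_cmp` generated, the comparison operators come from `PartialOrd`'s default methods. A hand-written sketch of what the derive's output is roughly equivalent to for a small two-field struct (an approximation, not the exact expansion):

    use std::cmp::Ordering;

    struct Pair(u32, u32);

    impl PartialEq for Pair {
        fn eq(&self, other: &Self) -> bool {
            (self.0, self.1) == (other.0, other.1)
        }
    }

    impl PartialOrd for Pair {
        // The only method the derive now generates; lt/le/gt/ge use the trait defaults.
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            match PartialOrd::partial_cmp(&self.0, &other.0) {
                Some(Ordering::Equal) => PartialOrd::partial_cmp(&self.1, &other.1),
                other_ordering => other_ordering,
            }
        }
    }

    fn main() {
        assert!(Pair(1, 2) < Pair(1, 3));
        assert!(Pair(2, 0) > Pair(1, 9));
        assert!(Pair(1, 1) <= Pair(1, 1));
    }
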
diff --git a/compiler/rustc_builtin_macros/src/deriving/debug.rs b/compiler/rustc_builtin_macros/src/deriving/debug.rs
index ba43be6..cc6dac5 100644
--- a/compiler/rustc_builtin_macros/src/deriving/debug.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/debug.rs
@@ -77,7 +77,8 @@
// tuple struct/"normal" variant
let fn_path_debug_tuple = cx.std_path(&[sym::fmt, sym::Formatter, sym::debug_tuple]);
let expr = cx.expr_call_global(span, fn_path_debug_tuple, vec![fmt, name]);
- stmts.push(cx.stmt_let(span, true, builder, expr));
+ let expr = make_mut_borrow(cx, span, expr);
+ stmts.push(cx.stmt_let(span, false, builder, expr));
for field in fields {
// Use double indirection to make sure this works for unsized types
@@ -85,8 +86,8 @@
let field = cx.expr_addr_of(field.span, field);
let fn_path_field = cx.std_path(&[sym::fmt, sym::DebugTuple, sym::field]);
- let builder_recv = make_mut_borrow(cx, span, builder_expr.clone());
- let expr = cx.expr_call_global(span, fn_path_field, vec![builder_recv, field]);
+ let expr =
+ cx.expr_call_global(span, fn_path_field, vec![builder_expr.clone(), field]);
// Use `let _ = expr;` to avoid triggering the
// unused_results lint.
@@ -99,7 +100,8 @@
// normal struct/struct variant
let fn_path_debug_struct = cx.std_path(&[sym::fmt, sym::Formatter, sym::debug_struct]);
let expr = cx.expr_call_global(span, fn_path_debug_struct, vec![fmt, name]);
- stmts.push(cx.stmt_let(DUMMY_SP, true, builder, expr));
+ let expr = make_mut_borrow(cx, span, expr);
+ stmts.push(cx.stmt_let(DUMMY_SP, false, builder, expr));
for field in fields {
let name = cx.expr_lit(
@@ -111,17 +113,18 @@
let fn_path_field = cx.std_path(&[sym::fmt, sym::DebugStruct, sym::field]);
let field = cx.expr_addr_of(field.span, field.self_.clone());
let field = cx.expr_addr_of(field.span, field);
- let builder_recv = make_mut_borrow(cx, span, builder_expr.clone());
- let expr =
- cx.expr_call_global(span, fn_path_field, vec![builder_recv, name, field]);
+ let expr = cx.expr_call_global(
+ span,
+ fn_path_field,
+ vec![builder_expr.clone(), name, field],
+ );
stmts.push(stmt_let_underscore(cx, span, expr));
}
fn_path_finish = cx.std_path(&[sym::fmt, sym::DebugStruct, sym::finish]);
}
}
- let builder_recv = make_mut_borrow(cx, span, builder_expr);
- let expr = cx.expr_call_global(span, fn_path_finish, vec![builder_recv]);
+ let expr = cx.expr_call_global(span, fn_path_finish, vec![builder_expr]);
stmts.push(cx.stmt_expr(expr));
let block = cx.block(span, stmts);
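
The `Debug` derive now binds the builder once as `&mut` (relying on temporary lifetime extension) and routes every `field` call through that single borrow, instead of re-borrowing a `mut` local at each call. A hand-written approximation of the emitted impl for a simple struct:

    use std::fmt;

    struct Point { x: i32, y: i32 }

    impl fmt::Debug for Point {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // One `&mut` binding up front; the temporary lives for the whole block.
            let builder = &mut fmt::Formatter::debug_struct(f, "Point");
            // Each field call goes through the same borrow; `&&` keeps unsized types working.
            let _ = fmt::DebugStruct::field(builder, "x", &&self.x);
            let _ = fmt::DebugStruct::field(builder, "y", &&self.y);
            fmt::DebugStruct::finish(builder)
        }
    }

    fn main() {
        assert_eq!(format!("{:?}", Point { x: 1, y: 2 }), "Point { x: 1, y: 2 }");
    }
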
diff --git a/compiler/rustc_builtin_macros/src/deriving/decodable.rs b/compiler/rustc_builtin_macros/src/deriving/decodable.rs
index df69f6c..1d892b2 100644
--- a/compiler/rustc_builtin_macros/src/deriving/decodable.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/decodable.rs
@@ -91,18 +91,19 @@
Unnamed(ref fields, _) => fields.len(),
Named(ref fields) => fields.len(),
};
- let read_struct_field = Ident::new(sym::read_struct_field, trait_span);
+ let fn_read_struct_field_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_struct_field]);
let path = cx.path_ident(trait_span, substr.type_ident);
let result =
decode_static_fields(cx, trait_span, path, summary, |cx, span, name, field| {
cx.expr_try(
span,
- cx.expr_method_call(
+ cx.expr_call_global(
span,
- blkdecoder.clone(),
- read_struct_field,
+ fn_read_struct_field_path.clone(),
vec![
+ blkdecoder.clone(),
cx.expr_str(span, name),
cx.expr_usize(span, field),
exprdecode.clone(),
@@ -111,11 +112,14 @@
)
});
let result = cx.expr_ok(trait_span, result);
- cx.expr_method_call(
+ let fn_read_struct_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_struct]);
+
+ cx.expr_call_global(
trait_span,
- decoder,
- Ident::new(sym::read_struct, trait_span),
+ fn_read_struct_path,
vec![
+ decoder,
cx.expr_str(trait_span, substr.type_ident.name),
cx.expr_usize(trait_span, nfields),
cx.lambda1(trait_span, result, blkarg),
@@ -127,7 +131,9 @@
let mut arms = Vec::with_capacity(fields.len() + 1);
let mut variants = Vec::with_capacity(fields.len());
- let rvariant_arg = Ident::new(sym::read_enum_variant_arg, trait_span);
+
+ let fn_read_enum_variant_arg_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_enum_variant_arg]);
for (i, &(ident, v_span, ref parts)) in fields.iter().enumerate() {
variants.push(cx.expr_str(v_span, ident.name));
@@ -138,11 +144,10 @@
let idx = cx.expr_usize(span, field);
cx.expr_try(
span,
- cx.expr_method_call(
+ cx.expr_call_global(
span,
- blkdecoder.clone(),
- rvariant_arg,
- vec![idx, exprdecode.clone()],
+ fn_read_enum_variant_arg_path.clone(),
+ vec![blkdecoder.clone(), idx, exprdecode.clone()],
),
)
});
@@ -159,17 +164,21 @@
let lambda = cx.lambda(trait_span, vec![blkarg, variant], result);
let variant_vec = cx.expr_vec(trait_span, variants);
let variant_vec = cx.expr_addr_of(trait_span, variant_vec);
- let result = cx.expr_method_call(
+ let fn_read_enum_variant_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_enum_variant]);
+ let result = cx.expr_call_global(
trait_span,
- blkdecoder,
- Ident::new(sym::read_enum_variant, trait_span),
- vec![variant_vec, lambda],
+ fn_read_enum_variant_path,
+ vec![blkdecoder, variant_vec, lambda],
);
- cx.expr_method_call(
+ let fn_read_enum_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_enum]);
+
+ cx.expr_call_global(
trait_span,
- decoder,
- Ident::new(sym::read_enum, trait_span),
+ fn_read_enum_path,
vec![
+ decoder,
cx.expr_str(trait_span, substr.type_ident.name),
cx.lambda1(trait_span, result, blkarg),
],
diff --git a/compiler/rustc_builtin_macros/src/deriving/encodable.rs b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
index 62aa1cb..01a57be 100644
--- a/compiler/rustc_builtin_macros/src/deriving/encodable.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
@@ -179,7 +179,8 @@
match *substr.fields {
Struct(_, ref fields) => {
- let emit_struct_field = Ident::new(sym::emit_struct_field, trait_span);
+ let fn_emit_struct_field_path =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_struct_field]);
let mut stmts = Vec::new();
for (i, &FieldInfo { name, ref self_, span, .. }) in fields.iter().enumerate() {
let name = match name {
@@ -189,11 +190,15 @@
let self_ref = cx.expr_addr_of(span, self_.clone());
let enc = cx.expr_call(span, fn_path.clone(), vec![self_ref, blkencoder.clone()]);
let lambda = cx.lambda1(span, enc, blkarg);
- let call = cx.expr_method_call(
+ let call = cx.expr_call_global(
span,
- blkencoder.clone(),
- emit_struct_field,
- vec![cx.expr_str(span, name), cx.expr_usize(span, i), lambda],
+ fn_emit_struct_field_path.clone(),
+ vec![
+ blkencoder.clone(),
+ cx.expr_str(span, name),
+ cx.expr_usize(span, i),
+ lambda,
+ ],
);
// last call doesn't need a try!
@@ -216,11 +221,14 @@
cx.lambda_stmts_1(trait_span, stmts, blkarg)
};
- cx.expr_method_call(
+ let fn_emit_struct_path =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_struct]);
+
+ cx.expr_call_global(
trait_span,
- encoder,
- Ident::new(sym::emit_struct, trait_span),
+ fn_emit_struct_path,
vec![
+ encoder,
cx.expr_str(trait_span, substr.type_ident.name),
cx.expr_usize(trait_span, fields.len()),
blk,
@@ -235,7 +243,10 @@
// actually exist.
let me = cx.stmt_let(trait_span, false, blkarg, encoder);
let encoder = cx.expr_ident(trait_span, blkarg);
- let emit_variant_arg = Ident::new(sym::emit_enum_variant_arg, trait_span);
+
+ let fn_emit_enum_variant_arg_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_enum_variant_arg]);
+
let mut stmts = Vec::new();
if !fields.is_empty() {
let last = fields.len() - 1;
@@ -244,11 +255,11 @@
let enc =
cx.expr_call(span, fn_path.clone(), vec![self_ref, blkencoder.clone()]);
let lambda = cx.lambda1(span, enc, blkarg);
- let call = cx.expr_method_call(
+
+ let call = cx.expr_call_global(
span,
- blkencoder.clone(),
- emit_variant_arg,
- vec![cx.expr_usize(span, i), lambda],
+ fn_emit_enum_variant_arg_path.clone(),
+ vec![blkencoder.clone(), cx.expr_usize(span, i), lambda],
);
let call = if i != last {
cx.expr_try(span, call)
@@ -265,23 +276,29 @@
let blk = cx.lambda_stmts_1(trait_span, stmts, blkarg);
let name = cx.expr_str(trait_span, variant.ident.name);
- let call = cx.expr_method_call(
+
+ let fn_emit_enum_variant_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_enum_variant]);
+
+ let call = cx.expr_call_global(
trait_span,
- blkencoder,
- Ident::new(sym::emit_enum_variant, trait_span),
+ fn_emit_enum_variant_path,
vec![
+ blkencoder,
name,
cx.expr_usize(trait_span, idx),
cx.expr_usize(trait_span, fields.len()),
blk,
],
);
+
let blk = cx.lambda1(trait_span, call, blkarg);
- let ret = cx.expr_method_call(
+ let fn_emit_enum_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_enum]);
+ let ret = cx.expr_call_global(
trait_span,
- encoder,
- Ident::new(sym::emit_enum, trait_span),
- vec![cx.expr_str(trait_span, substr.type_ident.name), blk],
+ fn_emit_enum_path,
+ vec![encoder, cx.expr_str(trait_span, substr.type_ident.name), blk],
);
cx.expr_block(cx.block(trait_span, vec![me, cx.stmt_expr(ret)]))
}
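
The switch from method calls to explicit `rustc_serialize::Encoder`/`Decoder` paths avoids method-resolution surprises at the expansion site. A standalone illustration of the hazard with invented trait and type names:

    // Invented names: an inherent method with the same name wins method-call
    // resolution, while a fully-qualified trait path cannot be shadowed.
    trait Emit {
        fn emit_u32(&self, v: u32) -> String;
    }

    struct Encoder;

    impl Emit for Encoder {
        fn emit_u32(&self, v: u32) -> String {
            format!("trait:{}", v)
        }
    }

    impl Encoder {
        fn emit_u32(&self, v: u32) -> String {
            format!("inherent:{}", v)
        }
    }

    fn main() {
        let e = Encoder;
        assert_eq!(e.emit_u32(7), "inherent:7");      // method call: inherent method wins
        assert_eq!(Emit::emit_u32(&e, 7), "trait:7"); // explicit path: the trait method
    }
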
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index d498c8e..da85cc7 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -1578,7 +1578,7 @@
if ident.is_none() {
cx.span_bug(sp, "a braced struct with unnamed fields in `derive`");
}
- ast::FieldPat {
+ ast::PatField {
ident: ident.unwrap(),
is_shorthand: false,
attrs: ast::AttrVec::new(),
diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs
index 85ca1da..7e88b58 100644
--- a/compiler/rustc_builtin_macros/src/format.rs
+++ b/compiler/rustc_builtin_macros/src/format.rs
@@ -270,7 +270,7 @@
parse::ArgumentNamed(s) => Named(s),
};
- let ty = Placeholder(match &arg.format.ty[..] {
+ let ty = Placeholder(match arg.format.ty {
"" => "Display",
"?" => "Debug",
"e" => "LowerExp",
diff --git a/compiler/rustc_builtin_macros/src/format_foreign.rs b/compiler/rustc_builtin_macros/src/format_foreign.rs
index 0496c72..0cc520e 100644
--- a/compiler/rustc_builtin_macros/src/format_foreign.rs
+++ b/compiler/rustc_builtin_macros/src/format_foreign.rs
@@ -312,7 +312,7 @@
return Some((Substitution::Escape, &s[start + 2..]));
}
- Cur::new_at(&s[..], start)
+ Cur::new_at(s, start)
};
// This is meant to be a translation of the following regex:
@@ -673,7 +673,7 @@
_ => { /* fall-through */ }
}
- Cur::new_at(&s[..], start)
+ Cur::new_at(s, start)
};
let at = at.at_next_cp()?;
diff --git a/compiler/rustc_builtin_macros/src/global_asm.rs b/compiler/rustc_builtin_macros/src/global_asm.rs
index 3689e33..76d8745 100644
--- a/compiler/rustc_builtin_macros/src/global_asm.rs
+++ b/compiler/rustc_builtin_macros/src/global_asm.rs
@@ -28,7 +28,7 @@
ident: Ident::invalid(),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
- kind: ast::ItemKind::GlobalAsm(P(global_asm)),
+ kind: ast::ItemKind::GlobalAsm(global_asm),
vis: ast::Visibility {
span: sp.shrink_to_lo(),
kind: ast::VisibilityKind::Inherited,
diff --git a/compiler/rustc_builtin_macros/src/lib.rs b/compiler/rustc_builtin_macros/src/lib.rs
index 59844b6..a46a550 100644
--- a/compiler/rustc_builtin_macros/src/lib.rs
+++ b/compiler/rustc_builtin_macros/src/lib.rs
@@ -11,6 +11,7 @@
#![feature(or_patterns)]
#![feature(proc_macro_internals)]
#![feature(proc_macro_quote)]
+#![recursion_limit = "256"]
extern crate proc_macro;
@@ -24,9 +25,11 @@
mod assert;
mod cfg;
mod cfg_accessible;
+mod cfg_eval;
mod compile_error;
mod concat;
mod concat_idents;
+mod derive;
mod deriving;
mod env;
mod format;
@@ -88,6 +91,8 @@
register_attr! {
bench: test::expand_bench,
cfg_accessible: cfg_accessible::Expander,
+ cfg_eval: cfg_eval::expand,
+ derive: derive::Expander,
global_allocator: global_allocator::expand,
test: test::expand_test,
test_case: test::expand_test_case,
diff --git a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
index 7582d98..71bbae1 100644
--- a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
+++ b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
@@ -91,7 +91,7 @@
}
    let decls = mk_decls(&mut krate, &mut cx, &macros);
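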
- krate.module.items.push(decls);
+ krate.items.push(decls);
krate
}
diff --git a/compiler/rustc_builtin_macros/src/source_util.rs b/compiler/rustc_builtin_macros/src/source_util.rs
index 28efd48..4aafcb2 100644
--- a/compiler/rustc_builtin_macros/src/source_util.rs
+++ b/compiler/rustc_builtin_macros/src/source_util.rs
@@ -4,7 +4,7 @@
use rustc_ast::tokenstream::TokenStream;
use rustc_ast_pretty::pprust;
use rustc_expand::base::{self, *};
-use rustc_expand::module::DirectoryOwnership;
+use rustc_expand::module::DirOwnership;
use rustc_parse::parser::{ForceCollect, Parser};
use rustc_parse::{self, new_parser_from_file};
use rustc_session::lint::builtin::INCOMPLETE_INCLUDE;
@@ -101,7 +101,7 @@
None => return DummyResult::any(sp),
};
// The file will be added to the code map by the parser
- let mut file = match cx.resolve_path(file, sp) {
+ let file = match cx.resolve_path(file, sp) {
Ok(f) => f,
Err(mut err) => {
err.emit();
@@ -114,10 +114,9 @@
// then the path of `bar.rs` should be relative to the directory of `file`.
// See https://github.com/rust-lang/rust/pull/69838/files#r395217057 for a discussion.
// `MacroExpander::fully_expand_fragment` later restores, so "stack discipline" is maintained.
- file.pop();
- cx.current_expansion.directory_ownership = DirectoryOwnership::Owned { relative: None };
- let mod_path = cx.current_expansion.module.mod_path.clone();
- cx.current_expansion.module = Rc::new(ModuleData { mod_path, directory: file });
+ let dir_path = file.parent().unwrap_or(&file).to_owned();
+ cx.current_expansion.module = Rc::new(cx.current_expansion.module.with_dir_path(dir_path));
+ cx.current_expansion.dir_ownership = DirOwnership::Owned { relative: None };
struct ExpandResult<'a> {
p: Parser<'a>,
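
A standalone sketch of the directory computation `include!` now uses for resolving nested `mod` declarations, written against `std::path` only (no rustc types):

    use std::path::{Path, PathBuf};

    // Nested `mod` declarations resolve relative to the directory of the included
    // file; if the path has no parent, fall back to the path itself.
    fn include_dir(file: &Path) -> PathBuf {
        file.parent().unwrap_or(file).to_owned()
    }

    fn main() {
        assert_eq!(include_dir(Path::new("src/foo/bar.rs")), PathBuf::from("src/foo"));
        // The root has no parent, so the fallback keeps the path unchanged.
        assert_eq!(include_dir(Path::new("/")), PathBuf::from("/"));
    }
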
diff --git a/compiler/rustc_builtin_macros/src/standard_library_imports.rs b/compiler/rustc_builtin_macros/src/standard_library_imports.rs
index 91566ec..fbd8be2 100644
--- a/compiler/rustc_builtin_macros/src/standard_library_imports.rs
+++ b/compiler/rustc_builtin_macros/src/standard_library_imports.rs
@@ -1,9 +1,8 @@
use rustc_ast as ast;
-use rustc_ast::ptr::P;
use rustc_expand::base::{ExtCtxt, ResolverExpand};
use rustc_expand::expand::ExpansionConfig;
use rustc_session::Session;
-use rustc_span::edition::Edition;
+use rustc_span::edition::Edition::*;
use rustc_span::hygiene::AstPass;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::DUMMY_SP;
@@ -14,7 +13,7 @@
sess: &Session,
alt_std_name: Option<Symbol>,
) -> ast::Crate {
- let rust_2018 = sess.parse_sess.edition >= Edition::Edition2018;
+ let edition = sess.parse_sess.edition;
// the first name in this list is the crate name of the crate with the prelude
let names: &[Symbol] = if sess.contains_name(&krate.attrs, sym::no_core) {
@@ -43,8 +42,12 @@
// .rev() to preserve ordering above in combination with insert(0, ...)
for &name in names.iter().rev() {
- let ident = if rust_2018 { Ident::new(name, span) } else { Ident::new(name, call_site) };
- krate.module.items.insert(
+ let ident = if edition >= Edition2018 {
+ Ident::new(name, span)
+ } else {
+ Ident::new(name, call_site)
+ };
+ krate.items.insert(
0,
cx.item(
span,
@@ -59,27 +62,31 @@
// the one with the prelude.
let name = names[0];
- let import_path = if rust_2018 {
- [name, sym::prelude, sym::v1].iter().map(|symbol| Ident::new(*symbol, span)).collect()
- } else {
- [kw::PathRoot, name, sym::prelude, sym::v1]
- .iter()
- .map(|symbol| Ident::new(*symbol, span))
- .collect()
- };
+ let root = (edition == Edition2015).then(|| kw::PathRoot);
+
+ let import_path = root
+ .iter()
+ .chain(&[name, sym::prelude])
+ .chain(&[match edition {
+ Edition2015 => sym::rust_2015,
+ Edition2018 => sym::rust_2018,
+ Edition2021 => sym::rust_2021,
+ }])
+ .map(|&symbol| Ident::new(symbol, span))
+ .collect();
let use_item = cx.item(
span,
Ident::invalid(),
vec![cx.attribute(cx.meta_word(span, sym::prelude_import))],
- ast::ItemKind::Use(P(ast::UseTree {
+ ast::ItemKind::Use(ast::UseTree {
prefix: cx.path(span, import_path),
kind: ast::UseTreeKind::Glob,
span,
- })),
+ }),
);
- krate.module.items.insert(0, use_item);
+ krate.items.insert(0, use_item);
krate
}
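
A sketch of the path selection the injected prelude import now performs: the edition picks the prelude module, and only 2015 paths get the leading `::` root. The enum and helper below are invented and only model the string shape of the final `use` path:

    #[derive(Clone, Copy, PartialEq)]
    enum Edition { Edition2015, Edition2018, Edition2021 }

    fn prelude_path(edition: Edition, krate: &str) -> String {
        // Only the 2015 edition spells the crate with a leading path root.
        let root = if edition == Edition::Edition2015 { "::" } else { "" };
        let prelude = match edition {
            Edition::Edition2015 => "rust_2015",
            Edition::Edition2018 => "rust_2018",
            Edition::Edition2021 => "rust_2021",
        };
        format!("{}{}::prelude::{}::*", root, krate, prelude)
    }

    fn main() {
        assert_eq!(prelude_path(Edition::Edition2015, "std"), "::std::prelude::rust_2015::*");
        assert_eq!(prelude_path(Edition::Edition2021, "core"), "core::prelude::rust_2021::*");
    }
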
diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs
index 4ac22be..28e8259 100644
--- a/compiler/rustc_builtin_macros/src/test_harness.rs
+++ b/compiler/rustc_builtin_macros/src/test_harness.rs
@@ -1,10 +1,10 @@
// Code that generates a test runner to run all the tests in a crate
use rustc_ast as ast;
-use rustc_ast::attr;
use rustc_ast::entry::EntryPointType;
use rustc_ast::mut_visit::{ExpectOne, *};
use rustc_ast::ptr::P;
+use rustc_ast::{attr, ModKind};
use rustc_expand::base::{ExtCtxt, ResolverExpand};
use rustc_expand::expand::{AstFragment, ExpansionConfig};
use rustc_feature::Features;
@@ -89,7 +89,7 @@
noop_visit_crate(c, self);
// Create a main function to run our tests
- c.module.items.push(mk_main(&mut self.cx));
+ c.items.push(mk_main(&mut self.cx));
}
fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
@@ -103,9 +103,9 @@
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
- if let ast::ItemKind::Mod(mut module) = item.kind {
+ if let ast::ItemKind::Mod(..) = item.kind {
let tests = mem::take(&mut self.tests);
- noop_visit_mod(&mut module, self);
+ noop_visit_item_kind(&mut item.kind, self);
let mut tests = mem::replace(&mut self.tests, tests);
if !tests.is_empty() {
@@ -113,8 +113,12 @@
if item.id == ast::DUMMY_NODE_ID { ast::CRATE_NODE_ID } else { item.id };
// Create an identifier that will hygienically resolve the test
// case name, even in another module.
+ let inner_span = match item.kind {
+ ast::ItemKind::Mod(_, ModKind::Loaded(.., span)) => span,
+ _ => unreachable!(),
+ };
let expn_id = self.cx.ext_cx.resolver.expansion_for_ast_pass(
- module.inner,
+ inner_span,
AstPass::TestHarness,
&[],
Some(parent),
@@ -126,7 +130,6 @@
}
self.cx.test_cases.extend(tests);
}
- item.kind = ast::ItemKind::Mod(module);
}
smallvec![P(item)]
}
diff --git a/compiler/rustc_codegen_cranelift/.cirrus.yml b/compiler/rustc_codegen_cranelift/.cirrus.yml
new file mode 100644
index 0000000..e173df4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.cirrus.yml
@@ -0,0 +1,25 @@
+task:
+ name: freebsd
+ freebsd_instance:
+ image: freebsd-12-1-release-amd64
+ setup_rust_script:
+ - pkg install -y curl git bash
+ - curl https://sh.rustup.rs -sSf --output rustup.sh
+ - sh rustup.sh --default-toolchain none -y --profile=minimal
+ cargo_bin_cache:
+ folder: ~/.cargo/bin
+ target_cache:
+ folder: target
+ prepare_script:
+ - . $HOME/.cargo/env
+ - git config --global user.email "[email protected]"
+ - git config --global user.name "User"
+ - ./prepare.sh
+ test_script:
+ - . $HOME/.cargo/env
+ - # Enable backtraces for easier debugging
+ - export RUST_BACKTRACE=1
+ - # Reduce amount of benchmark runs as they are slow
+ - export COMPILE_RUNS=2
+ - export RUN_RUNS=2
+ - ./test.sh
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/main.yml b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
index 20c5842..e6d3375 100644
--- a/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
@@ -12,9 +12,6 @@
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
- env:
- - BACKEND: ""
- - BACKEND: --oldbe
steps:
- uses: actions/checkout@v2
@@ -54,7 +51,7 @@
export COMPILE_RUNS=2
export RUN_RUNS=2
- ./test.sh $BACKEND
+ ./test.sh
- name: Package prebuilt cg_clif
run: tar cvfJ cg_clif.tar.xz build
diff --git a/compiler/rustc_codegen_cranelift/.vscode/settings.json b/compiler/rustc_codegen_cranelift/.vscode/settings.json
index 19ea415..a13d593 100644
--- a/compiler/rustc_codegen_cranelift/.vscode/settings.json
+++ b/compiler/rustc_codegen_cranelift/.vscode/settings.json
@@ -1,6 +1,6 @@
{
// source for rustc_* is not included in the rust-src component; disable the errors about this
- "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate"],
+ "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate", "macro-error"],
"rust-analyzer.assist.importMergeBehavior": "last",
"rust-analyzer.cargo.loadOutDirsFromCheck": true,
"rust-analyzer.linkedProjects": [
diff --git a/compiler/rustc_codegen_cranelift/Cargo.lock b/compiler/rustc_codegen_cranelift/Cargo.lock
index 5495cfa..76d9f0d 100644
--- a/compiler/rustc_codegen_cranelift/Cargo.lock
+++ b/compiler/rustc_codegen_cranelift/Cargo.lock
@@ -1,5 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
+version = 3
+
[[package]]
name = "anyhow"
version = "1.0.38"
@@ -30,18 +32,6 @@
checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b"
[[package]]
-name = "cc"
-version = "1.0.66"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
-
-[[package]]
-name = "cfg-if"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-
-[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -49,16 +39,16 @@
[[package]]
name = "cranelift-bforest"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
dependencies = [
"cranelift-entity",
]
[[package]]
name = "cranelift-codegen"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
dependencies = [
"byteorder",
"cranelift-bforest",
@@ -75,8 +65,8 @@
[[package]]
name = "cranelift-codegen-meta"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
dependencies = [
"cranelift-codegen-shared",
"cranelift-entity",
@@ -84,18 +74,18 @@
[[package]]
name = "cranelift-codegen-shared"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
[[package]]
name = "cranelift-entity"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
[[package]]
name = "cranelift-frontend"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
dependencies = [
"cranelift-codegen",
"log",
@@ -105,8 +95,8 @@
[[package]]
name = "cranelift-jit"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -123,8 +113,8 @@
[[package]]
name = "cranelift-module"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -135,18 +125,17 @@
[[package]]
name = "cranelift-native"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
dependencies = [
"cranelift-codegen",
- "raw-cpuid",
"target-lexicon",
]
[[package]]
name = "cranelift-object"
-version = "0.69.0"
-source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+version = "0.70.0"
+source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -162,7 +151,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
]
[[package]]
@@ -219,9 +208,9 @@
[[package]]
name = "libc"
-version = "0.2.82"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929"
+checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c"
[[package]]
name = "libloading"
@@ -229,17 +218,17 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883"
dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
"winapi",
]
[[package]]
name = "log"
-version = "0.4.13"
+version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2"
+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
- "cfg-if 0.1.10",
+ "cfg-if",
]
[[package]]
@@ -253,9 +242,9 @@
[[package]]
name = "object"
-version = "0.22.0"
+version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397"
+checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4"
dependencies = [
"crc32fast",
"indexmap",
@@ -272,25 +261,14 @@
[[package]]
name = "quote"
-version = "1.0.8"
+version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df"
+checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
dependencies = [
"proc-macro2",
]
[[package]]
-name = "raw-cpuid"
-version = "8.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fdf7d9dbd43f3d81d94a49c1c3df73cc2b3827995147e6cf7f89d4ec5483e73"
-dependencies = [
- "bitflags",
- "cc",
- "rustc_version",
-]
-
-[[package]]
name = "regalloc"
version = "0.0.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -338,30 +316,6 @@
]
[[package]]
-name = "rustc_version"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
-dependencies = [
- "semver",
-]
-
-[[package]]
-name = "semver"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
-dependencies = [
- "semver-parser",
-]
-
-[[package]]
-name = "semver-parser"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
-
-[[package]]
name = "smallvec"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -369,9 +323,9 @@
[[package]]
name = "syn"
-version = "1.0.58"
+version = "1.0.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5"
+checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081"
dependencies = [
"proc-macro2",
"quote",
@@ -380,24 +334,24 @@
[[package]]
name = "target-lexicon"
-version = "0.11.1"
+version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ee5a98e506fb7231a304c3a1bd7c132a55016cf65001e0282480665870dfcb9"
+checksum = "422045212ea98508ae3d28025bc5aaa2bd4a9cdaecd442a08da2ee620ee9ea95"
[[package]]
name = "thiserror"
-version = "1.0.23"
+version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146"
+checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.23"
+version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1"
+checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0"
dependencies = [
"proc-macro2",
"quote",
diff --git a/compiler/rustc_codegen_cranelift/Cargo.toml b/compiler/rustc_codegen_cranelift/Cargo.toml
index 3820fce..9861af1 100644
--- a/compiler/rustc_codegen_cranelift/Cargo.toml
+++ b/compiler/rustc_codegen_cranelift/Cargo.toml
@@ -9,14 +9,14 @@
[dependencies]
# These have to be in sync with each other
-cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", features = ["unwind", "x86", "x64"] }
+cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", features = ["unwind", "x64"] }
cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", optional = true }
cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
target-lexicon = "0.11.0"
gimli = { version = "0.23.0", default-features = false, features = ["write"]}
-object = { version = "0.22.0", default-features = false, features = ["std", "read_core", "write", "coff", "elf", "macho", "pe"] }
+object = { version = "0.23.0", default-features = false, features = ["std", "read_core", "write", "coff", "elf", "macho", "pe"] }
ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
indexmap = "1.0.2"
@@ -38,7 +38,6 @@
default = ["jit", "inline_asm"]
jit = ["cranelift-jit", "libloading"]
inline_asm = []
-oldbe = []
[profile.dev]
# By compiling dependencies with optimizations, performing tests gets much faster.
diff --git a/compiler/rustc_codegen_cranelift/build.sh b/compiler/rustc_codegen_cranelift/build.sh
index 598ce35..090349e 100755
--- a/compiler/rustc_codegen_cranelift/build.sh
+++ b/compiler/rustc_codegen_cranelift/build.sh
@@ -1,11 +1,10 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
# Settings
export CHANNEL="release"
build_sysroot="clif"
target_dir='build'
-oldbe=''
while [[ $# != 0 ]]; do
case $1 in
"--debug")
@@ -19,12 +18,9 @@
target_dir=$2
shift
;;
- "--oldbe")
- oldbe='--features oldbe'
- ;;
*)
echo "Unknown flag '$1'"
- echo "Usage: ./build.sh [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--oldbe]"
+ echo "Usage: ./build.sh [--debug] [--sysroot none|clif|llvm] [--target-dir DIR]"
exit 1
;;
esac
@@ -34,19 +30,19 @@
# Build cg_clif
unset CARGO_TARGET_DIR
unamestr=$(uname)
-if [[ "$unamestr" == 'Linux' ]]; then
+if [[ "$unamestr" == 'Linux' || "$unamestr" == "FreeBSD" ]]; then
export RUSTFLAGS='-Clink-arg=-Wl,-rpath=$ORIGIN/../lib '$RUSTFLAGS
elif [[ "$unamestr" == 'Darwin' ]]; then
export RUSTFLAGS='-Csplit-debuginfo=unpacked -Clink-arg=-Wl,-rpath,@loader_path/../lib -Zosx-rpath-install-name '$RUSTFLAGS
dylib_ext='dylib'
else
- echo "Unsupported os"
+ echo "Unsupported os $unamestr"
exit 1
fi
if [[ "$CHANNEL" == "release" ]]; then
- cargo build $oldbe --release
+ cargo build --release
else
- cargo build $oldbe
+ cargo build
fi
source scripts/ext_config.sh
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
index 0da9999..a7650ab 100644
--- a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
@@ -1,5 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
+version = 3
+
[[package]]
name = "addr2line"
version = "0.14.1"
@@ -31,15 +33,6 @@
]
[[package]]
-name = "alloc_system"
-version = "0.0.0"
-dependencies = [
- "compiler_builtins",
- "core",
- "libc",
-]
-
-[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -47,9 +40,9 @@
[[package]]
name = "cc"
-version = "1.0.66"
+version = "1.0.67"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
+checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
[[package]]
name = "cfg-if"
@@ -139,9 +132,9 @@
[[package]]
name = "libc"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff"
+checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c"
dependencies = [
"rustc-std-workspace-core",
]
@@ -258,7 +251,6 @@
version = "0.0.0"
dependencies = [
"alloc",
- "alloc_system",
"compiler_builtins",
"core",
"std",
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml
index 82516c9..04748d5 100644
--- a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml
@@ -9,8 +9,6 @@
std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
test = { path = "./sysroot_src/library/test" }
-alloc_system = { path = "./alloc_system" }
-
compiler_builtins = { version = "0.1.39", default-features = false, features = ["no-asm"] }
[patch.crates-io]
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/Cargo.toml b/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/Cargo.toml
deleted file mode 100644
index 9fffca8..0000000
--- a/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/Cargo.toml
+++ /dev/null
@@ -1,13 +0,0 @@
-[package]
-authors = ["The Rust Project Developers", "bjorn3 (edited to be usable outside the rust source)"]
-name = "alloc_system"
-version = "0.0.0"
-[lib]
-name = "alloc_system"
-path = "lib.rs"
-test = false
-doc = false
-[dependencies]
-core = { path = "../sysroot_src/library/core" }
-libc = { version = "0.2.43", features = ['rustc-dep-of-std'], default-features = false }
-compiler_builtins = "0.1"
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/build_sysroot.sh b/compiler/rustc_codegen_cranelift/build_sysroot/build_sysroot.sh
index 282ce4a..636aa5f 100755
--- a/compiler/rustc_codegen_cranelift/build_sysroot/build_sysroot.sh
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/build_sysroot.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Requires the CHANNEL env var to be set to `debug` or `release.`
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/prepare_sysroot_src.sh b/compiler/rustc_codegen_cranelift/build_sysroot/prepare_sysroot_src.sh
index d3b87e0..c90205d 100755
--- a/compiler/rustc_codegen_cranelift/build_sysroot/prepare_sysroot_src.sh
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/prepare_sysroot_src.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"
@@ -33,7 +33,7 @@
pushd compiler-builtins
git checkout -- .
git checkout 0.1.39
-git apply ../../crate_patches/0001-compiler-builtins-Remove-rotate_left-from-Int.patch
+git apply ../../crate_patches/000*-compiler-builtins-*.patch
popd
echo "Successfully prepared sysroot source for building"
diff --git a/compiler/rustc_codegen_cranelift/clean_all.sh b/compiler/rustc_codegen_cranelift/clean_all.sh
index b47efe7..a7bbeb0 100755
--- a/compiler/rustc_codegen_cranelift/clean_all.sh
+++ b/compiler/rustc_codegen_cranelift/clean_all.sh
@@ -1,4 +1,4 @@
-#!/bin/bash --verbose
+#!/usr/bin/env bash
set -e
rm -rf target/ build/ build_sysroot/{sysroot_src/,target/,compiler-builtins/} perf.data{,.old}
diff --git a/compiler/rustc_codegen_cranelift/crate_patches/0002-compiler-builtins-Disable-128bit-atomic-operations.patch b/compiler/rustc_codegen_cranelift/crate_patches/0002-compiler-builtins-Disable-128bit-atomic-operations.patch
new file mode 100644
index 0000000..7daea99
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/crate_patches/0002-compiler-builtins-Disable-128bit-atomic-operations.patch
@@ -0,0 +1,48 @@
+From 1d574bf5e32d51641dcacaf8ef777e95b44f6f2a Mon Sep 17 00:00:00 2001
+From: bjorn3 <[email protected]>
+Date: Thu, 18 Feb 2021 18:30:55 +0100
+Subject: [PATCH] Disable 128bit atomic operations
+
+Cranelift doesn't support them yet
+---
+ src/mem/mod.rs | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+diff --git a/src/mem/mod.rs b/src/mem/mod.rs
+index 107762c..2d1ae10 100644
+--- a/src/mem/mod.rs
++++ b/src/mem/mod.rs
+@@ -137,10 +137,6 @@ intrinsics! {
+ pub extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
+ memcpy_element_unordered_atomic(dest, src, bytes);
+ }
+- #[cfg(target_has_atomic_load_store = "128")]
+- pub extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
+- memcpy_element_unordered_atomic(dest, src, bytes);
+- }
+
+ #[cfg(target_has_atomic_load_store = "8")]
+ pub extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
+@@ -158,10 +154,6 @@ intrinsics! {
+ pub extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
+ memmove_element_unordered_atomic(dest, src, bytes);
+ }
+- #[cfg(target_has_atomic_load_store = "128")]
+- pub extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
+- memmove_element_unordered_atomic(dest, src, bytes);
+- }
+
+ #[cfg(target_has_atomic_load_store = "8")]
+ pub extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
+@@ -179,8 +171,4 @@ intrinsics! {
+ pub extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
+ memset_element_unordered_atomic(s, c, bytes);
+ }
+- #[cfg(target_has_atomic_load_store = "128")]
+- pub extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
+- memset_element_unordered_atomic(s, c, bytes);
+- }
+ }
+--
+2.26.2.7.g19db9cfb68
+
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_example.rs b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
index f59600e..71e93e8 100644
--- a/compiler/rustc_codegen_cranelift/example/alloc_example.rs
+++ b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
@@ -1,4 +1,4 @@
-#![feature(start, box_syntax, alloc_system, core_intrinsics, alloc_prelude, alloc_error_handler)]
+#![feature(start, box_syntax, core_intrinsics, alloc_prelude, alloc_error_handler)]
#![no_std]
extern crate alloc;
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/lib.rs b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
similarity index 62%
rename from compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/lib.rs
rename to compiler/rustc_codegen_cranelift/example/alloc_system.rs
index c832d5e..5f66ca6 100644
--- a/compiler/rustc_codegen_cranelift/build_sysroot/alloc_system/lib.rs
+++ b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
@@ -8,66 +8,24 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![no_std]
-#![allow(unused_attributes)]
-#![unstable(feature = "alloc_system",
- reason = "this library is unlikely to be stabilized in its current \
- form or name",
- issue = "32838")]
-#![feature(allocator_api)]
-#![feature(core_intrinsics)]
-#![feature(nll)]
-#![feature(staged_api)]
-#![feature(rustc_attrs)]
-#![feature(alloc_layout_extra)]
-#![cfg_attr(
- all(target_arch = "wasm32", not(target_os = "emscripten")),
- feature(integer_atomics, stdsimd)
-)]
+#![feature(allocator_api, rustc_private)]
#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
+
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values.
#[cfg(all(any(target_arch = "x86",
target_arch = "arm",
target_arch = "mips",
target_arch = "powerpc",
- target_arch = "powerpc64",
- target_arch = "asmjs",
- target_arch = "wasm32")))]
-#[allow(dead_code)]
+ target_arch = "powerpc64")))]
const MIN_ALIGN: usize = 8;
#[cfg(all(any(target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "mips64",
target_arch = "s390x",
target_arch = "sparc64")))]
-#[allow(dead_code)]
const MIN_ALIGN: usize = 16;
-/// The default memory allocator provided by the operating system.
-///
-/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows,
-/// plus related functions.
-///
-/// This type can be used in a `static` item
-/// with the `#[global_allocator]` attribute
-/// to force the global allocator to be the system’s one.
-/// (The default is jemalloc for executables, on some platforms.)
-///
-/// ```rust
-/// use std::alloc::System;
-///
-/// #[global_allocator]
-/// static A: System = System;
-///
-/// fn main() {
-/// let a = Box::new(4); // Allocates from the system allocator.
-/// println!("{}", a);
-/// }
-/// ```
-///
-/// It can also be used directly to allocate memory
-/// independently of the standard library’s global allocator.
-#[stable(feature = "alloc_system_type", since = "1.28.0")]
pub struct System;
#[cfg(any(windows, unix, target_os = "redox"))]
mod realloc_fallback {
@@ -96,7 +54,6 @@
use MIN_ALIGN;
use System;
use core::alloc::{GlobalAlloc, Layout};
- #[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
@@ -221,7 +178,6 @@
};
ptr as *mut u8
}
- #[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
@@ -254,89 +210,3 @@
}
}
}
-// This is an implementation of a global allocator on the wasm32 platform when
-// emscripten is not in use. In that situation there's no actual runtime for us
-// to lean on for allocation, so instead we provide our own!
-//
-// The wasm32 instruction set has two instructions for getting the current
-// amount of memory and growing the amount of memory. These instructions are the
-// foundation on which we're able to build an allocator, so we do so! Note that
-// the instructions are also pretty "global" and this is the "global" allocator
-// after all!
-//
-// The current allocator here is the `dlmalloc` crate which we've got included
-// in the rust-lang/rust repository as a submodule. The crate is a port of
-// dlmalloc.c from C to Rust and is basically just so we can have "pure Rust"
-// for now which is currently technically required (can't link with C yet).
-//
-// The crate itself provides a global allocator which on wasm has no
-// synchronization as there are no threads!
-#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
-mod platform {
- extern crate dlmalloc;
- use core::alloc::{GlobalAlloc, Layout};
- use System;
- static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::DLMALLOC_INIT;
- #[stable(feature = "alloc_system_type", since = "1.28.0")]
- unsafe impl GlobalAlloc for System {
- #[inline]
- unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- let _lock = lock::lock();
- DLMALLOC.malloc(layout.size(), layout.align())
- }
- #[inline]
- unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
- let _lock = lock::lock();
- DLMALLOC.calloc(layout.size(), layout.align())
- }
- #[inline]
- unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
- let _lock = lock::lock();
- DLMALLOC.free(ptr, layout.size(), layout.align())
- }
- #[inline]
- unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
- let _lock = lock::lock();
- DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size)
- }
- }
- #[cfg(target_feature = "atomics")]
- mod lock {
- use core::arch::wasm32;
- use core::sync::atomic::{AtomicI32, Ordering::SeqCst};
- static LOCKED: AtomicI32 = AtomicI32::new(0);
- pub struct DropLock;
- pub fn lock() -> DropLock {
- loop {
- if LOCKED.swap(1, SeqCst) == 0 {
- return DropLock
- }
- unsafe {
- let r = wasm32::atomic::wait_i32(
- &LOCKED as *const AtomicI32 as *mut i32,
- 1, // expected value
- -1, // timeout
- );
- debug_assert!(r == 0 || r == 1);
- }
- }
- }
- impl Drop for DropLock {
- fn drop(&mut self) {
- let r = LOCKED.swap(0, SeqCst);
- debug_assert_eq!(r, 1);
- unsafe {
- wasm32::atomic::wake(
- &LOCKED as *const AtomicI32 as *mut i32,
- 1, // only one thread
- );
- }
- }
- }
- }
- #[cfg(not(target_feature = "atomics"))]
- mod lock {
- #[inline]
- pub fn lock() {} // no atomics, no threads, that's easy!
- }
-}
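Note: the relocated alloc_system example keeps its `GlobalAlloc` impls for `System`, while the doc-comment showing how to install such an allocator globally is dropped above. A minimal sketch of that usage, mirroring the removed example (not part of the patch):

// Illustrative only: registering the system allocator as the global allocator.
use std::alloc::System;

#[global_allocator]
static A: System = System;

fn main() {
    let a = Box::new(4); // Allocated through the system allocator.
    println!("{}", a);
}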
diff --git a/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
index 0b0039a..ddeb752 100644
--- a/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
+++ b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
@@ -1,22 +1,12 @@
// Adapted from rustc run-pass test suite
-#![feature(no_core, arbitrary_self_types, box_syntax)]
+#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
#![feature(rustc_attrs)]
-#![feature(start, lang_items)]
-#![no_core]
-
-extern crate mini_core;
-
-use mini_core::*;
-
-macro_rules! assert_eq {
- ($l:expr, $r: expr) => {
- if $l != $r {
- panic(stringify!($l != $r));
- }
- }
-}
+use std::{
+ ops::{Deref, CoerceUnsized, DispatchFromDyn},
+ marker::Unsize,
+};
struct Ptr<T: ?Sized>(Box<T>);
@@ -67,16 +57,13 @@
}
}
-#[start]
-fn main(_: isize, _: *const *const u8) -> isize {
- let pw = Ptr(box Wrapper(5)) as Ptr<Wrapper<dyn Trait>>;
+fn main() {
+ let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
assert_eq!(pw.ptr_wrapper(), 5);
- let wp = Wrapper(Ptr(box 6)) as Wrapper<Ptr<dyn Trait>>;
+ let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
assert_eq!(wp.wrapper_ptr(), 6);
- let wpw = Wrapper(Ptr(box Wrapper(7))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+ let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
-
- 0
}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core.rs b/compiler/rustc_codegen_cranelift/example/mini_core.rs
index 002ec7e..7c6d7fc 100644
--- a/compiler/rustc_codegen_cranelift/example/mini_core.rs
+++ b/compiler/rustc_codegen_cranelift/example/mini_core.rs
@@ -365,6 +365,22 @@
}
}
+#[lang = "shl"]
+pub trait Shl<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn shl(self, rhs: RHS) -> Self::Output;
+}
+
+impl Shl for u128 {
+ type Output = u128;
+
+ fn shl(self, rhs: u128) -> u128 {
+ self << rhs
+ }
+}
+
#[lang = "neg"]
pub trait Neg {
type Output;
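Note: the new `shl` lang item gives the no_core examples a `<<` implementation for `u128`, which the shift added to mini_core_hello_world below exercises. A rough desugaring sketch, assuming the `mini_core::Shl` impl defined above (not part of the patch):

// Roughly what `1u128 << amount` lowers to once the `shl` lang item exists.
let amount: u128 = 0;
let one: u128 = Shl::shl(1u128, amount);
// one == 1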
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
index 4a8375a..237f4d1 100644
--- a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
+++ b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
@@ -264,6 +264,9 @@
assert_eq!(f2 as i8, -128);
assert_eq!(f2 as u8, 0);
+ let amount = 0;
+ assert_eq!(1u128 << amount, 1);
+
static ANOTHER_STATIC: &u8 = &A_STATIC;
assert_eq!(*ANOTHER_STATIC, 42);
diff --git a/compiler/rustc_codegen_cranelift/patches/0022-core-Disable-not-compiling-tests.patch b/compiler/rustc_codegen_cranelift/patches/0022-core-Disable-not-compiling-tests.patch
index 3eb1006..8cfffe5 100644
--- a/compiler/rustc_codegen_cranelift/patches/0022-core-Disable-not-compiling-tests.patch
+++ b/compiler/rustc_codegen_cranelift/patches/0022-core-Disable-not-compiling-tests.patch
@@ -119,21 +119,5 @@
#[test]
#[should_panic(expected = "index 0 greater than length of slice")]
-diff --git a/library/core/tests/num/ops.rs b/library/core/tests/num/ops.rs
-index 9979cc8..d5d1d83 100644
---- a/library/core/tests/num/ops.rs
-+++ b/library/core/tests/num/ops.rs
-@@ -238,7 +238,7 @@ macro_rules! test_shift_assign {
- }
- };
- }
--test_shift!(test_shl_defined, Shl::shl);
--test_shift_assign!(test_shl_assign_defined, ShlAssign::shl_assign);
--test_shift!(test_shr_defined, Shr::shr);
--test_shift_assign!(test_shr_assign_defined, ShrAssign::shr_assign);
-+//test_shift!(test_shl_defined, Shl::shl);
-+//test_shift_assign!(test_shl_assign_defined, ShlAssign::shl_assign);
-+//test_shift!(test_shr_defined, Shr::shr);
-+//test_shift_assign!(test_shr_assign_defined, ShrAssign::shr_assign);
--
2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_cranelift/patches/0027-Disable-128bit-atomic-operations.patch b/compiler/rustc_codegen_cranelift/patches/0027-Disable-128bit-atomic-operations.patch
new file mode 100644
index 0000000..32e5930
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0027-Disable-128bit-atomic-operations.patch
@@ -0,0 +1,103 @@
+From 894e07dfec2624ba539129b1c1d63e1d7d812bda Mon Sep 17 00:00:00 2001
+From: bjorn3 <[email protected]>
+Date: Thu, 18 Feb 2021 18:45:28 +0100
+Subject: [PATCH] Disable 128bit atomic operations
+
+Cranelift doesn't support them yet
+---
+ library/core/src/sync/atomic.rs | 38 ---------------------------------
+ library/core/tests/atomic.rs | 4 ----
+ library/std/src/panic.rs | 6 ------
+ 3 files changed, 48 deletions(-)
+
+diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
+index 81c9e1d..65c9503 100644
+--- a/library/core/src/sync/atomic.rs
++++ b/library/core/src/sync/atomic.rs
+@@ -2228,44 +2228,6 @@ atomic_int! {
+ "AtomicU64::new(0)",
+ u64 AtomicU64 ATOMIC_U64_INIT
+ }
+-#[cfg(target_has_atomic_load_store = "128")]
+-atomic_int! {
+- cfg(target_has_atomic = "128"),
+- cfg(target_has_atomic_equal_alignment = "128"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- "i128",
+- "#![feature(integer_atomics)]\n\n",
+- atomic_min, atomic_max,
+- 16,
+- "AtomicI128::new(0)",
+- i128 AtomicI128 ATOMIC_I128_INIT
+-}
+-#[cfg(target_has_atomic_load_store = "128")]
+-atomic_int! {
+- cfg(target_has_atomic = "128"),
+- cfg(target_has_atomic_equal_alignment = "128"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- "u128",
+- "#![feature(integer_atomics)]\n\n",
+- atomic_umin, atomic_umax,
+- 16,
+- "AtomicU128::new(0)",
+- u128 AtomicU128 ATOMIC_U128_INIT
+-}
+
+ macro_rules! atomic_int_ptr_sized {
+ ( $($target_pointer_width:literal $align:literal)* ) => { $(
+diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
+index 2d1e449..cb6da5d 100644
+--- a/library/core/tests/atomic.rs
++++ b/library/core/tests/atomic.rs
+@@ -145,10 +145,6 @@ fn atomic_alignment() {
+ assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
+ #[cfg(target_has_atomic = "64")]
+ assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
+- #[cfg(target_has_atomic = "128")]
+- assert_eq!(align_of::<AtomicU128>(), size_of::<AtomicU128>());
+- #[cfg(target_has_atomic = "128")]
+- assert_eq!(align_of::<AtomicI128>(), size_of::<AtomicI128>());
+ #[cfg(target_has_atomic = "ptr")]
+ assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
+ #[cfg(target_has_atomic = "ptr")]
+diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
+index 89a822a..779fd88 100644
+--- a/library/std/src/panic.rs
++++ b/library/std/src/panic.rs
+@@ -279,9 +279,6 @@ impl RefUnwindSafe for atomic::AtomicI32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for atomic::AtomicI64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "32976")]
+-impl RefUnwindSafe for atomic::AtomicI128 {}
+
+ #[cfg(target_has_atomic_load_store = "ptr")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+@@ -298,9 +295,6 @@ impl RefUnwindSafe for atomic::AtomicU32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for atomic::AtomicU64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "32976")]
+-impl RefUnwindSafe for atomic::AtomicU128 {}
+
+ #[cfg(target_has_atomic_load_store = "8")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+--
+2.26.2.7.g19db9cfb68
+
diff --git a/compiler/rustc_codegen_cranelift/prepare.sh b/compiler/rustc_codegen_cranelift/prepare.sh
index 08e7cb1..ee995ff 100755
--- a/compiler/rustc_codegen_cranelift/prepare.sh
+++ b/compiler/rustc_codegen_cranelift/prepare.sh
@@ -1,4 +1,4 @@
-#!/bin/bash --verbose
+#!/usr/bin/env bash
set -e
rustup component add rust-src rustc-dev llvm-tools-preview
diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain
index a08f00d..908ca52 100644
--- a/compiler/rustc_codegen_cranelift/rust-toolchain
+++ b/compiler/rustc_codegen_cranelift/rust-toolchain
@@ -1 +1 @@
-nightly-2021-01-30
+nightly-2021-03-05
diff --git a/compiler/rustc_codegen_cranelift/rustfmt.toml b/compiler/rustc_codegen_cranelift/rustfmt.toml
new file mode 100644
index 0000000..2bd8f7d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/rustfmt.toml
@@ -0,0 +1,4 @@
+# Matches rustfmt.toml of rustc
+version = "Two"
+use_small_heuristics = "Max"
+merge_derives = false
diff --git a/compiler/rustc_codegen_cranelift/scripts/cargo.sh b/compiler/rustc_codegen_cranelift/scripts/cargo.sh
index a3d6d30..669d2d4 100755
--- a/compiler/rustc_codegen_cranelift/scripts/cargo.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/cargo.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
dir=$(dirname "$0")
source "$dir/config.sh"
diff --git a/compiler/rustc_codegen_cranelift/scripts/config.sh b/compiler/rustc_codegen_cranelift/scripts/config.sh
index 834708a..c2ed2bf 100644
--- a/compiler/rustc_codegen_cranelift/scripts/config.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/config.sh
@@ -3,7 +3,7 @@
set -e
unamestr=$(uname)
-if [[ "$unamestr" == 'Linux' ]]; then
+if [[ "$unamestr" == 'Linux' || "$unamestr" == 'FreeBSD' ]]; then
dylib_ext='so'
elif [[ "$unamestr" == 'Darwin' ]]; then
dylib_ext='dylib'
@@ -26,7 +26,7 @@
export RUSTDOCFLAGS=$linker' -Cpanic=abort -Zpanic-abort-tests '\
'-Zcodegen-backend='$dir'/lib/librustc_codegen_cranelift.'$dylib_ext' --sysroot '$dir
-# FIXME remove once the atomic shim is gone
+# FIXME fix `#[linkage = "extern_weak"]` without this
if [[ "$unamestr" == 'Darwin' ]]; then
export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
fi
diff --git a/compiler/rustc_codegen_cranelift/scripts/rustup.sh b/compiler/rustc_codegen_cranelift/scripts/rustup.sh
index 430f5c4..694945a 100755
--- a/compiler/rustc_codegen_cranelift/scripts/rustup.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/rustup.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh b/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
index db69541..6473c6a 100755
--- a/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/../"
@@ -14,21 +14,18 @@
git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
git apply - <<EOF
-diff --git a/.gitmodules b/.gitmodules
-index 984113151de..c1e9d960d56 100644
---- a/.gitmodules
-+++ b/.gitmodules
-@@ -34,10 +34,6 @@
- [submodule "src/doc/edition-guide"]
- path = src/doc/edition-guide
- url = https://github.com/rust-lang/edition-guide.git
--[submodule "src/llvm-project"]
-- path = src/llvm-project
-- url = https://github.com/rust-lang/llvm-project.git
-- branch = rustc/11.0-2020-10-12
- [submodule "src/doc/embedded-book"]
- path = src/doc/embedded-book
- url = https://github.com/rust-embedded/book.git
+diff --git a/Cargo.toml b/Cargo.toml
+index 5bd1147cad5..10d68a2ff14 100644
+--- a/Cargo.toml
++++ b/Cargo.toml
+@@ -111,5 +111,7 @@ rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
+ rustc-std-workspace-alloc = { path = 'library/rustc-std-workspace-alloc' }
+ rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
+
++compiler_builtins = { path = "../build_sysroot/compiler-builtins" }
++
+ [patch."https://github.com/rust-lang/rust-clippy"]
+ clippy_lints = { path = "src/tools/clippy/clippy_lints" }
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index 23e689fcae7..5f077b765b6 100644
--- a/compiler/rustc_data_structures/Cargo.toml
@@ -41,6 +38,19 @@
[target.'cfg(windows)'.dependencies]
winapi = { version = "0.3", features = ["fileapi", "psapi"] }
+diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
+index d95b5b7f17f..00b6f0e3635 100644
+--- a/library/alloc/Cargo.toml
++++ b/library/alloc/Cargo.toml
+@@ -8,7 +8,7 @@ edition = "2018"
+
+ [dependencies]
+ core = { path = "../core" }
+-compiler_builtins = { version = "0.1.39", features = ['rustc-dep-of-std'] }
++compiler_builtins = { version = "0.1.39", features = ['rustc-dep-of-std', 'no-asm'] }
+
+ [dev-dependencies]
+ rand = "0.7"
EOF
cat > config.toml <<EOF
diff --git a/compiler/rustc_codegen_cranelift/scripts/tests.sh b/compiler/rustc_codegen_cranelift/scripts/tests.sh
index d37b57b..f9a9fb0 100755
--- a/compiler/rustc_codegen_cranelift/scripts/tests.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/tests.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
@@ -27,13 +27,16 @@
$MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target "$TARGET_TRIPLE"
$RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
# (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
-
- echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
- $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
}
function base_sysroot_tests() {
+ echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+ $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
+
+ echo "[AOT] alloc_system"
+ $MY_RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
+
echo "[AOT] alloc_example"
$MY_RUSTC example/alloc_example.rs --crate-type bin --target "$TARGET_TRIPLE"
$RUN_WRAPPER ./target/out/alloc_example
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
index 9aab45b..c3cf90e 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/comments.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -10,14 +10,14 @@
use crate::prelude::*;
-pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, impl Module>) {
+pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
fx.add_global_comment(
"kind loc.idx param pass mode ty".to_string(),
);
}
pub(super) fn add_arg_comment<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
kind: &str,
local: Option<mir::Local>,
local_field: Option<usize>,
@@ -42,11 +42,7 @@
[param_a, param_b] => Cow::Owned(format!("= {:?},{:?}", param_a, param_b)),
params => Cow::Owned(format!(
"= {}",
- params
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(",")
+ params.iter().map(ToString::to_string).collect::<Vec<_>>().join(",")
)),
};
@@ -62,7 +58,7 @@
));
}
-pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, impl Module>) {
+pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
fx.add_global_comment(String::new());
fx.add_global_comment(
"kind local ty size align (abi,pref)".to_string(),
@@ -70,19 +66,13 @@
}
pub(super) fn add_local_place_comments<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
place: CPlace<'tcx>,
local: Local,
) {
let TyAndLayout { ty, layout } = place.layout();
- let rustc_target::abi::Layout {
- size,
- align,
- abi: _,
- variants: _,
- fields: _,
- largest_niche: _,
- } = layout;
+ let rustc_target::abi::Layout { size, align, abi: _, variants: _, fields: _, largest_niche: _ } =
+ layout;
let (kind, extra) = match *place.inner() {
CPlaceInner::Var(place_local, var) => {
@@ -91,10 +81,7 @@
}
CPlaceInner::VarPair(place_local, var1, var2) => {
assert_eq!(local, place_local);
- (
- "ssa",
- Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())),
- )
+ ("ssa", Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())))
}
CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
CPlaceInner::Addr(ptr, meta) => {
@@ -104,18 +91,15 @@
Cow::Borrowed("")
};
match ptr.base_and_offset() {
- (crate::pointer::PointerBase::Addr(addr), offset) => (
- "reuse",
- format!("storage={}{}{}", addr, offset, meta).into(),
- ),
- (crate::pointer::PointerBase::Stack(stack_slot), offset) => (
- "stack",
- format!("storage={}{}{}", stack_slot, offset, meta).into(),
- ),
- (crate::pointer::PointerBase::Dangling(align), offset) => (
- "zst",
- format!("align={},offset={}", align.bytes(), offset).into(),
- ),
+ (crate::pointer::PointerBase::Addr(addr), offset) => {
+ ("reuse", format!("storage={}{}{}", addr, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Stack(stack_slot), offset) => {
+ ("stack", format!("storage={}{}{}", stack_slot, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Dangling(align), offset) => {
+ ("zst", format!("align={},offset={}", align.bytes(), offset).into())
+ }
}
}
};
@@ -128,11 +112,7 @@
size.bytes(),
align.abi.bytes(),
align.pref.bytes(),
- if extra.is_empty() {
- ""
- } else {
- " "
- },
+ if extra.is_empty() { "" } else { " " },
extra,
));
}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index b2647e6..b158d73 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -38,25 +38,15 @@
| Conv::X86VectorCall
| Conv::AmdGpuKernel
| Conv::AvrInterrupt
- | Conv::AvrNonBlockingInterrupt => {
- todo!("{:?}", fn_abi.conv)
- }
+ | Conv::AvrNonBlockingInterrupt => todo!("{:?}", fn_abi.conv),
};
- let inputs = fn_abi
- .args
- .iter()
- .map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter())
- .flatten();
+ let inputs = fn_abi.args.iter().map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter()).flatten();
let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
// Sometimes the first param is an pointer to the place where the return value needs to be stored.
let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
- Signature {
- params,
- returns,
- call_conv,
- }
+ Signature { params, returns, call_conv }
}
pub(crate) fn get_function_sig<'tcx>(
@@ -65,34 +55,25 @@
inst: Instance<'tcx>,
) -> Signature {
assert!(!inst.substs.needs_infer());
- clif_sig_from_fn_abi(
- tcx,
- triple,
- &FnAbi::of_instance(&RevealAllLayoutCx(tcx), inst, &[]),
- )
+ clif_sig_from_fn_abi(tcx, triple, &FnAbi::of_instance(&RevealAllLayoutCx(tcx), inst, &[]))
}
/// Instance must be monomorphized
pub(crate) fn import_function<'tcx>(
tcx: TyCtxt<'tcx>,
- module: &mut impl Module,
+ module: &mut dyn Module,
inst: Instance<'tcx>,
) -> FuncId {
let name = tcx.symbol_name(inst).name.to_string();
let sig = get_function_sig(tcx, module.isa().triple(), inst);
- module
- .declare_function(&name, Linkage::Import, &sig)
- .unwrap()
+ module.declare_function(&name, Linkage::Import, &sig).unwrap()
}
-impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
/// Instance must be monomorphized
pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
- let func_id = import_function(self.tcx, &mut self.cx.module, inst);
- let func_ref = self
- .cx
- .module
- .declare_func_in_func(func_id, &mut self.bcx.func);
+ let func_id = import_function(self.tcx, self.cx.module, inst);
+ let func_ref = self.cx.module.declare_func_in_func(func_id, &mut self.bcx.func);
#[cfg(debug_assertions)]
self.add_comment(func_ref, format!("{:?}", inst));
@@ -107,20 +88,9 @@
returns: Vec<AbiParam>,
args: &[Value],
) -> &[Value] {
- let sig = Signature {
- params,
- returns,
- call_conv: CallConv::triple_default(self.triple()),
- };
- let func_id = self
- .cx
- .module
- .declare_function(&name, Linkage::Import, &sig)
- .unwrap();
- let func_ref = self
- .cx
- .module
- .declare_func_in_func(func_id, &mut self.bcx.func);
+ let sig = Signature { params, returns, call_conv: CallConv::triple_default(self.triple()) };
+ let func_id = self.cx.module.declare_function(&name, Linkage::Import, &sig).unwrap();
+ let func_ref = self.cx.module.declare_func_in_func(func_id, &mut self.bcx.func);
let call_inst = self.bcx.ins().call(func_ref, args);
#[cfg(debug_assertions)]
{
@@ -140,17 +110,12 @@
let (input_tys, args): (Vec<_>, Vec<_>) = args
.iter()
.map(|arg| {
- (
- AbiParam::new(self.clif_type(arg.layout().ty).unwrap()),
- arg.load_scalar(self),
- )
+ (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
})
.unzip();
let return_layout = self.layout_of(return_ty);
let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
- tup.types()
- .map(|ty| AbiParam::new(self.clif_type(ty).unwrap()))
- .collect()
+ tup.types().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
} else {
vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
};
@@ -169,7 +134,7 @@
/// Make a [`CPlace`] capable of holding value of the specified type.
fn make_local_place<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
local: Local,
layout: TyAndLayout<'tcx>,
is_ssa: bool,
@@ -190,10 +155,7 @@
place
}
-pub(crate) fn codegen_fn_prelude<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- start_block: Block,
-) {
+pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
fx.bcx.append_block_params_for_function_params(start_block);
fx.bcx.switch_to_block(start_block);
@@ -204,13 +166,7 @@
#[cfg(debug_assertions)]
self::comments::add_args_header_comment(fx);
- let mut block_params_iter = fx
- .bcx
- .func
- .dfg
- .block_params(start_block)
- .to_vec()
- .into_iter();
+ let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
let ret_place =
self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
@@ -286,10 +242,10 @@
if let Some((addr, meta)) = val.try_to_ptr() {
let local_decl = &fx.mir.local_decls[local];
// v this ! is important
- let internally_mutable = !val.layout().ty.is_freeze(
- fx.tcx.at(local_decl.source_info.span),
- ParamEnv::reveal_all(),
- );
+ let internally_mutable = !val
+ .layout()
+ .ty
+ .is_freeze(fx.tcx.at(local_decl.source_info.span), ParamEnv::reveal_all());
if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
// We wont mutate this argument, so it is fine to borrow the backing storage
// of this argument, to prevent a copy.
@@ -321,9 +277,7 @@
ArgKind::Spread(params) => {
for (i, param) in params.into_iter().enumerate() {
if let Some(param) = param {
- place
- .place_field(fx, mir::Field::new(i))
- .write_cvalue(fx, param);
+ place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
}
}
}
@@ -340,13 +294,11 @@
assert_eq!(fx.local_map.push(place), local);
}
- fx.bcx
- .ins()
- .jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
+ fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
}
pub(crate) fn codegen_terminator_call<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
span: Span,
current_block: Block,
func: &Operand<'tcx>,
@@ -354,9 +306,8 @@
destination: Option<(Place<'tcx>, BasicBlock)>,
) {
let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
- let fn_sig = fx
- .tcx
- .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+ let fn_sig =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb));
@@ -404,20 +355,11 @@
let fn_abi = if let Some(instance) = instance {
FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
} else {
- FnAbi::of_fn_ptr(
- &RevealAllLayoutCx(fx.tcx),
- fn_ty.fn_sig(fx.tcx),
- &extra_args,
- )
+ FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
};
let is_cold = instance
- .map(|inst| {
- fx.tcx
- .codegen_fn_attrs(inst.def_id())
- .flags
- .contains(CodegenFnAttrFlags::COLD)
- })
+ .map(|inst| fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD))
.unwrap_or(false);
if is_cold {
fx.cold_blocks.insert(current_block);
@@ -441,9 +383,7 @@
}
args
} else {
- args.iter()
- .map(|arg| codegen_operand(fx, arg))
- .collect::<Vec<_>>()
+ args.iter().map(|arg| codegen_operand(fx, arg)).collect::<Vec<_>>()
};
// | indirect call target
@@ -451,10 +391,7 @@
// v v
let (func_ref, first_arg) = match instance {
// Trait object call
- Some(Instance {
- def: InstanceDef::Virtual(_, idx),
- ..
- }) => {
+ Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
#[cfg(debug_assertions)]
{
let nop_inst = fx.bcx.ins().nop();
@@ -511,10 +448,7 @@
)
.collect::<Vec<_>>();
- if instance
- .map(|inst| inst.def.requires_caller_location(fx.tcx))
- .unwrap_or(false)
- {
+ if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
// Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(span);
call_args.extend(
@@ -542,7 +476,7 @@
// FIXME find a cleaner way to support varargs
if fn_sig.c_variadic {
- if fn_sig.abi != Abi::C {
+ if !matches!(fn_sig.abi, Abi::C { .. }) {
fx.tcx.sess.span_fatal(
span,
&format!("Variadic call for non-C abi {:?}", fn_sig.abi),
@@ -555,9 +489,7 @@
let ty = fx.bcx.func.dfg.value_type(arg);
if !ty.is_int() {
// FIXME set %al to upperbound on float args once floats are supported
- fx.tcx
- .sess
- .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
+ fx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
}
AbiParam::new(ty)
})
@@ -574,7 +506,7 @@
}
pub(crate) fn codegen_drop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
span: Span,
drop_place: CPlace<'tcx>,
) {
@@ -611,10 +543,7 @@
fx,
fx.layout_of(fx.tcx.mk_ref(
&ty::RegionKind::ReErased,
- TypeAndMut {
- ty,
- mutbl: crate::rustc_hir::Mutability::Mut,
- },
+ TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
)),
);
let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0]);
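Note: across cg_clif the `FunctionCx<'_, 'tcx, impl Module>`-style generics are replaced by a third lifetime plus `&mut dyn Module`, trading per-backend monomorphized copies of these helpers for dynamic dispatch. A reduced sketch of that pattern with hypothetical names (not the actual cranelift_module API):

// Illustrative only; `Module`/`declare` here are stand-ins for the real trait.
trait Module {
    fn declare(&mut self, name: &str);
}

// Before: one compiled copy of the helper per concrete Module type.
fn import_generic<M: Module>(module: &mut M, name: &str) {
    module.declare(name);
}

// After: a single copy, dispatching through a vtable.
fn import_dyn(module: &mut dyn Module, name: &str) {
    module.declare(name);
}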
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index 1202c23..d58f952 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -71,12 +71,7 @@
.prefix
.iter()
.flatten()
- .map(|&kind| {
- reg_to_abi_param(Reg {
- kind,
- size: cast.prefix_chunk_size,
- })
- })
+ .map(|&kind| reg_to_abi_param(Reg { kind, size: cast.prefix_chunk_size }))
.chain((0..rest_count).map(|_| reg_to_abi_param(cast.rest.unit)))
.collect::<SmallVec<_>>();
@@ -98,12 +93,10 @@
match self.mode {
PassMode::Ignore => smallvec![],
PassMode::Direct(attrs) => match &self.layout.abi {
- Abi::Scalar(scalar) => {
- smallvec![apply_arg_attrs_to_abi_param(
- AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())),
- attrs
- )]
- }
+ Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
+ AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())),
+ attrs
+ )],
Abi::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
smallvec![AbiParam::new(vector_ty)]
@@ -122,11 +115,7 @@
_ => unreachable!("{:?}", self.layout.abi),
},
PassMode::Cast(cast) => cast_target_to_abi_params(cast),
- PassMode::Indirect {
- attrs,
- extra_attrs: None,
- on_stack,
- } => {
+ PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
if on_stack {
let size = u32::try_from(self.layout.size.bytes()).unwrap();
smallvec![apply_arg_attrs_to_abi_param(
@@ -134,17 +123,10 @@
attrs
)]
} else {
- smallvec![apply_arg_attrs_to_abi_param(
- AbiParam::new(pointer_ty(tcx)),
- attrs
- )]
+ smallvec![apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs)]
}
}
- PassMode::Indirect {
- attrs,
- extra_attrs: Some(extra_attrs),
- on_stack,
- } => {
+ PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
assert!(!on_stack);
smallvec![
apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs),
@@ -158,10 +140,9 @@
match self.mode {
PassMode::Ignore => (None, vec![]),
PassMode::Direct(_) => match &self.layout.abi {
- Abi::Scalar(scalar) => (
- None,
- vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))],
- ),
+ Abi::Scalar(scalar) => {
+ (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))])
+ }
Abi::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
(None, vec![AbiParam::new(vector_ty)])
@@ -177,31 +158,19 @@
_ => unreachable!("{:?}", self.layout.abi),
},
PassMode::Cast(cast) => (None, cast_target_to_abi_params(cast).into_iter().collect()),
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack,
- } => {
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
assert!(!on_stack);
- (
- Some(AbiParam::special(
- pointer_ty(tcx),
- ArgumentPurpose::StructReturn,
- )),
- vec![],
- )
+ (Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
}
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
}
}
}
pub(super) fn to_casted_value<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
arg: CValue<'tcx>,
cast: CastTarget,
) -> SmallVec<[Value; 2]> {
@@ -211,9 +180,7 @@
cast_target_to_abi_params(cast)
.into_iter()
.map(|param| {
- let val = ptr
- .offset_i64(fx, offset)
- .load(fx, param.value_type, MemFlags::new());
+ let val = ptr.offset_i64(fx, offset).load(fx, param.value_type, MemFlags::new());
offset += i64::from(param.value_type.bytes());
val
})
@@ -221,16 +188,13 @@
}
pub(super) fn from_casted_value<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
block_params: &[Value],
layout: TyAndLayout<'tcx>,
cast: CastTarget,
) -> CValue<'tcx> {
let abi_params = cast_target_to_abi_params(cast);
- let abi_param_size: u32 = abi_params
- .iter()
- .map(|param| param.value_type.bytes())
- .sum();
+ let abi_param_size: u32 = abi_params.iter().map(|param| param.value_type.bytes()).sum();
let layout_size = u32::try_from(layout.size.bytes()).unwrap();
let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
@@ -260,7 +224,7 @@
/// Get a set of values to be passed as function arguments.
pub(super) fn adjust_arg_for_abi<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
arg: CValue<'tcx>,
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
) -> SmallVec<[Value; 2]> {
@@ -283,7 +247,7 @@
/// Create a [`CValue`] containing the value of a function parameter adding clif function parameters
/// as necessary.
pub(super) fn cvalue_for_param<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
#[cfg_attr(not(debug_assertions), allow(unused_variables))] local: Option<mir::Local>,
#[cfg_attr(not(debug_assertions), allow(unused_variables))] local_field: Option<usize>,
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
@@ -294,10 +258,7 @@
.into_iter()
.map(|abi_param| {
let block_param = block_params_iter.next().unwrap();
- assert_eq!(
- fx.bcx.func.dfg.value_type(block_param),
- abi_param.value_type
- );
+ assert_eq!(fx.bcx.func.dfg.value_type(block_param), abi_param.value_type);
block_param
})
.collect::<SmallVec<[_; 2]>>();
@@ -321,29 +282,14 @@
}
PassMode::Pair(_, _) => {
assert_eq!(block_params.len(), 2, "{:?}", block_params);
- Some(CValue::by_val_pair(
- block_params[0],
- block_params[1],
- arg_abi.layout,
- ))
+ Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
}
PassMode::Cast(cast) => Some(from_casted_value(fx, &block_params, arg_abi.layout, cast)),
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => {
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
assert_eq!(block_params.len(), 1, "{:?}", block_params);
- Some(CValue::by_ref(
- Pointer::new(block_params[0]),
- arg_abi.layout,
- ))
+ Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
}
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => {
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
assert_eq!(block_params.len(), 2, "{:?}", block_params);
Some(CValue::by_ref_unsized(
Pointer::new(block_params[0]),
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
index a382963..9fa066d 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/returning.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -8,14 +8,13 @@
/// Can the given type be returned into an ssa var or does it need to be returned on the stack.
pub(crate) fn can_return_to_ssa_var<'tcx>(
- fx: &FunctionCx<'_, 'tcx, impl Module>,
+ fx: &FunctionCx<'_, '_, 'tcx>,
func: &mir::Operand<'tcx>,
args: &[mir::Operand<'tcx>],
) -> bool {
let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
- let fn_sig = fx
- .tcx
- .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+ let fn_sig =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
// Handle special calls like instrinsics and empty drop glue.
let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
@@ -42,11 +41,7 @@
let fn_abi = if let Some(instance) = instance {
FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
} else {
- FnAbi::of_fn_ptr(
- &RevealAllLayoutCx(fx.tcx),
- fn_ty.fn_sig(fx.tcx),
- &extra_args,
- )
+ FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
};
match fn_abi.ret.mode {
PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => true,
@@ -58,15 +53,12 @@
/// Return a place where the return value of the current function can be written to. If necessary
/// this adds an extra parameter pointing to where the return value needs to be stored.
pub(super) fn codegen_return_param<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
ssa_analyzed: &rustc_index::vec::IndexVec<Local, crate::analyze::SsaKind>,
block_params_iter: &mut impl Iterator<Item = Value>,
) -> CPlace<'tcx> {
let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
- PassMode::Ignore => (
- CPlace::no_place(fx.fn_abi.as_ref().unwrap().ret.layout),
- smallvec![],
- ),
+ PassMode::Ignore => (CPlace::no_place(fx.fn_abi.as_ref().unwrap().ret.layout), smallvec![]),
PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
(
@@ -79,26 +71,17 @@
smallvec![],
)
}
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => {
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
let ret_param = block_params_iter.next().unwrap();
assert_eq!(fx.bcx.func.dfg.value_type(ret_param), pointer_ty(fx.tcx));
(
- CPlace::for_ptr(
- Pointer::new(ret_param),
- fx.fn_abi.as_ref().unwrap().ret.layout,
- ),
+ CPlace::for_ptr(Pointer::new(ret_param), fx.fn_abi.as_ref().unwrap().ret.layout),
smallvec![ret_param],
)
}
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
};
#[cfg(not(debug_assertions))]
@@ -120,27 +103,21 @@
/// Invokes the closure with if necessary a value representing the return pointer. When the closure
/// returns the call return value(s) if any are written to the correct place.
-pub(super) fn codegen_with_call_return_arg<'tcx, M: Module, T>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
+pub(super) fn codegen_with_call_return_arg<'tcx, T>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
ret_arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
ret_place: Option<CPlace<'tcx>>,
- f: impl FnOnce(&mut FunctionCx<'_, 'tcx, M>, Option<Value>) -> (Inst, T),
+ f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> (Inst, T),
) -> (Inst, T) {
let return_ptr = match ret_arg_abi.mode {
PassMode::Ignore => None,
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => match ret_place {
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => match ret_place {
Some(ret_place) => Some(ret_place.to_ptr().get_addr(fx)),
None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)), // FIXME allocate temp stack slot
},
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => None,
};
@@ -177,37 +154,24 @@
ret_place.write_cvalue(fx, result);
}
}
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => {}
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {}
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
}
(call_inst, meta)
}
/// Codegen a return instruction with the right return value(s) if any.
-pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, impl Module>) {
+pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
match fx.fn_abi.as_ref().unwrap().ret.mode {
- PassMode::Ignore
- | PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => {
+ PassMode::Ignore | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
fx.bcx.ins().return_(&[]);
}
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
PassMode::Direct(_) => {
let place = fx.get_local_place(RETURN_PLACE);
let ret_val = place.to_cvalue(fx).load_scalar(fx);
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
index 6c59165..efb6423 100644
--- a/compiler/rustc_codegen_cranelift/src/allocator.rs
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -66,13 +66,9 @@
let callee_name = kind.fn_name(method.name);
//eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
- let func_id = module
- .declare_function(&caller_name, Linkage::Export, &sig)
- .unwrap();
+ let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
- let callee_func_id = module
- .declare_function(&callee_name, Linkage::Import, &sig)
- .unwrap();
+ let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
let mut ctx = Context::new();
ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
@@ -96,11 +92,7 @@
bcx.finalize();
}
module
- .define_function(
- func_id,
- &mut ctx,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- )
+ .define_function(func_id, &mut ctx, &mut cranelift_codegen::binemit::NullTrapSink {})
.unwrap();
unwind_context.add_function(func_id, &ctx, module.isa());
}
@@ -114,13 +106,10 @@
let callee_name = kind.fn_name(sym::oom);
//eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
- let func_id = module
- .declare_function("__rust_alloc_error_handler", Linkage::Export, &sig)
- .unwrap();
+ let func_id =
+ module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
- let callee_func_id = module
- .declare_function(&callee_name, Linkage::Import, &sig)
- .unwrap();
+ let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
let mut ctx = Context::new();
ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
@@ -143,11 +132,7 @@
bcx.finalize();
}
module
- .define_function(
- func_id,
- &mut ctx,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- )
+ .define_function(func_id, &mut ctx, &mut cranelift_codegen::binemit::NullTrapSink {})
.unwrap();
unwind_context.add_function(func_id, &ctx, module.isa());
}
diff --git a/compiler/rustc_codegen_cranelift/src/analyze.rs b/compiler/rustc_codegen_cranelift/src/analyze.rs
index 62fbcfe..efead25 100644
--- a/compiler/rustc_codegen_cranelift/src/analyze.rs
+++ b/compiler/rustc_codegen_cranelift/src/analyze.rs
@@ -11,7 +11,7 @@
Ssa,
}
-pub(crate) fn analyze(fx: &FunctionCx<'_, '_, impl Module>) -> IndexVec<Local, SsaKind> {
+pub(crate) fn analyze(fx: &FunctionCx<'_, '_, '_>) -> IndexVec<Local, SsaKind> {
let mut flag_map = fx
.mir
.local_decls
@@ -40,12 +40,7 @@
}
match &bb.terminator().kind {
- TerminatorKind::Call {
- destination,
- func,
- args,
- ..
- } => {
+ TerminatorKind::Call { destination, func, args, .. } => {
if let Some((dest_place, _dest_bb)) = destination {
if !crate::abi::can_return_to_ssa_var(fx, func, args) {
not_ssa(&mut flag_map, dest_place.local)
diff --git a/compiler/rustc_codegen_cranelift/src/archive.rs b/compiler/rustc_codegen_cranelift/src/archive.rs
index 9657905..7583fc4 100644
--- a/compiler/rustc_codegen_cranelift/src/archive.rs
+++ b/compiler/rustc_codegen_cranelift/src/archive.rs
@@ -12,10 +12,7 @@
#[derive(Debug)]
enum ArchiveEntry {
- FromArchive {
- archive_index: usize,
- entry_index: usize,
- },
+ FromArchive { archive_index: usize, entry_index: usize },
File(PathBuf),
}
@@ -30,7 +27,6 @@
// Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
// the end of an archive for linkers to not get confused.
entries: Vec<(String, ArchiveEntry)>,
- update_symbols: bool,
}
impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
@@ -46,10 +42,7 @@
let entry = entry.unwrap();
entries.push((
String::from_utf8(entry.header().identifier().to_vec()).unwrap(),
- ArchiveEntry::FromArchive {
- archive_index: 0,
- entry_index: i,
- },
+ ArchiveEntry::FromArchive { archive_index: 0, entry_index: i },
));
i += 1;
}
@@ -69,7 +62,6 @@
src_archives,
entries,
- update_symbols: false,
}
}
@@ -95,14 +87,9 @@
fn add_native_library(&mut self, name: rustc_span::symbol::Symbol) {
let location = find_library(name, &self.lib_search_paths, self.sess);
- self.add_archive(location.clone(), |_| false)
- .unwrap_or_else(|e| {
- panic!(
- "failed to add native library {}: {}",
- location.to_string_lossy(),
- e
- );
- });
+ self.add_archive(location.clone(), |_| false).unwrap_or_else(|e| {
+ panic!("failed to add native library {}: {}", location.to_string_lossy(), e);
+ });
}
fn add_rlib(
@@ -136,9 +123,7 @@
})
}
- fn update_symbols(&mut self) {
- self.update_symbols = true;
- }
+ fn update_symbols(&mut self) {}
fn build(mut self) {
enum BuilderKind {
@@ -156,10 +141,7 @@
// FIXME only read the symbol table of the object files to avoid having to keep all
// object files in memory at once, or read them twice.
let data = match entry {
- ArchiveEntry::FromArchive {
- archive_index,
- entry_index,
- } => {
+ ArchiveEntry::FromArchive { archive_index, entry_index } => {
// FIXME read symbols from symtab
use std::io::Read;
let (ref _src_archive_path, ref mut src_archive) =
@@ -225,10 +207,7 @@
err
));
}),
- entries
- .iter()
- .map(|(name, _)| name.as_bytes().to_vec())
- .collect(),
+ entries.iter().map(|(name, _)| name.as_bytes().to_vec()).collect(),
ar::GnuSymbolTableFormat::Size32,
symbol_table,
)
@@ -271,8 +250,7 @@
.expect("Couldn't run ranlib");
if !status.success() {
- self.sess
- .fatal(&format!("Ranlib exited with code {:?}", status.code()));
+ self.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
}
}
}
@@ -292,13 +270,8 @@
let file_name = String::from_utf8(entry.header().identifier().to_vec())
.map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
if !skip(&file_name) {
- self.entries.push((
- file_name,
- ArchiveEntry::FromArchive {
- archive_index,
- entry_index: i,
- },
- ));
+ self.entries
+ .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i }));
}
i += 1;
}
diff --git a/compiler/rustc_codegen_cranelift/src/atomic_shim.rs b/compiler/rustc_codegen_cranelift/src/atomic_shim.rs
deleted file mode 100644
index 674e6d9..0000000
--- a/compiler/rustc_codegen_cranelift/src/atomic_shim.rs
+++ /dev/null
@@ -1,185 +0,0 @@
-//! Atomic intrinsics are implemented using a global lock for now, as Cranelift doesn't support
-//! atomic operations yet.
-
-// FIXME implement atomic instructions in Cranelift.
-
-use crate::prelude::*;
-
-#[cfg(all(feature = "jit", unix))]
-#[no_mangle]
-static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
-
-pub(crate) fn init_global_lock(
- module: &mut impl Module,
- bcx: &mut FunctionBuilder<'_>,
- use_jit: bool,
-) {
- if use_jit {
- // When using JIT, dylibs won't find the __cg_clif_global_atomic_mutex data object defined here,
- // so instead we define it in the cg_clif dylib.
-
- return;
- }
-
- let mut data_ctx = DataContext::new();
- data_ctx.define_zeroinit(1024); // 1024 bytes should be big enough on all platforms.
- data_ctx.set_align(16);
- let atomic_mutex = module
- .declare_data(
- "__cg_clif_global_atomic_mutex",
- Linkage::Export,
- true,
- false,
- )
- .unwrap();
- module.define_data(atomic_mutex, &data_ctx).unwrap();
-
- let pthread_mutex_init = module
- .declare_function(
- "pthread_mutex_init",
- Linkage::Import,
- &cranelift_codegen::ir::Signature {
- call_conv: module.target_config().default_call_conv,
- params: vec![
- AbiParam::new(
- module.target_config().pointer_type(), /* *mut pthread_mutex_t */
- ),
- AbiParam::new(
- module.target_config().pointer_type(), /* *const pthread_mutex_attr_t */
- ),
- ],
- returns: vec![AbiParam::new(types::I32 /* c_int */)],
- },
- )
- .unwrap();
-
- let pthread_mutex_init = module.declare_func_in_func(pthread_mutex_init, bcx.func);
-
- let atomic_mutex = module.declare_data_in_func(atomic_mutex, bcx.func);
- let atomic_mutex = bcx
- .ins()
- .global_value(module.target_config().pointer_type(), atomic_mutex);
-
- let nullptr = bcx.ins().iconst(module.target_config().pointer_type(), 0);
-
- bcx.ins().call(pthread_mutex_init, &[atomic_mutex, nullptr]);
-}
-
-pub(crate) fn init_global_lock_constructor(
- module: &mut impl Module,
- constructor_name: &str,
-) -> FuncId {
- let sig = Signature::new(CallConv::SystemV);
- let init_func_id = module
- .declare_function(constructor_name, Linkage::Export, &sig)
- .unwrap();
-
- let mut ctx = Context::new();
- ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
- {
- let mut func_ctx = FunctionBuilderContext::new();
- let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
-
- let block = bcx.create_block();
- bcx.switch_to_block(block);
-
- crate::atomic_shim::init_global_lock(module, &mut bcx, false);
-
- bcx.ins().return_(&[]);
- bcx.seal_all_blocks();
- bcx.finalize();
- }
- module
- .define_function(
- init_func_id,
- &mut ctx,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- )
- .unwrap();
-
- init_func_id
-}
-
-pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Module>) {
- let atomic_mutex = fx
- .cx
- .module
- .declare_data(
- "__cg_clif_global_atomic_mutex",
- Linkage::Import,
- true,
- false,
- )
- .unwrap();
-
- let pthread_mutex_lock = fx
- .cx
- .module
- .declare_function(
- "pthread_mutex_lock",
- Linkage::Import,
- &cranelift_codegen::ir::Signature {
- call_conv: fx.cx.module.target_config().default_call_conv,
- params: vec![AbiParam::new(
- fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
- )],
- returns: vec![AbiParam::new(types::I32 /* c_int */)],
- },
- )
- .unwrap();
-
- let pthread_mutex_lock = fx
- .cx
- .module
- .declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
-
- let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
- let atomic_mutex = fx
- .bcx
- .ins()
- .global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
-
- fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]);
-}
-
-pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Module>) {
- let atomic_mutex = fx
- .cx
- .module
- .declare_data(
- "__cg_clif_global_atomic_mutex",
- Linkage::Import,
- true,
- false,
- )
- .unwrap();
-
- let pthread_mutex_unlock = fx
- .cx
- .module
- .declare_function(
- "pthread_mutex_unlock",
- Linkage::Import,
- &cranelift_codegen::ir::Signature {
- call_conv: fx.cx.module.target_config().default_call_conv,
- params: vec![AbiParam::new(
- fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
- )],
- returns: vec![AbiParam::new(types::I32 /* c_int */)],
- },
- )
- .unwrap();
-
- let pthread_mutex_unlock = fx
- .cx
- .module
- .declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
-
- let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
- let atomic_mutex = fx
- .bcx
- .ins()
- .global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
-
- fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]);
-}
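The deleted atomic_shim.rs emulated every atomic intrinsic by taking one process-wide pthread mutex (__cg_clif_global_atomic_mutex), doing a plain load/op/store, and unlocking again; the matching ctor-registration support in backend.rs (AddConstructor) goes away with it below. A minimal std-only sketch of that emulation strategy, where a per-value Mutex stands in for the shim's single global pthread mutex:

    use std::sync::Mutex;

    // Emulate AtomicU64::fetch_add with an ordinary integer behind a lock;
    // this is the lock -> plain load/op/store -> unlock sequence the shim
    // generated at the Cranelift IR level.
    fn emulated_fetch_add(cell: &Mutex<u64>, val: u64) -> u64 {
        let mut guard = cell.lock().unwrap(); // lock_global_lock
        let old = *guard;                     // plain (non-atomic) load
        *guard = old.wrapping_add(val);       // plain op + store
        old                                   // unlock when the guard drops
    }

    fn main() {
        let cell = Mutex::new(5);
        assert_eq!(emulated_fetch_add(&cell, 2), 5);
        assert_eq!(*cell.lock().unwrap(), 7);
    }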
diff --git a/compiler/rustc_codegen_cranelift/src/backend.rs b/compiler/rustc_codegen_cranelift/src/backend.rs
index 0ce34c9..eb7927f 100644
--- a/compiler/rustc_codegen_cranelift/src/backend.rs
+++ b/compiler/rustc_codegen_cranelift/src/backend.rs
@@ -8,7 +8,7 @@
use cranelift_module::FuncId;
use object::write::*;
-use object::{RelocationEncoding, RelocationKind, SectionKind, SymbolFlags};
+use object::{RelocationEncoding, SectionKind, SymbolFlags};
use cranelift_object::{ObjectBuilder, ObjectModule, ObjectProduct};
@@ -22,9 +22,7 @@
impl WriteMetadata for object::write::Object {
fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) {
- let segment = self
- .segment_name(object::write::StandardSegment::Data)
- .to_vec();
+ let segment = self.segment_name(object::write::StandardSegment::Data).to_vec();
let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data);
let offset = self.append_section_data(section_id, &data, 1);
// For MachO and probably PE this is necessary to prevent the linker from throwing away the
@@ -74,11 +72,7 @@
let section_id = self.object.add_section(
segment,
name,
- if id == SectionId::EhFrame {
- SectionKind::ReadOnlyData
- } else {
- SectionKind::Debug
- },
+ if id == SectionId::EhFrame { SectionKind::ReadOnlyData } else { SectionKind::Debug },
);
self.object
.section_mut(section_id)
@@ -118,49 +112,6 @@
}
}
-// FIXME remove once atomic instructions are implemented in Cranelift.
-pub(crate) trait AddConstructor {
- fn add_constructor(&mut self, func_id: FuncId);
-}
-
-impl AddConstructor for ObjectProduct {
- fn add_constructor(&mut self, func_id: FuncId) {
- let symbol = self.function_symbol(func_id);
- let segment = self
- .object
- .segment_name(object::write::StandardSegment::Data);
- let init_array_section =
- self.object
- .add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data);
- let address_size = self
- .object
- .architecture()
- .address_size()
- .expect("address_size must be known")
- .bytes();
- self.object.append_section_data(
- init_array_section,
- &std::iter::repeat(0)
- .take(address_size.into())
- .collect::<Vec<u8>>(),
- 8,
- );
- self.object
- .add_relocation(
- init_array_section,
- object::write::Relocation {
- offset: 0,
- size: address_size * 8,
- kind: RelocationKind::Absolute,
- encoding: RelocationEncoding::Generic,
- symbol,
- addend: 0,
- },
- )
- .unwrap();
- }
-}
-
pub(crate) fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object)) -> Vec<u8> {
let triple = crate::build_isa(sess).triple().clone();
@@ -175,10 +126,9 @@
target_lexicon::Architecture::X86_64 => object::Architecture::X86_64,
target_lexicon::Architecture::Arm(_) => object::Architecture::Arm,
target_lexicon::Architecture::Aarch64(_) => object::Architecture::Aarch64,
- architecture => sess.fatal(&format!(
- "target architecture {:?} is unsupported",
- architecture,
- )),
+ architecture => {
+ sess.fatal(&format!("target architecture {:?} is unsupported", architecture,))
+ }
};
let endian = match triple.endianness().unwrap() {
target_lexicon::Endianness::Little => object::Endianness::Little,
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 4842628..8b5ae9e 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -8,7 +8,7 @@
use crate::prelude::*;
pub(crate) fn codegen_fn<'tcx>(
- cx: &mut crate::CodegenCx<'tcx, impl Module>,
+ cx: &mut crate::CodegenCx<'_, 'tcx>,
instance: Instance<'tcx>,
linkage: Linkage,
) {
@@ -38,9 +38,8 @@
// Predefine blocks
let start_block = bcx.create_block();
- let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len())
- .map(|_| bcx.create_block())
- .collect();
+ let block_map: IndexVec<BasicBlock, Block> =
+ (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
// Make FunctionCx
let pointer_type = cx.module.target_config().pointer_type();
@@ -68,22 +67,23 @@
inline_asm_index: 0,
};
- let arg_uninhabited = fx.mir.args_iter().any(|arg| {
- fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty))
- .abi
- .is_uninhabited()
- });
+ let arg_uninhabited = fx
+ .mir
+ .args_iter()
+ .any(|arg| fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
- if arg_uninhabited {
- fx.bcx
- .append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ if !crate::constant::check_constants(&mut fx) {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ crate::trap::trap_unreachable(&mut fx, "compilation should have been aborted");
+ } else if arg_uninhabited {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
} else {
tcx.sess.time("codegen clif ir", || {
- tcx.sess.time("codegen prelude", || {
- crate::abi::codegen_fn_prelude(&mut fx, start_block)
- });
+ tcx.sess
+ .time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
codegen_fn_content(&mut fx);
});
}
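Constant checking now runs before the prelude: check_constants evaluates everything in mir.required_consts up front, and if any of them is erroneous the whole body is replaced by a trap instead of attempting codegen. An illustrative (non-compiler) example of the kind of post-monomorphization constant error this path reports:

    // Illustrative only: a constant that fails to evaluate for some
    // instantiations. The MIR of `get::<0>` lists `Inv::<0>::VAL` in its
    // required_consts; evaluation underflows, "erroneous constant
    // encountered" is reported, and the function body becomes a trap.
    struct Inv<const N: usize>;

    impl<const N: usize> Inv<N> {
        const VAL: usize = N - 1; // underflows when N == 0
    }

    fn get<const N: usize>() -> usize {
        Inv::<N>::VAL
    }

    fn main() {
        let _ = get::<1>(); // fine
        // let _ = get::<0>(); // const evaluation fails during codegen
    }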
@@ -131,11 +131,7 @@
let module = &mut cx.module;
tcx.sess.time("define function", || {
module
- .define_function(
- func_id,
- context,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- )
+ .define_function(func_id, context, &mut cranelift_codegen::binemit::NullTrapSink {})
.unwrap()
});
@@ -149,14 +145,12 @@
&clif_comments,
);
- if let Some(mach_compile_result) = &context.mach_compile_result {
- if let Some(disasm) = &mach_compile_result.disasm {
- crate::pretty_clif::write_ir_file(
- tcx,
- &format!("{}.vcode", tcx.symbol_name(instance).name),
- |file| file.write_all(disasm.as_bytes()),
- )
- }
+ if let Some(disasm) = &context.mach_compile_result.as_ref().unwrap().disasm {
+ crate::pretty_clif::write_ir_file(
+ tcx,
+ &format!("{}.vcode", tcx.symbol_name(instance).name),
+ |file| file.write_all(disasm.as_bytes()),
+ )
}
// Define debuginfo for function
@@ -199,16 +193,13 @@
Some(Box::new(writer)),
err,
);
- tcx.sess
- .fatal(&format!("cranelift verify error:\n{}", pretty_error));
+ tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
}
}
});
}
-fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Module>) {
- crate::constant::check_constants(fx);
-
+fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
let block = fx.get_block(bb);
fx.bcx.switch_to_block(block);
@@ -231,11 +222,7 @@
#[cfg(debug_assertions)]
{
let mut terminator_head = "\n".to_string();
- bb_data
- .terminator()
- .kind
- .fmt_head(&mut terminator_head)
- .unwrap();
+ bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
let inst = fx.bcx.func.layout.last_inst(block).unwrap();
fx.add_comment(inst, terminator_head);
}
@@ -267,13 +254,7 @@
TerminatorKind::Return => {
crate::abi::codegen_return(fx);
}
- TerminatorKind::Assert {
- cond,
- expected,
- msg,
- target,
- cleanup: _,
- } => {
+ TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
if !fx.tcx.sess.overflow_checks() {
if let mir::AssertKind::OverflowNeg(_) = *msg {
let target = fx.get_block(*target);
@@ -319,11 +300,7 @@
}
}
- TerminatorKind::SwitchInt {
- discr,
- switch_ty,
- targets,
- } => {
+ TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
let discr = codegen_operand(fx, discr).load_scalar(fx);
let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
@@ -433,11 +410,7 @@
| TerminatorKind::GeneratorDrop => {
bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
}
- TerminatorKind::Drop {
- place,
- target,
- unwind: _,
- } => {
+ TerminatorKind::Drop { place, target, unwind: _ } => {
let drop_place = codegen_place(fx, *place);
crate::abi::codegen_drop(fx, bb_data.terminator().source_info.span, drop_place);
@@ -452,7 +425,7 @@
}
fn codegen_stmt<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
#[allow(unused_variables)] cur_block: Block,
stmt: &Statement<'tcx>,
) {
@@ -470,10 +443,7 @@
}
match &stmt.kind {
- StatementKind::SetDiscriminant {
- place,
- variant_index,
- } => {
+ StatementKind::SetDiscriminant { place, variant_index } => {
let place = codegen_place(fx, **place);
crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
}
@@ -494,14 +464,14 @@
let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
lval.write_cvalue(fx, val);
}
- Rvalue::BinaryOp(bin_op, ref lhs, ref rhs) => {
+ Rvalue::BinaryOp(bin_op, box (ref lhs, ref rhs)) => {
let lhs = codegen_operand(fx, lhs);
let rhs = codegen_operand(fx, rhs);
let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
lval.write_cvalue(fx, res);
}
- Rvalue::CheckedBinaryOp(bin_op, ref lhs, ref rhs) => {
+ Rvalue::CheckedBinaryOp(bin_op, box (ref lhs, ref rhs)) => {
let lhs = codegen_operand(fx, lhs);
let rhs = codegen_operand(fx, rhs);
@@ -594,19 +564,11 @@
let from_ty = operand.layout().ty;
let to_ty = fx.monomorphize(to_ty);
- fn is_fat_ptr<'tcx>(
- fx: &FunctionCx<'_, 'tcx, impl Module>,
- ty: Ty<'tcx>,
- ) -> bool {
+ fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
ty.builtin_deref(true)
- .map(
- |ty::TypeAndMut {
- ty: pointee_ty,
- mutbl: _,
- }| {
- has_ptr_meta(fx.tcx, pointee_ty)
- },
- )
+ .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
+ has_ptr_meta(fx.tcx, pointee_ty)
+ })
.unwrap_or(false)
}
@@ -626,50 +588,22 @@
ty::Uint(_) | ty::Int(_) => {}
_ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
}
+ let to_clif_ty = fx.clif_type(to_ty).unwrap();
- use rustc_target::abi::{Int, TagEncoding, Variants};
+ let discriminant = crate::discriminant::codegen_get_discriminant(
+ fx,
+ operand,
+ fx.layout_of(operand.layout().ty.discriminant_ty(fx.tcx)),
+ )
+ .load_scalar(fx);
- match operand.layout().variants {
- Variants::Single { index } => {
- let discr = operand
- .layout()
- .ty
- .discriminant_for_variant(fx.tcx, index)
- .unwrap();
- let discr = if discr.ty.is_signed() {
- fx.layout_of(discr.ty).size.sign_extend(discr.val)
- } else {
- discr.val
- };
- let discr = discr.into();
-
- let discr = CValue::const_val(fx, fx.layout_of(to_ty), discr);
- lval.write_cvalue(fx, discr);
- }
- Variants::Multiple {
- ref tag,
- tag_field,
- tag_encoding: TagEncoding::Direct,
- variants: _,
- } => {
- let cast_to = fx.clif_type(dest_layout.ty).unwrap();
-
- // Read the tag/niche-encoded discriminant from memory.
- let encoded_discr =
- operand.value_field(fx, mir::Field::new(tag_field));
- let encoded_discr = encoded_discr.load_scalar(fx);
-
- // Decode the discriminant (specifically if it's niche-encoded).
- let signed = match tag.value {
- Int(_, signed) => signed,
- _ => false,
- };
- let val = clif_intcast(fx, encoded_discr, cast_to, signed);
- let val = CValue::by_val(val, dest_layout);
- lval.write_cvalue(fx, val);
- }
- Variants::Multiple { .. } => unreachable!(),
- }
+ let res = crate::cast::clif_intcast(
+ fx,
+ discriminant,
+ to_clif_ty,
+ to_ty.is_signed(),
+ );
+ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
} else {
let to_clif_ty = fx.clif_type(to_ty).unwrap();
let from = operand.load_scalar(fx);
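The hand-rolled Variants::Single/Variants::Multiple handling for enum-to-integer casts is replaced by a generic discriminant read (codegen_get_discriminant) followed by an integer cast with the signedness of the target type. At the language level that is all such a cast means, as in this small self-contained example:

    // An enum-to-integer cast reads the discriminant and then performs an
    // ordinary integer cast using the signedness of the *target* type.
    fn main() {
        enum E {
            A = -1,
            B = 7,
        }
        assert_eq!(E::A as i8, -1);  // sign-preserving view of the discriminant
        assert_eq!(E::A as u8, 255); // same low bits, unsigned target
        assert_eq!(E::B as u32, 7);
    }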
@@ -730,8 +664,7 @@
// FIXME use emit_small_memset where possible
let addr = lval.to_ptr().get_addr(fx);
let val = operand.load_scalar(fx);
- fx.bcx
- .call_memset(fx.cx.module.target_config(), addr, val, times);
+ fx.bcx.call_memset(fx.cx.module.target_config(), addr, val, times);
} else {
let loop_block = fx.bcx.create_block();
let loop_block2 = fx.bcx.create_block();
@@ -766,25 +699,19 @@
let content_ty = fx.monomorphize(content_ty);
let layout = fx.layout_of(content_ty);
let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
- let llalign = fx
- .bcx
- .ins()
- .iconst(usize_type, layout.align.abi.bytes() as i64);
+ let llalign = fx.bcx.ins().iconst(usize_type, layout.align.abi.bytes() as i64);
let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
// Allocate space:
- let def_id = match fx
- .tcx
- .lang_items()
- .require(rustc_hir::LangItem::ExchangeMalloc)
- {
- Ok(id) => id,
- Err(s) => {
- fx.tcx
- .sess
- .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
- }
- };
+ let def_id =
+ match fx.tcx.lang_items().require(rustc_hir::LangItem::ExchangeMalloc) {
+ Ok(id) => id,
+ Err(s) => {
+ fx.tcx
+ .sess
+ .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+ }
+ };
let instance = ty::Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
let func_ref = fx.get_function_ref(instance);
let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
@@ -792,10 +719,11 @@
lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
}
Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
- assert!(lval
- .layout()
- .ty
- .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
+ assert!(
+ lval.layout()
+ .ty
+ .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
+ );
let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
let val =
CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
@@ -823,11 +751,7 @@
StatementKind::LlvmInlineAsm(asm) => {
use rustc_span::symbol::Symbol;
- let LlvmInlineAsm {
- asm,
- outputs,
- inputs,
- } = &**asm;
+ let LlvmInlineAsm { asm, outputs, inputs } = &**asm;
let rustc_hir::LlvmInlineAsmInner {
asm: asm_code, // Name
outputs: output_names, // Vec<LlvmInlineAsmOutput>
@@ -843,15 +767,9 @@
// Black box
}
"mov %rbx, %rsi\n cpuid\n xchg %rbx, %rsi" => {
- assert_eq!(
- input_names,
- &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]
- );
+ assert_eq!(input_names, &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]);
assert_eq!(output_names.len(), 4);
- for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"])
- .iter()
- .enumerate()
- {
+ for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"]).iter().enumerate() {
assert_eq!(&output_names[i].constraint.as_str(), c);
assert!(!output_names[i].is_rw);
assert!(!output_names[i].is_indirect);
@@ -897,12 +815,7 @@
crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported");
}
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows
- _ if fx
- .tcx
- .symbol_name(fx.instance)
- .name
- .starts_with("___chkstk") =>
- {
+ _ if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") => {
crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
}
_ if fx.tcx.symbol_name(fx.instance).name == "__alloca" => {
@@ -919,30 +832,45 @@
}
}
StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ src,
+ dst,
+ count,
+ }) => {
+ let dst = codegen_operand(fx, dst);
+ let pointee = dst
+ .layout()
+ .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let dst = dst.load_scalar(fx);
+ let src = codegen_operand(fx, src).load_scalar(fx);
+ let count = codegen_operand(fx, count).load_scalar(fx);
+ let elem_size: u64 = pointee.size.bytes();
+ let bytes = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+ fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, bytes);
+ }
}
}
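The new StatementKind::CopyNonOverlapping arm lowers the intrinsic to a memcpy of count * pointee-size bytes, skipping the multiply when the element size is 1. The surface semantics being lowered:

    // ptr::copy_nonoverlapping(src, dst, count) copies count * size_of::<T>()
    // bytes, which is why the generated code scales the element count by the
    // pointee size before calling memcpy.
    use std::ptr;

    fn main() {
        let src = [1u32, 2, 3, 4];
        let mut dst = [0u32; 4];
        // 4 elements * 4 bytes each = 16 bytes copied.
        unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len()) };
        assert_eq!(dst, [1, 2, 3, 4]);
    }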
-fn codegen_array_len<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- place: CPlace<'tcx>,
-) -> Value {
+fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
match *place.layout().ty.kind() {
ty::Array(_elem_ty, len) => {
- let len = fx
- .monomorphize(len)
- .eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+ let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
fx.bcx.ins().iconst(fx.pointer_type, len)
}
- ty::Slice(_elem_ty) => place
- .to_ptr_maybe_unsized()
- .1
- .expect("Length metadata for slice place"),
+ ty::Slice(_elem_ty) => {
+ place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
+ }
_ => bug!("Rvalue::Len({:?})", place),
}
}
pub(crate) fn codegen_place<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
place: Place<'tcx>,
) -> CPlace<'tcx> {
let mut cplace = fx.get_local_place(place.local);
@@ -959,11 +887,7 @@
let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
cplace = cplace.place_index(fx, index);
}
- PlaceElem::ConstantIndex {
- offset,
- min_length: _,
- from_end,
- } => {
+ PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
let offset: u64 = offset;
let index = if !from_end {
fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
@@ -1014,7 +938,7 @@
}
pub(crate) fn codegen_operand<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
operand: &Operand<'tcx>,
) -> CValue<'tcx> {
match operand {
@@ -1026,34 +950,24 @@
}
}
-pub(crate) fn codegen_panic<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- msg_str: &str,
- span: Span,
-) {
+pub(crate) fn codegen_panic<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, msg_str: &str, span: Span) {
let location = fx.get_caller_location(span).load_scalar(fx);
let msg_ptr = fx.anonymous_str("assert", msg_str);
- let msg_len = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
let args = [msg_ptr, msg_len, location];
codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, span);
}
pub(crate) fn codegen_panic_inner<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
lang_item: rustc_hir::LangItem,
args: &[Value],
span: Span,
) {
- let def_id = fx
- .tcx
- .lang_items()
- .require(lang_item)
- .unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
+ let def_id =
+ fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
let symbol_name = fx.tcx.symbol_name(instance).name;
diff --git a/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs b/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs
index be369b0..983839d 100644
--- a/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs
+++ b/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs
@@ -27,13 +27,7 @@
config.opts.cg.panic = Some(PanicStrategy::Abort);
config.opts.debugging_opts.panic_abort_tests = true;
config.opts.maybe_sysroot = Some(config.opts.maybe_sysroot.clone().unwrap_or_else(|| {
- std::env::current_exe()
- .unwrap()
- .parent()
- .unwrap()
- .parent()
- .unwrap()
- .to_owned()
+ std::env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_owned()
}));
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs b/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs
index 83e5dc6..e7cd5ed 100644
--- a/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs
+++ b/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs
@@ -46,15 +46,8 @@
config.opts.cg.panic = Some(PanicStrategy::Abort);
config.opts.debugging_opts.panic_abort_tests = true;
- config.opts.maybe_sysroot = Some(
- std::env::current_exe()
- .unwrap()
- .parent()
- .unwrap()
- .parent()
- .unwrap()
- .to_owned(),
- );
+ config.opts.maybe_sysroot =
+ Some(std::env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_owned());
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/cast.rs b/compiler/rustc_codegen_cranelift/src/cast.rs
index 57204de..74c5e09 100644
--- a/compiler/rustc_codegen_cranelift/src/cast.rs
+++ b/compiler/rustc_codegen_cranelift/src/cast.rs
@@ -3,7 +3,7 @@
use crate::prelude::*;
pub(crate) fn clif_intcast(
- fx: &mut FunctionCx<'_, '_, impl Module>,
+ fx: &mut FunctionCx<'_, '_, '_>,
val: Value,
to: Type,
signed: bool,
@@ -40,18 +40,14 @@
// reduce
(types::I128, _) => {
let (lsb, _msb) = fx.bcx.ins().isplit(val);
- if to == types::I64 {
- lsb
- } else {
- fx.bcx.ins().ireduce(to, lsb)
- }
+ if to == types::I64 { lsb } else { fx.bcx.ins().ireduce(to, lsb) }
}
(_, _) => fx.bcx.ins().ireduce(to, val),
}
}
pub(crate) fn clif_int_or_float_cast(
- fx: &mut FunctionCx<'_, '_, impl Module>,
+ fx: &mut FunctionCx<'_, '_, '_>,
from: Value,
from_signed: bool,
to_ty: Type,
@@ -87,11 +83,7 @@
},
);
- let from_rust_ty = if from_signed {
- fx.tcx.types.i128
- } else {
- fx.tcx.types.u128
- };
+ let from_rust_ty = if from_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
let to_rust_ty = match to_ty {
types::F32 => fx.tcx.types.f32,
@@ -100,11 +92,7 @@
};
return fx
- .easy_call(
- &name,
- &[CValue::by_val(from, fx.layout_of(from_rust_ty))],
- to_rust_ty,
- )
+ .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
.load_scalar(fx);
}
@@ -138,18 +126,10 @@
_ => unreachable!(),
};
- let to_rust_ty = if to_signed {
- fx.tcx.types.i128
- } else {
- fx.tcx.types.u128
- };
+ let to_rust_ty = if to_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
return fx
- .easy_call(
- &name,
- &[CValue::by_val(from, fx.layout_of(from_rust_ty))],
- to_rust_ty,
- )
+ .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
.load_scalar(fx);
}
diff --git a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
index 866ba90..ae75e65 100644
--- a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
+++ b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
@@ -5,13 +5,17 @@
use crate::prelude::*;
pub(crate) fn maybe_codegen<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
checked: bool,
lhs: CValue<'tcx>,
rhs: CValue<'tcx>,
) -> Option<CValue<'tcx>> {
- if lhs.layout().ty != fx.tcx.types.u128 && lhs.layout().ty != fx.tcx.types.i128 {
+ if lhs.layout().ty != fx.tcx.types.u128
+ && lhs.layout().ty != fx.tcx.types.i128
+ && rhs.layout().ty != fx.tcx.types.u128
+ && rhs.layout().ty != fx.tcx.types.i128
+ {
return None;
}
@@ -27,11 +31,7 @@
}
BinOp::Add | BinOp::Sub if !checked => None,
BinOp::Mul if !checked => {
- let val_ty = if is_signed {
- fx.tcx.types.i128
- } else {
- fx.tcx.types.u128
- };
+ let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
}
BinOp::Add | BinOp::Sub | BinOp::Mul => {
@@ -43,11 +43,7 @@
AbiParam::new(types::I128),
AbiParam::new(types::I128),
];
- let args = [
- out_place.to_ptr().get_addr(fx),
- lhs.load_scalar(fx),
- rhs.load_scalar(fx),
- ];
+ let args = [out_place.to_ptr().get_addr(fx), lhs.load_scalar(fx), rhs.load_scalar(fx)];
let name = match (bin_op, is_signed) {
(BinOp::Add, false) => "__rust_u128_addo",
(BinOp::Add, true) => "__rust_i128_addo",
@@ -97,70 +93,23 @@
None
};
- // Optimize `val >> 64`, because compiler_builtins uses it to deconstruct an 128bit
- // integer into its lsb and msb.
- // https://github.com/rust-lang-nursery/compiler-builtins/blob/79a6a1603d5672cbb9187ff41ff4d9b5048ac1cb/src/int/mod.rs#L217
- if resolve_value_imm(fx.bcx.func, rhs_val) == Some(64) {
- let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs_val);
- let all_zeros = fx.bcx.ins().iconst(types::I64, 0);
- let val = match (bin_op, is_signed) {
- (BinOp::Shr, false) => {
- let val = fx.bcx.ins().iconcat(lhs_msb, all_zeros);
- Some(CValue::by_val(val, fx.layout_of(fx.tcx.types.u128)))
- }
- (BinOp::Shr, true) => {
- let sign = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, lhs_msb, 0);
- let all_ones = fx.bcx.ins().iconst(types::I64, u64::MAX as i64);
- let all_sign_bits = fx.bcx.ins().select(sign, all_zeros, all_ones);
-
- let val = fx.bcx.ins().iconcat(lhs_msb, all_sign_bits);
- Some(CValue::by_val(val, fx.layout_of(fx.tcx.types.i128)))
- }
- (BinOp::Shl, _) => {
- let val_ty = if is_signed {
- fx.tcx.types.i128
- } else {
- fx.tcx.types.u128
- };
- let val = fx.bcx.ins().iconcat(all_zeros, lhs_lsb);
- Some(CValue::by_val(val, fx.layout_of(val_ty)))
- }
- _ => None,
- };
- if let Some(val) = val {
- if let Some(is_overflow) = is_overflow {
- let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
- let val = val.load_scalar(fx);
- return Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)));
- } else {
- return Some(val);
- }
- }
- }
-
let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
- let truncated_rhs = CValue::by_val(truncated_rhs, fx.layout_of(fx.tcx.types.u32));
- let val = match (bin_op, is_signed) {
- (BinOp::Shl, false) => {
- fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.tcx.types.u128)
+ let val = match bin_op {
+ BinOp::Shl => fx.bcx.ins().ishl(lhs_val, truncated_rhs),
+ BinOp::Shr => {
+ if is_signed {
+ fx.bcx.ins().sshr(lhs_val, truncated_rhs)
+ } else {
+ fx.bcx.ins().ushr(lhs_val, truncated_rhs)
+ }
}
- (BinOp::Shl, true) => {
- fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.tcx.types.i128)
- }
- (BinOp::Shr, false) => {
- fx.easy_call("__lshrti3", &[lhs, truncated_rhs], fx.tcx.types.u128)
- }
- (BinOp::Shr, true) => {
- fx.easy_call("__ashrti3", &[lhs, truncated_rhs], fx.tcx.types.i128)
- }
- (_, _) => unreachable!(),
+ _ => unreachable!(),
};
if let Some(is_overflow) = is_overflow {
let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
- let val = val.load_scalar(fx);
Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)))
} else {
- Some(val)
+ Some(CValue::by_val(val, lhs.layout()))
}
}
}
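128-bit shifts are now emitted as native Cranelift shifts (ishl/sshr/ushr) with the shift amount truncated to 32 bits, so the __ashlti3/__lshrti3/__ashrti3 fallbacks and the `val >> 64` fast path are gone (the matching resolve_value_imm helpers disappear from common.rs below). The pattern that fast path existed to recognize, in plain Rust:

    // compiler_builtins splits a u128 into halves with a shift by 64; with
    // native 128-bit shifts this needs no special casing.
    fn split_u128(x: u128) -> (u64, u64) {
        let lsb = x as u64;         // low half
        let msb = (x >> 64) as u64; // high half, the shift the old code matched
        (lsb, msb)
    }

    fn main() {
        let x = 0x0123_4567_89ab_cdef_0011_2233_4455_6677_u128;
        assert_eq!(split_u128(x), (0x0011_2233_4455_6677, 0x0123_4567_89ab_cdef));
    }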
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
index fbee84e..6a4a674 100644
--- a/compiler/rustc_codegen_cranelift/src/common.rs
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -3,8 +3,6 @@
use rustc_target::abi::{Integer, Primitive};
use rustc_target::spec::{HasTargetSpec, Target};
-use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
-
use crate::prelude::*;
pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
@@ -56,11 +54,7 @@
FloatTy::F64 => types::F64,
},
ty::FnPtr(_) => pointer_ty(tcx),
- ty::RawPtr(TypeAndMut {
- ty: pointee_ty,
- mutbl: _,
- })
- | ty::Ref(_, pointee_ty, _) => {
+ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
if has_ptr_meta(tcx, pointee_ty) {
return None;
} else {
@@ -99,11 +93,7 @@
}
(a, b)
}
- ty::RawPtr(TypeAndMut {
- ty: pointee_ty,
- mutbl: _,
- })
- | ty::Ref(_, pointee_ty, _) => {
+ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
if has_ptr_meta(tcx, pointee_ty) {
(pointer_ty(tcx), pointer_ty(tcx))
} else {
@@ -116,15 +106,8 @@
/// Is a pointer to this type a fat ptr?
pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
- let ptr_ty = tcx.mk_ptr(TypeAndMut {
- ty,
- mutbl: rustc_hir::Mutability::Not,
- });
- match &tcx
- .layout_of(ParamEnv::reveal_all().and(ptr_ty))
- .unwrap()
- .abi
- {
+ let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
+ match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
Abi::Scalar(_) => false,
Abi::ScalarPair(_, _) => true,
abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
@@ -132,7 +115,7 @@
}
pub(crate) fn codegen_icmp_imm(
- fx: &mut FunctionCx<'_, '_, impl Module>,
+ fx: &mut FunctionCx<'_, '_, '_>,
intcc: IntCC,
lhs: Value,
rhs: i128,
@@ -175,51 +158,6 @@
}
}
-fn resolve_normal_value_imm(func: &Function, val: Value) -> Option<i64> {
- if let ValueDef::Result(inst, 0 /*param*/) = func.dfg.value_def(val) {
- if let InstructionData::UnaryImm {
- opcode: Opcode::Iconst,
- imm,
- } = func.dfg[inst]
- {
- Some(imm.into())
- } else {
- None
- }
- } else {
- None
- }
-}
-
-fn resolve_128bit_value_imm(func: &Function, val: Value) -> Option<u128> {
- let (lsb, msb) = if let ValueDef::Result(inst, 0 /*param*/) = func.dfg.value_def(val) {
- if let InstructionData::Binary {
- opcode: Opcode::Iconcat,
- args: [lsb, msb],
- } = func.dfg[inst]
- {
- (lsb, msb)
- } else {
- return None;
- }
- } else {
- return None;
- };
-
- let lsb = u128::from(resolve_normal_value_imm(func, lsb)? as u64);
- let msb = u128::from(resolve_normal_value_imm(func, msb)? as u64);
-
- Some(msb << 64 | lsb)
-}
-
-pub(crate) fn resolve_value_imm(func: &Function, val: Value) -> Option<u128> {
- if func.dfg.value_type(val) == types::I128 {
- resolve_128bit_value_imm(func, val)
- } else {
- resolve_normal_value_imm(func, val).map(|imm| u128::from(imm as u64))
- }
-}
-
pub(crate) fn type_min_max_value(
bcx: &mut FunctionBuilder<'_>,
ty: Type,
@@ -288,8 +226,8 @@
}
}
-pub(crate) struct FunctionCx<'clif, 'tcx, M: Module> {
- pub(crate) cx: &'clif mut crate::CodegenCx<'tcx, M>,
+pub(crate) struct FunctionCx<'m, 'clif, 'tcx> {
+ pub(crate) cx: &'clif mut crate::CodegenCx<'m, 'tcx>,
pub(crate) tcx: TyCtxt<'tcx>,
pub(crate) pointer_type: Type, // Cached from module
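FunctionCx drops its M: Module type parameter in favour of an extra lifetime; together with the &mut impl Module -> &mut dyn Module changes in constant.rs below, the codegen helpers are compiled once and dispatch through a trait object instead of being monomorphized separately for the object-file and JIT modules. A minimal sketch of that direction, with made-up names (Backend, Cx, describe_*), not the actual compiler API:

    trait Backend {
        fn name(&self) -> &'static str;
    }

    // Before: generic helpers, one compiled copy per backend type.
    fn describe_generic(module: &mut impl Backend) -> String {
        format!("backend: {}", module.name())
    }

    // After: the context borrows a trait object, so one copy serves all
    // backends, mirroring the module now being passed as &mut dyn Module.
    struct Cx<'m> {
        module: &'m mut dyn Backend,
    }

    fn describe_dyn(cx: &mut Cx<'_>) -> String {
        format!("backend: {}", cx.module.name())
    }

    struct Obj;
    impl Backend for Obj {
        fn name(&self) -> &'static str {
            "object"
        }
    }

    fn main() {
        let mut obj = Obj;
        assert_eq!(describe_generic(&mut obj), "backend: object");
        let mut cx = Cx { module: &mut obj };
        assert_eq!(describe_dyn(&mut cx), "backend: object");
    }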
@@ -316,7 +254,7 @@
pub(crate) inline_asm_index: u32,
}
-impl<'tcx, M: Module> LayoutOf for FunctionCx<'_, 'tcx, M> {
+impl<'tcx> LayoutOf for FunctionCx<'_, '_, 'tcx> {
type Ty = Ty<'tcx>;
type TyAndLayout = TyAndLayout<'tcx>;
@@ -325,31 +263,31 @@
}
}
-impl<'tcx, M: Module> layout::HasTyCtxt<'tcx> for FunctionCx<'_, 'tcx, M> {
+impl<'tcx> layout::HasTyCtxt<'tcx> for FunctionCx<'_, '_, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
self.tcx
}
}
-impl<'tcx, M: Module> rustc_target::abi::HasDataLayout for FunctionCx<'_, 'tcx, M> {
+impl<'tcx> rustc_target::abi::HasDataLayout for FunctionCx<'_, '_, 'tcx> {
fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
&self.tcx.data_layout
}
}
-impl<'tcx, M: Module> layout::HasParamEnv<'tcx> for FunctionCx<'_, 'tcx, M> {
+impl<'tcx> layout::HasParamEnv<'tcx> for FunctionCx<'_, '_, 'tcx> {
fn param_env(&self) -> ParamEnv<'tcx> {
ParamEnv::reveal_all()
}
}
-impl<'tcx, M: Module> HasTargetSpec for FunctionCx<'_, 'tcx, M> {
+impl<'tcx> HasTargetSpec for FunctionCx<'_, '_, 'tcx> {
fn target_spec(&self) -> &Target {
&self.tcx.sess.target
}
}
-impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
pub(crate) fn monomorphize<T>(&self, value: T) -> T
where
T: TypeFoldable<'tcx> + Copy,
@@ -416,12 +354,7 @@
let msg_id = self
.cx
.module
- .declare_data(
- &format!("__{}_{:08x}", prefix, msg_hash),
- Linkage::Local,
- false,
- false,
- )
+ .declare_data(&format!("__{}_{:08x}", prefix, msg_hash), Linkage::Local, false, false)
.unwrap();
// Ignore DuplicateDefinition error, as the data will be the same
@@ -444,15 +377,13 @@
fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
assert!(!ty.still_further_specializable());
- self.0
- .layout_of(ParamEnv::reveal_all().and(&ty))
- .unwrap_or_else(|e| {
- if let layout::LayoutError::SizeOverflow(_) = e {
- self.0.sess.fatal(&e.to_string())
- } else {
- bug!("failed to get layout for `{}`: {}", ty, e)
- }
- })
+ self.0.layout_of(ParamEnv::reveal_all().and(&ty)).unwrap_or_else(|e| {
+ if let layout::LayoutError::SizeOverflow(_) = e {
+ self.0.sess.fatal(&e.to_string())
+ } else {
+ bug!("failed to get layout for `{}`: {}", ty, e)
+ }
+ })
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index 5702832..9d93370 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -8,7 +8,7 @@
use rustc_middle::mir::interpret::{
read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Pointer, Scalar,
};
-use rustc_middle::ty::{Const, ConstKind};
+use rustc_middle::ty::ConstKind;
use cranelift_codegen::ir::GlobalValueData;
use cranelift_module::*;
@@ -28,7 +28,7 @@
}
impl ConstantCx {
- pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut impl Module) {
+ pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
//println!("todo {:?}", self.todo);
define_all_allocs(tcx, module, &mut self);
//println!("done {:?}", self.done);
@@ -36,21 +36,23 @@
}
}
-pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, impl Module>) {
+pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
+ let mut all_constants_ok = true;
for constant in &fx.mir.required_consts {
- let const_ = fx.monomorphize(constant.literal);
+ let const_ = match fx.monomorphize(constant.literal) {
+ ConstantKind::Ty(ct) => ct,
+ ConstantKind::Val(..) => continue,
+ };
match const_.val {
ConstKind::Value(_) => {}
ConstKind::Unevaluated(def, ref substs, promoted) => {
if let Err(err) =
- fx.tcx
- .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
+ fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
{
+ all_constants_ok = false;
match err {
ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
- fx.tcx
- .sess
- .span_err(constant.span, "erroneous constant encountered");
+ fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
}
ErrorHandled::TooGeneric => {
span_bug!(
@@ -69,6 +71,7 @@
| ConstKind::Error(_) => unreachable!("{:?}", const_),
}
}
+ all_constants_ok
}
pub(crate) fn codegen_static(constants_cx: &mut ConstantCx, def_id: DefId) {
@@ -76,11 +79,11 @@
}
pub(crate) fn codegen_tls_ref<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
def_id: DefId,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
- let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+ let data_id = data_id_for_static(fx.tcx, fx.cx.module, def_id, false);
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("tls {:?}", def_id));
@@ -89,11 +92,11 @@
}
fn codegen_static_ref<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
def_id: DefId,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
- let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+ let data_id = data_id_for_static(fx.tcx, fx.cx.module, def_id, false);
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", def_id));
@@ -110,38 +113,26 @@
}
pub(crate) fn codegen_constant<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
constant: &Constant<'tcx>,
) -> CValue<'tcx> {
- let const_ = fx.monomorphize(constant.literal);
+ let const_ = match fx.monomorphize(constant.literal) {
+ ConstantKind::Ty(ct) => ct,
+ ConstantKind::Val(val, ty) => return codegen_const_value(fx, val, ty),
+ };
let const_val = match const_.val {
ConstKind::Value(const_val) => const_val,
ConstKind::Unevaluated(def, ref substs, promoted) if fx.tcx.is_static(def.did) => {
assert!(substs.is_empty());
assert!(promoted.is_none());
- return codegen_static_ref(
- fx,
- def.did,
- fx.layout_of(fx.monomorphize(&constant.literal.ty)),
- )
- .to_cvalue(fx);
+ return codegen_static_ref(fx, def.did, fx.layout_of(const_.ty)).to_cvalue(fx);
}
ConstKind::Unevaluated(def, ref substs, promoted) => {
- match fx
- .tcx
- .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
- {
+ match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
Ok(const_val) => const_val,
Err(_) => {
- fx.tcx
- .sess
- .span_err(constant.span, "erroneous constant encountered");
- return crate::trap::trap_unreachable_ret_value(
- fx,
- fx.layout_of(const_.ty),
- "erroneous constant encountered",
- );
+ span_bug!(constant.span, "erroneous constant not captured by required_consts");
}
}
}
@@ -156,7 +147,7 @@
}
pub(crate) fn codegen_const_value<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
const_val: ConstValue<'tcx>,
ty: Ty<'tcx>,
) -> CValue<'tcx> {
@@ -172,9 +163,7 @@
if fx.clif_type(layout.ty).is_none() {
let (size, align) = (layout.size, layout.align.pref);
let mut alloc = Allocation::from_bytes(
- std::iter::repeat(0)
- .take(size.bytes_usize())
- .collect::<Vec<u8>>(),
+ std::iter::repeat(0).take(size.bytes_usize()).collect::<Vec<u8>>(),
align,
);
let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
@@ -190,11 +179,8 @@
let base_addr = match alloc_kind {
Some(GlobalAlloc::Memory(alloc)) => {
fx.cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
- let data_id = data_id_for_alloc_id(
- &mut fx.cx.module,
- ptr.alloc_id,
- alloc.mutability,
- );
+ let data_id =
+ data_id_for_alloc_id(fx.cx.module, ptr.alloc_id, alloc.mutability);
let local_data_id =
fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
@@ -203,15 +189,14 @@
}
Some(GlobalAlloc::Function(instance)) => {
let func_id =
- crate::abi::import_function(fx.tcx, &mut fx.cx.module, instance);
+ crate::abi::import_function(fx.tcx, fx.cx.module, instance);
let local_func_id =
fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
}
Some(GlobalAlloc::Static(def_id)) => {
assert!(fx.tcx.is_static(def_id));
- let data_id =
- data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+ let data_id = data_id_for_static(fx.tcx, fx.cx.module, def_id, false);
let local_data_id =
fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
@@ -221,9 +206,7 @@
None => bug!("missing allocation {:?}", ptr.alloc_id),
};
let val = if ptr.offset.bytes() != 0 {
- fx.bcx
- .ins()
- .iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap())
+ fx.bcx.ins().iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap())
} else {
base_addr
};
@@ -240,22 +223,22 @@
let ptr = pointer_for_allocation(fx, data)
.offset_i64(fx, i64::try_from(start).unwrap())
.get_addr(fx);
- let len = fx.bcx.ins().iconst(
- fx.pointer_type,
- i64::try_from(end.checked_sub(start).unwrap()).unwrap(),
- );
+ let len = fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
CValue::by_val_pair(ptr, len, layout)
}
}
}
fn pointer_for_allocation<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
alloc: &'tcx Allocation,
) -> crate::pointer::Pointer {
let alloc_id = fx.tcx.create_memory_alloc(alloc);
fx.cx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
- let data_id = data_id_for_alloc_id(&mut fx.cx.module, alloc_id, alloc.mutability);
+ let data_id = data_id_for_alloc_id(fx.cx.module, alloc_id, alloc.mutability);
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
@@ -265,7 +248,7 @@
}
fn data_id_for_alloc_id(
- module: &mut impl Module,
+ module: &mut dyn Module,
alloc_id: AllocId,
mutability: rustc_hir::Mutability,
) -> DataId {
@@ -281,7 +264,7 @@
fn data_id_for_static(
tcx: TyCtxt<'_>,
- module: &mut impl Module,
+ module: &mut dyn Module,
def_id: DefId,
definition: bool,
) -> DataId {
@@ -304,12 +287,7 @@
} else {
!ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
};
- let align = tcx
- .layout_of(ParamEnv::reveal_all().and(ty))
- .unwrap()
- .align
- .pref
- .bytes();
+ let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
let attrs = tcx.codegen_fn_attrs(def_id);
@@ -332,17 +310,11 @@
// zero.
let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
- let ref_data_id = module
- .declare_data(&ref_name, Linkage::Local, false, false)
- .unwrap();
+ let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
let mut data_ctx = DataContext::new();
data_ctx.set_align(align);
let data = module.declare_data_in_data(data_id, &mut data_ctx);
- data_ctx.define(
- std::iter::repeat(0)
- .take(pointer_ty(tcx).bytes() as usize)
- .collect(),
- );
+ data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
data_ctx.write_data_addr(0, data, 0);
match module.define_data(ref_data_id, &data_ctx) {
// Every time the static is referenced there will be another definition of this global,
@@ -356,7 +328,7 @@
}
}
-fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut impl Module, cx: &mut ConstantCx) {
+fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut ConstantCx) {
while let Some(todo_item) = cx.todo.pop() {
let (data_id, alloc, section_name) = match todo_item {
TodoItem::Alloc(alloc_id) => {
@@ -371,10 +343,7 @@
TodoItem::Static(def_id) => {
//println!("static {:?}", def_id);
- let section_name = tcx
- .codegen_fn_attrs(def_id)
- .link_section
- .map(|s| s.as_str());
+ let section_name = tcx.codegen_fn_attrs(def_id).link_section.map(|s| s.as_str());
let alloc = tcx.eval_static_initializer(def_id).unwrap();
@@ -396,9 +365,7 @@
data_ctx.set_segment_section("", &*section_name);
}
- let bytes = alloc
- .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
- .to_vec();
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
data_ctx.define(bytes.into_boxed_slice());
for &(offset, (_tag, reloc)) in alloc.relocations().iter() {
@@ -426,10 +393,7 @@
data_id_for_alloc_id(module, reloc, target_alloc.mutability)
}
GlobalAlloc::Static(def_id) => {
- if tcx
- .codegen_fn_attrs(def_id)
- .flags
- .contains(CodegenFnAttrFlags::THREAD_LOCAL)
+ if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
{
tcx.sess.fatal(&format!(
"Allocation {:?} contains reference to TLS value {:?}",
@@ -457,14 +421,16 @@
}
pub(crate) fn mir_operand_get_const_val<'tcx>(
- fx: &FunctionCx<'_, 'tcx, impl Module>,
+ fx: &FunctionCx<'_, '_, 'tcx>,
operand: &Operand<'tcx>,
-) -> Option<&'tcx Const<'tcx>> {
+) -> Option<ConstValue<'tcx>> {
match operand {
Operand::Copy(_) | Operand::Move(_) => None,
- Operand::Constant(const_) => Some(
- fx.monomorphize(const_.literal)
- .eval(fx.tcx, ParamEnv::reveal_all()),
- ),
+ Operand::Constant(const_) => match const_.literal {
+ ConstantKind::Ty(const_) => {
+ fx.monomorphize(const_).eval(fx.tcx, ParamEnv::reveal_all()).val.try_to_value()
+ }
+ ConstantKind::Val(val, _) => Some(val),
+ },
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
index 6160f9b..6018eef 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
@@ -14,10 +14,7 @@
let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
let root = self.dwarf.unit.root();
let root = self.dwarf.unit.get_mut(root);
- root.set(
- gimli::DW_AT_ranges,
- AttributeValue::RangeListRef(unit_range_list_id),
- );
+ root.set(gimli::DW_AT_ranges, AttributeValue::RangeListRef(unit_range_list_id));
let mut sections = Sections::new(WriterRelocate::new(self.endian));
self.dwarf.write(&mut sections).unwrap();
@@ -66,10 +63,7 @@
impl WriterRelocate {
pub(super) fn new(endian: RunTimeEndian) -> Self {
- WriterRelocate {
- relocs: Vec::new(),
- writer: EndianVec::new(endian),
- }
+ WriterRelocate { relocs: Vec::new(), writer: EndianVec::new(endian) }
}
/// Perform the collected relocations to be usable for JIT usage.
@@ -85,9 +79,7 @@
cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
);
let val = (addr as u64 as i64 + reloc.addend) as u64;
- self.writer
- .write_udata_at(reloc.offset as usize, val, reloc.size)
- .unwrap();
+ self.writer.write_udata_at(reloc.offset as usize, val, reloc.size).unwrap();
}
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
index d226755..30ed356 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -53,11 +53,7 @@
if hash.kind == SourceFileHashAlgorithm::Md5 {
let mut buf = [0u8; MD5_LEN];
buf.copy_from_slice(hash.hash_bytes());
- Some(FileInfo {
- timestamp: 0,
- size: 0,
- md5: buf,
- })
+ Some(FileInfo { timestamp: 0, size: 0, md5: buf })
} else {
None
}
@@ -112,24 +108,14 @@
let entry = self.dwarf.unit.get_mut(entry_id);
- entry.set(
- gimli::DW_AT_decl_file,
- AttributeValue::FileIndex(Some(file_id)),
- );
- entry.set(
- gimli::DW_AT_decl_line,
- AttributeValue::Udata(loc.line as u64),
- );
+ entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id)));
+ entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(loc.line as u64));
// FIXME: probably omit this
- entry.set(
- gimli::DW_AT_decl_column,
- AttributeValue::Udata(loc.col.to_usize() as u64),
- );
+ entry.set(gimli::DW_AT_decl_column, AttributeValue::Udata(loc.col.to_usize() as u64));
}
pub(super) fn create_debug_lines(
&mut self,
- isa: &dyn cranelift_codegen::isa::TargetIsa,
symbol: usize,
entry_id: UnitEntryId,
context: &Context,
@@ -138,7 +124,6 @@
) -> CodeOffset {
let tcx = self.tcx;
let line_program = &mut self.dwarf.unit.line_program;
- let func = &context.func;
let line_strings = &mut self.dwarf.line_strings;
let mut last_span = None;
@@ -202,43 +187,22 @@
let mut func_end = 0;
- if let Some(ref mcr) = &context.mach_compile_result {
- for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
- line_program.row().address_offset = u64::from(start);
- if !loc.is_default() {
- let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
- create_row_for_span(line_program, source_info.span);
- } else {
- create_row_for_span(line_program, function_span);
- }
- func_end = end;
+ let mcr = context.mach_compile_result.as_ref().unwrap();
+ for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
+ line_program.row().address_offset = u64::from(start);
+ if !loc.is_default() {
+ let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
+ create_row_for_span(line_program, source_info.span);
+ } else {
+ create_row_for_span(line_program, function_span);
}
-
- line_program.end_sequence(u64::from(func_end));
-
- func_end = mcr.buffer.total_size();
- } else {
- let encinfo = isa.encoding_info();
- let mut blocks = func.layout.blocks().collect::<Vec<_>>();
- blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase
-
- for block in blocks {
- for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
- let srcloc = func.srclocs[inst];
- line_program.row().address_offset = u64::from(offset);
- if !srcloc.is_default() {
- let source_info =
- *source_info_set.get_index(srcloc.bits() as usize).unwrap();
- create_row_for_span(line_program, source_info.span);
- } else {
- create_row_for_span(line_program, function_span);
- }
- func_end = offset + size;
- }
- }
- line_program.end_sequence(u64::from(func_end));
+ func_end = end;
}
+ line_program.end_sequence(u64::from(func_end));
+
+ let func_end = mcr.buffer.total_size();
+
assert_ne!(func_end, 0);
let entry = self.dwarf.unit.get_mut(entry_id);
@@ -246,10 +210,7 @@
gimli::DW_AT_low_pc,
AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
);
- entry.set(
- gimli::DW_AT_high_pc,
- AttributeValue::Udata(u64::from(func_end)),
- );
+ entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(func_end)));
self.emit_location(entry_id, function_span);
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
index a6f4ded..dc8bc8d 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -9,7 +9,7 @@
use rustc_index::vec::IndexVec;
use cranelift_codegen::entity::EntityRef;
-use cranelift_codegen::ir::{StackSlots, ValueLabel, ValueLoc};
+use cranelift_codegen::ir::{LabelValueLoc, StackSlots, ValueLabel, ValueLoc};
use cranelift_codegen::isa::TargetIsa;
use cranelift_codegen::ValueLocRange;
@@ -39,7 +39,6 @@
dwarf: DwarfUnit,
unit_range_list: RangeList,
- clif_types: FxHashMap<Type, UnitEntryId>,
types: FxHashMap<Ty<'tcx>, UnitEntryId>,
}
@@ -91,20 +90,11 @@
let root = dwarf.unit.root();
let root = dwarf.unit.get_mut(root);
- root.set(
- gimli::DW_AT_producer,
- AttributeValue::StringRef(dwarf.strings.add(producer)),
- );
- root.set(
- gimli::DW_AT_language,
- AttributeValue::Language(gimli::DW_LANG_Rust),
- );
+ root.set(gimli::DW_AT_producer, AttributeValue::StringRef(dwarf.strings.add(producer)));
+ root.set(gimli::DW_AT_language, AttributeValue::Language(gimli::DW_LANG_Rust));
root.set(gimli::DW_AT_name, AttributeValue::StringRef(name));
root.set(gimli::DW_AT_comp_dir, AttributeValue::StringRef(comp_dir));
- root.set(
- gimli::DW_AT_low_pc,
- AttributeValue::Address(Address::Constant(0)),
- );
+ root.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0)));
}
DebugContext {
@@ -115,48 +105,10 @@
dwarf,
unit_range_list: RangeList(Vec::new()),
- clif_types: FxHashMap::default(),
types: FxHashMap::default(),
}
}
- fn dwarf_ty_for_clif_ty(&mut self, ty: Type) -> UnitEntryId {
- if let Some(type_id) = self.clif_types.get(&ty) {
- return *type_id;
- }
-
- let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
-
- let primitive = |dwarf: &mut DwarfUnit, ate| {
- let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
- let type_entry = dwarf.unit.get_mut(type_id);
- type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
- type_id
- };
-
- let type_id = if ty.is_bool() {
- primitive(&mut self.dwarf, gimli::DW_ATE_boolean)
- } else if ty.is_int() {
- primitive(&mut self.dwarf, gimli::DW_ATE_address)
- } else if ty.is_float() {
- primitive(&mut self.dwarf, gimli::DW_ATE_float)
- } else {
- new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type)
- };
-
- let type_entry = self.dwarf.unit.get_mut(type_id);
- type_entry.set(
- gimli::DW_AT_name,
- AttributeValue::String(format!("{}", ty).replace('i', "u").into_bytes()),
- );
- type_entry.set(
- gimli::DW_AT_byte_size,
- AttributeValue::Udata(u64::from(ty.bytes())),
- );
-
- type_id
- }
-
fn dwarf_ty(&mut self, ty: Ty<'tcx>) -> UnitEntryId {
if let Some(type_id) = self.types.get(ty) {
return *type_id;
@@ -181,10 +133,7 @@
ty::Int(_) => primitive(&mut self.dwarf, gimli::DW_ATE_signed),
ty::Float(_) => primitive(&mut self.dwarf, gimli::DW_ATE_float),
ty::Ref(_, pointee_ty, _mutbl)
- | ty::RawPtr(ty::TypeAndMut {
- ty: pointee_ty,
- mutbl: _mutbl,
- }) => {
+ | ty::RawPtr(ty::TypeAndMut { ty: pointee_ty, mutbl: _mutbl }) => {
let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_pointer_type);
// Ensure that type is inserted before recursing to avoid duplicates
@@ -211,10 +160,7 @@
let field_offset = layout.fields.offset(field_idx);
let field_layout = layout
.field(
- &layout::LayoutCx {
- tcx: self.tcx,
- param_env: ParamEnv::reveal_all(),
- },
+ &layout::LayoutCx { tcx: self.tcx, param_env: ParamEnv::reveal_all() },
field_idx,
)
.unwrap();
@@ -243,10 +189,7 @@
let type_entry = self.dwarf.unit.get_mut(type_id);
type_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
- type_entry.set(
- gimli::DW_AT_byte_size,
- AttributeValue::Udata(layout.size.bytes()),
- );
+ type_entry.set(gimli::DW_AT_byte_size, AttributeValue::Udata(layout.size.bytes()));
self.types.insert(ty, type_id);
@@ -286,23 +229,15 @@
let name_id = self.dwarf.strings.add(name);
// Gdb requires DW_AT_name. Otherwise the DW_TAG_subprogram is skipped.
entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
- entry.set(
- gimli::DW_AT_linkage_name,
- AttributeValue::StringRef(name_id),
- );
+ entry.set(gimli::DW_AT_linkage_name, AttributeValue::StringRef(name_id));
- let end =
- self.create_debug_lines(isa, symbol, entry_id, context, mir.span, source_info_set);
+ let end = self.create_debug_lines(symbol, entry_id, context, mir.span, source_info_set);
self.unit_range_list.0.push(Range::StartLength {
begin: Address::Symbol { symbol, addend: 0 },
length: u64::from(end),
});
- if isa.get_mach_backend().is_some() {
- return; // Not yet implemented for the AArch64 backend.
- }
-
let func_entry = self.dwarf.unit.get_mut(entry_id);
// Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped.
func_entry.set(
@@ -312,51 +247,6 @@
// Using Udata for DW_AT_high_pc requires at least DWARF4
func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
- // FIXME Remove once actual debuginfo for locals works.
- for (i, (param, &val)) in context
- .func
- .signature
- .params
- .iter()
- .zip(
- context
- .func
- .dfg
- .block_params(context.func.layout.entry_block().unwrap()),
- )
- .enumerate()
- {
- use cranelift_codegen::ir::ArgumentPurpose;
- let base_name = match param.purpose {
- ArgumentPurpose::Normal => "arg",
- ArgumentPurpose::StructArgument(_) => "struct_arg",
- ArgumentPurpose::StructReturn => "sret",
- ArgumentPurpose::Link
- | ArgumentPurpose::FramePointer
- | ArgumentPurpose::CalleeSaved => continue,
- ArgumentPurpose::VMContext
- | ArgumentPurpose::SignatureId
- | ArgumentPurpose::CallerTLS
- | ArgumentPurpose::CalleeTLS
- | ArgumentPurpose::StackLimit => unreachable!(),
- };
- let name = format!("{}{}", base_name, i);
-
- let dw_ty = self.dwarf_ty_for_clif_ty(param.value_type);
- let loc =
- translate_loc(isa, context.func.locations[val], &context.func.stack_slots).unwrap();
-
- let arg_id = self
- .dwarf
- .unit
- .add(entry_id, gimli::DW_TAG_formal_parameter);
- let var_entry = self.dwarf.unit.get_mut(arg_id);
-
- var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
- var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
- var_entry.set(gimli::DW_AT_location, AttributeValue::Exprloc(loc));
- }
-
// FIXME make it more reliable and implement scopes before re-enabling this.
if false {
let value_labels_ranges = context.build_value_labels_ranges(isa).unwrap();
@@ -376,10 +266,7 @@
context,
&local_map,
&value_labels_ranges,
- Place {
- local,
- projection: ty::List::empty(),
- },
+ Place { local, projection: ty::List::empty() },
);
let var_entry = self.dwarf.unit.get_mut(var_id);
@@ -417,10 +304,7 @@
symbol,
addend: i64::from(value_loc_range.start),
},
- end: Address::Symbol {
- symbol,
- addend: i64::from(value_loc_range.end),
- },
+ end: Address::Symbol { symbol, addend: i64::from(value_loc_range.end) },
data: translate_loc(
isa,
value_loc_range.loc,
@@ -463,17 +347,17 @@
// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
fn translate_loc(
isa: &dyn TargetIsa,
- loc: ValueLoc,
+ loc: LabelValueLoc,
stack_slots: &StackSlots,
) -> Option<Expression> {
match loc {
- ValueLoc::Reg(reg) => {
+ LabelValueLoc::ValueLoc(ValueLoc::Reg(reg)) => {
let machine_reg = isa.map_dwarf_register(reg).unwrap();
let mut expr = Expression::new();
expr.op_reg(gimli::Register(machine_reg));
Some(expr)
}
- ValueLoc::Stack(ss) => {
+ LabelValueLoc::ValueLoc(ValueLoc::Stack(ss)) => {
if let Some(ss_offset) = stack_slots[ss].offset {
let mut expr = Expression::new();
expr.op_breg(X86_64::RBP, i64::from(ss_offset) + 16);
@@ -482,6 +366,17 @@
None
}
}
- _ => None,
+ LabelValueLoc::ValueLoc(ValueLoc::Unassigned) => unreachable!(),
+ LabelValueLoc::Reg(reg) => {
+ let machine_reg = isa.map_regalloc_reg_to_dwarf(reg).unwrap();
+ let mut expr = Expression::new();
+ expr.op_reg(gimli::Register(machine_reg));
+ Some(expr)
+ }
+ LabelValueLoc::SPOffset(offset) => {
+ let mut expr = Expression::new();
+ expr.op_breg(X86_64::RSP, offset);
+ Some(expr)
+ }
}
}
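
The translate_loc change above is where the new-style backend's value locations get turned into DWARF location expressions. As a point of reference, the SPOffset arm boils down to a single DW_OP_breg relative to RSP; a minimal standalone sketch using the same gimli write API (assuming the gimli version and "write" feature this file already depends on):

    use gimli::write::Expression;
    use gimli::X86_64;

    // Build the location expression for a value spilled at a fixed offset
    // from the stack pointer, mirroring the LabelValueLoc::SPOffset arm.
    fn sp_offset_loc(offset: i64) -> Expression {
        let mut expr = Expression::new();
        expr.op_breg(X86_64::RSP, offset);
        expr
    }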
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
index 49de927..357c9fe 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
@@ -28,11 +28,7 @@
None
};
- UnwindContext {
- tcx,
- frame_table,
- cie_id,
- }
+ UnwindContext { tcx, frame_table, cie_id }
}
pub(crate) fn add_function(&mut self, func_id: FuncId, context: &Context, isa: &dyn TargetIsa) {
@@ -46,10 +42,8 @@
UnwindInfo::SystemV(unwind_info) => {
self.frame_table.add_fde(
self.cie_id.unwrap(),
- unwind_info.to_fde(Address::Symbol {
- symbol: func_id.as_u32() as usize,
- addend: 0,
- }),
+ unwind_info
+ .to_fde(Address::Symbol { symbol: func_id.as_u32() as usize, addend: 0 }),
);
}
UnwindInfo::WindowsX64(_) => {
@@ -60,9 +54,8 @@
}
pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) {
- let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
- self.tcx,
- )));
+ let mut eh_frame =
+ EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.tcx)));
self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
if !eh_frame.0.writer.slice().is_empty() {
@@ -82,9 +75,8 @@
self,
jit_module: &cranelift_jit::JITModule,
) -> Option<UnwindRegistry> {
- let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
- self.tcx,
- )));
+ let mut eh_frame =
+ EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.tcx)));
self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
if eh_frame.0.writer.slice().is_empty() {
@@ -130,10 +122,7 @@
registrations.push(ptr as usize);
}
- Some(UnwindRegistry {
- _frame_table: eh_frame,
- registrations,
- })
+ Some(UnwindRegistry { _frame_table: eh_frame, registrations })
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs
index ad63501..3326f87 100644
--- a/compiler/rustc_codegen_cranelift/src/discriminant.rs
+++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs
@@ -7,7 +7,7 @@
use crate::prelude::*;
pub(crate) fn codegen_set_discriminant<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
place: CPlace<'tcx>,
variant_index: VariantIdx,
) {
@@ -26,11 +26,7 @@
variants: _,
} => {
let ptr = place.place_field(fx, mir::Field::new(tag_field));
- let to = layout
- .ty
- .discriminant_for_variant(fx.tcx, variant_index)
- .unwrap()
- .val;
+ let to = layout.ty.discriminant_for_variant(fx.tcx, variant_index).unwrap().val;
let to = if ptr.layout().abi.is_signed() {
ty::ScalarInt::try_from_int(
ptr.layout().size.sign_extend(to) as i128,
@@ -46,12 +42,7 @@
Variants::Multiple {
tag: _,
tag_field,
- tag_encoding:
- TagEncoding::Niche {
- dataful_variant,
- ref niche_variants,
- niche_start,
- },
+ tag_encoding: TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
variants: _,
} => {
if variant_index != dataful_variant {
@@ -70,7 +61,7 @@
}
pub(crate) fn codegen_get_discriminant<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
value: CValue<'tcx>,
dest_layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
@@ -101,12 +92,9 @@
};
return CValue::const_val(fx, dest_layout, discr_val);
}
- Variants::Multiple {
- tag,
- tag_field,
- tag_encoding,
- variants: _,
- } => (tag, *tag_field, tag_encoding),
+ Variants::Multiple { tag, tag_field, tag_encoding, variants: _ } => {
+ (tag, *tag_field, tag_encoding)
+ }
};
let cast_to = fx.clif_type(dest_layout.ty).unwrap();
@@ -125,11 +113,7 @@
let val = clif_intcast(fx, tag, cast_to, signed);
CValue::by_val(val, dest_layout)
}
- TagEncoding::Niche {
- dataful_variant,
- ref niche_variants,
- niche_start,
- } => {
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
// Rebase from niche values to discriminants, and check
// whether the result is in range for the niche variants.
@@ -146,9 +130,7 @@
tag
} else {
// FIXME handle niche_start > i64::MAX
- fx.bcx
- .ins()
- .iadd_imm(tag, -i64::try_from(niche_start).unwrap())
+ fx.bcx.ins().iadd_imm(tag, -i64::try_from(niche_start).unwrap())
};
let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
let is_niche = {
@@ -176,15 +158,10 @@
} else {
clif_intcast(fx, relative_discr, cast_to, false)
};
- fx.bcx
- .ins()
- .iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
+ fx.bcx.ins().iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
};
- let dataful_variant = fx
- .bcx
- .ins()
- .iconst(cast_to, i64::from(dataful_variant.as_u32()));
+ let dataful_variant = fx.bcx.ins().iconst(cast_to, i64::from(dataful_variant.as_u32()));
let discr = fx.bcx.ins().select(is_niche, niche_discr, dataful_variant);
CValue::by_val(discr, dest_layout)
}
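
The niche arm above rebases the loaded tag and then selects between a rebased variant index and the dataful variant. A plain-Rust sketch of the same decoding logic (a hypothetical standalone helper, not cg_clif code; the wrapping subtraction stands in for the iadd_imm with a negated niche_start):

    use std::ops::RangeInclusive;

    fn decode_niche(
        tag: u64,
        niche_start: u64,
        niche_variants: RangeInclusive<u32>,
        dataful_variant: u32,
    ) -> u32 {
        // Rebase from niche values to relative discriminants.
        let relative_discr = tag.wrapping_sub(niche_start);
        let relative_max = u64::from(*niche_variants.end() - *niche_variants.start());
        if relative_discr <= relative_max {
            // In range: variant index = rebased value + start of the niche range.
            *niche_variants.start() + relative_discr as u32
        } else {
            // Out of range: the value belongs to the dataful variant's payload.
            dataful_variant
        }
    }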
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
index df89883..b87dcc4 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -12,11 +12,9 @@
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{DebugInfo, OutputType};
-use cranelift_object::{ObjectModule, ObjectProduct};
+use cranelift_object::ObjectModule;
-use crate::prelude::*;
-
-use crate::backend::AddConstructor;
+use crate::{prelude::*, BackendConfig};
fn new_module(tcx: TyCtxt<'_>, name: String) -> ObjectModule {
let module = crate::backend::make_module(tcx.sess, name);
@@ -39,7 +37,6 @@
module: ObjectModule,
debug: Option<DebugContext<'_>>,
unwind_context: UnwindContext<'_>,
- map_product: impl FnOnce(ObjectProduct) -> ObjectProduct,
) -> ModuleCodegenResult {
let mut product = module.finish();
@@ -49,15 +46,10 @@
unwind_context.emit(&mut product);
- let product = map_product(product);
-
- let tmp_file = tcx
- .output_filenames(LOCAL_CRATE)
- .temp_path(OutputType::Object, Some(&name));
+ let tmp_file = tcx.output_filenames(LOCAL_CRATE).temp_path(OutputType::Object, Some(&name));
let obj = product.object.write().unwrap();
if let Err(err) = std::fs::write(&tmp_file, obj) {
- tcx.sess
- .fatal(&format!("error writing object file: {}", err));
+ tcx.sess.fatal(&format!("error writing object file: {}", err));
}
let work_product = if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() {
@@ -71,13 +63,7 @@
};
ModuleCodegenResult(
- CompiledModule {
- name,
- kind,
- object: Some(tmp_file),
- dwarf_object: None,
- bytecode: None,
- },
+ CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None },
work_product,
)
}
@@ -117,55 +103,33 @@
}
}
-fn module_codegen(tcx: TyCtxt<'_>, cgu_name: rustc_span::Symbol) -> ModuleCodegenResult {
+fn module_codegen(
+ tcx: TyCtxt<'_>,
+ (backend_config, cgu_name): (BackendConfig, rustc_span::Symbol),
+) -> ModuleCodegenResult {
let cgu = tcx.codegen_unit(cgu_name);
let mono_items = cgu.items_in_deterministic_order(tcx);
let mut module = new_module(tcx, cgu_name.as_str().to_string());
- // Initialize the global atomic mutex using a constructor for proc-macros.
- // FIXME implement atomic instructions in Cranelift.
- let mut init_atomics_mutex_from_constructor = None;
- if tcx
- .sess
- .crate_types()
- .contains(&rustc_session::config::CrateType::ProcMacro)
- {
- if mono_items.iter().any(|(mono_item, _)| match mono_item {
- rustc_middle::mir::mono::MonoItem::Static(def_id) => tcx
- .symbol_name(Instance::mono(tcx, *def_id))
- .name
- .contains("__rustc_proc_macro_decls_"),
- _ => false,
- }) {
- init_atomics_mutex_from_constructor =
- Some(crate::atomic_shim::init_global_lock_constructor(
- &mut module,
- &format!("{}_init_atomics_mutex", cgu_name.as_str()),
- ));
- }
- }
-
let mut cx = crate::CodegenCx::new(
tcx,
- module,
+ backend_config,
+ &mut module,
tcx.sess.opts.debuginfo != DebugInfo::None,
- true,
);
super::predefine_mono_items(&mut cx, &mono_items);
for (mono_item, (linkage, visibility)) in mono_items {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
match mono_item {
MonoItem::Fn(inst) => {
- cx.tcx.sess.time("codegen fn", || {
- crate::base::codegen_fn(&mut cx, inst, linkage)
- });
+ cx.tcx.sess.time("codegen fn", || crate::base::codegen_fn(&mut cx, inst, linkage));
}
MonoItem::Static(def_id) => {
crate::constant::codegen_static(&mut cx.constants_cx, def_id)
}
- MonoItem::GlobalAsm(hir_id) => {
- let item = cx.tcx.hir().expect_item(hir_id);
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx.hir().item(item_id);
if let rustc_hir::ItemKind::GlobalAsm(rustc_hir::GlobalAsm { asm }) = item.kind {
cx.global_asm.push_str(&*asm.as_str());
cx.global_asm.push_str("\n\n");
@@ -175,9 +139,9 @@
}
}
}
- let (mut module, global_asm, debug, mut unwind_context) =
+ let (global_asm, debug, mut unwind_context) =
tcx.sess.time("finalize CodegenCx", || cx.finalize());
- crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context, false);
+ crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context);
let codegen_result = emit_module(
tcx,
@@ -186,13 +150,6 @@
module,
debug,
unwind_context,
- |mut product| {
- if let Some(func_id) = init_atomics_mutex_from_constructor {
- product.add_constructor(func_id);
- }
-
- product
- },
);
codegen_global_asm(tcx, &cgu.name().as_str(), &global_asm);
@@ -202,6 +159,7 @@
pub(super) fn run_aot(
tcx: TyCtxt<'_>,
+ backend_config: BackendConfig,
metadata: EncodedMetadata,
need_metadata_module: bool,
) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
@@ -225,9 +183,7 @@
cgus.iter()
.map(|cgu| {
let cgu_reuse = determine_cgu_reuse(tcx, cgu);
- tcx.sess
- .cgu_reuse_tracker
- .set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
+ tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
match cgu_reuse {
_ if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() => {}
@@ -242,7 +198,7 @@
let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
dep_node,
tcx,
- cgu.name(),
+ (backend_config, cgu.name()),
module_codegen,
rustc_middle::dep_graph::hash_result,
);
@@ -271,7 +227,6 @@
allocator_module,
None,
allocator_unwind_context,
- |product| product,
);
if let Some((id, product)) = work_product {
work_products.insert(id, product);
@@ -301,8 +256,7 @@
});
if let Err(err) = std::fs::write(&tmp_file, obj) {
- tcx.sess
- .fatal(&format!("error writing metadata object file: {}", err));
+ tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
}
(metadata_cgu_name, tmp_file)
@@ -356,8 +310,7 @@
"asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
);
} else {
- tcx.sess
- .fatal("asm! and global_asm! are not yet supported on macOS and Windows");
+ tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
}
}
@@ -367,19 +320,12 @@
// Remove all LLVM style comments
let global_asm = global_asm
.lines()
- .map(|line| {
- if let Some(index) = line.find("//") {
- &line[0..index]
- } else {
- line
- }
- })
+ .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
.collect::<Vec<_>>()
.join("\n");
- let output_object_file = tcx
- .output_filenames(LOCAL_CRATE)
- .temp_path(OutputType::Object, Some(cgu_name));
+ let output_object_file =
+ tcx.output_filenames(LOCAL_CRATE).temp_path(OutputType::Object, Some(cgu_name));
// Assemble `global_asm`
let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
@@ -389,16 +335,10 @@
.stdin(Stdio::piped())
.spawn()
.expect("Failed to spawn `as`.");
- child
- .stdin
- .take()
- .unwrap()
- .write_all(global_asm.as_bytes())
- .unwrap();
+ child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
let status = child.wait().expect("Failed to wait for `as`.");
if !status.success() {
- tcx.sess
- .fatal(&format!("Failed to assemble `{}`", global_asm));
+ tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm));
}
// Link the global asm and main object file together
@@ -442,11 +382,7 @@
}
let work_product_id = &cgu.work_product_id();
- if tcx
- .dep_graph
- .previous_work_product(work_product_id)
- .is_none()
- {
+ if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
// We don't have anything cached for this CGU. This can happen
// if the CGU did not exist in the previous session.
return CguReuse::No;
@@ -465,9 +401,5 @@
cgu.name()
);
- if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
- CguReuse::PreLto
- } else {
- CguReuse::No
- }
+ if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
}
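
One shape change worth calling out: module_codegen now receives its extra context through the dep-graph task argument, so the task key becomes a (BackendConfig, Symbol) tuple instead of just the CGU name. A reduced stand-in for that calling pattern (with_task here is a simplified placeholder, not the rustc dep-graph API):

    fn with_task<A, R>(key: A, task: fn(&'static str, A) -> R) -> R {
        // The real dep graph also hashes and records the key for incremental reuse.
        task("tcx", key)
    }

    fn module_codegen(_tcx: &'static str, (backend_config, cgu_name): (u32, &'static str)) -> String {
        format!("codegen {} with config {}", cgu_name, backend_config)
    }

    fn main() {
        let result = with_task((7, "cgu.0"), module_codegen);
        assert_eq!(result, "codegen cgu.0 with config 7");
    }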
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
index 2d14ff2..245df03 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/jit.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -10,43 +10,24 @@
use cranelift_jit::{JITBuilder, JITModule};
-use crate::prelude::*;
+use crate::{prelude::*, BackendConfig};
use crate::{CodegenCx, CodegenMode};
thread_local! {
+ pub static BACKEND_CONFIG: RefCell<Option<BackendConfig>> = RefCell::new(None);
pub static CURRENT_MODULE: RefCell<Option<JITModule>> = RefCell::new(None);
}
-pub(super) fn run_jit(tcx: TyCtxt<'_>, codegen_mode: CodegenMode) -> ! {
+pub(super) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
if !tcx.sess.opts.output_types.should_codegen() {
tcx.sess.fatal("JIT mode doesn't work with `cargo check`.");
}
- #[cfg(unix)]
- unsafe {
- // When not using our custom driver rustc will open us without the RTLD_GLOBAL flag, so
- // __cg_clif_global_atomic_mutex will not be exported. We fix this by opening ourself again
- // as global.
- // FIXME remove once atomic_shim is gone
-
- let mut dl_info: libc::Dl_info = std::mem::zeroed();
- assert_ne!(
- libc::dladdr(run_jit as *const libc::c_void, &mut dl_info),
- 0
- );
- assert_ne!(
- libc::dlopen(dl_info.dli_fname, libc::RTLD_NOW | libc::RTLD_GLOBAL),
- std::ptr::null_mut(),
- );
- }
-
let imported_symbols = load_imported_symbols_for_jit(tcx);
- let mut jit_builder = JITBuilder::with_isa(
- crate::build_isa(tcx.sess),
- cranelift_module::default_libcall_names(),
- );
- jit_builder.hotswap(matches!(codegen_mode, CodegenMode::JitLazy));
+ let mut jit_builder =
+ JITBuilder::with_isa(crate::build_isa(tcx.sess), cranelift_module::default_libcall_names());
+ jit_builder.hotswap(matches!(backend_config.codegen_mode, CodegenMode::JitLazy));
jit_builder.symbols(imported_symbols);
let mut jit_module = JITModule::new(jit_builder);
assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type());
@@ -56,14 +37,10 @@
AbiParam::new(jit_module.target_config().pointer_type()),
AbiParam::new(jit_module.target_config().pointer_type()),
],
- returns: vec![AbiParam::new(
- jit_module.target_config().pointer_type(), /*isize*/
- )],
+ returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
call_conv: CallConv::triple_default(&crate::target_triple(tcx.sess)),
};
- let main_func_id = jit_module
- .declare_function("main", Linkage::Import, &sig)
- .unwrap();
+ let main_func_id = jit_module.declare_function("main", Linkage::Import, &sig).unwrap();
let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
let mono_items = cgus
@@ -74,35 +51,34 @@
.into_iter()
.collect::<Vec<(_, (_, _))>>();
- let mut cx = crate::CodegenCx::new(tcx, jit_module, false, false);
+ let mut cx = crate::CodegenCx::new(tcx, backend_config, &mut jit_module, false);
super::time(tcx, "codegen mono items", || {
super::predefine_mono_items(&mut cx, &mono_items);
for (mono_item, (linkage, visibility)) in mono_items {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
match mono_item {
- MonoItem::Fn(inst) => match codegen_mode {
+ MonoItem::Fn(inst) => match backend_config.codegen_mode {
CodegenMode::Aot => unreachable!(),
CodegenMode::Jit => {
- cx.tcx.sess.time("codegen fn", || {
- crate::base::codegen_fn(&mut cx, inst, linkage)
- });
+ cx.tcx
+ .sess
+ .time("codegen fn", || crate::base::codegen_fn(&mut cx, inst, linkage));
}
CodegenMode::JitLazy => codegen_shim(&mut cx, inst),
},
MonoItem::Static(def_id) => {
crate::constant::codegen_static(&mut cx.constants_cx, def_id);
}
- MonoItem::GlobalAsm(hir_id) => {
- let item = cx.tcx.hir().expect_item(hir_id);
- tcx.sess
- .span_fatal(item.span, "Global asm is not supported in JIT mode");
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx.hir().item(item_id);
+ tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
}
}
}
});
- let (mut jit_module, global_asm, _debug, mut unwind_context) =
+ let (global_asm, _debug, mut unwind_context) =
tcx.sess.time("finalize CodegenCx", || cx.finalize());
jit_module.finalize_definitions();
@@ -110,7 +86,7 @@
tcx.sess.fatal("Inline asm is not supported in JIT mode");
}
- crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context, true);
+ crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context);
crate::allocator::codegen(tcx, &mut jit_module, &mut unwind_context);
tcx.sess.abort_if_errors();
@@ -121,7 +97,9 @@
let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id);
- println!("Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed");
+ println!(
+ "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
+ );
let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
unsafe { ::std::mem::transmute(finalized_main) };
@@ -137,6 +115,9 @@
// useful as some dynamic linkers use it as a marker to jump over.
argv.push(std::ptr::null());
+ BACKEND_CONFIG.with(|tls_backend_config| {
+ assert!(tls_backend_config.borrow_mut().replace(backend_config).is_none())
+ });
CURRENT_MODULE
.with(|current_module| assert!(current_module.borrow_mut().replace(jit_module).is_none()));
@@ -154,21 +135,19 @@
CURRENT_MODULE.with(|jit_module| {
let mut jit_module = jit_module.borrow_mut();
let jit_module = jit_module.as_mut().unwrap();
- let mut cx = crate::CodegenCx::new(tcx, jit_module, false, false);
+ let backend_config =
+ BACKEND_CONFIG.with(|backend_config| backend_config.borrow().clone().unwrap());
let name = tcx.symbol_name(instance).name.to_string();
- let sig = crate::abi::get_function_sig(tcx, cx.module.isa().triple(), instance);
- let func_id = cx
- .module
- .declare_function(&name, Linkage::Export, &sig)
- .unwrap();
- cx.module.prepare_for_function_redefine(func_id).unwrap();
+ let sig = crate::abi::get_function_sig(tcx, jit_module.isa().triple(), instance);
+ let func_id = jit_module.declare_function(&name, Linkage::Export, &sig).unwrap();
+ jit_module.prepare_for_function_redefine(func_id).unwrap();
- tcx.sess.time("codegen fn", || {
- crate::base::codegen_fn(&mut cx, instance, Linkage::Export)
- });
+ let mut cx = crate::CodegenCx::new(tcx, backend_config, jit_module, false);
+ tcx.sess
+ .time("codegen fn", || crate::base::codegen_fn(&mut cx, instance, Linkage::Export));
- let (jit_module, global_asm, _debug_context, unwind_context) = cx.finalize();
+ let (global_asm, _debug_context, unwind_context) = cx.finalize();
assert!(global_asm.is_empty());
jit_module.finalize_definitions();
std::mem::forget(unsafe { unwind_context.register_jit(&jit_module) });
@@ -195,9 +174,8 @@
Linkage::NotLinked | Linkage::IncludedFromDylib => {}
Linkage::Static => {
let name = tcx.crate_name(cnum);
- let mut err = tcx
- .sess
- .struct_err(&format!("Can't load static lib {}", name.as_str()));
+ let mut err =
+ tcx.sess.struct_err(&format!("Can't load static lib {}", name.as_str()));
err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
err.emit();
}
@@ -218,6 +196,11 @@
if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
return None;
}
+ if name.starts_with("rust_metadata_") {
+ // The metadata is part of a section that is not loaded by the dynamic linker in
+ // case of cg_llvm.
+ return None;
+ }
let dlsym_name = if cfg!(target_os = "macos") {
// On macOS `dlsym` expects the name without leading `_`.
assert!(name.starts_with('_'), "{:?}", name);
@@ -237,17 +220,14 @@
imported_symbols
}
-pub(super) fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx, impl Module>, inst: Instance<'tcx>) {
+pub(super) fn codegen_shim<'tcx>(cx: &mut CodegenCx<'_, 'tcx>, inst: Instance<'tcx>) {
let tcx = cx.tcx;
let pointer_type = cx.module.target_config().pointer_type();
let name = tcx.symbol_name(inst).name.to_string();
let sig = crate::abi::get_function_sig(tcx, cx.module.isa().triple(), inst);
- let func_id = cx
- .module
- .declare_function(&name, Linkage::Export, &sig)
- .unwrap();
+ let func_id = cx.module.declare_function(&name, Linkage::Export, &sig).unwrap();
let instance_ptr = Box::into_raw(Box::new(inst));
@@ -268,28 +248,18 @@
let mut builder_ctx = FunctionBuilderContext::new();
let mut trampoline_builder = FunctionBuilder::new(&mut trampoline, &mut builder_ctx);
- let jit_fn = cx
- .module
- .declare_func_in_func(jit_fn, trampoline_builder.func);
+ let jit_fn = cx.module.declare_func_in_func(jit_fn, trampoline_builder.func);
let sig_ref = trampoline_builder.func.import_signature(sig);
let entry_block = trampoline_builder.create_block();
trampoline_builder.append_block_params_for_function_params(entry_block);
- let fn_args = trampoline_builder
- .func
- .dfg
- .block_params(entry_block)
- .to_vec();
+ let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
trampoline_builder.switch_to_block(entry_block);
- let instance_ptr = trampoline_builder
- .ins()
- .iconst(pointer_type, instance_ptr as u64 as i64);
+ let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr]);
let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
- let call_inst = trampoline_builder
- .ins()
- .call_indirect(sig_ref, jitted_fn, &fn_args);
+ let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
trampoline_builder.ins().return_(&ret_vals);
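
Because lazy JIT compilation re-enters the backend from the trampoline, run_jit now stashes both the module and the backend config in thread-locals instead of threading them through CodegenCx. The pattern is the usual RefCell<Option<T>> thread-local slot; a minimal sketch with an illustrative config type (not the real BackendConfig):

    use std::cell::RefCell;

    #[derive(Clone)]
    struct Config {
        lazy: bool,
    }

    thread_local! {
        static CONFIG: RefCell<Option<Config>> = RefCell::new(None);
    }

    fn stash(config: Config) {
        // Mirrors the assert in run_jit: the slot must still be empty on first use.
        CONFIG.with(|slot| assert!(slot.borrow_mut().replace(config).is_none()));
    }

    fn fetch() -> Config {
        // Mirrors the lazy path: clone the stored config back out of the slot.
        CONFIG.with(|slot| slot.borrow().clone().unwrap())
    }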
diff --git a/compiler/rustc_codegen_cranelift/src/driver/mod.rs b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
index 2497f9d..b994f28 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
@@ -17,33 +17,30 @@
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
- config: crate::BackendConfig,
+ backend_config: crate::BackendConfig,
) -> Box<dyn Any> {
tcx.sess.abort_if_errors();
- match config.codegen_mode {
- CodegenMode::Aot => aot::run_aot(tcx, metadata, need_metadata_module),
+ match backend_config.codegen_mode {
+ CodegenMode::Aot => aot::run_aot(tcx, backend_config, metadata, need_metadata_module),
CodegenMode::Jit | CodegenMode::JitLazy => {
- let is_executable = tcx
- .sess
- .crate_types()
- .contains(&rustc_session::config::CrateType::Executable);
+ let is_executable =
+ tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable);
if !is_executable {
tcx.sess.fatal("can't jit non-executable crate");
}
#[cfg(feature = "jit")]
- let _: ! = jit::run_jit(tcx, config.codegen_mode);
+ let _: ! = jit::run_jit(tcx, backend_config);
#[cfg(not(feature = "jit"))]
- tcx.sess
- .fatal("jit support was disabled when compiling rustc_codegen_cranelift");
+ tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
}
}
}
fn predefine_mono_items<'tcx>(
- cx: &mut crate::CodegenCx<'tcx, impl Module>,
+ cx: &mut crate::CodegenCx<'_, 'tcx>,
mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
) {
cx.tcx.sess.time("predefine functions", || {
@@ -63,21 +60,12 @@
}
fn time<R>(tcx: TyCtxt<'_>, name: &'static str, f: impl FnOnce() -> R) -> R {
- if std::env::var("CG_CLIF_DISPLAY_CG_TIME")
- .as_ref()
- .map(|val| &**val)
- == Ok("1")
- {
+ if std::env::var("CG_CLIF_DISPLAY_CG_TIME").as_ref().map(|val| &**val) == Ok("1") {
println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
let before = std::time::Instant::now();
let res = tcx.sess.time(name, f);
let after = std::time::Instant::now();
- println!(
- "[{:<30}: {}] end time: {:?}",
- tcx.crate_name(LOCAL_CRATE),
- name,
- after - before
- );
+ println!("[{:<30}: {}] end time: {:?}", tcx.crate_name(LOCAL_CRATE), name, after - before);
res
} else {
tcx.sess.time(name, f)
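
The condensed condition in time() reads a little tersely: `.as_ref().map(|val| &**val) == Ok("1")` simply compares the environment variable against "1" by reference, which Result::as_deref expresses directly. A standalone sketch of the same env-gated timing wrapper (assuming nothing beyond the standard library):

    use std::time::Instant;

    fn time<R>(name: &str, f: impl FnOnce() -> R) -> R {
        if std::env::var("CG_CLIF_DISPLAY_CG_TIME").as_deref() == Ok("1") {
            println!("[{}] start", name);
            let before = Instant::now();
            let res = f();
            println!("[{}] end time: {:?}", name, before.elapsed());
            res
        } else {
            f()
        }
    }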
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
index 04aac78..5b3df2b 100644
--- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -9,7 +9,7 @@
use rustc_target::asm::*;
pub(crate) fn codegen_inline_asm<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
_span: Span,
template: &[InlineAsmTemplatePiece],
operands: &[InlineAsmOperand<'tcx>],
@@ -53,11 +53,7 @@
crate::base::codegen_operand(fx, value).load_scalar(fx),
));
}
- InlineAsmOperand::Out {
- reg,
- late: _,
- place,
- } => {
+ InlineAsmOperand::Out { reg, late: _, place } => {
let reg = expect_reg(reg);
clobbered_regs.push((reg, new_slot(reg.reg_class())));
if let Some(place) = place {
@@ -68,12 +64,7 @@
));
}
}
- InlineAsmOperand::InOut {
- reg,
- late: _,
- ref in_value,
- out_place,
- } => {
+ InlineAsmOperand::InOut { reg, late: _, ref in_value, out_place } => {
let reg = expect_reg(reg);
clobbered_regs.push((reg, new_slot(reg.reg_class())));
inputs.push((
@@ -97,11 +88,8 @@
let inline_asm_index = fx.inline_asm_index;
fx.inline_asm_index += 1;
- let asm_name = format!(
- "{}__inline_asm_{}",
- fx.tcx.symbol_name(fx.instance).name,
- inline_asm_index
- );
+ let asm_name =
+ format!("{}__inline_asm_{}", fx.tcx.symbol_name(fx.instance).name, inline_asm_index);
let generated_asm = generate_asm_wrapper(
&asm_name,
@@ -129,12 +117,7 @@
let mut generated_asm = String::new();
writeln!(generated_asm, ".globl {}", asm_name).unwrap();
writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
- writeln!(
- generated_asm,
- ".section .text.{},\"ax\",@progbits",
- asm_name
- )
- .unwrap();
+ writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
writeln!(generated_asm, "{}:", asm_name).unwrap();
generated_asm.push_str(".intel_syntax noprefix\n");
@@ -164,11 +147,7 @@
InlineAsmTemplatePiece::String(s) => {
generated_asm.push_str(s);
}
- InlineAsmTemplatePiece::Placeholder {
- operand_idx: _,
- modifier: _,
- span: _,
- } => todo!(),
+ InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span: _ } => todo!(),
}
}
generated_asm.push('\n');
@@ -203,7 +182,7 @@
}
fn call_inline_asm<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
asm_name: &str,
slot_size: Size,
inputs: Vec<(InlineAsmReg, Size, Value)>,
@@ -230,17 +209,12 @@
},
)
.unwrap();
- let inline_asm_func = fx
- .cx
- .module
- .declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+ let inline_asm_func = fx.cx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(inline_asm_func, asm_name);
for (_reg, offset, value) in inputs {
- fx.bcx
- .ins()
- .stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
}
let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
@@ -248,10 +222,7 @@
for (_reg, offset, place) in outputs {
let ty = fx.clif_type(place.layout().ty).unwrap();
- let value = fx
- .bcx
- .ins()
- .stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
place.write_cvalue(fx, CValue::by_val(value, place.layout()));
}
}
@@ -267,8 +238,7 @@
match arch {
InlineAsmArch::X86_64 => {
write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
- reg.emit(generated_asm, InlineAsmArch::X86_64, None)
- .unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
generated_asm.push('\n');
}
_ => unimplemented!("save_register for {:?}", arch),
@@ -284,8 +254,7 @@
match arch {
InlineAsmArch::X86_64 => {
generated_asm.push_str(" mov ");
- reg.emit(generated_asm, InlineAsmArch::X86_64, None)
- .unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
}
_ => unimplemented!("restore_register for {:?}", arch),
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
index c1a1cdb..b27b0ed 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
@@ -6,7 +6,7 @@
///
/// This emulates an intel cpu with sse and sse2 support, but which doesn't support anything else.
pub(crate) fn codegen_cpuid_call<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
leaf: Value,
_subleaf: Value,
) -> (Value, Value, Value, Value) {
@@ -31,54 +31,28 @@
fx.bcx.switch_to_block(leaf_0);
let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1);
- let vend0 = fx
- .bcx
- .ins()
- .iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
- let vend2 = fx
- .bcx
- .ins()
- .iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
- let vend1 = fx
- .bcx
- .ins()
- .iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
- fx.bcx
- .ins()
- .jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
+ let vend0 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
+ let vend2 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
+ let vend1 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
+ fx.bcx.ins().jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
fx.bcx.switch_to_block(leaf_1);
let cpu_signature = fx.bcx.ins().iconst(types::I32, 0);
let additional_information = fx.bcx.ins().iconst(types::I32, 0);
let ecx_features = fx.bcx.ins().iconst(types::I32, 0);
- let edx_features = fx
- .bcx
- .ins()
- .iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
- fx.bcx.ins().jump(
- dest,
- &[
- cpu_signature,
- additional_information,
- ecx_features,
- edx_features,
- ],
- );
+ let edx_features = fx.bcx.ins().iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
+ fx.bcx.ins().jump(dest, &[cpu_signature, additional_information, ecx_features, edx_features]);
fx.bcx.switch_to_block(leaf_8000_0000);
let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0);
let zero = fx.bcx.ins().iconst(types::I32, 0);
- fx.bcx
- .ins()
- .jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
+ fx.bcx.ins().jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
fx.bcx.switch_to_block(leaf_8000_0001);
let zero = fx.bcx.ins().iconst(types::I32, 0);
let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0);
let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0);
- fx.bcx
- .ins()
- .jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
+ fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
fx.bcx.switch_to_block(unsupported_leaf);
crate::trap::trap_unreachable(
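
For orientation on the leaf-0 block: CPUID returns the vendor ID split across EBX, EDX and ECX, which is why vend1 holds "ntel" while vend2 holds "ineI" even though they are declared out of order; the jump arguments then line up as eax, ebx, ecx, edx (that register mapping is my reading of the block parameters, so treat it as an assumption). A small check that the three words really spell the vendor string:

    fn vendor_string(ebx: u32, edx: u32, ecx: u32) -> String {
        // The vendor ID is stored little-endian, four ASCII bytes per register,
        // concatenated in the order EBX, EDX, ECX.
        let mut bytes = Vec::with_capacity(12);
        bytes.extend_from_slice(&ebx.to_le_bytes());
        bytes.extend_from_slice(&edx.to_le_bytes());
        bytes.extend_from_slice(&ecx.to_le_bytes());
        String::from_utf8(bytes).unwrap()
    }

    fn main() {
        let ebx = u32::from_le_bytes(*b"Genu");
        let edx = u32::from_le_bytes(*b"ineI");
        let ecx = u32::from_le_bytes(*b"ntel");
        assert_eq!(vendor_string(ebx, edx, ecx), "GenuineIntel");
    }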
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
index d58e4d4..83c91f7 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -6,7 +6,7 @@
use rustc_middle::ty::subst::SubstsRef;
pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
substs: SubstsRef<'tcx>,
args: &[mir::Operand<'tcx>],
@@ -53,7 +53,7 @@
};
llvm.x86.sse2.cmp.ps | llvm.x86.sse2.cmp.pd, (c x, c y, o kind) {
let kind_const = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
- let flt_cc = match kind_const.val.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
+ let flt_cc = match kind_const.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
0 => FloatCC::Equal,
1 => FloatCC::LessThan,
2 => FloatCC::LessThanOrEqual,
@@ -84,7 +84,7 @@
llvm.x86.sse2.psrli.d, (c a, o imm8) {
let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
- let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+ let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
_ => fx.bcx.ins().iconst(types::I32, 0),
};
@@ -94,7 +94,7 @@
llvm.x86.sse2.pslli.d, (c a, o imm8) {
let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
- let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+ let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
_ => fx.bcx.ins().iconst(types::I32, 0),
};
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 8946ac4..39e047a 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -9,6 +9,7 @@
pub(crate) use llvm::codegen_llvm_intrinsic_call;
use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
use rustc_middle::ty::print::with_no_trimmed_paths;
macro intrinsic_pat {
@@ -112,38 +113,6 @@
}
}
-macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
- crate::atomic_shim::lock_global_lock($fx);
-
- let clif_ty = $fx.clif_type($T).unwrap();
- let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
- let new = $fx.bcx.ins().$op(old, $src);
- $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
- $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
-
- crate::atomic_shim::unlock_global_lock($fx);
-}
-
-macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
- crate::atomic_shim::lock_global_lock($fx);
-
- // Read old
- let clif_ty = $fx.clif_type($T).unwrap();
- let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
-
- // Compare
- let is_eq = $fx.bcx.ins().icmp(IntCC::SignedGreaterThan, old, $src);
- let new = $fx.bcx.ins().select(is_eq, old, $src);
-
- // Write new
- $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
-
- let ret_val = CValue::by_val(old, $ret.layout());
- $ret.write_cvalue($fx, ret_val);
-
- crate::atomic_shim::unlock_global_lock($fx);
-}
-
macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
match $ty.kind() {
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -184,12 +153,12 @@
}
}
-fn simd_for_each_lane<'tcx, M: Module>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
+fn simd_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
val: CValue<'tcx>,
ret: CPlace<'tcx>,
f: impl Fn(
- &mut FunctionCx<'_, 'tcx, M>,
+ &mut FunctionCx<'_, '_, 'tcx>,
TyAndLayout<'tcx>,
TyAndLayout<'tcx>,
Value,
@@ -213,13 +182,13 @@
}
}
-fn simd_pair_for_each_lane<'tcx, M: Module>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
+fn simd_pair_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
x: CValue<'tcx>,
y: CValue<'tcx>,
ret: CPlace<'tcx>,
f: impl Fn(
- &mut FunctionCx<'_, 'tcx, M>,
+ &mut FunctionCx<'_, '_, 'tcx>,
TyAndLayout<'tcx>,
TyAndLayout<'tcx>,
Value,
@@ -246,11 +215,11 @@
}
}
-fn simd_reduce<'tcx, M: Module>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
+fn simd_reduce<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
val: CValue<'tcx>,
ret: CPlace<'tcx>,
- f: impl Fn(&mut FunctionCx<'_, 'tcx, M>, TyAndLayout<'tcx>, Value, Value) -> Value,
+ f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
) {
let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
let lane_layout = fx.layout_of(lane_ty);
@@ -258,20 +227,19 @@
let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
for lane_idx in 1..lane_count {
- let lane = val
- .value_field(fx, mir::Field::new(lane_idx.try_into().unwrap()))
- .load_scalar(fx);
+ let lane =
+ val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
res_val = f(fx, lane_layout, res_val, lane);
}
let res = CValue::by_val(res_val, lane_layout);
ret.write_cvalue(fx, res);
}
-fn simd_reduce_bool<'tcx, M: Module>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
+fn simd_reduce_bool<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
val: CValue<'tcx>,
ret: CPlace<'tcx>,
- f: impl Fn(&mut FunctionCx<'_, 'tcx, M>, Value, Value) -> Value,
+ f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
assert!(ret.layout().ty.is_bool());
@@ -279,9 +247,8 @@
let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
for lane_idx in 1..lane_count {
- let lane = val
- .value_field(fx, mir::Field::new(lane_idx.try_into().unwrap()))
- .load_scalar(fx);
+ let lane =
+ val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
res_val = f(fx, res_val, lane);
}
@@ -290,7 +257,7 @@
}
fn bool_to_zero_or_max_uint<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
val: Value,
) -> CValue<'tcx> {
@@ -424,7 +391,7 @@
}
pub(crate) fn codegen_intrinsic_call<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
instance: Instance<'tcx>,
args: &[mir::Operand<'tcx>],
destination: Option<(CPlace<'tcx>, BasicBlock)>,
@@ -912,136 +879,175 @@
};
_ if intrinsic.starts_with("atomic_fence"), () {
- crate::atomic_shim::lock_global_lock(fx);
- crate::atomic_shim::unlock_global_lock(fx);
+ fx.bcx.ins().fence();
};
_ if intrinsic.starts_with("atomic_singlethreadfence"), () {
- crate::atomic_shim::lock_global_lock(fx);
- crate::atomic_shim::unlock_global_lock(fx);
+ // FIXME use a compiler fence once Cranelift supports it
+ fx.bcx.ins().fence();
};
- _ if intrinsic.starts_with("atomic_load"), (c ptr) {
- crate::atomic_shim::lock_global_lock(fx);
+ _ if intrinsic.starts_with("atomic_load"), <T> (v ptr) {
+ validate_atomic_type!(fx, intrinsic, span, T);
+ let ty = fx.clif_type(T).unwrap();
- let inner_layout =
- fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
- validate_atomic_type!(fx, intrinsic, span, inner_layout.ty);
- let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
+
+ let val = CValue::by_val(val, fx.layout_of(T));
ret.write_cvalue(fx, val);
-
- crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
- crate::atomic_shim::lock_global_lock(fx);
+ let val = val.load_scalar(fx);
- let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
- dest.write_cvalue(fx, val);
-
- crate::atomic_shim::unlock_global_lock(fx);
+ fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
};
- _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, T);
+ _ if intrinsic.starts_with("atomic_xchg"), (v ptr, c new) {
+ let layout = new.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
- crate::atomic_shim::lock_global_lock(fx);
+ let new = new.load_scalar(fx);
- // Read old
- let clif_ty = fx.clif_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
- ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
- // Write new
- let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
- dest.write_cvalue(fx, src);
-
- crate::atomic_shim::unlock_global_lock(fx);
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
- validate_atomic_type!(fx, intrinsic, span, T);
+ _ if intrinsic.starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+ let layout = new.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
let test_old = test_old.load_scalar(fx);
let new = new.load_scalar(fx);
- crate::atomic_shim::lock_global_lock(fx);
-
- // Read old
- let clif_ty = fx.clif_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
-
- // Compare
+ let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
- let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
-
- // Write new
- fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
- ret.write_cvalue(fx, ret_val);
-
- crate::atomic_shim::unlock_global_lock(fx);
+ ret.write_cvalue(fx, ret_val)
};
- _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, c amount) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ _ if intrinsic.starts_with("atomic_xadd"), (v ptr, c amount) {
+ let layout = amount.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
let amount = amount.load_scalar(fx);
- atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, c amount) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ _ if intrinsic.starts_with("atomic_xsub"), (v ptr, c amount) {
+ let layout = amount.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
let amount = amount.load_scalar(fx);
- atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
- let src = src.load_scalar(fx);
- atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
- };
- _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, T);
+ _ if intrinsic.starts_with("atomic_and"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
let src = src.load_scalar(fx);
- crate::atomic_shim::lock_global_lock(fx);
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
- let clif_ty = fx.clif_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
- let and = fx.bcx.ins().band(old, src);
- let new = fx.bcx.ins().bnot(and);
- fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
- ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.starts_with("atomic_or"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
- crate::atomic_shim::unlock_global_lock(fx);
- };
- _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
let src = src.load_scalar(fx);
- atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ _ if intrinsic.starts_with("atomic_xor"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
let src = src.load_scalar(fx);
- atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ // FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
+ _ if intrinsic.starts_with("atomic_nand"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
let src = src.load_scalar(fx);
- atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ _ if intrinsic.starts_with("atomic_max"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
let src = src.load_scalar(fx);
- atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ _ if intrinsic.starts_with("atomic_umax"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
let src = src.load_scalar(fx);
- atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ _ if intrinsic.starts_with("atomic_min"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
let src = src.load_scalar(fx);
- atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.starts_with("atomic_umin"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
minnumf32, (v a, v b) {
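
The atomic_* rewrites above replace the removed global-lock shim with single Cranelift atomic instructions (atomic_load, atomic_store, atomic_cas, and atomic_rmw with an AtomicRmwOp). In plain Rust terms the read-modify-write intrinsics behave like the std fetch_* operations, returning the previous value; a semantic sketch for atomic_xadd (not cg_clif code):

    use std::sync::atomic::{AtomicU32, Ordering};

    // One atomic read-modify-write that returns the old value, analogous to
    // AtomicRmwOp::Add on a MemFlags::trusted() access in the generated CLIF.
    fn xadd(counter: &AtomicU32, amount: u32) -> u32 {
        counter.fetch_add(amount, Ordering::SeqCst)
    }

    fn main() {
        let counter = AtomicU32::new(40);
        assert_eq!(xadd(&counter, 2), 40); // the previous value comes back
        assert_eq!(counter.load(Ordering::SeqCst), 42);
    }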
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index e0eb5c5..86df71a 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -4,7 +4,7 @@
use crate::prelude::*;
pub(super) fn codegen_simd_intrinsic_call<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
instance: Instance<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
@@ -85,8 +85,8 @@
use rustc_middle::mir::interpret::*;
let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
- let idx_bytes = match idx_const.val {
- ty::ConstKind::Value(ConstValue::ByRef { alloc, offset }) => {
+ let idx_bytes = match idx_const {
+ ConstValue::ByRef { alloc, offset } => {
let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
let size = Size::from_bytes(4 * u64::from(ret_lane_count) /* size_of([u32; ret_lane_count]) */);
alloc.get_bytes(fx, ptr, size).unwrap()
@@ -130,7 +130,7 @@
);
};
- let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
if idx >= lane_count.into() {
fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
@@ -159,7 +159,7 @@
return;
};
- let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
if idx >= lane_count.into() {
fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
@@ -276,5 +276,6 @@
// simd_bitmask
// simd_select
// simd_rem
+ // simd_neg
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index 1707504..8edb883 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -5,15 +5,13 @@
associated_type_bounds,
never_type,
try_blocks,
- hash_drain_filter,
- str_split_once
+ box_patterns,
+ hash_drain_filter
)]
#![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)]
#![warn(unreachable_pub)]
-#[cfg(feature = "jit")]
-extern crate libc;
extern crate snap;
#[macro_use]
extern crate rustc_middle;
@@ -54,7 +52,6 @@
mod allocator;
mod analyze;
mod archive;
-mod atomic_shim;
mod backend;
mod base;
mod cast;
@@ -130,9 +127,9 @@
}
}
-struct CodegenCx<'tcx, M: Module> {
+struct CodegenCx<'m, 'tcx: 'm> {
tcx: TyCtxt<'tcx>,
- module: M,
+ module: &'m mut dyn Module,
global_asm: String,
constants_cx: ConstantCx,
cached_context: Context,
@@ -141,14 +138,20 @@
unwind_context: UnwindContext<'tcx>,
}
-impl<'tcx, M: Module> CodegenCx<'tcx, M> {
- fn new(tcx: TyCtxt<'tcx>, module: M, debug_info: bool, pic_eh_frame: bool) -> Self {
- let unwind_context = UnwindContext::new(tcx, module.isa(), pic_eh_frame);
- let debug_context = if debug_info {
- Some(DebugContext::new(tcx, module.isa()))
- } else {
- None
- };
+impl<'m, 'tcx> CodegenCx<'m, 'tcx> {
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ backend_config: BackendConfig,
+ module: &'m mut dyn Module,
+ debug_info: bool,
+ ) -> Self {
+ let unwind_context = UnwindContext::new(
+ tcx,
+ module.isa(),
+ matches!(backend_config.codegen_mode, CodegenMode::Aot),
+ );
+ let debug_context =
+ if debug_info { Some(DebugContext::new(tcx, module.isa())) } else { None };
CodegenCx {
tcx,
module,
@@ -161,14 +164,9 @@
}
}
- fn finalize(mut self) -> (M, String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
- self.constants_cx.finalize(self.tcx, &mut self.module);
- (
- self.module,
- self.global_asm,
- self.debug_context,
- self.unwind_context,
- )
+ fn finalize(self) -> (String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
+ self.constants_cx.finalize(self.tcx, self.module);
+ (self.global_asm, self.debug_context, self.unwind_context)
}
}
@@ -303,14 +301,7 @@
flags_builder.enable("is_pic").unwrap();
flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
flags_builder
- .set(
- "enable_verifier",
- if cfg!(debug_assertions) {
- "true"
- } else {
- "false"
- },
- )
+ .set("enable_verifier", if cfg!(debug_assertions) { "true" } else { "false" })
.unwrap();
let tls_model = match target_triple.binary_format {
@@ -339,11 +330,7 @@
let flags = settings::Flags::new(flags_builder);
- let variant = if cfg!(feature = "oldbe") {
- cranelift_codegen::isa::BackendVariant::Legacy
- } else {
- cranelift_codegen::isa::BackendVariant::MachInst
- };
+ let variant = cranelift_codegen::isa::BackendVariant::MachInst;
let mut isa_builder = cranelift_codegen::isa::lookup_variant(target_triple, variant).unwrap();
// Don't use "haswell", as it implies `has_lzcnt`.macOS CI is still at Ivy Bridge EP, so `lzcnt`
// is interpreted as `bsr`.
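A simplified sketch of the CodegenCx change in the lib.rs hunks above: instead of being generic over a `Module` implementation, the context now borrows a trait object, so code using it no longer needs an `impl Module` type parameter. The `Module` trait here is a stand-in, not the cranelift-module API.

trait Module {
    fn declare(&mut self, name: &str);
}

struct ObjectModule;
impl Module for ObjectModule {
    fn declare(&mut self, name: &str) {
        println!("declaring {}", name);
    }
}

// Before: struct CodegenCx<'tcx, M: Module> { module: M, .. }
// After: the context holds a mutable borrow of a module trait object.
struct CodegenCx<'m> {
    module: &'m mut dyn Module,
}

fn use_cx(cx: &mut CodegenCx<'_>) {
    cx.module.declare("main");
}

fn main() {
    let mut module = ObjectModule;
    let mut cx = CodegenCx { module: &mut module };
    use_cx(&mut cx);
}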
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
index b193cea..62e551b 100644
--- a/compiler/rustc_codegen_cranelift/src/main_shim.rs
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -9,7 +9,6 @@
tcx: TyCtxt<'_>,
module: &mut impl Module,
unwind_context: &mut UnwindContext<'_>,
- use_jit: bool,
) {
let (main_def_id, use_start_lang_item) = match tcx.entry_fn(LOCAL_CRATE) {
Some((def_id, entry_ty)) => (
@@ -27,14 +26,7 @@
return;
}
- create_entry_fn(
- tcx,
- module,
- unwind_context,
- main_def_id,
- use_start_lang_item,
- use_jit,
- );
+ create_entry_fn(tcx, module, unwind_context, main_def_id, use_start_lang_item);
fn create_entry_fn(
tcx: TyCtxt<'_>,
@@ -42,7 +34,6 @@
unwind_context: &mut UnwindContext<'_>,
rust_main_def_id: DefId,
use_start_lang_item: bool,
- use_jit: bool,
) {
let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
// Given that `main()` has no arguments,
@@ -57,23 +48,17 @@
AbiParam::new(m.target_config().pointer_type()),
AbiParam::new(m.target_config().pointer_type()),
],
- returns: vec![AbiParam::new(
- m.target_config().pointer_type(), /*isize*/
- )],
+ returns: vec![AbiParam::new(m.target_config().pointer_type() /*isize*/)],
call_conv: CallConv::triple_default(m.isa().triple()),
};
- let cmain_func_id = m
- .declare_function("main", Linkage::Export, &cmain_sig)
- .unwrap();
+ let cmain_func_id = m.declare_function("main", Linkage::Export, &cmain_sig).unwrap();
let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
let main_name = tcx.symbol_name(instance).name.to_string();
let main_sig = get_function_sig(tcx, m.isa().triple(), instance);
- let main_func_id = m
- .declare_function(&main_name, Linkage::Import, &main_sig)
- .unwrap();
+ let main_func_id = m.declare_function(&main_name, Linkage::Import, &main_sig).unwrap();
let mut ctx = Context::new();
ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
@@ -86,8 +71,6 @@
let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
- crate::atomic_shim::init_global_lock(m, &mut bcx, use_jit);
-
let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
let call_inst = if use_start_lang_item {
@@ -103,9 +86,7 @@
.polymorphize(tcx);
let start_func_id = import_function(tcx, m, start_instance);
- let main_val = bcx
- .ins()
- .func_addr(m.target_config().pointer_type(), main_func_ref);
+ let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv])
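A rough Rust equivalent of the shim that create_entry_fn emits in Cranelift IR above: a C-ABI `main` forwarding argc/argv to either the `start` lang item or the user's `main`. The function names here are illustrative, not the real symbols.

fn rust_main() {
    println!("hello from rust_main");
}

// Stand-in for the `start` lang item: receives the user's main plus argc/argv
// and returns the process exit code as an isize.
fn lang_start(main: fn(), _argc: isize, _argv: *const *const u8) -> isize {
    main();
    0
}

// The generated entry point, written here as ordinary Rust instead of CLIF.
extern "C" fn c_main(argc: isize, argv: *const *const u8) -> isize {
    lang_start(rust_main, argc, argv)
}

fn main() {
    // Call the shim directly just to exercise it in this sketch.
    let code = c_main(0, std::ptr::null());
    assert_eq!(code, 0);
}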
diff --git a/compiler/rustc_codegen_cranelift/src/metadata.rs b/compiler/rustc_codegen_cranelift/src/metadata.rs
index 2e3b9fb..190c4f4 100644
--- a/compiler/rustc_codegen_cranelift/src/metadata.rs
+++ b/compiler/rustc_codegen_cranelift/src/metadata.rs
@@ -94,9 +94,7 @@
assert!(kind == MetadataKind::Compressed);
let mut compressed = tcx.metadata_encoding_version();
- FrameEncoder::new(&mut compressed)
- .write_all(&metadata.raw_data)
- .unwrap();
+ FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap();
product.add_rustc_section(
rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx),
diff --git a/compiler/rustc_codegen_cranelift/src/num.rs b/compiler/rustc_codegen_cranelift/src/num.rs
index d1d2b3b..da49e1c 100644
--- a/compiler/rustc_codegen_cranelift/src/num.rs
+++ b/compiler/rustc_codegen_cranelift/src/num.rs
@@ -41,7 +41,7 @@
}
fn codegen_compare_bin_op<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
signed: bool,
lhs: Value,
@@ -54,7 +54,7 @@
}
pub(crate) fn codegen_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
@@ -93,17 +93,12 @@
ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
- _ => unreachable!(
- "{:?}({:?}, {:?})",
- bin_op,
- in_lhs.layout().ty,
- in_rhs.layout().ty
- ),
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
}
}
pub(crate) fn codegen_bool_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
@@ -124,7 +119,7 @@
}
pub(crate) fn codegen_int_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
@@ -185,19 +180,14 @@
}
}
// Compare binops are handled by `codegen_binop`.
- _ => unreachable!(
- "{:?}({:?}, {:?})",
- bin_op,
- in_lhs.layout().ty,
- in_rhs.layout().ty
- ),
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
};
CValue::by_val(val, in_lhs.layout())
}
pub(crate) fn codegen_checked_int_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
@@ -268,9 +258,7 @@
let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
let val = fx.bcx.ins().imul(lhs, rhs);
let has_underflow =
- fx.bcx
- .ins()
- .icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
+ fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
let has_overflow = fx.bcx.ins().icmp_imm(
IntCC::SignedGreaterThan,
val,
@@ -309,10 +297,7 @@
let val = fx.bcx.ins().ishl(lhs, actual_shift);
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
- let has_overflow = fx
- .bcx
- .ins()
- .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow)
}
BinOp::Shr => {
@@ -326,38 +311,20 @@
};
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
- let has_overflow = fx
- .bcx
- .ins()
- .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow)
}
- _ => bug!(
- "binop {:?} on checked int/uint lhs: {:?} rhs: {:?}",
- bin_op,
- in_lhs,
- in_rhs
- ),
+ _ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
};
let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
- // FIXME directly write to result place instead
- let out_place = CPlace::new_stack_slot(
- fx,
- fx.layout_of(
- fx.tcx
- .mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()),
- ),
- );
- let out_layout = out_place.layout();
- out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));
-
- out_place.to_cvalue(fx)
+ let out_layout = fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()));
+ CValue::by_val_pair(res, has_overflow, out_layout)
}
pub(crate) fn codegen_float_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
@@ -402,7 +369,7 @@
}
pub(crate) fn codegen_ptr_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
@@ -452,9 +419,7 @@
let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
let ptr_cmp =
- fx.bcx
- .ins()
- .icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
+ fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
let extra_cmp = fx.bcx.ins().icmp(
bin_op_to_intcc(bin_op, false).unwrap(),
lhs_extra,
@@ -466,9 +431,6 @@
_ => panic!("bin_op {:?} on ptr", bin_op),
};
- CValue::by_val(
- fx.bcx.ins().bint(types::I8, res),
- fx.layout_of(fx.tcx.types.bool),
- )
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), fx.layout_of(fx.tcx.types.bool))
}
}
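The checked int binops above now lower directly to a `(result, overflowed)` pair with the layout of `(T, bool)`, the same contract as the standard `overflowing_*` helpers. A small sketch of the shift case, where overflow means the shift amount exceeds `bits - 1`.

fn checked_shl_u32(lhs: u32, rhs: u32) -> (u32, bool) {
    let max_shift = u32::BITS - 1;
    let has_overflow = rhs > max_shift;
    // Like the generated code, the shift itself only uses the low bits of rhs.
    let val = lhs.wrapping_shl(rhs);
    (val, has_overflow)
}

fn main() {
    assert_eq!(checked_shl_u32(1, 3), (8, false));
    assert_eq!(checked_shl_u32(1, 40), (1 << (40 % 32), true));
    // Same shape as what the standard library returns for a checked add.
    assert_eq!(i32::MAX.overflowing_add(1), (i32::MIN, true));
}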
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/code_layout.rs b/compiler/rustc_codegen_cranelift/src/optimize/code_layout.rs
index f027320..ca9ff15 100644
--- a/compiler/rustc_codegen_cranelift/src/optimize/code_layout.rs
+++ b/compiler/rustc_codegen_cranelift/src/optimize/code_layout.rs
@@ -15,10 +15,7 @@
// bytecodealliance/cranelift#1339 is implemented.
let mut block_insts = FxHashMap::default();
- for block in cold_blocks
- .keys()
- .filter(|&block| cold_blocks.contains(block))
- {
+ for block in cold_blocks.keys().filter(|&block| cold_blocks.contains(block)) {
let insts = ctx.func.layout.block_insts(block).collect::<Vec<_>>();
for &inst in &insts {
ctx.func.layout.remove_inst(inst);
@@ -28,10 +25,7 @@
}
// And then append them at the back again.
- for block in cold_blocks
- .keys()
- .filter(|&block| cold_blocks.contains(block))
- {
+ for block in cold_blocks.keys().filter(|&block| cold_blocks.contains(block)) {
ctx.func.layout.append_block(block);
for inst in block_insts.remove(&block).unwrap() {
ctx.func.layout.append_inst(inst, block);
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/mod.rs b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
index 3ce7f8c..389f50e 100644
--- a/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
@@ -19,7 +19,12 @@
if tcx.sess.opts.optimize == rustc_session::config::OptLevel::No {
return; // FIXME classify optimizations over opt levels
}
- self::stack2reg::optimize_function(ctx, clif_comments);
+
+ // FIXME(#1142) stack2reg miscompiles lewton
+ if false {
+ self::stack2reg::optimize_function(ctx, clif_comments);
+ }
+
crate::pretty_clif::write_clif_file(tcx, "stack2reg", None, instance, &ctx, &*clif_comments);
crate::base::verify_func(tcx, &*clif_comments, &ctx.func);
}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
index a575ed8..b95e2d7 100644
--- a/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
+++ b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
@@ -10,10 +10,7 @@
pub(crate) fn maybe_unwrap_bint(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
match bcx.func.dfg[arg_inst] {
- InstructionData::Unary {
- opcode: Opcode::Bint,
- arg,
- } => arg,
+ InstructionData::Unary { opcode: Opcode::Bint, arg } => arg,
_ => arg,
}
} else {
@@ -54,12 +51,7 @@
match bcx.func.dfg[arg_inst] {
// This is the lowering of Rvalue::Not
- InstructionData::Load {
- opcode: Opcode::Load,
- arg: ptr,
- flags,
- offset,
- } => {
+ InstructionData::Load { opcode: Opcode::Load, arg: ptr, flags, offset } => {
// Using `load.i8 + uextend.i32` would legalize to `uload8 + ireduce.i8 +
// uextend.i32`. Just `uload8` is much faster.
match bcx.func.dfg.ctrl_typevar(arg_inst) {
@@ -95,20 +87,14 @@
};
match bcx.func.dfg[arg_inst] {
- InstructionData::UnaryBool {
- opcode: Opcode::Bconst,
- imm,
- } => {
+ InstructionData::UnaryBool { opcode: Opcode::Bconst, imm } => {
if test_zero {
Some(!imm)
} else {
Some(imm)
}
}
- InstructionData::UnaryImm {
- opcode: Opcode::Iconst,
- imm,
- } => {
+ InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => {
if test_zero {
Some(imm.bits() == 0)
} else {
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/stack2reg.rs b/compiler/rustc_codegen_cranelift/src/optimize/stack2reg.rs
index 3c939d5..d111f37 100644
--- a/compiler/rustc_codegen_cranelift/src/optimize/stack2reg.rs
+++ b/compiler/rustc_codegen_cranelift/src/optimize/stack2reg.rs
@@ -175,16 +175,14 @@
}
}
- OptimizeContext {
- ctx,
- stack_slot_usage_map,
- }
+ OptimizeContext { ctx, stack_slot_usage_map }
}
}
pub(super) fn optimize_function(
ctx: &mut Context,
- #[cfg_attr(not(debug_assertions), allow(unused_variables))] clif_comments: &mut crate::pretty_clif::CommentWriter,
+ #[cfg_attr(not(debug_assertions), allow(unused_variables))]
+ clif_comments: &mut crate::pretty_clif::CommentWriter,
) {
combine_stack_addr_with_load_store(&mut ctx.func);
@@ -296,12 +294,7 @@
while let Some(_block) = cursor.next_block() {
while let Some(inst) = cursor.next_inst() {
match cursor.func.dfg[inst] {
- InstructionData::Load {
- opcode: Opcode::Load,
- arg: addr,
- flags: _,
- offset,
- } => {
+ InstructionData::Load { opcode: Opcode::Load, arg: addr, flags: _, offset } => {
if cursor.func.dfg.ctrl_typevar(inst) == types::I128
|| cursor.func.dfg.ctrl_typevar(inst).is_vector()
{
@@ -391,20 +384,14 @@
stack_slot_users
.stack_addr
.drain_filter(|inst| {
- stack_addr_load_insts_users
- .get(inst)
- .map(|users| users.is_empty())
- .unwrap_or(true)
+ stack_addr_load_insts_users.get(inst).map(|users| users.is_empty()).unwrap_or(true)
})
.for_each(|inst| StackSlotUsage::remove_unused_stack_addr(&mut func, inst));
stack_slot_users
.stack_load
.drain_filter(|inst| {
- stack_addr_load_insts_users
- .get(inst)
- .map(|users| users.is_empty())
- .unwrap_or(true)
+ stack_addr_load_insts_users.get(inst).map(|users| users.is_empty()).unwrap_or(true)
})
.for_each(|inst| StackSlotUsage::remove_unused_load(&mut func, inst));
}
@@ -415,11 +402,8 @@
addr: Value,
) -> Option<(StackSlot, Offset32)> {
if let ValueDef::Result(addr_inst, 0) = func.dfg.value_def(addr) {
- if let InstructionData::StackLoad {
- opcode: Opcode::StackAddr,
- stack_slot,
- offset,
- } = func.dfg[addr_inst]
+ if let InstructionData::StackLoad { opcode: Opcode::StackAddr, stack_slot, offset } =
+ func.dfg[addr_inst]
{
return Some((stack_slot, offset));
}
@@ -437,16 +421,8 @@
fn spatial_overlap(func: &Function, src: Inst, dest: Inst) -> SpatialOverlap {
fn inst_info(func: &Function, inst: Inst) -> (StackSlot, Offset32, u32) {
match func.dfg[inst] {
- InstructionData::StackLoad {
- opcode: Opcode::StackAddr,
- stack_slot,
- offset,
- }
- | InstructionData::StackLoad {
- opcode: Opcode::StackLoad,
- stack_slot,
- offset,
- }
+ InstructionData::StackLoad { opcode: Opcode::StackAddr, stack_slot, offset }
+ | InstructionData::StackLoad { opcode: Opcode::StackLoad, stack_slot, offset }
| InstructionData::StackStore {
opcode: Opcode::StackStore,
stack_slot,
@@ -471,10 +447,7 @@
}
let src_end: i64 = src_offset.try_add_i64(i64::from(src_size)).unwrap().into();
- let dest_end: i64 = dest_offset
- .try_add_i64(i64::from(dest_size))
- .unwrap()
- .into();
+ let dest_end: i64 = dest_offset.try_add_i64(i64::from(dest_size)).unwrap().into();
if src_end <= dest_offset.into() || dest_end <= src_offset.into() {
return SpatialOverlap::No;
}
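A sketch of the spatial-overlap test in the stack2reg hunk above: two stack accesses do not overlap when one interval ends at or before the other begins.

fn intervals_overlap(a_start: i64, a_size: i64, b_start: i64, b_size: i64) -> bool {
    let a_end = a_start + a_size;
    let b_end = b_start + b_size;
    !(a_end <= b_start || b_end <= a_start)
}

fn main() {
    assert!(!intervals_overlap(0, 8, 8, 8)); // adjacent slots, no overlap
    assert!(intervals_overlap(0, 8, 4, 8));  // partial overlap
}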
diff --git a/compiler/rustc_codegen_cranelift/src/pointer.rs b/compiler/rustc_codegen_cranelift/src/pointer.rs
index b2036d7b..88a78f3 100644
--- a/compiler/rustc_codegen_cranelift/src/pointer.rs
+++ b/compiler/rustc_codegen_cranelift/src/pointer.rs
@@ -23,35 +23,20 @@
impl Pointer {
pub(crate) fn new(addr: Value) -> Self {
- Pointer {
- base: PointerBase::Addr(addr),
- offset: Offset32::new(0),
- }
+ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
}
pub(crate) fn stack_slot(stack_slot: StackSlot) -> Self {
- Pointer {
- base: PointerBase::Stack(stack_slot),
- offset: Offset32::new(0),
- }
+ Pointer { base: PointerBase::Stack(stack_slot), offset: Offset32::new(0) }
}
- pub(crate) fn const_addr<'a, 'tcx>(
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- addr: i64,
- ) -> Self {
+ pub(crate) fn const_addr(fx: &mut FunctionCx<'_, '_, '_>, addr: i64) -> Self {
let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
- Pointer {
- base: PointerBase::Addr(addr),
- offset: Offset32::new(0),
- }
+ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
}
pub(crate) fn dangling(align: Align) -> Self {
- Pointer {
- base: PointerBase::Dangling(align),
- offset: Offset32::new(0),
- }
+ Pointer { base: PointerBase::Dangling(align), offset: Offset32::new(0) }
}
#[cfg(debug_assertions)]
@@ -59,46 +44,28 @@
(self.base, self.offset)
}
- pub(crate) fn get_addr<'a, 'tcx>(self, fx: &mut FunctionCx<'a, 'tcx, impl Module>) -> Value {
+ pub(crate) fn get_addr(self, fx: &mut FunctionCx<'_, '_, '_>) -> Value {
match self.base {
PointerBase::Addr(base_addr) => {
let offset: i64 = self.offset.into();
- if offset == 0 {
- base_addr
- } else {
- fx.bcx.ins().iadd_imm(base_addr, offset)
- }
+ if offset == 0 { base_addr } else { fx.bcx.ins().iadd_imm(base_addr, offset) }
}
PointerBase::Stack(stack_slot) => {
- fx.bcx
- .ins()
- .stack_addr(fx.pointer_type, stack_slot, self.offset)
+ fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset)
}
- PointerBase::Dangling(align) => fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
+ PointerBase::Dangling(align) => {
+ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+ }
}
}
- pub(crate) fn offset<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- extra_offset: Offset32,
- ) -> Self {
+ pub(crate) fn offset(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Offset32) -> Self {
self.offset_i64(fx, extra_offset.into())
}
- pub(crate) fn offset_i64<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- extra_offset: i64,
- ) -> Self {
+ pub(crate) fn offset_i64(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: i64) -> Self {
if let Some(new_offset) = self.offset.try_add_i64(extra_offset) {
- Pointer {
- base: self.base,
- offset: new_offset,
- }
+ Pointer { base: self.base, offset: new_offset }
} else {
let base_offset: i64 = self.offset.into();
if let Some(new_offset) = base_offset.checked_add(extra_offset) {
@@ -107,16 +74,12 @@
PointerBase::Stack(stack_slot) => {
fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
}
- PointerBase::Dangling(align) => fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
+ PointerBase::Dangling(align) => {
+ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+ }
};
let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset);
- Pointer {
- base: PointerBase::Addr(addr),
- offset: Offset32::new(0),
- }
+ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
} else {
panic!(
"self.offset ({}) + extra_offset ({}) not representable in i64",
@@ -126,31 +89,22 @@
}
}
- pub(crate) fn offset_value<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- extra_offset: Value,
- ) -> Self {
+ pub(crate) fn offset_value(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Value) -> Self {
match self.base {
PointerBase::Addr(addr) => Pointer {
base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
offset: self.offset,
},
PointerBase::Stack(stack_slot) => {
- let base_addr = fx
- .bcx
- .ins()
- .stack_addr(fx.pointer_type, stack_slot, self.offset);
+ let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset);
Pointer {
base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)),
offset: Offset32::new(0),
}
}
PointerBase::Dangling(align) => {
- let addr = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
+ let addr =
+ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
Pointer {
base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
offset: self.offset,
@@ -159,46 +113,21 @@
}
}
- pub(crate) fn load<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- ty: Type,
- flags: MemFlags,
- ) -> Value {
+ pub(crate) fn load(self, fx: &mut FunctionCx<'_, '_, '_>, ty: Type, flags: MemFlags) -> Value {
match self.base {
PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset),
- PointerBase::Stack(stack_slot) => {
- if ty == types::I128 || ty.is_vector() {
- // WORKAROUND for stack_load.i128 and stack_load.iXxY not being implemented
- let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
- fx.bcx.ins().load(ty, flags, base_addr, self.offset)
- } else {
- fx.bcx.ins().stack_load(ty, stack_slot, self.offset)
- }
- }
+ PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_load(ty, stack_slot, self.offset),
PointerBase::Dangling(_align) => unreachable!(),
}
}
- pub(crate) fn store<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- value: Value,
- flags: MemFlags,
- ) {
+ pub(crate) fn store(self, fx: &mut FunctionCx<'_, '_, '_>, value: Value, flags: MemFlags) {
match self.base {
PointerBase::Addr(base_addr) => {
fx.bcx.ins().store(flags, value, base_addr, self.offset);
}
PointerBase::Stack(stack_slot) => {
- let val_ty = fx.bcx.func.dfg.value_type(value);
- if val_ty == types::I128 || val_ty.is_vector() {
- // WORKAROUND for stack_store.i128 and stack_store.iXxY not being implemented
- let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
- fx.bcx.ins().store(flags, value, base_addr, self.offset);
- } else {
- fx.bcx.ins().stack_store(value, stack_slot, self.offset);
- }
+ fx.bcx.ins().stack_store(value, stack_slot, self.offset);
}
PointerBase::Dangling(_align) => unreachable!(),
}
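A standalone sketch of the offset_i64 logic in the pointer.rs hunks above: prefer folding the extra offset into the 32-bit immediate, fall back to materializing an add when the sum still fits in i64, and report an error otherwise. Plain `i32` stands in for Cranelift's `Offset32` here.

#[derive(Debug, PartialEq)]
enum Folded {
    Immediate(i32),    // stayed a (base, imm32) pointer
    Materialized(i64), // needs an explicit iadd_imm with this constant
}

fn fold_offset(current: i32, extra: i64) -> Result<Folded, String> {
    if let Ok(extra32) = i32::try_from(extra) {
        if let Some(new) = current.checked_add(extra32) {
            return Ok(Folded::Immediate(new));
        }
    }
    match i64::from(current).checked_add(extra) {
        Some(new) => Ok(Folded::Materialized(new)),
        None => Err(format!(
            "self.offset ({}) + extra_offset ({}) not representable in i64",
            current, extra
        )),
    }
}

fn main() {
    assert_eq!(fold_offset(8, 16), Ok(Folded::Immediate(24)));
    assert_eq!(
        fold_offset(1, i64::from(i32::MAX)),
        Ok(Folded::Materialized(1 + i64::from(i32::MAX)))
    );
    assert!(fold_offset(1, i64::MAX).is_err());
}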
diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
index f4a15ab..9c91b92 100644
--- a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
+++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
@@ -79,20 +79,14 @@
vec![
format!("symbol {}", tcx.symbol_name(instance).name),
format!("instance {:?}", instance),
- format!(
- "abi {:?}",
- FnAbi::of_instance(&RevealAllLayoutCx(tcx), instance, &[])
- ),
+ format!("abi {:?}", FnAbi::of_instance(&RevealAllLayoutCx(tcx), instance, &[])),
String::new(),
]
} else {
vec![]
};
- CommentWriter {
- global_comments,
- entity_comments: FxHashMap::default(),
- }
+ CommentWriter { global_comments, entity_comments: FxHashMap::default() }
}
}
@@ -186,7 +180,7 @@
}
#[cfg(debug_assertions)]
-impl<M: Module> FunctionCx<'_, '_, M> {
+impl FunctionCx<'_, '_, '_> {
pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
self.clif_comments.add_global_comment(comment);
}
@@ -201,12 +195,7 @@
}
pub(crate) fn should_write_ir(tcx: TyCtxt<'_>) -> bool {
- cfg!(debug_assertions)
- || tcx
- .sess
- .opts
- .output_types
- .contains_key(&OutputType::LlvmAssembly)
+ tcx.sess.opts.output_types.contains_key(&OutputType::LlvmAssembly)
}
pub(crate) fn write_ir_file<'tcx>(
@@ -245,40 +234,33 @@
context: &cranelift_codegen::Context,
mut clif_comments: &CommentWriter,
) {
- write_ir_file(
- tcx,
- &format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix),
- |file| {
- let value_ranges = isa.map(|isa| {
- context
- .build_value_labels_ranges(isa)
- .expect("value location ranges")
- });
+ write_ir_file(tcx, &format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix), |file| {
+ let value_ranges =
+ isa.map(|isa| context.build_value_labels_ranges(isa).expect("value location ranges"));
- let mut clif = String::new();
- cranelift_codegen::write::decorate_function(
- &mut clif_comments,
- &mut clif,
- &context.func,
- &DisplayFunctionAnnotations {
- isa: Some(&*crate::build_isa(tcx.sess)),
- value_ranges: value_ranges.as_ref(),
- },
- )
- .unwrap();
+ let mut clif = String::new();
+ cranelift_codegen::write::decorate_function(
+ &mut clif_comments,
+ &mut clif,
+ &context.func,
+ &DisplayFunctionAnnotations {
+ isa: Some(&*crate::build_isa(tcx.sess)),
+ value_ranges: value_ranges.as_ref(),
+ },
+ )
+ .unwrap();
- writeln!(file, "test compile")?;
- writeln!(file, "set is_pic")?;
- writeln!(file, "set enable_simd")?;
- writeln!(file, "target {} haswell", crate::target_triple(tcx.sess))?;
- writeln!(file)?;
- file.write_all(clif.as_bytes())?;
- Ok(())
- },
- );
+ writeln!(file, "test compile")?;
+ writeln!(file, "set is_pic")?;
+ writeln!(file, "set enable_simd")?;
+ writeln!(file, "target {} haswell", crate::target_triple(tcx.sess))?;
+ writeln!(file)?;
+ file.write_all(clif.as_bytes())?;
+ Ok(())
+ });
}
-impl<M: Module> fmt::Debug for FunctionCx<'_, '_, M> {
+impl fmt::Debug for FunctionCx<'_, '_, '_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "{:?}", self.instance.substs)?;
writeln!(f, "{:?}", self.local_map)?;
diff --git a/compiler/rustc_codegen_cranelift/src/toolchain.rs b/compiler/rustc_codegen_cranelift/src/toolchain.rs
index 735c59d..484a9b6 100644
--- a/compiler/rustc_codegen_cranelift/src/toolchain.rs
+++ b/compiler/rustc_codegen_cranelift/src/toolchain.rs
@@ -71,12 +71,9 @@
flavor,
)),
(Some(linker), None) => {
- let stem = linker
- .file_stem()
- .and_then(|stem| stem.to_str())
- .unwrap_or_else(|| {
- sess.fatal("couldn't extract file stem from specified linker")
- });
+ let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| {
+ sess.fatal("couldn't extract file stem from specified linker")
+ });
let flavor = if stem == "emcc" {
LinkerFlavor::Em
@@ -105,11 +102,7 @@
// linker and linker flavor specified via command line have precedence over what the target
// specification specifies
- if let Some(ret) = infer_from(
- sess,
- sess.opts.cg.linker.clone(),
- sess.opts.cg.linker_flavor,
- ) {
+ if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), sess.opts.cg.linker_flavor) {
return ret;
}
diff --git a/compiler/rustc_codegen_cranelift/src/trap.rs b/compiler/rustc_codegen_cranelift/src/trap.rs
index 67495c7..bb63d72 100644
--- a/compiler/rustc_codegen_cranelift/src/trap.rs
+++ b/compiler/rustc_codegen_cranelift/src/trap.rs
@@ -2,7 +2,7 @@
use crate::prelude::*;
-fn codegen_print(fx: &mut FunctionCx<'_, '_, impl Module>, msg: &str) {
+fn codegen_print(fx: &mut FunctionCx<'_, '_, '_>, msg: &str) {
let puts = fx
.cx
.module
@@ -29,7 +29,7 @@
}
/// Trap code: user1
-pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
+pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
codegen_print(fx, msg.as_ref());
fx.bcx.ins().trap(TrapCode::User(1));
}
@@ -38,7 +38,7 @@
/// so you can **not** add instructions to it afterwards.
///
/// Trap code: user65535
-pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
+pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
codegen_print(fx, msg.as_ref());
fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
@@ -47,7 +47,7 @@
///
/// Trap code: user65535
pub(crate) fn trap_unreachable_ret_value<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
dest_layout: TyAndLayout<'tcx>,
msg: impl AsRef<str>,
) -> CValue<'tcx> {
@@ -62,7 +62,7 @@
/// to it afterwards.
///
/// Trap code: user65535
-pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
+pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
codegen_print(fx, msg.as_ref());
let true_ = fx.bcx.ins().iconst(types::I32, 1);
fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
@@ -72,7 +72,7 @@
///
/// Trap code: user65535
pub(crate) fn trap_unimplemented_ret_value<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
dest_layout: TyAndLayout<'tcx>,
msg: impl AsRef<str>,
) -> CValue<'tcx> {
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
index c77ff5d..042583c 100644
--- a/compiler/rustc_codegen_cranelift/src/unsize.rs
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -13,19 +13,18 @@
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
pub(crate) fn unsized_info<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<Value>,
) -> Value {
let (source, target) =
- fx.tcx
- .struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
+ fx.tcx.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
match (&source.kind(), &target.kind()) {
- (&ty::Array(_, len), &ty::Slice(_)) => fx.bcx.ins().iconst(
- fx.pointer_type,
- len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64,
- ),
+ (&ty::Array(_, len), &ty::Slice(_)) => fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64),
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require an actual
@@ -35,17 +34,13 @@
(_, &ty::Dynamic(ref data, ..)) => {
crate::vtable::get_vtable(fx, fx.layout_of(source), data.principal())
}
- _ => bug!(
- "unsized_info: invalid unsizing {:?} -> {:?}",
- source,
- target
- ),
+ _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
fn unsize_thin_ptr<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
src: Value,
src_layout: TyAndLayout<'tcx>,
dst_layout: TyAndLayout<'tcx>,
@@ -89,24 +84,22 @@
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub(crate) fn coerce_unsized_into<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
src: CValue<'tcx>,
dst: CPlace<'tcx>,
) {
let src_ty = src.layout().ty;
let dst_ty = dst.layout().ty;
let mut coerce_ptr = || {
- let (base, info) = if fx
- .layout_of(src.layout().ty.builtin_deref(true).unwrap().ty)
- .is_unsized()
- {
- // fat-ptr to fat-ptr unsize preserves the vtable
- // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
- src.load_scalar_pair(fx)
- } else {
- let base = src.load_scalar(fx);
- unsize_thin_ptr(fx, base, src.layout(), dst.layout())
- };
+ let (base, info) =
+ if fx.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty).is_unsized() {
+ // fat-ptr to fat-ptr unsize preserves the vtable
+ // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
+ src.load_scalar_pair(fx)
+ } else {
+ let base = src.load_scalar(fx);
+ unsize_thin_ptr(fx, base, src.layout(), dst.layout())
+ };
dst.write_cvalue(fx, CValue::by_val_pair(base, info, dst.layout()));
};
match (&src_ty.kind(), &dst_ty.kind()) {
@@ -131,39 +124,26 @@
}
}
}
- _ => bug!(
- "coerce_unsized_into: invalid coercion {:?} -> {:?}",
- src_ty,
- dst_ty
- ),
+ _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
}
}
// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
pub(crate) fn size_and_align_of_dst<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
info: Value,
) -> (Value, Value) {
if !layout.is_unsized() {
- let size = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, layout.size.bytes() as i64);
- let align = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
+ let size = fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64);
+ let align = fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
return (size, align);
}
match layout.ty.kind() {
ty::Dynamic(..) => {
// load size/align from vtable
- (
- crate::vtable::size_of_obj(fx, info),
- crate::vtable::min_align_of_obj(fx, info),
- )
+ (crate::vtable::size_of_obj(fx, info), crate::vtable::min_align_of_obj(fx, info))
}
ty::Slice(_) | ty::Str => {
let unit = layout.field(fx, 0);
@@ -171,9 +151,7 @@
// times the unit size.
(
fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
- fx.bcx
- .ins()
- .iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
+ fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
)
}
_ => {
@@ -211,10 +189,7 @@
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
- let cmp = fx
- .bcx
- .ins()
- .icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
+ let cmp = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
// Issue #27023: must add any necessary padding to `size`
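A sketch of the size/align computation that size_and_align_of_dst performs at runtime for a struct with an unsized tail: the alignment is the max of the two known alignments, and the total size is padded up to that alignment (the issue #27023 case mentioned above). The numbers below are illustrative only.

fn dst_size_and_align(
    sized_size: u64,
    sized_align: u64,
    unsized_size: u64,
    unsized_align: u64,
) -> (u64, u64) {
    let align = sized_align.max(unsized_align);
    let unpadded = sized_size + unsized_size;
    // Round up to the chosen alignment: (size + align - 1) & !(align - 1).
    let size = (unpadded + align - 1) & !(align - 1);
    (size, align)
}

fn main() {
    // e.g. 10 bytes of sized fields (align 8) followed by a 6-byte slice tail (align 2)
    assert_eq!(dst_size_and_align(10, 8, 6, 2), (16, 8));
}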
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index 765604e..cffaf79 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -6,7 +6,7 @@
use cranelift_codegen::ir::immediates::Offset32;
fn codegen_field<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
base: Pointer,
extra: Option<Value>,
layout: TyAndLayout<'tcx>,
@@ -15,11 +15,8 @@
let field_offset = layout.fields.offset(field.index());
let field_layout = layout.field(&*fx, field.index());
- let simple = |fx: &mut FunctionCx<'_, '_, _>| {
- (
- base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()),
- field_layout,
- )
+ let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
+ (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
};
if let Some(extra) = extra {
@@ -58,10 +55,7 @@
a_scalar: &Scalar,
b_scalar: &Scalar,
) -> Offset32 {
- let b_offset = a_scalar
- .value
- .size(&tcx)
- .align_to(b_scalar.value.align(&tcx).abi);
+ let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
Offset32::new(b_offset.bytes().try_into().unwrap())
}
@@ -106,10 +100,7 @@
}
// FIXME remove
- pub(crate) fn force_stack(
- self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- ) -> (Pointer, Option<Value>) {
+ pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, meta) => (ptr, meta),
@@ -129,7 +120,7 @@
}
/// Load a value with layout.abi of scalar
- pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, 'tcx, impl Module>) -> Value {
+ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, None) => {
@@ -153,10 +144,7 @@
}
/// Load a value pair with layout.abi of scalar pair
- pub(crate) fn load_scalar_pair(
- self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- ) -> (Value, Value) {
+ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, None) => {
@@ -183,7 +171,7 @@
pub(crate) fn value_field(
self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
field: mir::Field,
) -> CValue<'tcx> {
let layout = self.1;
@@ -219,21 +207,17 @@
}
}
- pub(crate) fn unsize_value(
- self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- dest: CPlace<'tcx>,
- ) {
+ pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
crate::unsize::coerce_unsized_into(fx, self, dest);
}
/// If `ty` is signed, `const_val` must already be sign extended.
pub(crate) fn const_val(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
const_val: ty::ScalarInt,
) -> CValue<'tcx> {
- assert_eq!(const_val.size(), layout.size);
+ assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
let clif_ty = fx.clif_type(layout.ty).unwrap();
@@ -250,18 +234,11 @@
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
let const_val = const_val.to_bits(layout.size).unwrap();
let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
- let msb = fx
- .bcx
- .ins()
- .iconst(types::I64, (const_val >> 64) as u64 as i64);
+ let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
fx.bcx.ins().iconcat(lsb, msb)
}
- ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..)
- | ty::RawPtr(..) => {
- fx
- .bcx
- .ins()
- .iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
+ ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
+ fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
}
ty::Float(FloatTy::F32) => {
fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
@@ -279,14 +256,8 @@
}
pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
- assert!(matches!(
- self.layout().ty.kind(),
- ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
- ));
- assert!(matches!(
- layout.ty.kind(),
- ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
- ));
+ assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
assert_eq!(self.layout().abi, layout.abi);
CValue(self.0, layout)
}
@@ -317,14 +288,11 @@
}
pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
- CPlace {
- inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
- layout,
- }
+ CPlace { inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None), layout }
}
pub(crate) fn new_stack_slot(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
assert!(!layout.is_unsized());
@@ -339,28 +307,22 @@
size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
offset: None,
});
- CPlace {
- inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None),
- layout,
- }
+ CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
}
pub(crate) fn new_var(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
local: Local,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
let var = Variable::with_u32(fx.next_ssa_var);
fx.next_ssa_var += 1;
fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
- CPlace {
- inner: CPlaceInner::Var(local, var),
- layout,
- }
+ CPlace { inner: CPlaceInner::Var(local, var), layout }
}
pub(crate) fn new_var_pair(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
local: Local,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
@@ -372,17 +334,11 @@
let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
fx.bcx.declare_var(var1, ty1);
fx.bcx.declare_var(var2, ty2);
- CPlace {
- inner: CPlaceInner::VarPair(local, var1, var2),
- layout,
- }
+ CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
}
pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
- CPlace {
- inner: CPlaceInner::Addr(ptr, None),
- layout,
- }
+ CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
}
pub(crate) fn for_ptr_with_extra(
@@ -390,34 +346,27 @@
extra: Value,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
- CPlace {
- inner: CPlaceInner::Addr(ptr, Some(extra)),
- layout,
- }
+ CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
}
- pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, 'tcx, impl Module>) -> CValue<'tcx> {
+ pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
let layout = self.layout();
match self.inner {
CPlaceInner::Var(_local, var) => {
let val = fx.bcx.use_var(var);
- fx.bcx
- .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
CValue::by_val(val, layout)
}
CPlaceInner::VarPair(_local, var1, var2) => {
let val1 = fx.bcx.use_var(var1);
- fx.bcx
- .set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
+ //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
let val2 = fx.bcx.use_var(var2);
- fx.bcx
- .set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
+ //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
CValue::by_val_pair(val1, val2, layout)
}
CPlaceInner::VarLane(_local, var, lane) => {
let val = fx.bcx.use_var(var);
- fx.bcx
- .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
let val = fx.bcx.ins().extractlane(val, lane);
CValue::by_val(val, layout)
}
@@ -447,11 +396,7 @@
}
}
- pub(crate) fn write_cvalue(
- self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- from: CValue<'tcx>,
- ) {
+ pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
assert_assignable(fx, from.layout().ty, self.layout().ty);
self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
@@ -459,7 +404,7 @@
pub(crate) fn write_cvalue_transmute(
self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
from: CValue<'tcx>,
) {
self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
@@ -467,12 +412,12 @@
fn write_cvalue_maybe_transmute(
self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
from: CValue<'tcx>,
#[cfg_attr(not(debug_assertions), allow(unused_variables))] method: &'static str,
) {
fn transmute_value<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
var: Variable,
data: Value,
dst_ty: Type,
@@ -511,8 +456,7 @@
}
_ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
};
- fx.bcx
- .set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
fx.bcx.def_var(var, data);
}
@@ -558,15 +502,13 @@
// First get the old vector
let vector = fx.bcx.use_var(var);
- fx.bcx
- .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
// Next insert the written lane into the vector
let vector = fx.bcx.ins().insertlane(vector, data, lane);
// Finally write the new vector
- fx.bcx
- .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
fx.bcx.def_var(var, vector);
return;
@@ -604,10 +546,7 @@
to_ptr.store(fx, val, flags);
}
CValueInner::ByValPair(_, _) => {
- bug!(
- "Non ScalarPair abi {:?} for ByValPair CValue",
- dst_layout.abi
- );
+ bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
}
CValueInner::ByRef(from_ptr, None) => {
let from_addr = from_ptr.get_addr(fx);
@@ -632,7 +571,7 @@
pub(crate) fn place_field(
self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
field: mir::Field,
) -> CPlace<'tcx> {
let layout = self.layout();
@@ -650,18 +589,8 @@
let layout = layout.field(&*fx, field.index());
match field.as_u32() {
- 0 => {
- return CPlace {
- inner: CPlaceInner::Var(local, var1),
- layout,
- }
- }
- 1 => {
- return CPlace {
- inner: CPlaceInner::Var(local, var2),
- layout,
- }
- }
+ 0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
+ 1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
_ => unreachable!("field should be 0 or 1"),
}
}
@@ -680,7 +609,7 @@
pub(crate) fn place_index(
self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
index: Value,
) -> CPlace<'tcx> {
let (elem_layout, ptr) = match self.layout().ty.kind() {
@@ -689,30 +618,24 @@
_ => bug!("place_index({:?})", self.layout().ty),
};
- let offset = fx
- .bcx
- .ins()
- .imul_imm(index, elem_layout.size.bytes() as i64);
+ let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
}
- pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, 'tcx, impl Module>) -> CPlace<'tcx> {
+ pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
if has_ptr_meta(fx.tcx, inner_layout.ty) {
let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
} else {
- CPlace::for_ptr(
- Pointer::new(self.to_cvalue(fx).load_scalar(fx)),
- inner_layout,
- )
+ CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
}
}
pub(crate) fn place_ref(
self,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
if has_ptr_meta(fx.tcx, self.layout().ty) {
@@ -729,21 +652,18 @@
pub(crate) fn downcast_variant(
self,
- fx: &FunctionCx<'_, 'tcx, impl Module>,
+ fx: &FunctionCx<'_, '_, 'tcx>,
variant: VariantIdx,
) -> Self {
assert!(!self.layout().is_unsized());
let layout = self.layout().for_variant(fx, variant);
- CPlace {
- inner: self.inner,
- layout,
- }
+ CPlace { inner: self.inner, layout }
}
}
#[track_caller]
pub(crate) fn assert_assignable<'tcx>(
- fx: &FunctionCx<'_, 'tcx, impl Module>,
+ fx: &FunctionCx<'_, '_, 'tcx>,
from_ty: Ty<'tcx>,
to_ty: Ty<'tcx>,
) {
@@ -776,12 +696,9 @@
}
(&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
for (from, to) in from_traits.iter().zip(to_traits) {
- let from = fx
- .tcx
- .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
- let to = fx
- .tcx
- .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
+ let from =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
+ let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
assert_eq!(
from, to,
"Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
index 8f15586..4d2551a 100644
--- a/compiler/rustc_codegen_cranelift/src/vtable.rs
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -15,7 +15,7 @@
flags
}
-pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
+pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
pointer_ty(fx.tcx),
@@ -25,7 +25,7 @@
)
}
-pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
+pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
pointer_ty(fx.tcx),
@@ -35,7 +35,7 @@
)
}
-pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
+pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
pointer_ty(fx.tcx),
@@ -46,7 +46,7 @@
}
pub(crate) fn get_ptr_and_method_ref<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
arg: CValue<'tcx>,
idx: usize,
) -> (Value, Value) {
@@ -68,7 +68,7 @@
}
pub(crate) fn get_vtable<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> Value {
@@ -85,7 +85,7 @@
}
fn build_vtable<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> DataId {
@@ -94,7 +94,7 @@
let drop_in_place_fn = import_function(
tcx,
- &mut fx.cx.module,
+ fx.cx.module,
Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.tcx),
);
@@ -111,7 +111,7 @@
opt_mth.map(|(def_id, substs)| {
import_function(
tcx,
- &mut fx.cx.module,
+ fx.cx.module,
Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs)
.unwrap()
.polymorphize(fx.tcx),
@@ -165,11 +165,8 @@
}
fn write_usize(tcx: TyCtxt<'_>, buf: &mut [u8], idx: usize, num: u64) {
- let pointer_size = tcx
- .layout_of(ParamEnv::reveal_all().and(tcx.types.usize))
- .unwrap()
- .size
- .bytes() as usize;
+ let pointer_size =
+ tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.usize)).unwrap().size.bytes() as usize;
let target = &mut buf[idx * pointer_size..(idx + 1) * pointer_size];
match tcx.data_layout.endian {
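A sketch of the vtable layout the accessors above read from: slot 0 is the drop-in-place function, slot 1 the size, slot 2 the minimum alignment, and trait methods follow. Each load uses a byte offset of `usize_size * slot_index`; the 8-byte pointer size below is an assumption for a 64-bit target.

const USIZE_SIZE: usize = 8; // pointer size assumed to be 8 bytes here

fn vtable_slot_offset(slot: usize) -> usize {
    USIZE_SIZE * slot
}

fn main() {
    assert_eq!(vtable_slot_offset(0), 0);  // drop_fn_of_obj
    assert_eq!(vtable_slot_offset(1), 8);  // size_of_obj
    assert_eq!(vtable_slot_offset(2), 16); // min_align_of_obj
    assert_eq!(vtable_slot_offset(3), 24); // first trait method
}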
diff --git a/compiler/rustc_codegen_cranelift/test.sh b/compiler/rustc_codegen_cranelift/test.sh
index 5ab10e0..e222adc 100755
--- a/compiler/rustc_codegen_cranelift/test.sh
+++ b/compiler/rustc_codegen_cranelift/test.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
./build.sh --sysroot none "$@"
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
index f937364..4999cb3 100644
--- a/compiler/rustc_codegen_llvm/Cargo.toml
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -10,8 +10,9 @@
[dependencies]
bitflags = "1.0"
+cstr = "0.2"
libc = "0.2"
-measureme = "9.0.0"
+measureme = "9.1.0"
snap = "1"
tracing = "0.1"
rustc_middle = { path = "../rustc_middle" }
@@ -29,6 +30,6 @@
rustc_session = { path = "../rustc_session" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_target = { path = "../rustc_target" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index a69241e..d9393ff 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -430,7 +430,13 @@
PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(attrs);
- llvm::Attribute::StructRet.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
+ unsafe {
+ llvm::LLVMRustAddStructRetAttr(
+ llfn,
+ llvm::AttributePlace::Argument(i).as_uint(),
+ self.ret.layout.llvm_type(cx),
+ );
+ }
}
_ => {}
}
@@ -486,8 +492,13 @@
PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(attrs);
- llvm::Attribute::StructRet
- .apply_callsite(llvm::AttributePlace::Argument(i), callsite);
+ unsafe {
+ llvm::LLVMRustAddStructRetCallSiteAttr(
+ callsite,
+ llvm::AttributePlace::Argument(i).as_uint(),
+ self.ret.layout.llvm_type(bx),
+ );
+ }
}
_ => {}
}
@@ -554,7 +565,7 @@
llvm::AddCallSiteAttrString(
callsite,
llvm::AttributePlace::Function,
- rustc_data_structures::const_cstr!("cmse_nonsecure_call"),
+ cstr::cstr!("cmse_nonsecure_call"),
);
}
}
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index a5ea0b2..068e5e9 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -93,7 +93,7 @@
let args = [usize, usize]; // size, align
let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
- let name = "__rust_alloc_error_handler".to_string();
+ let name = "__rust_alloc_error_handler";
let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
// -> ! DIFlagNoReturn
llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 8801211..e7d359c 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -61,9 +61,9 @@
// Default per-arch clobbers
// Basically what clang does
let arch_clobbers = match &self.sess().target.arch[..] {
- "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
- "mips" | "mips64" => vec!["~{$1}"],
- _ => Vec::new(),
+ "x86" | "x86_64" => &["~{dirflag}", "~{fpsr}", "~{flags}"][..],
+ "mips" | "mips64" => &["~{$1}"],
+ _ => &[],
};
let all_constraints = ia
@@ -304,6 +304,7 @@
} else if options.contains(InlineAsmOptions::READONLY) {
llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
}
+ llvm::Attribute::WillReturn.apply_callsite(llvm::AttributePlace::Function, result);
} else if options.contains(InlineAsmOptions::NOMEM) {
llvm::Attribute::InaccessibleMemOnly
.apply_callsite(llvm::AttributePlace::Function, result);
@@ -487,6 +488,9 @@
} else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
// LLVM doesn't recognize x30
"{lr}".to_string()
+ } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
+ // LLVM doesn't recognize r14
+ "{lr}".to_string()
} else {
format!("{{{}}}", reg.name())
}
@@ -524,6 +528,7 @@
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
}
+ InlineAsmRegClass::Err => unreachable!(),
}
.to_string(),
}
@@ -590,6 +595,7 @@
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
}
+ InlineAsmRegClass::Err => unreachable!(),
}
}
@@ -633,6 +639,7 @@
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
}
+ InlineAsmRegClass::Err => unreachable!(),
}
}
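A small sketch of the clobber-list change in the asm.rs hunk above: returning `&'static [&str]` slices instead of allocating a Vec on every call. The `[..]` on the first arm coerces the fixed-size array reference to a slice so all match arms share one type.

fn arch_clobbers(arch: &str) -> &'static [&'static str] {
    match arch {
        "x86" | "x86_64" => &["~{dirflag}", "~{fpsr}", "~{flags}"][..],
        "mips" | "mips64" => &["~{$1}"],
        _ => &[],
    }
}

fn main() {
    assert_eq!(arch_clobbers("x86_64").len(), 3);
    assert!(arch_clobbers("riscv64").is_empty());
}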
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index a78d692..64ebe58 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -2,8 +2,8 @@
use std::ffi::CString;
+use cstr::cstr;
use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::const_cstr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
@@ -53,6 +53,9 @@
if enabled.contains(SanitizerSet::THREAD) {
llvm::Attribute::SanitizeThread.apply_llfn(Function, llfn);
}
+ if enabled.contains(SanitizerSet::HWADDRESS) {
+ llvm::Attribute::SanitizeHWAddress.apply_llfn(Function, llfn);
+ }
}
/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
@@ -72,8 +75,8 @@
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
- const_cstr!("frame-pointer"),
- const_cstr!("all"),
+ cstr!("frame-pointer"),
+ cstr!("all"),
);
}
}
@@ -92,7 +95,7 @@
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
- const_cstr!("instrument-function-entry-inlined"),
+ cstr!("instrument-function-entry-inlined"),
&mcount_name,
);
}
@@ -126,16 +129,16 @@
StackProbeType::None => None,
// Request LLVM to generate the probes inline. If the given LLVM version does not support
// this, no probe is generated at all (even if the attribute is specified).
- StackProbeType::Inline => Some(const_cstr!("inline-asm")),
+ StackProbeType::Inline => Some(cstr!("inline-asm")),
// Flag our internal `__rust_probestack` function as the stack probe symbol.
// This is defined in the `compiler-builtins` crate for each architecture.
- StackProbeType::Call => Some(const_cstr!("__rust_probestack")),
+ StackProbeType::Call => Some(cstr!("__rust_probestack")),
// Pick from the two above based on the LLVM version.
StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
if llvm_util::get_version() < min_llvm_version_for_inline {
- Some(const_cstr!("__rust_probestack"))
+ Some(cstr!("__rust_probestack"))
} else {
- Some(const_cstr!("inline-asm"))
+ Some(cstr!("inline-asm"))
}
}
};
@@ -143,30 +146,18 @@
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
- const_cstr!("probe-stack"),
+ cstr!("probe-stack"),
attr_value,
);
}
}
-pub fn llvm_target_features(sess: &Session) -> impl Iterator<Item = &str> {
- const RUSTC_SPECIFIC_FEATURES: &[&str] = &["crt-static"];
-
- let cmdline = sess
- .opts
- .cg
- .target_feature
- .split(',')
- .filter(|f| !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)));
- sess.target.features.split(',').chain(cmdline).filter(|l| !l.is_empty())
-}
-
pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
let target_cpu = SmallCStr::new(llvm_util::target_cpu(cx.tcx.sess));
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
- const_cstr!("target-cpu"),
+ cstr!("target-cpu"),
target_cpu.as_c_str(),
);
}
@@ -177,7 +168,7 @@
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
- const_cstr!("tune-cpu"),
+ cstr!("tune-cpu"),
tune_cpu.as_c_str(),
);
}
@@ -286,7 +277,7 @@
Attribute::NoAlias.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
- llvm::AddFunctionAttrString(llfn, Function, const_cstr!("cmse_nonsecure_entry"));
+ llvm::AddFunctionAttrString(llfn, Function, cstr!("cmse_nonsecure_entry"));
}
sanitize(cx, codegen_fn_attrs.no_sanitize, llfn);
@@ -298,25 +289,27 @@
// The target doesn't care; the subtarget reads our attribute.
apply_tune_cpu_attr(cx, llfn);
- let features = llvm_target_features(cx.tcx.sess)
- .map(|s| s.to_string())
- .chain(codegen_fn_attrs.target_features.iter().map(|f| {
+ let function_features = codegen_fn_attrs
+ .target_features
+ .iter()
+ .map(|f| {
let feature = &f.as_str();
format!("+{}", llvm_util::to_llvm_feature(cx.tcx.sess, feature))
- }))
+ })
.chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
}))
- .collect::<Vec<String>>()
- .join(",");
-
- if !features.is_empty() {
+ .collect::<Vec<String>>();
+ if !function_features.is_empty() {
+ let mut global_features = llvm_util::llvm_global_features(cx.tcx.sess);
+ global_features.extend(function_features.into_iter());
+ let features = global_features.join(",");
let val = CString::new(features).unwrap();
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
- const_cstr!("target-features"),
+ cstr!("target-features"),
&val,
);
}
@@ -329,7 +322,7 @@
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
- const_cstr!("wasm-import-module"),
+ cstr!("wasm-import-module"),
&module,
);
@@ -339,7 +332,7 @@
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
- const_cstr!("wasm-import-name"),
+ cstr!("wasm-import-name"),
&name,
);
}
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 326ae35..388dd7c 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1,4 +1,3 @@
-use crate::attributes;
use crate::back::lto::ThinBuffer;
use crate::back::profiling::{
selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
@@ -11,6 +10,7 @@
use crate::type_::Type;
use crate::LlvmCodegenBackend;
use crate::ModuleLlvm;
+use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::write::{
BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
TargetMachineFactoryFn,
@@ -93,7 +93,7 @@
pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
tcx.output_filenames(LOCAL_CRATE)
- .split_dwarf_filename(tcx.sess.split_debuginfo(), Some(mod_name))
+ .split_dwarf_path(tcx.sess.split_debuginfo(), Some(mod_name))
} else {
None
};
@@ -139,7 +139,7 @@
}
}
-fn to_llvm_code_model(code_model: Option<CodeModel>) -> llvm::CodeModel {
+pub(crate) fn to_llvm_code_model(code_model: Option<CodeModel>) -> llvm::CodeModel {
match code_model {
Some(CodeModel::Tiny) => llvm::CodeModel::Tiny,
Some(CodeModel::Small) => llvm::CodeModel::Small,
@@ -165,8 +165,6 @@
let code_model = to_llvm_code_model(sess.code_model());
- let mut features = llvm_util::handle_native_features(sess);
- features.extend(attributes::llvm_target_features(sess).map(|s| s.to_owned()));
let mut singlethread = sess.target.singlethread;
// On the wasm target once the `atomics` feature is enabled that means that
@@ -181,7 +179,7 @@
let triple = SmallCStr::new(&sess.target.llvm_target);
let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
- let features = features.join(",");
+ let features = llvm_util::llvm_global_features(sess).join(",");
let features = CString::new(features).unwrap();
let abi = SmallCStr::new(&sess.target.llvm_abiname);
let trap_unreachable =
@@ -440,6 +438,8 @@
sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
sanitize_thread: config.sanitizer.contains(SanitizerSet::THREAD),
+ sanitize_hwaddress: config.sanitizer.contains(SanitizerSet::HWADDRESS),
+ sanitize_hwaddress_recover: config.sanitizer_recover.contains(SanitizerSet::HWADDRESS),
})
} else {
None
@@ -652,6 +652,10 @@
if config.sanitizer.contains(SanitizerSet::THREAD) {
passes.push(llvm::LLVMRustCreateThreadSanitizerPass());
}
+ if config.sanitizer.contains(SanitizerSet::HWADDRESS) {
+ let recover = config.sanitizer_recover.contains(SanitizerSet::HWADDRESS);
+ passes.push(llvm::LLVMRustCreateHWAddressSanitizerPass(recover));
+ }
}
pub(crate) fn link(
@@ -873,9 +877,7 @@
if !config.emit_bc {
debug!("removing_bitcode {:?}", bc_out);
- if let Err(e) = fs::remove_file(&bc_out) {
- diag_handler.err(&format!("failed to remove bitcode: {}", e));
- }
+ ensure_removed(diag_handler, &bc_out);
}
}
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index d2f4d3e..f4852c9 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -5,13 +5,13 @@
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
+use cstr::cstr;
use libc::{c_char, c_uint};
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
-use rustc_data_structures::const_cstr;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::TyAndLayout;
@@ -979,7 +979,7 @@
}
fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
- let name = const_cstr!("cleanuppad");
+ let name = cstr!("cleanuppad");
let ret = unsafe {
llvm::LLVMRustBuildCleanupPad(
self.llbuilder,
@@ -1003,7 +1003,7 @@
}
fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
- let name = const_cstr!("catchpad");
+ let name = cstr!("catchpad");
let ret = unsafe {
llvm::LLVMRustBuildCatchPad(
self.llbuilder,
@@ -1022,7 +1022,7 @@
unwind: Option<&'ll BasicBlock>,
num_handlers: usize,
) -> &'ll Value {
- let name = const_cstr!("catchswitch");
+ let name = cstr!("catchswitch");
let ret = unsafe {
llvm::LLVMRustBuildCatchSwitch(
self.llbuilder,
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 16e1a8a..9904683 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -5,9 +5,9 @@
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
+use cstr::cstr;
use libc::c_uint;
use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::const_cstr;
use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::interpret::{
@@ -419,9 +419,9 @@
.all(|&byte| byte == 0);
let sect_name = if all_bytes_are_zero {
- const_cstr!("__DATA,__thread_bss")
+ cstr!("__DATA,__thread_bss")
} else {
- const_cstr!("__DATA,__thread_data")
+ cstr!("__DATA,__thread_data")
};
llvm::LLVMSetSection(g, sect_name.as_ptr());
}
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 6acd26b..21473f3 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -1,4 +1,5 @@
use crate::attributes;
+use crate::back::write::to_llvm_code_model;
use crate::callee::get_fn;
use crate::coverageinfo;
use crate::debuginfo;
@@ -7,10 +8,10 @@
use crate::type_::Type;
use crate::value::Value;
+use cstr::cstr;
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::base_n;
-use rustc_data_structures::const_cstr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_middle::bug;
@@ -104,6 +105,10 @@
data_layout.replace("-p270:32:32-p271:32:32-p272:64:64-", "-")
}
+fn strip_powerpc64_vectors(data_layout: String) -> String {
+ data_layout.replace("-v256:256:256-v512:512:512", "")
+}
+
pub unsafe fn create_module(
tcx: TyCtxt<'_>,
llcx: &'ll llvm::Context,
@@ -119,6 +124,9 @@
{
target_data_layout = strip_x86_address_spaces(target_data_layout);
}
+ if llvm_util::get_version() < (12, 0, 0) && sess.target.arch == "powerpc64" {
+ target_data_layout = strip_powerpc64_vectors(target_data_layout);
+ }
// Ensure the data-layout values hardcoded remain the defaults.
if sess.target.is_builtin {
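For illustration, here is a standalone sketch of what the `strip_powerpc64_vectors` rewrite above does to a data-layout string before it is handed to an older LLVM; the layout string used here is representative only, the real one comes from the target spec:

fn strip_powerpc64_vectors(data_layout: String) -> String {
    data_layout.replace("-v256:256:256-v512:512:512", "")
}

fn main() {
    // Hypothetical powerpc64 layout containing the v256/v512 components that
    // LLVM < 12 does not understand.
    let dl = "E-m:e-i64:64-n32:64-v256:256:256-v512:512:512".to_string();
    assert_eq!(strip_powerpc64_vectors(dl), "E-m:e-i64:64-n32:64");
}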
@@ -174,6 +182,13 @@
}
}
+ // Linking object files with different code models is undefined behavior
+ // because the compiler would have to generate additional code (to span
+ // longer jumps) if a larger code model is used with a smaller one.
+ //
+ // See https://reviews.llvm.org/D52322 and https://reviews.llvm.org/D52323.
+ llvm::LLVMRustSetModuleCodeModel(llmod, to_llvm_code_model(sess.code_model()));
+
// If skipping the PLT is enabled, we need to add some module metadata
// to ensure intrinsic calls don't use it.
if !sess.needs_plt() {
@@ -380,7 +395,7 @@
"rust_eh_personality"
};
let fty = self.type_variadic_func(&[], self.type_i32());
- self.declare_cfn(name, fty)
+ self.declare_cfn(name, llvm::UnnamedAddr::Global, fty)
}
};
attributes::apply_target_cpu_attr(self, llfn);
@@ -414,8 +429,8 @@
}
fn create_used_variable(&self) {
- let name = const_cstr!("llvm.used");
- let section = const_cstr!("llvm.metadata");
+ let name = cstr!("llvm.used");
+ let section = cstr!("llvm.metadata");
let array =
self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
@@ -429,7 +444,7 @@
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
if self.get_declared_value("main").is_none() {
- Some(self.declare_cfn("main", fn_type))
+ Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type))
} else {
// If the symbol already exists, it is an error: for example, the user wrote
// #[no_mangle] extern "C" fn main(..) {..}
@@ -459,8 +474,7 @@
} else {
self.type_variadic_func(&[], ret)
};
- let f = self.declare_cfn(name, fn_ty);
- llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
+ let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
self.intrinsics.borrow_mut().insert(name, f);
f
}
@@ -498,25 +512,6 @@
let t_f32 = self.type_f32();
let t_f64 = self.type_f64();
- macro_rules! vector_types {
- ($id_out:ident: $elem_ty:ident, $len:expr) => {
- let $id_out = self.type_vector($elem_ty, $len);
- };
- ($($id_out:ident: $elem_ty:ident, $len:expr;)*) => {
- $(vector_types!($id_out: $elem_ty, $len);)*
- }
- }
- vector_types! {
- t_v2f32: t_f32, 2;
- t_v4f32: t_f32, 4;
- t_v8f32: t_f32, 8;
- t_v16f32: t_f32, 16;
-
- t_v2f64: t_f64, 2;
- t_v4f64: t_f64, 4;
- t_v8f64: t_f64, 8;
- }
-
ifn!("llvm.wasm.trunc.saturate.unsigned.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.wasm.trunc.saturate.unsigned.i32.f64", fn(t_f64) -> t_i32);
ifn!("llvm.wasm.trunc.saturate.unsigned.i64.f32", fn(t_f32) -> t_i64);
@@ -540,124 +535,40 @@
ifn!("llvm.sideeffect", fn() -> void);
ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
- ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32);
- ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32);
- ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32);
- ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32);
ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
- ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64);
- ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64);
- ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64);
ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
- ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32);
- ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32);
- ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32);
- ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32);
ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
- ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64);
- ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64);
- ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64);
ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
- ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32);
- ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32);
- ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32);
- ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32);
ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
- ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64);
- ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64);
- ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64);
ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
@@ -665,24 +576,10 @@
ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
- ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32);
- ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32);
- ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32);
- ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
- ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64);
- ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64);
- ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 444a9d4..352638a 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -254,7 +254,7 @@
///
/// 1. The file name of an "Unreachable" function must match the file name of the existing
/// codegenned (covered) function to which the unreachable code regions will be added.
-/// 2. The function to which the unreachable code regions will be added must not be a genaric
+/// 2. The function to which the unreachable code regions will be added must not be a generic
/// function (must not have type parameters) because the coverage tools will get confused
/// if the codegenned function has more than one instantiation and additional `CodeRegion`s
/// attached to only one of those instantiations.
@@ -284,7 +284,7 @@
let all_def_ids: DefIdSet =
tcx.mir_keys(LOCAL_CRATE).iter().map(|local_def_id| local_def_id.to_def_id()).collect();
- let (codegenned_def_ids, _) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+ let codegenned_def_ids = tcx.codegened_and_inlined_items(LOCAL_CRATE);
let mut unreachable_def_ids_by_file: FxHashMap<Symbol, Vec<DefId>> = FxHashMap::default();
for &non_codegenned_def_id in all_def_ids.difference(codegenned_def_ids) {
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index e777f36..e47b8fd 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -162,7 +162,7 @@
pub(crate) fn write_mapping_to_buffer(
virtual_file_mapping: Vec<u32>,
expressions: Vec<CounterExpression>,
- mut mapping_regions: Vec<CounterMappingRegion>,
+ mapping_regions: Vec<CounterMappingRegion>,
buffer: &RustString,
) {
unsafe {
@@ -171,7 +171,7 @@
virtual_file_mapping.len() as c_uint,
expressions.as_ptr(),
expressions.len() as c_uint,
- mapping_regions.as_mut_ptr(),
+ mapping_regions.as_ptr(),
mapping_regions.len() as c_uint,
buffer,
);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index 7673dfb..c2725b8 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -1,4 +1,4 @@
-use super::metadata::{file_metadata, UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
+use super::metadata::file_metadata;
use super::utils::DIB;
use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext};
use rustc_codegen_ssa::traits::*;
@@ -102,8 +102,8 @@
DIB(cx),
parent_scope.dbg_scope.unwrap(),
file_metadata,
- loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
- loc.col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
+ loc.line,
+ loc.col,
)
},
};
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 6e7c0b3..d5b32e5 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -18,8 +18,8 @@
};
use crate::value::Value;
+use cstr::cstr;
use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::const_cstr;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@@ -979,7 +979,7 @@
// The OSX linker has an idiosyncrasy where it will ignore some debuginfo
// if multiple object files with the same `DW_AT_name` are linked together.
// As a workaround we generate unique names for each object file. Those do
- // not correspond to an actual source file but that should be harmless.
+ // not correspond to an actual source file but that is harmless.
if tcx.sess.target.is_like_osx {
name_in_debuginfo.push("@");
name_in_debuginfo.push(codegen_unit_name);
@@ -992,17 +992,17 @@
let producer = format!("clang LLVM ({})", rustc_producer);
let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
+ let work_dir = tcx.sess.working_dir.0.to_string_lossy();
let flags = "\0";
-
let out_dir = &tcx.output_filenames(LOCAL_CRATE).out_directory;
let split_name = if tcx.sess.target_can_use_split_dwarf() {
tcx.output_filenames(LOCAL_CRATE)
- .split_dwarf_filename(tcx.sess.split_debuginfo(), Some(codegen_unit_name))
+ .split_dwarf_path(tcx.sess.split_debuginfo(), Some(codegen_unit_name))
+ .map(|f| out_dir.join(f))
} else {
None
}
.unwrap_or_default();
- let out_dir = out_dir.to_str().unwrap();
let split_name = split_name.to_str().unwrap();
// FIXME(#60020):
@@ -1024,12 +1024,12 @@
assert!(tcx.sess.opts.debuginfo != DebugInfo::None);
unsafe {
- let file_metadata = llvm::LLVMRustDIBuilderCreateFile(
+ let compile_unit_file = llvm::LLVMRustDIBuilderCreateFile(
debug_context.builder,
name_in_debuginfo.as_ptr().cast(),
name_in_debuginfo.len(),
- out_dir.as_ptr().cast(),
- out_dir.len(),
+ work_dir.as_ptr().cast(),
+ work_dir.len(),
llvm::ChecksumKind::None,
ptr::null(),
0,
@@ -1038,12 +1038,15 @@
let unit_metadata = llvm::LLVMRustDIBuilderCreateCompileUnit(
debug_context.builder,
DW_LANG_RUST,
- file_metadata,
+ compile_unit_file,
producer.as_ptr().cast(),
producer.len(),
tcx.sess.opts.optimize != config::OptLevel::No,
flags.as_ptr().cast(),
0,
+ // NB: this doesn't actually have any perceptible effect, it seems. LLVM will instead
+ // put the path supplied to `MCSplitDwarfFile` into the debug info of the final
+ // output(s).
split_name.as_ptr().cast(),
split_name.len(),
kind,
@@ -1072,7 +1075,7 @@
gcov_cu_info.len() as c_uint,
);
- let llvm_gcov_ident = const_cstr!("llvm.gcov");
+ let llvm_gcov_ident = cstr!("llvm.gcov");
llvm::LLVMAddNamedMetadataOperand(
debug_context.llmod,
llvm_gcov_ident.as_ptr(),
@@ -1090,7 +1093,7 @@
);
llvm::LLVMAddNamedMetadataOperand(
debug_context.llmod,
- const_cstr!("llvm.ident").as_ptr(),
+ cstr!("llvm.ident").as_ptr(),
llvm::LLVMMDNodeInContext(debug_context.llcontext, &name_metadata, 1),
);
}
@@ -1414,7 +1417,7 @@
def_id: DefId,
) -> (&'tcx GeneratorLayout<'tcx>, IndexVec<mir::GeneratorSavedLocal, Option<Symbol>>) {
let body = tcx.optimized_mir(def_id);
- let generator_layout = body.generator_layout.as_ref().unwrap();
+ let generator_layout = body.generator_layout().unwrap();
let mut generator_saved_local_names = IndexVec::from_elem(None, &generator_layout.field_tys);
let state_arg = mir::Local::new(1);
@@ -1839,10 +1842,7 @@
.span;
if !span.is_dummy() {
let loc = cx.lookup_debug_loc(span.lo());
- return Some(SourceInfo {
- file: file_metadata(cx, &loc.file),
- line: loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
- });
+ return Some(SourceInfo { file: file_metadata(cx, &loc.file), line: loc.line });
}
}
_ => {}
@@ -2369,7 +2369,7 @@
fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
let mut names = generics
.parent
- .map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
+ .map_or_else(Vec::new, |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
names.extend(generics.params.iter().map(|param| param.name));
names
}
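As an aside on the `map_or` -> `map_or_else` changes in this file: the result is the same, only the default value is now built lazily. A minimal sketch with made-up values, not compiler code:

fn main() {
    let parent: Option<u32> = Some(3);

    // Eager default: the vector is allocated even though it is discarded here.
    let _eager = parent.map_or(vec![0u32; 1000], |n| vec![n]);

    // Lazy default: the closure only runs in the `None` case.
    let lazy = parent.map_or_else(|| vec![0u32; 1000], |n| vec![n]);
    assert_eq!(lazy, vec![3]);
}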
@@ -2481,7 +2481,7 @@
let loc = cx.lookup_debug_loc(span.lo());
(file_metadata(cx, &loc.file), loc.line)
} else {
- (unknown_file_metadata(cx), None)
+ (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER)
};
let is_local_to_unit = is_node_local_to_unit(cx, def_id);
@@ -2504,7 +2504,7 @@
linkage_name.as_ptr().cast(),
linkage_name.len(),
file_metadata,
- line_number.unwrap_or(UNKNOWN_LINE_NUMBER),
+ line_number,
type_metadata,
is_local_to_unit,
global,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 955e739..440e4d5 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -224,9 +224,9 @@
/// Information about the original source file.
pub file: Lrc<SourceFile>,
/// The (1-based) line number.
- pub line: Option<u32>,
+ pub line: u32,
/// The (1-based) column number.
- pub col: Option<u32>,
+ pub col: u32,
}
impl CodegenCx<'ll, '_> {
@@ -243,16 +243,16 @@
let line = (line + 1) as u32;
let col = (pos - line_pos).to_u32() + 1;
- (file, Some(line), Some(col))
+ (file, line, col)
}
- Err(file) => (file, None, None),
+ Err(file) => (file, UNKNOWN_LINE_NUMBER, UNKNOWN_COLUMN_NUMBER),
};
// For MSVC, omit the column number.
// Otherwise, emit it. This mimics clang behaviour.
// See discussion in https://github.com/rust-lang/rust/issues/42921
if self.sess().target.is_like_msvc {
- DebugLoc { file, line, col: None }
+ DebugLoc { file, line, col: UNKNOWN_COLUMN_NUMBER }
} else {
DebugLoc { file, line, col }
}
@@ -358,9 +358,9 @@
linkage_name.as_ptr().cast(),
linkage_name.len(),
file_metadata,
- loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
+ loc.line,
function_type_metadata,
- scope_line.unwrap_or(UNKNOWN_LINE_NUMBER),
+ scope_line,
flags,
spflags,
maybe_definition_llfn,
@@ -481,9 +481,9 @@
}
fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
- let mut names = generics
- .parent
- .map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
+ let mut names = generics.parent.map_or_else(Vec::new, |def_id| {
+ get_parameter_names(cx, cx.tcx.generics_of(def_id))
+ });
names.extend(generics.params.iter().map(|param| param.name));
names
}
@@ -550,14 +550,7 @@
) -> &'ll DILocation {
let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
- unsafe {
- llvm::LLVMRustDIBuilderCreateDebugLocation(
- line.unwrap_or(UNKNOWN_LINE_NUMBER),
- col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
- scope,
- inlined_at,
- )
- }
+ unsafe { llvm::LLVMRustDIBuilderCreateDebugLocation(line, col, scope, inlined_at) }
}
fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value) {
@@ -606,7 +599,7 @@
name.as_ptr().cast(),
name.len(),
file_metadata,
- loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
+ loc.line,
type_metadata,
true,
DIFlags::FlagZero,
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index 0591e0a..8977fa0 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -30,6 +30,7 @@
cx: &CodegenCx<'ll, '_>,
name: &str,
callconv: llvm::CallConv,
+ unnamed: llvm::UnnamedAddr,
ty: &'ll Type,
) -> &'ll Value {
debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
@@ -38,9 +39,7 @@
};
llvm::SetFunctionCallConv(llfn, callconv);
- // Function addresses in Rust are never significant, allowing functions to
- // be merged.
- llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global);
+ llvm::SetUnnamedAddress(llfn, unnamed);
if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.disable_redzone) {
llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
@@ -68,8 +67,13 @@
///
/// If there’s a value with the same name already declared, the function will
/// update the declaration and return existing Value instead.
- pub fn declare_cfn(&self, name: &str, fn_type: &'ll Type) -> &'ll Value {
- declare_raw_fn(self, name, llvm::CCallConv, fn_type)
+ pub fn declare_cfn(
+ &self,
+ name: &str,
+ unnamed: llvm::UnnamedAddr,
+ fn_type: &'ll Type,
+ ) -> &'ll Value {
+ declare_raw_fn(self, name, llvm::CCallConv, unnamed, fn_type)
}
/// Declare a Rust function.
@@ -79,7 +83,15 @@
pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Value {
debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
- let llfn = declare_raw_fn(self, name, fn_abi.llvm_cconv(), fn_abi.llvm_type(self));
+ // Function addresses in Rust are never significant, allowing functions to
+ // be merged.
+ let llfn = declare_raw_fn(
+ self,
+ name,
+ fn_abi.llvm_cconv(),
+ llvm::UnnamedAddr::Global,
+ fn_abi.llvm_type(self),
+ );
fn_abi.apply_attrs_llfn(self, llfn);
llfn
}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index bf0d499..af366f9 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -334,8 +334,11 @@
self.call(expect, &[cond, self.const_bool(expected)], None)
}
- fn sideeffect(&mut self, unconditional: bool) {
- if unconditional || self.tcx.sess.opts.debugging_opts.insert_sideeffect {
+ fn sideeffect(&mut self) {
+ // This kind of check would make a ton of sense in the caller, but currently the only
+ // caller of this function is in `rustc_codegen_ssa`, which is agnostic to whether the LLVM
+ // codegen backend is being used, and so is unable to check the LLVM version.
+ if unsafe { llvm::LLVMRustVersionMajor() } < 12 {
let fnname = self.get_intrinsic(&("llvm.sideeffect"));
self.call(fnname, &[], None);
}
@@ -390,7 +393,6 @@
) {
let llfn = get_rust_try_fn(bx, &mut |mut bx| {
bx.set_personality_fn(bx.eh_personality());
- bx.sideeffect(false);
let mut normal = bx.build_sibling_block("normal");
let mut catchswitch = bx.build_sibling_block("catchswitch");
@@ -552,9 +554,6 @@
// (%ptr, _) = landingpad
// call %catch_func(%data, %ptr)
// ret 1
-
- bx.sideeffect(false);
-
let mut then = bx.build_sibling_block("then");
let mut catch = bx.build_sibling_block("catch");
@@ -614,9 +613,6 @@
// %catch_data[1] = %is_rust_panic
// call %catch_func(%data, %catch_data)
// ret 1
-
- bx.sideeffect(false);
-
let mut then = bx.build_sibling_block("then");
let mut catch = bx.build_sibling_block("catch");
@@ -1009,7 +1005,7 @@
}
fn simd_simple_float_intrinsic(
- name: &str,
+ name: Symbol,
in_elem: &::rustc_middle::ty::TyS<'_>,
in_ty: &::rustc_middle::ty::TyS<'_>,
in_len: u64,
@@ -1036,93 +1032,69 @@
}
}
}
- let ety = match in_elem.kind() {
- ty::Float(f) if f.bit_width() == 32 => {
- if in_len < 2 || in_len > 16 {
+
+ let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
+ let elem_ty = bx.cx.type_float_from_ty(*f);
+ match f.bit_width() {
+ 32 => ("f32", elem_ty),
+ 64 => ("f64", elem_ty),
+ _ => {
return_error!(
- "unsupported floating-point vector `{}` with length `{}` \
- out-of-range [2, 16]",
- in_ty,
- in_len
+ "unsupported element type `{}` of floating-point vector `{}`",
+ f.name_str(),
+ in_ty
);
}
- "f32"
}
- ty::Float(f) if f.bit_width() == 64 => {
- if in_len < 2 || in_len > 8 {
- return_error!(
- "unsupported floating-point vector `{}` with length `{}` \
- out-of-range [2, 8]",
- in_ty,
- in_len
- );
- }
- "f64"
- }
- ty::Float(f) => {
- return_error!(
- "unsupported element type `{}` of floating-point vector `{}`",
- f.name_str(),
- in_ty
- );
- }
- _ => {
- return_error!("`{}` is not a floating-point type", in_ty);
- }
+ } else {
+ return_error!("`{}` is not a floating-point type", in_ty);
};
- let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
- let intrinsic = bx.get_intrinsic(&llvm_name);
- let c =
- bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+ let vec_ty = bx.type_vector(elem_ty, in_len);
+
+ let (intr_name, fn_ty) = match name {
+ sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
+ sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
+ sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
+ _ => return_error!("unrecognized intrinsic `{}`", name),
+ };
+
+ let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
+ let f = bx.declare_cfn(&llvm_name, llvm::UnnamedAddr::No, fn_ty);
+ let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
Ok(c)
}
- match name {
- sym::simd_fsqrt => {
- return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_fsin => {
- return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_fcos => {
- return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_fabs => {
- return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_floor => {
- return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_ceil => {
- return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_fexp => {
- return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_fexp2 => {
- return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_flog10 => {
- return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_flog2 => {
- return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_flog => {
- return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_fpowi => {
- return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_fpow => {
- return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
- }
- sym::simd_fma => {
- return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
- }
- _ => { /* fallthrough */ }
+ if std::matches!(
+ name,
+ sym::simd_fsqrt
+ | sym::simd_fsin
+ | sym::simd_fcos
+ | sym::simd_fabs
+ | sym::simd_floor
+ | sym::simd_ceil
+ | sym::simd_fexp
+ | sym::simd_fexp2
+ | sym::simd_flog10
+ | sym::simd_flog2
+ | sym::simd_flog
+ | sym::simd_fpowi
+ | sym::simd_fpow
+ | sym::simd_fma
+ ) {
+ return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
}
// FIXME: use:
@@ -1278,12 +1250,12 @@
format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
+ llvm::UnnamedAddr::No,
bx.type_func(
&[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
llvm_elem_vec_ty,
),
);
- llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
return Ok(v);
}
@@ -1408,9 +1380,9 @@
format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
+ llvm::UnnamedAddr::No,
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
);
- llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
return Ok(v);
}
@@ -1656,7 +1628,7 @@
out_elem
);
}
- macro_rules! arith {
+ macro_rules! arith_binary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name {
match in_elem.kind() {
@@ -1672,7 +1644,7 @@
})*
}
}
- arith! {
+ arith_binary! {
simd_add: Uint, Int => add, Float => fadd;
simd_sub: Uint, Int => sub, Float => fsub;
simd_mul: Uint, Int => mul, Float => fmul;
@@ -1687,6 +1659,25 @@
simd_fmin: Float => minnum;
}
+ macro_rules! arith_unary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+ arith_unary! {
+ simd_neg: Int => neg, Float => fneg;
+ }
if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
let lhs = args[0].immediate();
@@ -1714,8 +1705,11 @@
);
let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
- let f = bx.declare_cfn(&llvm_intrinsic, bx.type_func(&[vec_ty, vec_ty], vec_ty));
- llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
+ let f = bx.declare_cfn(
+ &llvm_intrinsic,
+ llvm::UnnamedAddr::No,
+ bx.type_func(&[vec_ty, vec_ty], vec_ty),
+ );
let v = bx.call(f, &[lhs, rhs], None);
return Ok(v);
}
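The rework of `simd_simple_float_intrinsic` above stops pre-declaring every float vector intrinsic in `context.rs` and instead composes the LLVM intrinsic name and declares it on demand. A minimal sketch of the naming scheme it relies on (`llvm.<op>.v<lanes><elem>`), detached from the compiler's types:

fn llvm_vector_intrinsic_name(op: &str, lanes: u64, elem: &str) -> String {
    // Mirrors the `format!("llvm.{0}.v{1}{2}", ...)` call in the hunk above.
    format!("llvm.{}.v{}{}", op, lanes, elem)
}

fn main() {
    assert_eq!(llvm_vector_intrinsic_name("sqrt", 4, "f32"), "llvm.sqrt.v4f32");
    assert_eq!(llvm_vector_intrinsic_name("fma", 2, "f64"), "llvm.fma.v2f64");
}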
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index e82198f..82cd1be 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -131,6 +131,8 @@
ReturnsTwice = 25,
ReadNone = 26,
InaccessibleMemOnly = 27,
+ SanitizeHWAddress = 28,
+ WillReturn = 29,
}
/// LLVMIntPredicate
@@ -238,6 +240,7 @@
Token = 16,
ScalableVector = 17,
BFloat = 18,
+ X86_AMX = 19,
}
impl TypeKind {
@@ -262,6 +265,7 @@
TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token,
TypeKind::ScalableVector => rustc_codegen_ssa::common::TypeKind::ScalableVector,
TypeKind::BFloat => rustc_codegen_ssa::common::TypeKind::BFloat,
+ TypeKind::X86_AMX => rustc_codegen_ssa::common::TypeKind::X86_AMX,
}
}
}
@@ -439,6 +443,8 @@
pub sanitize_memory_recover: bool,
pub sanitize_memory_track_origins: c_int,
pub sanitize_thread: bool,
+ pub sanitize_hwaddress: bool,
+ pub sanitize_hwaddress_recover: bool,
}
/// LLVMRelocMode
@@ -671,9 +677,7 @@
/// array", encoded separately), and source location (start and end positions of the represented
/// code region).
///
- /// Aligns with [llvm::coverage::CounterMappingRegion](https://github.com/rust-lang/llvm-project/blob/rustc/11.0-2020-10-12/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L224-L227)
- /// Important: The Rust struct layout (order and types of fields) must match its C++
- /// counterpart.
+ /// Matches LLVMRustCounterMappingRegion.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct CounterMappingRegion {
@@ -1070,6 +1074,7 @@
pub fn LLVMRustAddDereferenceableAttr(Fn: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddByValAttr(Fn: &Value, index: c_uint, ty: &Type);
+ pub fn LLVMRustAddStructRetAttr(Fn: &Value, index: c_uint, ty: &Type);
pub fn LLVMRustAddFunctionAttribute(Fn: &Value, index: c_uint, attr: Attribute);
pub fn LLVMRustAddFunctionAttrStringValue(
Fn: &Value,
@@ -1105,6 +1110,7 @@
pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddByValCallSiteAttr(Instr: &Value, index: c_uint, ty: &Type);
+ pub fn LLVMRustAddStructRetCallSiteAttr(Instr: &Value, index: c_uint, ty: &Type);
// Operations on load/store instructions (only)
pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool);
@@ -1789,7 +1795,7 @@
NumVirtualFileMappingIDs: c_uint,
Expressions: *const coverage_map::CounterExpression,
NumExpressions: c_uint,
- MappingRegions: *mut coverageinfo::CounterMappingRegion,
+ MappingRegions: *const coverageinfo::CounterMappingRegion,
NumMappingRegions: c_uint,
BufferOut: &RustString,
);
@@ -2128,6 +2134,7 @@
Recover: bool,
) -> &'static mut Pass;
pub fn LLVMRustCreateThreadSanitizerPass() -> &'static mut Pass;
+ pub fn LLVMRustCreateHWAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
pub fn LLVMRustAddPass(PM: &PassManager<'_>, Pass: &'static mut Pass);
pub fn LLVMRustAddLastExtensionPasses(
PMB: &PassManagerBuilder,
@@ -2319,6 +2326,7 @@
pub fn LLVMRustUnsetComdat(V: &Value);
pub fn LLVMRustSetModulePICLevel(M: &Module);
pub fn LLVMRustSetModulePIELevel(M: &Module);
+ pub fn LLVMRustSetModuleCodeModel(M: &Module, Model: CodeModel);
pub fn LLVMRustModuleBufferCreate(M: &Module) -> &'static mut ModuleBuffer;
pub fn LLVMRustModuleBufferPtr(p: &ModuleBuffer) -> *const u8;
pub fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize;
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 544ef38..c7dff41 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -218,13 +218,39 @@
handle_native(name)
}
-pub fn handle_native_features(sess: &Session) -> Vec<String> {
- match sess.opts.cg.target_cpu {
- Some(ref s) => {
- if s != "native" {
- return vec![];
- }
+/// The list of LLVM features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
+/// `--target` and similar).
+// FIXME(nagisa): Cache the output of this somehow? Maybe make this a query? We're calling this
+// for every function that has `#[target_feature]` on it. The global features won't change between
+// the functions; only crates, maybe…
+pub fn llvm_global_features(sess: &Session) -> Vec<String> {
+ // FIXME(nagisa): this should definitely be available more centrally and to other codegen backends.
+ /// These features control behaviour of rustc rather than llvm.
+ const RUSTC_SPECIFIC_FEATURES: &[&str] = &["crt-static"];
+ // Features that come earlier are overridden by conflicting features later in the string.
+ // Typically we'll want more explicit settings to override the implicit ones, so:
+ //
+ // * Features from -Ctarget-cpu=*; are overridden by [^1]
+ // * Features implied by --target; are overridden by
+ // * Features from -Ctarget-feature; are overridden by
+ // * function specific features.
+ //
+ // [^1]: target-cpu=native is handled here, other target-cpu values are handled implicitly
+ // through LLVM TargetMachine implementation.
+ //
+ // FIXME(nagisa): it isn't clear what the best interaction between features implied by
+ // `-Ctarget-cpu` and `--target` is. On one hand, you'd expect CLI arguments to always
+ // override anything that's implicit, so e.g. when there's no `--target` flag, features implied
+ // by the host target are overridden by `-Ctarget-cpu=*`. On the other hand, what about when both
+ // `--target` and `-Ctarget-cpu=*` are specified? Both then imply some target features and both
+ // flags are specified by the user on the CLI. It isn't clear-cut which order of precedence
+ // should apply in cases like these.
+ let mut features = vec![];
+
+ // -Ctarget-cpu=native
+ match sess.opts.cg.target_cpu {
+ Some(ref s) if s == "native" => {
let features_string = unsafe {
let ptr = llvm::LLVMGetHostCPUFeatures();
let features_string = if !ptr.is_null() {
@@ -242,11 +268,31 @@
features_string
};
-
- features_string.split(",").map(|s| s.to_owned()).collect()
+ features.extend(features_string.split(",").map(String::from));
}
- None => vec![],
- }
+ Some(_) | None => {}
+ };
+
+ // Features implied by an implicit or explicit `--target`.
+ features.extend(
+ sess.target
+ .features
+ .split(',')
+ .filter(|f| !f.is_empty() && !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)))
+ .map(String::from),
+ );
+
+ // -Ctarget-feature
+ features.extend(
+ sess.opts
+ .cg
+ .target_feature
+ .split(',')
+ .filter(|f| !f.is_empty() && !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)))
+ .map(String::from),
+ );
+
+ features
}
pub fn tune_cpu(sess: &Session) -> Option<&str> {
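To make the precedence comment in `llvm_global_features` concrete: LLVM resolves conflicts within a feature string by letting later entries win, so global features are emitted first and per-function features last. A small sketch with made-up feature names, not tied to any particular target:

fn main() {
    let global_features = vec!["-avx2".to_string(), "+sse4.2".to_string()];
    let function_features = vec!["+avx2".to_string()];

    let mut all = global_features;
    all.extend(function_features);

    // When LLVM parses this, the later "+avx2" overrides the earlier "-avx2".
    assert_eq!(all.join(","), "-avx2,+sse4.2,+avx2");
}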
diff --git a/compiler/rustc_codegen_llvm/src/metadata.rs b/compiler/rustc_codegen_llvm/src/metadata.rs
index 3912d6a..b007df5 100644
--- a/compiler/rustc_codegen_llvm/src/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/metadata.rs
@@ -65,8 +65,8 @@
while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False {
let mut name_buf = None;
let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
- let name = name_buf.map_or(
- String::new(), // We got a NULL ptr, ignore `name_len`.
+ let name = name_buf.map_or_else(
+ String::new, // We got a NULL ptr, ignore `name_len`.
|buf| {
String::from_utf8(
slice::from_raw_parts(buf.as_ptr() as *const u8, name_len as usize)
diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml
index 835f906..15dbbbd 100644
--- a/compiler/rustc_codegen_ssa/Cargo.toml
+++ b/compiler/rustc_codegen_ssa/Cargo.toml
@@ -11,8 +11,7 @@
bitflags = "1.2.1"
cc = "1.0.1"
itertools = "0.9"
-num_cpus = "1.0"
-memmap = "0.7"
+memmap2 = "0.2.1"
tracing = "0.1"
libc = "0.2.50"
jobserver = "0.1.11"
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index 8bc4e64..b11821b 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -1,5 +1,6 @@
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::temp_dir::MaybeTempDir;
+use rustc_errors::Handler;
use rustc_fs_util::fix_windows_verbatim_for_gcc;
use rustc_hir::def_id::CrateNum;
use rustc_middle::middle::cstore::{EncodedMetadata, LibSource};
@@ -34,9 +35,11 @@
use std::process::{ExitStatus, Output, Stdio};
use std::{ascii, char, env, fmt, fs, io, mem, str};
-pub fn remove(sess: &Session, path: &Path) {
+pub fn ensure_removed(diag_handler: &Handler, path: &Path) {
if let Err(e) = fs::remove_file(path) {
- sess.err(&format!("failed to remove {}: {}", path.display(), e));
+ if e.kind() != io::ErrorKind::NotFound {
+ diag_handler.err(&format!("failed to remove {}: {}", path.display(), e));
+ }
}
}
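The `ensure_removed` helper above differs from the old `remove` in exactly one way: a file that is already gone is no longer reported as an error. A standalone sketch of that behaviour, with the rustc `Handler` replaced by a plain closure so it runs outside the compiler (the closure and file name are illustrative only):

use std::fs;
use std::io;
use std::path::Path;

fn ensure_removed(report_err: impl Fn(&str), path: &Path) {
    if let Err(e) = fs::remove_file(path) {
        // Only genuine failures are reported; "not found" means the goal
        // (the file being absent) is already met.
        if e.kind() != io::ErrorKind::NotFound {
            report_err(&format!("failed to remove {}: {}", path.display(), e));
        }
    }
}

fn main() {
    // Removing a path that never existed is now silent instead of an error.
    ensure_removed(|msg| eprintln!("{}", msg), Path::new("does-not-exist.o"));
}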
@@ -112,11 +115,11 @@
if !sess.opts.cg.save_temps {
let remove_temps_from_module = |module: &CompiledModule| {
if let Some(ref obj) = module.object {
- remove(sess, obj);
+ ensure_removed(sess.diagnostic(), obj);
}
if let Some(ref obj) = module.dwarf_object {
- remove(sess, obj);
+ ensure_removed(sess.diagnostic(), obj);
}
};
@@ -178,16 +181,16 @@
let original_path = tool.path();
if let Some(ref root_lib_path) = original_path.ancestors().nth(4) {
let arch = match t.arch.as_str() {
- "x86_64" => Some("x64".to_string()),
- "x86" => Some("x86".to_string()),
- "aarch64" => Some("arm64".to_string()),
- "arm" => Some("arm".to_string()),
+ "x86_64" => Some("x64"),
+ "x86" => Some("x86"),
+ "aarch64" => Some("arm64"),
+ "arm" => Some("arm"),
_ => None,
};
if let Some(ref a) = arch {
// FIXME: Move this to `fn linker_with_args`.
let mut arg = OsString::from("/LIBPATH:");
- arg.push(format!("{}\\lib\\{}\\store", root_lib_path.display(), a.to_string()));
+ arg.push(format!("{}\\lib\\{}\\store", root_lib_path.display(), a));
cmd.arg(&arg);
} else {
warn!("arch is not supported");
@@ -708,7 +711,7 @@
status.signal() == Some(libc::SIGILL)
}
- #[cfg(windows)]
+ #[cfg(not(unix))]
fn is_illegal_instruction(_status: &ExitStatus) -> bool {
false
}
@@ -893,6 +896,9 @@
if sanitizer.contains(SanitizerSet::THREAD) {
link_sanitizer_runtime(sess, linker, "tsan");
}
+ if sanitizer.contains(SanitizerSet::HWADDRESS) {
+ link_sanitizer_runtime(sess, linker, "hwasan");
+ }
}
fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) {
@@ -1192,7 +1198,7 @@
flush_linked_file(&output, out_filename)?;
return output;
- #[cfg(unix)]
+ #[cfg(not(windows))]
fn flush_linked_file(_: &io::Result<Output>, _: &Path) -> io::Result<()> {
Ok(())
}
@@ -1232,6 +1238,11 @@
err.raw_os_error() == Some(ERROR_FILENAME_EXCED_RANGE)
}
+ #[cfg(not(any(unix, windows)))]
+ fn command_line_too_big(_: &io::Error) -> bool {
+ false
+ }
+
struct Escape<'a> {
arg: &'a str,
is_like_msvc: bool,
@@ -2076,7 +2087,7 @@
let filestem = cratepath.file_stem().unwrap().to_str().unwrap();
cmd.link_rust_dylib(
Symbol::intern(&unlib(&sess.target, filestem)),
- parent.unwrap_or(Path::new("")),
+ parent.unwrap_or_else(|| Path::new("")),
);
}
}
@@ -2187,6 +2198,7 @@
("x86_64", "tvos") => "appletvsimulator",
("arm", "ios") => "iphoneos",
("aarch64", "ios") if llvm_target.contains("macabi") => "macosx",
+ ("aarch64", "ios") if llvm_target.contains("sim") => "iphonesimulator",
("aarch64", "ios") => "iphoneos",
("x86", "ios") => "iphonesimulator",
("x86_64", "ios") if llvm_target.contains("macabi") => "macosx",
diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs
index 0d7f444..c6aea22 100644
--- a/compiler/rustc_codegen_ssa/src/back/lto.rs
+++ b/compiler/rustc_codegen_ssa/src/back/lto.rs
@@ -93,7 +93,7 @@
pub enum SerializedModule<M: ModuleBufferMethods> {
Local(M),
FromRlib(Vec<u8>),
- FromUncompressedFile(memmap::Mmap),
+ FromUncompressedFile(memmap2::Mmap),
}
impl<M: ModuleBufferMethods> SerializedModule<M> {
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath.rs b/compiler/rustc_codegen_ssa/src/back/rpath.rs
index 005d2ef..5f21046 100644
--- a/compiler/rustc_codegen_ssa/src/back/rpath.rs
+++ b/compiler/rustc_codegen_ssa/src/back/rpath.rs
@@ -24,7 +24,7 @@
debug!("preparing the RPATH!");
- let libs = config.used_crates.clone();
+ let libs = config.used_crates;
let libs = libs.iter().filter_map(|&(_, ref l)| l.option()).collect::<Vec<_>>();
let rpaths = get_rpaths(config, &libs);
let mut flags = rpaths_to_flags(&rpaths);
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 6aef5cb..490b3d3 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -1,4 +1,4 @@
-use super::link::{self, remove};
+use super::link::{self, ensure_removed};
use super::linker::LinkerInfo;
use super::lto::{self, SerializedModule};
use super::symbol_export::symbol_name_for_instance_in_crate;
@@ -288,7 +288,7 @@
module_name: &str,
) -> TargetMachineFactoryConfig {
let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
- cgcx.output_filenames.split_dwarf_filename(cgcx.split_debuginfo, Some(module_name))
+ cgcx.output_filenames.split_dwarf_path(cgcx.split_debuginfo, Some(module_name))
} else {
None
};
@@ -433,12 +433,10 @@
let sess = tcx.sess;
let crate_name = tcx.crate_name(LOCAL_CRATE);
- let no_builtins = tcx.sess.contains_name(&tcx.hir().krate().item.attrs, sym::no_builtins);
- let is_compiler_builtins =
- tcx.sess.contains_name(&tcx.hir().krate().item.attrs, sym::compiler_builtins);
- let subsystem = tcx
- .sess
- .first_attr_value_str_by_name(&tcx.hir().krate().item.attrs, sym::windows_subsystem);
+ let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
+ let no_builtins = tcx.sess.contains_name(crate_attrs, sym::no_builtins);
+ let is_compiler_builtins = tcx.sess.contains_name(crate_attrs, sym::compiler_builtins);
+ let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
let windows_subsystem = subsystem.map(|subsystem| {
if subsystem != sym::windows && subsystem != sym::console {
tcx.sess.fatal(&format!(
@@ -543,7 +541,7 @@
copy_gracefully(&path, &crate_output.path(output_type));
if !sess.opts.cg.save_temps && !keep_numbered {
// The user just wants `foo.x`, not `foo.#module-name#.x`.
- remove(sess, &path);
+ ensure_removed(sess.diagnostic(), &path);
}
} else {
let ext = crate_output
@@ -642,19 +640,19 @@
for module in compiled_modules.modules.iter() {
if let Some(ref path) = module.object {
if !keep_numbered_objects {
- remove(sess, path);
+ ensure_removed(sess.diagnostic(), path);
}
}
if let Some(ref path) = module.dwarf_object {
if !keep_numbered_objects {
- remove(sess, path);
+ ensure_removed(sess.diagnostic(), path);
}
}
if let Some(ref path) = module.bytecode {
if !keep_numbered_bitcode {
- remove(sess, path);
+ ensure_removed(sess.diagnostic(), path);
}
}
}
@@ -662,13 +660,13 @@
if !user_wants_bitcode {
if let Some(ref metadata_module) = compiled_modules.metadata_module {
if let Some(ref path) = metadata_module.bytecode {
- remove(sess, &path);
+ ensure_removed(sess.diagnostic(), &path);
}
}
if let Some(ref allocator_module) = compiled_modules.allocator_module {
if let Some(ref path) = allocator_module.bytecode {
- remove(sess, path);
+ ensure_removed(sess.diagnostic(), path);
}
}
}
@@ -712,6 +710,33 @@
}
}
}
+
+ /// Generate a short description of this work item suitable for use as a thread name.
+ fn short_description(&self) -> String {
+ // `pthread_setname()` on *nix is limited to 15 characters and longer names are ignored.
+ // Use very short descriptions in this case to maximize the space available for the module name.
+ // Windows does not have that limitation so use slightly more descriptive names there.
+ match self {
+ WorkItem::Optimize(m) => {
+ #[cfg(windows)]
+ return format!("optimize module {}", m.name);
+ #[cfg(not(windows))]
+ return format!("opt {}", m.name);
+ }
+ WorkItem::CopyPostLtoArtifacts(m) => {
+ #[cfg(windows)]
+ return format!("copy LTO artifacts for {}", m.name);
+ #[cfg(not(windows))]
+ return format!("copy {}", m.name);
+ }
+ WorkItem::LTO(m) => {
+ #[cfg(windows)]
+ return format!("LTO module {}", m.name());
+ #[cfg(not(windows))]
+ return format!("LTO {}", m.name());
+ }
+ }
+ }
}
enum WorkItemResult<B: WriteBackendMethods> {
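`short_description` exists so the string can be used as a worker thread name; the spawning code itself is not part of this hunk, so the snippet below is only an illustrative sketch of how such a name might be applied, echoing the 15-byte limit mentioned in the comment above:

use std::thread;

fn main() {
    // Hypothetical description, e.g. what `WorkItem::Optimize` would produce
    // on a non-Windows host: short enough to survive pthread's name limit.
    let description = String::from("opt regex.f9a1");

    thread::Builder::new()
        .name(description)
        .spawn(|| {
            // ... run the work item ...
        })
        .unwrap()
        .join()
        .unwrap();
}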
@@ -735,7 +760,7 @@
match work_item {
WorkItem::Optimize(module) => execute_optimize_work_item(cgcx, module, module_config),
WorkItem::CopyPostLtoArtifacts(module) => {
- execute_copy_from_cache_work_item(cgcx, module, module_config)
+ Ok(execute_copy_from_cache_work_item(cgcx, module, module_config))
}
WorkItem::LTO(module) => execute_lto_work_item(cgcx, module, module_config),
}
@@ -844,7 +869,7 @@
cgcx: &CodegenContext<B>,
module: CachedModuleCodegen,
module_config: &ModuleConfig,
-) -> Result<WorkItemResult<B>, FatalError> {
+) -> WorkItemResult<B> {
let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
let mut object = None;
if let Some(saved_file) = module.source.saved_file {
@@ -870,13 +895,13 @@
assert_eq!(object.is_some(), module_config.emit_obj != EmitObj::None);
- Ok(WorkItemResult::Compiled(CompiledModule {
+ WorkItemResult::Compiled(CompiledModule {
name: module.name,
kind: ModuleKind::Regular,
object,
dwarf_object: None,
bytecode: None,
- }))
+ })
}
fn execute_lto_work_item<B: ExtraBackendMethods>(
@@ -1193,7 +1218,6 @@
// necessary. There's already optimizations in place to avoid sending work
// back to the coordinator if LTO isn't requested.
return thread::spawn(move || {
- let max_workers = num_cpus::get();
let mut worker_id_counter = 0;
let mut free_worker_ids = Vec::new();
let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
@@ -1253,7 +1277,17 @@
// For codegenning more CGU or for running them through LLVM.
if !codegen_done {
if main_thread_worker_state == MainThreadWorkerState::Idle {
- if !queue_full_enough(work_items.len(), running, max_workers) {
+ // Compute the number of workers that will be running once we've taken as many
+ // items from the work queue as we can, plus one for the main thread. It's not
+ // critically important that we use this instead of just `running`, but it
+ // prevents the `queue_full_enough` heuristic from fluctuating just because a
+ // worker finished up and we decreased the `running` count, even though we're
+ // just going to increase it right after this when we put a new worker to work.
+ let extra_tokens = tokens.len().checked_sub(running).unwrap();
+ let additional_running = std::cmp::min(extra_tokens, work_items.len());
+ let anticipated_running = running + additional_running + 1;
+
+ if !queue_full_enough(work_items.len(), anticipated_running) {
// The queue is not full enough, codegen more items:
if codegen_worker_send.send(Message::CodegenItem).is_err() {
panic!("Could not send Message::CodegenItem to main thread")
@@ -1529,13 +1563,59 @@
// A heuristic that determines if we have enough LLVM WorkItems in the
// queue so that the main thread can do LLVM work instead of codegen
- fn queue_full_enough(
- items_in_queue: usize,
- workers_running: usize,
- max_workers: usize,
- ) -> bool {
- // Tune me, plz.
- items_in_queue > 0 && items_in_queue >= max_workers.saturating_sub(workers_running / 2)
+ fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
+ // This heuristic scales ahead-of-time codegen according to available
+ // concurrency, as measured by `workers_running`. The idea is that the
+ // more concurrency we have available, the more demand there will be for
+ // work items, and the fuller the queue should be kept to meet demand.
+ // An important property of this approach is that we codegen ahead of
+ // time only as much as necessary, so as to keep fewer LLVM modules in
+ // memory at once, thereby reducing memory consumption.
+ //
+ // When the number of workers running is less than the max concurrency
+ // available to us, this heuristic can cause us to instruct the main
+ // thread to work on an LLVM item (that is, tell it to "LLVM") instead
+ // of codegen, even though it seems like it *should* be codegenning so
+ // that we can create more work items and spawn more LLVM workers.
+ //
+ // But this is not a problem. When the main thread is told to LLVM,
+ // according to this heuristic and how work is scheduled, there is
+ // always at least one item in the queue, and therefore at least one
+ // pending jobserver token request. If there *is* more concurrency
+ // available, we will immediately receive a token, which will upgrade
+ // the main thread's LLVM worker to a real one (conceptually), and free
+ // up the main thread to codegen if necessary. On the other hand, if
+ // there isn't more concurrency, then the main thread working on an LLVM
+ // item is appropriate, as long as the queue is full enough for demand.
+ //
+ // Speaking of which, how full should we keep the queue? Probably less
+ // full than you'd think. A lot has to go wrong for the queue not to be
+ // full enough and for that to have a negative effect on compile times.
+ //
+ // Workers are unlikely to finish at exactly the same time, so when one
+ // finishes and takes another work item off the queue, we often have
+ // ample time to codegen at that point before the next worker finishes.
+ // But suppose that codegen takes so long that the workers exhaust the
+ // queue, and we have one or more workers that have nothing to work on.
+ // Well, it might not be so bad. Of all the LLVM modules we create and
+ // optimize, one has to finish last. It's not necessarily the case that
+ // by losing some concurrency for a moment, we delay the point at which
+ // that last LLVM module is finished and the rest of compilation can
+ // proceed. Also, when we can't take advantage of some concurrency, we
+ // give tokens back to the job server. That enables some other rustc to
+ // potentially make use of the available concurrency. That could even
+ // *decrease* overall compile time if we're lucky. But yes, if no other
+ // rustc can make use of the concurrency, then we've squandered it.
+ //
+ // However, keeping the queue full is also beneficial when we have a
+ // surge in available concurrency. Then items can be taken from the
+ // queue immediately, without having to wait for codegen.
+ //
+ // So, the heuristic below tries to keep one item in the queue for every
+ // four running workers. Based on limited benchmarking, this appears to
+ // be more than sufficient to avoid increasing compilation times.
+ let quarter_of_workers = workers_running - 3 * workers_running / 4;
+ items_in_queue > 0 && items_in_queue >= quarter_of_workers
}
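For concreteness, a standalone sketch (illustration only, not from the patch) of what the final expression evaluates to: `workers_running - 3 * workers_running / 4` is a quarter of the running workers, rounded up, so at least one queued item is required as soon as any worker is running.

```rust
fn quarter_of_workers(workers_running: usize) -> usize {
    workers_running - 3 * workers_running / 4
}

fn main() {
    // One queued item per four running workers, rounded up.
    assert_eq!(quarter_of_workers(1), 1);
    assert_eq!(quarter_of_workers(4), 1);
    assert_eq!(quarter_of_workers(8), 2);
    assert_eq!(quarter_of_workers(17), 5); // ceil(17 / 4)
}
```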
fn maybe_start_llvm_timer<'a>(
@@ -1554,56 +1634,59 @@
pub struct WorkerFatalError;
fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) {
- thread::spawn(move || {
- // Set up a destructor which will fire off a message that we're done as
- // we exit.
- struct Bomb<B: ExtraBackendMethods> {
- coordinator_send: Sender<Box<dyn Any + Send>>,
- result: Option<Result<WorkItemResult<B>, FatalError>>,
- worker_id: usize,
- }
- impl<B: ExtraBackendMethods> Drop for Bomb<B> {
- fn drop(&mut self) {
- let worker_id = self.worker_id;
- let msg = match self.result.take() {
- Some(Ok(WorkItemResult::Compiled(m))) => {
- Message::Done::<B> { result: Ok(m), worker_id }
- }
- Some(Ok(WorkItemResult::NeedsLink(m))) => {
- Message::NeedsLink::<B> { module: m, worker_id }
- }
- Some(Ok(WorkItemResult::NeedsFatLTO(m))) => {
- Message::NeedsFatLTO::<B> { result: m, worker_id }
- }
- Some(Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))) => {
- Message::NeedsThinLTO::<B> { name, thin_buffer, worker_id }
- }
- Some(Err(FatalError)) => {
- Message::Done::<B> { result: Err(Some(WorkerFatalError)), worker_id }
- }
- None => Message::Done::<B> { result: Err(None), worker_id },
- };
- drop(self.coordinator_send.send(Box::new(msg)));
+ let builder = thread::Builder::new().name(work.short_description());
+ builder
+ .spawn(move || {
+ // Set up a destructor which will fire off a message that we're done as
+ // we exit.
+ struct Bomb<B: ExtraBackendMethods> {
+ coordinator_send: Sender<Box<dyn Any + Send>>,
+ result: Option<Result<WorkItemResult<B>, FatalError>>,
+ worker_id: usize,
}
- }
+ impl<B: ExtraBackendMethods> Drop for Bomb<B> {
+ fn drop(&mut self) {
+ let worker_id = self.worker_id;
+ let msg = match self.result.take() {
+ Some(Ok(WorkItemResult::Compiled(m))) => {
+ Message::Done::<B> { result: Ok(m), worker_id }
+ }
+ Some(Ok(WorkItemResult::NeedsLink(m))) => {
+ Message::NeedsLink::<B> { module: m, worker_id }
+ }
+ Some(Ok(WorkItemResult::NeedsFatLTO(m))) => {
+ Message::NeedsFatLTO::<B> { result: m, worker_id }
+ }
+ Some(Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))) => {
+ Message::NeedsThinLTO::<B> { name, thin_buffer, worker_id }
+ }
+ Some(Err(FatalError)) => {
+ Message::Done::<B> { result: Err(Some(WorkerFatalError)), worker_id }
+ }
+ None => Message::Done::<B> { result: Err(None), worker_id },
+ };
+ drop(self.coordinator_send.send(Box::new(msg)));
+ }
+ }
- let mut bomb = Bomb::<B> {
- coordinator_send: cgcx.coordinator_send.clone(),
- result: None,
- worker_id: cgcx.worker,
- };
+ let mut bomb = Bomb::<B> {
+ coordinator_send: cgcx.coordinator_send.clone(),
+ result: None,
+ worker_id: cgcx.worker,
+ };
- // Execute the work itself, and if it finishes successfully then flag
- // ourselves as a success as well.
- //
- // Note that we ignore any `FatalError` coming out of `execute_work_item`,
- // as a diagnostic was already sent off to the main thread - just
- // surface that there was an error in this worker.
- bomb.result = {
- let _prof_timer = work.start_profiling(&cgcx);
- Some(execute_work_item(&cgcx, work))
- };
- });
+ // Execute the work itself, and if it finishes successfully then flag
+ // ourselves as a success as well.
+ //
+ // Note that we ignore any `FatalError` coming out of `execute_work_item`,
+ // as a diagnostic was already sent off to the main thread - just
+ // surface that there was an error in this worker.
+ bomb.result = {
+ let _prof_timer = work.start_profiling(&cgcx);
+ Some(execute_work_item(&cgcx, work))
+ };
+ })
+ .expect("failed to spawn thread");
}
enum SharedEmitterMessage {
@@ -1875,7 +1958,7 @@
.unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));
let mmap = unsafe {
- memmap::Mmap::map(&file).unwrap_or_else(|e| {
+ memmap2::Mmap::map(&file).unwrap_or_else(|e| {
panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
})
};
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 658ad3c..08e31c3 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -867,7 +867,7 @@
cgu.name()
);
- if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
+ if tcx.try_mark_green(&dep_node) {
// We can re-use either the pre- or the post-thinlto state. If no LTO is
// being performed then we can use post-LTO artifacts, otherwise we must
// reuse pre-LTO artifacts
diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs
index 780b1d2..afd83bf 100644
--- a/compiler/rustc_codegen_ssa/src/common.rs
+++ b/compiler/rustc_codegen_ssa/src/common.rs
@@ -95,6 +95,7 @@
Token,
ScalableVector,
BFloat,
+ X86_AMX,
}
// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
index 549b8d4..af6482f 100644
--- a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
@@ -8,7 +8,7 @@
use rustc_middle::ty::Instance;
use rustc_middle::ty::TyCtxt;
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct Expression {
lhs: ExpressionOperandId,
op: Op,
@@ -64,7 +64,9 @@
/// Adds a code region to be counted by an injected counter intrinsic.
pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) {
- self.counters[id].replace(region).expect_none("add_counter called with duplicate `id`");
+ if let Some(previous_region) = self.counters[id].replace(region.clone()) {
+ assert_eq!(previous_region, region, "add_counter: code region for id changed");
+ }
}
/// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
@@ -94,9 +96,18 @@
expression_id, lhs, op, rhs, region
);
let expression_index = self.expression_index(u32::from(expression_id));
- self.expressions[expression_index]
- .replace(Expression { lhs, op, rhs, region })
- .expect_none("add_counter_expression called with duplicate `id_descending_from_max`");
+ if let Some(previous_expression) = self.expressions[expression_index].replace(Expression {
+ lhs,
+ op,
+ rhs,
+ region: region.clone(),
+ }) {
+ assert_eq!(
+ previous_expression,
+ Expression { lhs, op, rhs, region },
+ "add_counter_expression: expression for id changed"
+ );
+ }
}
/// Add a region that will be marked as "unreachable", with a constant "zero counter".
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index 0307117..ab9ea2f 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -1,6 +1,5 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(bool_to_option)]
-#![feature(option_expect_none)]
#![feature(box_patterns)]
#![feature(drain_filter)]
#![feature(try_blocks)]
@@ -9,6 +8,7 @@
#![feature(or_patterns)]
#![feature(associated_type_bounds)]
#![recursion_limit = "256"]
+#![feature(box_syntax)]
//! This crate contains codegen code that is used by all codegen backends (LLVM and others).
//! The backend-agnostic functions of this crate use functions defined in various traits that
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index fd0ff5b..38e9281 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -199,7 +199,7 @@
}
self.visit_local(&place_ref.local, context, location);
- self.visit_projection(place_ref.local, place_ref.projection, context, location);
+ self.visit_projection(*place_ref, context, location);
}
}
}
@@ -231,7 +231,7 @@
fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
let check = match terminator.kind {
mir::TerminatorKind::Call { func: mir::Operand::Constant(ref c), ref args, .. } => {
- match *c.literal.ty.kind() {
+ match *c.ty().kind() {
ty::FnDef(did, _) => Some((did, args)),
_ => None,
}
@@ -281,7 +281,18 @@
Some(assignment_location) => {
assignment_location.dominates(location, &self.dominators)
}
- None => false,
+ None => {
+ debug!("No first assignment found for {:?}", local);
+ // We have not seen any assignment to the local yet,
+ // but before marking not_ssa, check if it is a ZST,
+ // in which case we don't need to initialize the local.
+ let ty = self.fx.mir.local_decls[local].ty;
+ let ty = self.fx.monomorphize(ty);
+
+ let is_zst = self.fx.cx.layout_of(ty).is_zst();
+ debug!("is_zst: {}", is_zst);
+ is_zst
+ }
};
if !ssa_read {
self.not_ssa(local);
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index c821908..04225dd 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -146,24 +146,6 @@
}
}
}
-
- // Generate sideeffect intrinsic if jumping to any of the targets can form
- // a loop.
- fn maybe_sideeffect<Bx: BuilderMethods<'a, 'tcx>>(
- &self,
- mir: &'tcx mir::Body<'tcx>,
- bx: &mut Bx,
- targets: &[mir::BasicBlock],
- ) {
- if bx.tcx().sess.opts.debugging_opts.insert_sideeffect {
- if targets.iter().any(|&target| {
- target <= self.bb
- && target.start_location().is_predecessor_of(self.bb.start_location(), mir)
- }) {
- bx.sideeffect(false);
- }
- }
- }
}
/// Codegen implementations for some terminator variants.
@@ -198,8 +180,6 @@
let discr = self.codegen_operand(&mut bx, &discr);
// `switch_ty` is redundant, sanity-check that.
assert_eq!(discr.layout.ty, switch_ty);
- helper.maybe_sideeffect(self.mir, &mut bx, targets.all_targets());
-
let mut target_iter = targets.iter();
if target_iter.len() == 1 {
// If there are two targets (one conditional, one fallback), emit br instead of switch
@@ -308,7 +288,6 @@
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
// we don't actually need to drop anything.
- helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
helper.funclet_br(self, &mut bx, target);
return;
}
@@ -337,7 +316,6 @@
}
_ => (bx.get_fn_addr(drop_fn), FnAbi::of_instance(&bx, drop_fn, &[])),
};
- helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
helper.do_call(
self,
&mut bx,
@@ -379,7 +357,6 @@
// Don't codegen the panic block if success is known.
if const_cond == Some(expected) {
- helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
helper.funclet_br(self, &mut bx, target);
return;
}
@@ -390,7 +367,6 @@
// Create the failure block and the conditional branch to it.
let lltarget = helper.llblock(self, target);
let panic_block = self.new_block("panic");
- helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
if expected {
bx.cond_br(cond, lltarget, panic_block.llbb());
} else {
@@ -491,9 +467,6 @@
let fn_abi = FnAbi::of_instance(bx, instance, &[]);
let llfn = bx.get_fn_addr(instance);
- if let Some((_, target)) = destination.as_ref() {
- helper.maybe_sideeffect(self.mir, bx, &[*target]);
- }
// Codegen the actual panic invoke/call.
helper.do_call(
self,
@@ -507,7 +480,6 @@
} else {
// a NOP
let target = destination.as_ref().unwrap().1;
- helper.maybe_sideeffect(self.mir, bx, &[target]);
helper.funclet_br(self, bx, target)
}
true
@@ -551,7 +523,6 @@
if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
// Empty drop glue; a no-op.
let &(_, target) = destination.as_ref().unwrap();
- helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
helper.funclet_br(self, &mut bx, target);
return;
}
@@ -586,7 +557,6 @@
if let Some(destination_ref) = destination.as_ref() {
let &(dest, target) = destination_ref;
self.codegen_transmute(&mut bx, &args[0], dest);
- helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
helper.funclet_br(self, &mut bx, target);
} else {
// If we are trying to transmute to an uninhabited type,
@@ -634,74 +604,73 @@
location.val.store(&mut bx, tmp);
}
self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
-
- helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
helper.funclet_br(self, &mut bx, *target);
}
return;
}
- if intrinsic.is_some() && intrinsic != Some(sym::drop_in_place) {
- let intrinsic = intrinsic.unwrap();
- let dest = match ret_dest {
- _ if fn_abi.ret.is_indirect() => llargs[0],
- ReturnDest::Nothing => {
- bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
- }
- ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
- ReturnDest::DirectOperand(_) => {
- bug!("Cannot use direct operand with an intrinsic call")
- }
- };
-
- let args: Vec<_> = args
- .iter()
- .enumerate()
- .map(|(i, arg)| {
- // The indices passed to simd_shuffle* in the
- // third argument must be constant. This is
- // checked by const-qualification, which also
- // promotes any complex rvalues to constants.
- if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
- if let mir::Operand::Constant(constant) = arg {
- let c = self.eval_mir_constant(constant);
- let (llval, ty) = self.simd_shuffle_indices(
- &bx,
- constant.span,
- constant.literal.ty,
- c,
- );
- return OperandRef { val: Immediate(llval), layout: bx.layout_of(ty) };
- } else {
- span_bug!(span, "shuffle indices must be constant");
- }
+ match intrinsic {
+ None | Some(sym::drop_in_place) => {}
+ Some(sym::copy_nonoverlapping) => unreachable!(),
+ Some(intrinsic) => {
+ let dest = match ret_dest {
+ _ if fn_abi.ret.is_indirect() => llargs[0],
+ ReturnDest::Nothing => {
+ bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
}
+ ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+ ReturnDest::DirectOperand(_) => {
+ bug!("Cannot use direct operand with an intrinsic call")
+ }
+ };
- self.codegen_operand(&mut bx, arg)
- })
- .collect();
+ let args: Vec<_> = args
+ .iter()
+ .enumerate()
+ .map(|(i, arg)| {
+ // The indices passed to simd_shuffle* in the
+ // third argument must be constant. This is
+ // checked by const-qualification, which also
+ // promotes any complex rvalues to constants.
+ if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+ if let mir::Operand::Constant(constant) = arg {
+ let c = self.eval_mir_constant(constant);
+ let (llval, ty) =
+ self.simd_shuffle_indices(&bx, constant.span, constant.ty(), c);
+ return OperandRef {
+ val: Immediate(llval),
+ layout: bx.layout_of(ty),
+ };
+ } else {
+ span_bug!(span, "shuffle indices must be constant");
+ }
+ }
- Self::codegen_intrinsic_call(
- &mut bx,
- *instance.as_ref().unwrap(),
- &fn_abi,
- &args,
- dest,
- span,
- );
+ self.codegen_operand(&mut bx, arg)
+ })
+ .collect();
- if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
- self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+ Self::codegen_intrinsic_call(
+ &mut bx,
+ *instance.as_ref().unwrap(),
+ &fn_abi,
+ &args,
+ dest,
+ span,
+ );
+
+ if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+ self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+ }
+
+ if let Some((_, target)) = *destination {
+ helper.funclet_br(self, &mut bx, target);
+ } else {
+ bx.unreachable();
+ }
+
+ return;
}
-
- if let Some((_, target)) = *destination {
- helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
- helper.funclet_br(self, &mut bx, target);
- } else {
- bx.unreachable();
- }
-
- return;
}
// Split the rust-call tupled arguments off.
@@ -709,7 +678,7 @@
let (tup, args) = args.split_last().unwrap();
(args, Some(tup))
} else {
- (&args[..], None)
+ (args, None)
};
'make_args: for (i, arg) in first_args.iter().enumerate() {
@@ -811,9 +780,6 @@
_ => span_bug!(span, "no llfn for call"),
};
- if let Some((_, target)) = destination.as_ref() {
- helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
- }
helper.do_call(
self,
&mut bx,
@@ -860,7 +826,7 @@
let const_value = self
.eval_mir_constant(constant)
.unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
- let ty = constant.literal.ty;
+ let ty = constant.ty();
let size = bx.layout_of(ty).size;
let scalar = match const_value {
ConstValue::Scalar(s) => s,
@@ -894,7 +860,7 @@
}
mir::InlineAsmOperand::SymFn { ref value } => {
let literal = self.monomorphize(value.literal);
- if let ty::FnDef(def_id, substs) = *literal.ty.kind() {
+ if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
let instance = ty::Instance::resolve_for_fn_ptr(
bx.tcx(),
ty::ParamEnv::reveal_all(),
@@ -963,22 +929,16 @@
mir::TerminatorKind::Goto { target } => {
if bb == target {
- // This is an unconditional branch back to this same basic
- // block. That means we have something like a `loop {}`
- // statement. Currently LLVM miscompiles this because it
- // assumes forward progress. We want to prevent this in all
- // cases, but that has a fairly high cost to compile times
- // currently. Instead, try to handle this specific case
- // which comes up commonly in practice (e.g., in embedded
- // code).
+ // This is an unconditional branch back to this same basic block. That means we
+ // have something like a `loop {}` statement. LLVM versions before 12.0
+ // miscompile this because they assume forward progress. For older versions,
+ // try to handle just this specific case, which comes up commonly in practice
+ // (e.g., in embedded code).
//
- // The `true` here means we insert side effects regardless
- // of -Zinsert-sideeffect being passed on unconditional
- // branching to the same basic block.
- bx.sideeffect(true);
- } else {
- helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+ // NB: the `sideeffect` currently checks for the LLVM version used internally.
+ bx.sideeffect();
}
+
helper.funclet_br(self, &mut bx, target);
}
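The pattern being special-cased above is an unconditional branch from a block back to itself, i.e. the block produced by `loop {}`. A tiny standalone sketch of the kind of code this protects (shown for illustration only; actually running the loop would spin forever):

```rust
fn spin_forever() -> ! {
    // A single basic block that branches back to itself; before LLVM 12 this
    // could be miscompiled under the forward-progress assumption.
    loop {}
}

fn main() {
    // Take the function's address instead of calling it, so the example terminates.
    let _f: fn() -> ! = spin_forever;
}
```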
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index b79a221..aa41acc 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -16,7 +16,7 @@
constant: &mir::Constant<'tcx>,
) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
let val = self.eval_mir_constant(constant)?;
- let ty = self.monomorphize(constant.literal.ty);
+ let ty = self.monomorphize(constant.ty());
Ok(OperandRef::from_const(bx, val, ty))
}
@@ -24,7 +24,12 @@
&self,
constant: &mir::Constant<'tcx>,
) -> Result<ConstValue<'tcx>, ErrorHandled> {
- match self.monomorphize(constant.literal).val {
+ let ct = self.monomorphize(constant.literal);
+ let ct = match ct {
+ mir::ConstantKind::Ty(ct) => ct,
+ mir::ConstantKind::Val(val, _) => return Ok(val),
+ };
+ match ct.val {
ty::ConstKind::Unevaluated(def, substs, promoted) => self
.cx
.tcx()
diff --git a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
index a115d35..5ab1baa 100644
--- a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
@@ -2,27 +2,38 @@
use rustc_middle::mir::coverage::*;
use rustc_middle::mir::Coverage;
+use rustc_middle::mir::SourceScope;
use super::FunctionCx;
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
- pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage) {
+ pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage, scope: SourceScope) {
+ // Determine the instance that coverage data was originally generated for.
+ let scope_data = &self.mir.source_scopes[scope];
+ let instance = if let Some((inlined_instance, _)) = scope_data.inlined {
+ self.monomorphize(inlined_instance)
+ } else if let Some(inlined_scope) = scope_data.inlined_parent_scope {
+ self.monomorphize(self.mir.source_scopes[inlined_scope].inlined.unwrap().0)
+ } else {
+ self.instance
+ };
+
let Coverage { kind, code_region } = coverage;
match kind {
CoverageKind::Counter { function_source_hash, id } => {
- if bx.set_function_source_hash(self.instance, function_source_hash) {
+ if bx.set_function_source_hash(instance, function_source_hash) {
// If `set_function_source_hash()` returned true, the coverage map is enabled,
// so continue adding the counter.
if let Some(code_region) = code_region {
// Note: Some counters do not have code regions, but may still be referenced
// from expressions. In that case, don't add the counter to the coverage map,
// but do inject the counter intrinsic.
- bx.add_coverage_counter(self.instance, id, code_region);
+ bx.add_coverage_counter(instance, id, code_region);
}
- let coverageinfo = bx.tcx().coverageinfo(self.instance.def_id());
+ let coverageinfo = bx.tcx().coverageinfo(instance.def_id());
- let fn_name = bx.create_pgo_func_name_var(self.instance);
+ let fn_name = bx.create_pgo_func_name_var(instance);
let hash = bx.const_u64(function_source_hash);
let num_counters = bx.const_u32(coverageinfo.num_counters);
let index = bx.const_u32(u32::from(id));
@@ -34,11 +45,11 @@
}
}
CoverageKind::Expression { id, lhs, op, rhs } => {
- bx.add_coverage_counter_expression(self.instance, id, lhs, op, rhs, code_region);
+ bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
}
CoverageKind::Unreachable => {
bx.add_coverage_unreachable(
- self.instance,
+ instance,
code_region.expect("unreachable regions always have code regions"),
);
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index f1eae60..a3f20ab 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -320,6 +320,8 @@
) -> Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>> {
let full_debug_info = self.cx.sess().opts.debuginfo == DebugInfo::Full;
+ let target_is_msvc = self.cx.sess().target.is_like_msvc;
+
if !full_debug_info && self.cx.sess().fewer_names() {
return None;
}
@@ -341,18 +343,36 @@
&& var.source_info.scope == mir::OUTERMOST_SOURCE_SCOPE
{
let arg_index = place.local.index() - 1;
-
- // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
- // offset in closures to account for the hidden environment?
- // Also, is this `+ 1` needed at all?
- VariableKind::ArgumentVariable(arg_index + 1)
+ if target_is_msvc {
+ // Rust compiler decomposes every &str or slice argument into two components:
+ // a pointer to the memory address where the data is stored and a usize representing
+ // the length of the str (or slice). These components will later be used to reconstruct
+ // the original argument inside the body of the function that owns it (see the
+ // definition of debug_introduce_local for more details).
+ //
+ // Since the original argument is declared inside a function rather than being passed
+ // in as an argument, it must be marked as a LocalVariable for MSVC debuggers to visualize
+ // its data correctly. (See issue #81894 for an in-depth description of the problem).
+ match *var_ty.kind() {
+ ty::Ref(_, inner_type, _) => match *inner_type.kind() {
+ ty::Slice(_) | ty::Str => VariableKind::LocalVariable,
+ _ => VariableKind::ArgumentVariable(arg_index + 1),
+ },
+ _ => VariableKind::ArgumentVariable(arg_index + 1),
+ }
+ } else {
+ // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
+ // offset in closures to account for the hidden environment?
+ // Also, is this `+ 1` needed at all?
+ VariableKind::ArgumentVariable(arg_index + 1)
+ }
} else {
VariableKind::LocalVariable
};
(var_ty, var_kind)
}
mir::VarDebugInfoContents::Const(c) => {
- let ty = self.monomorphize(c.literal.ty);
+ let ty = self.monomorphize(c.ty());
(ty, VariableKind::LocalVariable)
}
};
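A plain illustration of the decomposition the comment above describes, as a standalone sketch: a `&str` or slice argument is just a data pointer plus a length, which is why MSVC debuggers see two separate values unless the argument is re-declared as a local.

```rust
fn main() {
    let s: &str = "hello";
    // The two components a &str argument decomposes into.
    let (ptr, len): (*const u8, usize) = (s.as_ptr(), s.len());
    println!("str data at {:p}, {} bytes", ptr, len);

    let xs: &[u32] = &[1, 2, 3];
    println!("slice data at {:p}, {} elements", xs.as_ptr(), xs.len());
}
```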
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 80e3ed7..8502309 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -125,19 +125,6 @@
let offset = args[1].immediate();
bx.gep(ptr, &[offset])
}
-
- sym::copy_nonoverlapping => {
- copy_intrinsic(
- bx,
- false,
- false,
- substs.type_at(0),
- args[1].immediate(),
- args[0].immediate(),
- args[2].immediate(),
- );
- return;
- }
sym::copy => {
copy_intrinsic(
bx,
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index d31ecec..3f94547 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -149,8 +149,6 @@
bx.set_personality_fn(cx.eh_personality());
}
- bx.sideeffect(false);
-
let cleanup_kinds = analyze::cleanup_kinds(&mir);
// Allocate a `Block` for every basic block, except
// the start block, if nothing loops back to it.
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index e3a6cab..629cb64 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -325,7 +325,7 @@
let er = scalar.valid_range_exclusive(bx.cx());
if er.end != er.start
- && scalar.valid_range.end() > scalar.valid_range.start()
+ && scalar.valid_range.end() >= scalar.valid_range.start()
{
// We want `table[e as usize ± k]` to not
// have bound checks, and this is the most
@@ -424,7 +424,7 @@
(bx, operand)
}
- mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
+ mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
let lhs = self.codegen_operand(&mut bx, lhs);
let rhs = self.codegen_operand(&mut bx, rhs);
let llresult = match (lhs.val, rhs.val) {
@@ -453,7 +453,7 @@
};
(bx, operand)
}
- mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
+ mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
let lhs = self.codegen_operand(&mut bx, lhs);
let rhs = self.codegen_operand(&mut bx, rhs);
let result = self.codegen_scalar_checked_binop(
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index 6f74ba7..fe7f628 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -112,7 +112,27 @@
bx
}
mir::StatementKind::Coverage(box ref coverage) => {
- self.codegen_coverage(&mut bx, coverage.clone());
+ self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
+ bx
+ }
+ mir::StatementKind::CopyNonOverlapping(box mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ let dst_val = self.codegen_operand(&mut bx, dst);
+ let src_val = self.codegen_operand(&mut bx, src);
+ let count = self.codegen_operand(&mut bx, count).immediate();
+ let pointee_layout = dst_val
+ .layout
+ .pointee_info_at(&mut bx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes()));
+
+ let align = pointee_layout.align;
+ let dst = dst_val.immediate();
+ let src = src_val.immediate();
+ bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
bx
}
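The lowering above turns the statement into a plain memcpy of `count * size_of::<Pointee>()` bytes at the pointee's alignment. A standalone sketch of the equivalent library-level operation (illustration only, not the codegen path itself):

```rust
fn main() {
    let src: [u32; 4] = [1, 2, 3, 4];
    let mut dst: [u32; 4] = [0; 4];
    // Copies src.len() elements, i.e. 4 * size_of::<u32>() = 16 bytes.
    unsafe {
        std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    }
    assert_eq!(dst, [1, 2, 3, 4]);
}
```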
mir::StatementKind::FakeRead(..)
diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs
index 607b545..8e79193 100644
--- a/compiler/rustc_codegen_ssa/src/mono_item.rs
+++ b/compiler/rustc_codegen_ssa/src/mono_item.rs
@@ -30,8 +30,8 @@
MonoItem::Static(def_id) => {
cx.codegen_static(def_id, cx.tcx().is_mutable_static(def_id));
}
- MonoItem::GlobalAsm(hir_id) => {
- let item = cx.tcx().hir().expect_item(hir_id);
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx().hir().item(item_id);
if let hir::ItemKind::GlobalAsm(ref ga) = item.kind {
cx.codegen_global_asm(ga);
} else {
diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
index ac3c99f..777436a 100644
--- a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -20,9 +20,10 @@
fn abort(&mut self);
fn assume(&mut self, val: Self::Value);
fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
- /// Normally, sideeffect is only emitted if -Zinsert-sideeffect is passed;
- /// in some cases though we want to emit it regardless.
- fn sideeffect(&mut self, unconditional: bool);
+ /// Emits a forced side effect.
+ ///
+ /// Currently has any effect only when LLVM versions prior to 12.0 are used as the backend.
+ fn sideeffect(&mut self);
/// Trait method used to inject `va_start` on the "spoofed" `VaListImpl` in
/// Rust defined C-variadic functions.
fn va_start(&mut self, val: Self::Value) -> Self::Value;
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index 23e689fc..2e5a86b 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -19,13 +19,13 @@
cfg-if = "0.1.2"
crossbeam-utils = { version = "0.7", features = ["nightly"] }
stable_deref_trait = "1.0.0"
-rayon = { version = "0.3.0", package = "rustc-rayon" }
-rayon-core = { version = "0.3.0", package = "rustc-rayon-core" }
+rayon = { version = "0.3.1", package = "rustc-rayon" }
+rayon-core = { version = "0.3.1", package = "rustc-rayon-core" }
rustc-hash = "1.1.0"
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_index = { path = "../rustc_index", package = "rustc_index" }
bitflags = "1.2.1"
-measureme = "9.0.0"
+measureme = "9.1.0"
libc = "0.2"
stacker = "0.1.12"
tempfile = "3.0.5"
diff --git a/compiler/rustc_data_structures/src/const_cstr.rs b/compiler/rustc_data_structures/src/const_cstr.rs
deleted file mode 100644
index 1ebcb87..0000000
--- a/compiler/rustc_data_structures/src/const_cstr.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-/// This macro creates a zero-overhead &CStr by adding a NUL terminator to
-/// the string literal passed into it at compile-time. Use it like:
-///
-/// ```
-/// let some_const_cstr = const_cstr!("abc");
-/// ```
-///
-/// The above is roughly equivalent to:
-///
-/// ```
-/// let some_const_cstr = CStr::from_bytes_with_nul(b"abc\0").unwrap()
-/// ```
-///
-/// Note that macro only checks the string literal for internal NULs if
-/// debug-assertions are enabled in order to avoid runtime overhead in release
-/// builds.
-#[macro_export]
-macro_rules! const_cstr {
- ($s:expr) => {{
- use std::ffi::CStr;
-
- let str_plus_nul = concat!($s, "\0");
-
- if cfg!(debug_assertions) {
- CStr::from_bytes_with_nul(str_plus_nul.as_bytes()).unwrap()
- } else {
- unsafe { CStr::from_bytes_with_nul_unchecked(str_plus_nul.as_bytes()) }
- }
- }};
-}
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
index 08c3419..681b49e 100644
--- a/compiler/rustc_data_structures/src/fingerprint.rs
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -7,19 +7,30 @@
use std::mem::{self, MaybeUninit};
#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Copy)]
+#[repr(C)]
pub struct Fingerprint(u64, u64);
impl Fingerprint {
pub const ZERO: Fingerprint = Fingerprint(0, 0);
#[inline]
+ pub fn new(_0: u64, _1: u64) -> Fingerprint {
+ Fingerprint(_0, _1)
+ }
+
+ #[inline]
pub fn from_smaller_hash(hash: u64) -> Fingerprint {
Fingerprint(hash, hash)
}
#[inline]
pub fn to_smaller_hash(&self) -> u64 {
- self.0
+ // Even though both halves of the fingerprint are expected to be good
+ // quality hash values, let's still combine the two values because the
+ // Fingerprints in DefPathHash have the StableCrateId portion which is
+ // the same for all DefPathHashes from the same crate. Combining the
+ // two halves makes sure we get a good quality hash in such cases too.
+ self.0.wrapping_mul(3).wrapping_add(self.1)
}
#[inline]
@@ -92,8 +103,19 @@
impl FingerprintHasher for crate::unhash::Unhasher {
#[inline]
fn write_fingerprint(&mut self, fingerprint: &Fingerprint) {
- // `Unhasher` only wants a single `u64`
- self.write_u64(fingerprint.0);
+ // Even though both halves of the fingerprint are expected to be good
+ // quality hash values, let's still combine the two values because the
+ // Fingerprints in DefPathHash have the StableCrateId portion which is
+ // the same for all DefPathHashes from the same crate. Combining the
+ // two halves makes sure we get a good quality hash in such cases too.
+ //
+ // Since `Unhasher` is used only in the context of HashMaps, it is OK
+ // to combine the two components in an order-independent way (which is
+ // cheaper than the more robust Fingerprint::to_smaller_hash()). For
+ // HashMaps we don't really care if Fingerprint(x,y) and
+ // Fingerprint(y, x) result in the same hash value. Collision
+ // probability will still be much better than with FxHash.
+ self.write_u64(fingerprint.0.wrapping_add(fingerprint.1));
}
}
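To make the difference between the two combinations above concrete, here is a standalone sketch: the `to_smaller_hash` mix is order-dependent, while the cheaper sum used by `Unhasher` is order-independent, which the comment argues is acceptable for HashMap use.

```rust
fn to_smaller_hash(a: u64, b: u64) -> u64 {
    a.wrapping_mul(3).wrapping_add(b)
}

fn unhasher_mix(a: u64, b: u64) -> u64 {
    a.wrapping_add(b)
}

fn main() {
    let (x, y) = (0x1234_5678_9abc_def0_u64, 0x0fed_cba9_8765_4321_u64);
    // Order matters for the more robust mix...
    assert_ne!(to_smaller_hash(x, y), to_smaller_hash(y, x));
    // ...but not for the cheaper HashMap-only combination.
    assert_eq!(unhasher_mix(x, y), unhasher_mix(y, x));
}
```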
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index ad62e3c..1cd1705 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -85,6 +85,10 @@
}
impl<Node: Idx> Dominators<Node> {
+ pub fn dummy() -> Self {
+ Self { post_order_rank: IndexVec::new(), immediate_dominators: IndexVec::new() }
+ }
+
pub fn is_reachable(&self, node: Node) -> bool {
self.immediate_dominators[node].is_some()
}
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 5880bbd..fcb2bca 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -26,7 +26,6 @@
#![feature(thread_id_value)]
#![feature(extend_one)]
#![feature(const_panic)]
-#![cfg_attr(bootstrap, feature(min_const_generics))]
#![feature(new_uninit)]
#![feature(once_cell)]
#![feature(maybe_uninit_uninit_array)]
@@ -70,7 +69,6 @@
pub mod binary_search_util;
pub mod box_region;
pub mod captures;
-pub mod const_cstr;
pub mod flock;
pub mod functor;
pub mod fx;
diff --git a/compiler/rustc_data_structures/src/macros.rs b/compiler/rustc_data_structures/src/macros.rs
index b918ed9..48dfbba 100644
--- a/compiler/rustc_data_structures/src/macros.rs
+++ b/compiler/rustc_data_structures/src/macros.rs
@@ -9,11 +9,11 @@
#[macro_export]
macro_rules! enum_from_u32 {
($(#[$attr:meta])* pub enum $name:ident {
- $($variant:ident = $e:expr,)*
+ $($(#[$var_attr:meta])* $variant:ident = $e:expr,)*
}) => {
$(#[$attr])*
pub enum $name {
- $($variant = $e),*
+ $($(#[$var_attr])* $variant = $e),*
}
impl $name {
@@ -26,11 +26,11 @@
}
};
($(#[$attr:meta])* pub enum $name:ident {
- $($variant:ident,)*
+ $($(#[$var_attr:meta])* $variant:ident,)*
}) => {
$(#[$attr])*
pub enum $name {
- $($variant,)*
+ $($(#[$var_attr])* $variant,)*
}
impl $name {
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index f0b413c..51f851d 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -608,7 +608,7 @@
(None, None) => String::new(),
};
- println!("time: {:>7}{}\t{}", duration_to_secs_str(dur), mem_string, what);
+ eprintln!("time: {:>7}{}\t{}", duration_to_secs_str(dur), mem_string, what);
}
// Hack up our own formatting for the duration to make it easier for scripts
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 485719c..14db71c 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -63,23 +63,9 @@
if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) }
}
- /// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
- /// ever used in combination with `get_shard_by_hash` on a single `Sharded`
- /// instance, then `hash` must be computed with `FxHasher`. Otherwise,
- /// `hash` can be computed with any hasher, so long as that hasher is used
- /// consistently for each `Sharded` instance.
- #[inline]
- pub fn get_shard_index_by_hash(&self, hash: u64) -> usize {
- let hash_len = mem::size_of::<usize>();
- // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
- // hashbrown also uses the lowest bits, so we can't use those
- let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
- bits % SHARDS
- }
-
#[inline]
pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
- &self.shards[self.get_shard_index_by_hash(hash)].0
+ &self.shards[get_shard_index_by_hash(hash)].0
}
#[inline]
@@ -166,3 +152,17 @@
val.hash(&mut state);
state.finish()
}
+
+/// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
+/// ever used in combination with `get_shard_by_hash` on a single `Sharded`
+/// instance, then `hash` must be computed with `FxHasher`. Otherwise,
+/// `hash` can be computed with any hasher, so long as that hasher is used
+/// consistently for each `Sharded` instance.
+#[inline]
+pub fn get_shard_index_by_hash(hash: u64) -> usize {
+ let hash_len = mem::size_of::<usize>();
+ // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
+ // hashbrown also uses the lowest bits, so we can't use those
+ let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
+ bits % SHARDS
+}
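A standalone sketch of the bit selection above; the `SHARD_BITS = 5` value is an assumption for illustration only (the real constant is defined elsewhere in this crate). The top 7 bits and the lowest bits are left for hashbrown, and the next `SHARD_BITS` bits pick the shard.

```rust
const SHARD_BITS: usize = 5; // assumed value, for illustration only
const SHARDS: usize = 1 << SHARD_BITS;

fn get_shard_index_by_hash(hash: u64) -> usize {
    let hash_len = std::mem::size_of::<usize>();
    // Skip the top 7 bits (used by hashbrown) and take the next SHARD_BITS bits.
    let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
    bits % SHARDS
}

fn main() {
    let hash = 0xdead_beef_dead_beef_u64;
    assert!(get_shard_index_by_hash(hash) < SHARDS);
    println!("shard index: {}", get_shard_index_by_hash(hash));
}
```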
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
index 3850c9b..ff28784 100644
--- a/compiler/rustc_data_structures/src/stable_hasher.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -35,6 +35,7 @@
StableHasher { state: SipHasher128::new_with_keys(0, 0) }
}
+ #[inline]
pub fn finish<W: StableHasherResult>(self) -> W {
W::finish(self)
}
diff --git a/compiler/rustc_data_structures/src/tiny_list.rs b/compiler/rustc_data_structures/src/tiny_list.rs
index e94a0c6..f88bcc2 100644
--- a/compiler/rustc_data_structures/src/tiny_list.rs
+++ b/compiler/rustc_data_structures/src/tiny_list.rs
@@ -15,7 +15,7 @@
mod tests;
#[derive(Clone)]
-pub struct TinyList<T: PartialEq> {
+pub struct TinyList<T> {
head: Option<Element<T>>,
}
@@ -56,20 +56,10 @@
}
false
}
-
- #[inline]
- pub fn len(&self) -> usize {
- let (mut elem, mut count) = (self.head.as_ref(), 0);
- while let Some(ref e) = elem {
- count += 1;
- elem = e.next.as_deref();
- }
- count
- }
}
#[derive(Clone)]
-struct Element<T: PartialEq> {
+struct Element<T> {
data: T,
next: Option<Box<Element<T>>>,
}
diff --git a/compiler/rustc_data_structures/src/tiny_list/tests.rs b/compiler/rustc_data_structures/src/tiny_list/tests.rs
index a8ae2bc..c0334d2 100644
--- a/compiler/rustc_data_structures/src/tiny_list/tests.rs
+++ b/compiler/rustc_data_structures/src/tiny_list/tests.rs
@@ -3,6 +3,17 @@
extern crate test;
use test::{black_box, Bencher};
+impl<T> TinyList<T> {
+ fn len(&self) -> usize {
+ let (mut elem, mut count) = (self.head.as_ref(), 0);
+ while let Some(ref e) = elem {
+ count += 1;
+ elem = e.next.as_deref();
+ }
+ count
+ }
+}
+
#[test]
fn test_contains_and_insert() {
fn do_insert(i: u32) -> bool {
diff --git a/compiler/rustc_data_structures/src/transitive_relation.rs b/compiler/rustc_data_structures/src/transitive_relation.rs
index 2e1512b..ccf8bd6 100644
--- a/compiler/rustc_data_structures/src/transitive_relation.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation.rs
@@ -9,7 +9,7 @@
mod tests;
#[derive(Clone, Debug)]
-pub struct TransitiveRelation<T: Eq + Hash> {
+pub struct TransitiveRelation<T> {
// List of elements. This is used to map from a T to a usize.
elements: FxIndexSet<T>,
@@ -49,7 +49,7 @@
target: Index,
}
-impl<T: Clone + Debug + Eq + Hash> TransitiveRelation<T> {
+impl<T: Eq + Hash> TransitiveRelation<T> {
pub fn is_empty(&self) -> bool {
self.edges.is_empty()
}
@@ -322,12 +322,6 @@
.collect()
}
- /// A "best" parent in some sense. See `parents` and
- /// `postdom_upper_bound` for more details.
- pub fn postdom_parent(&self, a: &T) -> Option<&T> {
- self.mutual_immediate_postdominator(self.parents(a))
- }
-
fn with_closure<OP, R>(&self, op: OP) -> R
where
OP: FnOnce(&BitMatrix<usize, usize>) -> R,
diff --git a/compiler/rustc_data_structures/src/transitive_relation/tests.rs b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
index ca90ba1..9fa7224 100644
--- a/compiler/rustc_data_structures/src/transitive_relation/tests.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
@@ -1,5 +1,13 @@
use super::*;
+impl<T: Eq + Hash> TransitiveRelation<T> {
+ /// A "best" parent in some sense. See `parents` and
+ /// `postdom_upper_bound` for more details.
+ fn postdom_parent(&self, a: &T) -> Option<&T> {
+ self.mutual_immediate_postdominator(self.parents(a))
+ }
+}
+
#[test]
fn test_one_step() {
let mut relation = TransitiveRelation::default();
diff --git a/compiler/rustc_driver/Cargo.toml b/compiler/rustc_driver/Cargo.toml
index b88b556..c521f204 100644
--- a/compiler/rustc_driver/Cargo.toml
+++ b/compiler/rustc_driver/Cargo.toml
@@ -10,9 +10,9 @@
[dependencies]
libc = "0.2"
atty = "0.2"
-tracing = { version = "0.1.18" }
-tracing-subscriber = { version = "0.2.13", default-features = false, features = ["fmt", "env-filter", "smallvec", "parking_lot", "ansi"] }
-tracing-tree = "0.1.6"
+tracing = { version = "0.1.25" }
+tracing-subscriber = { version = "0.2.16", default-features = false, features = ["fmt", "env-filter", "smallvec", "parking_lot", "ansi"] }
+tracing-tree = "0.1.9"
rustc_middle = { path = "../rustc_middle" }
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
rustc_target = { path = "../rustc_target" }
@@ -34,6 +34,8 @@
rustc_serialize = { path = "../rustc_serialize" }
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
+rustc_mir_build = { path = "../rustc_mir_build" }
+rustc_typeck = { path = "../rustc_typeck" }
[target.'cfg(windows)'.dependencies]
winapi = { version = "0.3", features = ["consoleapi", "debugapi", "processenv"] }
diff --git a/compiler/rustc_driver/src/args.rs b/compiler/rustc_driver/src/args.rs
index 4f2febf..0133835 100644
--- a/compiler/rustc_driver/src/args.rs
+++ b/compiler/rustc_driver/src/args.rs
@@ -3,7 +3,7 @@
use std::fs;
use std::io;
-pub fn arg_expand(arg: String) -> Result<Vec<String>, Error> {
+fn arg_expand(arg: String) -> Result<Vec<String>, Error> {
if let Some(path) = arg.strip_prefix('@') {
let file = match fs::read_to_string(path) {
Ok(file) => file,
@@ -18,6 +18,20 @@
}
}
+pub fn arg_expand_all(at_args: &[String]) -> Vec<String> {
+ let mut args = Vec::new();
+ for arg in at_args {
+ match arg_expand(arg.clone()) {
+ Ok(arg) => args.extend(arg),
+ Err(err) => rustc_session::early_error(
+ rustc_session::config::ErrorOutputType::default(),
+ &format!("Failed to load argument file: {}", err),
+ ),
+ }
+ }
+ args
+}
+
#[derive(Debug)]
pub enum Error {
Utf8Error(Option<String>),
diff --git a/compiler/rustc_driver/src/lib.rs b/compiler/rustc_driver/src/lib.rs
index 8295e88..25a7804 100644
--- a/compiler/rustc_driver/src/lib.rs
+++ b/compiler/rustc_driver/src/lib.rs
@@ -27,7 +27,6 @@
use rustc_lint::LintStore;
use rustc_metadata::locator;
use rustc_middle::middle::cstore::MetadataLoader;
-use rustc_middle::ty::TyCtxt;
use rustc_save_analysis as save;
use rustc_save_analysis::DumpHandler;
use rustc_serialize::json::{self, ToJson};
@@ -55,7 +54,7 @@
use std::str;
use std::time::Instant;
-mod args;
+pub mod args;
pub mod pretty;
/// Exit status code used for successful compilation and help output.
@@ -188,16 +187,8 @@
Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>,
>,
) -> interface::Result<()> {
- let mut args = Vec::new();
- for arg in at_args {
- match args::arg_expand(arg.clone()) {
- Ok(arg) => args.extend(arg),
- Err(err) => early_error(
- ErrorOutputType::default(),
- &format!("Failed to load argument file: {}", err),
- ),
- }
- }
+ let args = args::arg_expand_all(at_args);
+
let diagnostic_output = emitter.map_or(DiagnosticOutput::Default, DiagnosticOutput::Raw);
let matches = match handle_options(&args) {
Some(matches) => matches,
@@ -224,6 +215,7 @@
diagnostic_output,
stderr: None,
lint_caps: Default::default(),
+ parse_sess_created: None,
register_lints: None,
override_queries: None,
make_codegen_backend: make_codegen_backend.take().unwrap(),
@@ -307,6 +299,7 @@
diagnostic_output,
stderr: None,
lint_caps: Default::default(),
+ parse_sess_created: None,
register_lints: None,
override_queries: None,
make_codegen_backend: make_codegen_backend.unwrap(),
@@ -821,7 +814,7 @@
} else {
"\n --help -v Print the full set of options rustc accepts"
};
- let at_path = if verbose && nightly_build {
+ let at_path = if verbose {
" @path Read newline separated options from `path`\n"
} else {
""
@@ -1240,7 +1233,7 @@
let num_frames = if backtrace { None } else { Some(2) };
- TyCtxt::try_print_query_stack(&handler, num_frames);
+ interface::try_print_query_stack(&handler, num_frames);
#[cfg(windows)]
unsafe {
diff --git a/compiler/rustc_driver/src/pretty.rs b/compiler/rustc_driver/src/pretty.rs
index b7edc24..5512bd7 100644
--- a/compiler/rustc_driver/src/pretty.rs
+++ b/compiler/rustc_driver/src/pretty.rs
@@ -9,14 +9,14 @@
use rustc_middle::hir::map as hir_map;
use rustc_middle::ty::{self, TyCtxt};
use rustc_mir::util::{write_mir_graphviz, write_mir_pretty};
-use rustc_session::config::{Input, PpMode, PpSourceMode};
+use rustc_mir_build::thir;
+use rustc_session::config::{Input, PpAstTreeMode, PpHirMode, PpMode, PpSourceMode};
use rustc_session::Session;
use rustc_span::symbol::Ident;
use rustc_span::FileName;
use std::cell::Cell;
-use std::fs::File;
-use std::io::Write;
+use std::fmt::Write;
use std::path::Path;
pub use self::PpMode::*;
@@ -44,43 +44,41 @@
F: FnOnce(&dyn PrinterSupport) -> A,
{
match *ppmode {
- PpmNormal | PpmEveryBodyLoops | PpmExpanded => {
+ Normal | EveryBodyLoops | Expanded => {
let annotation = NoAnn { sess, tcx };
f(&annotation)
}
- PpmIdentified | PpmExpandedIdentified => {
+ Identified | ExpandedIdentified => {
let annotation = IdentifiedAnnotation { sess, tcx };
f(&annotation)
}
- PpmExpandedHygiene => {
+ ExpandedHygiene => {
let annotation = HygieneAnnotation { sess };
f(&annotation)
}
- _ => panic!("Should use call_with_pp_support_hir"),
}
}
-fn call_with_pp_support_hir<A, F>(ppmode: &PpSourceMode, tcx: TyCtxt<'_>, f: F) -> A
+fn call_with_pp_support_hir<A, F>(ppmode: &PpHirMode, tcx: TyCtxt<'_>, f: F) -> A
where
F: FnOnce(&dyn HirPrinterSupport<'_>, &hir::Crate<'_>) -> A,
{
match *ppmode {
- PpmNormal => {
+ PpHirMode::Normal => {
let annotation = NoAnn { sess: tcx.sess, tcx: Some(tcx) };
f(&annotation, tcx.hir().krate())
}
- PpmIdentified => {
+ PpHirMode::Identified => {
let annotation = IdentifiedAnnotation { sess: tcx.sess, tcx: Some(tcx) };
f(&annotation, tcx.hir().krate())
}
- PpmTyped => {
+ PpHirMode::Typed => {
abort_on_err(tcx.analysis(LOCAL_CRATE), tcx.sess);
let annotation = TypedAnnotation { tcx, maybe_typeck_results: Cell::new(None) };
tcx.dep_graph.with_ignore(|| f(&annotation, tcx.hir().krate()))
}
- _ => panic!("Should use call_with_pp_support"),
}
}
@@ -237,7 +235,7 @@
pprust_hir::AnnNode::Name(_) => {}
pprust_hir::AnnNode::Item(item) => {
s.s.space();
- s.synth_comment(format!("hir_id: {}", item.hir_id));
+ s.synth_comment(format!("hir_id: {}", item.hir_id()));
}
pprust_hir::AnnNode::SubItem(id) => {
s.s.space();
@@ -375,13 +373,14 @@
(src, src_name)
}
-fn write_output(out: Vec<u8>, ofile: Option<&Path>) {
+fn write_or_print(out: &str, ofile: Option<&Path>) {
match ofile {
- None => print!("{}", String::from_utf8(out).unwrap()),
- Some(p) => match File::create(p) {
- Ok(mut w) => w.write_all(&out).unwrap(),
- Err(e) => panic!("print-print failed to open {} due to {}", p.display(), e),
- },
+ None => print!("{}", out),
+ Some(p) => {
+ if let Err(e) = std::fs::write(p, out) {
+ panic!("print-print failed to write {} due to {}", p.display(), e);
+ }
+ }
}
}
@@ -394,30 +393,32 @@
) {
let (src, src_name) = get_source(input, sess);
- let mut out = String::new();
-
- if let PpmSource(s) = ppm {
- // Silently ignores an identified node.
- let out = &mut out;
- call_with_pp_support(&s, sess, None, move |annotation| {
- debug!("pretty printing source code {:?}", s);
- let sess = annotation.sess();
- let parse = &sess.parse_sess;
- *out = pprust::print_crate(
- sess.source_map(),
- krate,
- src_name,
- src,
- annotation.pp_ann(),
- false,
- parse.edition,
- )
- })
- } else {
- unreachable!();
+ let out = match ppm {
+ Source(s) => {
+ // Silently ignores an identified node.
+ call_with_pp_support(&s, sess, None, move |annotation| {
+ debug!("pretty printing source code {:?}", s);
+ let sess = annotation.sess();
+ let parse = &sess.parse_sess;
+ pprust::print_crate(
+ sess.source_map(),
+ krate,
+ src_name,
+ src,
+ annotation.pp_ann(),
+ false,
+ parse.edition,
+ )
+ })
+ }
+ AstTree(PpAstTreeMode::Normal) => {
+ debug!("pretty printing AST tree");
+ format!("{:#?}", krate)
+ }
+ _ => unreachable!(),
};
- write_output(out.into_bytes(), ofile);
+ write_or_print(&out, ofile);
}
pub fn print_after_hir_lowering<'tcx>(
@@ -434,17 +435,14 @@
let (src, src_name) = get_source(input, tcx.sess);
- let mut out = String::new();
-
- match ppm {
- PpmSource(s) => {
+ let out = match ppm {
+ Source(s) => {
// Silently ignores an identified node.
- let out = &mut out;
call_with_pp_support(&s, tcx.sess, Some(tcx), move |annotation| {
debug!("pretty printing source code {:?}", s);
let sess = annotation.sess();
let parse = &sess.parse_sess;
- *out = pprust::print_crate(
+ pprust::print_crate(
sess.source_map(),
krate,
src_name,
@@ -456,28 +454,42 @@
})
}
- PpmHir(s) => {
- let out = &mut out;
- call_with_pp_support_hir(&s, tcx, move |annotation, krate| {
- debug!("pretty printing source code {:?}", s);
- let sess = annotation.sess();
- let sm = sess.source_map();
- *out = pprust_hir::print_crate(sm, krate, src_name, src, annotation.pp_ann())
- })
+ AstTree(PpAstTreeMode::Expanded) => {
+ debug!("pretty-printing expanded AST");
+ format!("{:#?}", krate)
}
- PpmHirTree(s) => {
- let out = &mut out;
- call_with_pp_support_hir(&s, tcx, move |_annotation, krate| {
- debug!("pretty printing source code {:?}", s);
- *out = format!("{:#?}", krate);
- });
+ Hir(s) => call_with_pp_support_hir(&s, tcx, move |annotation, krate| {
+ debug!("pretty printing HIR {:?}", s);
+ let sess = annotation.sess();
+ let sm = sess.source_map();
+ pprust_hir::print_crate(sm, krate, src_name, src, annotation.pp_ann())
+ }),
+
+ HirTree => call_with_pp_support_hir(&PpHirMode::Normal, tcx, move |_annotation, krate| {
+ debug!("pretty printing HIR tree");
+ format!("{:#?}", krate)
+ }),
+
+ ThirTree => {
+ let mut out = String::new();
+ abort_on_err(rustc_typeck::check_crate(tcx), tcx.sess);
+ debug!("pretty printing THIR tree");
+ for did in tcx.body_owners() {
+ let hir = tcx.hir();
+ let body = hir.body(hir.body_owned_by(hir.local_def_id_to_hir_id(did)));
+ let arena = thir::Arena::default();
+ let thir =
+ thir::build_thir(tcx, ty::WithOptConstParam::unknown(did), &arena, &body.value);
+ let _ = writeln!(out, "{:?}:\n{:#?}\n", did, thir);
+ }
+ out
}
_ => unreachable!(),
- }
+ };
- write_output(out.into_bytes(), ofile);
+ write_or_print(&out, ofile);
}
// In an ideal world, this would be a public function called by the driver after
@@ -494,16 +506,13 @@
tcx.analysis(LOCAL_CRATE)?;
match ppm {
- PpmMir | PpmMirCFG => match ppm {
- PpmMir => write_mir_pretty(tcx, None, &mut out),
- PpmMirCFG => write_mir_graphviz(tcx, None, &mut out),
- _ => unreachable!(),
- },
+ Mir => write_mir_pretty(tcx, None, &mut out).unwrap(),
+ MirCFG => write_mir_graphviz(tcx, None, &mut out).unwrap(),
_ => unreachable!(),
}
- .unwrap();
- write_output(out, ofile);
+ let out = std::str::from_utf8(&out).unwrap();
+ write_or_print(out, ofile);
Ok(())
}
diff --git a/compiler/rustc_error_codes/src/error_codes.rs b/compiler/rustc_error_codes/src/error_codes.rs
index 1ed43669..4b52973 100644
--- a/compiler/rustc_error_codes/src/error_codes.rs
+++ b/compiler/rustc_error_codes/src/error_codes.rs
@@ -103,7 +103,6 @@
E0199: include_str!("./error_codes/E0199.md"),
E0200: include_str!("./error_codes/E0200.md"),
E0201: include_str!("./error_codes/E0201.md"),
-E0202: include_str!("./error_codes/E0202.md"),
E0203: include_str!("./error_codes/E0203.md"),
E0204: include_str!("./error_codes/E0204.md"),
E0205: include_str!("./error_codes/E0205.md"),
@@ -285,7 +284,12 @@
E0538: include_str!("./error_codes/E0538.md"),
E0539: include_str!("./error_codes/E0539.md"),
E0541: include_str!("./error_codes/E0541.md"),
+E0542: include_str!("./error_codes/E0542.md"),
+E0543: include_str!("./error_codes/E0543.md"),
+E0545: include_str!("./error_codes/E0545.md"),
E0546: include_str!("./error_codes/E0546.md"),
+E0547: include_str!("./error_codes/E0547.md"),
+E0549: include_str!("./error_codes/E0549.md"),
E0550: include_str!("./error_codes/E0550.md"),
E0551: include_str!("./error_codes/E0551.md"),
E0552: include_str!("./error_codes/E0552.md"),
@@ -602,15 +606,8 @@
E0523,
// E0526, // shuffle indices are not constant
// E0540, // multiple rustc_deprecated attributes
- E0542, // missing 'since'
- E0543, // missing 'reason'
E0544, // multiple stability levels
- E0545, // incorrect 'issue'
- E0547, // missing 'issue'
// E0548, // replaced with a generic attribute input check
- // rustc_deprecated attribute must be paired with either stable or unstable
- // attribute
- E0549,
E0553, // multiple rustc_const_unstable attributes
// E0555, // replaced with a generic attribute input check
// E0558, // replaced with a generic attribute input check
diff --git a/compiler/rustc_error_codes/src/error_codes/E0074.md b/compiler/rustc_error_codes/src/error_codes/E0074.md
index e25dec7..785d6de 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0074.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0074.md
@@ -11,7 +11,7 @@
#![feature(repr_simd)]
#[repr(simd)]
-struct Bad<T>(T, T, T);
+struct Bad<T>(T, T, T, T);
```
This will not:
@@ -20,5 +20,5 @@
#![feature(repr_simd)]
#[repr(simd)]
-struct Good(u32, u32, u32);
+struct Good(u32, u32, u32, u32);
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0076.md b/compiler/rustc_error_codes/src/error_codes/E0076.md
index f293a2a..1da8caa 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0076.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0076.md
@@ -7,7 +7,7 @@
#![feature(repr_simd)]
#[repr(simd)]
-struct Bad(u16, u32, u32); // error!
+struct Bad(u16, u32, u32, u32); // error!
```
When using the `#[simd]` attribute to automatically use SIMD operations in tuple
@@ -20,5 +20,5 @@
#![feature(repr_simd)]
#[repr(simd)]
-struct Good(u32, u32, u32); // ok!
+struct Good(u32, u32, u32, u32); // ok!
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0077.md b/compiler/rustc_error_codes/src/error_codes/E0077.md
index b14513c..91aa24d 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0077.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0077.md
@@ -19,5 +19,5 @@
#![feature(repr_simd)]
#[repr(simd)]
-struct Good(u32, u32, u32); // ok!
+struct Good(u32, u32, u32, u32); // ok!
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0162.md b/compiler/rustc_error_codes/src/error_codes/E0162.md
index 9814614..0161c93 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0162.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0162.md
@@ -1,6 +1,6 @@
#### Note: this error code is no longer emitted by the compiler.
-An if-let pattern attempts to match the pattern, and enters the body if the
+An `if let` pattern attempts to match the pattern, and enters the body if the
match was successful. If the match is irrefutable (when it cannot fail to
match), use a regular `let`-binding instead. For instance:
diff --git a/compiler/rustc_error_codes/src/error_codes/E0165.md b/compiler/rustc_error_codes/src/error_codes/E0165.md
index 92243db..7bcd6c0 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0165.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0165.md
@@ -1,6 +1,6 @@
#### Note: this error code is no longer emitted by the compiler.
-A while-let pattern attempts to match the pattern, and enters the body if the
+A `while let` pattern attempts to match the pattern, and enters the body if the
match was successful. If the match is irrefutable (when it cannot fail to
match), use a regular `let`-binding inside a `loop` instead. For instance:
diff --git a/compiler/rustc_error_codes/src/error_codes/E0202.md b/compiler/rustc_error_codes/src/error_codes/E0202.md
deleted file mode 100644
index afc61ec..0000000
--- a/compiler/rustc_error_codes/src/error_codes/E0202.md
+++ /dev/null
@@ -1,15 +0,0 @@
-Inherent associated types were part of [RFC 195] but are not yet implemented.
-See [the tracking issue][iss8995] for the status of this implementation.
-
-Erroneous code example:
-
-```compile_fail,E0202
-struct Foo;
-
-impl Foo {
- type Bar = isize; // error!
-}
-```
-
-[RFC 195]: https://github.com/rust-lang/rfcs/blob/master/text/0195-associated-items.md
-[iss8995]: https://github.com/rust-lang/rust/issues/8995
diff --git a/compiler/rustc_error_codes/src/error_codes/E0542.md b/compiler/rustc_error_codes/src/error_codes/E0542.md
new file mode 100644
index 0000000..7cb58f9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0542.md
@@ -0,0 +1,47 @@
+The `since` value is missing in a stability attribute.
+
+Erroneous code example:
+
+```compile_fail,E0542
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(feature = "_stable_fn")] // invalid
+fn _stable_fn() {}
+
+#[rustc_const_stable(feature = "_stable_const_fn")] // invalid
+fn _stable_const_fn() {}
+
+#[stable(feature = "_deprecated_fn", since = "0.1.0")]
+#[rustc_deprecated(
+ reason = "explanation for deprecation"
+)] // invalid
+fn _deprecated_fn() {}
+```
+
+To fix this issue, you need to provide the `since` field. Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(feature = "_stable_fn", since = "1.0.0")] // ok!
+fn _stable_fn() {}
+
+#[rustc_const_stable(feature = "_stable_const_fn", since = "1.0.0")] // ok!
+fn _stable_const_fn() {}
+
+#[stable(feature = "_deprecated_fn", since = "0.1.0")]
+#[rustc_deprecated(
+ since = "1.0.0",
+ reason = "explanation for deprecation"
+)] // ok!
+fn _deprecated_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0543.md b/compiler/rustc_error_codes/src/error_codes/E0543.md
new file mode 100644
index 0000000..ba26f92
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0543.md
@@ -0,0 +1,35 @@
+The `reason` value is missing in a stability attribute.
+
+Erroneous code example:
+
+```compile_fail,E0543
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(since = "0.1.0", feature = "_deprecated_fn")]
+#[rustc_deprecated(
+ since = "1.0.0"
+)] // invalid
+fn _deprecated_fn() {}
+```
+
+To fix this issue, you need to provide the `reason` field. Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(since = "0.1.0", feature = "_deprecated_fn")]
+#[rustc_deprecated(
+ since = "1.0.0",
+ reason = "explanation for deprecation"
+)] // ok!
+fn _deprecated_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0545.md b/compiler/rustc_error_codes/src/error_codes/E0545.md
new file mode 100644
index 0000000..9fb935a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0545.md
@@ -0,0 +1,35 @@
+The `issue` value is incorrect in a stability attribute.
+
+Erroneous code example:
+
+```compile_fail,E0545
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(feature = "_unstable_fn", issue = "0")] // invalid
+fn _unstable_fn() {}
+
+#[rustc_const_unstable(feature = "_unstable_const_fn", issue = "0")] // invalid
+fn _unstable_const_fn() {}
+```
+
+To fix this issue, you need to provide a correct value in the `issue` field.
+Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(feature = "_unstable_fn", issue = "none")] // ok!
+fn _unstable_fn() {}
+
+#[rustc_const_unstable(feature = "_unstable_const_fn", issue = "1")] // ok!
+fn _unstable_const_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0546.md b/compiler/rustc_error_codes/src/error_codes/E0546.md
index b2df22c..a33dcb7 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0546.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0546.md
@@ -1,4 +1,4 @@
-A feature name is missing.
+The `feature` value is missing in a stability attribute.
Erroneous code example:
@@ -13,7 +13,7 @@
fn stable_fn() {}
```
-To fix the issue you need to provide a feature name.
+To fix this issue, you need to provide the `feature` field. Example:
```
#![feature(staged_api)]
@@ -25,3 +25,10 @@
#[stable(feature = "stable_fn", since = "1.0.0")] // ok!
fn stable_fn() {}
```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0547.md b/compiler/rustc_error_codes/src/error_codes/E0547.md
new file mode 100644
index 0000000..1aa4b35
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0547.md
@@ -0,0 +1,37 @@
+The `issue` value is missing in a stability attribute.
+
+Erroneous code example:
+
+```compile_fail,E0547
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(feature = "_unstable_fn")] // invalid
+fn _unstable_fn() {}
+
+#[rustc_const_unstable(feature = "_unstable_const_fn")] // invalid
+fn _unstable_const_fn() {}
+```
+
+To fix this issue, you need to provide the `issue` field. Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(feature = "_unstable_fn", issue = "none")] // ok!
+fn _unstable_fn() {}
+
+#[rustc_const_unstable(
+ feature = "_unstable_const_fn",
+ issue = "none"
+)] // ok!
+fn _unstable_const_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0549.md b/compiler/rustc_error_codes/src/error_codes/E0549.md
new file mode 100644
index 0000000..d4b78e7
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0549.md
@@ -0,0 +1,37 @@
+A `rustc_deprecated` attribute wasn't paired with a `stable`/`unstable`
+attribute.
+
+Erroneous code example:
+
+```compile_fail,E0549
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[rustc_deprecated(
+ since = "1.0.1",
+ reason = "explanation for deprecation"
+)] // invalid
+fn _deprecated_fn() {}
+```
+
+To fix this issue, you also need to add a `stable` or `unstable` attribute.
+Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(since = "1.0.0", feature = "test")]
+#[rustc_deprecated(
+ since = "1.0.1",
+ reason = "explanation for deprecation"
+)] // ok!
+fn _deprecated_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0761.md b/compiler/rustc_error_codes/src/error_codes/E0761.md
index e112674..760c589 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0761.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0761.md
@@ -2,7 +2,7 @@
Erroneous code example:
-```ignore (multiple source files required for compile_fail)
+```ignore (Multiple source files are required for compile_fail.)
// file: ambiguous_module/mod.rs
fn foo() {}
diff --git a/compiler/rustc_error_codes/src/lib.rs b/compiler/rustc_error_codes/src/lib.rs
index e4a7025..14ddb3e 100644
--- a/compiler/rustc_error_codes/src/lib.rs
+++ b/compiler/rustc_error_codes/src/lib.rs
@@ -1,4 +1,5 @@
-#![deny(invalid_codeblock_attributes)]
+#![cfg_attr(bootstrap, deny(invalid_codeblock_attributes))]
+#![cfg_attr(not(bootstrap), deny(rustdoc::invalid_codeblock_attributes))]
//! This library is used to gather all error codes into one place,
//! the goal being to make their maintenance easier.
diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs
index e61476b..ce5b130 100644
--- a/compiler/rustc_errors/src/diagnostic.rs
+++ b/compiler/rustc_errors/src/diagnostic.rs
@@ -4,7 +4,9 @@
use crate::Substitution;
use crate::SubstitutionPart;
use crate::SuggestionStyle;
+use crate::ToolMetadata;
use rustc_lint_defs::Applicability;
+use rustc_serialize::json::Json;
use rustc_span::{MultiSpan, Span, DUMMY_SP};
use std::fmt;
@@ -293,6 +295,7 @@
suggestion: Vec<(Span, String)>,
applicability: Applicability,
) -> &mut Self {
+ assert!(!suggestion.is_empty());
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: suggestion
@@ -303,6 +306,7 @@
msg: msg.to_owned(),
style: SuggestionStyle::ShowCode,
applicability,
+ tool_metadata: Default::default(),
});
self
}
@@ -315,6 +319,10 @@
suggestions: Vec<Vec<(Span, String)>>,
applicability: Applicability,
) -> &mut Self {
+ assert!(!suggestions.is_empty());
+ for s in &suggestions {
+ assert!(!s.is_empty());
+ }
self.suggestions.push(CodeSuggestion {
substitutions: suggestions
.into_iter()
@@ -328,6 +336,7 @@
msg: msg.to_owned(),
style: SuggestionStyle::ShowCode,
applicability,
+ tool_metadata: Default::default(),
});
self
}
@@ -344,6 +353,7 @@
suggestion: Vec<(Span, String)>,
applicability: Applicability,
) -> &mut Self {
+ assert!(!suggestion.is_empty());
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: suggestion
@@ -354,6 +364,7 @@
msg: msg.to_owned(),
style: SuggestionStyle::CompletelyHidden,
applicability,
+ tool_metadata: Default::default(),
});
self
}
@@ -408,6 +419,7 @@
msg: msg.to_owned(),
style,
applicability,
+ tool_metadata: Default::default(),
});
self
}
@@ -446,6 +458,7 @@
msg: msg.to_owned(),
style: SuggestionStyle::ShowCode,
applicability,
+ tool_metadata: Default::default(),
});
self
}
@@ -515,6 +528,23 @@
self
}
+ /// Adds a suggestion intended only for a tool. The intent is that the metadata encodes
+ /// the suggestion in a tool-specific way, as it may not even directly involve Rust code.
+ pub fn tool_only_suggestion_with_metadata(
+ &mut self,
+ msg: &str,
+ applicability: Applicability,
+ tool_metadata: Json,
+ ) {
+ self.suggestions.push(CodeSuggestion {
+ substitutions: vec![],
+ msg: msg.to_owned(),
+ style: SuggestionStyle::CompletelyHidden,
+ applicability,
+ tool_metadata: ToolMetadata::new(tool_metadata),
+ })
+ }
+
pub fn set_span<S: Into<MultiSpan>>(&mut self, sp: S) -> &mut Self {
self.span = sp.into();
if let Some(span) = self.span.primary_span() {
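For context (not part of the imported patch): a minimal sketch of how a caller might use the new `tool_only_suggestion_with_metadata` method, assuming a `Diagnostic` is already in hand. The message and JSON payload below are illustrative placeholders, not values used anywhere in rustc.

```rust
// Sketch only: attach an opaque, tool-specific payload to an existing diagnostic.
// The suggestion carries no source substitutions; the JSON blob is what an external
// tool is expected to interpret. Payload and message are hypothetical.
use rustc_errors::{Applicability, Diagnostic};
use rustc_serialize::json::Json;

fn attach_tool_payload(diag: &mut Diagnostic) {
    diag.tool_only_suggestion_with_metadata(
        "a tool-specific fix is available",
        Applicability::MaybeIncorrect,
        Json::String("opaque-tool-payload".to_string()),
    );
}
```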
diff --git a/compiler/rustc_errors/src/diagnostic_builder.rs b/compiler/rustc_errors/src/diagnostic_builder.rs
index c09cce2..79507e6 100644
--- a/compiler/rustc_errors/src/diagnostic_builder.rs
+++ b/compiler/rustc_errors/src/diagnostic_builder.rs
@@ -30,15 +30,6 @@
allow_suggestions: bool,
}
-/// This is a helper macro for [`forward!`] that allows automatically adding documentation
-/// that uses tokens from [`forward!`]'s input.
-macro_rules! forward_inner_docs {
- ($e:expr => $i:item) => {
- #[doc = $e]
- $i
- };
-}
-
/// In general, the `DiagnosticBuilder` uses deref to allow access to
/// the fields and methods of the embedded `diagnostic` in a
/// transparent way. *However,* many of the methods are intended to
@@ -54,11 +45,11 @@
pub fn $n:ident(&self, $($name:ident: $ty:ty),* $(,)?) -> &Self
) => {
$(#[$attrs])*
- forward_inner_docs!(concat!("See [`Diagnostic::", stringify!($n), "()`].") =>
+ #[doc = concat!("See [`Diagnostic::", stringify!($n), "()`].")]
pub fn $n(&self, $($name: $ty),*) -> &Self {
self.diagnostic.$n($($name),*);
self
- });
+ }
};
// Forward pattern for &mut self -> &mut Self
@@ -67,11 +58,11 @@
pub fn $n:ident(&mut self, $($name:ident: $ty:ty),* $(,)?) -> &mut Self
) => {
$(#[$attrs])*
- forward_inner_docs!(concat!("See [`Diagnostic::", stringify!($n), "()`].") =>
+ #[doc = concat!("See [`Diagnostic::", stringify!($n), "()`].")]
pub fn $n(&mut self, $($name: $ty),*) -> &mut Self {
self.0.diagnostic.$n($($name),*);
self
- });
+ }
};
// Forward pattern for &mut self -> &mut Self, with generic parameters.
@@ -84,11 +75,11 @@
) -> &mut Self
) => {
$(#[$attrs])*
- forward_inner_docs!(concat!("See [`Diagnostic::", stringify!($n), "()`].") =>
+ #[doc = concat!("See [`Diagnostic::", stringify!($n), "()`].")]
pub fn $n<$($generic: $bound),*>(&mut self, $($name: $ty),*) -> &mut Self {
self.0.diagnostic.$n($($name),*);
self
- });
+ }
};
}
diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs
index ea62e21..9b6f671 100644
--- a/compiler/rustc_errors/src/emitter.rs
+++ b/compiler/rustc_errors/src/emitter.rs
@@ -1713,7 +1713,8 @@
let max_line_num_len = if self.ui_testing {
ANONYMIZED_LINE_NUM.len()
} else {
- self.get_max_line_num(span, children).to_string().len()
+ let n = self.get_max_line_num(span, children);
+ num_decimal_digits(n)
};
match self.emit_message_default(span, message, code, level, max_line_num_len, false) {
@@ -1941,6 +1942,30 @@
}
}
+// Instead of taking the string length or dividing by 10 while the value is > 0, we multiply a
+// limit by 10 until it exceeds `num`. If the loop isn't exited by the `return`, the last
+// multiplication will wrap, which is OK: once we can no longer fit a higher power of 10 in a
+// `usize`, the loop ends anyway. This is also why we need the maximum number of decimal digits
+// that fit in a `usize`.
+fn num_decimal_digits(num: usize) -> usize {
+ #[cfg(target_pointer_width = "64")]
+ const MAX_DIGITS: usize = 20;
+
+ #[cfg(target_pointer_width = "32")]
+ const MAX_DIGITS: usize = 10;
+
+ #[cfg(target_pointer_width = "16")]
+ const MAX_DIGITS: usize = 5;
+
+ let mut lim = 10;
+ for num_digits in 1..MAX_DIGITS {
+ if num < lim {
+ return num_digits;
+ }
+ lim = lim.wrapping_mul(10);
+ }
+ MAX_DIGITS
+}
+
fn replace_tabs(str: &str) -> String {
str.replace('\t', " ")
}
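A quick illustration (again, not part of the patch) of what the digit-counting helper above computes. The unit test below is a sketch and assumes a 64-bit target, where `MAX_DIGITS` is 20.

```rust
// Sketch of a unit test for `num_decimal_digits` (illustrative, not part of the patch).
// Values assume target_pointer_width = "64".
#[test]
fn num_decimal_digits_counts() {
    assert_eq!(num_decimal_digits(0), 1);
    assert_eq!(num_decimal_digits(9), 1);
    assert_eq!(num_decimal_digits(10), 2);
    assert_eq!(num_decimal_digits(99_999), 5);
    assert_eq!(num_decimal_digits(usize::MAX), 20); // falls through the loop
}
```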
diff --git a/compiler/rustc_errors/src/json.rs b/compiler/rustc_errors/src/json.rs
index d57beb1..c27b39a 100644
--- a/compiler/rustc_errors/src/json.rs
+++ b/compiler/rustc_errors/src/json.rs
@@ -14,6 +14,7 @@
use crate::emitter::{Emitter, HumanReadableErrorType};
use crate::registry::Registry;
use crate::DiagnosticId;
+use crate::ToolMetadata;
use crate::{CodeSuggestion, SubDiagnostic};
use rustc_lint_defs::{Applicability, FutureBreakage};
@@ -26,6 +27,7 @@
use std::vec;
use rustc_serialize::json::{as_json, as_pretty_json};
+use rustc_serialize::{Encodable, Encoder};
#[cfg(test)]
mod tests;
@@ -168,7 +170,8 @@
// The following data types are provided just for serialisation.
-#[derive(Encodable)]
+// NOTE: this has a manual implementation of Encodable which needs to be updated in
+// parallel.
struct Diagnostic {
/// The primary error message.
message: String,
@@ -180,6 +183,65 @@
children: Vec<Diagnostic>,
/// The message as rustc would render it.
rendered: Option<String>,
+ /// Extra tool metadata
+ tool_metadata: ToolMetadata,
+}
+
+macro_rules! encode_fields {
+ (
+ $enc:expr, // encoder
+ $idx:expr, // starting field index
+ $struct:expr, // struct we're serializing
+ $struct_name:ident, // struct name
+ [ $($name:ident),+$(,)? ], // fields to encode
+ [ $($ignore:ident),+$(,)? ] // fields we're skipping
+ ) => {
+ {
+ // Pattern match to make sure all fields are accounted for
+ let $struct_name { $($name,)+ $($ignore: _,)+ } = $struct;
+ let mut idx = $idx;
+ $(
+ $enc.emit_struct_field(
+ stringify!($name),
+ idx,
+ |enc| $name.encode(enc),
+ )?;
+ idx += 1;
+ )+
+ idx
+ }
+ };
+}
+
+// Special-case encoder to skip tool_metadata if not set
+impl<E: Encoder> Encodable<E> for Diagnostic {
+ fn encode(&self, s: &mut E) -> Result<(), E::Error> {
+ s.emit_struct("diagnostic", 7, |s| {
+ let mut idx = 0;
+
+ idx = encode_fields!(
+ s,
+ idx,
+ self,
+ Self,
+ [message, code, level, spans, children, rendered],
+ [tool_metadata]
+ );
+ if self.tool_metadata.is_set() {
+ idx = encode_fields!(
+ s,
+ idx,
+ self,
+ Self,
+ [tool_metadata],
+ [message, code, level, spans, children, rendered]
+ );
+ }
+
+ let _ = idx;
+ Ok(())
+ })
+ }
}
#[derive(Encodable)]
@@ -269,6 +331,7 @@
spans: DiagnosticSpan::from_suggestion(sugg, je),
children: vec![],
rendered: None,
+ tool_metadata: sugg.tool_metadata.clone(),
});
// generate regular command line output and store it in the json
@@ -312,6 +375,7 @@
.chain(sugg)
.collect(),
rendered: Some(output),
+ tool_metadata: ToolMetadata::default(),
}
}
@@ -327,6 +391,7 @@
.unwrap_or_else(|| DiagnosticSpan::from_multispan(&diag.span, je)),
children: vec![],
rendered: None,
+ tool_metadata: ToolMetadata::default(),
}
}
}
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index aa88233..fa855f5 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -5,6 +5,7 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(crate_visibility_modifier)]
#![feature(backtrace)]
+#![feature(extended_key_value_attributes)]
#![feature(nll)]
#[macro_use]
@@ -23,10 +24,14 @@
use rustc_data_structures::AtomicRef;
use rustc_lint_defs::FutureBreakage;
pub use rustc_lint_defs::{pluralize, Applicability};
+use rustc_serialize::json::Json;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_span::source_map::SourceMap;
use rustc_span::{Loc, MultiSpan, Span};
use std::borrow::Cow;
+use std::hash::{Hash, Hasher};
+use std::num::NonZeroUsize;
use std::panic;
use std::path::Path;
use std::{error, fmt};
@@ -48,7 +53,7 @@
// `PResult` is used a lot. Make sure it doesn't unintentionally get bigger.
// (See also the comment on `DiagnosticBuilderInner`.)
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PResult<'_, bool>, 16);
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, Encodable, Decodable)]
@@ -73,6 +78,39 @@
}
}
+#[derive(Clone, Debug, PartialEq, Default)]
+pub struct ToolMetadata(pub Option<Json>);
+
+impl ToolMetadata {
+ fn new(json: Json) -> Self {
+ ToolMetadata(Some(json))
+ }
+
+ fn is_set(&self) -> bool {
+ self.0.is_some()
+ }
+}
+
+impl Hash for ToolMetadata {
+ fn hash<H: Hasher>(&self, _state: &mut H) {}
+}
+
+// Doesn't really need to round-trip
+impl<D: Decoder> Decodable<D> for ToolMetadata {
+ fn decode(_d: &mut D) -> Result<Self, D::Error> {
+ Ok(ToolMetadata(None))
+ }
+}
+
+impl<S: Encoder> Encodable<S> for ToolMetadata {
+ fn encode(&self, e: &mut S) -> Result<(), S::Error> {
+ match &self.0 {
+ None => e.emit_unit(),
+ Some(json) => json.encode(e),
+ }
+ }
+}
+
#[derive(Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
pub struct CodeSuggestion {
/// Each substitute can have multiple variants due to multiple
@@ -106,6 +144,8 @@
/// which are useful for users but not useful for
/// tools like rustfix
pub applicability: Applicability,
+ /// Tool-specific metadata
+ pub tool_metadata: ToolMetadata,
}
#[derive(Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
@@ -321,7 +361,7 @@
pub can_emit_warnings: bool,
/// If true, error-level diagnostics are upgraded to bug-level.
/// (rustc: see `-Z treat-err-as-bug`)
- pub treat_err_as_bug: Option<usize>,
+ pub treat_err_as_bug: Option<NonZeroUsize>,
/// If true, immediately emit diagnostics that would otherwise be buffered.
/// (rustc: see `-Z dont-buffer-diagnostics` and `-Z treat-err-as-bug`)
pub dont_buffer_diagnostics: bool,
@@ -358,7 +398,7 @@
pub fn with_tty_emitter(
color_config: ColorConfig,
can_emit_warnings: bool,
- treat_err_as_bug: Option<usize>,
+ treat_err_as_bug: Option<NonZeroUsize>,
sm: Option<Lrc<SourceMap>>,
) -> Self {
Self::with_tty_emitter_and_flags(
@@ -386,7 +426,7 @@
pub fn with_emitter(
can_emit_warnings: bool,
- treat_err_as_bug: Option<usize>,
+ treat_err_as_bug: Option<NonZeroUsize>,
emitter: Box<dyn Emitter + sync::Send>,
) -> Self {
Handler::with_emitter_and_flags(
@@ -775,7 +815,6 @@
}
let already_emitted = |this: &mut Self| {
- use std::hash::Hash;
let mut hasher = StableHasher::new();
diagnostic.hash(&mut hasher);
let diagnostic_hash = hasher.finish();
@@ -804,7 +843,7 @@
}
fn treat_err_as_bug(&self) -> bool {
- self.flags.treat_err_as_bug.map_or(false, |c| self.err_count() >= c)
+ self.flags.treat_err_as_bug.map_or(false, |c| self.err_count() >= c.get())
}
fn print_error_count(&mut self, registry: &Registry) {
@@ -913,7 +952,7 @@
// This is technically `self.treat_err_as_bug()` but `delay_span_bug` is called before
// incrementing `err_count` by one, so we need to +1 the comparing.
// FIXME: Would be nice to increment err_count in a more coherent way.
- if self.flags.treat_err_as_bug.map_or(false, |c| self.err_count() + 1 >= c) {
+ if self.flags.treat_err_as_bug.map_or(false, |c| self.err_count() + 1 >= c.get()) {
// FIXME: don't abort here if report_delayed_bugs is off
self.span_bug(sp, msg);
}
@@ -986,7 +1025,7 @@
fn panic_if_treat_err_as_bug(&self) {
if self.treat_err_as_bug() {
- match (self.err_count(), self.flags.treat_err_as_bug.unwrap_or(0)) {
+ match (self.err_count(), self.flags.treat_err_as_bug.map(|c| c.get()).unwrap_or(0)) {
(1, 1) => panic!("aborting due to `-Z treat-err-as-bug=1`"),
(0, _) | (1, _) => {}
(count, as_bug) => panic!(
diff --git a/compiler/rustc_expand/Cargo.toml b/compiler/rustc_expand/Cargo.toml
index 7413b0d..59c1604e 100644
--- a/compiler/rustc_expand/Cargo.toml
+++ b/compiler/rustc_expand/Cargo.toml
@@ -23,5 +23,5 @@
rustc_lexer = { path = "../rustc_lexer" }
rustc_parse = { path = "../rustc_parse" }
rustc_session = { path = "../rustc_session" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_ast = { path = "../rustc_ast" }
diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs
index 08543d1..594b9a8 100644
--- a/compiler/rustc_expand/src/base.rs
+++ b/compiler/rustc_expand/src/base.rs
@@ -1,15 +1,17 @@
use crate::expand::{self, AstFragment, Invocation};
-use crate::module::DirectoryOwnership;
+use crate::module::DirOwnership;
use rustc_ast::ptr::P;
use rustc_ast::token::{self, Nonterminal};
-use rustc_ast::tokenstream::{CanSynthesizeMissingTokens, TokenStream};
+use rustc_ast::tokenstream::{CanSynthesizeMissingTokens, LazyTokenStream, TokenStream};
use rustc_ast::visit::{AssocCtxt, Visitor};
-use rustc_ast::{self as ast, Attribute, NodeId, PatKind};
-use rustc_attr::{self as attr, Deprecation, HasAttrs, Stability};
+use rustc_ast::{self as ast, AstLike, Attribute, Item, NodeId, PatKind};
+use rustc_attr::{self as attr, Deprecation, Stability};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::{self, Lrc};
use rustc_errors::{DiagnosticBuilder, ErrorReported};
+use rustc_lint_defs::builtin::PROC_MACRO_BACK_COMPAT;
+use rustc_lint_defs::BuiltinLintDiagnostics;
use rustc_parse::{self, nt_to_tokenstream, parser, MACRO_ARGUMENTS};
use rustc_session::{parse::ParseSess, Limit, Session};
use rustc_span::def_id::DefId;
@@ -36,15 +38,15 @@
Stmt(P<ast::Stmt>),
Expr(P<ast::Expr>),
Arm(ast::Arm),
- Field(ast::Field),
- FieldPat(ast::FieldPat),
+ ExprField(ast::ExprField),
+ PatField(ast::PatField),
GenericParam(ast::GenericParam),
Param(ast::Param),
- StructField(ast::StructField),
+ FieldDef(ast::FieldDef),
Variant(ast::Variant),
}
-impl HasAttrs for Annotatable {
+impl AstLike for Annotatable {
fn attrs(&self) -> &[Attribute] {
match *self {
Annotatable::Item(ref item) => &item.attrs,
@@ -54,11 +56,11 @@
Annotatable::Stmt(ref stmt) => stmt.attrs(),
Annotatable::Expr(ref expr) => &expr.attrs,
Annotatable::Arm(ref arm) => &arm.attrs,
- Annotatable::Field(ref field) => &field.attrs,
- Annotatable::FieldPat(ref fp) => &fp.attrs,
+ Annotatable::ExprField(ref field) => &field.attrs,
+ Annotatable::PatField(ref fp) => &fp.attrs,
Annotatable::GenericParam(ref gp) => &gp.attrs,
Annotatable::Param(ref p) => &p.attrs,
- Annotatable::StructField(ref sf) => &sf.attrs,
+ Annotatable::FieldDef(ref sf) => &sf.attrs,
Annotatable::Variant(ref v) => &v.attrs(),
}
}
@@ -72,14 +74,32 @@
Annotatable::Stmt(stmt) => stmt.visit_attrs(f),
Annotatable::Expr(expr) => expr.visit_attrs(f),
Annotatable::Arm(arm) => arm.visit_attrs(f),
- Annotatable::Field(field) => field.visit_attrs(f),
- Annotatable::FieldPat(fp) => fp.visit_attrs(f),
+ Annotatable::ExprField(field) => field.visit_attrs(f),
+ Annotatable::PatField(fp) => fp.visit_attrs(f),
Annotatable::GenericParam(gp) => gp.visit_attrs(f),
Annotatable::Param(p) => p.visit_attrs(f),
- Annotatable::StructField(sf) => sf.visit_attrs(f),
+ Annotatable::FieldDef(sf) => sf.visit_attrs(f),
Annotatable::Variant(v) => v.visit_attrs(f),
}
}
+
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ match self {
+ Annotatable::Item(item) => item.tokens_mut(),
+ Annotatable::TraitItem(trait_item) => trait_item.tokens_mut(),
+ Annotatable::ImplItem(impl_item) => impl_item.tokens_mut(),
+ Annotatable::ForeignItem(foreign_item) => foreign_item.tokens_mut(),
+ Annotatable::Stmt(stmt) => stmt.tokens_mut(),
+ Annotatable::Expr(expr) => expr.tokens_mut(),
+ Annotatable::Arm(arm) => arm.tokens_mut(),
+ Annotatable::ExprField(field) => field.tokens_mut(),
+ Annotatable::PatField(fp) => fp.tokens_mut(),
+ Annotatable::GenericParam(gp) => gp.tokens_mut(),
+ Annotatable::Param(p) => p.tokens_mut(),
+ Annotatable::FieldDef(sf) => sf.tokens_mut(),
+ Annotatable::Variant(v) => v.tokens_mut(),
+ }
+ }
}
impl Annotatable {
@@ -92,11 +112,11 @@
Annotatable::Stmt(ref stmt) => stmt.span,
Annotatable::Expr(ref expr) => expr.span,
Annotatable::Arm(ref arm) => arm.span,
- Annotatable::Field(ref field) => field.span,
- Annotatable::FieldPat(ref fp) => fp.pat.span,
+ Annotatable::ExprField(ref field) => field.span,
+ Annotatable::PatField(ref fp) => fp.pat.span,
Annotatable::GenericParam(ref gp) => gp.ident.span,
Annotatable::Param(ref p) => p.span,
- Annotatable::StructField(ref sf) => sf.span,
+ Annotatable::FieldDef(ref sf) => sf.span,
Annotatable::Variant(ref v) => v.span,
}
}
@@ -110,11 +130,11 @@
Annotatable::Stmt(stmt) => visitor.visit_stmt(stmt),
Annotatable::Expr(expr) => visitor.visit_expr(expr),
Annotatable::Arm(arm) => visitor.visit_arm(arm),
- Annotatable::Field(field) => visitor.visit_field(field),
- Annotatable::FieldPat(fp) => visitor.visit_field_pattern(fp),
+ Annotatable::ExprField(field) => visitor.visit_expr_field(field),
+ Annotatable::PatField(fp) => visitor.visit_pat_field(fp),
Annotatable::GenericParam(gp) => visitor.visit_generic_param(gp),
Annotatable::Param(p) => visitor.visit_param(p),
- Annotatable::StructField(sf) => visitor.visit_struct_field(sf),
+ Annotatable::FieldDef(sf) => visitor.visit_field_def(sf),
Annotatable::Variant(v) => visitor.visit_variant(v),
}
}
@@ -131,17 +151,20 @@
Annotatable::Stmt(stmt) => token::NtStmt(stmt.into_inner()),
Annotatable::Expr(expr) => token::NtExpr(expr),
Annotatable::Arm(..)
- | Annotatable::Field(..)
- | Annotatable::FieldPat(..)
+ | Annotatable::ExprField(..)
+ | Annotatable::PatField(..)
| Annotatable::GenericParam(..)
| Annotatable::Param(..)
- | Annotatable::StructField(..)
+ | Annotatable::FieldDef(..)
| Annotatable::Variant(..) => panic!("unexpected annotatable"),
}
}
crate fn into_tokens(self, sess: &ParseSess) -> TokenStream {
- nt_to_tokenstream(&self.into_nonterminal(), sess, CanSynthesizeMissingTokens::No)
+ // Tokens of an attribute target may be invalidated by some outer `#[derive]` performing
+ // "full configuration" (attributes following derives on the same item should be the most
+ // common case), which is why synthesizing tokens is allowed.
+ nt_to_tokenstream(&self.into_nonterminal(), sess, CanSynthesizeMissingTokens::Yes)
}
pub fn expect_item(self) -> P<ast::Item> {
@@ -193,16 +216,16 @@
}
}
- pub fn expect_field(self) -> ast::Field {
+ pub fn expect_expr_field(self) -> ast::ExprField {
match self {
- Annotatable::Field(field) => field,
+ Annotatable::ExprField(field) => field,
_ => panic!("expected field"),
}
}
- pub fn expect_field_pattern(self) -> ast::FieldPat {
+ pub fn expect_pat_field(self) -> ast::PatField {
match self {
- Annotatable::FieldPat(fp) => fp,
+ Annotatable::PatField(fp) => fp,
_ => panic!("expected field pattern"),
}
}
@@ -221,9 +244,9 @@
}
}
- pub fn expect_struct_field(self) -> ast::StructField {
+ pub fn expect_field_def(self) -> ast::FieldDef {
match self {
- Annotatable::StructField(sf) => sf,
+ Annotatable::FieldDef(sf) => sf,
_ => panic!("expected struct field"),
}
}
@@ -234,25 +257,6 @@
_ => panic!("expected variant"),
}
}
-
- pub fn derive_allowed(&self) -> bool {
- match *self {
- Annotatable::Stmt(ref stmt) => match stmt.kind {
- ast::StmtKind::Item(ref item) => matches!(
- item.kind,
- ast::ItemKind::Struct(..) | ast::ItemKind::Enum(..) | ast::ItemKind::Union(..)
- ),
- _ => false,
- },
- Annotatable::Item(ref item) => match item.kind {
- ast::ItemKind::Struct(..) | ast::ItemKind::Enum(..) | ast::ItemKind::Union(..) => {
- true
- }
- _ => false,
- },
- _ => false,
- }
- }
}
/// Result of an expansion that may need to be retried.
@@ -428,11 +432,11 @@
None
}
- fn make_fields(self: Box<Self>) -> Option<SmallVec<[ast::Field; 1]>> {
+ fn make_expr_fields(self: Box<Self>) -> Option<SmallVec<[ast::ExprField; 1]>> {
None
}
- fn make_field_patterns(self: Box<Self>) -> Option<SmallVec<[ast::FieldPat; 1]>> {
+ fn make_pat_fields(self: Box<Self>) -> Option<SmallVec<[ast::PatField; 1]>> {
None
}
@@ -444,7 +448,7 @@
None
}
- fn make_struct_fields(self: Box<Self>) -> Option<SmallVec<[ast::StructField; 1]>> {
+ fn make_field_defs(self: Box<Self>) -> Option<SmallVec<[ast::FieldDef; 1]>> {
None
}
@@ -628,11 +632,11 @@
Some(SmallVec::new())
}
- fn make_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::Field; 1]>> {
+ fn make_expr_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::ExprField; 1]>> {
Some(SmallVec::new())
}
- fn make_field_patterns(self: Box<DummyResult>) -> Option<SmallVec<[ast::FieldPat; 1]>> {
+ fn make_pat_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::PatField; 1]>> {
Some(SmallVec::new())
}
@@ -644,7 +648,7 @@
Some(SmallVec::new())
}
- fn make_struct_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::StructField; 1]>> {
+ fn make_field_defs(self: Box<DummyResult>) -> Option<SmallVec<[ast::FieldDef; 1]>> {
Some(SmallVec::new())
}
@@ -772,8 +776,8 @@
name: Symbol,
attrs: &[ast::Attribute],
) -> SyntaxExtension {
- let allow_internal_unstable = attr::allow_internal_unstable(sess, &attrs)
- .map(|features| features.collect::<Vec<Symbol>>().into());
+ let allow_internal_unstable =
+ attr::allow_internal_unstable(sess, &attrs).collect::<Vec<Symbol>>();
let mut local_inner_macros = false;
if let Some(macro_export) = sess.find_by_name(attrs, sym::macro_export) {
@@ -786,19 +790,26 @@
.find_by_name(attrs, sym::rustc_builtin_macro)
.map(|a| a.value_str().unwrap_or(name));
let (stability, const_stability) = attr::find_stability(&sess, attrs, span);
- if const_stability.is_some() {
+ if let Some((_, sp)) = const_stability {
sess.parse_sess
.span_diagnostic
- .span_err(span, "macros cannot have const stability attributes");
+ .struct_span_err(sp, "macros cannot have const stability attributes")
+ .span_label(sp, "invalid const stability attribute")
+ .span_label(
+ sess.source_map().guess_head_span(span),
+ "const stability attribute affects this macro",
+ )
+ .emit();
}
SyntaxExtension {
kind,
span,
- allow_internal_unstable,
+ allow_internal_unstable: (!allow_internal_unstable.is_empty())
+ .then(|| allow_internal_unstable.into()),
allow_internal_unsafe: sess.contains_name(attrs, sym::allow_internal_unsafe),
local_inner_macros,
- stability,
+ stability: stability.map(|(s, _)| s),
deprecation: attr::find_deprecation(&sess, attrs).map(|(d, _)| d),
helper_attrs,
edition,
@@ -854,12 +865,6 @@
}
}
-/// Result of resolving a macro invocation.
-pub enum InvocationRes {
- Single(Lrc<SyntaxExtension>),
- DeriveContainer(Vec<Lrc<SyntaxExtension>>),
-}
-
/// Error type that denotes indeterminacy.
pub struct Indeterminate;
@@ -885,24 +890,53 @@
invoc: &Invocation,
eager_expansion_root: ExpnId,
force: bool,
- ) -> Result<InvocationRes, Indeterminate>;
+ ) -> Result<Lrc<SyntaxExtension>, Indeterminate>;
fn check_unused_macros(&mut self);
/// Some parent node that is close enough to the given macro call.
- fn lint_node_id(&mut self, expn_id: ExpnId) -> NodeId;
+ fn lint_node_id(&self, expn_id: ExpnId) -> NodeId;
// Resolver interfaces for specific built-in macros.
/// Does `#[derive(...)]` attribute with the given `ExpnId` have built-in `Copy` inside it?
fn has_derive_copy(&self, expn_id: ExpnId) -> bool;
+ /// Resolve paths inside the `#[derive(...)]` attribute with the given `ExpnId`.
+ fn resolve_derives(
+ &mut self,
+ expn_id: ExpnId,
+ derives: Vec<ast::Path>,
+ force: bool,
+ ) -> Result<(), Indeterminate>;
+ /// Take resolutions for paths inside the `#[derive(...)]` attribute with the given `ExpnId`
+ /// back from resolver.
+ fn take_derive_resolutions(
+ &mut self,
+ expn_id: ExpnId,
+ ) -> Option<Vec<(Lrc<SyntaxExtension>, ast::Path)>>;
/// Path resolution logic for `#[cfg_accessible(path)]`.
fn cfg_accessible(&mut self, expn_id: ExpnId, path: &ast::Path) -> Result<bool, Indeterminate>;
}
-#[derive(Clone)]
+#[derive(Clone, Default)]
pub struct ModuleData {
+ /// Path to the module starting from the crate name, like `my_crate::foo::bar`.
pub mod_path: Vec<Ident>,
- pub directory: PathBuf,
+ /// Stack of paths to files loaded by out-of-line module items,
+ /// used to detect and report recursive module inclusions.
+ pub file_path_stack: Vec<PathBuf>,
+ /// Directory to search child module files in,
+ /// often (but not necessarily) the parent of the top file path on the `file_path_stack`.
+ pub dir_path: PathBuf,
+}
+
+impl ModuleData {
+ pub fn with_dir_path(&self, dir_path: PathBuf) -> ModuleData {
+ ModuleData {
+ mod_path: self.mod_path.clone(),
+ file_path_stack: self.file_path_stack.clone(),
+ dir_path,
+ }
+ }
}
#[derive(Clone)]
@@ -910,10 +944,13 @@
pub id: ExpnId,
pub depth: usize,
pub module: Rc<ModuleData>,
- pub directory_ownership: DirectoryOwnership,
+ pub dir_ownership: DirOwnership,
pub prior_type_ascription: Option<(Span, bool)>,
}
+type OnExternModLoaded<'a> =
+ Option<&'a dyn Fn(Ident, Vec<Attribute>, Vec<P<Item>>, Span) -> (Vec<Attribute>, Vec<P<Item>>)>;
+
/// One of these is made during expansion and incrementally updated as we go;
/// when a macro expansion occurs, the resulting nodes have the `backtrace()
/// -> expn_data` of their expansion context stored into their span.
@@ -931,7 +968,7 @@
/// Called directly after having parsed an external `mod foo;` in expansion.
///
/// `Ident` is the module name.
- pub(super) extern_mod_loaded: Option<&'a dyn Fn(&ast::Crate, Ident)>,
+ pub(super) extern_mod_loaded: OnExternModLoaded<'a>,
}
impl<'a> ExtCtxt<'a> {
@@ -939,7 +976,7 @@
sess: &'a Session,
ecfg: expand::ExpansionConfig<'a>,
resolver: &'a mut dyn ResolverExpand,
- extern_mod_loaded: Option<&'a dyn Fn(&ast::Crate, Ident)>,
+ extern_mod_loaded: OnExternModLoaded<'a>,
) -> ExtCtxt<'a> {
ExtCtxt {
sess,
@@ -951,8 +988,8 @@
current_expansion: ExpansionData {
id: ExpnId::root(),
depth: 0,
- module: Rc::new(ModuleData { mod_path: Vec::new(), directory: PathBuf::new() }),
- directory_ownership: DirectoryOwnership::Owned { relative: None },
+ module: Default::default(),
+ dir_ownership: DirOwnership::Owned { relative: None },
prior_type_ascription: None,
},
force_mode: false,
@@ -1052,6 +1089,10 @@
.chain(components.iter().map(|&s| Ident::with_dummy_span(s)))
.collect()
}
+ pub fn def_site_path(&self, components: &[Symbol]) -> Vec<Ident> {
+ let def_site = self.with_def_site_ctxt(DUMMY_SP);
+ components.iter().map(|&s| Ident::new(s, def_site)).collect()
+ }
pub fn check_unused_macros(&mut self) {
self.resolver.check_unused_macros();
@@ -1202,3 +1243,41 @@
}
Some(es)
}
+
+/// This nonterminal looks like some specific enums from
+/// `proc-macro-hack` and `procedural-masquerade` crates.
+/// We need to maintain some special pretty-printing behavior for them due to incorrect
+/// asserts in old versions of those crates and their wide use in the ecosystem.
+/// See issue #73345 for more details.
+/// FIXME(#73933): Remove this eventually.
+pub(crate) fn pretty_printing_compatibility_hack(nt: &Nonterminal, sess: &ParseSess) -> bool {
+ let item = match nt {
+ Nonterminal::NtItem(item) => item,
+ Nonterminal::NtStmt(stmt) => match &stmt.kind {
+ ast::StmtKind::Item(item) => item,
+ _ => return false,
+ },
+ _ => return false,
+ };
+
+ let name = item.ident.name;
+ if name == sym::ProceduralMasqueradeDummyType {
+ if let ast::ItemKind::Enum(enum_def, _) = &item.kind {
+ if let [variant] = &*enum_def.variants {
+ if variant.ident.name == sym::Input {
+ sess.buffer_lint_with_diagnostic(
+ &PROC_MACRO_BACK_COMPAT,
+ item.ident.span,
+ ast::CRATE_NODE_ID,
+ "using `procedural-masquerade` crate",
+ BuiltinLintDiagnostics::ProcMacroBackCompat(
+ "The `procedural-masquerade` crate has been unnecessary since Rust 1.30.0. \
+ Versions of this crate below 0.1.7 will eventually stop compiling.".to_string())
+ );
+ return true;
+ }
+ }
+ }
+ }
+ false
+}
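For reference (not part of the patch): the item shape that `pretty_printing_compatibility_hack` looks for is roughly the dummy enum generated by old `procedural-masquerade` versions, sketched below for illustration.

```rust
// Roughly the shape matched by the back-compat hack above: an enum named
// `ProceduralMasqueradeDummyType` with a single `Input` variant (illustrative sketch
// of what old `procedural-masquerade` versions generate; not code from this patch).
enum ProceduralMasqueradeDummyType {
    Input,
}
```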
diff --git a/compiler/rustc_expand/src/build.rs b/compiler/rustc_expand/src/build.rs
index fe67b40..3664ff3 100644
--- a/compiler/rustc_expand/src/build.rs
+++ b/compiler/rustc_expand/src/build.rs
@@ -267,8 +267,8 @@
pub fn expr_block(&self, b: P<ast::Block>) -> P<ast::Expr> {
self.expr(b.span, ast::ExprKind::Block(b, None))
}
- pub fn field_imm(&self, span: Span, ident: Ident, e: P<ast::Expr>) -> ast::Field {
- ast::Field {
+ pub fn field_imm(&self, span: Span, ident: Ident, e: P<ast::Expr>) -> ast::ExprField {
+ ast::ExprField {
ident: ident.with_span_pos(span),
expr: e,
span,
@@ -282,15 +282,18 @@
&self,
span: Span,
path: ast::Path,
- fields: Vec<ast::Field>,
+ fields: Vec<ast::ExprField>,
) -> P<ast::Expr> {
- self.expr(span, ast::ExprKind::Struct(path, fields, ast::StructRest::None))
+ self.expr(
+ span,
+ ast::ExprKind::Struct(P(ast::StructExpr { path, fields, rest: ast::StructRest::None })),
+ )
}
pub fn expr_struct_ident(
&self,
span: Span,
id: Ident,
- fields: Vec<ast::Field>,
+ fields: Vec<ast::ExprField>,
) -> P<ast::Expr> {
self.expr_struct(span, self.path_ident(span, id), fields)
}
@@ -419,7 +422,7 @@
&self,
span: Span,
path: ast::Path,
- field_pats: Vec<ast::FieldPat>,
+ field_pats: Vec<ast::PatField>,
) -> P<ast::Pat> {
self.pat(span, PatKind::Struct(path, field_pats, false))
}
diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs
index b07bce94..a23731c 100644
--- a/compiler/rustc_expand/src/config.rs
+++ b/compiler/rustc_expand/src/config.rs
@@ -1,13 +1,9 @@
//! Conditional compilation stripping.
-use crate::base::Annotatable;
-
-use rustc_ast::attr::HasAttrs;
-use rustc_ast::mut_visit::*;
use rustc_ast::ptr::P;
use rustc_ast::token::{DelimToken, Token, TokenKind};
use rustc_ast::tokenstream::{DelimSpan, LazyTokenStream, Spacing, TokenStream, TokenTree};
-use rustc_ast::{self as ast, AttrItem, Attribute, MetaItem};
+use rustc_ast::{self as ast, AstLike, AttrItem, Attribute, MetaItem};
use rustc_attr as attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::map_in_place::MapInPlace;
@@ -23,8 +19,6 @@
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{Span, DUMMY_SP};
-use smallvec::SmallVec;
-
/// A folder that strips out items that do not belong in the current configuration.
pub struct StripUnconfigured<'a> {
pub sess: &'a Session,
@@ -205,11 +199,11 @@
let unconfigured_attrs = krate.attrs.clone();
let diag = &sess.parse_sess.span_diagnostic;
let err_count = diag.err_count();
- let features = match strip_unconfigured.configure(krate.attrs) {
+ let features = match strip_unconfigured.configure_krate_attrs(krate.attrs) {
None => {
// The entire crate is unconfigured.
krate.attrs = Vec::new();
- krate.module.items = Vec::new();
+ krate.items = Vec::new();
Features::default()
}
Some(attrs) => {
@@ -218,7 +212,9 @@
if err_count == diag.err_count() {
// Avoid reconfiguring malformed `cfg_attr`s.
strip_unconfigured.features = Some(&features);
- strip_unconfigured.configure(unconfigured_attrs);
+ // Run configuration again, this time with features available
+ // so that we can perform feature-gating.
+ strip_unconfigured.configure_krate_attrs(unconfigured_attrs);
}
features
}
@@ -242,7 +238,7 @@
#the-cfg_attr-attribute>";
impl<'a> StripUnconfigured<'a> {
- pub fn configure<T: HasAttrs>(&mut self, mut node: T) -> Option<T> {
+ pub fn configure<T: AstLike>(&mut self, mut node: T) -> Option<T> {
self.process_cfg_attrs(&mut node);
if self.in_cfg(node.attrs()) {
Some(node)
@@ -252,13 +248,26 @@
}
}
+ fn configure_krate_attrs(
+ &mut self,
+ mut attrs: Vec<ast::Attribute>,
+ ) -> Option<Vec<ast::Attribute>> {
+ attrs.flat_map_in_place(|attr| self.process_cfg_attr(attr));
+ if self.in_cfg(&attrs) {
+ Some(attrs)
+ } else {
+ self.modified = true;
+ None
+ }
+ }
+
/// Parse and expand all `cfg_attr` attributes into a list of attributes
/// that are within each `cfg_attr` that has a true configuration predicate.
///
/// Gives compiler warnings if any `cfg_attr` does not contain any
/// attributes and is in the original source code. Gives compiler errors if
/// the syntax of any `cfg_attr` is incorrect.
- pub fn process_cfg_attrs<T: HasAttrs>(&mut self, node: &mut T) {
+ fn process_cfg_attrs<T: AstLike>(&mut self, node: &mut T) {
node.visit_attrs(|attrs| {
attrs.flat_map_in_place(|attr| self.process_cfg_attr(attr));
});
@@ -373,7 +382,7 @@
}
/// Determines if a node with the given attributes should be included in this configuration.
- pub fn in_cfg(&self, attrs: &[Attribute]) -> bool {
+ fn in_cfg(&self, attrs: &[Attribute]) -> bool {
attrs.iter().all(|attr| {
if !is_cfg(self.sess, attr) {
return true;
@@ -413,16 +422,8 @@
})
}
- /// Visit attributes on expression and statements (but not attributes on items in blocks).
- fn visit_expr_attrs(&mut self, attrs: &[Attribute]) {
- // flag the offending attributes
- for attr in attrs.iter() {
- self.maybe_emit_expr_attr_err(attr);
- }
- }
-
/// If attributes are not allowed on expressions, emit an error for `attr`
- pub fn maybe_emit_expr_attr_err(&self, attr: &Attribute) {
+ crate fn maybe_emit_expr_attr_err(&self, attr: &Attribute) {
if !self.features.map_or(true, |features| features.stmt_expr_attributes) {
let mut err = feature_err(
&self.sess.parse_sess,
@@ -439,49 +440,10 @@
}
}
- pub fn configure_foreign_mod(&mut self, foreign_mod: &mut ast::ForeignMod) {
- let ast::ForeignMod { unsafety: _, abi: _, items } = foreign_mod;
- items.flat_map_in_place(|item| self.configure(item));
- }
-
- fn configure_variant_data(&mut self, vdata: &mut ast::VariantData) {
- match vdata {
- ast::VariantData::Struct(fields, ..) | ast::VariantData::Tuple(fields, _) => {
- fields.flat_map_in_place(|field| self.configure(field))
- }
- ast::VariantData::Unit(_) => {}
- }
- }
-
- pub fn configure_item_kind(&mut self, item: &mut ast::ItemKind) {
- match item {
- ast::ItemKind::Struct(def, _generics) | ast::ItemKind::Union(def, _generics) => {
- self.configure_variant_data(def)
- }
- ast::ItemKind::Enum(ast::EnumDef { variants }, _generics) => {
- variants.flat_map_in_place(|variant| self.configure(variant));
- for variant in variants {
- self.configure_variant_data(&mut variant.data);
- }
- }
- _ => {}
- }
- }
-
- pub fn configure_expr_kind(&mut self, expr_kind: &mut ast::ExprKind) {
- match expr_kind {
- ast::ExprKind::Match(_m, arms) => {
- arms.flat_map_in_place(|arm| self.configure(arm));
- }
- ast::ExprKind::Struct(_path, fields, _base) => {
- fields.flat_map_in_place(|field| self.configure(field));
- }
- _ => {}
- }
- }
-
pub fn configure_expr(&mut self, expr: &mut P<ast::Expr>) {
- self.visit_expr_attrs(expr.attrs());
+ for attr in expr.attrs.iter() {
+ self.maybe_emit_expr_attr_err(attr);
+ }
// If an expr is valid to cfg away it will have been removed by the
// outer stmt or expression folder before descending in here.
@@ -497,117 +459,6 @@
self.process_cfg_attrs(expr)
}
-
- pub fn configure_pat(&mut self, pat: &mut P<ast::Pat>) {
- if let ast::PatKind::Struct(_path, fields, _etc) = &mut pat.kind {
- fields.flat_map_in_place(|field| self.configure(field));
- }
- }
-
- pub fn configure_fn_decl(&mut self, fn_decl: &mut ast::FnDecl) {
- fn_decl.inputs.flat_map_in_place(|arg| self.configure(arg));
- }
-
- pub fn fully_configure(&mut self, item: Annotatable) -> Annotatable {
- // Since the item itself has already been configured by the InvocationCollector,
- // we know that fold result vector will contain exactly one element
- match item {
- Annotatable::Item(item) => Annotatable::Item(self.flat_map_item(item).pop().unwrap()),
- Annotatable::TraitItem(item) => {
- Annotatable::TraitItem(self.flat_map_trait_item(item).pop().unwrap())
- }
- Annotatable::ImplItem(item) => {
- Annotatable::ImplItem(self.flat_map_impl_item(item).pop().unwrap())
- }
- Annotatable::ForeignItem(item) => {
- Annotatable::ForeignItem(self.flat_map_foreign_item(item).pop().unwrap())
- }
- Annotatable::Stmt(stmt) => {
- Annotatable::Stmt(stmt.map(|stmt| self.flat_map_stmt(stmt).pop().unwrap()))
- }
- Annotatable::Expr(mut expr) => Annotatable::Expr({
- self.visit_expr(&mut expr);
- expr
- }),
- Annotatable::Arm(arm) => Annotatable::Arm(self.flat_map_arm(arm).pop().unwrap()),
- Annotatable::Field(field) => {
- Annotatable::Field(self.flat_map_field(field).pop().unwrap())
- }
- Annotatable::FieldPat(fp) => {
- Annotatable::FieldPat(self.flat_map_field_pattern(fp).pop().unwrap())
- }
- Annotatable::GenericParam(param) => {
- Annotatable::GenericParam(self.flat_map_generic_param(param).pop().unwrap())
- }
- Annotatable::Param(param) => {
- Annotatable::Param(self.flat_map_param(param).pop().unwrap())
- }
- Annotatable::StructField(sf) => {
- Annotatable::StructField(self.flat_map_struct_field(sf).pop().unwrap())
- }
- Annotatable::Variant(v) => {
- Annotatable::Variant(self.flat_map_variant(v).pop().unwrap())
- }
- }
- }
-}
-
-impl<'a> MutVisitor for StripUnconfigured<'a> {
- fn visit_foreign_mod(&mut self, foreign_mod: &mut ast::ForeignMod) {
- self.configure_foreign_mod(foreign_mod);
- noop_visit_foreign_mod(foreign_mod, self);
- }
-
- fn visit_item_kind(&mut self, item: &mut ast::ItemKind) {
- self.configure_item_kind(item);
- noop_visit_item_kind(item, self);
- }
-
- fn visit_expr(&mut self, expr: &mut P<ast::Expr>) {
- self.configure_expr(expr);
- self.configure_expr_kind(&mut expr.kind);
- noop_visit_expr(expr, self);
- }
-
- fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
- let mut expr = configure!(self, expr);
- self.configure_expr_kind(&mut expr.kind);
- noop_visit_expr(&mut expr, self);
- Some(expr)
- }
-
- fn flat_map_generic_param(
- &mut self,
- param: ast::GenericParam,
- ) -> SmallVec<[ast::GenericParam; 1]> {
- noop_flat_map_generic_param(configure!(self, param), self)
- }
-
- fn flat_map_stmt(&mut self, stmt: ast::Stmt) -> SmallVec<[ast::Stmt; 1]> {
- noop_flat_map_stmt(configure!(self, stmt), self)
- }
-
- fn flat_map_item(&mut self, item: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
- noop_flat_map_item(configure!(self, item), self)
- }
-
- fn flat_map_impl_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
- noop_flat_map_assoc_item(configure!(self, item), self)
- }
-
- fn flat_map_trait_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
- noop_flat_map_assoc_item(configure!(self, item), self)
- }
-
- fn visit_pat(&mut self, pat: &mut P<ast::Pat>) {
- self.configure_pat(pat);
- noop_visit_pat(pat, self)
- }
-
- fn visit_fn_decl(&mut self, mut fn_decl: &mut P<ast::FnDecl>) {
- self.configure_fn_decl(&mut fn_decl);
- noop_visit_fn_decl(fn_decl, self);
- }
}
fn is_cfg(sess: &Session, attr: &Attribute) -> bool {
diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs
index 5fdb7fc..0992f59 100644
--- a/compiler/rustc_expand/src/expand.rs
+++ b/compiler/rustc_expand/src/expand.rs
@@ -1,26 +1,28 @@
use crate::base::*;
use crate::config::StripUnconfigured;
use crate::configure;
-use crate::hygiene::{ExpnData, ExpnKind, SyntaxContext};
+use crate::hygiene::SyntaxContext;
use crate::mbe::macro_rules::annotate_err_with_kind;
-use crate::module::{parse_external_mod, push_directory, Directory, DirectoryOwnership};
+use crate::module::{mod_dir_path, parse_external_mod, DirOwnership, ParsedExternalMod};
use crate::placeholders::{placeholder, PlaceholderExpander};
-use crate::proc_macro::collect_derives;
+use rustc_ast as ast;
use rustc_ast::mut_visit::*;
use rustc_ast::ptr::P;
use rustc_ast::token;
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::visit::{self, AssocCtxt, Visitor};
-use rustc_ast::{self as ast, AttrItem, AttrStyle, Block, LitKind, NodeId, PatKind, Path};
-use rustc_ast::{ItemKind, MacArgs, MacCallStmt, MacStmtStyle, StmtKind, Unsafe};
+use rustc_ast::{AstLike, AttrItem, AttrStyle, Block, Inline, ItemKind, LitKind, MacArgs};
+use rustc_ast::{MacCallStmt, MacStmtStyle, MetaItemKind, ModKind, NestedMetaItem};
+use rustc_ast::{NodeId, PatKind, Path, StmtKind, Unsafe};
use rustc_ast_pretty::pprust;
-use rustc_attr::{self as attr, is_builtin_attr, HasAttrs};
+use rustc_attr::{self as attr, is_builtin_attr};
use rustc_data_structures::map_in_place::MapInPlace;
use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_errors::{struct_span_err, Applicability, PResult};
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Applicability, PResult};
use rustc_feature::Features;
-use rustc_parse::parser::{AttemptLocalParseRecovery, ForceCollect, Parser};
+use rustc_parse::parser::{AttemptLocalParseRecovery, ForceCollect, GateOr, Parser, RecoverComma};
use rustc_parse::validate_attr;
use rustc_session::lint::builtin::UNUSED_DOC_COMMENTS;
use rustc_session::lint::BuiltinLintDiagnostics;
@@ -175,14 +177,14 @@
Arms(SmallVec<[ast::Arm; 1]>) {
"match arm"; many fn flat_map_arm; fn visit_arm(); fn make_arms;
}
- Fields(SmallVec<[ast::Field; 1]>) {
- "field expression"; many fn flat_map_field; fn visit_field(); fn make_fields;
+ Fields(SmallVec<[ast::ExprField; 1]>) {
+ "field expression"; many fn flat_map_expr_field; fn visit_expr_field(); fn make_expr_fields;
}
- FieldPats(SmallVec<[ast::FieldPat; 1]>) {
+ FieldPats(SmallVec<[ast::PatField; 1]>) {
"field pattern";
- many fn flat_map_field_pattern;
- fn visit_field_pattern();
- fn make_field_patterns;
+ many fn flat_map_pat_field;
+ fn visit_pat_field();
+ fn make_pat_fields;
}
GenericParams(SmallVec<[ast::GenericParam; 1]>) {
"generic parameter";
@@ -193,11 +195,11 @@
Params(SmallVec<[ast::Param; 1]>) {
"function parameter"; many fn flat_map_param; fn visit_param(); fn make_params;
}
- StructFields(SmallVec<[ast::StructField; 1]>) {
+ StructFields(SmallVec<[ast::FieldDef; 1]>) {
"field";
- many fn flat_map_struct_field;
- fn visit_struct_field();
- fn make_struct_fields;
+ many fn flat_map_field_def;
+ fn visit_field_def();
+ fn make_field_defs;
}
Variants(SmallVec<[ast::Variant; 1]>) {
"variant"; many fn flat_map_variant; fn visit_variant(); fn make_variants;
@@ -241,10 +243,10 @@
AstFragment::Arms(items.map(Annotatable::expect_arm).collect())
}
AstFragmentKind::Fields => {
- AstFragment::Fields(items.map(Annotatable::expect_field).collect())
+ AstFragment::Fields(items.map(Annotatable::expect_expr_field).collect())
}
AstFragmentKind::FieldPats => {
- AstFragment::FieldPats(items.map(Annotatable::expect_field_pattern).collect())
+ AstFragment::FieldPats(items.map(Annotatable::expect_pat_field).collect())
}
AstFragmentKind::GenericParams => {
AstFragment::GenericParams(items.map(Annotatable::expect_generic_param).collect())
@@ -253,7 +255,7 @@
AstFragment::Params(items.map(Annotatable::expect_param).collect())
}
AstFragmentKind::StructFields => {
- AstFragment::StructFields(items.map(Annotatable::expect_struct_field).collect())
+ AstFragment::StructFields(items.map(Annotatable::expect_field_def).collect())
}
AstFragmentKind::Variants => {
AstFragment::Variants(items.map(Annotatable::expect_variant).collect())
@@ -299,23 +301,16 @@
},
Attr {
attr: ast::Attribute,
+ // Re-insertion position for inert attributes.
+ pos: usize,
item: Annotatable,
// Required for resolving derive helper attributes.
derives: Vec<Path>,
- // We temporarily report errors for attribute macros placed after derives
- after_derive: bool,
},
Derive {
path: Path,
item: Annotatable,
},
- /// "Invocation" that contains all derives from an item,
- /// broken into multiple `Derive` invocations when expanded.
- /// FIXME: Find a way to remove it.
- DeriveContainer {
- derives: Vec<Path>,
- item: Annotatable,
- },
}
impl InvocationKind {
@@ -326,9 +321,8 @@
// The assumption is that the attribute expansion cannot change field visibilities,
// and it holds because only inert attributes are supported in this position.
match self {
- InvocationKind::Attr { item: Annotatable::StructField(field), .. }
- | InvocationKind::Derive { item: Annotatable::StructField(field), .. }
- | InvocationKind::DeriveContainer { item: Annotatable::StructField(field), .. }
+ InvocationKind::Attr { item: Annotatable::FieldDef(field), .. }
+ | InvocationKind::Derive { item: Annotatable::FieldDef(field), .. }
if field.ident.is_none() =>
{
Some(field.vis.clone())
@@ -344,7 +338,6 @@
InvocationKind::Bang { span, .. } => *span,
InvocationKind::Attr { attr, .. } => attr.span,
InvocationKind::Derive { path, .. } => path.span,
- InvocationKind::DeriveContainer { item, .. } => item.span(),
}
}
}
@@ -359,24 +352,28 @@
MacroExpander { cx, monotonic }
}
+ // FIXME: Avoid visiting the crate as a `Mod` item,
+ // make crate a first class expansion target instead.
pub fn expand_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
- let mut module = ModuleData {
- mod_path: vec![Ident::from_str(&self.cx.ecfg.crate_name)],
- directory: match self.cx.source_map().span_to_unmapped_path(krate.span) {
- FileName::Real(name) => name.into_local_path(),
- other => PathBuf::from(other.to_string()),
- },
+ let file_path = match self.cx.source_map().span_to_unmapped_path(krate.span) {
+ FileName::Real(name) => name.into_local_path(),
+ other => PathBuf::from(other.to_string()),
};
- module.directory.pop();
- self.cx.root_path = module.directory.clone();
- self.cx.current_expansion.module = Rc::new(module);
-
- let orig_mod_span = krate.module.inner;
+ let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
+ self.cx.root_path = dir_path.clone();
+ self.cx.current_expansion.module = Rc::new(ModuleData {
+ mod_path: vec![Ident::from_str(&self.cx.ecfg.crate_name)],
+ file_path_stack: vec![file_path],
+ dir_path,
+ });
let krate_item = AstFragment::Items(smallvec![P(ast::Item {
attrs: krate.attrs,
span: krate.span,
- kind: ast::ItemKind::Mod(krate.module),
+ kind: ast::ItemKind::Mod(
+ Unsafe::No,
+ ModKind::Loaded(krate.items, Inline::Yes, krate.span)
+ ),
ident: Ident::invalid(),
id: ast::DUMMY_NODE_ID,
vis: ast::Visibility {
@@ -388,28 +385,22 @@
})]);
match self.fully_expand_fragment(krate_item).make_items().pop().map(P::into_inner) {
- Some(ast::Item { attrs, kind: ast::ItemKind::Mod(module), .. }) => {
+ Some(ast::Item {
+ attrs,
+ kind: ast::ItemKind::Mod(_, ModKind::Loaded(items, ..)),
+ ..
+ }) => {
krate.attrs = attrs;
- krate.module = module;
+ krate.items = items;
}
None => {
// Resolution failed so we return an empty expansion
krate.attrs = vec![];
- krate.module = ast::Mod {
- inner: orig_mod_span,
- unsafety: Unsafe::No,
- items: vec![],
- inline: true,
- };
+ krate.items = vec![];
}
Some(ast::Item { span, kind, .. }) => {
krate.attrs = vec![];
- krate.module = ast::Mod {
- inner: orig_mod_span,
- unsafety: Unsafe::No,
- items: vec![],
- inline: true,
- };
+ krate.items = vec![];
self.cx.span_err(
span,
&format!(
@@ -446,7 +437,7 @@
let mut undetermined_invocations = Vec::new();
let (mut progress, mut force) = (false, !self.monotonic);
loop {
- let (invoc, res) = if let Some(invoc) = invocations.pop() {
+ let (invoc, ext) = if let Some(invoc) = invocations.pop() {
invoc
} else {
self.resolve_imports();
@@ -464,8 +455,8 @@
continue;
};
- let res = match res {
- Some(res) => res,
+ let ext = match ext {
+ Some(ext) => ext,
None => {
let eager_expansion_root = if self.monotonic {
invoc.expansion_data.id
@@ -477,7 +468,7 @@
eager_expansion_root,
force,
) {
- Ok(res) => res,
+ Ok(ext) => ext,
Err(Indeterminate) => {
// Cannot resolve, will retry this invocation later.
undetermined_invocations.push((invoc, None));
@@ -491,86 +482,78 @@
self.cx.current_expansion = invoc.expansion_data.clone();
self.cx.force_mode = force;
- // FIXME(jseyfried): Refactor out the following logic
let fragment_kind = invoc.fragment_kind;
- let (expanded_fragment, new_invocations) = match res {
- InvocationRes::Single(ext) => match self.expand_invoc(invoc, &ext.kind) {
- ExpandResult::Ready(fragment) => self.collect_invocations(fragment, &[]),
- ExpandResult::Retry(invoc) => {
- if force {
- self.cx.span_bug(
- invoc.span(),
- "expansion entered force mode but is still stuck",
- );
- } else {
- // Cannot expand, will retry this invocation later.
- undetermined_invocations
- .push((invoc, Some(InvocationRes::Single(ext))));
- continue;
- }
- }
- },
- InvocationRes::DeriveContainer(_exts) => {
- // FIXME: Consider using the derive resolutions (`_exts`) immediately,
- // instead of enqueuing the derives to be resolved again later.
- let (derives, mut item) = match invoc.kind {
- InvocationKind::DeriveContainer { derives, item } => (derives, item),
- _ => unreachable!(),
- };
- let (item, derive_placeholders) = if !item.derive_allowed() {
- self.error_derive_forbidden_on_non_adt(&derives, &item);
- item.visit_attrs(|attrs| attrs.retain(|a| !a.has_name(sym::derive)));
- (item, Vec::new())
- } else {
- let mut visitor = StripUnconfigured {
- sess: self.cx.sess,
- features: self.cx.ecfg.features,
- modified: false,
- };
- let mut item = visitor.fully_configure(item);
- item.visit_attrs(|attrs| attrs.retain(|a| !a.has_name(sym::derive)));
- if visitor.modified && !derives.is_empty() {
- // Erase the tokens if cfg-stripping modified the item
- // This will cause us to synthesize fake tokens
- // when `nt_to_tokenstream` is called on this item.
- match &mut item {
- Annotatable::Item(item) => item.tokens = None,
- Annotatable::Stmt(stmt) => {
- if let StmtKind::Item(item) = &mut stmt.kind {
- item.tokens = None
- } else {
- panic!("Unexpected stmt {:?}", stmt);
- }
- }
- _ => panic!("Unexpected annotatable {:?}", item),
+ let (expanded_fragment, new_invocations) = match self.expand_invoc(invoc, &ext.kind) {
+ ExpandResult::Ready(fragment) => {
+ let derive_placeholders = self
+ .cx
+ .resolver
+ .take_derive_resolutions(expn_id)
+ .map(|derives| {
+ enum AnnotatableRef<'a> {
+ Item(&'a P<ast::Item>),
+ Stmt(&'a ast::Stmt),
}
- }
+ let item = match &fragment {
+ AstFragment::Items(items) => match &items[..] {
+ [item] => AnnotatableRef::Item(item),
+ _ => unreachable!(),
+ },
+ AstFragment::Stmts(stmts) => match &stmts[..] {
+ [stmt] => AnnotatableRef::Stmt(stmt),
+ _ => unreachable!(),
+ },
+ _ => unreachable!(),
+ };
- invocations.reserve(derives.len());
- let derive_placeholders = derives
- .into_iter()
- .map(|path| {
- let expn_id = ExpnId::fresh(None);
- invocations.push((
- Invocation {
- kind: InvocationKind::Derive { path, item: item.clone() },
- fragment_kind,
- expansion_data: ExpansionData {
- id: expn_id,
- ..self.cx.current_expansion.clone()
+ invocations.reserve(derives.len());
+ derives
+ .into_iter()
+ .map(|(_exts, path)| {
+ // FIXME: Consider using the derive resolutions (`_exts`)
+ // instead of enqueuing the derives to be resolved again later.
+ let expn_id = ExpnId::fresh(None);
+ invocations.push((
+ Invocation {
+ kind: InvocationKind::Derive {
+ path,
+ item: match item {
+ AnnotatableRef::Item(item) => {
+ Annotatable::Item(item.clone())
+ }
+ AnnotatableRef::Stmt(stmt) => {
+ Annotatable::Stmt(P(stmt.clone()))
+ }
+ },
+ },
+ fragment_kind,
+ expansion_data: ExpansionData {
+ id: expn_id,
+ ..self.cx.current_expansion.clone()
+ },
},
- },
- None,
- ));
- NodeId::placeholder_from_expn_id(expn_id)
- })
- .collect::<Vec<_>>();
- (item, derive_placeholders)
- };
+ None,
+ ));
+ NodeId::placeholder_from_expn_id(expn_id)
+ })
+ .collect::<Vec<_>>()
+ })
+ .unwrap_or_default();
- let fragment = fragment_kind.expect_from_annotatables(::std::iter::once(item));
self.collect_invocations(fragment, &derive_placeholders)
}
+ ExpandResult::Retry(invoc) => {
+ if force {
+ self.cx.span_bug(
+ invoc.span(),
+ "expansion entered force mode but is still stuck",
+ );
+ } else {
+ // Cannot expand, will retry this invocation later.
+ undetermined_invocations.push((invoc, Some(ext)));
+ continue;
+ }
+ }
};
progress = true;
@@ -596,29 +579,6 @@
fragment_with_placeholders
}
- fn error_derive_forbidden_on_non_adt(&self, derives: &[Path], item: &Annotatable) {
- let attr = self.cx.sess.find_by_name(item.attrs(), sym::derive);
- let span = attr.map_or(item.span(), |attr| attr.span);
- let mut err = struct_span_err!(
- self.cx.sess,
- span,
- E0774,
- "`derive` may only be applied to structs, enums and unions",
- );
- if let Some(ast::Attribute { style: ast::AttrStyle::Inner, .. }) = attr {
- let trait_list = derives.iter().map(|t| pprust::path_to_string(t)).collect::<Vec<_>>();
- let suggestion = format!("#[derive({})]", trait_list.join(", "));
- err.span_suggestion(
- span,
- "try an outer attribute",
- suggestion,
- // We don't 𝑘𝑛𝑜𝑤 that the following item is an ADT
- Applicability::MaybeIncorrect,
- );
- }
- err.emit();
- }
-
fn resolve_imports(&mut self) {
if self.monotonic {
self.cx.resolver.resolve_imports();
@@ -633,7 +593,7 @@
&mut self,
mut fragment: AstFragment,
extra_placeholders: &[NodeId],
- ) -> (AstFragment, Vec<(Invocation, Option<InvocationRes>)>) {
+ ) -> (AstFragment, Vec<(Invocation, Option<Lrc<SyntaxExtension>>)>) {
// Resolve `$crate`s in the fragment for pretty-printing.
self.cx.resolver.resolve_dollar_crates();
@@ -733,7 +693,7 @@
}
_ => unreachable!(),
},
- InvocationKind::Attr { attr, mut item, derives, after_derive } => match ext {
+ InvocationKind::Attr { attr, pos, mut item, derives } => match ext {
SyntaxExtensionKind::Attr(expander) => {
self.gate_proc_macro_input(&item);
self.gate_proc_macro_attr_item(span, &item);
@@ -764,12 +724,7 @@
ExpandResult::Retry(item) => {
// Reassemble the original invocation for retrying.
return ExpandResult::Retry(Invocation {
- kind: InvocationKind::Attr {
- attr,
- item,
- derives,
- after_derive,
- },
+ kind: InvocationKind::Attr { attr, pos, item, derives },
..invoc
});
}
@@ -787,7 +742,7 @@
if *mark_used {
self.cx.sess.mark_attr_used(&attr);
}
- item.visit_attrs(|attrs| attrs.push(attr));
+ item.visit_attrs(|attrs| attrs.insert(pos, attr));
fragment_kind.expect_from_annotatables(iter::once(item))
}
_ => unreachable!(),
@@ -813,7 +768,6 @@
}
_ => unreachable!(),
},
- InvocationKind::DeriveContainer { .. } => unreachable!(),
})
}
@@ -833,11 +787,11 @@
}
Annotatable::Expr(_) => "expressions",
Annotatable::Arm(..)
- | Annotatable::Field(..)
- | Annotatable::FieldPat(..)
+ | Annotatable::ExprField(..)
+ | Annotatable::PatField(..)
| Annotatable::GenericParam(..)
| Annotatable::Param(..)
- | Annotatable::StructField(..)
+ | Annotatable::FieldDef(..)
| Annotatable::Variant(..) => panic!("unexpected annotatable"),
};
if self.cx.ecfg.proc_macro_hygiene() {
@@ -860,7 +814,9 @@
impl<'ast, 'a> Visitor<'ast> for GateProcMacroInput<'a> {
fn visit_item(&mut self, item: &'ast ast::Item) {
match &item.kind {
- ast::ItemKind::Mod(module) if !module.inline => {
+ ast::ItemKind::Mod(_, mod_kind)
+ if !matches!(mod_kind, ModKind::Loaded(_, Inline::Yes, _)) =>
+ {
feature_err(
self.parse_sess,
sym::proc_macro_hygiene,
@@ -960,7 +916,9 @@
}
}
AstFragmentKind::Ty => AstFragment::Ty(this.parse_ty()?),
- AstFragmentKind::Pat => AstFragment::Pat(this.parse_pat(None)?),
+ AstFragmentKind::Pat => {
+ AstFragment::Pat(this.parse_pat_allow_top_alt(None, GateOr::Yes, RecoverComma::No)?)
+ }
AstFragmentKind::Arms
| AstFragmentKind::Fields
| AstFragmentKind::FieldPats
@@ -1011,29 +969,13 @@
struct InvocationCollector<'a, 'b> {
cx: &'a mut ExtCtxt<'b>,
cfg: StripUnconfigured<'a>,
- invocations: Vec<(Invocation, Option<InvocationRes>)>,
+ invocations: Vec<(Invocation, Option<Lrc<SyntaxExtension>>)>,
monotonic: bool,
}
impl<'a, 'b> InvocationCollector<'a, 'b> {
fn collect(&mut self, fragment_kind: AstFragmentKind, kind: InvocationKind) -> AstFragment {
- // Expansion data for all the collected invocations is set upon their resolution,
- // with exception of the derive container case which is not resolved and can get
- // its expansion data immediately.
- let expn_data = match &kind {
- InvocationKind::DeriveContainer { item, .. } => {
- let mut expn_data = ExpnData::default(
- ExpnKind::Macro(MacroKind::Attr, sym::derive),
- item.span(),
- self.cx.sess.parse_sess.edition,
- None,
- );
- expn_data.parent = self.cx.current_expansion.id;
- Some(expn_data)
- }
- _ => None,
- };
- let expn_id = ExpnId::fresh(expn_data);
+ let expn_id = ExpnId::fresh(None);
let vis = kind.placeholder_visibility();
self.invocations.push((
Invocation {
@@ -1061,67 +1003,50 @@
fn collect_attr(
&mut self,
- (attr, derives, after_derive): (Option<ast::Attribute>, Vec<Path>, bool),
+ (attr, pos, derives): (ast::Attribute, usize, Vec<Path>),
item: Annotatable,
kind: AstFragmentKind,
) -> AstFragment {
- self.collect(
- kind,
- match attr {
- Some(attr) => InvocationKind::Attr { attr, item, derives, after_derive },
- None => InvocationKind::DeriveContainer { derives, item },
- },
- )
+ self.collect(kind, InvocationKind::Attr { attr, pos, item, derives })
}
- fn find_attr_invoc(
- &self,
- attrs: &mut Vec<ast::Attribute>,
- after_derive: &mut bool,
- ) -> Option<ast::Attribute> {
- attrs
- .iter()
- .position(|a| {
- if a.has_name(sym::derive) {
- *after_derive = true;
- }
- !self.cx.sess.is_attr_known(a) && !is_builtin_attr(a)
- })
- .map(|i| attrs.remove(i))
- }
-
- /// If `item` is an attr invocation, remove and return the macro attribute and derive traits.
+ /// If `item` is an attribute invocation, remove the attribute and return it together with
+ /// its position and derives following it. We have to collect the derives in order to resolve
+ /// legacy derive helpers (helpers written before derives that introduce them).
fn take_first_attr(
&mut self,
- item: &mut impl HasAttrs,
- ) -> Option<(Option<ast::Attribute>, Vec<Path>, /* after_derive */ bool)> {
- let (mut attr, mut traits, mut after_derive) = (None, Vec::new(), false);
+ item: &mut impl AstLike,
+ ) -> Option<(ast::Attribute, usize, Vec<Path>)> {
+ let mut attr = None;
- item.visit_attrs(|mut attrs| {
- attr = self.find_attr_invoc(&mut attrs, &mut after_derive);
- traits = collect_derives(&mut self.cx, &mut attrs);
+ item.visit_attrs(|attrs| {
+ attr = attrs
+ .iter()
+ .position(|a| !self.cx.sess.is_attr_known(a) && !is_builtin_attr(a))
+ .map(|attr_pos| {
+ let attr = attrs.remove(attr_pos);
+ let following_derives = attrs[attr_pos..]
+ .iter()
+ .filter(|a| a.has_name(sym::derive))
+ .flat_map(|a| a.meta_item_list().unwrap_or_default())
+ .filter_map(|nested_meta| match nested_meta {
+ NestedMetaItem::MetaItem(ast::MetaItem {
+ kind: MetaItemKind::Word,
+ path,
+ ..
+ }) => Some(path),
+ _ => None,
+ })
+ .collect();
+
+ (attr, attr_pos, following_derives)
+ })
});
- if attr.is_some() || !traits.is_empty() { Some((attr, traits, after_derive)) } else { None }
+ attr
}
- /// Alternative to `take_first_attr()` that ignores `#[derive]` so invocations fallthrough
- /// to the unused-attributes lint (making it an error on statements and expressions
- /// is a breaking change)
- fn take_first_attr_no_derive(
- &mut self,
- nonitem: &mut impl HasAttrs,
- ) -> Option<(Option<ast::Attribute>, Vec<Path>, /* after_derive */ bool)> {
- let (mut attr, mut after_derive) = (None, false);
-
- nonitem.visit_attrs(|mut attrs| {
- attr = self.find_attr_invoc(&mut attrs, &mut after_derive);
- });
-
- attr.map(|attr| (Some(attr), Vec::new(), after_derive))
- }
-
- fn configure<T: HasAttrs>(&mut self, node: T) -> Option<T> {
+ fn configure<T: AstLike>(&mut self, node: T) -> Option<T> {
self.cfg.configure(node)
}
@@ -1132,17 +1057,6 @@
for attr in attrs.iter() {
rustc_ast_passes::feature_gate::check_attribute(attr, self.cx.sess, features);
validate_attr::check_meta(&self.cx.sess.parse_sess, attr);
-
- // macros are expanded before any lint passes so this warning has to be hardcoded
- if attr.has_name(sym::derive) {
- self.cx
- .parse_sess()
- .span_diagnostic
- .struct_span_warn(attr.span, "`#[derive]` does nothing on macro invocations")
- .note("this may become a hard error in a future release")
- .emit();
- }
-
if attr.doc_str().is_some() {
self.cx.sess.parse_sess.buffer_lint_with_diagnostic(
&UNUSED_DOC_COMMENTS,
@@ -1160,14 +1074,10 @@
fn visit_expr(&mut self, expr: &mut P<ast::Expr>) {
self.cfg.configure_expr(expr);
visit_clobber(expr.deref_mut(), |mut expr| {
- self.cfg.configure_expr_kind(&mut expr.kind);
-
- if let Some(attr) = self.take_first_attr_no_derive(&mut expr) {
+ if let Some(attr) = self.take_first_attr(&mut expr) {
// Collect the invoc regardless of whether or not attributes are permitted here
// expansion will eat the attribute so it won't error later.
- if let Some(attr) = attr.0.as_ref() {
- self.cfg.maybe_emit_expr_attr_err(attr)
- }
+ self.cfg.maybe_emit_expr_attr_err(&attr.0);
// AstFragmentKind::Expr requires the macro to emit an expression.
return self
@@ -1198,28 +1108,28 @@
noop_flat_map_arm(arm, self)
}
- fn flat_map_field(&mut self, field: ast::Field) -> SmallVec<[ast::Field; 1]> {
+ fn flat_map_expr_field(&mut self, field: ast::ExprField) -> SmallVec<[ast::ExprField; 1]> {
let mut field = configure!(self, field);
if let Some(attr) = self.take_first_attr(&mut field) {
return self
- .collect_attr(attr, Annotatable::Field(field), AstFragmentKind::Fields)
- .make_fields();
+ .collect_attr(attr, Annotatable::ExprField(field), AstFragmentKind::Fields)
+ .make_expr_fields();
}
- noop_flat_map_field(field, self)
+ noop_flat_map_expr_field(field, self)
}
- fn flat_map_field_pattern(&mut self, fp: ast::FieldPat) -> SmallVec<[ast::FieldPat; 1]> {
+ fn flat_map_pat_field(&mut self, fp: ast::PatField) -> SmallVec<[ast::PatField; 1]> {
let mut fp = configure!(self, fp);
if let Some(attr) = self.take_first_attr(&mut fp) {
return self
- .collect_attr(attr, Annotatable::FieldPat(fp), AstFragmentKind::FieldPats)
- .make_field_patterns();
+ .collect_attr(attr, Annotatable::PatField(fp), AstFragmentKind::FieldPats)
+ .make_pat_fields();
}
- noop_flat_map_field_pattern(fp, self)
+ noop_flat_map_pat_field(fp, self)
}
fn flat_map_param(&mut self, p: ast::Param) -> SmallVec<[ast::Param; 1]> {
@@ -1234,16 +1144,16 @@
noop_flat_map_param(p, self)
}
- fn flat_map_struct_field(&mut self, sf: ast::StructField) -> SmallVec<[ast::StructField; 1]> {
+ fn flat_map_field_def(&mut self, sf: ast::FieldDef) -> SmallVec<[ast::FieldDef; 1]> {
let mut sf = configure!(self, sf);
if let Some(attr) = self.take_first_attr(&mut sf) {
return self
- .collect_attr(attr, Annotatable::StructField(sf), AstFragmentKind::StructFields)
- .make_struct_fields();
+ .collect_attr(attr, Annotatable::FieldDef(sf), AstFragmentKind::StructFields)
+ .make_field_defs();
}
- noop_flat_map_struct_field(sf, self)
+ noop_flat_map_field_def(sf, self)
}
fn flat_map_variant(&mut self, variant: ast::Variant) -> SmallVec<[ast::Variant; 1]> {
@@ -1261,12 +1171,8 @@
fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
let expr = configure!(self, expr);
expr.filter_map(|mut expr| {
- self.cfg.configure_expr_kind(&mut expr.kind);
-
- if let Some(attr) = self.take_first_attr_no_derive(&mut expr) {
- if let Some(attr) = attr.0.as_ref() {
- self.cfg.maybe_emit_expr_attr_err(attr)
- }
+ if let Some(attr) = self.take_first_attr(&mut expr) {
+ self.cfg.maybe_emit_expr_attr_err(&attr.0);
return self
.collect_attr(attr, Annotatable::Expr(P(expr)), AstFragmentKind::OptExpr)
@@ -1289,7 +1195,6 @@
}
fn visit_pat(&mut self, pat: &mut P<ast::Pat>) {
- self.cfg.configure_pat(pat);
match pat.kind {
PatKind::MacCall(_) => {}
_ => return noop_visit_pat(pat, self),
@@ -1308,15 +1213,7 @@
// we'll expand attributes on expressions separately
if !stmt.is_expr() {
- let attr = if stmt.is_item() {
- self.take_first_attr(&mut stmt)
- } else {
- // Ignore derives on non-item statements for backwards compatibility.
- // This will result in a unused attribute warning
- self.take_first_attr_no_derive(&mut stmt)
- };
-
- if let Some(attr) = attr {
+ if let Some(attr) = self.take_first_attr(&mut stmt) {
return self
.collect_attr(attr, Annotatable::Stmt(P(stmt)), AstFragmentKind::Stmts)
.make_stmts();
@@ -1349,10 +1246,12 @@
}
fn visit_block(&mut self, block: &mut P<Block>) {
- let old_directory_ownership = self.cx.current_expansion.directory_ownership;
- self.cx.current_expansion.directory_ownership = DirectoryOwnership::UnownedViaBlock;
+ let orig_dir_ownership = mem::replace(
+ &mut self.cx.current_expansion.dir_ownership,
+ DirOwnership::UnownedViaBlock,
+ );
noop_visit_block(block, self);
- self.cx.current_expansion.directory_ownership = old_directory_ownership;
+ self.cx.current_expansion.dir_ownership = orig_dir_ownership;
}
fn flat_map_item(&mut self, item: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
@@ -1379,69 +1278,83 @@
_ => unreachable!(),
})
}
- ast::ItemKind::Mod(ref mut old_mod @ ast::Mod { .. }) if ident != Ident::invalid() => {
- let sess = &self.cx.sess.parse_sess;
- let orig_ownership = self.cx.current_expansion.directory_ownership;
- let mut module = (*self.cx.current_expansion.module).clone();
-
- let pushed = &mut false; // Record `parse_external_mod` pushing so we can pop.
- let dir = Directory { ownership: orig_ownership, path: module.directory };
- let Directory { ownership, path } = if old_mod.inline {
- // Inline `mod foo { ... }`, but we still need to push directories.
- item.attrs = attrs;
- push_directory(&self.cx.sess, ident, &item.attrs, dir)
- } else {
- // We have an outline `mod foo;` so we need to parse the file.
- let (new_mod, dir) = parse_external_mod(
- &self.cx.sess,
- ident,
- span,
- old_mod.unsafety,
- dir,
- &mut attrs,
- pushed,
- );
-
- let krate = ast::Crate {
- span: new_mod.inner,
- module: new_mod,
- attrs,
- proc_macros: vec![],
- };
- if let Some(extern_mod_loaded) = self.cx.extern_mod_loaded {
- extern_mod_loaded(&krate, ident);
+ ast::ItemKind::Mod(_, ref mut mod_kind) if ident != Ident::invalid() => {
+ let (file_path, dir_path, dir_ownership) = match mod_kind {
+ ModKind::Loaded(_, inline, _) => {
+ // Inline `mod foo { ... }`, but we still need to push directories.
+ let (dir_path, dir_ownership) = mod_dir_path(
+ &self.cx.sess,
+ ident,
+ &attrs,
+ &self.cx.current_expansion.module,
+ self.cx.current_expansion.dir_ownership,
+ *inline,
+ );
+ item.attrs = attrs;
+ (None, dir_path, dir_ownership)
}
+ ModKind::Unloaded => {
+ // We have an outline `mod foo;` so we need to parse the file.
+ let old_attrs_len = attrs.len();
+ let ParsedExternalMod {
+ mut items,
+ inner_span,
+ file_path,
+ dir_path,
+ dir_ownership,
+ } = parse_external_mod(
+ &self.cx.sess,
+ ident,
+ span,
+ &self.cx.current_expansion.module,
+ self.cx.current_expansion.dir_ownership,
+ &mut attrs,
+ );
- *old_mod = krate.module;
- item.attrs = krate.attrs;
- // File can have inline attributes, e.g., `#![cfg(...)]` & co. => Reconfigure.
- item = match self.configure(item) {
- Some(node) => node,
- None => {
- if *pushed {
- sess.included_mod_stack.borrow_mut().pop();
- }
- return Default::default();
+ if let Some(extern_mod_loaded) = self.cx.extern_mod_loaded {
+ (attrs, items) = extern_mod_loaded(ident, attrs, items, inner_span);
}
- };
- dir
+
+ *mod_kind = ModKind::Loaded(items, Inline::No, inner_span);
+ item.attrs = attrs;
+ if item.attrs.len() > old_attrs_len {
+ // If we loaded an out-of-line module and added some inner attributes,
+ // then we need to re-configure it and re-collect attributes for
+ // resolution and expansion.
+ item = configure!(self, item);
+
+ if let Some(attr) = self.take_first_attr(&mut item) {
+ return self
+ .collect_attr(
+ attr,
+ Annotatable::Item(item),
+ AstFragmentKind::Items,
+ )
+ .make_items();
+ }
+ }
+ (Some(file_path), dir_path, dir_ownership)
+ }
};
// Set the module info before we flat map.
- self.cx.current_expansion.directory_ownership = ownership;
- module.directory = path;
+ let mut module = self.cx.current_expansion.module.with_dir_path(dir_path);
module.mod_path.push(ident);
+ if let Some(file_path) = file_path {
+ module.file_path_stack.push(file_path);
+ }
+
let orig_module =
mem::replace(&mut self.cx.current_expansion.module, Rc::new(module));
+ let orig_dir_ownership =
+ mem::replace(&mut self.cx.current_expansion.dir_ownership, dir_ownership);
let result = noop_flat_map_item(item, self);
// Restore the module info.
+ self.cx.current_expansion.dir_ownership = orig_dir_ownership;
self.cx.current_expansion.module = orig_module;
- self.cx.current_expansion.directory_ownership = orig_ownership;
- if *pushed {
- sess.included_mod_stack.borrow_mut().pop();
- }
+
result
}
_ => {
@@ -1511,15 +1424,12 @@
});
}
- fn visit_foreign_mod(&mut self, foreign_mod: &mut ast::ForeignMod) {
- self.cfg.configure_foreign_mod(foreign_mod);
- noop_visit_foreign_mod(foreign_mod, self);
- }
-
fn flat_map_foreign_item(
&mut self,
- mut foreign_item: P<ast::ForeignItem>,
+ foreign_item: P<ast::ForeignItem>,
) -> SmallVec<[P<ast::ForeignItem>; 1]> {
+ let mut foreign_item = configure!(self, foreign_item);
+
if let Some(attr) = self.take_first_attr(&mut foreign_item) {
return self
.collect_attr(
@@ -1544,11 +1454,6 @@
}
}
- fn visit_item_kind(&mut self, item: &mut ast::ItemKind) {
- self.cfg.configure_item_kind(item);
- noop_visit_item_kind(item, self);
- }
-
fn flat_map_generic_param(
&mut self,
param: ast::GenericParam,
@@ -1707,11 +1612,6 @@
*id = self.cx.resolver.next_node_id()
}
}
-
- fn visit_fn_decl(&mut self, mut fn_decl: &mut P<ast::FnDecl>) {
- self.cfg.configure_fn_decl(&mut fn_decl);
- noop_visit_fn_decl(fn_decl, self);
- }
}
pub struct ExpansionConfig<'feat> {
@@ -1719,9 +1619,8 @@
pub features: Option<&'feat Features>,
pub recursion_limit: Limit,
pub trace_mac: bool,
- pub should_test: bool, // If false, strip `#[test]` nodes
- pub keep_macs: bool,
- pub span_debug: bool, // If true, use verbose debugging for `proc_macro::Span`
+ pub should_test: bool, // If false, strip `#[test]` nodes
+ pub span_debug: bool, // If true, use verbose debugging for `proc_macro::Span`
pub proc_macro_backtrace: bool, // If true, show backtraces for proc-macro panics
}
@@ -1733,7 +1632,6 @@
recursion_limit: Limit::new(1024),
trace_mac: false,
should_test: false,
- keep_macs: false,
span_debug: false,
proc_macro_backtrace: false,
}
diff --git a/compiler/rustc_expand/src/lib.rs b/compiler/rustc_expand/src/lib.rs
index 3b722c0..1a93975 100644
--- a/compiler/rustc_expand/src/lib.rs
+++ b/compiler/rustc_expand/src/lib.rs
@@ -1,6 +1,7 @@
#![feature(bool_to_option)]
#![feature(crate_visibility_modifier)]
#![feature(decl_macro)]
+#![feature(destructuring_assignment)]
#![feature(or_patterns)]
#![feature(proc_macro_diagnostic)]
#![feature(proc_macro_internals)]
diff --git a/compiler/rustc_expand/src/module.rs b/compiler/rustc_expand/src/module.rs
index 171cb3f..c5ce0ba 100644
--- a/compiler/rustc_expand/src/module.rs
+++ b/compiler/rustc_expand/src/module.rs
@@ -1,236 +1,175 @@
-use rustc_ast::{token, Attribute, Mod, Unsafe};
-use rustc_errors::{struct_span_err, PResult};
+use crate::base::ModuleData;
+use rustc_ast::ptr::P;
+use rustc_ast::{token, Attribute, Inline, Item};
+use rustc_errors::{struct_span_err, DiagnosticBuilder};
use rustc_parse::new_parser_from_file;
use rustc_session::parse::ParseSess;
use rustc_session::Session;
-use rustc_span::source_map::{FileName, Span};
use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
use std::path::{self, Path, PathBuf};
-#[derive(Clone)]
-pub struct Directory {
- pub path: PathBuf,
- pub ownership: DirectoryOwnership,
-}
-
#[derive(Copy, Clone)]
-pub enum DirectoryOwnership {
+pub enum DirOwnership {
Owned {
// None if `mod.rs`, `Some("foo")` if we're in `foo.rs`.
relative: Option<Ident>,
},
UnownedViaBlock,
- UnownedViaMod,
-}
-
-/// Information about the path to a module.
-// Public for rustfmt usage.
-pub struct ModulePath<'a> {
- name: String,
- path_exists: bool,
- pub result: PResult<'a, ModulePathSuccess>,
}
// Public for rustfmt usage.
pub struct ModulePathSuccess {
- pub path: PathBuf,
- pub ownership: DirectoryOwnership,
+ pub file_path: PathBuf,
+ pub dir_ownership: DirOwnership,
+}
+
+crate struct ParsedExternalMod {
+ pub items: Vec<P<Item>>,
+ pub inner_span: Span,
+ pub file_path: PathBuf,
+ pub dir_path: PathBuf,
+ pub dir_ownership: DirOwnership,
+}
+
+pub enum ModError<'a> {
+ CircularInclusion(Vec<PathBuf>),
+ ModInBlock(Option<Ident>),
+ FileNotFound(Ident, PathBuf),
+ MultipleCandidates(Ident, String, String),
+ ParserError(DiagnosticBuilder<'a>),
}
crate fn parse_external_mod(
sess: &Session,
- id: Ident,
+ ident: Ident,
span: Span, // The span to blame on errors.
- unsafety: Unsafe,
- Directory { mut ownership, path }: Directory,
+ module: &ModuleData,
+ mut dir_ownership: DirOwnership,
attrs: &mut Vec<Attribute>,
- pop_mod_stack: &mut bool,
-) -> (Mod, Directory) {
+) -> ParsedExternalMod {
// We bail on the first error, but that error does not cause a fatal error... (1)
- let result: PResult<'_, _> = try {
+ let result: Result<_, ModError<'_>> = try {
// Extract the file path and the new ownership.
- let mp = submod_path(sess, id, span, &attrs, ownership, &path)?;
- ownership = mp.ownership;
+ let mp = mod_file_path(sess, ident, &attrs, &module.dir_path, dir_ownership)?;
+ dir_ownership = mp.dir_ownership;
// Ensure file paths are acyclic.
- let mut included_mod_stack = sess.parse_sess.included_mod_stack.borrow_mut();
- error_on_circular_module(&sess.parse_sess, span, &mp.path, &included_mod_stack)?;
- included_mod_stack.push(mp.path.clone());
- *pop_mod_stack = true; // We have pushed, so notify caller.
- drop(included_mod_stack);
+ if let Some(pos) = module.file_path_stack.iter().position(|p| p == &mp.file_path) {
+ Err(ModError::CircularInclusion(module.file_path_stack[pos..].to_vec()))?;
+ }
// Actually parse the external file as a module.
- let mut parser = new_parser_from_file(&sess.parse_sess, &mp.path, Some(span));
- let mut module = parser.parse_mod(&token::Eof, unsafety)?;
- module.0.inline = false;
- module
+ let mut parser = new_parser_from_file(&sess.parse_sess, &mp.file_path, Some(span));
+ let (mut inner_attrs, items, inner_span) =
+ parser.parse_mod(&token::Eof).map_err(|err| ModError::ParserError(err))?;
+ attrs.append(&mut inner_attrs);
+ (items, inner_span, mp.file_path)
};
// (1) ...instead, we return a dummy module.
- let (module, mut new_attrs) = result.map_err(|mut err| err.emit()).unwrap_or_else(|_| {
- let module = Mod { inner: Span::default(), unsafety, items: Vec::new(), inline: false };
- (module, Vec::new())
- });
- attrs.append(&mut new_attrs);
+ let (items, inner_span, file_path) =
+ result.map_err(|err| err.report(sess, span)).unwrap_or_default();
- // Extract the directory path for submodules of `module`.
- let path = sess.source_map().span_to_unmapped_path(module.inner);
- let mut path = match path {
- FileName::Real(name) => name.into_local_path(),
- other => PathBuf::from(other.to_string()),
- };
- path.pop();
+ // Extract the directory path for submodules of the module.
+ let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
- (module, Directory { ownership, path })
+ ParsedExternalMod { items, inner_span, file_path, dir_path, dir_ownership }
}
-fn error_on_circular_module<'a>(
- sess: &'a ParseSess,
- span: Span,
- path: &Path,
- included_mod_stack: &[PathBuf],
-) -> PResult<'a, ()> {
- if let Some(i) = included_mod_stack.iter().position(|p| *p == path) {
- let mut err = String::from("circular modules: ");
- for p in &included_mod_stack[i..] {
- err.push_str(&p.to_string_lossy());
- err.push_str(" -> ");
- }
- err.push_str(&path.to_string_lossy());
- return Err(sess.span_diagnostic.struct_span_err(span, &err[..]));
- }
- Ok(())
-}
-
-crate fn push_directory(
+crate fn mod_dir_path(
sess: &Session,
- id: Ident,
+ ident: Ident,
attrs: &[Attribute],
- Directory { mut ownership, mut path }: Directory,
-) -> Directory {
- if let Some(filename) = sess.first_attr_value_str_by_name(attrs, sym::path) {
- path.push(&*filename.as_str());
- ownership = DirectoryOwnership::Owned { relative: None };
- } else {
- // We have to push on the current module name in the case of relative
- // paths in order to ensure that any additional module paths from inline
- // `mod x { ... }` come after the relative extension.
- //
- // For example, a `mod z { ... }` inside `x/y.rs` should set the current
- // directory path to `/x/y/z`, not `/x/z` with a relative offset of `y`.
- if let DirectoryOwnership::Owned { relative } = &mut ownership {
- if let Some(ident) = relative.take() {
- // Remove the relative offset.
- path.push(&*ident.as_str());
+ module: &ModuleData,
+ mut dir_ownership: DirOwnership,
+ inline: Inline,
+) -> (PathBuf, DirOwnership) {
+ match inline {
+ Inline::Yes => {
+ if let Some(file_path) = mod_file_path_from_attr(sess, attrs, &module.dir_path) {
+ // For inline modules, the file path from `#[path]` is actually the directory path
+ // for historical reasons, so we don't pop the last segment here.
+ return (file_path, DirOwnership::Owned { relative: None });
}
- }
- path.push(&*id.as_str());
- }
- Directory { ownership, path }
-}
-fn submod_path<'a>(
- sess: &'a Session,
- id: Ident,
- span: Span,
- attrs: &[Attribute],
- ownership: DirectoryOwnership,
- dir_path: &Path,
-) -> PResult<'a, ModulePathSuccess> {
- if let Some(path) = submod_path_from_attr(sess, attrs, dir_path) {
- let ownership = match path.file_name().and_then(|s| s.to_str()) {
- // All `#[path]` files are treated as though they are a `mod.rs` file.
- // This means that `mod foo;` declarations inside `#[path]`-included
- // files are siblings,
+ // We have to push on the current module name in the case of relative
+ // paths in order to ensure that any additional module paths from inline
+ // `mod x { ... }` come after the relative extension.
//
- // Note that this will produce weirdness when a file named `foo.rs` is
- // `#[path]` included and contains a `mod foo;` declaration.
- // If you encounter this, it's your own darn fault :P
- Some(_) => DirectoryOwnership::Owned { relative: None },
- _ => DirectoryOwnership::UnownedViaMod,
- };
- return Ok(ModulePathSuccess { ownership, path });
- }
-
- let relative = match ownership {
- DirectoryOwnership::Owned { relative } => relative,
- DirectoryOwnership::UnownedViaBlock | DirectoryOwnership::UnownedViaMod => None,
- };
- let ModulePath { path_exists, name, result } =
- default_submod_path(&sess.parse_sess, id, span, relative, dir_path);
- match ownership {
- DirectoryOwnership::Owned { .. } => Ok(result?),
- DirectoryOwnership::UnownedViaBlock => {
- let _ = result.map_err(|mut err| err.cancel());
- error_decl_mod_in_block(&sess.parse_sess, span, path_exists, &name)
- }
- DirectoryOwnership::UnownedViaMod => {
- let _ = result.map_err(|mut err| err.cancel());
- error_cannot_declare_mod_here(&sess.parse_sess, span, path_exists, &name)
- }
- }
-}
-
-fn error_decl_mod_in_block<'a, T>(
- sess: &'a ParseSess,
- span: Span,
- path_exists: bool,
- name: &str,
-) -> PResult<'a, T> {
- let msg = "Cannot declare a non-inline module inside a block unless it has a path attribute";
- let mut err = sess.span_diagnostic.struct_span_err(span, msg);
- if path_exists {
- let msg = format!("Maybe `use` the module `{}` instead of redeclaring it", name);
- err.span_note(span, &msg);
- }
- Err(err)
-}
-
-fn error_cannot_declare_mod_here<'a, T>(
- sess: &'a ParseSess,
- span: Span,
- path_exists: bool,
- name: &str,
-) -> PResult<'a, T> {
- let mut err =
- sess.span_diagnostic.struct_span_err(span, "cannot declare a new module at this location");
- if !span.is_dummy() {
- if let FileName::Real(src_name) = sess.source_map().span_to_filename(span) {
- let src_path = src_name.into_local_path();
- if let Some(stem) = src_path.file_stem() {
- let mut dest_path = src_path.clone();
- dest_path.set_file_name(stem);
- dest_path.push("mod.rs");
- err.span_note(
- span,
- &format!(
- "maybe move this module `{}` to its own directory via `{}`",
- src_path.display(),
- dest_path.display()
- ),
- );
+ // For example, a `mod z { ... }` inside `x/y.rs` should set the current
+ // directory path to `/x/y/z`, not `/x/z` with a relative offset of `y`.
+ let mut dir_path = module.dir_path.clone();
+ if let DirOwnership::Owned { relative } = &mut dir_ownership {
+ if let Some(ident) = relative.take() {
+ // Remove the relative offset.
+ dir_path.push(&*ident.as_str());
+ }
}
+ dir_path.push(&*ident.as_str());
+
+ (dir_path, dir_ownership)
+ }
+ Inline::No => {
+ // FIXME: This is a subset of `parse_external_mod` without actual parsing,
+ // check whether the logic for unloaded, loaded and inline modules can be unified.
+ let file_path = mod_file_path(sess, ident, &attrs, &module.dir_path, dir_ownership)
+ .map(|mp| {
+ dir_ownership = mp.dir_ownership;
+ mp.file_path
+ })
+ .unwrap_or_default();
+
+ // Extract the directory path for submodules of the module.
+ let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
+
+ (dir_path, dir_ownership)
}
}
- if path_exists {
- err.span_note(
- span,
- &format!("... or maybe `use` the module `{}` instead of possibly redeclaring it", name),
- );
+}
+
+fn mod_file_path<'a>(
+ sess: &'a Session,
+ ident: Ident,
+ attrs: &[Attribute],
+ dir_path: &Path,
+ dir_ownership: DirOwnership,
+) -> Result<ModulePathSuccess, ModError<'a>> {
+ if let Some(file_path) = mod_file_path_from_attr(sess, attrs, dir_path) {
+ // All `#[path]` files are treated as though they are a `mod.rs` file.
+ // This means that `mod foo;` declarations inside `#[path]`-included
+ // files are siblings,
+ //
+ // Note that this will produce weirdness when a file named `foo.rs` is
+ // `#[path]` included and contains a `mod foo;` declaration.
+ // If you encounter this, it's your own darn fault :P
+ let dir_ownership = DirOwnership::Owned { relative: None };
+ return Ok(ModulePathSuccess { file_path, dir_ownership });
}
- Err(err)
+
+ let relative = match dir_ownership {
+ DirOwnership::Owned { relative } => relative,
+ DirOwnership::UnownedViaBlock => None,
+ };
+ let result = default_submod_path(&sess.parse_sess, ident, relative, dir_path);
+ match dir_ownership {
+ DirOwnership::Owned { .. } => result,
+ DirOwnership::UnownedViaBlock => Err(ModError::ModInBlock(match result {
+ Ok(_) | Err(ModError::MultipleCandidates(..)) => Some(ident),
+ _ => None,
+ })),
+ }
}
/// Derive a submodule path from the first found `#[path = "path_string"]`.
/// The provided `dir_path` is joined with the `path_string`.
-pub(super) fn submod_path_from_attr(
+fn mod_file_path_from_attr(
sess: &Session,
attrs: &[Attribute],
dir_path: &Path,
) -> Option<PathBuf> {
// Extract path string from first `#[path = "path_string"]` attribute.
- let path_string = sess.first_attr_value_str_by_name(attrs, sym::path)?;
- let path_string = path_string.as_str();
+ let path_string = sess.first_attr_value_str_by_name(attrs, sym::path)?.as_str();
// On windows, the base path might have the form
// `\\?\foo\bar` in which case it does not tolerate
@@ -246,15 +185,14 @@
// Public for rustfmt usage.
pub fn default_submod_path<'a>(
sess: &'a ParseSess,
- id: Ident,
- span: Span,
+ ident: Ident,
relative: Option<Ident>,
dir_path: &Path,
-) -> ModulePath<'a> {
+) -> Result<ModulePathSuccess, ModError<'a>> {
// If we're in a foo.rs file instead of a mod.rs file,
// we need to look for submodules in
- // `./foo/<id>.rs` and `./foo/<id>/mod.rs` rather than
- // `./<id>.rs` and `./<id>/mod.rs`.
+ // `./foo/<ident>.rs` and `./foo/<ident>/mod.rs` rather than
+ // `./<ident>.rs` and `./<ident>/mod.rs`.
let relative_prefix_string;
let relative_prefix = if let Some(ident) = relative {
relative_prefix_string = format!("{}{}", ident.name, path::MAIN_SEPARATOR);
@@ -263,7 +201,7 @@
""
};
- let mod_name = id.name.to_string();
+ let mod_name = ident.name.to_string();
let default_path_str = format!("{}{}.rs", relative_prefix, mod_name);
let secondary_path_str =
format!("{}{}{}mod.rs", relative_prefix, mod_name, path::MAIN_SEPARATOR);
@@ -272,44 +210,74 @@
let default_exists = sess.source_map().file_exists(&default_path);
let secondary_exists = sess.source_map().file_exists(&secondary_path);
- let result = match (default_exists, secondary_exists) {
+ match (default_exists, secondary_exists) {
(true, false) => Ok(ModulePathSuccess {
- path: default_path,
- ownership: DirectoryOwnership::Owned { relative: Some(id) },
+ file_path: default_path,
+ dir_ownership: DirOwnership::Owned { relative: Some(ident) },
}),
(false, true) => Ok(ModulePathSuccess {
- path: secondary_path,
- ownership: DirectoryOwnership::Owned { relative: None },
+ file_path: secondary_path,
+ dir_ownership: DirOwnership::Owned { relative: None },
}),
- (false, false) => {
- let mut err = struct_span_err!(
- sess.span_diagnostic,
- span,
- E0583,
- "file not found for module `{}`",
- mod_name,
- );
- err.help(&format!(
- "to create the module `{}`, create file \"{}\"",
- mod_name,
- default_path.display(),
- ));
- Err(err)
- }
+ (false, false) => Err(ModError::FileNotFound(ident, default_path)),
(true, true) => {
- let mut err = struct_span_err!(
- sess.span_diagnostic,
- span,
- E0761,
- "file for module `{}` found at both {} and {}",
- mod_name,
- default_path_str,
- secondary_path_str,
- );
- err.help("delete or rename one of them to remove the ambiguity");
- Err(err)
+ Err(ModError::MultipleCandidates(ident, default_path_str, secondary_path_str))
}
- };
+ }
+}
- ModulePath { name: mod_name, path_exists: default_exists || secondary_exists, result }
+impl ModError<'_> {
+ fn report(self, sess: &Session, span: Span) {
+ let diag = &sess.parse_sess.span_diagnostic;
+ match self {
+ ModError::CircularInclusion(file_paths) => {
+ let mut msg = String::from("circular modules: ");
+ for file_path in &file_paths {
+ msg.push_str(&file_path.display().to_string());
+ msg.push_str(" -> ");
+ }
+ msg.push_str(&file_paths[0].display().to_string());
+ diag.struct_span_err(span, &msg)
+ }
+ ModError::ModInBlock(ident) => {
+ let msg = "cannot declare a non-inline module inside a block unless it has a path attribute";
+ let mut err = diag.struct_span_err(span, msg);
+ if let Some(ident) = ident {
+ let note =
+ format!("maybe `use` the module `{}` instead of redeclaring it", ident);
+ err.span_note(span, &note);
+ }
+ err
+ }
+ ModError::FileNotFound(ident, default_path) => {
+ let mut err = struct_span_err!(
+ diag,
+ span,
+ E0583,
+ "file not found for module `{}`",
+ ident,
+ );
+ err.help(&format!(
+ "to create the module `{}`, create file \"{}\"",
+ ident,
+ default_path.display(),
+ ));
+ err
+ }
+ ModError::MultipleCandidates(ident, default_path_short, secondary_path_short) => {
+ let mut err = struct_span_err!(
+ diag,
+ span,
+ E0761,
+ "file for module `{}` found at both {} and {}",
+ ident,
+ default_path_short,
+ secondary_path_short,
+ );
+ err.help("delete or rename one of them to remove the ambiguity");
+ err
+ }
+ ModError::ParserError(err) => err,
+ }.emit()
+ }
}
diff --git a/compiler/rustc_expand/src/mut_visit/tests.rs b/compiler/rustc_expand/src/mut_visit/tests.rs
index be0300b..7e7155a 100644
--- a/compiler/rustc_expand/src/mut_visit/tests.rs
+++ b/compiler/rustc_expand/src/mut_visit/tests.rs
@@ -7,8 +7,8 @@
use rustc_span::with_default_session_globals;
// This version doesn't care about getting comments or doc-strings in.
-fn fake_print_crate(s: &mut pprust::State<'_>, krate: &ast::Crate) {
- s.print_mod(&krate.module, &krate.attrs)
+fn print_crate_items(krate: &ast::Crate) -> String {
+ krate.items.iter().map(|i| pprust::item_to_string(i)).collect::<Vec<_>>().join(" ")
}
// Change every identifier to "zz".
@@ -46,7 +46,7 @@
assert_pred!(
matches_codepattern,
"matches_codepattern",
- pprust::to_string(|s| fake_print_crate(s, &krate)),
+ print_crate_items(&krate),
"#[zz]mod zz{fn zz(zz:zz,zz:zz){zz!(zz,zz,zz);zz;zz}}".to_string()
);
})
@@ -66,7 +66,7 @@
assert_pred!(
matches_codepattern,
"matches_codepattern",
- pprust::to_string(|s| fake_print_crate(s, &krate)),
+ print_crate_items(&krate),
"macro_rules! zz{(zz$zz:zz$(zz $zz:zz)zz+=>(zz$(zz$zz$zz)+))}".to_string()
);
})
diff --git a/compiler/rustc_expand/src/parse/tests.rs b/compiler/rustc_expand/src/parse/tests.rs
index f4fcaf5..56f25ff 100644
--- a/compiler/rustc_expand/src/parse/tests.rs
+++ b/compiler/rustc_expand/src/parse/tests.rs
@@ -309,8 +309,8 @@
.unwrap()
.unwrap();
- if let ast::ItemKind::Mod(ref m) = item.kind {
- assert!(m.items.len() == 2);
+ if let ast::ItemKind::Mod(_, ref mod_kind) = item.kind {
+ assert!(matches!(mod_kind, ast::ModKind::Loaded(items, ..) if items.len() == 2));
} else {
panic!();
}
diff --git a/compiler/rustc_expand/src/placeholders.rs b/compiler/rustc_expand/src/placeholders.rs
index d040539c..6586ba1 100644
--- a/compiler/rustc_expand/src/placeholders.rs
+++ b/compiler/rustc_expand/src/placeholders.rs
@@ -117,7 +117,7 @@
span,
is_placeholder: true,
}]),
- AstFragmentKind::Fields => AstFragment::Fields(smallvec![ast::Field {
+ AstFragmentKind::Fields => AstFragment::Fields(smallvec![ast::ExprField {
attrs: Default::default(),
expr: expr_placeholder(),
id,
@@ -126,7 +126,7 @@
span,
is_placeholder: true,
}]),
- AstFragmentKind::FieldPats => AstFragment::FieldPats(smallvec![ast::FieldPat {
+ AstFragmentKind::FieldPats => AstFragment::FieldPats(smallvec![ast::PatField {
attrs: Default::default(),
id,
ident,
@@ -153,7 +153,7 @@
ty: ty(),
is_placeholder: true,
}]),
- AstFragmentKind::StructFields => AstFragment::StructFields(smallvec![ast::StructField {
+ AstFragmentKind::StructFields => AstFragment::StructFields(smallvec![ast::FieldDef {
attrs: Default::default(),
id,
ident: None,
@@ -205,19 +205,19 @@
}
}
- fn flat_map_field(&mut self, field: ast::Field) -> SmallVec<[ast::Field; 1]> {
+ fn flat_map_expr_field(&mut self, field: ast::ExprField) -> SmallVec<[ast::ExprField; 1]> {
if field.is_placeholder {
- self.remove(field.id).make_fields()
+ self.remove(field.id).make_expr_fields()
} else {
- noop_flat_map_field(field, self)
+ noop_flat_map_expr_field(field, self)
}
}
- fn flat_map_field_pattern(&mut self, fp: ast::FieldPat) -> SmallVec<[ast::FieldPat; 1]> {
+ fn flat_map_pat_field(&mut self, fp: ast::PatField) -> SmallVec<[ast::PatField; 1]> {
if fp.is_placeholder {
- self.remove(fp.id).make_field_patterns()
+ self.remove(fp.id).make_pat_fields()
} else {
- noop_flat_map_field_pattern(fp, self)
+ noop_flat_map_pat_field(fp, self)
}
}
@@ -240,11 +240,11 @@
}
}
- fn flat_map_struct_field(&mut self, sf: ast::StructField) -> SmallVec<[ast::StructField; 1]> {
+ fn flat_map_field_def(&mut self, sf: ast::FieldDef) -> SmallVec<[ast::FieldDef; 1]> {
if sf.is_placeholder {
- self.remove(sf.id).make_struct_fields()
+ self.remove(sf.id).make_field_defs()
} else {
- noop_flat_map_struct_field(sf, self)
+ noop_flat_map_field_def(sf, self)
}
}
@@ -371,12 +371,4 @@
}
}
}
-
- fn visit_mod(&mut self, module: &mut ast::Mod) {
- noop_visit_mod(module, self);
- // remove macro definitions
- module.items.retain(
- |item| !matches!(item.kind, ast::ItemKind::MacCall(_) if !self.cx.ecfg.keep_macs),
- );
- }
}
diff --git a/compiler/rustc_expand/src/proc_macro.rs b/compiler/rustc_expand/src/proc_macro.rs
index 6779734..61b776f 100644
--- a/compiler/rustc_expand/src/proc_macro.rs
+++ b/compiler/rustc_expand/src/proc_macro.rs
@@ -1,16 +1,14 @@
use crate::base::{self, *};
use crate::proc_macro_server;
+use rustc_ast as ast;
use rustc_ast::ptr::P;
use rustc_ast::token;
use rustc_ast::tokenstream::{CanSynthesizeMissingTokens, TokenStream, TokenTree};
-use rustc_ast::{self as ast, *};
use rustc_data_structures::sync::Lrc;
-use rustc_errors::{struct_span_err, Applicability, ErrorReported};
-use rustc_lexer::is_ident;
+use rustc_errors::ErrorReported;
use rustc_parse::nt_to_tokenstream;
use rustc_parse::parser::ForceCollect;
-use rustc_span::symbol::sym;
use rustc_span::{Span, DUMMY_SP};
const EXEC_STRATEGY: pm::bridge::server::SameThread = pm::bridge::server::SameThread;
@@ -92,7 +90,8 @@
}
_ => unreachable!(),
};
- let input = if item.pretty_printing_compatibility_hack() {
+ let input = if crate::base::pretty_printing_compatibility_hack(&item, &ecx.sess.parse_sess)
+ {
TokenTree::token(token::Interpolated(Lrc::new(item)), DUMMY_SP).into()
} else {
nt_to_tokenstream(&item, &ecx.sess.parse_sess, CanSynthesizeMissingTokens::Yes)
@@ -142,91 +141,3 @@
ExpandResult::Ready(items)
}
}
-
-crate fn collect_derives(cx: &mut ExtCtxt<'_>, attrs: &mut Vec<ast::Attribute>) -> Vec<ast::Path> {
- let mut result = Vec::new();
- attrs.retain(|attr| {
- if !attr.has_name(sym::derive) {
- return true;
- }
-
- // 1) First let's ensure that it's a meta item.
- let nmis = match attr.meta_item_list() {
- None => {
- cx.struct_span_err(attr.span, "malformed `derive` attribute input")
- .span_suggestion(
- attr.span,
- "missing traits to be derived",
- "#[derive(Trait1, Trait2, ...)]".to_owned(),
- Applicability::HasPlaceholders,
- )
- .emit();
- return false;
- }
- Some(x) => x,
- };
-
- let mut error_reported_filter_map = false;
- let mut error_reported_map = false;
- let traits = nmis
- .into_iter()
- // 2) Moreover, let's ensure we have a path and not `#[derive("foo")]`.
- .filter_map(|nmi| match nmi {
- NestedMetaItem::Literal(lit) => {
- error_reported_filter_map = true;
- let mut err = struct_span_err!(
- cx.sess,
- lit.span,
- E0777,
- "expected path to a trait, found literal",
- );
- let token = lit.token.to_string();
- if token.starts_with('"')
- && token.len() > 2
- && is_ident(&token[1..token.len() - 1])
- {
- err.help(&format!("try using `#[derive({})]`", &token[1..token.len() - 1]));
- } else {
- err.help("for example, write `#[derive(Debug)]` for `Debug`");
- }
- err.emit();
- None
- }
- NestedMetaItem::MetaItem(mi) => Some(mi),
- })
- // 3) Finally, we only accept `#[derive($path_0, $path_1, ..)]`
- // but not e.g. `#[derive($path_0 = "value", $path_1(abc))]`.
- // In this case we can still at least determine that the user
- // wanted this trait to be derived, so let's keep it.
- .map(|mi| {
- let mut traits_dont_accept = |title, action| {
- error_reported_map = true;
- let sp = mi.span.with_lo(mi.path.span.hi());
- cx.struct_span_err(sp, title)
- .span_suggestion(
- sp,
- action,
- String::new(),
- Applicability::MachineApplicable,
- )
- .emit();
- };
- match &mi.kind {
- MetaItemKind::List(..) => traits_dont_accept(
- "traits in `#[derive(...)]` don't accept arguments",
- "remove the arguments",
- ),
- MetaItemKind::NameValue(..) => traits_dont_accept(
- "traits in `#[derive(...)]` don't accept values",
- "remove the value",
- ),
- MetaItemKind::Word => {}
- }
- mi.path
- });
-
- result.extend(traits);
- !error_reported_filter_map && !error_reported_map
- });
- result
-}
diff --git a/compiler/rustc_expand/src/proc_macro_server.rs b/compiler/rustc_expand/src/proc_macro_server.rs
index b6195d3..cb41bc8 100644
--- a/compiler/rustc_expand/src/proc_macro_server.rs
+++ b/compiler/rustc_expand/src/proc_macro_server.rs
@@ -2,16 +2,21 @@
use rustc_ast as ast;
use rustc_ast::token;
+use rustc_ast::token::Nonterminal;
+use rustc_ast::token::NtIdent;
use rustc_ast::tokenstream::{self, CanSynthesizeMissingTokens};
use rustc_ast::tokenstream::{DelimSpan, Spacing::*, TokenStream, TreeAndSpacing};
use rustc_ast_pretty::pprust;
use rustc_data_structures::sync::Lrc;
use rustc_errors::Diagnostic;
+use rustc_lint_defs::builtin::PROC_MACRO_BACK_COMPAT;
+use rustc_lint_defs::BuiltinLintDiagnostics;
use rustc_parse::lexer::nfc_normalize;
use rustc_parse::{nt_to_tokenstream, parse_stream_from_source_str};
use rustc_session::parse::ParseSess;
+use rustc_span::hygiene::ExpnKind;
use rustc_span::symbol::{self, kw, sym, Symbol};
-use rustc_span::{BytePos, FileName, MultiSpan, Pos, SourceFile, Span};
+use rustc_span::{BytePos, FileName, MultiSpan, Pos, RealFileName, SourceFile, Span};
use pm::bridge::{server, TokenTree};
use pm::{Delimiter, Level, LineColumn, Spacing};
@@ -48,11 +53,11 @@
}
}
-impl FromInternal<(TreeAndSpacing, &'_ ParseSess, &'_ mut Vec<Self>)>
+impl FromInternal<(TreeAndSpacing, &'_ mut Vec<Self>, &mut Rustc<'_>)>
for TokenTree<Group, Punct, Ident, Literal>
{
fn from_internal(
- ((tree, spacing), sess, stack): (TreeAndSpacing, &ParseSess, &mut Vec<Self>),
+ ((tree, spacing), stack, rustc): (TreeAndSpacing, &mut Vec<Self>, &mut Rustc<'_>),
) -> Self {
use rustc_ast::token::*;
@@ -141,10 +146,10 @@
SingleQuote => op!('\''),
Ident(name, false) if name == kw::DollarCrate => tt!(Ident::dollar_crate()),
- Ident(name, is_raw) => tt!(Ident::new(sess, name, is_raw)),
+ Ident(name, is_raw) => tt!(Ident::new(rustc.sess, name, is_raw)),
Lifetime(name) => {
let ident = symbol::Ident::new(name, span).without_first_quote();
- stack.push(tt!(Ident::new(sess, ident.name, false)));
+ stack.push(tt!(Ident::new(rustc.sess, ident.name, false)));
tt!(Punct::new('\'', true))
}
Literal(lit) => tt!(Literal { lit }),
@@ -174,17 +179,15 @@
}
Interpolated(nt) => {
- if let Some((name, is_raw)) =
- nt.ident_name_compatibility_hack(span, sess.source_map())
- {
- TokenTree::Ident(Ident::new(sess, name.name, is_raw, name.span))
+ if let Some((name, is_raw)) = ident_name_compatibility_hack(&nt, span, rustc) {
+ TokenTree::Ident(Ident::new(rustc.sess, name.name, is_raw, name.span))
} else {
- let stream = nt_to_tokenstream(&nt, sess, CanSynthesizeMissingTokens::No);
+ let stream = nt_to_tokenstream(&nt, rustc.sess, CanSynthesizeMissingTokens::No);
TokenTree::Group(Group {
delimiter: Delimiter::None,
stream,
span: DelimSpan::from_single(span),
- flatten: nt.pretty_printing_compatibility_hack(),
+ flatten: crate::base::pretty_printing_compatibility_hack(&nt, rustc.sess),
})
}
}
@@ -446,7 +449,7 @@
loop {
let tree = iter.stack.pop().or_else(|| {
let next = iter.cursor.next_with_spacing()?;
- Some(TokenTree::from_internal((next, self.sess, &mut iter.stack)))
+ Some(TokenTree::from_internal((next, &mut iter.stack, self)))
})?;
// A hack used to pass AST fragments to attribute and derive macros
// as a single nonterminal token instead of a token stream.
@@ -711,3 +714,74 @@
self.sess.source_map().span_to_snippet(span).ok()
}
}
+
+// See issue #74616 for details
+fn ident_name_compatibility_hack(
+ nt: &Nonterminal,
+ orig_span: Span,
+ rustc: &mut Rustc<'_>,
+) -> Option<(rustc_span::symbol::Ident, bool)> {
+ if let NtIdent(ident, is_raw) = nt {
+ if let ExpnKind::Macro(_, macro_name) = orig_span.ctxt().outer_expn_data().kind {
+ let source_map = rustc.sess.source_map();
+ let filename = source_map.span_to_filename(orig_span);
+ if let FileName::Real(RealFileName::Named(path)) = filename {
+ let matches_prefix = |prefix, filename| {
+ // Check for a path that ends with 'prefix*/src/<filename>'
+ let mut iter = path.components().rev();
+ iter.next().and_then(|p| p.as_os_str().to_str()) == Some(filename)
+ && iter.next().and_then(|p| p.as_os_str().to_str()) == Some("src")
+ && iter
+ .next()
+ .and_then(|p| p.as_os_str().to_str())
+ .map_or(false, |p| p.starts_with(prefix))
+ };
+
+ let time_macros_impl =
+ macro_name == sym::impl_macros && matches_prefix("time-macros-impl", "lib.rs");
+ if time_macros_impl
+ || (macro_name == sym::arrays && matches_prefix("js-sys", "lib.rs"))
+ {
+ let snippet = source_map.span_to_snippet(orig_span);
+ if snippet.as_deref() == Ok("$name") {
+ if time_macros_impl {
+ rustc.sess.buffer_lint_with_diagnostic(
+ &PROC_MACRO_BACK_COMPAT,
+ orig_span,
+ ast::CRATE_NODE_ID,
+ "using an old version of `time-macros-impl`",
+ BuiltinLintDiagnostics::ProcMacroBackCompat(
+ "the `time-macros-impl` crate will stop compiling in future versions of Rust. \
+ Please update to the latest version of the `time` crate to avoid breakage".to_string())
+ );
+ }
+ return Some((*ident, *is_raw));
+ }
+ }
+
+ if macro_name == sym::tuple_from_req && matches_prefix("actix-web", "extract.rs") {
+ let snippet = source_map.span_to_snippet(orig_span);
+ if snippet.as_deref() == Ok("$T") {
+ if let FileName::Real(RealFileName::Named(macro_path)) =
+ source_map.span_to_filename(rustc.def_site)
+ {
+ if macro_path.to_string_lossy().contains("pin-project-internal-0.") {
+ rustc.sess.buffer_lint_with_diagnostic(
+ &PROC_MACRO_BACK_COMPAT,
+ orig_span,
+ ast::CRATE_NODE_ID,
+ "using an old version of `actix-web`",
+ BuiltinLintDiagnostics::ProcMacroBackCompat(
+ "the version of `actix-web` you are using might stop compiling in future versions of Rust; \
+ please update to the latest version of the `actix-web` crate to avoid breakage".to_string())
+ );
+ return Some((*ident, *is_raw));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ None
+}
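// A minimal standalone sketch (not part of the patch, and not using the rustc
// types) of the path check performed by the `matches_prefix` closure above:
// walking the path components in reverse to test for a `prefix*/src/<filename>`
// suffix. The registry path used in `main` is purely illustrative.
use std::path::Path;

fn matches_prefix(path: &Path, prefix: &str, filename: &str) -> bool {
    let mut iter = path.components().rev();
    // Last component must be the file, then `src`, then a directory whose
    // name starts with the crate prefix (it usually carries a version suffix).
    iter.next().and_then(|p| p.as_os_str().to_str()) == Some(filename)
        && iter.next().and_then(|p| p.as_os_str().to_str()) == Some("src")
        && iter
            .next()
            .and_then(|p| p.as_os_str().to_str())
            .map_or(false, |p| p.starts_with(prefix))
}

fn main() {
    // Illustrative path; real cargo registry layouts vary.
    let p = Path::new("registry/src/github.com-1ecc6299db9ec823/time-macros-impl-0.1.1/src/lib.rs");
    assert!(matches_prefix(p, "time-macros-impl", "lib.rs"));
    assert!(!matches_prefix(p, "js-sys", "lib.rs"));
}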
diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs
index aa54ffb..9902b6c 100644
--- a/compiler/rustc_feature/src/accepted.rs
+++ b/compiler/rustc_feature/src/accepted.rs
@@ -272,9 +272,11 @@
(accepted, doc_alias, "1.48.0", Some(50146), None),
/// Allows patterns with concurrent by-move and by-ref bindings.
/// For example, you can write `Foo(a, ref b)` where `a` is by-move and `b` is by-ref.
- (accepted, move_ref_pattern, "1.48.0", Some(68354), None),
+ (accepted, move_ref_pattern, "1.49.0", Some(68354), None),
/// The smallest useful subset of `const_generics`.
(accepted, min_const_generics, "1.51.0", Some(74878), None),
+ /// The `unsafe_op_in_unsafe_fn` lint (allowed by default): no longer treat an unsafe function as an unsafe block.
+ (accepted, unsafe_block_in_unsafe_fn, "1.51.0", Some(71668), None),
// -------------------------------------------------------------------------
// feature-group-end: accepted features
diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs
index 4f38e06..79ec9c2 100644
--- a/compiler/rustc_feature/src/active.rs
+++ b/compiler/rustc_feature/src/active.rs
@@ -557,9 +557,6 @@
/// Allows the use of `#[ffi_const]` on foreign functions.
(active, ffi_const, "1.45.0", Some(58328), None),
- /// No longer treat an unsafe function as an unsafe block.
- (active, unsafe_block_in_unsafe_fn, "1.45.0", Some(71668), None),
-
/// Allows `extern "avr-interrupt" fn()` and `extern "avr-non-blocking-interrupt" fn()`.
(active, abi_avr_interrupt, "1.45.0", Some(69664), None),
@@ -633,7 +630,23 @@
(active, abi_c_cmse_nonsecure_call, "1.51.0", Some(81391), None),
/// Lessens the requirements for structs to implement `Unsize`.
- (active, relaxed_struct_unsize, "1.51.0", Some(1), None),
+ (active, relaxed_struct_unsize, "1.51.0", Some(81793), None),
+
+ /// Allows macro attributes to observe output of `#[derive]`.
+ (active, macro_attributes_in_derive_output, "1.51.0", Some(81119), None),
+
+ /// Allows `pub` on `macro_rules` items.
+ (active, pub_macro_rules, "1.52.0", Some(78855), None),
+
+ /// Allows the use of type alias impl trait in function return positions
+ (active, min_type_alias_impl_trait, "1.52.0", Some(63063), None),
+
+ /// Allows associated types in inherent impls.
+ (active, inherent_associated_types, "1.52.0", Some(8995), None),
+
+ /// Allows `extern "C-unwind" fn` to enable unwinding across ABI boundaries.
+ (active, c_unwind, "1.52.0", Some(74990), None),
+
// -------------------------------------------------------------------------
// feature-group-end: actual feature gates
// -------------------------------------------------------------------------
@@ -659,6 +672,8 @@
sym::unsized_locals,
sym::capture_disjoint_fields,
sym::const_generics_defaults,
+ sym::inherent_associated_types,
+ sym::type_alias_impl_trait,
];
/// Some features are not allowed to be used together at the same time, if
diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs
index 3ed5320..072062d 100644
--- a/compiler/rustc_feature/src/builtin_attrs.rs
+++ b/compiler/rustc_feature/src/builtin_attrs.rs
@@ -188,7 +188,6 @@
ungated!(reexport_test_harness_main, Normal, template!(NameValueStr: "name")),
// Macros:
- ungated!(derive, Normal, template!(List: "Trait1, Trait2, ...")),
ungated!(automatically_derived, Normal, template!(Word)),
// FIXME(#14407)
ungated!(macro_use, Normal, template!(Word, List: "name1, name2, ...")),
@@ -471,6 +470,7 @@
rustc_attr!(rustc_promotable, AssumedUsed, template!(Word), IMPL_DETAIL),
rustc_attr!(rustc_args_required_const, AssumedUsed, template!(List: "N"), INTERNAL_UNSTABLE),
+ rustc_attr!(rustc_legacy_const_generics, AssumedUsed, template!(List: "N"), INTERNAL_UNSTABLE),
// ==========================================================================
// Internal attributes, Layout related:
diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs
index 38a3a4e..aff6605 100644
--- a/compiler/rustc_feature/src/removed.rs
+++ b/compiler/rustc_feature/src/removed.rs
@@ -106,7 +106,7 @@
Some("subsumed by `.await` syntax")),
/// Allows defining `existential type`s.
(removed, existential_type, "1.38.0", Some(63063), None,
- Some("removed in favor of `#![feature(type_alias_impl_trait)]`")),
+ Some("removed in favor of `#![feature(min_type_alias_impl_trait)]`")),
/// Allows using the macros:
/// + `__diagnostic_used`
/// + `__register_diagnostic`
diff --git a/compiler/rustc_hir/Cargo.toml b/compiler/rustc_hir/Cargo.toml
index c141654..d41b81f 100644
--- a/compiler/rustc_hir/Cargo.toml
+++ b/compiler/rustc_hir/Cargo.toml
@@ -17,4 +17,4 @@
rustc_serialize = { path = "../rustc_serialize" }
rustc_ast = { path = "../rustc_ast" }
tracing = "0.1"
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_hir/src/arena.rs b/compiler/rustc_hir/src/arena.rs
index c7dc66b..ddf8218 100644
--- a/compiler/rustc_hir/src/arena.rs
+++ b/compiler/rustc_hir/src/arena.rs
@@ -25,8 +25,8 @@
[] generic_bound: rustc_hir::GenericBound<$tcx>,
[] generic_param: rustc_hir::GenericParam<$tcx>,
[] expr: rustc_hir::Expr<$tcx>,
- [] field: rustc_hir::Field<$tcx>,
- [] field_pat: rustc_hir::FieldPat<$tcx>,
+ [] expr_field: rustc_hir::ExprField<$tcx>,
+ [] pat_field: rustc_hir::PatField<$tcx>,
[] fn_decl: rustc_hir::FnDecl<$tcx>,
[] foreign_item: rustc_hir::ForeignItem<$tcx>,
[few] foreign_item_ref: rustc_hir::ForeignItemRef<$tcx>,
@@ -42,7 +42,7 @@
[] poly_trait_ref: rustc_hir::PolyTraitRef<$tcx>,
[] qpath: rustc_hir::QPath<$tcx>,
[] stmt: rustc_hir::Stmt<$tcx>,
- [] struct_field: rustc_hir::StructField<$tcx>,
+ [] field_def: rustc_hir::FieldDef<$tcx>,
[] trait_item_ref: rustc_hir::TraitItemRef,
[] ty: rustc_hir::Ty<$tcx>,
[] type_binding: rustc_hir::TypeBinding<$tcx>,
diff --git a/compiler/rustc_hir/src/definitions.rs b/compiler/rustc_hir/src/definitions.rs
index 6a1b9bd..3266dfa 100644
--- a/compiler/rustc_hir/src/definitions.rs
+++ b/compiler/rustc_hir/src/definitions.rs
@@ -5,13 +5,16 @@
//! expressions) that are mostly just leftovers.
pub use crate::def_id::DefPathHash;
-use crate::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
+use crate::def_id::{
+ CrateNum, DefId, DefIndex, LocalDefId, StableCrateId, CRATE_DEF_INDEX, LOCAL_CRATE,
+};
use crate::hir;
-use rustc_ast::crate_disambiguator::CrateDisambiguator;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::StableHasher;
+use rustc_data_structures::unhash::UnhashMap;
use rustc_index::vec::IndexVec;
+use rustc_span::crate_disambiguator::CrateDisambiguator;
use rustc_span::hygiene::ExpnId;
use rustc_span::symbol::{kw, sym, Symbol};
@@ -27,6 +30,7 @@
pub struct DefPathTable {
index_to_key: IndexVec<DefIndex, DefKey>,
def_path_hashes: IndexVec<DefIndex, DefPathHash>,
+ def_path_hash_to_index: UnhashMap<DefPathHash, DefIndex>,
}
impl DefPathTable {
@@ -39,6 +43,35 @@
};
self.def_path_hashes.push(def_path_hash);
debug_assert!(self.def_path_hashes.len() == self.index_to_key.len());
+
+ // Check for hash collisions of DefPathHashes. These should be
+ // exceedingly rare.
+ if let Some(existing) = self.def_path_hash_to_index.insert(def_path_hash, index) {
+ let def_path1 = DefPath::make(LOCAL_CRATE, existing, |idx| self.def_key(idx));
+ let def_path2 = DefPath::make(LOCAL_CRATE, index, |idx| self.def_key(idx));
+
+ // Continuing with colliding DefPathHashes can lead to correctness
+ // issues. We must abort compilation.
+ //
+ // The likelihood of such a collision is very small, so actually
+ // running into one could be indicative of a poor hash function
+ // being used.
+ //
+ // See the documentation for DefPathHash for more information.
+ panic!(
+ "found DefPathHash collsion between {:?} and {:?}. \
+ Compilation cannot continue.",
+ def_path1, def_path2
+ );
+ }
+
+ // Assert that all DefPathHashes correctly contain the local crate's
+ // StableCrateId
+ #[cfg(debug_assertions)]
+ if let Some(root) = self.def_path_hashes.get(CRATE_DEF_INDEX) {
+ assert!(def_path_hash.stable_crate_id() == root.stable_crate_id());
+ }
+
index
}
@@ -108,13 +141,10 @@
}
impl DefKey {
- fn compute_stable_hash(&self, parent_hash: DefPathHash) -> DefPathHash {
+ pub(crate) fn compute_stable_hash(&self, parent: DefPathHash) -> DefPathHash {
let mut hasher = StableHasher::new();
- // We hash a `0u8` here to disambiguate between regular `DefPath` hashes,
- // and the special "root_parent" below.
- 0u8.hash(&mut hasher);
- parent_hash.hash(&mut hasher);
+ parent.hash(&mut hasher);
let DisambiguatedDefPathData { ref data, disambiguator } = self.disambiguated_data;
@@ -127,19 +157,13 @@
disambiguator.hash(&mut hasher);
- DefPathHash(hasher.finish())
- }
+ let local_hash: u64 = hasher.finish();
- fn root_parent_stable_hash(
- crate_name: &str,
- crate_disambiguator: CrateDisambiguator,
- ) -> DefPathHash {
- let mut hasher = StableHasher::new();
- // Disambiguate this from a regular `DefPath` hash; see `compute_stable_hash()` above.
- 1u8.hash(&mut hasher);
- crate_name.hash(&mut hasher);
- crate_disambiguator.hash(&mut hasher);
- DefPathHash(hasher.finish())
+ // Construct the new DefPathHash, making sure that the `crate_id`
+ // portion of the hash is properly copied from the parent. This way the
+ // `crate_id` part will be recursively propagated from the root to all
+ // DefPathHashes in this DefPathTable.
+ DefPathHash::new(parent.stable_crate_id(), local_hash)
}
}
@@ -295,6 +319,12 @@
self.table.def_path_hash(id.local_def_index)
}
+ #[inline]
+ pub fn def_path_hash_to_def_id(&self, def_path_hash: DefPathHash) -> LocalDefId {
+ let local_def_index = self.table.def_path_hash_to_index[&def_path_hash];
+ LocalDefId { local_def_index }
+ }
+
/// Returns the path from the crate root to `index`. The root
/// nodes are not included in the path (i.e., this will be an
/// empty vector for the crate root). For an inlined item, this
@@ -313,11 +343,6 @@
}
#[inline]
- pub fn opt_local_def_id_to_hir_id(&self, id: LocalDefId) -> Option<hir::HirId> {
- self.def_id_to_hir_id[id]
- }
-
- #[inline]
pub fn opt_hir_id_to_local_def_id(&self, hir_id: hir::HirId) -> Option<LocalDefId> {
self.hir_id_to_def_id.get(&hir_id).copied()
}
@@ -332,7 +357,8 @@
},
};
- let parent_hash = DefKey::root_parent_stable_hash(crate_name, crate_disambiguator);
+ let stable_crate_id = StableCrateId::new(crate_name, crate_disambiguator);
+ let parent_hash = DefPathHash::new(stable_crate_id, 0);
let def_path_hash = key.compute_stable_hash(parent_hash);
// Create the root definition.
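// A simplified, standalone sketch (plain structs, not the rustc definitions)
// of the DefPathHash layout this hunk introduces: the hash is split into a
// crate-wide `StableCrateId` half and a per-definition local half, and the
// crate half is copied from the parent so it propagates from the root to every
// entry in the table. The hasher and key data below are illustrative.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct StableCrateId(u64);

#[derive(Clone, Copy, Debug)]
struct DefPathHash {
    stable_crate_id: StableCrateId,
    local_hash: u64,
}

impl DefPathHash {
    fn new(stable_crate_id: StableCrateId, local_hash: u64) -> Self {
        DefPathHash { stable_crate_id, local_hash }
    }
}

// Mirrors the shape of `DefKey::compute_stable_hash`: hash the parent and this
// key's data into the local half, but keep the parent's crate id untouched.
fn compute_stable_hash(parent: DefPathHash, key_data: &str, disambiguator: u32) -> DefPathHash {
    let mut hasher = DefaultHasher::new();
    parent.stable_crate_id.0.hash(&mut hasher);
    parent.local_hash.hash(&mut hasher);
    key_data.hash(&mut hasher);
    disambiguator.hash(&mut hasher);
    DefPathHash::new(parent.stable_crate_id, hasher.finish())
}

fn main() {
    let crate_id = StableCrateId(0xC0FFEE);
    let root = DefPathHash::new(crate_id, 0);
    let module = compute_stable_hash(root, "my_module", 0);
    let item = compute_stable_hash(module, "my_fn", 0);
    // Every hash derived from the root carries the same crate id.
    assert_eq!(item.stable_crate_id, root.stable_crate_id);
}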
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index 67a1541..d03584d 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -1,11 +1,12 @@
-use crate::def::{DefKind, Namespace, Res};
+// ignore-tidy-filelength
+use crate::def::{CtorKind, DefKind, Namespace, Res};
use crate::def_id::DefId;
crate use crate::hir_id::HirId;
use crate::{itemlikevisit, LangItem};
use rustc_ast::util::parser::ExprPrecedence;
use rustc_ast::{self as ast, CrateSugar, LlvmAsmDialect};
-use rustc_ast::{AttrVec, Attribute, FloatTy, IntTy, Label, LitKind, StrStyle, UintTy};
+use rustc_ast::{Attribute, FloatTy, IntTy, Label, LitKind, StrStyle, TraitObjectSyntax, UintTy};
pub use rustc_ast::{BorrowKind, ImplPolarity, IsAuto};
pub use rustc_ast::{CaptureBy, Movability, Mutability};
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
@@ -468,7 +469,6 @@
pub struct GenericParam<'hir> {
pub hir_id: HirId,
pub name: ParamName,
- pub attrs: &'hir [Attribute],
pub bounds: GenericBounds<'hir>,
pub span: Span,
pub pure_wrt_drop: bool,
@@ -615,11 +615,11 @@
pub rhs_ty: &'hir Ty<'hir>,
}
-#[derive(Encodable, Debug, HashStable_Generic)]
+#[derive(Default, Encodable, Debug, HashStable_Generic)]
pub struct ModuleItems {
// Use BTreeSets here so items are in the same order as in the
// list of all items in Crate
- pub items: BTreeSet<HirId>,
+ pub items: BTreeSet<ItemId>,
pub trait_items: BTreeSet<TraitItemId>,
pub impl_items: BTreeSet<ImplItemId>,
pub foreign_items: BTreeSet<ForeignItemId>,
@@ -629,7 +629,6 @@
#[derive(Encodable, Debug, HashStable_Generic)]
pub struct CrateItem<'hir> {
pub module: Mod<'hir>,
- pub attrs: &'hir [Attribute],
pub span: Span,
}
@@ -652,13 +651,13 @@
// does, because it can affect the order in which errors are
// detected, which in turn can make UI tests yield
// slightly different results.
- pub items: BTreeMap<HirId, Item<'hir>>,
+ pub items: BTreeMap<ItemId, Item<'hir>>,
pub trait_items: BTreeMap<TraitItemId, TraitItem<'hir>>,
pub impl_items: BTreeMap<ImplItemId, ImplItem<'hir>>,
pub foreign_items: BTreeMap<ForeignItemId, ForeignItem<'hir>>,
pub bodies: BTreeMap<BodyId, Body<'hir>>,
- pub trait_impls: BTreeMap<DefId, Vec<HirId>>,
+ pub trait_impls: BTreeMap<DefId, Vec<LocalDefId>>,
/// A list of the body ids written out in the order in which they
/// appear in the crate. If you're going to process all the bodies
@@ -668,16 +667,19 @@
/// A list of modules written out in the order in which they
/// appear in the crate. This includes the main crate module.
- pub modules: BTreeMap<HirId, ModuleItems>,
+ pub modules: BTreeMap<LocalDefId, ModuleItems>,
/// A list of proc macro HirIds, written out in the order in which
/// they are declared in the static array generated by proc_macro_harness.
pub proc_macros: Vec<HirId>,
pub trait_map: BTreeMap<HirId, Vec<TraitCandidate>>,
+
+ /// Collected attributes from HIR nodes.
+ pub attrs: BTreeMap<HirId, &'hir [Attribute]>,
}
impl Crate<'hir> {
- pub fn item(&self, id: HirId) -> &Item<'hir> {
+ pub fn item(&self, id: ItemId) -> &Item<'hir> {
&self.items[&id]
}
@@ -761,16 +763,22 @@
/// A macro definition, in this crate or imported from another.
///
/// Not parsed directly, but created on macro import or `macro_rules!` expansion.
-#[derive(Debug, HashStable_Generic)]
+#[derive(Debug)]
pub struct MacroDef<'hir> {
pub ident: Ident,
pub vis: Visibility<'hir>,
- pub attrs: &'hir [Attribute],
- pub hir_id: HirId,
+ pub def_id: LocalDefId,
pub span: Span,
pub ast: ast::MacroDef,
}
+impl MacroDef<'_> {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ HirId::make_owner(self.def_id)
+ }
+}
+
/// A block of statements `{ .. }`, which may have a label (in this case the
/// `targeted_by_break` field will be `true`) and may be `unsafe` by means of
/// the `rules` being anything but `DefaultBlock`.
@@ -874,7 +882,7 @@
/// are treated the same as` x: x, y: ref y, z: ref mut z`,
/// except `is_shorthand` is true.
#[derive(Debug, HashStable_Generic)]
-pub struct FieldPat<'hir> {
+pub struct PatField<'hir> {
#[stable_hasher(ignore)]
pub hir_id: HirId,
/// The identifier for the field.
@@ -938,7 +946,7 @@
/// A struct or struct variant pattern (e.g., `Variant {x, y, ..}`).
/// The `bool` is `true` in the presence of a `..`.
- Struct(QPath<'hir>, &'hir [FieldPat<'hir>], bool),
+ Struct(QPath<'hir>, &'hir [PatField<'hir>], bool),
/// A tuple struct/variant pattern `Variant(x, y, .., z)`.
/// If the `..` pattern fragment is present, then `Option<usize>` denotes its position.
@@ -1112,25 +1120,25 @@
#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
pub enum UnOp {
/// The `*` operator (dereferencing).
- UnDeref,
+ Deref,
/// The `!` operator (logical negation).
- UnNot,
+ Not,
/// The `-` operator (negation).
- UnNeg,
+ Neg,
}
impl UnOp {
pub fn as_str(self) -> &'static str {
match self {
- Self::UnDeref => "*",
- Self::UnNot => "!",
- Self::UnNeg => "-",
+ Self::Deref => "*",
+ Self::Not => "!",
+ Self::Neg => "-",
}
}
/// Returns `true` if the unary operator takes its argument by value.
pub fn is_by_value(self) -> bool {
- matches!(self, Self::UnNeg | Self::UnNot)
+ matches!(self, Self::Neg | Self::Not)
}
}
@@ -1158,16 +1166,6 @@
Semi(&'hir Expr<'hir>),
}
-impl<'hir> StmtKind<'hir> {
- pub fn attrs(&self, get_item: impl FnOnce(ItemId) -> &'hir Item<'hir>) -> &'hir [Attribute] {
- match *self {
- StmtKind::Local(ref l) => &l.attrs,
- StmtKind::Item(ref item_id) => &get_item(*item_id).attrs,
- StmtKind::Expr(ref e) | StmtKind::Semi(ref e) => &e.attrs,
- }
- }
-}
-
/// Represents a `let` statement (i.e., `let <pat>:<ty> = <expr>;`).
#[derive(Debug, HashStable_Generic)]
pub struct Local<'hir> {
@@ -1178,7 +1176,6 @@
pub init: Option<&'hir Expr<'hir>>,
pub hir_id: HirId,
pub span: Span,
- pub attrs: AttrVec,
/// Can be `ForLoopDesugar` if the `let` statement is part of a `for` loop
/// desugaring. Otherwise will be `Normal`.
pub source: LocalSource,
@@ -1191,7 +1188,6 @@
#[stable_hasher(ignore)]
pub hir_id: HirId,
pub span: Span,
- pub attrs: &'hir [Attribute],
/// If this pattern and the optional guard matches, then `body` is evaluated.
pub pat: &'hir Pat<'hir>,
/// Optional guard clause.
@@ -1207,7 +1203,7 @@
}
#[derive(Debug, HashStable_Generic)]
-pub struct Field<'hir> {
+pub struct ExprField<'hir> {
#[stable_hasher(ignore)]
pub hir_id: HirId,
pub ident: Ident,
@@ -1274,7 +1270,18 @@
}
/// The type of source expression that caused this generator to be created.
-#[derive(Clone, PartialEq, Eq, HashStable_Generic, Encodable, Decodable, Debug, Copy)]
+#[derive(
+ Clone,
+ PartialEq,
+ PartialOrd,
+ Eq,
+ Hash,
+ HashStable_Generic,
+ Encodable,
+ Decodable,
+ Debug,
+ Copy
+)]
pub enum GeneratorKind {
/// An explicit `async` block or the body of an async function.
Async(AsyncGeneratorKind),
@@ -1292,12 +1299,32 @@
}
}
+impl GeneratorKind {
+ pub fn descr(&self) -> &'static str {
+ match self {
+ GeneratorKind::Async(ask) => ask.descr(),
+ GeneratorKind::Gen => "generator",
+ }
+ }
+}
+
/// In the case of a generator created as part of an async construct,
/// which kind of async construct caused it to be created?
///
/// This helps error messages but is also used to drive coercions in
/// type-checking (see #60424).
-#[derive(Clone, PartialEq, Eq, HashStable_Generic, Encodable, Decodable, Debug, Copy)]
+#[derive(
+ Clone,
+ PartialEq,
+ PartialOrd,
+ Eq,
+ Hash,
+ HashStable_Generic,
+ Encodable,
+ Decodable,
+ Debug,
+ Copy
+)]
pub enum AsyncGeneratorKind {
/// An explicit `async` block written by the user.
Block,
@@ -1319,6 +1346,16 @@
}
}
+impl AsyncGeneratorKind {
+ pub fn descr(&self) -> &'static str {
+ match self {
+ AsyncGeneratorKind::Block => "`async` block",
+ AsyncGeneratorKind::Closure => "`async` closure body",
+ AsyncGeneratorKind::Fn => "`async fn` body",
+ }
+ }
+}
+
#[derive(Copy, Clone, Debug)]
pub enum BodyOwnerKind {
/// Functions and methods.
@@ -1409,14 +1446,9 @@
pub struct Expr<'hir> {
pub hir_id: HirId,
pub kind: ExprKind<'hir>,
- pub attrs: AttrVec,
pub span: Span,
}
-// `Expr` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
-rustc_data_structures::static_assert_size!(Expr<'static>, 72);
-
impl Expr<'_> {
pub fn precedence(&self) -> ExprPrecedence {
match self.kind {
@@ -1477,7 +1509,7 @@
// https://github.com/rust-lang/rfcs/blob/master/text/0803-type-ascription.md#type-ascription-and-temporaries
ExprKind::Type(ref e, _) => e.is_place_expr(allow_projections_from),
- ExprKind::Unary(UnOp::UnDeref, _) => true,
+ ExprKind::Unary(UnOp::Deref, _) => true,
ExprKind::Field(ref base, _) | ExprKind::Index(ref base, _) => {
allow_projections_from(base) || base.is_place_expr(allow_projections_from)
@@ -1532,6 +1564,71 @@
}
expr
}
+
+ pub fn peel_blocks(&self) -> &Self {
+ let mut expr = self;
+ while let ExprKind::Block(Block { expr: Some(inner), .. }, _) = &expr.kind {
+ expr = inner;
+ }
+ expr
+ }
+
+ pub fn can_have_side_effects(&self) -> bool {
+ match self.peel_drop_temps().kind {
+ ExprKind::Path(_) | ExprKind::Lit(_) => false,
+ ExprKind::Type(base, _)
+ | ExprKind::Unary(_, base)
+ | ExprKind::Field(base, _)
+ | ExprKind::Index(base, _)
+ | ExprKind::AddrOf(.., base)
+ | ExprKind::Cast(base, _) => {
+ // This isn't exactly true for `Index` and all `Unary`, but we are using this
+ // method exclusively for diagnostics and there's a *cultural* pressure against
+ // them being used only for their side-effects.
+ base.can_have_side_effects()
+ }
+ ExprKind::Struct(_, fields, init) => fields
+ .iter()
+ .map(|field| field.expr)
+ .chain(init.into_iter())
+ .all(|e| e.can_have_side_effects()),
+
+ ExprKind::Array(args)
+ | ExprKind::Tup(args)
+ | ExprKind::Call(
+ Expr {
+ kind:
+ ExprKind::Path(QPath::Resolved(
+ None,
+ Path { res: Res::Def(DefKind::Ctor(_, CtorKind::Fn), _), .. },
+ )),
+ ..
+ },
+ args,
+ ) => args.iter().all(|arg| arg.can_have_side_effects()),
+ ExprKind::If(..)
+ | ExprKind::Match(..)
+ | ExprKind::MethodCall(..)
+ | ExprKind::Call(..)
+ | ExprKind::Closure(..)
+ | ExprKind::Block(..)
+ | ExprKind::Repeat(..)
+ | ExprKind::Break(..)
+ | ExprKind::Continue(..)
+ | ExprKind::Ret(..)
+ | ExprKind::Loop(..)
+ | ExprKind::Assign(..)
+ | ExprKind::InlineAsm(..)
+ | ExprKind::LlvmInlineAsm(..)
+ | ExprKind::AssignOp(..)
+ | ExprKind::ConstBlock(..)
+ | ExprKind::Box(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Yield(..)
+ | ExprKind::DropTemps(..)
+ | ExprKind::Err => true,
+ }
+ }
}
/// Checks if the specified expression is a built-in range literal.
@@ -1665,7 +1762,7 @@
///
/// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. base}`,
/// where `base` is the `Option<Expr>`.
- Struct(&'hir QPath<'hir>, &'hir [Field<'hir>], Option<&'hir Expr<'hir>>),
+ Struct(&'hir QPath<'hir>, &'hir [ExprField<'hir>], Option<&'hir Expr<'hir>>),
/// An array literal constructed from one repeated element.
///
@@ -1712,7 +1809,7 @@
pub fn span(&self) -> Span {
match *self {
QPath::Resolved(_, path) => path.span,
- QPath::TypeRelative(_, ps) => ps.ident.span,
+ QPath::TypeRelative(qself, ps) => qself.span.to(ps.ident.span),
QPath::LangItem(_, span) => span,
}
}
@@ -1911,7 +2008,15 @@
// so it can be fetched later.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Encodable, Debug)]
pub struct TraitItemId {
- pub hir_id: HirId,
+ pub def_id: LocalDefId,
+}
+
+impl TraitItemId {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
}
/// Represents an item declaration within a trait declaration,
@@ -1921,13 +2026,24 @@
#[derive(Debug)]
pub struct TraitItem<'hir> {
pub ident: Ident,
- pub hir_id: HirId,
- pub attrs: &'hir [Attribute],
+ pub def_id: LocalDefId,
pub generics: Generics<'hir>,
pub kind: TraitItemKind<'hir>,
pub span: Span,
}
+impl TraitItem<'_> {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+
+ pub fn trait_item_id(&self) -> TraitItemId {
+ TraitItemId { def_id: self.def_id }
+ }
+}
+
/// Represents a trait method's body (or just argument names).
#[derive(Encodable, Debug, HashStable_Generic)]
pub enum TraitFn<'hir> {
@@ -1955,22 +2071,41 @@
// so it can be fetched later.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Encodable, Debug)]
pub struct ImplItemId {
- pub hir_id: HirId,
+ pub def_id: LocalDefId,
+}
+
+impl ImplItemId {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
}
/// Represents anything within an `impl` block.
#[derive(Debug)]
pub struct ImplItem<'hir> {
pub ident: Ident,
- pub hir_id: HirId,
+ pub def_id: LocalDefId,
pub vis: Visibility<'hir>,
pub defaultness: Defaultness,
- pub attrs: &'hir [Attribute],
pub generics: Generics<'hir>,
pub kind: ImplItemKind<'hir>,
pub span: Span,
}
+impl ImplItem<'_> {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+
+ pub fn impl_item_id(&self) -> ImplItemId {
+ ImplItemId { def_id: self.def_id }
+ }
+}
+
/// Represents various kinds of content within an `impl`.
#[derive(Debug, HashStable_Generic)]
pub enum ImplItemKind<'hir> {
@@ -2058,6 +2193,28 @@
}
impl PrimTy {
+ /// All of the primitive types
+ pub const ALL: [Self; 17] = [
+ // any changes here should also be reflected in `PrimTy::from_name`
+ Self::Int(IntTy::I8),
+ Self::Int(IntTy::I16),
+ Self::Int(IntTy::I32),
+ Self::Int(IntTy::I64),
+ Self::Int(IntTy::I128),
+ Self::Int(IntTy::Isize),
+ Self::Uint(UintTy::U8),
+ Self::Uint(UintTy::U16),
+ Self::Uint(UintTy::U32),
+ Self::Uint(UintTy::U64),
+ Self::Uint(UintTy::U128),
+ Self::Uint(UintTy::Usize),
+ Self::Float(FloatTy::F32),
+ Self::Float(FloatTy::F64),
+ Self::Bool,
+ Self::Char,
+ Self::Str,
+ ];
+
pub fn name_str(self) -> &'static str {
match self {
PrimTy::Int(i) => i.name_str(),
@@ -2079,6 +2236,33 @@
PrimTy::Char => sym::char,
}
}
+
+ /// Returns the matching `PrimTy` for a `Symbol` such as "str" or "i32".
+ /// Returns `None` if no matching type is found.
+ pub fn from_name(name: Symbol) -> Option<Self> {
+ let ty = match name {
+ // any changes here should also be reflected in `PrimTy::ALL`
+ sym::i8 => Self::Int(IntTy::I8),
+ sym::i16 => Self::Int(IntTy::I16),
+ sym::i32 => Self::Int(IntTy::I32),
+ sym::i64 => Self::Int(IntTy::I64),
+ sym::i128 => Self::Int(IntTy::I128),
+ sym::isize => Self::Int(IntTy::Isize),
+ sym::u8 => Self::Uint(UintTy::U8),
+ sym::u16 => Self::Uint(UintTy::U16),
+ sym::u32 => Self::Uint(UintTy::U32),
+ sym::u64 => Self::Uint(UintTy::U64),
+ sym::u128 => Self::Uint(UintTy::U128),
+ sym::usize => Self::Uint(UintTy::Usize),
+ sym::f32 => Self::Float(FloatTy::F32),
+ sym::f64 => Self::Float(FloatTy::F64),
+ sym::bool => Self::Bool,
+ sym::char => Self::Char,
+ sym::str => Self::Str,
+ _ => return None,
+ };
+ Some(ty)
+ }
}
#[derive(Debug, HashStable_Generic)]
@@ -2107,7 +2291,9 @@
AsyncFn,
/// `let _: impl Trait = ...`
Binding,
- /// Impl trait in type aliases, consts, statics, bounds.
+ /// type aliases: `type Foo = impl Trait;`
+ TyAlias,
+ /// Impl trait in consts, statics, bounds.
Misc,
}
@@ -2141,7 +2327,7 @@
OpaqueDef(ItemId, &'hir [GenericArg<'hir>]),
/// A trait object type `Bound1 + Bound2 + Bound3`
/// where `Bound` is a trait or a lifetime.
- TraitObject(&'hir [PolyTraitRef<'hir>], Lifetime),
+ TraitObject(&'hir [PolyTraitRef<'hir>], Lifetime, TraitObjectSyntax),
/// Unused for now.
Typeof(AnonConst),
/// `TyKind::Infer` means the type should be inferred instead of it having been
@@ -2201,7 +2387,7 @@
pub line_spans: &'hir [Span],
}
-#[derive(Copy, Clone, Encodable, Decodable, Debug, HashStable_Generic, PartialEq)]
+#[derive(Copy, Clone, Encodable, Decodable, Debug, Hash, HashStable_Generic, PartialEq)]
pub struct LlvmInlineAsmOutput {
pub constraint: Symbol,
pub is_rw: bool,
@@ -2212,7 +2398,7 @@
// NOTE(eddyb) This is used within MIR as well, so unlike the rest of the HIR,
// it needs to be `Clone` and `Decodable` and use plain `Vec<T>` instead of
// arena-allocated slice.
-#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic, PartialEq)]
+#[derive(Clone, Encodable, Decodable, Debug, Hash, HashStable_Generic, PartialEq)]
pub struct LlvmInlineAsmInner {
pub asm: Symbol,
pub asm_str_style: StrStyle,
@@ -2234,7 +2420,6 @@
/// Represents a parameter in a function header.
#[derive(Debug, HashStable_Generic)]
pub struct Param<'hir> {
- pub attrs: &'hir [Attribute],
pub hir_id: HirId,
pub pat: &'hir Pat<'hir>,
pub ty_span: Span,
@@ -2352,8 +2537,6 @@
/// Name of the variant.
#[stable_hasher(project(name))]
pub ident: Ident,
- /// Attributes of the variant.
- pub attrs: &'hir [Attribute],
/// Id of the variant (not the constructor, see `VariantData::ctor_hir_id()`).
pub id: HirId,
/// Fields and constructor id of the variant.
@@ -2440,17 +2623,16 @@
}
#[derive(Debug, HashStable_Generic)]
-pub struct StructField<'hir> {
+pub struct FieldDef<'hir> {
pub span: Span,
#[stable_hasher(project(name))]
pub ident: Ident,
pub vis: Visibility<'hir>,
pub hir_id: HirId,
pub ty: &'hir Ty<'hir>,
- pub attrs: &'hir [Attribute],
}
-impl StructField<'_> {
+impl FieldDef<'_> {
// Still necessary in a couple of places
pub fn is_positional(&self) -> bool {
let first = self.ident.as_str().as_bytes()[0];
@@ -2464,11 +2646,11 @@
/// A struct variant.
///
/// E.g., `Bar { .. }` as in `enum Foo { Bar { .. } }`.
- Struct(&'hir [StructField<'hir>], /* recovered */ bool),
+ Struct(&'hir [FieldDef<'hir>], /* recovered */ bool),
/// A tuple variant.
///
/// E.g., `Bar(..)` as in `enum Foo { Bar(..) }`.
- Tuple(&'hir [StructField<'hir>], HirId),
+ Tuple(&'hir [FieldDef<'hir>], HirId),
/// A unit variant.
///
/// E.g., `Bar = ..` as in `enum Foo { Bar = .. }`.
@@ -2477,7 +2659,7 @@
impl VariantData<'hir> {
/// Return the fields of this variant.
- pub fn fields(&self) -> &'hir [StructField<'hir>] {
+ pub fn fields(&self) -> &'hir [FieldDef<'hir>] {
match *self {
VariantData::Struct(ref fields, ..) | VariantData::Tuple(ref fields, ..) => fields,
_ => &[],
@@ -2496,9 +2678,17 @@
// The bodies for items are stored "out of line", in a separate
// hashmap in the `Crate`. Here we just record the hir-id of the item
// so it can be fetched later.
-#[derive(Copy, Clone, Encodable, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Encodable, Debug, Hash)]
pub struct ItemId {
- pub id: HirId,
+ pub def_id: LocalDefId,
+}
+
+impl ItemId {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
}
/// An item
@@ -2507,13 +2697,24 @@
#[derive(Debug)]
pub struct Item<'hir> {
pub ident: Ident,
- pub hir_id: HirId,
- pub attrs: &'hir [Attribute],
+ pub def_id: LocalDefId,
pub kind: ItemKind<'hir>,
pub vis: Visibility<'hir>,
pub span: Span,
}
+impl Item<'_> {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+
+ pub fn item_id(&self) -> ItemId {
+ ItemId { def_id: self.def_id }
+ }
+}
+
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[derive(Encodable, Decodable, HashStable_Generic)]
pub enum Unsafety {
@@ -2684,7 +2885,15 @@
// so it can be fetched later.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Encodable, Debug)]
pub struct ForeignItemId {
- pub hir_id: HirId,
+ pub def_id: LocalDefId,
+}
+
+impl ForeignItemId {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
}
/// A reference from a foreign block to one of its items. This
@@ -2702,17 +2911,27 @@
pub vis: Visibility<'hir>,
}
-#[derive(Debug, HashStable_Generic)]
+#[derive(Debug)]
pub struct ForeignItem<'hir> {
- #[stable_hasher(project(name))]
pub ident: Ident,
- pub attrs: &'hir [Attribute],
pub kind: ForeignItemKind<'hir>,
- pub hir_id: HirId,
+ pub def_id: LocalDefId,
pub span: Span,
pub vis: Visibility<'hir>,
}
+impl ForeignItem<'_> {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+
+ pub fn foreign_item_id(&self) -> ForeignItemId {
+ ForeignItemId { def_id: self.def_id }
+ }
+}
+
/// An item within an `extern` block.
#[derive(Debug, HashStable_Generic)]
pub enum ForeignItemKind<'hir> {
@@ -2748,7 +2967,7 @@
TraitItem(&'hir TraitItem<'hir>),
ImplItem(&'hir ImplItem<'hir>),
Variant(&'hir Variant<'hir>),
- Field(&'hir StructField<'hir>),
+ Field(&'hir FieldDef<'hir>),
AnonConst(&'hir AnonConst),
Expr(&'hir Expr<'hir>),
Stmt(&'hir Stmt<'hir>),
@@ -2779,7 +2998,7 @@
Node::TraitItem(TraitItem { ident, .. })
| Node::ImplItem(ImplItem { ident, .. })
| Node::ForeignItem(ForeignItem { ident, .. })
- | Node::Field(StructField { ident, .. })
+ | Node::Field(FieldDef { ident, .. })
| Node::Variant(Variant { ident, .. })
| Node::MacroDef(MacroDef { ident, .. })
| Node::Item(Item { ident, .. }) => Some(*ident),
@@ -2822,11 +3041,12 @@
pub fn hir_id(&self) -> Option<HirId> {
match self {
- Node::Item(Item { hir_id, .. })
- | Node::ForeignItem(ForeignItem { hir_id, .. })
- | Node::TraitItem(TraitItem { hir_id, .. })
- | Node::ImplItem(ImplItem { hir_id, .. })
- | Node::Field(StructField { hir_id, .. })
+ Node::Item(Item { def_id, .. })
+ | Node::TraitItem(TraitItem { def_id, .. })
+ | Node::ImplItem(ImplItem { def_id, .. })
+ | Node::ForeignItem(ForeignItem { def_id, .. })
+ | Node::MacroDef(MacroDef { def_id, .. }) => Some(HirId::make_owner(*def_id)),
+ Node::Field(FieldDef { hir_id, .. })
| Node::AnonConst(AnonConst { hir_id, .. })
| Node::Expr(Expr { hir_id, .. })
| Node::Stmt(Stmt { hir_id, .. })
@@ -2836,7 +3056,6 @@
| Node::Arm(Arm { hir_id, .. })
| Node::Block(Block { hir_id, .. })
| Node::Local(Local { hir_id, .. })
- | Node::MacroDef(MacroDef { hir_id, .. })
| Node::Lifetime(Lifetime { hir_id, .. })
| Node::Param(Param { hir_id, .. })
| Node::GenericParam(GenericParam { hir_id, .. }) => Some(*hir_id),
@@ -2848,3 +3067,18 @@
}
}
}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ rustc_data_structures::static_assert_size!(super::Block<'static>, 48);
+ rustc_data_structures::static_assert_size!(super::Expr<'static>, 64);
+ rustc_data_structures::static_assert_size!(super::Pat<'static>, 88);
+ rustc_data_structures::static_assert_size!(super::QPath<'static>, 24);
+ rustc_data_structures::static_assert_size!(super::Ty<'static>, 72);
+
+ rustc_data_structures::static_assert_size!(super::Item<'static>, 184);
+ rustc_data_structures::static_assert_size!(super::TraitItem<'static>, 128);
+ rustc_data_structures::static_assert_size!(super::ImplItem<'static>, 152);
+ rustc_data_structures::static_assert_size!(super::ForeignItem<'static>, 136);
+}
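// A standalone sketch (plain strings instead of `Symbol`, a trimmed variant
// list) of the consistency that the two "any changes here should also be
// reflected in ..." comments above ask for: every entry of `ALL` should
// round-trip through `from_name`. In-tree this would be a unit test against
// `PrimTy`; the types here are illustrative stand-ins.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum PrimTy {
    I32,
    U32,
    Bool,
    Str,
}

impl PrimTy {
    const ALL: [Self; 4] = [Self::I32, Self::U32, Self::Bool, Self::Str];

    fn name(self) -> &'static str {
        match self {
            Self::I32 => "i32",
            Self::U32 => "u32",
            Self::Bool => "bool",
            Self::Str => "str",
        }
    }

    fn from_name(name: &str) -> Option<Self> {
        let ty = match name {
            "i32" => Self::I32,
            "u32" => Self::U32,
            "bool" => Self::Bool,
            "str" => Self::Str,
            _ => return None,
        };
        Some(ty)
    }
}

fn main() {
    // The round-trip check that keeps `ALL` and `from_name` in sync.
    for ty in PrimTy::ALL.iter().copied() {
        assert_eq!(PrimTy::from_name(ty.name()), Some(ty));
    }
}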
diff --git a/compiler/rustc_hir/src/hir_id.rs b/compiler/rustc_hir/src/hir_id.rs
index cc8ac4c..e0b3d90 100644
--- a/compiler/rustc_hir/src/hir_id.rs
+++ b/compiler/rustc_hir/src/hir_id.rs
@@ -1,4 +1,5 @@
use crate::def_id::{LocalDefId, CRATE_DEF_INDEX};
+use rustc_index::vec::IndexVec;
use std::fmt;
/// Uniquely identifies a node in the HIR of the current crate. It is
@@ -18,6 +19,22 @@
pub local_id: ItemLocalId,
}
+impl HirId {
+ pub fn expect_owner(self) -> LocalDefId {
+ assert_eq!(self.local_id.index(), 0);
+ self.owner
+ }
+
+ pub fn as_owner(self) -> Option<LocalDefId> {
+ if self.local_id.index() == 0 { Some(self.owner) } else { None }
+ }
+
+ #[inline]
+ pub fn make_owner(owner: LocalDefId) -> Self {
+ Self { owner, local_id: ItemLocalId::from_u32(0) }
+ }
+}
+
impl fmt::Display for HirId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
@@ -45,3 +62,69 @@
owner: LocalDefId { local_def_index: CRATE_DEF_INDEX },
local_id: ItemLocalId::from_u32(0),
};
+
+#[derive(Clone, Default, Debug, Encodable, Decodable)]
+pub struct HirIdVec<T> {
+ map: IndexVec<LocalDefId, IndexVec<ItemLocalId, T>>,
+}
+
+impl<T> HirIdVec<T> {
+ pub fn push_owner(&mut self, id: LocalDefId) {
+ self.map.ensure_contains_elem(id, IndexVec::new);
+ }
+
+ pub fn push(&mut self, id: HirId, value: T) {
+ if id.local_id == ItemLocalId::from_u32(0) {
+ self.push_owner(id.owner);
+ }
+ let submap = &mut self.map[id.owner];
+ let _ret_id = submap.push(value);
+ debug_assert_eq!(_ret_id, id.local_id);
+ }
+
+ pub fn push_sparse(&mut self, id: HirId, value: T)
+ where
+ T: Default,
+ {
+ self.map.ensure_contains_elem(id.owner, IndexVec::new);
+ let submap = &mut self.map[id.owner];
+ let i = id.local_id.index();
+ let len = submap.len();
+ if i >= len {
+ submap.extend(std::iter::repeat_with(T::default).take(i - len + 1));
+ }
+ submap[id.local_id] = value;
+ }
+
+ pub fn get(&self, id: HirId) -> Option<&T> {
+ self.map.get(id.owner)?.get(id.local_id)
+ }
+
+ pub fn get_owner(&self, id: LocalDefId) -> &IndexVec<ItemLocalId, T> {
+ &self.map[id]
+ }
+
+ pub fn iter(&self) -> impl Iterator<Item = &T> {
+ self.map.iter().flat_map(|la| la.iter())
+ }
+
+ pub fn iter_enumerated(&self) -> impl Iterator<Item = (HirId, &T)> {
+ self.map.iter_enumerated().flat_map(|(owner, la)| {
+ la.iter_enumerated().map(move |(local_id, attr)| (HirId { owner, local_id }, attr))
+ })
+ }
+}
+
+impl<T> std::ops::Index<HirId> for HirIdVec<T> {
+ type Output = T;
+
+ fn index(&self, id: HirId) -> &T {
+ &self.map[id.owner][id.local_id]
+ }
+}
+
+impl<T> std::ops::IndexMut<HirId> for HirIdVec<T> {
+ fn index_mut(&mut self, id: HirId) -> &mut T {
+ &mut self.map[id.owner][id.local_id]
+ }
+}
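// A standalone sketch (simplified types, not the rustc definitions) of the
// invariant this patch leans on throughout: item-like nodes are always HIR
// owners, so their `HirId` has `local_id == 0` and can be recomputed from the
// owning `LocalDefId` alone, which is why `ItemId` and friends now store only
// a `def_id` and derive the `HirId` on demand.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct LocalDefId(u32);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct HirId {
    owner: LocalDefId,
    local_id: u32,
}

impl HirId {
    // Builds the id of an owner node: by construction its local id is 0.
    fn make_owner(owner: LocalDefId) -> Self {
        HirId { owner, local_id: 0 }
    }

    // Recovers the owning `LocalDefId` only when this id refers to the owner
    // node itself.
    fn as_owner(self) -> Option<LocalDefId> {
        if self.local_id == 0 { Some(self.owner) } else { None }
    }
}

// Mirrors the new shape of `ItemId::hir_id()` in hir.rs above.
struct ItemId {
    def_id: LocalDefId,
}

impl ItemId {
    fn hir_id(&self) -> HirId {
        HirId::make_owner(self.def_id)
    }
}

fn main() {
    let item = ItemId { def_id: LocalDefId(7) };
    let hir_id = item.hir_id();
    assert_eq!(hir_id.as_owner(), Some(LocalDefId(7)));
}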
diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs
index f8b3f0d..701e4a6 100644
--- a/compiler/rustc_hir/src/intravisit.rs
+++ b/compiler/rustc_hir/src/intravisit.rs
@@ -101,29 +101,21 @@
#[derive(Copy, Clone)]
pub enum FnKind<'a> {
/// `#[xxx] pub async/const/extern "Abi" fn foo()`
- ItemFn(Ident, &'a Generics<'a>, FnHeader, &'a Visibility<'a>, &'a [Attribute]),
+ ItemFn(Ident, &'a Generics<'a>, FnHeader, &'a Visibility<'a>),
/// `fn foo(&self)`
- Method(Ident, &'a FnSig<'a>, Option<&'a Visibility<'a>>, &'a [Attribute]),
+ Method(Ident, &'a FnSig<'a>, Option<&'a Visibility<'a>>),
/// `|x, y| {}`
- Closure(&'a [Attribute]),
+ Closure,
}
impl<'a> FnKind<'a> {
- pub fn attrs(&self) -> &'a [Attribute] {
- match *self {
- FnKind::ItemFn(.., attrs) => attrs,
- FnKind::Method(.., attrs) => attrs,
- FnKind::Closure(attrs) => attrs,
- }
- }
-
pub fn header(&self) -> Option<&FnHeader> {
match *self {
- FnKind::ItemFn(_, _, ref header, _, _) => Some(header),
- FnKind::Method(_, ref sig, _, _) => Some(&sig.header),
- FnKind::Closure(_) => None,
+ FnKind::ItemFn(_, _, ref header, _) => Some(header),
+ FnKind::Method(_, ref sig, _) => Some(&sig.header),
+ FnKind::Closure => None,
}
}
}
@@ -133,7 +125,7 @@
/// Retrieves the `Node` corresponding to `id`, returning `None` if it cannot be found.
fn find(&self, hir_id: HirId) -> Option<Node<'hir>>;
fn body(&self, id: BodyId) -> &'hir Body<'hir>;
- fn item(&self, id: HirId) -> &'hir Item<'hir>;
+ fn item(&self, id: ItemId) -> &'hir Item<'hir>;
fn trait_item(&self, id: TraitItemId) -> &'hir TraitItem<'hir>;
fn impl_item(&self, id: ImplItemId) -> &'hir ImplItem<'hir>;
fn foreign_item(&self, id: ForeignItemId) -> &'hir ForeignItem<'hir>;
@@ -150,7 +142,7 @@
fn body(&self, id: BodyId) -> &'hir Body<'hir> {
self.0.body(id)
}
- fn item(&self, id: HirId) -> &'hir Item<'hir> {
+ fn item(&self, id: ItemId) -> &'hir Item<'hir> {
self.0.item(id)
}
fn trait_item(&self, id: TraitItemId) -> &'hir TraitItem<'hir> {
@@ -269,7 +261,7 @@
/// reason to override this method is if you want a nested pattern
/// but cannot supply a `Map`; see `nested_visit_map` for advice.
fn visit_nested_item(&mut self, id: ItemId) {
- let opt_item = self.nested_visit_map().inter().map(|map| map.item(id.id));
+ let opt_item = self.nested_visit_map().inter().map(|map| map.item(id));
walk_list!(self, visit_item, opt_item);
}
@@ -423,8 +415,8 @@
) {
walk_struct_def(self, s)
}
- fn visit_struct_field(&mut self, s: &'v StructField<'v>) {
- walk_struct_field(self, s)
+ fn visit_field_def(&mut self, s: &'v FieldDef<'v>) {
+ walk_field_def(self, s)
}
fn visit_enum_def(
&mut self,
@@ -466,7 +458,7 @@
fn visit_assoc_type_binding(&mut self, type_binding: &'v TypeBinding<'v>) {
walk_assoc_type_binding(self, type_binding)
}
- fn visit_attribute(&mut self, _attr: &'v Attribute) {}
+ fn visit_attribute(&mut self, _id: HirId, _attr: &'v Attribute) {}
fn visit_macro_def(&mut self, macro_def: &'v MacroDef<'v>) {
walk_macro_def(self, macro_def)
}
@@ -484,14 +476,17 @@
/// Walks the contents of a crate. See also `Crate::visit_all_items`.
pub fn walk_crate<'v, V: Visitor<'v>>(visitor: &mut V, krate: &'v Crate<'v>) {
visitor.visit_mod(&krate.item.module, krate.item.span, CRATE_HIR_ID);
- walk_list!(visitor, visit_attribute, krate.item.attrs);
walk_list!(visitor, visit_macro_def, krate.exported_macros);
+ for (&id, attrs) in krate.attrs.iter() {
+ for a in *attrs {
+ visitor.visit_attribute(id, a)
+ }
+ }
}
pub fn walk_macro_def<'v, V: Visitor<'v>>(visitor: &mut V, macro_def: &'v MacroDef<'v>) {
- visitor.visit_id(macro_def.hir_id);
+ visitor.visit_id(macro_def.hir_id());
visitor.visit_ident(macro_def.ident);
- walk_list!(visitor, visit_attribute, macro_def.attrs);
}
pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod<'v>, mod_hir_id: HirId) {
@@ -510,7 +505,6 @@
// Intentionally visiting the expr first - the initialization expr
// dominates the local's definition.
walk_list!(visitor, visit_expr, &local.init);
- walk_list!(visitor, visit_attribute, local.attrs.iter());
visitor.visit_id(local.hir_id);
visitor.visit_pat(&local.pat);
walk_list!(visitor, visit_ty, &local.ty);
@@ -557,7 +551,6 @@
pub fn walk_param<'v, V: Visitor<'v>>(visitor: &mut V, param: &'v Param<'v>) {
visitor.visit_id(param.hir_id);
visitor.visit_pat(¶m.pat);
- walk_list!(visitor, visit_attribute, param.attrs);
}
pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) {
@@ -565,51 +558,51 @@
visitor.visit_ident(item.ident);
match item.kind {
ItemKind::ExternCrate(orig_name) => {
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
if let Some(orig_name) = orig_name {
visitor.visit_name(item.span, orig_name);
}
}
ItemKind::Use(ref path, _) => {
- visitor.visit_use(path, item.hir_id);
+ visitor.visit_use(path, item.hir_id());
}
ItemKind::Static(ref typ, _, body) | ItemKind::Const(ref typ, body) => {
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
visitor.visit_ty(typ);
visitor.visit_nested_body(body);
}
ItemKind::Fn(ref sig, ref generics, body_id) => visitor.visit_fn(
- FnKind::ItemFn(item.ident, generics, sig.header, &item.vis, &item.attrs),
+ FnKind::ItemFn(item.ident, generics, sig.header, &item.vis),
&sig.decl,
body_id,
item.span,
- item.hir_id,
+ item.hir_id(),
),
ItemKind::Mod(ref module) => {
// `visit_mod()` takes care of visiting the `Item`'s `HirId`.
- visitor.visit_mod(module, item.span, item.hir_id)
+ visitor.visit_mod(module, item.span, item.hir_id())
}
ItemKind::ForeignMod { abi: _, items } => {
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
walk_list!(visitor, visit_foreign_item_ref, items);
}
ItemKind::GlobalAsm(_) => {
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
}
ItemKind::TyAlias(ref ty, ref generics) => {
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
visitor.visit_ty(ty);
visitor.visit_generics(generics)
}
ItemKind::OpaqueTy(OpaqueTy { ref generics, bounds, .. }) => {
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
walk_generics(visitor, generics);
walk_list!(visitor, visit_param_bound, bounds);
}
ItemKind::Enum(ref enum_definition, ref generics) => {
visitor.visit_generics(generics);
// `visit_enum_def()` takes care of visiting the `Item`'s `HirId`.
- visitor.visit_enum_def(enum_definition, generics, item.hir_id, item.span)
+ visitor.visit_enum_def(enum_definition, generics, item.hir_id(), item.span)
}
ItemKind::Impl(Impl {
unsafety: _,
@@ -622,7 +615,7 @@
ref self_ty,
items,
}) => {
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
visitor.visit_generics(generics);
walk_list!(visitor, visit_trait_ref, of_trait);
visitor.visit_ty(self_ty);
@@ -631,28 +624,27 @@
ItemKind::Struct(ref struct_definition, ref generics)
| ItemKind::Union(ref struct_definition, ref generics) => {
visitor.visit_generics(generics);
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
visitor.visit_variant_data(
struct_definition,
item.ident.name,
generics,
- item.hir_id,
+ item.hir_id(),
item.span,
);
}
ItemKind::Trait(.., ref generics, bounds, trait_item_refs) => {
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
visitor.visit_generics(generics);
walk_list!(visitor, visit_param_bound, bounds);
walk_list!(visitor, visit_trait_item_ref, trait_item_refs);
}
ItemKind::TraitAlias(ref generics, bounds) => {
- visitor.visit_id(item.hir_id);
+ visitor.visit_id(item.hir_id());
visitor.visit_generics(generics);
walk_list!(visitor, visit_param_bound, bounds);
}
}
- walk_list!(visitor, visit_attribute, item.attrs);
}
pub fn walk_use<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path<'v>, hir_id: HirId) {
@@ -686,7 +678,6 @@
variant.span,
);
walk_list!(visitor, visit_anon_const, &variant.disr_expr);
- walk_list!(visitor, visit_attribute, variant.attrs);
}
pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty<'v>) {
@@ -718,7 +709,7 @@
visitor.visit_ty(ty);
visitor.visit_anon_const(length)
}
- TyKind::TraitObject(bounds, ref lifetime) => {
+ TyKind::TraitObject(bounds, ref lifetime, _syntax) => {
for bound in bounds {
visitor.visit_poly_trait_ref(bound, TraitBoundModifier::None);
}
@@ -836,7 +827,7 @@
}
pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem<'v>) {
- visitor.visit_id(foreign_item.hir_id);
+ visitor.visit_id(foreign_item.hir_id());
visitor.visit_vis(&foreign_item.vis);
visitor.visit_ident(foreign_item.ident);
@@ -851,8 +842,6 @@
ForeignItemKind::Static(ref typ, _) => visitor.visit_ty(typ),
ForeignItemKind::Type => (),
}
-
- walk_list!(visitor, visit_attribute, foreign_item.attrs);
}
pub fn walk_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v GenericBound<'v>) {
@@ -870,7 +859,6 @@
pub fn walk_generic_param<'v, V: Visitor<'v>>(visitor: &mut V, param: &'v GenericParam<'v>) {
visitor.visit_id(param.hir_id);
- walk_list!(visitor, visit_attribute, param.attrs);
match param.name {
ParamName::Plain(ident) => visitor.visit_ident(ident),
ParamName::Error | ParamName::Fresh(_) => {}
@@ -940,7 +928,7 @@
FnKind::ItemFn(_, generics, ..) => {
visitor.visit_generics(generics);
}
- FnKind::Method(..) | FnKind::Closure(_) => {}
+ FnKind::Method(..) | FnKind::Closure => {}
}
}
@@ -960,16 +948,15 @@
pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v TraitItem<'v>) {
visitor.visit_ident(trait_item.ident);
- walk_list!(visitor, visit_attribute, trait_item.attrs);
visitor.visit_generics(&trait_item.generics);
match trait_item.kind {
TraitItemKind::Const(ref ty, default) => {
- visitor.visit_id(trait_item.hir_id);
+ visitor.visit_id(trait_item.hir_id());
visitor.visit_ty(ty);
walk_list!(visitor, visit_nested_body, default);
}
TraitItemKind::Fn(ref sig, TraitFn::Required(param_names)) => {
- visitor.visit_id(trait_item.hir_id);
+ visitor.visit_id(trait_item.hir_id());
visitor.visit_fn_decl(&sig.decl);
for ¶m_name in param_names {
visitor.visit_ident(param_name);
@@ -977,15 +964,15 @@
}
TraitItemKind::Fn(ref sig, TraitFn::Provided(body_id)) => {
visitor.visit_fn(
- FnKind::Method(trait_item.ident, sig, None, &trait_item.attrs),
+ FnKind::Method(trait_item.ident, sig, None),
&sig.decl,
body_id,
trait_item.span,
- trait_item.hir_id,
+ trait_item.hir_id(),
);
}
TraitItemKind::Type(bounds, ref default) => {
- visitor.visit_id(trait_item.hir_id);
+ visitor.visit_id(trait_item.hir_id());
walk_list!(visitor, visit_param_bound, bounds);
walk_list!(visitor, visit_ty, default);
}
@@ -1003,39 +990,30 @@
pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplItem<'v>) {
// N.B., deliberately force a compilation error if/when new fields are added.
- let ImplItem {
- hir_id: _,
- ident,
- ref vis,
- ref defaultness,
- attrs,
- ref generics,
- ref kind,
- span: _,
- } = *impl_item;
+ let ImplItem { def_id: _, ident, ref vis, ref defaultness, ref generics, ref kind, span: _ } =
+ *impl_item;
visitor.visit_ident(ident);
visitor.visit_vis(vis);
visitor.visit_defaultness(defaultness);
- walk_list!(visitor, visit_attribute, attrs);
visitor.visit_generics(generics);
match *kind {
ImplItemKind::Const(ref ty, body) => {
- visitor.visit_id(impl_item.hir_id);
+ visitor.visit_id(impl_item.hir_id());
visitor.visit_ty(ty);
visitor.visit_nested_body(body);
}
ImplItemKind::Fn(ref sig, body_id) => {
visitor.visit_fn(
- FnKind::Method(impl_item.ident, sig, Some(&impl_item.vis), &impl_item.attrs),
+ FnKind::Method(impl_item.ident, sig, Some(&impl_item.vis)),
&sig.decl,
body_id,
impl_item.span,
- impl_item.hir_id,
+ impl_item.hir_id(),
);
}
ImplItemKind::TyAlias(ref ty) => {
- visitor.visit_id(impl_item.hir_id);
+ visitor.visit_id(impl_item.hir_id());
visitor.visit_ty(ty);
}
}
@@ -1067,15 +1045,14 @@
struct_definition: &'v VariantData<'v>,
) {
walk_list!(visitor, visit_id, struct_definition.ctor_hir_id());
- walk_list!(visitor, visit_struct_field, struct_definition.fields());
+ walk_list!(visitor, visit_field_def, struct_definition.fields());
}
-pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField<'v>) {
- visitor.visit_id(struct_field.hir_id);
- visitor.visit_vis(&struct_field.vis);
- visitor.visit_ident(struct_field.ident);
- visitor.visit_ty(&struct_field.ty);
- walk_list!(visitor, visit_attribute, struct_field.attrs);
+pub fn walk_field_def<'v, V: Visitor<'v>>(visitor: &mut V, field: &'v FieldDef<'v>) {
+ visitor.visit_id(field.hir_id);
+ visitor.visit_vis(&field.vis);
+ visitor.visit_ident(field.ident);
+ visitor.visit_ty(&field.ty);
}
pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block<'v>) {
@@ -1102,7 +1079,6 @@
pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>) {
visitor.visit_id(expression.hir_id);
- walk_list!(visitor, visit_attribute, expression.attrs.iter());
match expression.kind {
ExprKind::Box(ref subexpression) => visitor.visit_expr(subexpression),
ExprKind::Array(subexpressions) => {
@@ -1162,7 +1138,7 @@
}
ExprKind::Closure(_, ref function_declaration, body, _fn_decl_span, _gen) => visitor
.visit_fn(
- FnKind::Closure(&expression.attrs),
+ FnKind::Closure,
function_declaration,
body,
expression.span,
@@ -1246,7 +1222,6 @@
}
}
visitor.visit_expr(&arm.body);
- walk_list!(visitor, visit_attribute, arm.attrs);
}
pub fn walk_vis<'v, V: Visitor<'v>>(visitor: &mut V, vis: &'v Visibility<'v>) {
diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs
index 26ce30c..498000d 100644
--- a/compiler/rustc_hir/src/lang_items.rs
+++ b/compiler/rustc_hir/src/lang_items.rs
@@ -38,27 +38,34 @@
// So you probably just want to nip down to the end.
macro_rules! language_item_table {
(
- $( $variant:ident $($group:expr)?, $name:expr, $method:ident, $target:expr; )*
+ $( $(#[$attr:meta])* $variant:ident $($group:expr)?, $module:ident :: $name:ident, $method:ident, $target:expr; )*
) => {
enum_from_u32! {
/// A representation of all the valid language items in Rust.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Encodable, Decodable)]
pub enum LangItem {
- $($variant,)*
+ $(
+ #[doc = concat!("The `", stringify!($name), "` lang item.")]
+ ///
+ $(#[$attr])*
+ $variant,
+ )*
}
}
impl LangItem {
/// Returns the `name` symbol in `#[lang = "$name"]`.
- /// For example, `LangItem::EqTraitLangItem`,
- /// that is `#[lang = "eq"]` would result in `sym::eq`.
+ /// For example, [`LangItem::PartialEq`]`.name()`
+ /// would result in [`sym::eq`] since it is `#[lang = "eq"]`.
pub fn name(self) -> Symbol {
match self {
- $( LangItem::$variant => $name, )*
+ $( LangItem::$variant => $module::$name, )*
}
}
+ /// The [group](LangItemGroup) that this lang item belongs to,
+ /// or `None` if it doesn't belong to a group.
pub fn group(self) -> Option<LangItemGroup> {
use LangItemGroup::*;
match self {
@@ -67,15 +74,17 @@
}
}
+ /// All of the language items, defined or not.
+ /// Defined lang items can come from the current crate or its dependencies.
#[derive(HashStable_Generic, Debug)]
pub struct LanguageItems {
- /// Mappings from lang items to their possibly found `DefId`s.
- /// The index corresponds to the order in `LangItem`.
+ /// Mappings from lang items to their possibly found [`DefId`]s.
+ /// The index corresponds to the order in [`LangItem`].
pub items: Vec<Option<DefId>>,
/// Lang items that were not found during collection.
pub missing: Vec<LangItem>,
- /// Mapping from `LangItemGroup` discriminants to all
- /// `DefId`s of lang items in that group.
+ /// Mapping from [`LangItemGroup`] discriminants to all
+ /// [`DefId`]s of lang items in that group.
pub groups: [Vec<DefId>; NUM_GROUPS],
}
@@ -103,14 +112,13 @@
self.items[it as usize].ok_or_else(|| format!("requires `{}` lang_item", it.name()))
}
+ /// Returns the [`DefId`]s of all lang items in a group.
pub fn group(&self, group: LangItemGroup) -> &[DefId] {
self.groups[group as usize].as_ref()
}
$(
- /// Returns the corresponding `DefId` for the lang item if it
- /// exists.
- #[allow(dead_code)]
+ #[doc = concat!("Returns the [`DefId`] of the `", stringify!($name), "` lang item if it is defined.")]
pub fn $method(&self) -> Option<DefId> {
self.items[LangItem::$variant as usize]
}
@@ -120,7 +128,7 @@
/// A mapping from the name of the lang item to its order and the form it must be of.
pub static ITEM_REFS: SyncLazy<FxHashMap<Symbol, (usize, Target)>> = SyncLazy::new(|| {
let mut item_refs = FxHashMap::default();
- $( item_refs.insert($name, (LangItem::$variant as usize, $target)); )*
+ $( item_refs.insert($module::$name, (LangItem::$variant as usize, $target)); )*
item_refs
});
@@ -140,7 +148,7 @@
///
/// About the `check_name` argument: passing in a `Session` would be simpler,
/// because then we could call `Session::check_name` directly. But we want to
-/// avoid the need for `librustc_hir` to depend on `librustc_session`, so we
+/// avoid the need for `rustc_hir` to depend on `rustc_session`, so we
/// use a closure instead.
pub fn extract<'a, F>(check_name: F, attrs: &'a [ast::Attribute]) -> Option<(Symbol, Span)>
where
@@ -190,17 +198,21 @@
Sized, sym::sized, sized_trait, Target::Trait;
Unsize, sym::unsize, unsize_trait, Target::Trait;
- // Trait injected by #[derive(PartialEq)], (i.e. "Partial EQ").
+ /// Trait injected by `#[derive(PartialEq)]`, (i.e. "Partial EQ").
StructuralPeq, sym::structural_peq, structural_peq_trait, Target::Trait;
- // Trait injected by #[derive(Eq)], (i.e. "Total EQ"; no, I will not apologize).
+ /// Trait injected by `#[derive(Eq)]`, (i.e. "Total EQ"; no, I will not apologize).
StructuralTeq, sym::structural_teq, structural_teq_trait, Target::Trait;
Copy, sym::copy, copy_trait, Target::Trait;
Clone, sym::clone, clone_trait, Target::Trait;
Sync, sym::sync, sync_trait, Target::Trait;
DiscriminantKind, sym::discriminant_kind, discriminant_kind_trait, Target::Trait;
- // The associated item of `trait DiscriminantKind`.
+ /// The associated item of the [`DiscriminantKind`] trait.
Discriminant, sym::discriminant_type, discriminant_type, Target::AssocTy;
+ PointeeTrait, sym::pointee_trait, pointee_trait, Target::Trait;
+ Metadata, sym::metadata_type, metadata_type, Target::AssocTy;
+ DynMetadata, sym::dyn_metadata, dyn_metadata, Target::Struct;
+
Freeze, sym::freeze, freeze_trait, Target::Trait;
Drop, sym::drop, drop_trait, Target::Trait;
@@ -238,6 +250,7 @@
Deref, sym::deref, deref_trait, Target::Trait;
DerefMut, sym::deref_mut, deref_mut_trait, Target::Trait;
+ DerefTarget, sym::deref_target, deref_target, Target::AssocTy;
Receiver, sym::receiver, receiver_trait, Target::Trait;
Fn, kw::Fn, fn_trait, Target::Trait;
@@ -268,7 +281,7 @@
PanicInfo, sym::panic_info, panic_info, Target::Struct;
PanicLocation, sym::panic_location, panic_location, Target::Struct;
PanicImpl, sym::panic_impl, panic_impl, Target::Fn;
- // libstd panic entry point. Necessary for const eval to be able to catch it
+ /// libstd panic entry point. Necessary for const eval to be able to catch it
BeginPanic, sym::begin_panic, begin_panic_fn, Target::Fn;
ExchangeMalloc, sym::exchange_malloc, exchange_malloc_fn, Target::Fn;
@@ -290,7 +303,7 @@
MaybeUninit, sym::maybe_uninit, maybe_uninit, Target::Union;
- // Align offset for stride != 1; must not panic.
+ /// Align offset for stride != 1; must not panic.
AlignOffset, sym::align_offset, align_offset_fn, Target::Fn;
Termination, sym::termination, termination, Target::Trait;
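// A minimal sketch of the documentation technique the reworked
// `language_item_table!` macro uses above: generating per-variant doc comments
// with `#[doc = concat!(...)]` inside a `macro_rules!` expansion. On the
// nightly toolchain this patch targets, the `extended_key_value_attributes`
// feature gate is required (it is enabled in rustc_hir's lib.rs below); the
// macro name and variants here are illustrative, not from the patch.
macro_rules! describe_items {
    ( $( $variant:ident, $name:ident; )* ) => {
        #[derive(Debug)]
        pub enum Item {
            $(
                #[doc = concat!("The `", stringify!($name), "` item.")]
                $variant,
            )*
        }
    };
}

describe_items! {
    PartialEq, eq;
    Sized, sized;
}

fn main() {
    println!("{:?} {:?}", Item::PartialEq, Item::Sized);
}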
diff --git a/compiler/rustc_hir/src/lib.rs b/compiler/rustc_hir/src/lib.rs
index c69a9b0..45bb911 100644
--- a/compiler/rustc_hir/src/lib.rs
+++ b/compiler/rustc_hir/src/lib.rs
@@ -5,6 +5,7 @@
#![feature(crate_visibility_modifier)]
#![feature(const_fn)] // For the unsizing cast on `&[]`
#![feature(const_panic)]
+#![feature(extended_key_value_attributes)]
#![feature(in_band_lifetimes)]
#![feature(once_cell)]
#![feature(or_patterns)]
@@ -30,6 +31,9 @@
mod target;
pub mod weak_lang_items;
+#[cfg(test)]
+mod tests;
+
pub use hir::*;
pub use hir_id::*;
pub use lang_items::{LangItem, LanguageItems};
diff --git a/compiler/rustc_hir/src/stable_hash_impls.rs b/compiler/rustc_hir/src/stable_hash_impls.rs
index 439fb88..55e8766 100644
--- a/compiler/rustc_hir/src/stable_hash_impls.rs
+++ b/compiler/rustc_hir/src/stable_hash_impls.rs
@@ -1,8 +1,8 @@
use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
use crate::hir::{
- BodyId, Expr, ForeignItemId, ImplItem, ImplItemId, Item, ItemId, Mod, TraitItem, TraitItemId,
- Ty, VisibilityKind,
+ BodyId, Expr, ForeignItem, ForeignItemId, ImplItem, ImplItemId, Item, ItemId, MacroDef, Mod,
+ TraitItem, TraitItemId, Ty, VisibilityKind,
};
use crate::hir_id::{HirId, ItemLocalId};
use rustc_span::def_id::{DefPathHash, LocalDefId};
@@ -34,30 +34,39 @@
}
}
-impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for TraitItemId {
- type KeyType = (DefPathHash, ItemLocalId);
+impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ItemId {
+ type KeyType = DefPathHash;
#[inline]
- fn to_stable_hash_key(&self, hcx: &HirCtx) -> (DefPathHash, ItemLocalId) {
- self.hir_id.to_stable_hash_key(hcx)
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
+ hcx.local_def_path_hash(self.def_id)
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for TraitItemId {
+ type KeyType = DefPathHash;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
+ hcx.local_def_path_hash(self.def_id)
}
}
impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ImplItemId {
- type KeyType = (DefPathHash, ItemLocalId);
+ type KeyType = DefPathHash;
#[inline]
- fn to_stable_hash_key(&self, hcx: &HirCtx) -> (DefPathHash, ItemLocalId) {
- self.hir_id.to_stable_hash_key(hcx)
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
+ hcx.local_def_path_hash(self.def_id)
}
}
impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ForeignItemId {
- type KeyType = (DefPathHash, ItemLocalId);
+ type KeyType = DefPathHash;
#[inline]
- fn to_stable_hash_key(&self, hcx: &HirCtx) -> (DefPathHash, ItemLocalId) {
- self.hir_id.to_stable_hash_key(hcx)
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
+ hcx.local_def_path_hash(self.def_id)
}
}
@@ -82,25 +91,25 @@
impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for ItemId {
fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
- hcx.hash_reference_to_item(self.id, hasher)
+ hcx.hash_reference_to_item(self.hir_id(), hasher)
}
}
impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for ForeignItemId {
fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
- hcx.hash_reference_to_item(self.hir_id, hasher)
+ hcx.hash_reference_to_item(self.hir_id(), hasher)
}
}
impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for ImplItemId {
fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
- hcx.hash_reference_to_item(self.hir_id, hasher)
+ hcx.hash_reference_to_item(self.hir_id(), hasher)
}
}
impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for TraitItemId {
fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
- hcx.hash_reference_to_item(self.hir_id, hasher)
+ hcx.hash_reference_to_item(self.hir_id(), hasher)
}
}
@@ -130,11 +139,10 @@
impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for TraitItem<'_> {
fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
- let TraitItem { hir_id: _, ident, ref attrs, ref generics, ref kind, span } = *self;
+ let TraitItem { def_id: _, ident, ref generics, ref kind, span } = *self;
hcx.hash_hir_item_like(|hcx| {
ident.name.hash_stable(hcx, hasher);
- attrs.hash_stable(hcx, hasher);
generics.hash_stable(hcx, hasher);
kind.hash_stable(hcx, hasher);
span.hash_stable(hcx, hasher);
@@ -144,22 +152,13 @@
impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for ImplItem<'_> {
fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
- let ImplItem {
- hir_id: _,
- ident,
- ref vis,
- defaultness,
- ref attrs,
- ref generics,
- ref kind,
- span,
- } = *self;
+ let ImplItem { def_id: _, ident, ref vis, defaultness, ref generics, ref kind, span } =
+ *self;
hcx.hash_hir_item_like(|hcx| {
ident.name.hash_stable(hcx, hasher);
vis.hash_stable(hcx, hasher);
defaultness.hash_stable(hcx, hasher);
- attrs.hash_stable(hcx, hasher);
generics.hash_stable(hcx, hasher);
kind.hash_stable(hcx, hasher);
span.hash_stable(hcx, hasher);
@@ -167,14 +166,39 @@
}
}
-impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for Item<'_> {
+impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for ForeignItem<'_> {
fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
- let Item { ident, ref attrs, hir_id: _, ref kind, ref vis, span } = *self;
+ let ForeignItem { def_id: _, ident, ref kind, span, ref vis } = *self;
hcx.hash_hir_item_like(|hcx| {
ident.name.hash_stable(hcx, hasher);
- attrs.hash_stable(hcx, hasher);
kind.hash_stable(hcx, hasher);
+ span.hash_stable(hcx, hasher);
+ vis.hash_stable(hcx, hasher);
+ });
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for Item<'_> {
+ fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
+ let Item { ident, def_id: _, ref kind, ref vis, span } = *self;
+
+ hcx.hash_hir_item_like(|hcx| {
+ ident.name.hash_stable(hcx, hasher);
+ kind.hash_stable(hcx, hasher);
+ vis.hash_stable(hcx, hasher);
+ span.hash_stable(hcx, hasher);
+ });
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for MacroDef<'_> {
+ fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
+ let MacroDef { ident, def_id: _, ref ast, ref vis, span } = *self;
+
+ hcx.hash_hir_item_like(|hcx| {
+ ident.name.hash_stable(hcx, hasher);
+ ast.hash_stable(hcx, hasher);
vis.hash_stable(hcx, hasher);
span.hash_stable(hcx, hasher);
});
diff --git a/compiler/rustc_hir/src/target.rs b/compiler/rustc_hir/src/target.rs
index 6dbcfb9..473477b 100644
--- a/compiler/rustc_hir/src/target.rs
+++ b/compiler/rustc_hir/src/target.rs
@@ -54,6 +54,7 @@
ForeignTy,
GenericParam(GenericParamKind),
MacroDef,
+ Param,
}
impl Display for Target {
@@ -96,6 +97,7 @@
GenericParamKind::Const => "const parameter",
},
Target::MacroDef => "macro def",
+ Target::Param => "function param",
}
)
}
diff --git a/compiler/rustc_hir/src/tests.rs b/compiler/rustc_hir/src/tests.rs
new file mode 100644
index 0000000..2aafc6a
--- /dev/null
+++ b/compiler/rustc_hir/src/tests.rs
@@ -0,0 +1,39 @@
+use crate::definitions::{DefKey, DefPathData, DisambiguatedDefPathData};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_span::crate_disambiguator::CrateDisambiguator;
+use rustc_span::def_id::{DefPathHash, StableCrateId};
+
+#[test]
+fn def_path_hash_depends_on_crate_id() {
+ // This test makes sure that *both* halves of a DefPathHash depend on
+ // the crate-id of the defining crate. This is a desirable property
+ // because the crate-id can be more easily changed than the DefPath
+ // of an item, so, in the case of a crate-local DefPathHash collision,
+    // the user can simply "roll the dice again" for all DefPathHashes in
+ // the crate by changing the crate disambiguator (e.g. via bumping the
+ // crate's version number).
+
+ let d0 = CrateDisambiguator::from(Fingerprint::new(12, 34));
+ let d1 = CrateDisambiguator::from(Fingerprint::new(56, 78));
+
+ let h0 = mk_test_hash("foo", d0);
+ let h1 = mk_test_hash("foo", d1);
+
+ assert_ne!(h0.stable_crate_id(), h1.stable_crate_id());
+ assert_ne!(h0.local_hash(), h1.local_hash());
+
+ fn mk_test_hash(crate_name: &str, crate_disambiguator: CrateDisambiguator) -> DefPathHash {
+ let stable_crate_id = StableCrateId::new(crate_name, crate_disambiguator);
+ let parent_hash = DefPathHash::new(stable_crate_id, 0);
+
+ let key = DefKey {
+ parent: None,
+ disambiguated_data: DisambiguatedDefPathData {
+ data: DefPathData::CrateRoot,
+ disambiguator: 0,
+ },
+ };
+
+ key.compute_stable_hash(parent_hash)
+ }
+}
diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs
index 4595855..b37a3e1 100644
--- a/compiler/rustc_hir_pretty/src/lib.rs
+++ b/compiler/rustc_hir_pretty/src/lib.rs
@@ -16,6 +16,7 @@
use std::borrow::Cow;
use std::cell::Cell;
+use std::collections::BTreeMap;
use std::vec;
pub fn id_to_string(map: &dyn rustc_hir::intravisit::Map<'_>, hir_id: hir::HirId) -> String {
@@ -54,7 +55,7 @@
impl PpAnn for hir::Crate<'_> {
fn nested(&self, state: &mut State<'_>, nested: Nested) {
match nested {
- Nested::Item(id) => state.print_item(self.item(id.id)),
+ Nested::Item(id) => state.print_item(self.item(id)),
Nested::TraitItem(id) => state.print_trait_item(self.trait_item(id)),
Nested::ImplItem(id) => state.print_impl_item(self.impl_item(id)),
Nested::ForeignItem(id) => state.print_foreign_item(self.foreign_item(id)),
@@ -69,7 +70,7 @@
impl PpAnn for &dyn rustc_hir::intravisit::Map<'_> {
fn nested(&self, state: &mut State<'_>, nested: Nested) {
match nested {
- Nested::Item(id) => state.print_item(self.item(id.id)),
+ Nested::Item(id) => state.print_item(self.item(id)),
Nested::TraitItem(id) => state.print_trait_item(self.trait_item(id)),
Nested::ImplItem(id) => state.print_impl_item(self.impl_item(id)),
Nested::ForeignItem(id) => state.print_foreign_item(self.foreign_item(id)),
@@ -82,6 +83,7 @@
pub struct State<'a> {
pub s: pp::Printer,
comments: Option<Comments<'a>>,
+ attrs: &'a BTreeMap<hir::HirId, &'a [ast::Attribute]>,
ann: &'a (dyn PpAnn + 'a),
}
@@ -112,7 +114,7 @@
Node::Lifetime(a) => self.print_lifetime(&a),
Node::Visibility(a) => self.print_visibility(&a),
Node::GenericParam(_) => panic!("cannot print Node::GenericParam"),
- Node::Field(_) => panic!("cannot print StructField"),
+ Node::Field(_) => panic!("cannot print Node::Field"),
// These cases do not carry enough information in the
// `hir_map` to reconstruct their full structure for pretty
// printing.
@@ -163,12 +165,12 @@
input: String,
ann: &'a dyn PpAnn,
) -> String {
- let mut s = State::new_from_input(sm, filename, input, ann);
+ let mut s = State::new_from_input(sm, filename, input, &krate.attrs, ann);
// When printing the AST, we sometimes need to inject `#[no_std]` here.
// Since you can't compile the HIR, it's not necessary.
- s.print_mod(&krate.item.module, &krate.item.attrs);
+ s.print_mod(&krate.item.module, s.attrs(hir::CRATE_HIR_ID));
s.print_remaining_comments();
s.s.eof()
}
@@ -178,9 +180,19 @@
sm: &'a SourceMap,
filename: FileName,
input: String,
+ attrs: &'a BTreeMap<hir::HirId, &[ast::Attribute]>,
ann: &'a dyn PpAnn,
) -> State<'a> {
- State { s: pp::mk_printer(), comments: Some(Comments::new(sm, filename, input)), ann }
+ State {
+ s: pp::mk_printer(),
+ comments: Some(Comments::new(sm, filename, input)),
+ attrs,
+ ann,
+ }
+ }
+
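+    /// Returns the attributes recorded for `id`, or an empty slice if none were collected.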
+ fn attrs(&self, id: hir::HirId) -> &'a [ast::Attribute] {
+ self.attrs.get(&id).map_or(&[], |la| *la)
}
}
@@ -188,7 +200,8 @@
where
F: FnOnce(&mut State<'_>),
{
- let mut printer = State { s: pp::mk_printer(), comments: None, ann };
+ let mut printer =
+ State { s: pp::mk_printer(), comments: None, attrs: &BTreeMap::default(), ann };
f(&mut printer);
printer.s.eof()
}
@@ -392,12 +405,15 @@
&f.decl,
None,
&f.generic_params,
- &f.param_names[..],
+ f.param_names,
);
}
hir::TyKind::OpaqueDef(..) => self.s.word("/*impl Trait*/"),
hir::TyKind::Path(ref qpath) => self.print_qpath(qpath, false),
- hir::TyKind::TraitObject(bounds, ref lifetime) => {
+ hir::TyKind::TraitObject(bounds, ref lifetime, syntax) => {
+ if syntax == ast::TraitObjectSyntax::Dyn {
+ self.word_space("dyn");
+ }
let mut first = true;
for bound in bounds {
if first {
@@ -441,7 +457,7 @@
pub fn print_foreign_item(&mut self, item: &hir::ForeignItem<'_>) {
self.hardbreak_if_not_bol();
self.maybe_print_comment(item.span.lo());
- self.print_outer_attributes(&item.attrs);
+ self.print_outer_attributes(self.attrs(item.hir_id()));
match item.kind {
hir::ForeignItemKind::Fn(ref decl, ref arg_names, ref generics) => {
self.head("");
@@ -549,7 +565,8 @@
pub fn print_item(&mut self, item: &hir::Item<'_>) {
self.hardbreak_if_not_bol();
self.maybe_print_comment(item.span.lo());
- self.print_outer_attributes(&item.attrs);
+ let attrs = self.attrs(item.hir_id());
+ self.print_outer_attributes(attrs);
self.ann.pre(self, AnnNode::Item(item));
match item.kind {
hir::ItemKind::ExternCrate(orig_name) => {
@@ -634,14 +651,14 @@
self.print_ident(item.ident);
self.nbsp();
self.bopen();
- self.print_mod(_mod, &item.attrs);
+ self.print_mod(_mod, attrs);
self.bclose(item.span);
}
hir::ItemKind::ForeignMod { abi, items } => {
self.head("extern");
self.word_nbsp(abi.to_string());
self.bopen();
- self.print_inner_attributes(item.attrs);
+ self.print_inner_attributes(self.attrs(item.hir_id()));
for item in items {
self.ann.nested(self, Nested::ForeignItem(item.id));
}
@@ -725,7 +742,7 @@
self.s.space();
self.bopen();
- self.print_inner_attributes(&item.attrs);
+ self.print_inner_attributes(attrs);
for impl_item in items {
self.ann.nested(self, Nested::ImplItem(impl_item.id));
}
@@ -822,7 +839,7 @@
for v in variants {
self.space_if_not_bol();
self.maybe_print_comment(v.span.lo());
- self.print_outer_attributes(&v.attrs);
+ self.print_outer_attributes(self.attrs(v.id));
self.ibox(INDENT_UNIT);
self.print_variant(v);
self.s.word(",");
@@ -876,7 +893,7 @@
self.popen();
self.commasep(Inconsistent, struct_def.fields(), |s, field| {
s.maybe_print_comment(field.span.lo());
- s.print_outer_attributes(&field.attrs);
+ s.print_outer_attributes(s.attrs(field.hir_id));
s.print_visibility(&field.vis);
s.print_type(&field.ty)
});
@@ -898,7 +915,7 @@
for field in struct_def.fields() {
self.hardbreak_if_not_bol();
self.maybe_print_comment(field.span.lo());
- self.print_outer_attributes(&field.attrs);
+ self.print_outer_attributes(self.attrs(field.hir_id));
self.print_visibility(&field.vis);
self.print_ident(field.ident);
self.word_nbsp(":");
@@ -934,10 +951,10 @@
}
pub fn print_trait_item(&mut self, ti: &hir::TraitItem<'_>) {
- self.ann.pre(self, AnnNode::SubItem(ti.hir_id));
+ self.ann.pre(self, AnnNode::SubItem(ti.hir_id()));
self.hardbreak_if_not_bol();
self.maybe_print_comment(ti.span.lo());
- self.print_outer_attributes(&ti.attrs);
+ self.print_outer_attributes(self.attrs(ti.hir_id()));
match ti.kind {
hir::TraitItemKind::Const(ref ty, default) => {
let vis =
@@ -969,14 +986,14 @@
);
}
}
- self.ann.post(self, AnnNode::SubItem(ti.hir_id))
+ self.ann.post(self, AnnNode::SubItem(ti.hir_id()))
}
pub fn print_impl_item(&mut self, ii: &hir::ImplItem<'_>) {
- self.ann.pre(self, AnnNode::SubItem(ii.hir_id));
+ self.ann.pre(self, AnnNode::SubItem(ii.hir_id()));
self.hardbreak_if_not_bol();
self.maybe_print_comment(ii.span.lo());
- self.print_outer_attributes(&ii.attrs);
+ self.print_outer_attributes(self.attrs(ii.hir_id()));
self.print_defaultness(ii.defaultness);
match ii.kind {
@@ -995,7 +1012,7 @@
self.print_associated_type(ii.ident, &ii.generics, None, Some(ty));
}
}
- self.ann.post(self, AnnNode::SubItem(ii.hir_id))
+ self.ann.post(self, AnnNode::SubItem(ii.hir_id()))
}
pub fn print_local(&mut self, init: Option<&hir::Expr<'_>>, decl: impl Fn(&mut Self)) {
@@ -1193,14 +1210,14 @@
fn print_expr_struct(
&mut self,
qpath: &hir::QPath<'_>,
- fields: &[hir::Field<'_>],
+ fields: &[hir::ExprField<'_>],
wth: &Option<&hir::Expr<'_>>,
) {
self.print_qpath(qpath, true);
self.s.word("{");
self.commasep_cmnt(
Consistent,
- &fields[..],
+ fields,
|s, field| {
s.ibox(INDENT_UNIT);
if !field.is_shorthand {
@@ -1321,7 +1338,7 @@
pub fn print_expr(&mut self, expr: &hir::Expr<'_>) {
self.maybe_print_comment(expr.span.lo());
- self.print_outer_attributes(&expr.attrs);
+ self.print_outer_attributes(self.attrs(expr.hir_id));
self.ibox(INDENT_UNIT);
self.ann.pre(self, AnnNode::Expr(expr));
match expr.kind {
@@ -2020,20 +2037,20 @@
}
pub fn print_param(&mut self, arg: &hir::Param<'_>) {
- self.print_outer_attributes(&arg.attrs);
+ self.print_outer_attributes(self.attrs(arg.hir_id));
self.print_pat(&arg.pat);
}
pub fn print_arm(&mut self, arm: &hir::Arm<'_>) {
// I have no idea why this check is necessary, but here it
// is :(
- if arm.attrs.is_empty() {
+ if self.attrs(arm.hir_id).is_empty() {
self.s.space();
}
self.cbox(INDENT_UNIT);
self.ann.pre(self, AnnNode::Arm(arm));
self.ibox(0);
- self.print_outer_attributes(&arm.attrs);
+ self.print_outer_attributes(&self.attrs(arm.hir_id));
self.print_pat(&arm.pat);
self.s.space();
if let Some(ref g) = arm.guard {
diff --git a/compiler/rustc_incremental/src/assert_dep_graph.rs b/compiler/rustc_incremental/src/assert_dep_graph.rs
index f39a92b..f1f69f1 100644
--- a/compiler/rustc_incremental/src/assert_dep_graph.rs
+++ b/compiler/rustc_incremental/src/assert_dep_graph.rs
@@ -68,7 +68,7 @@
let (if_this_changed, then_this_would_need) = {
let mut visitor =
IfThisChanged { tcx, if_this_changed: vec![], then_this_would_need: vec![] };
- visitor.process_attrs(hir::CRATE_HIR_ID, &tcx.hir().krate().item.attrs);
+ visitor.process_attrs(hir::CRATE_HIR_ID);
tcx.hir().krate().visit_all_item_likes(&mut visitor.as_deep_visitor());
(visitor.if_this_changed, visitor.then_this_would_need)
};
@@ -113,9 +113,10 @@
value
}
- fn process_attrs(&mut self, hir_id: hir::HirId, attrs: &[ast::Attribute]) {
+ fn process_attrs(&mut self, hir_id: hir::HirId) {
let def_id = self.tcx.hir().local_def_id(hir_id);
let def_path_hash = self.tcx.def_path_hash(def_id.to_def_id());
+ let attrs = self.tcx.hir().attrs(hir_id);
for attr in attrs {
if self.tcx.sess.check_name(attr, sym::rustc_if_this_changed) {
let dep_node_interned = self.argument(attr);
@@ -167,23 +168,23 @@
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- self.process_attrs(item.hir_id, &item.attrs);
+ self.process_attrs(item.hir_id());
intravisit::walk_item(self, item);
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
- self.process_attrs(trait_item.hir_id, &trait_item.attrs);
+ self.process_attrs(trait_item.hir_id());
intravisit::walk_trait_item(self, trait_item);
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
- self.process_attrs(impl_item.hir_id, &impl_item.attrs);
+ self.process_attrs(impl_item.hir_id());
intravisit::walk_impl_item(self, impl_item);
}
- fn visit_struct_field(&mut self, s: &'tcx hir::StructField<'tcx>) {
- self.process_attrs(s.hir_id, &s.attrs);
- intravisit::walk_struct_field(self, s);
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
+ self.process_attrs(s.hir_id);
+ intravisit::walk_field_def(self, s);
}
}
diff --git a/compiler/rustc_incremental/src/assert_module_sources.rs b/compiler/rustc_incremental/src/assert_module_sources.rs
index 17d8ac9..5fb2c1c 100644
--- a/compiler/rustc_incremental/src/assert_module_sources.rs
+++ b/compiler/rustc_incremental/src/assert_module_sources.rs
@@ -44,7 +44,7 @@
let ams = AssertModuleSource { tcx, available_cgus };
- for attr in tcx.hir().krate().item.attrs {
+ for attr in tcx.hir().attrs(rustc_hir::CRATE_HIR_ID) {
ams.check_attr(attr);
}
})
diff --git a/compiler/rustc_incremental/src/persist/dirty_clean.rs b/compiler/rustc_incremental/src/persist/dirty_clean.rs
index e1c6005..0b544b8 100644
--- a/compiler/rustc_incremental/src/persist/dirty_clean.rs
+++ b/compiler/rustc_incremental/src/persist/dirty_clean.rs
@@ -17,7 +17,7 @@
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
+use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit;
use rustc_hir::itemlikevisit::ItemLikeVisitor;
use rustc_hir::Node as HirNode;
@@ -168,7 +168,7 @@
// Note that we cannot use the existing "unused attribute"-infrastructure
// here, since that is running before codegen. This is also the reason why
// all codegen-specific attributes are `AssumedUsed` in rustc_ast::feature_gate.
- all_attrs.report_unchecked_attrs(&dirty_clean_visitor.checked_attrs);
+ all_attrs.report_unchecked_attrs(dirty_clean_visitor.checked_attrs);
})
}
@@ -179,7 +179,7 @@
impl DirtyCleanVisitor<'tcx> {
/// Possibly "deserialize" the attribute into a clean/dirty assertion
- fn assertion_maybe(&mut self, item_id: hir::HirId, attr: &Attribute) -> Option<Assertion> {
+ fn assertion_maybe(&mut self, item_id: LocalDefId, attr: &Attribute) -> Option<Assertion> {
let is_clean = if self.tcx.sess.check_name(attr, sym::rustc_dirty) {
false
} else if self.tcx.sess.check_name(attr, sym::rustc_clean) {
@@ -207,7 +207,7 @@
/// Gets the "auto" assertion on pre-validated attr, along with the `except` labels.
fn assertion_auto(
&mut self,
- item_id: hir::HirId,
+ item_id: LocalDefId,
attr: &Attribute,
is_clean: bool,
) -> Assertion {
@@ -253,8 +253,9 @@
/// Return all DepNode labels that should be asserted for this item.
/// index=0 is the "name" used for error messages
- fn auto_labels(&mut self, item_id: hir::HirId, attr: &Attribute) -> (&'static str, Labels) {
- let node = self.tcx.hir().get(item_id);
+ fn auto_labels(&mut self, item_id: LocalDefId, attr: &Attribute) -> (&'static str, Labels) {
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(item_id);
+ let node = self.tcx.hir().get(hir_id);
let (name, labels) = match node {
HirNode::Item(item) => {
match item.kind {
@@ -430,18 +431,17 @@
}
}
- fn check_item(&mut self, item_id: hir::HirId, item_span: Span) {
- let def_id = self.tcx.hir().local_def_id(item_id);
- for attr in self.tcx.get_attrs(def_id.to_def_id()).iter() {
+ fn check_item(&mut self, item_id: LocalDefId, item_span: Span) {
+ for attr in self.tcx.get_attrs(item_id.to_def_id()).iter() {
let assertion = match self.assertion_maybe(item_id, attr) {
Some(a) => a,
None => continue,
};
self.checked_attrs.insert(attr.id);
- for dep_node in self.dep_nodes(&assertion.clean, def_id.to_def_id()) {
+ for dep_node in self.dep_nodes(&assertion.clean, item_id.to_def_id()) {
self.assert_clean(item_span, dep_node);
}
- for dep_node in self.dep_nodes(&assertion.dirty, def_id.to_def_id()) {
+ for dep_node in self.dep_nodes(&assertion.dirty, item_id.to_def_id()) {
self.assert_dirty(item_span, dep_node);
}
}
@@ -450,19 +450,19 @@
impl ItemLikeVisitor<'tcx> for DirtyCleanVisitor<'tcx> {
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- self.check_item(item.hir_id, item.span);
+ self.check_item(item.def_id, item.span);
}
fn visit_trait_item(&mut self, item: &hir::TraitItem<'_>) {
- self.check_item(item.hir_id, item.span);
+ self.check_item(item.def_id, item.span);
}
fn visit_impl_item(&mut self, item: &hir::ImplItem<'_>) {
- self.check_item(item.hir_id, item.span);
+ self.check_item(item.def_id, item.span);
}
fn visit_foreign_item(&mut self, item: &hir::ForeignItem<'_>) {
- self.check_item(item.hir_id, item.span);
+ self.check_item(item.def_id, item.span);
}
}
@@ -535,13 +535,14 @@
false
}
- fn report_unchecked_attrs(&self, checked_attrs: &FxHashSet<ast::AttrId>) {
+ fn report_unchecked_attrs(&self, mut checked_attrs: FxHashSet<ast::AttrId>) {
for attr in &self.found_attrs {
if !checked_attrs.contains(&attr.id) {
self.tcx.sess.span_err(
attr.span,
"found unchecked `#[rustc_dirty]` / `#[rustc_clean]` attribute",
);
+ checked_attrs.insert(attr.id);
}
}
}
@@ -554,7 +555,7 @@
intravisit::NestedVisitorMap::All(self.tcx.hir())
}
- fn visit_attribute(&mut self, attr: &'tcx Attribute) {
+ fn visit_attribute(&mut self, _: hir::HirId, attr: &'tcx Attribute) {
if self.is_active_attr(attr) {
self.found_attrs.push(attr);
}
diff --git a/compiler/rustc_incremental/src/persist/file_format.rs b/compiler/rustc_incremental/src/persist/file_format.rs
index 087f83c..374a9eb 100644
--- a/compiler/rustc_incremental/src/persist/file_format.rs
+++ b/compiler/rustc_incremental/src/persist/file_format.rs
@@ -109,7 +109,7 @@
debug!("read_file: {}", message);
if report_incremental_info {
- println!(
+ eprintln!(
"[incremental] ignoring cache artifact `{}`: {}",
file.file_name().unwrap().to_string_lossy(),
message
diff --git a/compiler/rustc_incremental/src/persist/fs.rs b/compiler/rustc_incremental/src/persist/fs.rs
index 7a1976b..c7a6c11 100644
--- a/compiler/rustc_incremental/src/persist/fs.rs
+++ b/compiler/rustc_incremental/src/persist/fs.rs
@@ -440,12 +440,12 @@
}
if sess.opts.debugging_opts.incremental_info {
- println!(
+ eprintln!(
"[incremental] session directory: \
{} files hard-linked",
files_linked
);
- println!(
+ eprintln!(
"[incremental] session directory: \
{} files copied",
files_copied
diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs
index 0add0c5..2b5649b 100644
--- a/compiler/rustc_incremental/src/persist/load.rs
+++ b/compiler/rustc_incremental/src/persist/load.rs
@@ -170,7 +170,7 @@
if prev_commandline_args_hash != expected_hash {
if report_incremental_info {
- println!(
+ eprintln!(
"[incremental] completely ignoring cache because of \
differing commandline arguments"
);
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs
index eaef4c7..995034e 100644
--- a/compiler/rustc_index/src/lib.rs
+++ b/compiler/rustc_index/src/lib.rs
@@ -8,3 +8,7 @@
pub mod bit_set;
pub mod vec;
+
+// FIXME(#56935): Work around ICEs during cross-compilation.
+#[allow(unused)]
+extern crate rustc_macros;
diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs
index 2420f82..3882818 100644
--- a/compiler/rustc_index/src/vec.rs
+++ b/compiler/rustc_index/src/vec.rs
@@ -111,6 +111,7 @@
}
impl Clone for $type {
+ #[inline]
fn clone(&self) -> Self {
*self
}
@@ -694,9 +695,7 @@
pub fn convert_index_type<Ix: Idx>(self) -> IndexVec<Ix, T> {
IndexVec { raw: self.raw, _marker: PhantomData }
}
-}
-impl<I: Idx, T: Clone> IndexVec<I, T> {
/// Grows the index vector so that it contains an entry for
/// `elem`; if that is already true, then has no
/// effect. Otherwise, inserts new values as needed by invoking
@@ -710,17 +709,19 @@
}
#[inline]
- pub fn resize(&mut self, new_len: usize, value: T) {
- self.raw.resize(new_len, value)
- }
-
- #[inline]
pub fn resize_to_elem(&mut self, elem: I, fill_value: impl FnMut() -> T) {
let min_new_len = elem.index() + 1;
self.raw.resize_with(min_new_len, fill_value);
}
}
+impl<I: Idx, T: Clone> IndexVec<I, T> {
+ #[inline]
+ pub fn resize(&mut self, new_len: usize, value: T) {
+ self.raw.resize(new_len, value)
+ }
+}
+
impl<I: Idx, T: Ord> IndexVec<I, T> {
#[inline]
pub fn binary_search(&self, value: &T) -> Result<I, I> {
diff --git a/compiler/rustc_infer/Cargo.toml b/compiler/rustc_infer/Cargo.toml
index 5dba410..a75ad7b 100644
--- a/compiler/rustc_infer/Cargo.toml
+++ b/compiler/rustc_infer/Cargo.toml
@@ -20,5 +20,5 @@
rustc_serialize = { path = "../rustc_serialize" }
rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_ast = { path = "../rustc_ast" }
diff --git a/compiler/rustc_infer/src/infer/at.rs b/compiler/rustc_infer/src/infer/at.rs
index a7749d3..11ee8fb 100644
--- a/compiler/rustc_infer/src/infer/at.rs
+++ b/compiler/rustc_infer/src/infer/at.rs
@@ -55,6 +55,7 @@
pub trait ToTrace<'tcx>: Relate<'tcx> + Copy {
fn to_trace(
+ tcx: TyCtxt<'tcx>,
cause: &ObligationCause<'tcx>,
a_is_expected: bool,
a: Self,
@@ -178,7 +179,7 @@
where
T: ToTrace<'tcx>,
{
- let trace = ToTrace::to_trace(self.cause, a_is_expected, a, b);
+ let trace = ToTrace::to_trace(self.infcx.tcx, self.cause, a_is_expected, a, b);
Trace { at: self, trace, a_is_expected }
}
}
@@ -251,6 +252,7 @@
impl<'tcx> ToTrace<'tcx> for Ty<'tcx> {
fn to_trace(
+ _: TyCtxt<'tcx>,
cause: &ObligationCause<'tcx>,
a_is_expected: bool,
a: Self,
@@ -262,6 +264,7 @@
impl<'tcx> ToTrace<'tcx> for ty::Region<'tcx> {
fn to_trace(
+ _: TyCtxt<'tcx>,
cause: &ObligationCause<'tcx>,
a_is_expected: bool,
a: Self,
@@ -273,6 +276,7 @@
impl<'tcx> ToTrace<'tcx> for &'tcx Const<'tcx> {
fn to_trace(
+ _: TyCtxt<'tcx>,
cause: &ObligationCause<'tcx>,
a_is_expected: bool,
a: Self,
@@ -284,6 +288,7 @@
impl<'tcx> ToTrace<'tcx> for ty::TraitRef<'tcx> {
fn to_trace(
+ _: TyCtxt<'tcx>,
cause: &ObligationCause<'tcx>,
a_is_expected: bool,
a: Self,
@@ -298,6 +303,7 @@
impl<'tcx> ToTrace<'tcx> for ty::PolyTraitRef<'tcx> {
fn to_trace(
+ _: TyCtxt<'tcx>,
cause: &ObligationCause<'tcx>,
a_is_expected: bool,
a: Self,
@@ -309,3 +315,20 @@
}
}
}
+
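+// A projection/projection mismatch is traced by materializing both sides as
+// `Ty`s via `mk_projection`, so the usual type-mismatch reporting applies.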
+impl<'tcx> ToTrace<'tcx> for ty::ProjectionTy<'tcx> {
+ fn to_trace(
+ tcx: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ let a_ty = tcx.mk_projection(a.item_def_id, a.substs);
+ let b_ty = tcx.mk_projection(b.item_def_id, b.substs);
+ TypeTrace {
+ cause: cause.clone(),
+ values: Types(ExpectedFound::new(a_is_expected, a_ty, b_ty)),
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/canonical/query_response.rs b/compiler/rustc_infer/src/infer/canonical/query_response.rs
index 1546c1e..2ec9b9e 100644
--- a/compiler/rustc_infer/src/infer/canonical/query_response.rs
+++ b/compiler/rustc_infer/src/infer/canonical/query_response.rs
@@ -507,12 +507,7 @@
// Unify the original value for each variable with the value
// taken from `query_response` (after applying `result_subst`).
- Ok(self.unify_canonical_vars(
- cause,
- param_env,
- original_values,
- substituted_query_response,
- )?)
+ self.unify_canonical_vars(cause, param_env, original_values, substituted_query_response)
}
/// Converts the region constraints resulting from a query into an
@@ -639,6 +634,10 @@
}
impl<'tcx> TypeRelatingDelegate<'tcx> for QueryTypeRelatingDelegate<'_, 'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+
fn create_next_universe(&mut self) -> ty::UniverseIndex {
self.infcx.create_next_universe()
}
diff --git a/compiler/rustc_infer/src/infer/combine.rs b/compiler/rustc_infer/src/infer/combine.rs
index e034ac5..5e11932 100644
--- a/compiler/rustc_infer/src/infer/combine.rs
+++ b/compiler/rustc_infer/src/infer/combine.rs
@@ -221,6 +221,7 @@
/// As `3 + 4` contains `N` in its substs, this must not succeed.
///
/// See `src/test/ui/const-generics/occurs-check/` for more examples where this is relevant.
+ #[instrument(level = "debug", skip(self))]
fn unify_const_variable(
&self,
param_env: ty::ParamEnv<'tcx>,
diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
index 84aa19a..eeff48a 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/mod.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
@@ -50,6 +50,7 @@
use super::{InferCtxt, RegionVariableOrigin, SubregionOrigin, TypeTrace, ValuePairs};
use crate::infer;
+use crate::infer::error_reporting::nice_region_error::find_anon_type::find_anon_type;
use crate::traits::error_reporting::report_object_safety_error;
use crate::traits::{
IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode,
@@ -179,7 +180,14 @@
}
ty::ReFree(ref fr) => match fr.bound_region {
ty::BrAnon(idx) => {
- (format!("the anonymous lifetime #{} defined on", idx + 1), tcx.hir().span(node))
+ if let Some((ty, _)) = find_anon_type(tcx, region, &fr.bound_region) {
+ ("the anonymous lifetime defined on".to_string(), ty.span)
+ } else {
+ (
+ format!("the anonymous lifetime #{} defined on", idx + 1),
+ tcx.hir().span(node),
+ )
+ }
}
_ => (
format!("the lifetime `{}` as defined on", region),
@@ -1484,13 +1492,16 @@
for (key, values) in types.iter() {
let count = values.len();
let kind = key.descr();
+ let mut returned_async_output_error = false;
for sp in values {
err.span_label(
*sp,
format!(
"{}{}{} {}{}",
- if sp.is_desugaring(DesugaringKind::Async) {
- "the `Output` of this `async fn`'s "
+ if sp.is_desugaring(DesugaringKind::Async)
+ && !returned_async_output_error
+ {
+ "checked the `Output` of this `async fn`, "
} else if count == 1 {
"the "
} else {
@@ -1502,6 +1513,12 @@
pluralize!(count),
),
);
+ if sp.is_desugaring(DesugaringKind::Async)
+ && returned_async_output_error == false
+ {
+ err.note("while checking the return type of the `async fn`");
+ returned_async_output_error = true;
+ }
}
}
}
@@ -1509,7 +1526,7 @@
impl<'tcx> ty::fold::TypeVisitor<'tcx> for OpaqueTypesVisitor<'tcx> {
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- if let Some((kind, def_id)) = TyCategory::from_ty(t) {
+ if let Some((kind, def_id)) = TyCategory::from_ty(self.tcx, t) {
let span = self.tcx.def_span(def_id);
// Avoid cluttering the output when the "found" and error span overlap:
//
@@ -1582,11 +1599,11 @@
};
if let Some((expected, found)) = expected_found {
let expected_label = match exp_found {
- Mismatch::Variable(ef) => ef.expected.prefix_string(),
+ Mismatch::Variable(ef) => ef.expected.prefix_string(self.tcx),
Mismatch::Fixed(s) => s.into(),
};
let found_label = match exp_found {
- Mismatch::Variable(ef) => ef.found.prefix_string(),
+ Mismatch::Variable(ef) => ef.found.prefix_string(self.tcx),
Mismatch::Fixed(s) => s.into(),
};
let exp_found = match exp_found {
@@ -2248,13 +2265,18 @@
"...",
);
if let Some(infer::RelateParamBound(_, t)) = origin {
+ let return_impl_trait = self
+ .in_progress_typeck_results
+ .map(|typeck_results| typeck_results.borrow().hir_owner)
+ .and_then(|owner| self.tcx.return_type_impl_trait(owner))
+ .is_some();
let t = self.resolve_vars_if_possible(t);
match t.kind() {
// We've got:
// fn get_later<G, T>(g: G, dest: &mut T) -> impl FnOnce() + '_
// suggest:
// fn get_later<'a, G: 'a, T>(g: G, dest: &mut T) -> impl FnOnce() + '_ + 'a
- ty::Closure(_, _substs) | ty::Opaque(_, _substs) => {
+ ty::Closure(_, _substs) | ty::Opaque(_, _substs) if return_impl_trait => {
new_binding_suggestion(&mut err, type_param_span, bound_kind);
}
_ => {
@@ -2484,7 +2506,7 @@
pub enum TyCategory {
Closure,
Opaque,
- Generator,
+ Generator(hir::GeneratorKind),
Foreign,
}
@@ -2493,16 +2515,18 @@
match self {
Self::Closure => "closure",
Self::Opaque => "opaque type",
- Self::Generator => "generator",
+ Self::Generator(gk) => gk.descr(),
Self::Foreign => "foreign type",
}
}
- pub fn from_ty(ty: Ty<'_>) -> Option<(Self, DefId)> {
+ pub fn from_ty(tcx: TyCtxt<'_>, ty: Ty<'_>) -> Option<(Self, DefId)> {
match *ty.kind() {
ty::Closure(def_id, _) => Some((Self::Closure, def_id)),
ty::Opaque(def_id, _) => Some((Self::Opaque, def_id)),
- ty::Generator(def_id, ..) => Some((Self::Generator, def_id)),
+ ty::Generator(def_id, ..) => {
+ Some((Self::Generator(tcx.generator_kind(def_id).unwrap()), def_id))
+ }
ty::Foreign(def_id) => Some((Self::Foreign, def_id)),
_ => None,
}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
index bd43d3c..d533e26 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
@@ -383,7 +383,7 @@
InferenceDiagnosticsData {
name: s,
span: None,
- kind: UnderspecifiedArgKind::Type { prefix: ty.prefix_string() },
+ kind: UnderspecifiedArgKind::Type { prefix: ty.prefix_string(self.tcx) },
parent: None,
}
}
@@ -671,7 +671,7 @@
if !impl_candidates.is_empty() && e.span.contains(span) {
if let Some(expr) = exprs.first() {
if let ExprKind::Path(hir::QPath::Resolved(_, path)) = expr.kind {
- if let [path_segment] = &path.segments[..] {
+ if let [path_segment] = path.segments {
let candidate_len = impl_candidates.len();
let suggestions = impl_candidates.iter().map(|candidate| {
format!(
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
index cdd68d8..1b35c40 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
@@ -1,6 +1,7 @@
//! Error Reporting for Anonymous Region Lifetime Errors
//! where both the regions are anonymous.
+use crate::infer::error_reporting::nice_region_error::find_anon_type::find_anon_type;
use crate::infer::error_reporting::nice_region_error::util::AnonymousParamInfo;
use crate::infer::error_reporting::nice_region_error::NiceRegionError;
use crate::infer::lexical_region_resolve::RegionResolutionError;
@@ -66,9 +67,9 @@
let scope_def_id_sub = anon_reg_sub.def_id;
let bregion_sub = anon_reg_sub.boundregion;
- let ty_sup = self.find_anon_type(sup, &bregion_sup)?;
+ let ty_sup = find_anon_type(self.tcx(), sup, &bregion_sup)?;
- let ty_sub = self.find_anon_type(sub, &bregion_sub)?;
+ let ty_sub = find_anon_type(self.tcx(), sub, &bregion_sub)?;
debug!(
"try_report_anon_anon_conflict: found_param1={:?} sup={:?} br1={:?}",
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs
index b014b98..35b9bc9 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs
@@ -1,4 +1,3 @@
-use crate::infer::error_reporting::nice_region_error::NiceRegionError;
use rustc_hir as hir;
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_hir::Node;
@@ -6,69 +5,66 @@
use rustc_middle::middle::resolve_lifetime as rl;
use rustc_middle::ty::{self, Region, TyCtxt};
-impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
- /// This function calls the `visit_ty` method for the parameters
- /// corresponding to the anonymous regions. The `nested_visitor.found_type`
- /// contains the anonymous type.
- ///
- /// # Arguments
- /// region - the anonymous region corresponding to the anon_anon conflict
- /// br - the bound region corresponding to the above region which is of type `BrAnon(_)`
- ///
- /// # Example
- /// ```
- /// fn foo(x: &mut Vec<&u8>, y: &u8)
- /// { x.push(y); }
- /// ```
- /// The function returns the nested type corresponding to the anonymous region
- /// for e.g., `&u8` and Vec<`&u8`.
- pub(super) fn find_anon_type(
- &self,
- region: Region<'tcx>,
- br: &ty::BoundRegionKind,
- ) -> Option<(&hir::Ty<'tcx>, &hir::FnDecl<'tcx>)> {
- if let Some(anon_reg) = self.tcx().is_suitable_region(region) {
- let hir_id = self.tcx().hir().local_def_id_to_hir_id(anon_reg.def_id);
- let fndecl = match self.tcx().hir().get(hir_id) {
- Node::Item(&hir::Item { kind: hir::ItemKind::Fn(ref m, ..), .. })
- | Node::TraitItem(&hir::TraitItem {
- kind: hir::TraitItemKind::Fn(ref m, ..),
- ..
- })
- | Node::ImplItem(&hir::ImplItem {
- kind: hir::ImplItemKind::Fn(ref m, ..), ..
- }) => &m.decl,
- _ => return None,
- };
-
- fndecl
- .inputs
- .iter()
- .find_map(|arg| self.find_component_for_bound_region(arg, br))
- .map(|ty| (ty, &**fndecl))
- } else {
- None
- }
- }
-
- // This method creates a FindNestedTypeVisitor which returns the type corresponding
- // to the anonymous region.
- fn find_component_for_bound_region(
- &self,
- arg: &'tcx hir::Ty<'tcx>,
- br: &ty::BoundRegionKind,
- ) -> Option<&'tcx hir::Ty<'tcx>> {
- let mut nested_visitor = FindNestedTypeVisitor {
- tcx: self.tcx(),
- bound_region: *br,
- found_type: None,
- current_index: ty::INNERMOST,
+/// This function calls the `visit_ty` method for the parameters
+/// corresponding to the anonymous regions. The `nested_visitor.found_type`
+/// contains the anonymous type.
+///
+/// # Arguments
+/// region - the anonymous region corresponding to the anon_anon conflict
+/// br - the bound region corresponding to the above region which is of type `BrAnon(_)`
+///
+/// # Example
+/// ```
+/// fn foo(x: &mut Vec<&u8>, y: &u8)
+/// { x.push(y); }
+/// ```
+/// The function returns the nested type corresponding to the anonymous region
+/// for e.g., `&u8` and `Vec<&u8>`.
+pub(crate) fn find_anon_type(
+ tcx: TyCtxt<'tcx>,
+ region: Region<'tcx>,
+ br: &ty::BoundRegionKind,
+) -> Option<(&'tcx hir::Ty<'tcx>, &'tcx hir::FnDecl<'tcx>)> {
+ if let Some(anon_reg) = tcx.is_suitable_region(region) {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(anon_reg.def_id);
+ let fndecl = match tcx.hir().get(hir_id) {
+ Node::Item(&hir::Item { kind: hir::ItemKind::Fn(ref m, ..), .. })
+ | Node::TraitItem(&hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(ref m, ..), ..
+ })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(ref m, ..), .. }) => {
+ &m.decl
+ }
+ _ => return None,
};
- nested_visitor.visit_ty(arg);
- nested_visitor.found_type
+
+ fndecl
+ .inputs
+ .iter()
+ .find_map(|arg| find_component_for_bound_region(tcx, arg, br))
+ .map(|ty| (ty, &**fndecl))
+ } else {
+ None
}
}
+// This function creates a FindNestedTypeVisitor which returns the type corresponding
+// to the anonymous region.
+fn find_component_for_bound_region(
+ tcx: TyCtxt<'tcx>,
+ arg: &'tcx hir::Ty<'tcx>,
+ br: &ty::BoundRegionKind,
+) -> Option<&'tcx hir::Ty<'tcx>> {
+ let mut nested_visitor = FindNestedTypeVisitor {
+ tcx,
+ bound_region: *br,
+ found_type: None,
+ current_index: ty::INNERMOST,
+ };
+ nested_visitor.visit_ty(arg);
+ nested_visitor.found_type
+}
+
// The FindNestedTypeVisitor captures the corresponding `hir::Ty` of the
// anonymous region. The example above would lead to a conflict between
// the two anonymous lifetimes for &u8 in x and y respectively. This visitor
@@ -103,7 +99,7 @@
return;
}
- hir::TyKind::TraitObject(bounds, _) => {
+ hir::TyKind::TraitObject(bounds, ..) => {
for bound in bounds {
self.current_index.shift_in(1);
self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs
index cc8f181..e204366 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs
@@ -6,7 +6,7 @@
use rustc_span::source_map::Span;
mod different_lifetimes;
-mod find_anon_type;
+pub mod find_anon_type;
mod named_anon_conflict;
mod placeholder_error;
mod static_impl_trait;
@@ -43,7 +43,7 @@
self.infcx.tcx
}
- pub fn try_report_from_nll(&self) -> Option<DiagnosticBuilder<'cx>> {
+ pub fn try_report_from_nll(&self) -> Option<DiagnosticBuilder<'tcx>> {
// Due to the improved diagnostics returned by the MIR borrow checker, only a subset of
// the nice region errors are required when running under the MIR borrow checker.
self.try_report_named_anon_conflict().or_else(|| self.try_report_placeholder_conflict())
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
index e3c613b..2f3c0d6 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
@@ -1,5 +1,6 @@
//! Error Reporting for Anonymous Region Lifetime Errors
//! where one region is named and the other is anonymous.
+use crate::infer::error_reporting::nice_region_error::find_anon_type::find_anon_type;
use crate::infer::error_reporting::nice_region_error::NiceRegionError;
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder};
use rustc_hir::intravisit::Visitor;
@@ -9,7 +10,7 @@
impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
/// When given a `ConcreteFailure` for a function with parameters containing a named region and
     /// an anonymous region, emit a descriptive diagnostic error.
- pub(super) fn try_report_named_anon_conflict(&self) -> Option<DiagnosticBuilder<'a>> {
+ pub(super) fn try_report_named_anon_conflict(&self) -> Option<DiagnosticBuilder<'tcx>> {
let (span, sub, sup) = self.regions()?;
debug!(
@@ -74,7 +75,7 @@
return None;
}
- if let Some((_, fndecl)) = self.find_anon_type(anon, &br) {
+ if let Some((_, fndecl)) = find_anon_type(self.tcx(), anon, &br) {
if self.is_self_anon(is_first, scope_def_id) {
return None;
}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
index e8e0326..4aecc2f 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
@@ -16,7 +16,7 @@
impl NiceRegionError<'me, 'tcx> {
/// When given a `ConcreteFailure` for a function with arguments containing a named region and
/// an anonymous region, emit a descriptive diagnostic error.
- pub(super) fn try_report_placeholder_conflict(&self) -> Option<DiagnosticBuilder<'me>> {
+ pub(super) fn try_report_placeholder_conflict(&self) -> Option<DiagnosticBuilder<'tcx>> {
match &self.error {
///////////////////////////////////////////////////////////////////////////
// NB. The ordering of cases in this match is very
@@ -30,157 +30,153 @@
Some(RegionResolutionError::SubSupConflict(
vid,
_,
- SubregionOrigin::Subtype(box TypeTrace {
- cause,
- values: ValuePairs::TraitRefs(ExpectedFound { expected, found }),
- }),
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
sub_placeholder @ ty::RePlaceholder(_),
_,
sup_placeholder @ ty::RePlaceholder(_),
- )) if expected.def_id == found.def_id => Some(self.try_report_placeholders_trait(
+ )) => self.try_report_trait_placeholder_mismatch(
Some(self.tcx().mk_region(ty::ReVar(*vid))),
cause,
Some(sub_placeholder),
Some(sup_placeholder),
- expected.def_id,
- expected.substs,
- found.substs,
- )),
+ values,
+ ),
Some(RegionResolutionError::SubSupConflict(
vid,
_,
- SubregionOrigin::Subtype(box TypeTrace {
- cause,
- values: ValuePairs::TraitRefs(ExpectedFound { expected, found }),
- }),
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
sub_placeholder @ ty::RePlaceholder(_),
_,
_,
- )) if expected.def_id == found.def_id => Some(self.try_report_placeholders_trait(
+ )) => self.try_report_trait_placeholder_mismatch(
Some(self.tcx().mk_region(ty::ReVar(*vid))),
cause,
Some(sub_placeholder),
None,
- expected.def_id,
- expected.substs,
- found.substs,
- )),
+ values,
+ ),
Some(RegionResolutionError::SubSupConflict(
vid,
_,
- SubregionOrigin::Subtype(box TypeTrace {
- cause,
- values: ValuePairs::TraitRefs(ExpectedFound { expected, found }),
- }),
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
_,
_,
sup_placeholder @ ty::RePlaceholder(_),
- )) if expected.def_id == found.def_id => Some(self.try_report_placeholders_trait(
+ )) => self.try_report_trait_placeholder_mismatch(
Some(self.tcx().mk_region(ty::ReVar(*vid))),
cause,
None,
Some(*sup_placeholder),
- expected.def_id,
- expected.substs,
- found.substs,
- )),
+ values,
+ ),
Some(RegionResolutionError::SubSupConflict(
vid,
_,
_,
_,
- SubregionOrigin::Subtype(box TypeTrace {
- cause,
- values: ValuePairs::TraitRefs(ExpectedFound { expected, found }),
- }),
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
sup_placeholder @ ty::RePlaceholder(_),
- )) if expected.def_id == found.def_id => Some(self.try_report_placeholders_trait(
+ )) => self.try_report_trait_placeholder_mismatch(
Some(self.tcx().mk_region(ty::ReVar(*vid))),
cause,
None,
Some(*sup_placeholder),
- expected.def_id,
- expected.substs,
- found.substs,
- )),
+ values,
+ ),
Some(RegionResolutionError::UpperBoundUniverseConflict(
vid,
_,
_,
- SubregionOrigin::Subtype(box TypeTrace {
- cause,
- values: ValuePairs::TraitRefs(ExpectedFound { expected, found }),
- }),
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
sup_placeholder @ ty::RePlaceholder(_),
- )) if expected.def_id == found.def_id => Some(self.try_report_placeholders_trait(
+ )) => self.try_report_trait_placeholder_mismatch(
Some(self.tcx().mk_region(ty::ReVar(*vid))),
cause,
None,
Some(*sup_placeholder),
- expected.def_id,
- expected.substs,
- found.substs,
- )),
+ values,
+ ),
Some(RegionResolutionError::ConcreteFailure(
- SubregionOrigin::Subtype(box TypeTrace {
- cause,
- values: ValuePairs::TraitRefs(ExpectedFound { expected, found }),
- }),
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
sub_region @ ty::RePlaceholder(_),
sup_region @ ty::RePlaceholder(_),
- )) if expected.def_id == found.def_id => Some(self.try_report_placeholders_trait(
+ )) => self.try_report_trait_placeholder_mismatch(
None,
cause,
Some(*sub_region),
Some(*sup_region),
- expected.def_id,
- expected.substs,
- found.substs,
- )),
+ values,
+ ),
Some(RegionResolutionError::ConcreteFailure(
- SubregionOrigin::Subtype(box TypeTrace {
- cause,
- values: ValuePairs::TraitRefs(ExpectedFound { expected, found }),
- }),
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
sub_region @ ty::RePlaceholder(_),
sup_region,
- )) if expected.def_id == found.def_id => Some(self.try_report_placeholders_trait(
- Some(sup_region),
+ )) => self.try_report_trait_placeholder_mismatch(
+ (!sup_region.has_name()).then_some(sup_region),
cause,
- Some(*sub_region),
+ Some(sub_region),
None,
- expected.def_id,
- expected.substs,
- found.substs,
- )),
+ values,
+ ),
Some(RegionResolutionError::ConcreteFailure(
- SubregionOrigin::Subtype(box TypeTrace {
- cause,
- values: ValuePairs::TraitRefs(ExpectedFound { expected, found }),
- }),
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
sub_region,
sup_region @ ty::RePlaceholder(_),
- )) if expected.def_id == found.def_id => Some(self.try_report_placeholders_trait(
- Some(sub_region),
+ )) => self.try_report_trait_placeholder_mismatch(
+ (!sub_region.has_name()).then_some(sub_region),
cause,
None,
- Some(*sup_region),
- expected.def_id,
- expected.substs,
- found.substs,
- )),
+ Some(sup_region),
+ values,
+ ),
_ => None,
}
}
+ fn try_report_trait_placeholder_mismatch(
+ &self,
+ vid: Option<ty::Region<'tcx>>,
+ cause: &ObligationCause<'tcx>,
+ sub_placeholder: Option<ty::Region<'tcx>>,
+ sup_placeholder: Option<ty::Region<'tcx>>,
+ value_pairs: &ValuePairs<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx>> {
+ let (expected_substs, found_substs, trait_def_id) = match value_pairs {
+ ValuePairs::TraitRefs(ExpectedFound { expected, found })
+ if expected.def_id == found.def_id =>
+ {
+ (expected.substs, found.substs, expected.def_id)
+ }
+ ValuePairs::PolyTraitRefs(ExpectedFound { expected, found })
+ if expected.def_id() == found.def_id() =>
+ {
+ // It's possible that the placeholders come from a binder
+ // outside of this value pair. Use `no_bound_vars` as a
+ // simple heuristic for that.
+ (expected.no_bound_vars()?.substs, found.no_bound_vars()?.substs, expected.def_id())
+ }
+ _ => return None,
+ };
+
+ Some(self.report_trait_placeholder_mismatch(
+ vid,
+ cause,
+ sub_placeholder,
+ sup_placeholder,
+ trait_def_id,
+ expected_substs,
+ found_substs,
+ ))
+ }
+
// error[E0308]: implementation of `Foo` does not apply to enough lifetimes
// --> /home/nmatsakis/tmp/foo.rs:12:5
// |
@@ -190,7 +186,8 @@
// = note: Due to a where-clause on the function `all`,
// = note: `T` must implement `...` for any two lifetimes `'1` and `'2`.
// = note: However, the type `T` only implements `...` for some specific lifetime `'2`.
- fn try_report_placeholders_trait(
+ #[instrument(level = "debug", skip(self))]
+ fn report_trait_placeholder_mismatch(
&self,
vid: Option<ty::Region<'tcx>>,
cause: &ObligationCause<'tcx>,
@@ -199,28 +196,13 @@
trait_def_id: DefId,
expected_substs: SubstsRef<'tcx>,
actual_substs: SubstsRef<'tcx>,
- ) -> DiagnosticBuilder<'me> {
- debug!(
- "try_report_placeholders_trait(\
- vid={:?}, \
- sub_placeholder={:?}, \
- sup_placeholder={:?}, \
- trait_def_id={:?}, \
- expected_substs={:?}, \
- actual_substs={:?})",
- vid, sub_placeholder, sup_placeholder, trait_def_id, expected_substs, actual_substs
- );
-
+ ) -> DiagnosticBuilder<'tcx> {
let span = cause.span(self.tcx());
let msg = format!(
"implementation of `{}` is not general enough",
self.tcx().def_path_str(trait_def_id),
);
let mut err = self.tcx().sess.struct_span_err(span, &msg);
- err.span_label(
- self.tcx().def_span(trait_def_id),
- format!("trait `{}` defined here", self.tcx().def_path_str(trait_def_id)),
- );
let leading_ellipsis = if let ObligationCauseCode::ItemObligation(def_id) = cause.code {
err.span_label(span, "doesn't satisfy where-clause");
@@ -285,17 +267,13 @@
let any_self_ty_has_vid = actual_self_ty_has_vid || expected_self_ty_has_vid;
- debug!("try_report_placeholders_trait: actual_has_vid={:?}", actual_has_vid);
- debug!("try_report_placeholders_trait: expected_has_vid={:?}", expected_has_vid);
- debug!("try_report_placeholders_trait: has_sub={:?}", has_sub);
- debug!("try_report_placeholders_trait: has_sup={:?}", has_sup);
debug!(
- "try_report_placeholders_trait: actual_self_ty_has_vid={:?}",
- actual_self_ty_has_vid
- );
- debug!(
- "try_report_placeholders_trait: expected_self_ty_has_vid={:?}",
- expected_self_ty_has_vid
+ ?actual_has_vid,
+ ?expected_has_vid,
+ ?has_sub,
+ ?has_sup,
+ ?actual_self_ty_has_vid,
+ ?expected_self_ty_has_vid,
);
self.explain_actual_impl_that_was_found(
@@ -388,6 +366,8 @@
value: trait_ref,
};
+ let same_self_type = actual_trait_ref.self_ty() == expected_trait_ref.self_ty();
+
let mut expected_trait_ref = highlight_trait_ref(expected_trait_ref);
expected_trait_ref.highlight.maybe_highlighting_region(sub_placeholder, has_sub);
expected_trait_ref.highlight.maybe_highlighting_region(sup_placeholder, has_sup);
@@ -403,7 +383,42 @@
}
};
- let mut note = if passive_voice {
+ let mut note = if same_self_type {
+ let mut self_ty = expected_trait_ref.map(|tr| tr.self_ty());
+ self_ty.highlight.maybe_highlighting_region(vid, actual_has_vid);
+
+ if self_ty.value.is_closure()
+ && self
+ .tcx()
+ .fn_trait_kind_from_lang_item(expected_trait_ref.value.def_id)
+ .is_some()
+ {
+ let closure_sig = self_ty.map(|closure| {
+ if let ty::Closure(_, substs) = closure.kind() {
+ self.tcx().signature_unclosure(
+ substs.as_closure().sig(),
+ rustc_hir::Unsafety::Normal,
+ )
+ } else {
+                        bug!("type is no longer a closure");
+ }
+ });
+
+ format!(
+ "{}closure with signature `{}` must implement `{}`",
+ if leading_ellipsis { "..." } else { "" },
+ closure_sig,
+ expected_trait_ref.map(|tr| tr.print_only_trait_path()),
+ )
+ } else {
+ format!(
+ "{}`{}` must implement `{}`",
+ if leading_ellipsis { "..." } else { "" },
+ self_ty,
+ expected_trait_ref.map(|tr| tr.print_only_trait_path()),
+ )
+ }
+ } else if passive_voice {
format!(
"{}`{}` would have to be implemented for the type `{}`",
if leading_ellipsis { "..." } else { "" },
@@ -449,7 +464,12 @@
None => true,
};
- let mut note = if passive_voice {
+ let mut note = if same_self_type {
+ format!(
+ "...but it actually implements `{}`",
+ actual_trait_ref.map(|tr| tr.print_only_trait_path()),
+ )
+ } else if passive_voice {
format!(
"...but `{}` is actually implemented for the type `{}`",
actual_trait_ref.map(|tr| tr.print_only_trait_path()),
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
index c6ae71b..1e92698 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
@@ -7,10 +7,7 @@
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorReported};
use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::{walk_ty, ErasedMap, NestedVisitorMap, Visitor};
-use rustc_hir::{
- self as hir, GenericBound, ImplItem, Item, ItemKind, Lifetime, LifetimeName, Node, TraitItem,
- TyKind,
-};
+use rustc_hir::{self as hir, GenericBound, Item, ItemKind, Lifetime, LifetimeName, Node, TyKind};
use rustc_middle::ty::{self, AssocItemContainer, RegionKind, Ty, TypeFoldable, TypeVisitor};
use rustc_span::symbol::Ident;
use rustc_span::{MultiSpan, Span};
@@ -234,7 +231,7 @@
}
match fn_return.kind {
TyKind::OpaqueDef(item_id, _) => {
- let item = tcx.hir().item(item_id.id);
+ let item = tcx.hir().item(item_id);
let opaque = if let ItemKind::OpaqueTy(opaque) = &item.kind {
opaque
} else {
@@ -295,7 +292,7 @@
);
}
}
- TyKind::TraitObject(_, lt) => match lt.name {
+ TyKind::TraitObject(_, lt, _) => match lt.name {
LifetimeName::ImplicitObjectLifetimeDefault => {
err.span_suggestion_verbose(
fn_return.span.shrink_to_hi(),
@@ -343,17 +340,17 @@
) -> Option<(Ident, &'tcx hir::Ty<'tcx>)> {
let tcx = self.tcx();
match tcx.hir().get_if_local(def_id) {
- Some(Node::ImplItem(ImplItem { ident, hir_id, .. })) => {
- match tcx.hir().find(tcx.hir().get_parent_item(*hir_id)) {
+ Some(Node::ImplItem(impl_item)) => {
+ match tcx.hir().find(tcx.hir().get_parent_item(impl_item.hir_id())) {
Some(Node::Item(Item {
kind: ItemKind::Impl(hir::Impl { self_ty, .. }),
..
- })) => Some((*ident, self_ty)),
+ })) => Some((impl_item.ident, self_ty)),
_ => None,
}
}
- Some(Node::TraitItem(TraitItem { ident, hir_id, .. })) => {
- let parent_id = tcx.hir().get_parent_item(*hir_id);
+ Some(Node::TraitItem(trait_item)) => {
+ let parent_id = tcx.hir().get_parent_item(trait_item.hir_id());
match tcx.hir().find(parent_id) {
Some(Node::Item(Item { kind: ItemKind::Trait(..), .. })) => {
// The method being called is defined in the `trait`, but the `'static`
@@ -364,8 +361,7 @@
.hir()
.trait_impls(trait_did)
.iter()
- .filter_map(|impl_node| {
- let impl_did = tcx.hir().local_def_id(*impl_node);
+ .filter_map(|&impl_did| {
match tcx.hir().get_if_local(impl_did.to_def_id()) {
Some(Node::Item(Item {
kind: ItemKind::Impl(hir::Impl { self_ty, .. }),
@@ -389,7 +385,7 @@
})
.next()
{
- Some(self_ty) => Some((*ident, self_ty)),
+ Some(self_ty) => Some((trait_item.ident, self_ty)),
_ => None,
}
}
@@ -502,6 +498,7 @@
if let TyKind::TraitObject(
poly_trait_refs,
Lifetime { name: LifetimeName::ImplicitObjectLifetimeDefault, .. },
+ _,
) = t.kind
{
for ptr in poly_trait_refs {
diff --git a/compiler/rustc_infer/src/infer/lexical_region_resolve/README.md b/compiler/rustc_infer/src/infer/lexical_region_resolve/README.md
index e0b2c0b..0a7da8c 100644
--- a/compiler/rustc_infer/src/infer/lexical_region_resolve/README.md
+++ b/compiler/rustc_infer/src/infer/lexical_region_resolve/README.md
@@ -1,4 +1,3 @@
-
Lexical Region Resolution was removed in https://github.com/rust-lang/rust/pull/64790.
Rust now uses Non-lexical lifetimes. For more info, please see the [borrowck
diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs
index 09eecd7..3df58cb 100644
--- a/compiler/rustc_infer/src/infer/mod.rs
+++ b/compiler/rustc_infer/src/infer/mod.rs
@@ -408,7 +408,7 @@
}
// `SubregionOrigin` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(SubregionOrigin<'_>, 32);
/// Times when we replace late-bound regions with variables:
diff --git a/compiler/rustc_infer/src/infer/nll_relate/mod.rs b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
index 97ef685..e5eb771 100644
--- a/compiler/rustc_infer/src/infer/nll_relate/mod.rs
+++ b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
@@ -44,7 +44,7 @@
{
infcx: &'me InferCtxt<'me, 'tcx>,
- /// Callback to use when we deduce an outlives relationship
+ /// Callback to use when we deduce an outlives relationship.
delegate: D,
/// How are we relating `a` and `b`?
@@ -72,6 +72,8 @@
}
pub trait TypeRelatingDelegate<'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx>;
+
/// Push a constraint `sup: sub` -- this constraint must be
/// satisfied for the two types to be related. `sub` and `sup` may
/// be regions from the type or new variables created through the
@@ -473,9 +475,8 @@
self.infcx.tcx
}
- // FIXME(oli-obk): not sure how to get the correct ParamEnv
fn param_env(&self) -> ty::ParamEnv<'tcx> {
- ty::ParamEnv::empty()
+ self.delegate.param_env()
}
fn tag(&self) -> &'static str {
@@ -767,7 +768,7 @@
}
}
-/// The "type generalize" is used when handling inference variables.
+/// The "type generalizer" is used when handling inference variables.
///
/// The basic strategy for handling a constraint like `?A <: B` is to
/// apply a "generalization strategy" to the type `B` -- this replaces
@@ -819,9 +820,8 @@
self.infcx.tcx
}
- // FIXME(oli-obk): not sure how to get the correct ParamEnv
fn param_env(&self) -> ty::ParamEnv<'tcx> {
- ty::ParamEnv::empty()
+ self.delegate.param_env()
}
fn tag(&self) -> &'static str {
diff --git a/compiler/rustc_infer/src/infer/undo_log.rs b/compiler/rustc_infer/src/infer/undo_log.rs
index 2cfd6bb..4be0e79 100644
--- a/compiler/rustc_infer/src/infer/undo_log.rs
+++ b/compiler/rustc_infer/src/infer/undo_log.rs
@@ -15,7 +15,7 @@
_marker: PhantomData<&'tcx ()>,
}
-/// Records the 'undo' data fora single operation that affects some form of inference variable.
+/// Records the "undo" data for a single operation that affects some form of inference variable.
pub(crate) enum UndoLog<'tcx> {
TypeVariables(type_variable::UndoLog<'tcx>),
ConstUnificationTable(sv::UndoLog<ut::Delegate<ty::ConstVid<'tcx>>>),
diff --git a/compiler/rustc_infer/src/lib.rs b/compiler/rustc_infer/src/lib.rs
index 3690a88..f9170ef5 100644
--- a/compiler/rustc_infer/src/lib.rs
+++ b/compiler/rustc_infer/src/lib.rs
@@ -27,7 +27,7 @@
#[macro_use]
extern crate rustc_macros;
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
#[macro_use]
extern crate rustc_data_structures;
#[macro_use]
diff --git a/compiler/rustc_infer/src/traits/error_reporting/mod.rs b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
index 835f75e..ad15af9 100644
--- a/compiler/rustc_infer/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
@@ -104,7 +104,7 @@
<https://doc.rust-lang.org/reference/items/traits.html#object-safety>",
);
- if tcx.sess.trait_methods_not_found.borrow().contains(&span) {
+ if tcx.sess.trait_methods_not_found.borrow().iter().any(|full_span| full_span.contains(span)) {
// Avoid emitting error caused by non-existing method (#58734)
err.cancel();
}
diff --git a/compiler/rustc_infer/src/traits/mod.rs b/compiler/rustc_infer/src/traits/mod.rs
index aaf5e95..0882d68 100644
--- a/compiler/rustc_infer/src/traits/mod.rs
+++ b/compiler/rustc_infer/src/traits/mod.rs
@@ -56,7 +56,7 @@
pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>;
// `PredicateObligation` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(PredicateObligation<'_>, 32);
pub type PredicateObligations<'tcx> = Vec<PredicateObligation<'tcx>>;
diff --git a/compiler/rustc_infer/src/traits/util.rs b/compiler/rustc_infer/src/traits/util.rs
index 13cf1e10..87684c2 100644
--- a/compiler/rustc_infer/src/traits/util.rs
+++ b/compiler/rustc_infer/src/traits/util.rs
@@ -1,9 +1,10 @@
use smallvec::smallvec;
use crate::traits::{Obligation, ObligationCause, PredicateObligation};
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
use rustc_middle::ty::outlives::Component;
use rustc_middle::ty::{self, ToPredicate, TyCtxt, WithConstness};
+use rustc_span::symbol::Ident;
pub fn anonymize_predicate<'tcx>(
tcx: TyCtxt<'tcx>,
@@ -282,6 +283,44 @@
elaborate_trait_refs(tcx, bounds).filter_to_traits()
}
+/// A specialized variant of `elaborate_trait_refs` that only elaborates trait references that may
+/// define the given associated type `assoc_name`. It uses the
+/// `super_predicates_that_define_assoc_type` query to avoid enumerating super-predicates that
+/// aren't related to `assoc_item`. This is used when resolving types like `Self::Item` or
+/// `T::Item` and helps to avoid cycle errors (see e.g. #35237).
+pub fn transitive_bounds_that_define_assoc_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ bounds: impl Iterator<Item = ty::PolyTraitRef<'tcx>>,
+ assoc_name: Ident,
+) -> impl Iterator<Item = ty::PolyTraitRef<'tcx>> {
+ let mut stack: Vec<_> = bounds.collect();
+ let mut visited = FxIndexSet::default();
+
+ std::iter::from_fn(move || {
+ while let Some(trait_ref) = stack.pop() {
+ let anon_trait_ref = tcx.anonymize_late_bound_regions(trait_ref);
+ if visited.insert(anon_trait_ref) {
+ let super_predicates = tcx.super_predicates_that_define_assoc_type((
+ trait_ref.def_id(),
+ Some(assoc_name),
+ ));
+ for (super_predicate, _) in super_predicates.predicates {
+ let bound_predicate = super_predicate.kind();
+ let subst_predicate = super_predicate
+ .subst_supertrait(tcx, &bound_predicate.rebind(trait_ref.skip_binder()));
+ if let Some(binder) = subst_predicate.to_opt_poly_trait_ref() {
+ stack.push(binder.value);
+ }
+ }
+
+ return Some(trait_ref);
+ }
+ }
+
+ return None;
+ })
+}
+
///////////////////////////////////////////////////////////////////////////
// Other
///////////////////////////////////////////////////////////////////////////
diff --git a/compiler/rustc_interface/Cargo.toml b/compiler/rustc_interface/Cargo.toml
index 2481a27..3bfe8da 100644
--- a/compiler/rustc_interface/Cargo.toml
+++ b/compiler/rustc_interface/Cargo.toml
@@ -10,8 +10,9 @@
[dependencies]
libc = "0.2"
tracing = "0.1"
-rayon = { version = "0.3.0", package = "rustc-rayon" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+rustc-rayon-core = "0.3.1"
+rayon = { version = "0.3.1", package = "rustc-rayon" }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_ast = { path = "../rustc_ast" }
rustc_attr = { path = "../rustc_attr" }
rustc_builtin_macros = { path = "../rustc_builtin_macros" }
@@ -30,6 +31,7 @@
rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
rustc_codegen_llvm = { path = "../rustc_codegen_llvm", optional = true }
rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
rustc_metadata = { path = "../rustc_metadata" }
rustc_mir = { path = "../rustc_mir" }
rustc_mir_build = { path = "../rustc_mir_build" }
@@ -39,6 +41,7 @@
rustc_errors = { path = "../rustc_errors" }
rustc_plugin_impl = { path = "../rustc_plugin_impl" }
rustc_privacy = { path = "../rustc_privacy" }
+rustc_query_impl = { path = "../rustc_query_impl" }
rustc_resolve = { path = "../rustc_resolve" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
rustc_ty_utils = { path = "../rustc_ty_utils" }
diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs
index 28eb1fe..a1090ee 100644
--- a/compiler/rustc_interface/src/interface.rs
+++ b/compiler/rustc_interface/src/interface.rs
@@ -8,7 +8,7 @@
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::OnDrop;
use rustc_errors::registry::Registry;
-use rustc_errors::ErrorReported;
+use rustc_errors::{ErrorReported, Handler};
use rustc_lint::LintStore;
use rustc_middle::ty;
use rustc_parse::new_parser_from_source_str;
@@ -142,6 +142,9 @@
pub lint_caps: FxHashMap<lint::LintId, lint::Level>,
+ /// This is a callback from the driver that is called when [`ParseSess`] is created.
+ pub parse_sess_created: Option<Box<dyn FnOnce(&mut ParseSess) + Send>>,
+
/// This is a callback from the driver that is called when we're registering lints;
/// it is called during plugin registration when we have the LintStore in a non-shared state.
///
@@ -166,7 +169,7 @@
pub fn create_compiler_and_run<R>(config: Config, f: impl FnOnce(&Compiler) -> R) -> R {
let registry = &config.registry;
- let (sess, codegen_backend) = util::create_session(
+ let (mut sess, codegen_backend) = util::create_session(
config.opts,
config.crate_cfg,
config.diagnostic_output,
@@ -177,6 +180,14 @@
registry.clone(),
);
+ if let Some(parse_sess_created) = config.parse_sess_created {
+ parse_sess_created(
+ &mut Lrc::get_mut(&mut sess)
+ .expect("create_session() should never share the returned session")
+ .parse_sess,
+ );
+ }
+
let compiler = Compiler {
sess,
codegen_backend,
@@ -213,3 +224,24 @@
|| create_compiler_and_run(config, f),
)
}
+
+pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
+ eprintln!("query stack during panic:");
+
+ // Be careful relying on global state here: this code is called from
+ // a panic hook, which means that the global `Handler` may be in a weird
+ // state if it was responsible for triggering the panic.
+ let i = ty::tls::with_context_opt(|icx| {
+ if let Some(icx) = icx {
+ icx.tcx.queries.try_print_query_stack(icx.tcx, icx.query, handler, num_frames)
+ } else {
+ 0
+ }
+ });
+
+ if num_frames == None || num_frames >= Some(i) {
+ eprintln!("end of query stack");
+ } else {
+ eprintln!("we're just showing a limited slice of the query stack");
+ }
+}
diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs
index 56aa393..94be7a0 100644
--- a/compiler/rustc_interface/src/passes.rs
+++ b/compiler/rustc_interface/src/passes.rs
@@ -15,6 +15,7 @@
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
use rustc_hir::Crate;
+use rustc_index::vec::IndexVec;
use rustc_lint::LintStore;
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::DepGraph;
@@ -27,6 +28,7 @@
use rustc_parse::{parse_crate_from_file, parse_crate_from_source_str};
use rustc_passes::{self, hir_stats, layout_test};
use rustc_plugin_impl as plugin;
+use rustc_query_impl::Queries as TcxQueries;
use rustc_resolve::{Resolver, ResolverArenas};
use rustc_session::config::{CrateType, Input, OutputFilenames, OutputType, PpMode, PpSourceMode};
use rustc_session::lint;
@@ -64,8 +66,8 @@
}
if sess.opts.debugging_opts.input_stats {
- println!("Lines of code: {}", sess.source_map().count_lines());
- println!("Pre-expansion node count: {}", count_nodes(&krate));
+ eprintln!("Lines of code: {}", sess.source_map().count_lines());
+ eprintln!("Pre-expansion node count: {}", count_nodes(&krate));
}
if let Some(ref s) = sess.opts.debugging_opts.show_span {
@@ -300,8 +302,10 @@
..rustc_expand::expand::ExpansionConfig::default(crate_name.to_string())
};
- let extern_mod_loaded = |k: &ast::Crate, ident: Ident| {
- pre_expansion_lint(sess, lint_store, k, &*ident.name.as_str())
+ let extern_mod_loaded = |ident: Ident, attrs, items, span| {
+ let krate = ast::Crate { attrs, items, span, proc_macros: vec![] };
+ pre_expansion_lint(sess, lint_store, &krate, &ident.name.as_str());
+ (krate.attrs, krate.items)
};
let mut ecx = ExtCtxt::new(&sess, cfg, &mut resolver, Some(&extern_mod_loaded));
@@ -348,7 +352,7 @@
rustc_builtin_macros::test_harness::inject(&sess, &mut resolver, &mut krate)
});
- if let Some(PpMode::PpmSource(PpSourceMode::PpmEveryBodyLoops)) = sess.opts.pretty {
+ if let Some(PpMode::Source(PpSourceMode::EveryBodyLoops)) = sess.opts.pretty {
tracing::debug!("replacing bodies with loop {{}}");
util::ReplaceBodyWithLoop::new(&mut resolver).visit_crate(&mut krate);
}
@@ -394,7 +398,7 @@
// Done with macro expansion!
if sess.opts.debugging_opts.input_stats {
- println!("Post-expansion node count: {}", count_nodes(&krate));
+ eprintln!("Post-expansion node count: {}", count_nodes(&krate));
}
if sess.opts.debugging_opts.hir_stats {
@@ -738,20 +742,18 @@
extern_providers
});
-pub struct QueryContext<'tcx>(&'tcx GlobalCtxt<'tcx>);
+pub struct QueryContext<'tcx> {
+ gcx: &'tcx GlobalCtxt<'tcx>,
+}
impl<'tcx> QueryContext<'tcx> {
pub fn enter<F, R>(&mut self, f: F) -> R
where
F: FnOnce(TyCtxt<'tcx>) -> R,
{
- let icx = ty::tls::ImplicitCtxt::new(self.0);
+ let icx = ty::tls::ImplicitCtxt::new(self.gcx);
ty::tls::enter_context(&icx, |_| f(icx.tcx))
}
-
- pub fn print_stats(&mut self) {
- self.enter(ty::query::print_stats)
- }
}
pub fn create_global_ctxt<'tcx>(
@@ -762,6 +764,7 @@
mut resolver_outputs: ResolverOutputs,
outputs: OutputFilenames,
crate_name: &str,
+ queries: &'tcx OnceCell<TcxQueries<'tcx>>,
global_ctxt: &'tcx OnceCell<GlobalCtxt<'tcx>>,
arena: &'tcx WorkerLocal<Arena<'tcx>>,
) -> QueryContext<'tcx> {
@@ -785,26 +788,33 @@
callback(sess, &mut local_providers, &mut extern_providers);
}
+ let queries = {
+ let crates = resolver_outputs.cstore.crates_untracked();
+ let max_cnum = crates.iter().map(|c| c.as_usize()).max().unwrap_or(0);
+ let mut providers = IndexVec::from_elem_n(extern_providers, max_cnum + 1);
+ providers[LOCAL_CRATE] = local_providers;
+ queries.get_or_init(|| TcxQueries::new(providers, extern_providers))
+ };
+
let gcx = sess.time("setup_global_ctxt", || {
global_ctxt.get_or_init(|| {
TyCtxt::create_global_ctxt(
sess,
lint_store,
- local_providers,
- extern_providers,
arena,
resolver_outputs,
krate,
defs,
dep_graph,
query_result_on_disk_cache,
+ queries.as_dyn(),
&crate_name,
&outputs,
)
})
});
- QueryContext(gcx)
+ QueryContext { gcx }
}
/// Runs the resolution, type-checking, region checking and other
@@ -831,12 +841,11 @@
},
{
par_iter(&tcx.hir().krate().modules).for_each(|(&module, _)| {
- let local_def_id = tcx.hir().local_def_id(module);
- tcx.ensure().check_mod_loops(local_def_id);
- tcx.ensure().check_mod_attrs(local_def_id);
- tcx.ensure().check_mod_naked_functions(local_def_id);
- tcx.ensure().check_mod_unstable_api_usage(local_def_id);
- tcx.ensure().check_mod_const_bodies(local_def_id);
+ tcx.ensure().check_mod_loops(module);
+ tcx.ensure().check_mod_attrs(module);
+ tcx.ensure().check_mod_naked_functions(module);
+ tcx.ensure().check_mod_unstable_api_usage(module);
+ tcx.ensure().check_mod_const_bodies(module);
});
}
);
@@ -861,10 +870,8 @@
// "not all control paths return a value" is reported here.
//
// maybe move the check to a MIR pass?
- let local_def_id = tcx.hir().local_def_id(module);
-
- tcx.ensure().check_mod_liveness(local_def_id);
- tcx.ensure().check_mod_intrinsics(local_def_id);
+ tcx.ensure().check_mod_liveness(module);
+ tcx.ensure().check_mod_intrinsics(module);
});
});
}
@@ -926,7 +933,7 @@
{
sess.time("privacy_checking_modules", || {
par_iter(&tcx.hir().krate().modules).for_each(|(&module, _)| {
- tcx.ensure().check_mod_privacy(tcx.hir().local_def_id(module));
+ tcx.ensure().check_mod_privacy(module);
});
});
}
@@ -983,7 +990,7 @@
.unwrap_or_else(|err| tcx.sess.fatal(&format!("couldn't create a temp dir: {}", err)));
let metadata_tmpdir = MaybeTempDir::new(metadata_tmpdir, tcx.sess.opts.cg.save_temps);
let metadata_filename = emit_metadata(tcx.sess, &metadata, &metadata_tmpdir);
- if let Err(e) = fs::rename(&metadata_filename, &out_filename) {
+ if let Err(e) = util::non_durable_rename(&metadata_filename, &out_filename) {
tcx.sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
}
if tcx.sess.opts.json_artifact_notifications {
diff --git a/compiler/rustc_interface/src/proc_macro_decls.rs b/compiler/rustc_interface/src/proc_macro_decls.rs
index de08a4c..4637055 100644
--- a/compiler/rustc_interface/src/proc_macro_decls.rs
+++ b/compiler/rustc_interface/src/proc_macro_decls.rs
@@ -25,8 +25,9 @@
impl<'v> ItemLikeVisitor<'v> for Finder<'_> {
fn visit_item(&mut self, item: &hir::Item<'_>) {
- if self.tcx.sess.contains_name(&item.attrs, sym::rustc_proc_macro_decls) {
- self.decls = Some(item.hir_id);
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+ if self.tcx.sess.contains_name(attrs, sym::rustc_proc_macro_decls) {
+ self.decls = Some(item.hir_id());
}
}
diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs
index ac6b6d0..9c38d2b 100644
--- a/compiler/rustc_interface/src/queries.rs
+++ b/compiler/rustc_interface/src/queries.rs
@@ -14,6 +14,7 @@
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::DepGraph;
use rustc_middle::ty::{GlobalCtxt, ResolverOutputs, TyCtxt};
+use rustc_query_impl::Queries as TcxQueries;
use rustc_serialize::json;
use rustc_session::config::{self, OutputFilenames, OutputType};
use rustc_session::{output::find_crate_name, Session};
@@ -71,6 +72,7 @@
pub struct Queries<'tcx> {
compiler: &'tcx Compiler,
gcx: OnceCell<GlobalCtxt<'tcx>>,
+ queries: OnceCell<TcxQueries<'tcx>>,
arena: WorkerLocal<Arena<'tcx>>,
hir_arena: WorkerLocal<rustc_ast_lowering::Arena<'tcx>>,
@@ -92,6 +94,7 @@
Queries {
compiler,
gcx: OnceCell::new(),
+ queries: OnceCell::new(),
arena: WorkerLocal::new(|_| Arena::default()),
hir_arena: WorkerLocal::new(|_| rustc_ast_lowering::Arena::default()),
dep_graph_future: Default::default(),
@@ -265,6 +268,7 @@
resolver_outputs.steal(),
outputs,
&crate_name,
+ &self.queries,
&self.gcx,
&self.arena,
))
@@ -425,11 +429,11 @@
{
let _prof_timer =
queries.session().prof.generic_activity("self_profile_alloc_query_strings");
- gcx.enter(|tcx| tcx.alloc_self_profile_query_strings());
+ gcx.enter(rustc_query_impl::alloc_self_profile_query_strings);
}
if self.session().opts.debugging_opts.query_stats {
- gcx.print_stats();
+ gcx.enter(rustc_query_impl::print_stats);
}
}
diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs
index f9c3406..93ba2e6 100644
--- a/compiler/rustc_interface/src/tests.rs
+++ b/compiler/rustc_interface/src/tests.rs
@@ -20,6 +20,7 @@
use rustc_target::spec::{RelocModel, RelroLevel, SplitDebuginfo, TlsModel};
use std::collections::{BTreeMap, BTreeSet};
use std::iter::FromIterator;
+use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};
type CfgSpecs = FxHashSet<(String, Option<String>)>;
@@ -556,15 +557,15 @@
tracked!(function_sections, Some(false));
tracked!(human_readable_cgu_names, true);
tracked!(inline_in_all_cgus, Some(true));
- tracked!(inline_mir_threshold, 123);
- tracked!(inline_mir_hint_threshold, 123);
- tracked!(insert_sideeffect, true);
+ tracked!(inline_mir, Some(true));
+ tracked!(inline_mir_threshold, Some(123));
+ tracked!(inline_mir_hint_threshold, Some(123));
tracked!(instrument_coverage, true);
tracked!(instrument_mcount, true);
tracked!(link_only, true);
tracked!(merge_functions, Some(MergeFunctions::Disabled));
tracked!(mir_emit_retag, true);
- tracked!(mir_opt_level, 3);
+ tracked!(mir_opt_level, Some(4));
tracked!(mutable_noalias, true);
tracked!(new_llvm_pass_manager, true);
tracked!(no_codegen, true);
@@ -595,7 +596,7 @@
tracked!(tune_cpu, Some(String::from("abc")));
tracked!(tls_model, Some(TlsModel::GeneralDynamic));
tracked!(trap_unreachable, Some(false));
- tracked!(treat_err_as_bug, Some(1));
+ tracked!(treat_err_as_bug, NonZeroUsize::new(1));
tracked!(unleash_the_miri_inside_of_you, true);
tracked!(use_ctors_section, Some(true));
tracked!(verify_llvm_ir, true);
diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs
index b7dc539..341cfa4 100644
--- a/compiler/rustc_interface/src/util.rs
+++ b/compiler/rustc_interface/src/util.rs
@@ -10,6 +10,8 @@
use rustc_data_structures::sync::Lrc;
use rustc_errors::registry::Registry;
use rustc_metadata::dynamic_lib::DynamicLibrary;
+#[cfg(parallel_compiler)]
+use rustc_middle::ty::tls;
use rustc_resolve::{self, Resolver};
use rustc_session as session;
use rustc_session::config::{self, CrateType};
@@ -29,11 +31,12 @@
use std::lazy::SyncOnceCell;
use std::mem;
use std::ops::DerefMut;
+#[cfg(not(parallel_compiler))]
+use std::panic;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, Once};
-#[cfg(not(parallel_compiler))]
-use std::{panic, thread};
+use std::thread;
use tracing::info;
/// Adds `target_feature = "..."` cfgs for a variety of platform
@@ -156,6 +159,28 @@
scoped_thread(cfg, main_handler)
}
+/// Creates a new thread and forwards information in thread locals to it.
+/// The new thread runs the deadlock handler.
+/// Must only be called when a deadlock is about to happen.
+#[cfg(parallel_compiler)]
+unsafe fn handle_deadlock() {
+ let registry = rustc_rayon_core::Registry::current();
+
+ let context = tls::get_tlv();
+ assert!(context != 0);
+ rustc_data_structures::sync::assert_sync::<tls::ImplicitCtxt<'_, '_>>();
+ let icx: &tls::ImplicitCtxt<'_, '_> = &*(context as *const tls::ImplicitCtxt<'_, '_>);
+
+ let session_globals = rustc_span::SESSION_GLOBALS.with(|sg| sg as *const _);
+ let session_globals = &*session_globals;
+ thread::spawn(move || {
+ tls::enter_context(icx, |_| {
+ rustc_span::SESSION_GLOBALS
+ .set(session_globals, || tls::with(|tcx| tcx.queries.deadlock(tcx, &registry)))
+ });
+ });
+}
+
#[cfg(parallel_compiler)]
pub fn setup_callbacks_and_run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition,
@@ -163,7 +188,6 @@
stderr: &Option<Arc<Mutex<Vec<u8>>>>,
f: F,
) -> R {
- use rustc_middle::ty;
crate::callbacks::setup_callbacks();
let mut config = rayon::ThreadPoolBuilder::new()
@@ -171,7 +195,7 @@
.acquire_thread_handler(jobserver::acquire_thread)
.release_thread_handler(jobserver::release_thread)
.num_threads(threads)
- .deadlock_handler(|| unsafe { ty::query::handle_deadlock() });
+ .deadlock_handler(|| unsafe { handle_deadlock() });
if let Some(size) = get_stack_size() {
config = config.stack_size(size);
@@ -670,16 +694,42 @@
}
}
-// Note: Also used by librustdoc, see PR #43348. Consider moving this struct elsewhere.
-//
-// FIXME: Currently the `everybody_loops` transformation is not applied to:
-// * `const fn`, due to issue #43636 that `loop` is not supported for const evaluation. We are
-// waiting for miri to fix that.
-// * `impl Trait`, due to issue #43869 that functions returning impl Trait cannot be diverging.
-// Solving this may require `!` to implement every trait, which relies on the an even more
-// ambitious form of the closed RFC #1637. See also [#34511].
-//
-// [#34511]: https://github.com/rust-lang/rust/issues/34511#issuecomment-322340401
+#[cfg(not(target_os = "linux"))]
+pub fn non_durable_rename(src: &Path, dst: &Path) -> std::io::Result<()> {
+ std::fs::rename(src, dst)
+}
+
+/// This function attempts to bypass the auto_da_alloc heuristic implemented by some filesystems
+/// such as btrfs and ext4. When renaming over a file that already exists then they will "helpfully"
+/// write back the source file before committing the rename in case a developer forgot some of
+/// the fsyncs in the open/write/fsync(file)/rename/fsync(dir) dance for atomic file updates.
+///
+/// To avoid triggering this heuristic we delete the destination first, if it exists.
+/// The cost of an extra syscall is much lower than getting descheduled for the sync IO.
+#[cfg(target_os = "linux")]
+pub fn non_durable_rename(src: &Path, dst: &Path) -> std::io::Result<()> {
+ let _ = std::fs::remove_file(dst);
+ std::fs::rename(src, dst)
+}
+
+/// Replaces function bodies with `loop {}` (an infinite loop). This gets rid of
+/// all semantic errors in the body while still satisfying the return type,
+/// except in certain cases, see below for more.
+///
+/// This pass is known as `everybody_loops`. Very punny.
+///
+/// As of March 2021, `everybody_loops` is only used for the
+/// `-Z unpretty=everybody_loops` debugging option.
+///
+/// FIXME: Currently the `everybody_loops` transformation is not applied to:
+/// * `const fn`; support could be added, but hasn't. Originally `const fn`
+/// was skipped due to issue #43636 that `loop` was not supported for
+/// const evaluation.
+/// * `impl Trait`, due to issue #43869 that functions returning impl Trait cannot be diverging.
+/// Solving this may require `!` to implement every trait, which relies on the an even more
+/// ambitious form of the closed RFC #1637. See also [#34511].
+///
+/// [#34511]: https://github.com/rust-lang/rust/issues/34511#issuecomment-322340401
pub struct ReplaceBodyWithLoop<'a, 'b> {
within_static_or_const: bool,
nested_blocks: Option<Vec<ast::Block>>,
diff --git a/compiler/rustc_lint/Cargo.toml b/compiler/rustc_lint/Cargo.toml
index c56eb09..90badd3 100644
--- a/compiler/rustc_lint/Cargo.toml
+++ b/compiler/rustc_lint/Cargo.toml
@@ -19,5 +19,6 @@
rustc_feature = { path = "../rustc_feature" }
rustc_index = { path = "../rustc_index" }
rustc_session = { path = "../rustc_session" }
+rustc_serialize = { path = "../rustc_serialize" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
rustc_parse_format = { path = "../rustc_parse_format" }
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index 2cedef6..1a8bbb6 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -24,7 +24,7 @@
types::{transparent_newtype_field, CItemKind},
EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext,
};
-use rustc_ast::attr::{self, HasAttrs};
+use rustc_ast::attr;
use rustc_ast::tokenstream::{TokenStream, TokenTree};
use rustc_ast::visit::{FnCtxt, FnKind};
use rustc_ast::{self as ast, *};
@@ -36,9 +36,9 @@
use rustc_feature::{GateIssue, Stability};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
-use rustc_hir::def_id::DefId;
+use rustc_hir::def_id::{DefId, LocalDefId, LocalDefIdSet};
use rustc_hir::{ForeignItemKind, GenericParamKind, PatKind};
-use rustc_hir::{HirId, HirIdSet, Node};
+use rustc_hir::{HirId, Node};
use rustc_index::vec::Idx;
use rustc_middle::lint::LintDiagnosticBuilder;
use rustc_middle::ty::print::with_no_trimmed_paths;
@@ -173,8 +173,7 @@
| hir::ItemKind::Enum(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::Union(..) => {
- let def_id = cx.tcx.hir().local_def_id(it.hir_id);
- self.check_heap_type(cx, it.span, cx.tcx.type_of(def_id))
+ self.check_heap_type(cx, it.span, cx.tcx.type_of(it.def_id))
}
_ => (),
}
@@ -328,6 +327,18 @@
cx.struct_span_lint(UNSAFE_CODE, span, decorate);
}
+
+ fn report_overriden_symbol_name(&self, cx: &EarlyContext<'_>, span: Span, msg: &str) {
+ self.report_unsafe(cx, span, |lint| {
+ lint.build(msg)
+ .note(
+ "the linker's behavior with multiple libraries exporting duplicate symbol \
+ names is undefined and Rust cannot provide guarantees when you manually \
+ override them",
+ )
+ .emit();
+ })
+ }
}
impl EarlyLintPass for UnsafeCode {
@@ -367,6 +378,40 @@
lint.build("implementation of an `unsafe` trait").emit()
}),
+ ast::ItemKind::Fn(..) => {
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
+ self.report_overriden_symbol_name(
+ cx,
+ attr.span,
+ "declaration of a `no_mangle` function",
+ );
+ }
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::export_name) {
+ self.report_overriden_symbol_name(
+ cx,
+ attr.span,
+ "declaration of a function with `export_name`",
+ );
+ }
+ }
+
+ ast::ItemKind::Static(..) => {
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
+ self.report_overriden_symbol_name(
+ cx,
+ attr.span,
+ "declaration of a `no_mangle` static",
+ );
+ }
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::export_name) {
+ self.report_overriden_symbol_name(
+ cx,
+ attr.span,
+ "declaration of a static with `export_name`",
+ );
+ }
+ }
+
_ => {}
}
}
@@ -463,8 +508,7 @@
fn check_missing_docs_attrs(
&self,
cx: &LateContext<'_>,
- id: Option<hir::HirId>,
- attrs: &[ast::Attribute],
+ id: hir::HirId,
sp: Span,
article: &'static str,
desc: &'static str,
@@ -483,12 +527,13 @@
// Only check publicly-visible items, using the result from the privacy pass.
// It's an option so the crate root can also use this function (it doesn't
// have a `NodeId`).
- if let Some(id) = id {
+ if id != hir::CRATE_HIR_ID {
if !cx.access_levels.is_exported(id) {
return;
}
}
+ let attrs = cx.tcx.hir().attrs(id);
let has_doc = attrs.iter().any(|a| has_doc(cx.sess(), a));
if !has_doc {
cx.struct_span_lint(
@@ -520,10 +565,11 @@
}
fn check_crate(&mut self, cx: &LateContext<'_>, krate: &hir::Crate<'_>) {
- self.check_missing_docs_attrs(cx, None, &krate.item.attrs, krate.item.span, "the", "crate");
+ self.check_missing_docs_attrs(cx, hir::CRATE_HIR_ID, krate.item.span, "the", "crate");
for macro_def in krate.exported_macros {
- let has_doc = macro_def.attrs.iter().any(|a| has_doc(cx.sess(), a));
+ let attrs = cx.tcx.hir().attrs(macro_def.hir_id());
+ let has_doc = attrs.iter().any(|a| has_doc(cx.sess(), a));
if !has_doc {
cx.struct_span_lint(
MISSING_DOCS,
@@ -539,9 +585,9 @@
hir::ItemKind::Trait(.., trait_item_refs) => {
// Issue #11592: traits are always considered exported, even when private.
if let hir::VisibilityKind::Inherited = it.vis.node {
- self.private_traits.insert(it.hir_id);
+ self.private_traits.insert(it.hir_id());
for trait_item_ref in trait_item_refs {
- self.private_traits.insert(trait_item_ref.id.hir_id);
+ self.private_traits.insert(trait_item_ref.id.hir_id());
}
return;
}
@@ -555,7 +601,7 @@
if let Some(Node::Item(item)) = cx.tcx.hir().find(hir_id) {
if let hir::VisibilityKind::Inherited = item.vis.node {
for impl_item_ref in items {
- self.private_traits.insert(impl_item_ref.id.hir_id);
+ self.private_traits.insert(impl_item_ref.id.hir_id());
}
}
}
@@ -575,76 +621,44 @@
_ => return,
};
- let def_id = cx.tcx.hir().local_def_id(it.hir_id);
- let (article, desc) = cx.tcx.article_and_description(def_id.to_def_id());
+ let (article, desc) = cx.tcx.article_and_description(it.def_id.to_def_id());
- self.check_missing_docs_attrs(cx, Some(it.hir_id), &it.attrs, it.span, article, desc);
+ self.check_missing_docs_attrs(cx, it.hir_id(), it.span, article, desc);
}
fn check_trait_item(&mut self, cx: &LateContext<'_>, trait_item: &hir::TraitItem<'_>) {
- if self.private_traits.contains(&trait_item.hir_id) {
+ if self.private_traits.contains(&trait_item.hir_id()) {
return;
}
- let def_id = cx.tcx.hir().local_def_id(trait_item.hir_id);
- let (article, desc) = cx.tcx.article_and_description(def_id.to_def_id());
+ let (article, desc) = cx.tcx.article_and_description(trait_item.def_id.to_def_id());
- self.check_missing_docs_attrs(
- cx,
- Some(trait_item.hir_id),
- &trait_item.attrs,
- trait_item.span,
- article,
- desc,
- );
+ self.check_missing_docs_attrs(cx, trait_item.hir_id(), trait_item.span, article, desc);
}
fn check_impl_item(&mut self, cx: &LateContext<'_>, impl_item: &hir::ImplItem<'_>) {
// If the method is an impl for a trait, don't doc.
- if method_context(cx, impl_item.hir_id) == MethodLateContext::TraitImpl {
+ if method_context(cx, impl_item.hir_id()) == MethodLateContext::TraitImpl {
return;
}
- let def_id = cx.tcx.hir().local_def_id(impl_item.hir_id);
- let (article, desc) = cx.tcx.article_and_description(def_id.to_def_id());
- self.check_missing_docs_attrs(
- cx,
- Some(impl_item.hir_id),
- &impl_item.attrs,
- impl_item.span,
- article,
- desc,
- );
+ let (article, desc) = cx.tcx.article_and_description(impl_item.def_id.to_def_id());
+ self.check_missing_docs_attrs(cx, impl_item.hir_id(), impl_item.span, article, desc);
}
fn check_foreign_item(&mut self, cx: &LateContext<'_>, foreign_item: &hir::ForeignItem<'_>) {
- let def_id = cx.tcx.hir().local_def_id(foreign_item.hir_id);
- let (article, desc) = cx.tcx.article_and_description(def_id.to_def_id());
- self.check_missing_docs_attrs(
- cx,
- Some(foreign_item.hir_id),
- &foreign_item.attrs,
- foreign_item.span,
- article,
- desc,
- );
+ let (article, desc) = cx.tcx.article_and_description(foreign_item.def_id.to_def_id());
+ self.check_missing_docs_attrs(cx, foreign_item.hir_id(), foreign_item.span, article, desc);
}
- fn check_struct_field(&mut self, cx: &LateContext<'_>, sf: &hir::StructField<'_>) {
+ fn check_field_def(&mut self, cx: &LateContext<'_>, sf: &hir::FieldDef<'_>) {
if !sf.is_positional() {
- self.check_missing_docs_attrs(
- cx,
- Some(sf.hir_id),
- &sf.attrs,
- sf.span,
- "a",
- "struct field",
- )
+ self.check_missing_docs_attrs(cx, sf.hir_id, sf.span, "a", "struct field")
}
}
fn check_variant(&mut self, cx: &LateContext<'_>, v: &hir::Variant<'_>) {
- self.check_missing_docs_attrs(cx, Some(v.id), &v.attrs, v.span, "a", "variant");
+ self.check_missing_docs_attrs(cx, v.id, v.span, "a", "variant");
}
}
@@ -686,7 +700,7 @@
impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations {
fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
- if !cx.access_levels.is_reachable(item.hir_id) {
+ if !cx.access_levels.is_reachable(item.hir_id()) {
return;
}
let (def, ty) = match item.kind {
@@ -694,21 +708,21 @@
if !ast_generics.params.is_empty() {
return;
}
- let def = cx.tcx.adt_def(cx.tcx.hir().local_def_id(item.hir_id));
+ let def = cx.tcx.adt_def(item.def_id);
(def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
}
hir::ItemKind::Union(_, ref ast_generics) => {
if !ast_generics.params.is_empty() {
return;
}
- let def = cx.tcx.adt_def(cx.tcx.hir().local_def_id(item.hir_id));
+ let def = cx.tcx.adt_def(item.def_id);
(def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
}
hir::ItemKind::Enum(_, ref ast_generics) => {
if !ast_generics.params.is_empty() {
return;
}
- let def = cx.tcx.adt_def(cx.tcx.hir().local_def_id(item.hir_id));
+ let def = cx.tcx.adt_def(item.def_id);
(def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
}
_ => return,
@@ -766,14 +780,14 @@
#[derive(Default)]
pub struct MissingDebugImplementations {
- impling_types: Option<HirIdSet>,
+ impling_types: Option<LocalDefIdSet>,
}
impl_lint_pass!(MissingDebugImplementations => [MISSING_DEBUG_IMPLEMENTATIONS]);
impl<'tcx> LateLintPass<'tcx> for MissingDebugImplementations {
fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
- if !cx.access_levels.is_reachable(item.hir_id) {
+ if !cx.access_levels.is_reachable(item.hir_id()) {
return;
}
@@ -788,11 +802,11 @@
};
if self.impling_types.is_none() {
- let mut impls = HirIdSet::default();
+ let mut impls = LocalDefIdSet::default();
cx.tcx.for_each_impl(debug, |d| {
if let Some(ty_def) = cx.tcx.type_of(d).ty_adt_def() {
if let Some(def_id) = ty_def.did.as_local() {
- impls.insert(cx.tcx.hir().local_def_id_to_hir_id(def_id));
+ impls.insert(def_id);
}
}
});
@@ -801,7 +815,7 @@
debug!("{:?}", self.impling_types);
}
- if !self.impling_types.as_ref().unwrap().contains(&item.hir_id) {
+ if !self.impling_types.as_ref().unwrap().contains(&item.def_id) {
cx.struct_span_lint(MISSING_DEBUG_IMPLEMENTATIONS, item.span, |lint| {
lint.build(&format!(
"type does not implement `{}`; consider adding `#[derive(Debug)]` \
@@ -1078,9 +1092,10 @@
impl<'tcx> LateLintPass<'tcx> for InvalidNoMangleItems {
fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ let attrs = cx.tcx.hir().attrs(it.hir_id());
match it.kind {
hir::ItemKind::Fn(.., ref generics, _) => {
- if let Some(no_mangle_attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
+ if let Some(no_mangle_attr) = cx.sess().find_by_name(attrs, sym::no_mangle) {
for param in generics.params {
match param.kind {
GenericParamKind::Lifetime { .. } => {}
@@ -1106,7 +1121,7 @@
}
}
hir::ItemKind::Const(..) => {
- if cx.sess().contains_name(&it.attrs, sym::no_mangle) {
+ if cx.sess().contains_name(attrs, sym::no_mangle) {
// Const items do not refer to a particular location in memory, and therefore
// don't have anything to attach a symbol to
cx.struct_span_lint(NO_MANGLE_CONST_ITEMS, it.span, |lint| {
@@ -1316,26 +1331,26 @@
impl<'tcx> LateLintPass<'tcx> for UnreachablePub {
fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
- self.perform_lint(cx, "item", item.hir_id, &item.vis, item.span, true);
+ self.perform_lint(cx, "item", item.hir_id(), &item.vis, item.span, true);
}
fn check_foreign_item(&mut self, cx: &LateContext<'_>, foreign_item: &hir::ForeignItem<'tcx>) {
self.perform_lint(
cx,
"item",
- foreign_item.hir_id,
+ foreign_item.hir_id(),
&foreign_item.vis,
foreign_item.span,
true,
);
}
- fn check_struct_field(&mut self, cx: &LateContext<'_>, field: &hir::StructField<'_>) {
+ fn check_field_def(&mut self, cx: &LateContext<'_>, field: &hir::FieldDef<'_>) {
self.perform_lint(cx, "field", field.hir_id, &field.vis, field.span, false);
}
fn check_impl_item(&mut self, cx: &LateContext<'_>, impl_item: &hir::ImplItem<'_>) {
- self.perform_lint(cx, "item", impl_item.hir_id, &impl_item.vis, impl_item.span, false);
+ self.perform_lint(cx, "item", impl_item.hir_id(), &impl_item.vis, impl_item.span, false);
}
}
@@ -1557,8 +1572,7 @@
use rustc_middle::ty::PredicateKind::*;
if cx.tcx.features().trivial_bounds {
- let def_id = cx.tcx.hir().local_def_id(item.hir_id);
- let predicates = cx.tcx.predicates_of(def_id);
+ let predicates = cx.tcx.predicates_of(item.def_id);
for &(predicate, span) in predicates.predicates {
let predicate_kind_name = match predicate.kind().skip_binder() {
Trait(..) => "Trait",
@@ -1764,7 +1778,7 @@
}
pub struct UnnameableTestItems {
- boundary: Option<hir::HirId>, // HirId of the item under which things are not nameable
+ boundary: Option<LocalDefId>, // Id of the item under which things are not nameable
items_nameable: bool,
}
@@ -1782,12 +1796,13 @@
if let hir::ItemKind::Mod(..) = it.kind {
} else {
self.items_nameable = false;
- self.boundary = Some(it.hir_id);
+ self.boundary = Some(it.def_id);
}
return;
}
- if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::rustc_test_marker) {
+ let attrs = cx.tcx.hir().attrs(it.hir_id());
+ if let Some(attr) = cx.sess().find_by_name(attrs, sym::rustc_test_marker) {
cx.struct_span_lint(UNNAMEABLE_TEST_ITEMS, attr.span, |lint| {
lint.build("cannot test inner items").emit()
});
@@ -1795,7 +1810,7 @@
}
fn check_item_post(&mut self, _cx: &LateContext<'_>, it: &hir::Item<'_>) {
- if !self.items_nameable && self.boundary == Some(it.hir_id) {
+ if !self.items_nameable && self.boundary == Some(it.def_id) {
self.items_nameable = true;
}
}
@@ -2079,7 +2094,7 @@
use rustc_middle::middle::resolve_lifetime::Region;
let infer_static = cx.tcx.features().infer_static_outlives_requirements;
- let def_id = cx.tcx.hir().local_def_id(item.hir_id);
+ let def_id = item.def_id;
if let hir::ItemKind::Struct(_, ref hir_generics)
| hir::ItemKind::Enum(_, ref hir_generics)
| hir::ItemKind::Union(_, ref hir_generics) = item.kind
@@ -2634,10 +2649,7 @@
/// Insert a new foreign item into the seen set. If a symbol with the same name already exists
/// for the item, return its HirId without updating the set.
fn insert(&mut self, tcx: TyCtxt<'_>, fi: &hir::ForeignItem<'_>) -> Option<HirId> {
- let hid = fi.hir_id;
-
- let local_did = tcx.hir().local_def_id(fi.hir_id);
- let did = local_did.to_def_id();
+ let did = fi.def_id.to_def_id();
let instance = Instance::new(did, ty::List::identity_for_item(tcx, did));
let name = Symbol::intern(tcx.symbol_name(instance).name);
if let Some(&hir_id) = self.seen_decls.get(&name) {
@@ -2646,7 +2658,7 @@
// This lets us avoid emitting "knock-on" diagnostics.
Some(hir_id)
} else {
- self.seen_decls.insert(name, hid)
+ self.seen_decls.insert(name, fi.hir_id())
}
}
@@ -2654,16 +2666,15 @@
/// the name specified in a #[link_name = ...] attribute if one was specified, else, just the
/// symbol's name.
fn name_of_extern_decl(tcx: TyCtxt<'_>, fi: &hir::ForeignItem<'_>) -> SymbolName {
- let did = tcx.hir().local_def_id(fi.hir_id);
if let Some((overridden_link_name, overridden_link_name_span)) =
- tcx.codegen_fn_attrs(did).link_name.map(|overridden_link_name| {
+ tcx.codegen_fn_attrs(fi.def_id).link_name.map(|overridden_link_name| {
// FIXME: Instead of searching through the attributes again to get span
// information, we could have codegen_fn_attrs also give span information back for
// where the attribute was defined. However, until this is found to be a
// bottleneck, this does just fine.
(
overridden_link_name,
- tcx.get_attrs(did.to_def_id())
+ tcx.get_attrs(fi.def_id.to_def_id())
.iter()
.find(|at| tcx.sess.check_name(at, sym::link_name))
.unwrap()
@@ -2891,10 +2902,10 @@
let tcx = cx.tcx;
if let Some(existing_hid) = self.insert(tcx, this_fi) {
let existing_decl_ty = tcx.type_of(tcx.hir().local_def_id(existing_hid));
- let this_decl_ty = tcx.type_of(tcx.hir().local_def_id(this_fi.hir_id));
+ let this_decl_ty = tcx.type_of(this_fi.def_id);
debug!(
"ClashingExternDeclarations: Comparing existing {:?}: {:?} to this {:?}: {:?}",
- existing_hid, existing_decl_ty, this_fi.hir_id, this_decl_ty
+ existing_hid, existing_decl_ty, this_fi.def_id, this_decl_ty
);
// Check that the declarations match.
if !Self::structurally_same_type(
@@ -2916,7 +2927,7 @@
// Finally, emit the diagnostic.
tcx.struct_span_lint_hir(
CLASHING_EXTERN_DECLARATIONS,
- this_fi.hir_id,
+ this_fi.hir_id(),
get_relevant_span(this_fi),
|lint| {
let mut expected_str = DiagnosticStyledString::new();
diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs
index 58a9064..c9de85a 100644
--- a/compiler/rustc_lint/src/context.rs
+++ b/compiler/rustc_lint/src/context.rs
@@ -21,7 +21,9 @@
use rustc_ast as ast;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync;
-use rustc_errors::{add_elided_lifetime_in_path_suggestion, struct_span_err, Applicability};
+use rustc_errors::{
+ add_elided_lifetime_in_path_suggestion, struct_span_err, Applicability, SuggestionStyle,
+};
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::def_id::{CrateNum, DefId};
@@ -32,7 +34,8 @@
use rustc_middle::ty::layout::{LayoutError, TyAndLayout};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, print::Printer, subst::GenericArg, Ty, TyCtxt};
-use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_serialize::json::Json;
+use rustc_session::lint::{BuiltinLintDiagnostics, ExternDepSpec};
use rustc_session::lint::{FutureIncompatibleInfo, Level, Lint, LintBuffer, LintId};
use rustc_session::Session;
use rustc_session::SessionLintStore;
@@ -86,6 +89,7 @@
}
/// The target of the `by_name` map, which accounts for renaming/deprecation.
+#[derive(Debug)]
enum TargetLint {
/// A direct lint target
Id(LintId),
@@ -96,6 +100,11 @@
/// Lint with this name existed previously, but has been removed/deprecated.
/// The string argument is the reason for removal.
Removed(String),
+
+ /// A lint name that should give no warnings and have no effect.
+ ///
+ /// This is used by rustc to avoid warning about old rustdoc lints before rustdoc registers them as tool lints.
+ Ignored,
}
pub enum FindLintError {
@@ -262,6 +271,33 @@
}
}
+ /// This lint should be available with either the old or the new name.
+ ///
+ /// Using the old name will not give a warning.
+ /// You must register a lint with the new name before calling this function.
+ #[track_caller]
+ pub fn register_alias(&mut self, old_name: &str, new_name: &str) {
+ let target = match self.by_name.get(new_name) {
+ Some(&Id(lint_id)) => lint_id,
+ _ => bug!("cannot add alias {} for lint {} that does not exist", old_name, new_name),
+ };
+ match self.by_name.insert(old_name.to_string(), Id(target)) {
+ None | Some(Ignored) => {}
+ Some(x) => bug!("duplicate specification of lint {} (was {:?})", old_name, x),
+ }
+ }
+
+ /// This lint should give no warning and have no effect.
+ ///
+ /// This is used by rustc to avoid warning about old rustdoc lints before rustdoc registers them as tool lints.
+ #[track_caller]
+ pub fn register_ignored(&mut self, name: &str) {
+ if self.by_name.insert(name.to_string(), Ignored).is_some() {
+ bug!("duplicate specification of lint {}", name);
+ }
+ }
+
+ /// This lint has been renamed; warn about using the new name and apply the lint.
#[track_caller]
pub fn register_renamed(&mut self, old_name: &str, new_name: &str) {
let target = match self.by_name.get(new_name) {
@@ -280,6 +316,7 @@
Some(&Id(lint_id)) => Ok(vec![lint_id]),
Some(&Renamed(_, lint_id)) => Ok(vec![lint_id]),
Some(&Removed(_)) => Err(FindLintError::Removed),
+ Some(&Ignored) => Ok(vec![]),
None => loop {
return match self.lint_groups.get(lint_name) {
Some(LintGroup { lint_ids, depr, .. }) => {
@@ -423,6 +460,7 @@
}
},
Some(&Id(ref id)) => CheckLintNameResult::Ok(slice::from_ref(id)),
+ Some(&Ignored) => CheckLintNameResult::Ok(&[]),
}
}
@@ -467,7 +505,10 @@
Some(&Id(ref id)) => {
CheckLintNameResult::Tool(Err((Some(slice::from_ref(id)), complete_name)))
}
- _ => CheckLintNameResult::NoLint(None),
+ Some(other) => {
+ tracing::debug!("got renamed lint {:?}", other);
+ CheckLintNameResult::NoLint(None)
+ }
}
}
}
@@ -636,6 +677,36 @@
db.span_label(span, "ABI should be specified here");
db.help(&format!("the default ABI is {}", default_abi.name()));
}
+ BuiltinLintDiagnostics::LegacyDeriveHelpers(span) => {
+ db.span_label(span, "the attribute is introduced here");
+ }
+ BuiltinLintDiagnostics::ExternDepSpec(krate, loc) => {
+ let json = match loc {
+ ExternDepSpec::Json(json) => {
+ db.help(&format!("remove unnecessary dependency `{}`", krate));
+ json
+ }
+ ExternDepSpec::Raw(raw) => {
+ db.help(&format!("remove unnecessary dependency `{}` at `{}`", krate, raw));
+ db.span_suggestion_with_style(
+ DUMMY_SP,
+ "raw extern location",
+ raw.clone(),
+ Applicability::Unspecified,
+ SuggestionStyle::CompletelyHidden,
+ );
+ Json::String(raw)
+ }
+ };
+ db.tool_only_suggestion_with_metadata(
+ "json extern location",
+ Applicability::Unspecified,
+ json
+ );
+ }
+ BuiltinLintDiagnostics::ProcMacroBackCompat(note) => {
+ db.note(&note);
+ }
}
// Rewrap `db`, and pass control to the user.
decorate(LintDiagnosticBuilder::new(db));
@@ -677,7 +748,7 @@
sess,
krate,
lint_store,
- builder: LintLevelsBuilder::new(sess, warn_about_weird_lints, lint_store),
+ builder: LintLevelsBuilder::new(sess, warn_about_weird_lints, lint_store, &krate.attrs),
buffered,
}
}
diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs
index 231edf4..647ecad 100644
--- a/compiler/rustc_lint/src/early.rs
+++ b/compiler/rustc_lint/src/early.rs
@@ -18,7 +18,7 @@
use crate::passes::{EarlyLintPass, EarlyLintPassObject};
use rustc_ast as ast;
use rustc_ast::visit as ast_visit;
-use rustc_attr::HasAttrs;
+use rustc_ast::AstLike;
use rustc_session::lint::{BufferedEarlyLint, LintBuffer, LintPass};
use rustc_session::Session;
use rustc_span::symbol::Ident;
@@ -163,10 +163,10 @@
run_early_pass!(self, check_struct_def_post, s);
}
- fn visit_struct_field(&mut self, s: &'a ast::StructField) {
+ fn visit_field_def(&mut self, s: &'a ast::FieldDef) {
self.with_lint_attrs(s.id, &s.attrs, |cx| {
- run_early_pass!(cx, check_struct_field, s);
- ast_visit::walk_struct_field(cx, s);
+ run_early_pass!(cx, check_field_def, s);
+ ast_visit::walk_field_def(cx, s);
})
}
@@ -188,13 +188,6 @@
run_early_pass!(self, check_ident, ident);
}
- fn visit_mod(&mut self, m: &'a ast::Mod, s: Span, _a: &[ast::Attribute], n: ast::NodeId) {
- run_early_pass!(self, check_mod, m, s, n);
- self.check_id(n);
- ast_visit::walk_mod(self, m);
- run_early_pass!(self, check_mod_post, m, s, n);
- }
-
fn visit_local(&mut self, l: &'a ast::Local) {
self.with_lint_attrs(l.id, &l.attrs, |cx| {
run_early_pass!(cx, check_local, l);
diff --git a/compiler/rustc_lint/src/internal.rs b/compiler/rustc_lint/src/internal.rs
index 26e536e..9b1a339 100644
--- a/compiler/rustc_lint/src/internal.rs
+++ b/compiler/rustc_lint/src/internal.rs
@@ -283,7 +283,7 @@
impl<'tcx> LateLintPass<'tcx> for ExistingDocKeyword {
fn check_item(&mut self, cx: &LateContext<'_>, item: &rustc_hir::Item<'_>) {
- for attr in item.attrs {
+ for attr in cx.tcx.hir().attrs(item.hir_id()) {
if !attr.has_name(sym::doc) {
continue;
}
diff --git a/compiler/rustc_lint/src/late.rs b/compiler/rustc_lint/src/late.rs
index 3821a39..d325b5f 100644
--- a/compiler/rustc_lint/src/late.rs
+++ b/compiler/rustc_lint/src/late.rs
@@ -16,7 +16,6 @@
use crate::{passes::LateLintPassObject, LateContext, LateLintPass, LintStore};
use rustc_ast as ast;
-use rustc_ast::walk_list;
use rustc_data_structures::sync::{join, par_iter, ParallelIterator};
use rustc_hir as hir;
use rustc_hir::def_id::{LocalDefId, LOCAL_CRATE};
@@ -53,10 +52,11 @@
/// Merge the lints specified by any lint attributes into the
/// current lint context, call the provided function, then reset the
/// lints in effect to their previous state.
- fn with_lint_attrs<F>(&mut self, id: hir::HirId, attrs: &'tcx [ast::Attribute], f: F)
+ fn with_lint_attrs<F>(&mut self, id: hir::HirId, f: F)
where
F: FnOnce(&mut Self),
{
+ let attrs = self.context.tcx.hir().attrs(id);
let prev = self.context.last_node_with_lint_attrs;
self.context.last_node_with_lint_attrs = id;
self.enter_attrs(attrs);
@@ -125,7 +125,7 @@
}
fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
- self.with_lint_attrs(param.hir_id, &param.attrs, |cx| {
+ self.with_lint_attrs(param.hir_id, |cx| {
lint_callback!(cx, check_param, param);
hir_visit::walk_param(cx, param);
});
@@ -142,8 +142,8 @@
self.context.generics = it.kind.generics();
let old_cached_typeck_results = self.context.cached_typeck_results.take();
let old_enclosing_body = self.context.enclosing_body.take();
- self.with_lint_attrs(it.hir_id, &it.attrs, |cx| {
- cx.with_param_env(it.hir_id, |cx| {
+ self.with_lint_attrs(it.hir_id(), |cx| {
+ cx.with_param_env(it.hir_id(), |cx| {
lint_callback!(cx, check_item, it);
hir_visit::walk_item(cx, it);
lint_callback!(cx, check_item_post, it);
@@ -155,8 +155,8 @@
}
fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem<'tcx>) {
- self.with_lint_attrs(it.hir_id, &it.attrs, |cx| {
- cx.with_param_env(it.hir_id, |cx| {
+ self.with_lint_attrs(it.hir_id(), |cx| {
+ cx.with_param_env(it.hir_id(), |cx| {
lint_callback!(cx, check_foreign_item, it);
hir_visit::walk_foreign_item(cx, it);
lint_callback!(cx, check_foreign_item_post, it);
@@ -170,7 +170,7 @@
}
fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
- self.with_lint_attrs(e.hir_id, &e.attrs, |cx| {
+ self.with_lint_attrs(e.hir_id, |cx| {
lint_callback!(cx, check_expr, e);
hir_visit::walk_expr(cx, e);
lint_callback!(cx, check_expr_post, e);
@@ -178,11 +178,9 @@
}
fn visit_stmt(&mut self, s: &'tcx hir::Stmt<'tcx>) {
- let get_item = |id: hir::ItemId| self.context.tcx.hir().item(id.id);
- let attrs = &s.kind.attrs(get_item);
// See `EarlyContextAndPass::visit_stmt` for an explanation
// of why we call `walk_stmt` outside of `with_lint_attrs`
- self.with_lint_attrs(s.hir_id, attrs, |cx| {
+ self.with_lint_attrs(s.hir_id, |cx| {
lint_callback!(cx, check_stmt, s);
});
hir_visit::walk_stmt(self, s);
@@ -221,10 +219,10 @@
lint_callback!(self, check_struct_def_post, s);
}
- fn visit_struct_field(&mut self, s: &'tcx hir::StructField<'tcx>) {
- self.with_lint_attrs(s.hir_id, &s.attrs, |cx| {
- lint_callback!(cx, check_struct_field, s);
- hir_visit::walk_struct_field(cx, s);
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
+ self.with_lint_attrs(s.hir_id, |cx| {
+ lint_callback!(cx, check_field_def, s);
+ hir_visit::walk_field_def(cx, s);
})
}
@@ -234,7 +232,7 @@
g: &'tcx hir::Generics<'tcx>,
item_id: hir::HirId,
) {
- self.with_lint_attrs(v.id, &v.attrs, |cx| {
+ self.with_lint_attrs(v.id, |cx| {
lint_callback!(cx, check_variant, v);
hir_visit::walk_variant(cx, v, g, item_id);
lint_callback!(cx, check_variant_post, v);
@@ -257,7 +255,7 @@
}
fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
- self.with_lint_attrs(l.hir_id, &l.attrs, |cx| {
+ self.with_lint_attrs(l.hir_id, |cx| {
lint_callback!(cx, check_local, l);
hir_visit::walk_local(cx, l);
})
@@ -301,8 +299,8 @@
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
let generics = self.context.generics.take();
self.context.generics = Some(&trait_item.generics);
- self.with_lint_attrs(trait_item.hir_id, &trait_item.attrs, |cx| {
- cx.with_param_env(trait_item.hir_id, |cx| {
+ self.with_lint_attrs(trait_item.hir_id(), |cx| {
+ cx.with_param_env(trait_item.hir_id(), |cx| {
lint_callback!(cx, check_trait_item, trait_item);
hir_visit::walk_trait_item(cx, trait_item);
lint_callback!(cx, check_trait_item_post, trait_item);
@@ -314,8 +312,8 @@
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
let generics = self.context.generics.take();
self.context.generics = Some(&impl_item.generics);
- self.with_lint_attrs(impl_item.hir_id, &impl_item.attrs, |cx| {
- cx.with_param_env(impl_item.hir_id, |cx| {
+ self.with_lint_attrs(impl_item.hir_id(), |cx| {
+ cx.with_param_env(impl_item.hir_id(), |cx| {
lint_callback!(cx, check_impl_item, impl_item);
hir_visit::walk_impl_item(cx, impl_item);
lint_callback!(cx, check_impl_item_post, impl_item);
@@ -334,8 +332,10 @@
hir_visit::walk_path(self, p);
}
- fn visit_attribute(&mut self, attr: &'tcx ast::Attribute) {
- lint_callback!(self, check_attribute, attr);
+ fn visit_attribute(&mut self, hir_id: hir::HirId, attr: &'tcx ast::Attribute) {
+ self.with_lint_attrs(hir_id, |cx| {
+ lint_callback!(cx, check_attribute, attr);
+ })
}
}
@@ -396,7 +396,9 @@
// Visit the crate attributes
if hir_id == hir::CRATE_HIR_ID {
- walk_list!(cx, visit_attribute, tcx.hir().attrs(hir::CRATE_HIR_ID));
+ for attr in tcx.hir().attrs(hir::CRATE_HIR_ID).iter() {
+ cx.visit_attribute(hir_id, attr)
+ }
}
}
@@ -440,7 +442,7 @@
let mut cx = LateContextAndPass { context, pass };
// Visit the whole crate.
- cx.with_lint_attrs(hir::CRATE_HIR_ID, &krate.item.attrs, |cx| {
+ cx.with_lint_attrs(hir::CRATE_HIR_ID, |cx| {
// since the root module isn't visited as an item (because it isn't an
// item), warn for it here.
lint_callback!(cx, check_crate, krate);
@@ -496,7 +498,7 @@
tcx.sess.time("module_lints", || {
// Run per-module lints
par_iter(&tcx.hir().krate().modules).for_each(|(&module, _)| {
- tcx.ensure().lint_mod(tcx.hir().local_def_id(module));
+ tcx.ensure().lint_mod(module);
});
});
},
diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs
index 1fc2bd0..a332c30 100644
--- a/compiler/rustc_lint/src/levels.rs
+++ b/compiler/rustc_lint/src/levels.rs
@@ -1,13 +1,12 @@
use crate::context::{CheckLintNameResult, LintStore};
use crate::late::unerased_lint_store;
use rustc_ast as ast;
-use rustc_ast::attr;
use rustc_ast::unwrap_or;
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder};
use rustc_hir as hir;
-use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::{intravisit, HirId};
use rustc_middle::hir::map::Map;
use rustc_middle::lint::LevelAndSource;
@@ -32,16 +31,17 @@
fn lint_levels(tcx: TyCtxt<'_>, cnum: CrateNum) -> LintLevelMap {
assert_eq!(cnum, LOCAL_CRATE);
let store = unerased_lint_store(tcx);
- let levels = LintLevelsBuilder::new(tcx.sess, false, &store);
+ let crate_attrs = tcx.get_attrs(DefId { krate: cnum, index: CRATE_DEF_INDEX });
+ let levels = LintLevelsBuilder::new(tcx.sess, false, &store, crate_attrs);
let mut builder = LintLevelMapBuilder { levels, tcx, store };
let krate = tcx.hir().krate();
builder.levels.id_to_set.reserve(krate.exported_macros.len() + 1);
- let push = builder.levels.push(&krate.item.attrs, &store, true);
+ let push = builder.levels.push(tcx.hir().attrs(hir::CRATE_HIR_ID), &store, true);
builder.levels.register_id(hir::CRATE_HIR_ID);
for macro_def in krate.exported_macros {
- builder.levels.register_id(macro_def.hir_id);
+ builder.levels.register_id(macro_def.hir_id());
}
intravisit::walk_crate(&mut builder, krate);
builder.levels.pop(push);
@@ -56,6 +56,7 @@
cur: u32,
warn_about_weird_lints: bool,
store: &'s LintStore,
+ crate_attrs: &'s [ast::Attribute],
}
pub struct BuilderPush {
@@ -64,7 +65,12 @@
}
impl<'s> LintLevelsBuilder<'s> {
- pub fn new(sess: &'s Session, warn_about_weird_lints: bool, store: &'s LintStore) -> Self {
+ pub fn new(
+ sess: &'s Session,
+ warn_about_weird_lints: bool,
+ store: &'s LintStore,
+ crate_attrs: &'s [ast::Attribute],
+ ) -> Self {
let mut builder = LintLevelsBuilder {
sess,
sets: LintLevelSets::new(),
@@ -72,6 +78,7 @@
id_to_set: Default::default(),
warn_about_weird_lints,
store,
+ crate_attrs,
};
builder.process_command_line(sess, store);
assert_eq!(builder.sets.list.len(), 1);
@@ -304,15 +311,22 @@
};
let tool_name = if meta_item.path.segments.len() > 1 {
let tool_ident = meta_item.path.segments[0].ident;
- if !attr::is_known_lint_tool(tool_ident) {
- struct_span_err!(
+ if !is_known_lint_tool(tool_ident.name, sess, &self.crate_attrs) {
+ let mut err = struct_span_err!(
sess,
tool_ident.span,
E0710,
- "an unknown tool name found in scoped lint: `{}`",
+ "unknown tool name `{}` found in scoped lint: `{}`",
+ tool_ident.name,
pprust::path_to_string(&meta_item.path),
- )
- .emit();
+ );
+ if sess.is_nightly_build() {
+ err.help(&format!(
+ "add `#![register_tool({})]` to the crate root",
+ tool_ident.name
+ ));
+ }
+ err.emit();
continue;
}
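// Editor's note: an illustrative sketch, not part of the patch, of the reworded E0710
// diagnostic above. The tool name `pedantic` is a hypothetical unregistered tool; on a
// nightly build the new help line points at `#![register_tool(..)]`.
//
//   error[E0710]: unknown tool name `pedantic` found in scoped lint: `pedantic::some_lint`
//   help: add `#![register_tool(pedantic)]` to the crate root
#[allow(pedantic::some_lint)]
fn unregistered_tool_demo() {}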
@@ -321,17 +335,18 @@
None
};
let name = meta_item.path.segments.last().expect("empty lint name").ident.name;
- match store.check_lint_name(&name.as_str(), tool_name) {
+ let lint_result = store.check_lint_name(&name.as_str(), tool_name);
+ match &lint_result {
CheckLintNameResult::Ok(ids) => {
let src = LintLevelSource::Node(name, li.span(), reason);
- for &id in ids {
+ for &id in *ids {
self.check_gated_lint(id, attr.span);
self.insert_spec(&mut specs, id, (level, src));
}
}
CheckLintNameResult::Tool(result) => {
- match result {
+ match *result {
Ok(ids) => {
let complete_name = &format!("{}::{}", tool_name.unwrap(), name);
let src = LintLevelSource::Node(
@@ -343,7 +358,7 @@
self.insert_spec(&mut specs, *id, (level, src));
}
}
- Err((Some(ids), new_lint_name)) => {
+ Err((Some(ids), ref new_lint_name)) => {
let lint = builtin::RENAMED_AND_REMOVED_LINTS;
let (lvl, src) =
self.sets.get_lint_level(lint, self.cur, Some(&specs), &sess);
@@ -392,21 +407,21 @@
CheckLintNameResult::Warning(msg, renamed) => {
let lint = builtin::RENAMED_AND_REMOVED_LINTS;
- let (level, src) =
+ let (renamed_lint_level, src) =
self.sets.get_lint_level(lint, self.cur, Some(&specs), &sess);
struct_lint_level(
self.sess,
lint,
- level,
+ renamed_lint_level,
src,
Some(li.span().into()),
|lint| {
let mut err = lint.build(&msg);
- if let Some(new_name) = renamed {
+ if let Some(new_name) = &renamed {
err.span_suggestion(
li.span(),
"use the new name",
- new_name,
+ new_name.to_string(),
Applicability::MachineApplicable,
);
}
@@ -444,6 +459,22 @@
);
}
}
+ // If this lint was renamed, apply the new lint instead of ignoring the attribute.
+ // This happens outside of the match because the new lint should be applied even if
+ // we don't warn about the name change.
+ if let CheckLintNameResult::Warning(_, Some(new_name)) = lint_result {
+ // Ignore any errors or warnings that happen because the new name is inaccurate
+ if let CheckLintNameResult::Ok(ids) =
+ store.check_lint_name(&new_name, tool_name)
+ {
+ let src =
+ LintLevelSource::Node(Symbol::intern(&new_name), li.span(), reason);
+ for &id in ids {
+ self.check_gated_lint(id, attr.span);
+ self.insert_spec(&mut specs, id, (level, src));
+ }
+ }
+ }
}
}
@@ -542,6 +573,20 @@
}
}
+fn is_known_lint_tool(m_item: Symbol, sess: &Session, attrs: &[ast::Attribute]) -> bool {
+ if [sym::clippy, sym::rustc, sym::rustdoc].contains(&m_item) {
+ return true;
+ }
+ // Look for registered tools
+ // NOTE: does no error handling; error handling is done by rustc_resolve.
+ sess.filter_by_name(attrs, sym::register_tool)
+ .filter_map(|attr| attr.meta_item_list())
+ .flat_map(std::convert::identity)
+ .filter_map(|nested_meta| nested_meta.ident())
+ .map(|ident| ident.name)
+ .any(|name| name == m_item)
+}
+
struct LintLevelMapBuilder<'a, 'tcx> {
levels: LintLevelsBuilder<'tcx>,
tcx: TyCtxt<'tcx>,
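// Editor's note: a companion sketch, not part of the patch, for the `is_known_lint_tool`
// helper added above. Registering the hypothetical tool `my_tool` at the crate root makes
// the check succeed, so tool-scoped lint attributes are accepted instead of causing E0710.
#![feature(register_tool)]
#![register_tool(my_tool)]

#[allow(my_tool::some_lint)] // accepted: `my_tool` is found among the registered tools
fn registered_tool_demo() {}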
@@ -549,11 +594,12 @@
}
impl LintLevelMapBuilder<'_, '_> {
- fn with_lint_attrs<F>(&mut self, id: hir::HirId, attrs: &[ast::Attribute], f: F)
+ fn with_lint_attrs<F>(&mut self, id: hir::HirId, f: F)
where
F: FnOnce(&mut Self),
{
let is_crate_hir = id == hir::CRATE_HIR_ID;
+ let attrs = self.tcx.hir().attrs(id);
let push = self.levels.push(attrs, self.store, is_crate_hir);
if push.changed {
self.levels.register_id(id);
@@ -571,19 +617,19 @@
}
fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
- self.with_lint_attrs(param.hir_id, ¶m.attrs, |builder| {
+ self.with_lint_attrs(param.hir_id, |builder| {
intravisit::walk_param(builder, param);
});
}
fn visit_item(&mut self, it: &'tcx hir::Item<'tcx>) {
- self.with_lint_attrs(it.hir_id, &it.attrs, |builder| {
+ self.with_lint_attrs(it.hir_id(), |builder| {
intravisit::walk_item(builder, it);
});
}
fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem<'tcx>) {
- self.with_lint_attrs(it.hir_id, &it.attrs, |builder| {
+ self.with_lint_attrs(it.hir_id(), |builder| {
intravisit::walk_foreign_item(builder, it);
})
}
@@ -596,14 +642,14 @@
}
fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
- self.with_lint_attrs(e.hir_id, &e.attrs, |builder| {
+ self.with_lint_attrs(e.hir_id, |builder| {
intravisit::walk_expr(builder, e);
})
}
- fn visit_struct_field(&mut self, s: &'tcx hir::StructField<'tcx>) {
- self.with_lint_attrs(s.hir_id, &s.attrs, |builder| {
- intravisit::walk_struct_field(builder, s);
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
+ self.with_lint_attrs(s.hir_id, |builder| {
+ intravisit::walk_field_def(builder, s);
})
}
@@ -613,31 +659,31 @@
g: &'tcx hir::Generics<'tcx>,
item_id: hir::HirId,
) {
- self.with_lint_attrs(v.id, &v.attrs, |builder| {
+ self.with_lint_attrs(v.id, |builder| {
intravisit::walk_variant(builder, v, g, item_id);
})
}
fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
- self.with_lint_attrs(l.hir_id, &l.attrs, |builder| {
+ self.with_lint_attrs(l.hir_id, |builder| {
intravisit::walk_local(builder, l);
})
}
fn visit_arm(&mut self, a: &'tcx hir::Arm<'tcx>) {
- self.with_lint_attrs(a.hir_id, &a.attrs, |builder| {
+ self.with_lint_attrs(a.hir_id, |builder| {
intravisit::walk_arm(builder, a);
})
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
- self.with_lint_attrs(trait_item.hir_id, &trait_item.attrs, |builder| {
+ self.with_lint_attrs(trait_item.hir_id(), |builder| {
intravisit::walk_trait_item(builder, trait_item);
});
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
- self.with_lint_attrs(impl_item.hir_id, &impl_item.attrs, |builder| {
+ self.with_lint_attrs(impl_item.hir_id(), |builder| {
intravisit::walk_impl_item(builder, impl_item);
});
}
diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs
index 638b73c..4c3dbca 100644
--- a/compiler/rustc_lint/src/lib.rs
+++ b/compiler/rustc_lint/src/lib.rs
@@ -57,6 +57,7 @@
mod non_ascii_idents;
mod non_fmt_panic;
mod nonstandard_style;
+mod noop_method_call;
mod passes;
mod redundant_semicolon;
mod traits;
@@ -69,9 +70,7 @@
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::builtin::{
- BARE_TRAIT_OBJECTS, BROKEN_INTRA_DOC_LINKS, ELIDED_LIFETIMES_IN_PATHS,
- EXPLICIT_OUTLIVES_REQUIREMENTS, INVALID_CODEBLOCK_ATTRIBUTES, INVALID_HTML_TAGS,
- MISSING_DOC_CODE_EXAMPLES, NON_AUTOLINKS, PRIVATE_DOC_TESTS,
+ BARE_TRAIT_OBJECTS, ELIDED_LIFETIMES_IN_PATHS, EXPLICIT_OUTLIVES_REQUIREMENTS,
};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::Span;
@@ -83,6 +82,7 @@
use non_ascii_idents::*;
use non_fmt_panic::NonPanicFmt;
use nonstandard_style::*;
+use noop_method_call::*;
use redundant_semicolon::*;
use traits::*;
use types::*;
@@ -170,6 +170,7 @@
DropTraitConstraints: DropTraitConstraints,
TemporaryCStringAsPtr: TemporaryCStringAsPtr,
NonPanicFmt: NonPanicFmt,
+ NoopMethodCall: NoopMethodCall,
]
);
};
@@ -314,17 +315,6 @@
// MACRO_USE_EXTERN_CRATE
);
- add_lint_group!(
- "rustdoc",
- NON_AUTOLINKS,
- BROKEN_INTRA_DOC_LINKS,
- PRIVATE_INTRA_DOC_LINKS,
- INVALID_CODEBLOCK_ATTRIBUTES,
- MISSING_DOC_CODE_EXAMPLES,
- PRIVATE_DOC_TESTS,
- INVALID_HTML_TAGS
- );
-
// Register renamed and removed lints.
store.register_renamed("single_use_lifetime", "single_use_lifetimes");
store.register_renamed("elided_lifetime_in_path", "elided_lifetimes_in_paths");
@@ -334,8 +324,30 @@
store.register_renamed("async_idents", "keyword_idents");
store.register_renamed("exceeding_bitshifts", "arithmetic_overflow");
store.register_renamed("redundant_semicolon", "redundant_semicolons");
- store.register_renamed("intra_doc_link_resolution_failure", "broken_intra_doc_links");
store.register_renamed("overlapping_patterns", "overlapping_range_endpoints");
+
+ // These were moved to tool lints, but rustc still sees them when compiling normally, before
+ // tool lints are registered, so `check_tool_name_for_backwards_compat` doesn't work. Use
+ // `register_removed` explicitly.
+ const RUSTDOC_LINTS: &[&str] = &[
+ "broken_intra_doc_links",
+ "private_intra_doc_links",
+ "missing_crate_level_docs",
+ "missing_doc_code_examples",
+ "private_doc_tests",
+ "invalid_codeblock_attributes",
+ "invalid_html_tags",
+ "non_autolinks",
+ ];
+ for rustdoc_lint in RUSTDOC_LINTS {
+ store.register_ignored(rustdoc_lint);
+ }
+ store.register_removed(
+ "intra_doc_link_resolution_failure",
+ "use `rustdoc::broken_intra_doc_links` instead",
+ );
+ store.register_removed("rustdoc", "use `rustdoc::all` instead");
+
store.register_removed("unknown_features", "replaced by an error");
store.register_removed("unsigned_negation", "replaced by negate_unsigned feature gate");
store.register_removed("negate_unsigned", "cast a signed value instead");
diff --git a/compiler/rustc_lint/src/non_fmt_panic.rs b/compiler/rustc_lint/src/non_fmt_panic.rs
index e98297b..5a27135 100644
--- a/compiler/rustc_lint/src/non_fmt_panic.rs
+++ b/compiler/rustc_lint/src/non_fmt_panic.rs
@@ -69,23 +69,65 @@
let (span, panic) = panic_call(cx, f);
- cx.struct_span_lint(NON_FMT_PANIC, arg.span, |lint| {
+ // Find the span of the argument to `panic!()`, before expansion in the
+ // case of `panic!(some_macro!())`.
+ // We don't use source_callsite(), because this `panic!(..)` might itself
+ // be expanded from another macro, in which case we want to stop at that
+ // expansion.
+ let mut arg_span = arg.span;
+ let mut arg_macro = None;
+ while !span.contains(arg_span) {
+ let expn = arg_span.ctxt().outer_expn_data();
+ if expn.is_root() {
+ break;
+ }
+ arg_macro = expn.macro_def_id;
+ arg_span = expn.call_site;
+ }
+
+ cx.struct_span_lint(NON_FMT_PANIC, arg_span, |lint| {
let mut l = lint.build("panic message is not a string literal");
l.note("this is no longer accepted in Rust 2021");
- if span.contains(arg.span) {
+ if !span.contains(arg_span) {
+ // No clue where this argument is coming from.
+ l.emit();
+ return;
+ }
+ if arg_macro.map_or(false, |id| cx.tcx.is_diagnostic_item(sym::format_macro, id)) {
+ // A case of `panic!(format!(..))`.
+ l.note("the panic!() macro supports formatting, so there's no need for the format!() macro here");
+ if let Some((open, close, _)) = find_delimiters(cx, arg_span) {
+ l.multipart_suggestion(
+ "remove the `format!(..)` macro call",
+ vec![
+ (arg_span.until(open.shrink_to_hi()), "".into()),
+ (close.until(arg_span.shrink_to_hi()), "".into()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ } else {
l.span_suggestion_verbose(
- arg.span.shrink_to_lo(),
+ arg_span.shrink_to_lo(),
"add a \"{}\" format string to Display the message",
"\"{}\", ".into(),
Applicability::MaybeIncorrect,
);
if panic == sym::std_panic_macro {
- l.span_suggestion_verbose(
- span.until(arg.span),
- "or use std::panic::panic_any instead",
- "std::panic::panic_any(".into(),
- Applicability::MachineApplicable,
- );
+ if let Some((open, close, del)) = find_delimiters(cx, span) {
+ l.multipart_suggestion(
+ "or use std::panic::panic_any instead",
+ if del == '(' {
+ vec![(span.until(open), "std::panic::panic_any".into())]
+ } else {
+ vec![
+ (span.until(open.shrink_to_hi()), "std::panic::panic_any(".into()),
+ (close, ")".into()),
+ ]
+ },
+ Applicability::MachineApplicable,
+ );
+ }
}
}
l.emit();
@@ -159,7 +201,7 @@
Some(v) if v.len() == 1 => "panic message contains a brace",
_ => "panic message contains braces",
};
- cx.struct_span_lint(NON_FMT_PANIC, brace_spans.unwrap_or(vec![span]), |lint| {
+ cx.struct_span_lint(NON_FMT_PANIC, brace_spans.unwrap_or_else(|| vec![span]), |lint| {
let mut l = lint.build(msg);
l.note("this message is not used as a format string, but will be in Rust 2021");
if span.contains(arg.span) {
@@ -175,6 +217,19 @@
}
}
+/// Given the span of `some_macro!(args);`, gives the span of `(` and `)`,
+/// and the type of (opening) delimiter used.
+fn find_delimiters<'tcx>(cx: &LateContext<'tcx>, span: Span) -> Option<(Span, Span, char)> {
+ let snippet = cx.sess().parse_sess.source_map().span_to_snippet(span).ok()?;
+ let (open, open_ch) = snippet.char_indices().find(|&(_, c)| "([{".contains(c))?;
+ let close = snippet.rfind(|c| ")]}".contains(c))?;
+ Some((
+ span.from_inner(InnerSpan { start: open, end: open + 1 }),
+ span.from_inner(InnerSpan { start: close, end: close + 1 }),
+ open_ch,
+ ))
+}
+
fn panic_call<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>) -> (Span, Symbol) {
let mut expn = f.span.ctxt().outer_expn_data();
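// Editor's note: a rough before/after sketch, not part of the patch, of the suggestions
// emitted above; `find_delimiters` locates the macro call's delimiters so both rewrites
// can be offered as machine-applicable suggestions.
#[allow(unreachable_code)]
fn panic_suggestions_before(code: i32, err: String) {
    panic!(format!("failed: {}", code)); // suggestion: remove the `format!(..)` macro call
    panic!(err); // suggestion: use std::panic::panic_any instead
}

#[allow(unreachable_code)]
fn panic_suggestions_after(code: i32, err: String) {
    panic!("failed: {}", code);
    std::panic::panic_any(err);
}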
diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs
index 121dde3..be9c6ea 100644
--- a/compiler/rustc_lint/src/nonstandard_style.rs
+++ b/compiler/rustc_lint/src/nonstandard_style.rs
@@ -400,14 +400,15 @@
}
_ => (),
},
- FnKind::ItemFn(ident, _, header, _, attrs) => {
+ FnKind::ItemFn(ident, _, header, _) => {
+ let attrs = cx.tcx.hir().attrs(id);
// Skip foreign-ABI #[no_mangle] functions (Issue #31924)
if header.abi != Abi::Rust && cx.sess().contains_name(attrs, sym::no_mangle) {
return;
}
self.check_snake_case(cx, "function", ident);
}
- FnKind::Closure(_) => (),
+ FnKind::Closure => (),
}
}
@@ -504,8 +505,9 @@
impl<'tcx> LateLintPass<'tcx> for NonUpperCaseGlobals {
fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ let attrs = cx.tcx.hir().attrs(it.hir_id());
match it.kind {
- hir::ItemKind::Static(..) if !cx.sess().contains_name(&it.attrs, sym::no_mangle) => {
+ hir::ItemKind::Static(..) if !cx.sess().contains_name(attrs, sym::no_mangle) => {
NonUpperCaseGlobals::check_upper_case(cx, "static variable", &it.ident);
}
hir::ItemKind::Const(..) => {
diff --git a/compiler/rustc_lint/src/noop_method_call.rs b/compiler/rustc_lint/src/noop_method_call.rs
new file mode 100644
index 0000000..479cc00
--- /dev/null
+++ b/compiler/rustc_lint/src/noop_method_call.rs
@@ -0,0 +1,111 @@
+use crate::context::LintContext;
+use crate::rustc_middle::ty::TypeFoldable;
+use crate::LateContext;
+use crate::LateLintPass;
+use rustc_hir::def::DefKind;
+use rustc_hir::{Expr, ExprKind};
+use rustc_middle::ty;
+use rustc_span::symbol::sym;
+
+declare_lint! {
+ /// The `noop_method_call` lint detects specific calls to noop methods
+ /// such as calling `<&T as Clone>::clone` where `T: !Clone`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// # #![allow(unused)]
+ /// #![warn(noop_method_call)]
+ /// struct Foo;
+ /// let foo = &Foo;
+ /// let clone: &Foo = foo.clone();
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Some method calls are noops, meaning that they do nothing. Usually such methods
+ /// are the result of blanket implementations that happen to create some method invocations
+ /// that end up not doing anything. For instance, `Clone` is implemented on all `&T`, but
+ /// calling `clone` on a `&T` where `T` does not implement `Clone` actually does nothing,
+ /// as shared references are `Copy`. This lint detects these calls and warns the user about them.
+ pub NOOP_METHOD_CALL,
+ Allow,
+ "detects the use of well-known noop methods"
+}
+
+declare_lint_pass!(NoopMethodCall => [NOOP_METHOD_CALL]);
+
+impl<'tcx> LateLintPass<'tcx> for NoopMethodCall {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
+ // We only care about method calls.
+ let (call, elements) = match expr.kind {
+ ExprKind::MethodCall(call, _, elements, _) => (call, elements),
+ _ => return,
+ };
+ // We only care about method calls corresponding to the `Clone`, `Deref` and `Borrow`
+ // traits and ignore any other method call.
+ let (trait_id, did) = match cx.typeck_results().type_dependent_def(expr.hir_id) {
+ // Verify we are dealing with a method/associated function.
+ Some((DefKind::AssocFn, did)) => match cx.tcx.trait_of_item(did) {
+ // Check that we're dealing with a trait method for one of the traits we care about.
+ Some(trait_id)
+ if [sym::Clone, sym::Deref, sym::Borrow]
+ .iter()
+ .any(|s| cx.tcx.is_diagnostic_item(*s, trait_id)) =>
+ {
+ (trait_id, did)
+ }
+ _ => return,
+ },
+ _ => return,
+ };
+ let substs = cx.typeck_results().node_substs(expr.hir_id);
+ if substs.needs_subst() {
+ // We can't resolve on types that require monomorphization, so we don't handle them if
+ // we need to perform substitution.
+ return;
+ }
+ let param_env = cx.tcx.param_env(trait_id);
+ // Resolve the trait method instance.
+ let i = match ty::Instance::resolve(cx.tcx, param_env, did, substs) {
+ Ok(Some(i)) => i,
+ _ => return,
+ };
+ // (Re)check that it implements the noop diagnostic.
+ for s in [sym::noop_method_clone, sym::noop_method_deref, sym::noop_method_borrow].iter() {
+ if cx.tcx.is_diagnostic_item(*s, i.def_id()) {
+ let method = &call.ident.name;
+ let receiver = &elements[0];
+ let receiver_ty = cx.typeck_results().expr_ty(receiver);
+ let expr_ty = cx.typeck_results().expr_ty_adjusted(expr);
+ if receiver_ty != expr_ty {
+ // This lint will only trigger if the receiver type and resulting expression
+ // type are the same, implying that the method call is unnecessary.
+ return;
+ }
+ let expr_span = expr.span;
+ let note = format!(
+ "the type `{:?}` which `{}` is being called on is the same as \
+ the type returned from `{}`, so the method call does not do \
+ anything and can be removed",
+ receiver_ty, method, method,
+ );
+
+ let span = expr_span.with_lo(receiver.span.hi());
+ cx.struct_span_lint(NOOP_METHOD_CALL, span, |lint| {
+ let method = &call.ident.name;
+ let message = format!(
+ "call to `.{}()` on a reference in this situation does nothing",
+ &method,
+ );
+ lint.build(&message)
+ .span_label(span, "unnecessary method call")
+ .note(¬e)
+ .emit()
+ });
+ }
+ }
+ }
+}
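// Editor's note: a hedged usage sketch, not part of the patch, of what the pass above
// flags. `Foo` does not implement `Clone`, so `foo.clone()` resolves to the blanket
// `impl Clone for &T` and merely copies the reference.
#![warn(noop_method_call)]

struct Foo;

fn noop_clone_demo() {
    let foo = &Foo;
    let same: &Foo = foo.clone(); // warning: this call does nothing and can be removed
    let _ = same;
}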
diff --git a/compiler/rustc_lint/src/passes.rs b/compiler/rustc_lint/src/passes.rs
index 828f283..bbe17dc 100644
--- a/compiler/rustc_lint/src/passes.rs
+++ b/compiler/rustc_lint/src/passes.rs
@@ -57,7 +57,7 @@
fn check_impl_item_post(a: &$hir hir::ImplItem<$hir>);
fn check_struct_def(a: &$hir hir::VariantData<$hir>);
fn check_struct_def_post(a: &$hir hir::VariantData<$hir>);
- fn check_struct_field(a: &$hir hir::StructField<$hir>);
+ fn check_field_def(a: &$hir hir::FieldDef<$hir>);
fn check_variant(a: &$hir hir::Variant<$hir>);
fn check_variant_post(a: &$hir hir::Variant<$hir>);
fn check_lifetime(a: &$hir hir::Lifetime);
@@ -160,8 +160,6 @@
fn check_ident(a: Ident);
fn check_crate(a: &ast::Crate);
fn check_crate_post(a: &ast::Crate);
- fn check_mod(a: &ast::Mod, b: Span, c: ast::NodeId);
- fn check_mod_post(a: &ast::Mod, b: Span, c: ast::NodeId);
fn check_foreign_item(a: &ast::ForeignItem);
fn check_foreign_item_post(a: &ast::ForeignItem);
fn check_item(a: &ast::Item);
@@ -195,7 +193,7 @@
fn check_impl_item_post(a: &ast::AssocItem);
fn check_struct_def(a: &ast::VariantData);
fn check_struct_def_post(a: &ast::VariantData);
- fn check_struct_field(a: &ast::StructField);
+ fn check_field_def(a: &ast::FieldDef);
fn check_variant(a: &ast::Variant);
fn check_variant_post(a: &ast::Variant);
fn check_lifetime(a: &ast::Lifetime);
diff --git a/compiler/rustc_lint/src/traits.rs b/compiler/rustc_lint/src/traits.rs
index b031c11..e632f29 100644
--- a/compiler/rustc_lint/src/traits.rs
+++ b/compiler/rustc_lint/src/traits.rs
@@ -47,8 +47,7 @@
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
use rustc_middle::ty::PredicateKind::*;
- let def_id = cx.tcx.hir().local_def_id(item.hir_id);
- let predicates = cx.tcx.explicit_predicates_of(def_id);
+ let predicates = cx.tcx.explicit_predicates_of(item.def_id);
for &(predicate, span) in predicates.predicates {
let trait_predicate = match predicate.kind().skip_binder() {
Trait(trait_predicate, _constness) => trait_predicate,
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 1e879d2..2d311cc3 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -217,7 +217,11 @@
cx.struct_span_lint(OVERFLOWING_LITERALS, expr.span, |lint| {
let (t, actually) = match ty {
attr::IntType::SignedInt(t) => {
- let actually = size.sign_extend(val) as i128;
+ let actually = if negative {
+ -(size.sign_extend(val) as i128)
+ } else {
+ size.sign_extend(val) as i128
+ };
(t.name_str(), actually.to_string())
}
attr::IntType::UnsignedInt(t) => {
@@ -225,12 +229,23 @@
(t.name_str(), actually.to_string())
}
};
- let mut err = lint.build(&format!("literal out of range for {}", t));
- err.note(&format!(
- "the literal `{}` (decimal `{}`) does not fit into \
- the type `{}` and will become `{}{}`",
- repr_str, val, t, actually, t
- ));
+ let mut err = lint.build(&format!("literal out of range for `{}`", t));
+ if negative {
+ // If the value is negative, emit a note about the value itself,
+ // separate from the note about the literal.
+ err.note(&format!(
+ "the literal `{}` (decimal `{}`) does not fit into \
+ the type `{}`",
+ repr_str, val, t
+ ));
+ err.note(&format!("and the value `-{}` will become `{}{}`", repr_str, actually, t));
+ } else {
+ err.note(&format!(
+ "the literal `{}` (decimal `{}`) does not fit into \
+ the type `{}` and will become `{}{}`",
+ repr_str, val, t, actually, t
+ ));
+ }
if let Some(sugg_ty) =
get_type_suggestion(&cx.typeck_results().node_type(expr.hir_id), val, negative)
{
@@ -238,12 +253,12 @@
let (sans_suffix, _) = repr_str.split_at(pos);
err.span_suggestion(
expr.span,
- &format!("consider using `{}` instead", sugg_ty),
+ &format!("consider using the type `{}` instead", sugg_ty),
format!("{}{}", sans_suffix, sugg_ty),
Applicability::MachineApplicable,
);
} else {
- err.help(&format!("consider using `{}` instead", sugg_ty));
+ err.help(&format!("consider using the type `{}` instead", sugg_ty));
}
}
err.emit();
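// Editor's note: an illustrative case, not part of the patch, for the negative branch
// added above: the note about the literal and the note about the wrapped value are now
// emitted separately for negated binary/hex/octal literals.
#[allow(unused_variables)]
fn negative_hex_overflow_demo() {
    // deny-by-default lint: the literal `0x81` (decimal `129`) does not fit into `i8`,
    // and the value `-0x81` will become `127i8`
    let x: i8 = -0x81;
}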
@@ -338,18 +353,23 @@
}
cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
- lint.build(&format!("literal out of range for `{}`", t.name_str()))
- .note(&format!(
- "the literal `{}` does not fit into the type `{}` whose range is `{}..={}`",
- cx.sess()
- .source_map()
- .span_to_snippet(lit.span)
- .expect("must get snippet from literal"),
- t.name_str(),
- min,
- max,
- ))
- .emit();
+ let mut err = lint.build(&format!("literal out of range for `{}`", t.name_str()));
+ err.note(&format!(
+ "the literal `{}` does not fit into the type `{}` whose range is `{}..={}`",
+ cx.sess()
+ .source_map()
+ .span_to_snippet(lit.span)
+ .expect("must get snippet from literal"),
+ t.name_str(),
+ min,
+ max,
+ ));
+ if let Some(sugg_ty) =
+ get_type_suggestion(&cx.typeck_results().node_type(e.hir_id), v, negative)
+ {
+ err.help(&format!("consider using the type `{}` instead", sugg_ty));
+ }
+ err.emit();
});
}
}
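// Editor's note: a hedged example, not part of the patch, of the type-suggestion help that
// this branch of the lint now emits as well.
#[allow(unused_variables)]
fn int_overflow_demo() {
    // deny-by-default lint: literal out of range for `i8` (range `-128..=127`)
    // help: consider using the type `u8` instead
    let x: i8 = 128;
}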
@@ -472,7 +492,7 @@
impl<'tcx> LateLintPass<'tcx> for TypeLimits {
fn check_expr(&mut self, cx: &LateContext<'tcx>, e: &'tcx hir::Expr<'tcx>) {
match e.kind {
- hir::ExprKind::Unary(hir::UnOp::UnNeg, ref expr) => {
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => {
// propagate negation, if the negation itself isn't negated
if self.negated_expr_id != Some(e.hir_id) {
self.negated_expr_id = Some(expr.hir_id);
@@ -672,7 +692,7 @@
}
/// Is type known to be non-null?
-crate fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
+fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
let tcx = cx.tcx;
match ty.kind() {
ty::FnPtr(_) => true,
@@ -685,6 +705,12 @@
return true;
}
+ // Types with a `#[repr(no_niche)]` attribute have their niche hidden.
+ // The attribute is used by the UnsafeCell for example (the only use so far).
+ if def.repr.hide_niche() {
+ return false;
+ }
+
for variant in &def.variants {
if let Some(field) = transparent_newtype_field(cx.tcx, variant) {
if ty_is_known_nonnull(cx, field.ty(tcx, substs), mode) {
@@ -1256,15 +1282,15 @@
impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations {
fn check_foreign_item(&mut self, cx: &LateContext<'_>, it: &hir::ForeignItem<'_>) {
let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration };
- let abi = cx.tcx.hir().get_foreign_abi(it.hir_id);
+ let abi = cx.tcx.hir().get_foreign_abi(it.hir_id());
if !vis.is_internal_abi(abi) {
match it.kind {
hir::ForeignItemKind::Fn(ref decl, _, _) => {
- vis.check_foreign_fn(it.hir_id, decl);
+ vis.check_foreign_fn(it.hir_id(), decl);
}
hir::ForeignItemKind::Static(ref ty, _) => {
- vis.check_foreign_static(it.hir_id, ty.span);
+ vis.check_foreign_static(it.hir_id(), ty.span);
}
hir::ForeignItemKind::Type => (),
}
@@ -1302,8 +1328,7 @@
impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
- let item_def_id = cx.tcx.hir().local_def_id(it.hir_id);
- let t = cx.tcx.type_of(item_def_id);
+ let t = cx.tcx.type_of(it.def_id);
let ty = cx.tcx.erase_regions(t);
let layout = match cx.layout_of(ty) {
Ok(layout) => layout,
diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs
index b611aeb..67946df 100644
--- a/compiler/rustc_lint/src/unused.rs
+++ b/compiler/rustc_lint/src/unused.rs
@@ -406,6 +406,8 @@
if !cx.sess().is_attr_used(attr) {
debug!("emitting warning for: {:?}", attr);
cx.struct_span_lint(UNUSED_ATTRIBUTES, attr.span, |lint| {
+ // Mark as used to avoid duplicate warnings.
+ cx.sess().mark_attr_used(attr);
lint.build("unused attribute").emit()
});
// Is it a builtin attribute that must be used at the crate level?
@@ -602,7 +604,7 @@
use rustc_ast::ExprKind::*;
let (value, ctx, followed_by_block, left_pos, right_pos) = match e.kind {
// Do not lint `unused_braces` in `if let` expressions.
- If(ref cond, ref block, ..)
+ If(ref cond, ref block, _)
if !matches!(cond.kind, Let(_, _)) || Self::LINT_EXPR_IN_PATTERN_MATCHING_CTX =>
{
let left = e.span.lo() + rustc_span::BytePos(2);
@@ -816,8 +818,33 @@
impl EarlyLintPass for UnusedParens {
fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
- if let ExprKind::Let(ref pat, ..) | ExprKind::ForLoop(ref pat, ..) = e.kind {
- self.check_unused_parens_pat(cx, pat, false, false);
+ match e.kind {
+ ExprKind::Let(ref pat, _) | ExprKind::ForLoop(ref pat, ..) => {
+ self.check_unused_parens_pat(cx, pat, false, false);
+ }
+ // We ignore parens in cases like `if (((let Some(0) = Some(1))))` because we already
+ // handle a hard error for them during AST lowering in `lower_expr_mut`, but we still
+ // want to complain about things like `if let 42 = (42)`.
+ ExprKind::If(ref cond, ref block, ref else_)
+ if matches!(cond.peel_parens().kind, ExprKind::Let(..)) =>
+ {
+ self.check_unused_delims_expr(
+ cx,
+ cond.peel_parens(),
+ UnusedDelimsCtx::LetScrutineeExpr,
+ true,
+ None,
+ None,
+ );
+ for stmt in &block.stmts {
+ <Self as UnusedDelimLint>::check_stmt(self, cx, stmt);
+ }
+ if let Some(e) = else_ {
+ <Self as UnusedDelimLint>::check_expr(self, cx, e);
+ }
+ return;
+ }
+ _ => {}
}
<Self as UnusedDelimLint>::check_expr(self, cx, e)
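// Editor's note: a small sketch, not part of the patch, of the distinction drawn above:
// parentheses wrapping the whole `let` are left to the hard error in AST lowering, while
// parentheses inside an `if let` scrutinee still trigger `unused_parens`.
#![warn(unused_parens)]

fn if_let_parens_demo() {
    if let 42 = (42) { // warning: unnecessary parentheses around `let` scrutinee expression
        println!("matched");
    }
}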
@@ -847,7 +874,7 @@
fn check_stmt(&mut self, cx: &EarlyContext<'_>, s: &ast::Stmt) {
if let StmtKind::Local(ref local) = s.kind {
- self.check_unused_parens_pat(cx, &local.pat, false, false);
+ self.check_unused_parens_pat(cx, &local.pat, true, false);
}
<Self as UnusedDelimLint>::check_stmt(self, cx, s)
diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs
index da62ad3..005c4f9 100644
--- a/compiler/rustc_lint_defs/src/builtin.rs
+++ b/compiler/rustc_lint_defs/src/builtin.rs
@@ -1,15 +1,13 @@
// ignore-tidy-filelength
+
//! Some lints that are built in to the compiler.
//!
//! These are the built-in lints that are emitted direct in the main
//! compiler code, rather than using their own custom pass. Those
//! lints are all available in `rustc_lint::builtin`.
-// ignore-tidy-filelength
-
-use crate::{declare_lint, declare_lint_pass};
+use crate::{declare_lint, declare_lint_pass, FutureBreakage};
use rustc_span::edition::Edition;
-use rustc_span::symbol::sym;
declare_lint! {
/// The `forbidden_lint_groups` lint detects violations of
@@ -1081,6 +1079,7 @@
pub UNALIGNED_REFERENCES,
Allow,
"detects unaligned references to fields of packed structs",
+ report_in_external_macro
}
declare_lint! {
@@ -1815,14 +1814,12 @@
}
declare_lint! {
- /// The `irrefutable_let_patterns` lint detects detects [irrefutable
- /// patterns] in [if-let] and [while-let] statements.
- ///
- ///
+ /// The `irrefutable_let_patterns` lint detects [irrefutable patterns]
+ /// in [`if let`]s, [`while let`]s, and `if let` guards.
///
/// ### Example
///
- /// ```rust
+ /// ```
/// if let _ = 123 {
/// println!("always runs!");
/// }
@@ -1833,7 +1830,7 @@
/// ### Explanation
///
/// There usually isn't a reason to have an irrefutable pattern in an
- /// if-let or while-let statement, because the pattern will always match
+ /// `if let` or `while let` statement, because the pattern will always match
/// successfully. A [`let`] or [`loop`] statement will suffice. However,
/// when generating code with a macro, forbidding irrefutable patterns
/// would require awkward workarounds in situations where the macro
@@ -1844,14 +1841,14 @@
/// See [RFC 2086] for more details.
///
/// [irrefutable patterns]: https://doc.rust-lang.org/reference/patterns.html#refutability
- /// [if-let]: https://doc.rust-lang.org/reference/expressions/if-expr.html#if-let-expressions
- /// [while-let]: https://doc.rust-lang.org/reference/expressions/loop-expr.html#predicate-pattern-loops
+ /// [`if let`]: https://doc.rust-lang.org/reference/expressions/if-expr.html#if-let-expressions
+ /// [`while let`]: https://doc.rust-lang.org/reference/expressions/loop-expr.html#predicate-pattern-loops
/// [`let`]: https://doc.rust-lang.org/reference/statements.html#let-statements
/// [`loop`]: https://doc.rust-lang.org/reference/expressions/loop-expr.html#infinite-loops
/// [RFC 2086]: https://github.com/rust-lang/rfcs/blob/master/text/2086-allow-if-let-irrefutables.md
pub IRREFUTABLE_LET_PATTERNS,
Warn,
- "detects irrefutable patterns in if-let and while-let statements"
+ "detects irrefutable patterns in `if let` and `while let` statements"
}
declare_lint! {
@@ -1878,93 +1875,6 @@
}
declare_lint! {
- /// The `broken_intra_doc_links` lint detects failures in resolving
- /// intra-doc link targets. This is a `rustdoc` only lint, see the
- /// documentation in the [rustdoc book].
- ///
- /// [rustdoc book]: ../../../rustdoc/lints.html#broken_intra_doc_links
- pub BROKEN_INTRA_DOC_LINKS,
- Warn,
- "failures in resolving intra-doc link targets"
-}
-
-declare_lint! {
- /// This is a subset of `broken_intra_doc_links` that warns when linking from
- /// a public item to a private one. This is a `rustdoc` only lint, see the
- /// documentation in the [rustdoc book].
- ///
- /// [rustdoc book]: ../../../rustdoc/lints.html#private_intra_doc_links
- pub PRIVATE_INTRA_DOC_LINKS,
- Warn,
- "linking from a public item to a private one"
-}
-
-declare_lint! {
- /// The `invalid_codeblock_attributes` lint detects code block attributes
- /// in documentation examples that have potentially mis-typed values. This
- /// is a `rustdoc` only lint, see the documentation in the [rustdoc book].
- ///
- /// [rustdoc book]: ../../../rustdoc/lints.html#invalid_codeblock_attributes
- pub INVALID_CODEBLOCK_ATTRIBUTES,
- Warn,
- "codeblock attribute looks a lot like a known one"
-}
-
-declare_lint! {
- /// The `missing_crate_level_docs` lint detects if documentation is
- /// missing at the crate root. This is a `rustdoc` only lint, see the
- /// documentation in the [rustdoc book].
- ///
- /// [rustdoc book]: ../../../rustdoc/lints.html#missing_crate_level_docs
- pub MISSING_CRATE_LEVEL_DOCS,
- Allow,
- "detects crates with no crate-level documentation"
-}
-
-declare_lint! {
- /// The `missing_doc_code_examples` lint detects publicly-exported items
- /// without code samples in their documentation. This is a `rustdoc` only
- /// lint, see the documentation in the [rustdoc book].
- ///
- /// [rustdoc book]: ../../../rustdoc/lints.html#missing_doc_code_examples
- pub MISSING_DOC_CODE_EXAMPLES,
- Allow,
- "detects publicly-exported items without code samples in their documentation"
-}
-
-declare_lint! {
- /// The `private_doc_tests` lint detects code samples in docs of private
- /// items not documented by `rustdoc`. This is a `rustdoc` only lint, see
- /// the documentation in the [rustdoc book].
- ///
- /// [rustdoc book]: ../../../rustdoc/lints.html#private_doc_tests
- pub PRIVATE_DOC_TESTS,
- Allow,
- "detects code samples in docs of private items not documented by rustdoc"
-}
-
-declare_lint! {
- /// The `invalid_html_tags` lint detects invalid HTML tags. This is a
- /// `rustdoc` only lint, see the documentation in the [rustdoc book].
- ///
- /// [rustdoc book]: ../../../rustdoc/lints.html#invalid_html_tags
- pub INVALID_HTML_TAGS,
- Allow,
- "detects invalid HTML tags in doc comments"
-}
-
-declare_lint! {
- /// The `non_autolinks` lint detects when a URL could be written using
- /// only angle brackets. This is a `rustdoc` only lint, see the
- /// documentation in the [rustdoc book].
- ///
- /// [rustdoc book]: ../../../rustdoc/lints.html#non_autolinks
- pub NON_AUTOLINKS,
- Warn,
- "detects URLs that could be written using only angle brackets"
-}
-
-declare_lint! {
/// The `where_clauses_object_safety` lint detects for [object safety] of
/// [where clauses].
///
@@ -2578,16 +2488,11 @@
declare_lint! {
/// The `unsafe_op_in_unsafe_fn` lint detects unsafe operations in unsafe
- /// functions without an explicit unsafe block. This lint only works on
- /// the [**nightly channel**] with the
- /// `#![feature(unsafe_block_in_unsafe_fn)]` feature.
- ///
- /// [**nightly channel**]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+ /// functions without an explicit unsafe block.
///
/// ### Example
///
/// ```rust,compile_fail
- /// #![feature(unsafe_block_in_unsafe_fn)]
/// #![deny(unsafe_op_in_unsafe_fn)]
///
/// unsafe fn foo() {}
@@ -2625,7 +2530,6 @@
pub UNSAFE_OP_IN_UNSAFE_FN,
Allow,
"unsafe operations in unsafe functions without an explicit unsafe block are deprecated",
- @feature_gate = sym::unsafe_block_in_unsafe_fn;
}
declare_lint! {
@@ -2922,6 +2826,52 @@
};
}
+declare_lint! {
+ /// The `legacy_derive_helpers` lint detects derive helper attributes
+ /// that are used before they are introduced.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs extern crate)
+ /// #[serde(rename_all = "camelCase")]
+ /// #[derive(Deserialize)]
+ /// struct S { /* fields */ }
+ /// ```
+ ///
+ /// produces:
+ ///
+ /// ```text
+ /// warning: derive helper attribute is used before it is introduced
+ /// --> $DIR/legacy-derive-helpers.rs:1:3
+ /// |
+ /// 1 | #[serde(rename_all = "camelCase")]
+ /// | ^^^^^
+ /// ...
+ /// 2 | #[derive(Deserialize)]
+ /// | ----------- the attribute is introduced here
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// Attributes like this work for historical reasons, but attribute expansion works in
+ /// left-to-right order in general, so, to resolve `#[serde]`, the compiler has to try to
+ /// "look into the future" at the not-yet-expanded part of the item, but such attempts are
+ /// not always reliable.
+ ///
+ /// To fix the warning, place the helper attribute after its corresponding derive.
+ /// ```rust,ignore (needs extern crate)
+ /// #[derive(Deserialize)]
+ /// #[serde(rename_all = "camelCase")]
+ /// struct S { /* fields */ }
+ /// ```
+ pub LEGACY_DERIVE_HELPERS,
+ Warn,
+ "detects derive helper attributes that are used before they are introduced",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #79202 <https://github.com/rust-lang/rust/issues/79202>",
+ };
+}
+
declare_lint_pass! {
/// Does nothing as a lint pass, but registers some `Lint`s
/// that are used by other parts of the compiler.
@@ -2976,14 +2926,6 @@
ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE,
UNSTABLE_NAME_COLLISIONS,
IRREFUTABLE_LET_PATTERNS,
- BROKEN_INTRA_DOC_LINKS,
- PRIVATE_INTRA_DOC_LINKS,
- INVALID_CODEBLOCK_ATTRIBUTES,
- MISSING_CRATE_LEVEL_DOCS,
- MISSING_DOC_CODE_EXAMPLES,
- INVALID_HTML_TAGS,
- PRIVATE_DOC_TESTS,
- NON_AUTOLINKS,
WHERE_CLAUSES_OBJECT_SAFETY,
PROC_MACRO_DERIVE_RESOLUTION_FALLBACK,
MACRO_USE_EXTERN_CRATE,
@@ -3012,6 +2954,8 @@
MISSING_ABI,
SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
DISJOINT_CAPTURE_DROP_REORDER,
+ LEGACY_DERIVE_HELPERS,
+ PROC_MACRO_BACK_COMPAT,
]
}
@@ -3109,3 +3053,83 @@
Allow,
"No declared ABI for extern declaration"
}
+
+declare_lint! {
+ /// The `invalid_doc_attributes` lint detects when the `#[doc(...)]` is
+ /// misused.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(warnings)]
+ ///
+ /// pub mod submodule {
+ /// #![doc(test(no_crate_inject))]
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Previously, very few checks were being performed on `#[doc(..)]`
+ /// attributes, unlike other attributes. These checks now catch all the
+ /// issues that were previously silently ignored.
+ pub INVALID_DOC_ATTRIBUTES,
+ Warn,
+ "detects invalid `#[doc(...)]` attributes",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #82730 <https://github.com/rust-lang/rust/issues/82730>",
+ edition: None,
+ };
+}
+
+declare_lint! {
+ /// The `proc_macro_back_compat` lint detects uses of old versions of certain
+ /// proc-macro crates, which have hardcoded workarounds in the compiler.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs-dependency)
+ ///
+ /// use time_macros_impl::impl_macros;
+ /// struct Foo;
+ /// impl_macros!(Foo);
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: using an old version of `time-macros-impl`
+ /// ::: $DIR/group-compat-hack.rs:27:5
+ /// |
+ /// LL | impl_macros!(Foo);
+ /// | ------------------ in this macro invocation
+ /// |
+ /// = note: `#[warn(proc_macro_back_compat)]` on by default
+ /// = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ /// = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
+ /// = note: the `time-macros-impl` crate will stop compiling in future versions of Rust. Please update to the latest version of the `time` crate to avoid breakage
+ /// = note: this warning originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info)
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// Eventually, the backwards-compatibility hacks present in the compiler will be removed,
+ /// causing older versions of certain crates to stop compiling.
+ /// This is a [future-incompatible] lint to ease the transition to an error.
+ /// See [issue #83125] for more details.
+ ///
+ /// [issue #83125]: https://github.com/rust-lang/rust/issues/83125
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub PROC_MACRO_BACK_COMPAT,
+ Warn,
+ "detects usage of old versions of certain proc-macro crates",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #83125 <https://github.com/rust-lang/rust/issues/83125>",
+ edition: None,
+ future_breakage: Some(FutureBreakage {
+ date: None
+ })
+ };
+}
diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs
index 9d60a51..400b367 100644
--- a/compiler/rustc_lint_defs/src/lib.rs
+++ b/compiler/rustc_lint_defs/src/lib.rs
@@ -4,6 +4,7 @@
pub use self::Level::*;
use rustc_ast::node_id::{NodeId, NodeMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+use rustc_serialize::json::Json;
use rustc_span::edition::Edition;
use rustc_span::{sym, symbol::Ident, MultiSpan, Span, Symbol};
use rustc_target::spec::abi::Abi;
@@ -239,6 +240,13 @@
}
}
+// Duplicated from rustc_session::config::ExternDepSpec to avoid cyclic dependency
+#[derive(PartialEq)]
+pub enum ExternDepSpec {
+ Json(Json),
+ Raw(String),
+}
+
// This could be a closure, but then implementing derive trait
// becomes hacky (and it gets allocated).
#[derive(PartialEq)]
@@ -256,6 +264,9 @@
MissingAbi(Span, Abi),
UnusedDocComment(Span),
PatternsInFnsWithoutBody(Span, Ident),
+ LegacyDeriveHelpers(Span),
+ ExternDepSpec(String, ExternDepSpec),
+ ProcMacroBackCompat(String),
}
/// Lints that are buffered up early on in the `Session` before the
diff --git a/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
index 25badc3..e97d96e 100644
--- a/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
@@ -8,6 +8,17 @@
using namespace llvm;
+struct LLVMRustCounterMappingRegion {
+ coverage::Counter Count;
+ uint32_t FileID;
+ uint32_t ExpandedFileID;
+ uint32_t LineStart;
+ uint32_t ColumnStart;
+ uint32_t LineEnd;
+ uint32_t ColumnEnd;
+ coverage::CounterMappingRegion::RegionKind Kind;
+};
+
extern "C" void LLVMRustCoverageWriteFilenamesSectionToBuffer(
const char* const Filenames[],
size_t FilenamesLen,
@@ -27,13 +38,22 @@
unsigned NumVirtualFileMappingIDs,
const coverage::CounterExpression *Expressions,
unsigned NumExpressions,
- coverage::CounterMappingRegion *MappingRegions,
+ LLVMRustCounterMappingRegion *RustMappingRegions,
unsigned NumMappingRegions,
RustStringRef BufferOut) {
+ // Convert from FFI representation to LLVM representation.
+ SmallVector<coverage::CounterMappingRegion, 0> MappingRegions;
+ MappingRegions.reserve(NumMappingRegions);
+ for (const auto &Region : makeArrayRef(RustMappingRegions, NumMappingRegions)) {
+ MappingRegions.emplace_back(
+ Region.Count, Region.FileID, Region.ExpandedFileID,
+ Region.LineStart, Region.ColumnStart, Region.LineEnd, Region.ColumnEnd,
+ Region.Kind);
+ }
auto CoverageMappingWriter = coverage::CoverageMappingWriter(
makeArrayRef(VirtualFileMappingIDs, NumVirtualFileMappingIDs),
makeArrayRef(Expressions, NumExpressions),
- makeMutableArrayRef(MappingRegions, NumMappingRegions));
+ MappingRegions);
RawRustStringOstream OS(BufferOut);
CoverageMappingWriter.write(OS);
}
diff --git a/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h b/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h
index 57b8664..f67e067 100644
--- a/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h
+++ b/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h
@@ -85,6 +85,8 @@
ReturnsTwice = 25,
ReadNone = 26,
InaccessibleMemOnly = 27,
+ SanitizeHWAddress = 28,
+ WillReturn = 29,
};
typedef struct OpaqueRustString *RustStringRef;
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index 2264908..51c80cf 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -5,6 +5,7 @@
#include "LLVMWrapper.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
@@ -33,6 +34,7 @@
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
+#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
@@ -133,6 +135,12 @@
return wrap(createThreadSanitizerLegacyPassPass());
}
+extern "C" LLVMPassRef LLVMRustCreateHWAddressSanitizerPass(bool Recover) {
+ const bool CompileKernel = false;
+
+ return wrap(createHWAddressSanitizerLegacyPassPass(CompileKernel, Recover));
+}
+
extern "C" LLVMRustPassKind LLVMRustPassKind(LLVMPassRef RustPass) {
assert(RustPass);
Pass *Pass = unwrap(RustPass);
@@ -676,6 +684,25 @@
PassInstrumentationCallbacks& PIC, void* LlvmSelfProfiler,
LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
+#if LLVM_VERSION_GE(12, 0)
+ PIC.registerBeforeNonSkippedPassCallback([LlvmSelfProfiler, BeforePassCallback](
+ StringRef Pass, llvm::Any Ir) {
+ std::string PassName = Pass.str();
+ std::string IrName = LLVMRustwrappedIrGetName(Ir);
+ BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
+ });
+
+ PIC.registerAfterPassCallback(
+ [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any IR,
+ const PreservedAnalyses &Preserved) {
+ AfterPassCallback(LlvmSelfProfiler);
+ });
+
+ PIC.registerAfterPassInvalidatedCallback(
+ [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, const PreservedAnalyses &Preserved) {
+ AfterPassCallback(LlvmSelfProfiler);
+ });
+#else
PIC.registerBeforePassCallback([LlvmSelfProfiler, BeforePassCallback](
StringRef Pass, llvm::Any Ir) {
std::string PassName = Pass.str();
@@ -693,6 +720,7 @@
[LlvmSelfProfiler, AfterPassCallback](StringRef Pass) {
AfterPassCallback(LlvmSelfProfiler);
});
+#endif
PIC.registerBeforeAnalysisCallback([LlvmSelfProfiler, BeforePassCallback](
StringRef Pass, llvm::Any Ir) {
@@ -722,6 +750,8 @@
bool SanitizeMemoryRecover;
int SanitizeMemoryTrackOrigins;
bool SanitizeThread;
+ bool SanitizeHWAddress;
+ bool SanitizeHWAddressRecover;
};
extern "C" void
@@ -742,17 +772,28 @@
TargetMachine *TM = unwrap(TMRef);
PassBuilder::OptimizationLevel OptLevel = fromRust(OptLevelRust);
- // FIXME: MergeFunctions is not supported by NewPM yet.
- (void) MergeFunctions;
PipelineTuningOptions PTO;
PTO.LoopUnrolling = UnrollLoops;
PTO.LoopInterleaving = UnrollLoops;
PTO.LoopVectorization = LoopVectorize;
PTO.SLPVectorization = SLPVectorize;
+#if LLVM_VERSION_GE(12, 0)
+ PTO.MergeFunctions = MergeFunctions;
+#else
+ // MergeFunctions is not supported by NewPM in older LLVM versions.
+ (void) MergeFunctions;
+#endif
+
+ // FIXME: We may want to expose this as an option.
+ bool DebugPassManager = false;
PassInstrumentationCallbacks PIC;
+#if LLVM_VERSION_GE(12, 0)
+ StandardInstrumentations SI(DebugPassManager);
+#else
StandardInstrumentations SI;
+#endif
SI.registerCallbacks(PIC);
if (LlvmSelfProfiler){
@@ -768,10 +809,12 @@
PGOOpt = PGOOptions(PGOUsePath, "", "", PGOOptions::IRUse);
}
+#if LLVM_VERSION_GE(12, 0)
+ PassBuilder PB(DebugPassManager, TM, PTO, PGOOpt, &PIC);
+#else
PassBuilder PB(TM, PTO, PGOOpt, &PIC);
+#endif
- // FIXME: We may want to expose this as an option.
- bool DebugPassManager = false;
LoopAnalysisManager LAM(DebugPassManager);
FunctionAnalysisManager FAM(DebugPassManager);
CGSCCAnalysisManager CGAM(DebugPassManager);
@@ -793,7 +836,8 @@
// We manually collect pipeline callbacks so we can apply them at O0, where the
// PassBuilder does not create a pipeline.
- std::vector<std::function<void(ModulePassManager &)>> PipelineStartEPCallbacks;
+ std::vector<std::function<void(ModulePassManager &, PassBuilder::OptimizationLevel)>>
+ PipelineStartEPCallbacks;
#if LLVM_VERSION_GE(11, 0)
std::vector<std::function<void(ModulePassManager &, PassBuilder::OptimizationLevel)>>
OptimizerLastEPCallbacks;
@@ -803,9 +847,11 @@
#endif
if (VerifyIR) {
- PipelineStartEPCallbacks.push_back([VerifyIR](ModulePassManager &MPM) {
+ PipelineStartEPCallbacks.push_back(
+ [VerifyIR](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
MPM.addPass(VerifierPass());
- });
+ }
+ );
}
if (SanitizerOptions) {
@@ -823,9 +869,11 @@
);
#else
#if LLVM_VERSION_GE(10, 0)
- PipelineStartEPCallbacks.push_back([Options](ModulePassManager &MPM) {
- MPM.addPass(MemorySanitizerPass(Options));
- });
+ PipelineStartEPCallbacks.push_back(
+ [Options](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(MemorySanitizerPass(Options));
+ }
+ );
#endif
OptimizerLastEPCallbacks.push_back(
[Options](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
@@ -845,9 +893,11 @@
);
#else
#if LLVM_VERSION_GE(10, 0)
- PipelineStartEPCallbacks.push_back([](ModulePassManager &MPM) {
- MPM.addPass(ThreadSanitizerPass());
- });
+ PipelineStartEPCallbacks.push_back(
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(ThreadSanitizerPass());
+ }
+ );
#endif
OptimizerLastEPCallbacks.push_back(
[](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
@@ -870,9 +920,11 @@
}
);
#else
- PipelineStartEPCallbacks.push_back([&](ModulePassManager &MPM) {
- MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
- });
+ PipelineStartEPCallbacks.push_back(
+ [&](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
+ }
+ );
OptimizerLastEPCallbacks.push_back(
[SanitizerOptions](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
FPM.addPass(AddressSanitizerPass(
@@ -881,45 +933,80 @@
}
);
PipelineStartEPCallbacks.push_back(
- [SanitizerOptions](ModulePassManager &MPM) {
+ [SanitizerOptions](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
MPM.addPass(ModuleAddressSanitizerPass(
/*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover));
}
);
#endif
}
+ if (SanitizerOptions->SanitizeHWAddress) {
+#if LLVM_VERSION_GE(11, 0)
+ OptimizerLastEPCallbacks.push_back(
+ [SanitizerOptions](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(HWAddressSanitizerPass(
+ /*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover));
+ }
+ );
+#else
+ PipelineStartEPCallbacks.push_back(
+ [SanitizerOptions](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(HWAddressSanitizerPass(
+ /*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover));
+ }
+ );
+#endif
+ }
}
ModulePassManager MPM(DebugPassManager);
+ bool NeedThinLTOBufferPasses = UseThinLTOBuffers;
if (!NoPrepopulatePasses) {
if (OptLevel == PassBuilder::OptimizationLevel::O0) {
+#if LLVM_VERSION_GE(12, 0)
for (const auto &C : PipelineStartEPCallbacks)
- C(MPM);
+ PB.registerPipelineStartEPCallback(C);
+ for (const auto &C : OptimizerLastEPCallbacks)
+ PB.registerOptimizerLastEPCallback(C);
-#if LLVM_VERSION_GE(11, 0)
+ // Pass false as we manually schedule ThinLTOBufferPasses below.
+ MPM = PB.buildO0DefaultPipeline(OptLevel, /* PreLinkLTO */ false);
+#else
+ for (const auto &C : PipelineStartEPCallbacks)
+ C(MPM, OptLevel);
+
+# if LLVM_VERSION_GE(11, 0)
for (const auto &C : OptimizerLastEPCallbacks)
C(MPM, OptLevel);
-#else
+# else
if (!OptimizerLastEPCallbacks.empty()) {
FunctionPassManager FPM(DebugPassManager);
for (const auto &C : OptimizerLastEPCallbacks)
C(FPM, OptLevel);
MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
}
-#endif
+# endif
MPM.addPass(AlwaysInlinerPass(EmitLifetimeMarkers));
-#if LLVM_VERSION_GE(10, 0)
+# if LLVM_VERSION_GE(10, 0)
if (PGOOpt) {
PB.addPGOInstrPassesForO0(
MPM, DebugPassManager, PGOOpt->Action == PGOOptions::IRInstr,
/*IsCS=*/false, PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile);
}
+# endif
#endif
} else {
+#if LLVM_VERSION_GE(12, 0)
for (const auto &C : PipelineStartEPCallbacks)
PB.registerPipelineStartEPCallback(C);
+#else
+ for (const auto &C : PipelineStartEPCallbacks)
+ PB.registerPipelineStartEPCallback([C, OptLevel](ModulePassManager &MPM) {
+ C(MPM, OptLevel);
+ });
+#endif
if (OptStage != LLVMRustOptStage::PreLinkThinLTO) {
for (const auto &C : OptimizerLastEPCallbacks)
PB.registerOptimizerLastEPCallback(C);
@@ -930,7 +1017,17 @@
MPM = PB.buildPerModuleDefaultPipeline(OptLevel, DebugPassManager);
break;
case LLVMRustOptStage::PreLinkThinLTO:
+#if LLVM_VERSION_GE(12, 0)
+ MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel);
+ // The ThinLTOPreLink pipeline already includes ThinLTOBuffer passes. However, callback
+ // passes may still run afterwards. This means we need to run the buffer passes again.
+ // FIXME: In LLVM 13, the ThinLTOPreLink pipeline also runs OptimizerLastEPCallbacks
+ // before the RequiredLTOPreLinkPasses, in which case we can remove these hacks.
+ if (OptimizerLastEPCallbacks.empty())
+ NeedThinLTOBufferPasses = false;
+#else
MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
+#endif
#if LLVM_VERSION_GE(11, 0)
for (const auto &C : OptimizerLastEPCallbacks)
C(MPM, OptLevel);
@@ -944,21 +1041,34 @@
#endif
break;
case LLVMRustOptStage::PreLinkFatLTO:
+#if LLVM_VERSION_GE(12, 0)
+ MPM = PB.buildLTOPreLinkDefaultPipeline(OptLevel);
+ NeedThinLTOBufferPasses = false;
+#else
MPM = PB.buildLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
+#endif
break;
case LLVMRustOptStage::ThinLTO:
// FIXME: Does it make sense to pass the ModuleSummaryIndex?
// It only seems to be needed for C++ specific optimizations.
+#if LLVM_VERSION_GE(12, 0)
+ MPM = PB.buildThinLTODefaultPipeline(OptLevel, nullptr);
+#else
MPM = PB.buildThinLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
+#endif
break;
case LLVMRustOptStage::FatLTO:
+#if LLVM_VERSION_GE(12, 0)
+ MPM = PB.buildLTODefaultPipeline(OptLevel, nullptr);
+#else
MPM = PB.buildLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
+#endif
break;
}
}
}
- if (UseThinLTOBuffers) {
+ if (NeedThinLTOBufferPasses) {
MPM.addPass(CanonicalizeAliasesPass());
MPM.addPass(NameAnonGlobalPass());
}
@@ -1154,6 +1264,14 @@
unwrap(M)->setPIELevel(PIELevel::Level::Large);
}
+extern "C" void LLVMRustSetModuleCodeModel(LLVMModuleRef M,
+ LLVMRustCodeModel Model) {
+ auto CM = fromRust(Model);
+ if (!CM.hasValue())
+ return;
+ unwrap(M)->setCodeModel(*CM);
+}
+
// Here you'll find an implementation of ThinLTO as used by the Rust compiler
// right now. This ThinLTO support is only enabled on "recent ish" versions of
// LLVM, and otherwise it's just blanket rejected from other compilers.
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index 4118e93..a853659 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -205,6 +205,10 @@
return Attribute::ReadNone;
case InaccessibleMemOnly:
return Attribute::InaccessibleMemOnly;
+ case SanitizeHWAddress:
+ return Attribute::SanitizeHWAddress;
+ case WillReturn:
+ return Attribute::WillReturn;
}
report_fatal_error("bad AttributeKind");
}
@@ -261,6 +265,17 @@
Call->addAttribute(Index, Attr);
}
+extern "C" void LLVMRustAddStructRetCallSiteAttr(LLVMValueRef Instr, unsigned Index,
+ LLVMTypeRef Ty) {
+ CallBase *Call = unwrap<CallBase>(Instr);
+#if LLVM_VERSION_GE(12, 0)
+ Attribute Attr = Attribute::getWithStructRetType(Call->getContext(), unwrap(Ty));
+#else
+ Attribute Attr = Attribute::get(Call->getContext(), Attribute::StructRet);
+#endif
+ Call->addAttribute(Index, Attr);
+}
+
extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index,
LLVMRustAttribute RustAttr) {
Function *A = unwrap<Function>(Fn);
@@ -302,6 +317,17 @@
F->addAttribute(Index, Attr);
}
+extern "C" void LLVMRustAddStructRetAttr(LLVMValueRef Fn, unsigned Index,
+ LLVMTypeRef Ty) {
+ Function *F = unwrap<Function>(Fn);
+#if LLVM_VERSION_GE(12, 0)
+ Attribute Attr = Attribute::getWithStructRetType(F->getContext(), unwrap(Ty));
+#else
+ Attribute Attr = Attribute::get(F->getContext(), Attribute::StructRet);
+#endif
+ F->addAttribute(Index, Attr);
+}
+
extern "C" void LLVMRustAddFunctionAttrStringValue(LLVMValueRef Fn,
unsigned Index,
const char *Name,
@@ -1005,12 +1031,19 @@
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateDebugLocation(unsigned Line, unsigned Column,
- LLVMMetadataRef Scope,
+ LLVMMetadataRef ScopeRef,
LLVMMetadataRef InlinedAt) {
- DebugLoc debug_loc = DebugLoc::get(Line, Column, unwrapDIPtr<MDNode>(Scope),
+#if LLVM_VERSION_GE(12, 0)
+ MDNode *Scope = unwrapDIPtr<MDNode>(ScopeRef);
+ DILocation *Loc = DILocation::get(
+ Scope->getContext(), Line, Column, Scope,
+ unwrapDIPtr<MDNode>(InlinedAt));
+ return wrap(Loc);
+#else
+ DebugLoc debug_loc = DebugLoc::get(Line, Column, unwrapDIPtr<MDNode>(ScopeRef),
unwrapDIPtr<MDNode>(InlinedAt));
-
return wrap(debug_loc.getAsMDNode());
+#endif
}
extern "C" int64_t LLVMRustDIBuilderCreateOpDeref() {
@@ -1261,6 +1294,10 @@
case Type::BFloatTyID:
return LLVMBFloatTypeKind;
#endif
+#if LLVM_VERSION_GE(12, 0)
+ case Type::X86_AMXTyID:
+ return LLVMX86_AMXTypeKind;
+#endif
}
report_fatal_error("Unhandled TypeID.");
}
@@ -1706,11 +1743,23 @@
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFMin(LLVMBuilderRef B, LLVMValueRef Src, bool NoNaN) {
- return wrap(unwrap(B)->CreateFPMinReduce(unwrap(Src), NoNaN));
+#if LLVM_VERSION_GE(12, 0)
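+ // LLVM 12 dropped the `NoNaN` parameter from the reduction builders; the
+ // flag is now set on the returned instruction instead.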
+ Instruction *I = unwrap(B)->CreateFPMinReduce(unwrap(Src));
+ I->setHasNoNaNs(NoNaN);
+ return wrap(I);
+#else
+ return wrap(unwrap(B)->CreateFPMinReduce(unwrap(Src), NoNaN));
+#endif
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFMax(LLVMBuilderRef B, LLVMValueRef Src, bool NoNaN) {
+#if LLVM_VERSION_GE(12, 0)
+ Instruction *I = unwrap(B)->CreateFPMaxReduce(unwrap(Src));
+ I->setHasNoNaNs(NoNaN);
+ return wrap(I);
+#else
return wrap(unwrap(B)->CreateFPMaxReduce(unwrap(Src), NoNaN));
+#endif
}
extern "C" LLVMValueRef
diff --git a/compiler/rustc_macros/src/hash_stable.rs b/compiler/rustc_macros/src/hash_stable.rs
index c955c13..30569f2 100644
--- a/compiler/rustc_macros/src/hash_stable.rs
+++ b/compiler/rustc_macros/src/hash_stable.rs
@@ -74,6 +74,7 @@
s.bound_impl(
quote!(::rustc_data_structures::stable_hasher::HashStable<__CTX>),
quote! {
+ #[inline]
fn hash_stable(
&self,
__hcx: &mut __CTX,
@@ -119,6 +120,7 @@
>
),
quote! {
+ #[inline]
fn hash_stable(
&self,
__hcx: &mut ::rustc_middle::ich::StableHashingContext<'__ctx>,
diff --git a/compiler/rustc_macros/src/query.rs b/compiler/rustc_macros/src/query.rs
index cff8e98..291e7ef 100644
--- a/compiler/rustc_macros/src/query.rs
+++ b/compiler/rustc_macros/src/query.rs
@@ -97,7 +97,7 @@
Ok(QueryModifier::Cache(args, block))
} else if modifier == "load_cached" {
// Parse a load_cached modifier like:
- // `load_cached(tcx, id) { tcx.queries.on_disk_cache.try_load_query_result(tcx, id) }`
+ // `load_cached(tcx, id) { tcx.on_disk_cache.try_load_query_result(tcx, id) }`
let args;
parenthesized!(args in input);
let tcx = args.parse()?;
@@ -344,7 +344,6 @@
impls: &mut proc_macro2::TokenStream,
) {
let name = &query.name;
- let arg = &query.arg;
let key = &query.key.0;
// Find out if we should cache the query on disk
@@ -354,7 +353,7 @@
quote! {
#[inline]
fn try_load_from_disk(
- #tcx: TyCtxt<'tcx>,
+ #tcx: QueryCtxt<'tcx>,
#id: SerializedDepNodeIndex
) -> Option<Self::Value> {
#block
@@ -365,10 +364,10 @@
quote! {
#[inline]
fn try_load_from_disk(
- tcx: TyCtxt<'tcx>,
+ tcx: QueryCtxt<'tcx>,
id: SerializedDepNodeIndex
) -> Option<Self::Value> {
- tcx.queries.on_disk_cache.as_ref().and_then(|c| c.try_load_query_result(tcx, id))
+ tcx.on_disk_cache.as_ref()?.try_load_query_result(*tcx, id)
}
}
};
@@ -379,21 +378,21 @@
let t = &(t.0).0;
quote! { #t }
})
- .unwrap_or(quote! { _ });
+ .unwrap_or_else(|| quote! { _ });
let value = args
.as_ref()
.map(|t| {
let t = &(t.1).0;
quote! { #t }
})
- .unwrap_or(quote! { _ });
+ .unwrap_or_else(|| quote! { _ });
// expr is a `Block`, meaning that `{ #expr }` gets expanded
// to `{ { stmts... } }`, which triggers the `unused_braces` lint.
quote! {
#[inline]
#[allow(unused_variables, unused_braces)]
fn cache_on_disk(
- #tcx: TyCtxt<'tcx>,
+ #tcx: QueryCtxt<'tcx>,
#key: &Self::Key,
#value: Option<&Self::Value>
) -> bool {
@@ -410,20 +409,18 @@
};
let (tcx, desc) = modifiers.desc;
- let tcx = tcx.as_ref().map_or(quote! { _ }, |t| quote! { #t });
+ let tcx = tcx.as_ref().map_or_else(|| quote! { _ }, |t| quote! { #t });
let desc = quote! {
#[allow(unused_variables)]
- fn describe(
- #tcx: TyCtxt<'tcx>,
- #key: #arg,
- ) -> Cow<'static, str> {
+ fn describe(tcx: QueryCtxt<'tcx>, key: Self::Key) -> String {
+ let (#tcx, #key) = (*tcx, key);
::rustc_middle::ty::print::with_no_trimmed_paths(|| format!(#desc).into())
}
};
impls.extend(quote! {
- impl<'tcx> QueryDescription<TyCtxt<'tcx>> for queries::#name<'tcx> {
+ impl<'tcx> QueryDescription<QueryCtxt<'tcx>> for queries::#name<'tcx> {
#desc
#cache
}
@@ -498,6 +495,7 @@
}
TokenStream::from(quote! {
+ #[macro_export]
macro_rules! rustc_query_append {
([$($macro:tt)*][$($other:tt)*]) => {
$($macro)* {
@@ -517,12 +515,15 @@
);
}
}
+ #[macro_export]
macro_rules! rustc_cached_queries {
($($macro:tt)*) => {
$($macro)*(#cached_queries);
}
}
-
- #query_description_stream
+ #[macro_export]
+ macro_rules! rustc_query_description {
+ () => { #query_description_stream }
+ }
})
}
diff --git a/compiler/rustc_macros/src/session_diagnostic.rs b/compiler/rustc_macros/src/session_diagnostic.rs
index 5c061a9..8a0fce2 100644
--- a/compiler/rustc_macros/src/session_diagnostic.rs
+++ b/compiler/rustc_macros/src/session_diagnostic.rs
@@ -473,9 +473,9 @@
.map(
|applicability_idx| quote!(#binding.#applicability_idx),
)
- .unwrap_or(quote!(
- rustc_errors::Applicability::Unspecified
- ));
+ .unwrap_or_else(|| {
+ quote!(rustc_errors::Applicability::Unspecified)
+ });
return Ok((span, applicability));
}
throw_span_err!(
diff --git a/compiler/rustc_metadata/Cargo.toml b/compiler/rustc_metadata/Cargo.toml
index f1975e7..48effed 100644
--- a/compiler/rustc_metadata/Cargo.toml
+++ b/compiler/rustc_metadata/Cargo.toml
@@ -11,8 +11,8 @@
libc = "0.2"
snap = "1"
tracing = "0.1"
-memmap = "0.7"
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+memmap2 = "0.2.1"
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_middle = { path = "../rustc_middle" }
rustc_attr = { path = "../rustc_attr" }
rustc_data_structures = { path = "../rustc_data_structures" }
diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs
index e3fbd1a2..b5506ac 100644
--- a/compiler/rustc_metadata/src/creader.rs
+++ b/compiler/rustc_metadata/src/creader.rs
@@ -6,18 +6,19 @@
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_ast::{self as ast, *};
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::Lrc;
use rustc_expand::base::SyntaxExtension;
-use rustc_hir::def_id::{CrateNum, LocalDefId, LOCAL_CRATE};
+use rustc_hir::def_id::{CrateNum, LocalDefId, StableCrateId, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
use rustc_index::vec::IndexVec;
use rustc_middle::middle::cstore::{CrateDepKind, CrateSource, ExternCrate};
use rustc_middle::middle::cstore::{ExternCrateSource, MetadataLoaderDyn};
use rustc_middle::ty::TyCtxt;
+use rustc_serialize::json::ToJson;
use rustc_session::config::{self, CrateType, ExternLocation};
-use rustc_session::lint;
+use rustc_session::lint::{self, BuiltinLintDiagnostics, ExternDepSpec};
use rustc_session::output::validate_crate_name;
use rustc_session::search_paths::PathKind;
use rustc_session::{CrateDisambiguator, Session};
@@ -27,6 +28,7 @@
use rustc_target::spec::{PanicStrategy, TargetTriple};
use proc_macro::bridge::client::ProcMacro;
+use std::collections::BTreeMap;
use std::path::Path;
use std::{cmp, env};
use tracing::{debug, info};
@@ -40,6 +42,10 @@
allocator_kind: Option<AllocatorKind>,
/// This crate has a `#[global_allocator]` item.
has_global_allocator: bool,
+
+ /// This map is used to verify we get no hash conflicts between
+ /// `StableCrateId` values.
+ stable_crate_ids: FxHashMap<StableCrateId, CrateNum>,
}
pub struct CrateLoader<'a> {
@@ -192,6 +198,11 @@
metadata_loader: &'a MetadataLoaderDyn,
local_crate_name: &str,
) -> Self {
+ let local_crate_stable_id =
+ StableCrateId::new(local_crate_name, sess.local_crate_disambiguator());
+ let mut stable_crate_ids = FxHashMap::default();
+ stable_crate_ids.insert(local_crate_stable_id, LOCAL_CRATE);
+
CrateLoader {
sess,
metadata_loader,
@@ -205,6 +216,7 @@
injected_panic_runtime: None,
allocator_kind: None,
has_global_allocator: false,
+ stable_crate_ids,
},
used_extern_options: Default::default(),
}
@@ -311,6 +323,20 @@
res
}
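+ /// Records the crate's `StableCrateId` in the crate store, returning an error
+ /// if another loaded crate already hashed to the same value.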
+ fn verify_no_stable_crate_id_hash_conflicts(
+ &mut self,
+ root: &CrateRoot<'_>,
+ cnum: CrateNum,
+ ) -> Result<(), CrateError> {
+ if let Some(existing) = self.cstore.stable_crate_ids.insert(root.stable_crate_id(), cnum) {
+ let crate_name0 = root.name();
+ let crate_name1 = self.cstore.get_crate_data(existing).name();
+ return Err(CrateError::StableCrateIdCollision(crate_name0, crate_name1));
+ }
+
+ Ok(())
+ }
+
fn register_crate(
&mut self,
host_lib: Option<Library>,
@@ -332,6 +358,8 @@
// Claim this crate number and cache it
let cnum = self.cstore.alloc_new_crate_num();
+ self.verify_no_stable_crate_id_hash_conflicts(&crate_root, cnum)?;
+
info!(
"register crate `{}` (cnum = {}. private_dep = {})",
crate_root.name(),
@@ -871,8 +899,25 @@
// Don't worry about pathless `--extern foo` sysroot references
continue;
}
- if !self.used_extern_options.contains(&Symbol::intern(name)) {
- self.sess.parse_sess.buffer_lint(
+ if self.used_extern_options.contains(&Symbol::intern(name)) {
+ continue;
+ }
+
+ // Got a real unused --extern
+ let diag = match self.sess.opts.extern_dep_specs.get(name) {
+ Some(loc) => BuiltinLintDiagnostics::ExternDepSpec(name.clone(), loc.into()),
+ None => {
+ // If we don't have a specific location, provide a json encoding of the `--extern`
+ // option.
+ let meta: BTreeMap<String, String> =
+ std::iter::once(("name".to_string(), name.to_string())).collect();
+ BuiltinLintDiagnostics::ExternDepSpec(
+ name.clone(),
+ ExternDepSpec::Json(meta.to_json()),
+ )
+ }
+ };
+ self.sess.parse_sess.buffer_lint_with_diagnostic(
lint::builtin::UNUSED_CRATE_DEPENDENCIES,
span,
ast::CRATE_NODE_ID,
@@ -881,8 +926,8 @@
name,
self.local_crate_name,
name),
+ diag,
);
- }
}
}
diff --git a/compiler/rustc_metadata/src/foreign_modules.rs b/compiler/rustc_metadata/src/foreign_modules.rs
index 4785b6c..3d3071c 100644
--- a/compiler/rustc_metadata/src/foreign_modules.rs
+++ b/compiler/rustc_metadata/src/foreign_modules.rs
@@ -4,29 +4,24 @@
use rustc_middle::ty::TyCtxt;
crate fn collect(tcx: TyCtxt<'_>) -> Vec<ForeignModule> {
- let mut collector = Collector { tcx, modules: Vec::new() };
+ let mut collector = Collector { modules: Vec::new() };
tcx.hir().krate().visit_all_item_likes(&mut collector);
collector.modules
}
-struct Collector<'tcx> {
- tcx: TyCtxt<'tcx>,
+struct Collector {
modules: Vec<ForeignModule>,
}
-impl ItemLikeVisitor<'tcx> for Collector<'tcx> {
+impl ItemLikeVisitor<'tcx> for Collector {
fn visit_item(&mut self, it: &'tcx hir::Item<'tcx>) {
let items = match it.kind {
hir::ItemKind::ForeignMod { items, .. } => items,
_ => return,
};
- let foreign_items =
- items.iter().map(|it| self.tcx.hir().local_def_id(it.id.hir_id).to_def_id()).collect();
- self.modules.push(ForeignModule {
- foreign_items,
- def_id: self.tcx.hir().local_def_id(it.hir_id).to_def_id(),
- });
+ let foreign_items = items.iter().map(|it| it.id.def_id.to_def_id()).collect();
+ self.modules.push(ForeignModule { foreign_items, def_id: it.def_id.to_def_id() });
}
fn visit_trait_item(&mut self, _it: &'tcx hir::TraitItem<'tcx>) {}
diff --git a/compiler/rustc_metadata/src/link_args.rs b/compiler/rustc_metadata/src/link_args.rs
index d088288..9e1ac33 100644
--- a/compiler/rustc_metadata/src/link_args.rs
+++ b/compiler/rustc_metadata/src/link_args.rs
@@ -8,7 +8,7 @@
let mut collector = Collector { tcx, args: Vec::new() };
tcx.hir().krate().visit_all_item_likes(&mut collector);
- for attr in tcx.hir().krate().item.attrs.iter() {
+ for attr in tcx.hir().attrs(hir::CRATE_HIR_ID).iter() {
if attr.has_name(sym::link_args) {
if let Some(linkarg) = attr.value_str() {
collector.add_link_args(linkarg);
@@ -36,7 +36,9 @@
// First, add all of the custom #[link_args] attributes
let sess = &self.tcx.sess;
- for m in it.attrs.iter().filter(|a| sess.check_name(a, sym::link_args)) {
+ for m in
+ self.tcx.hir().attrs(it.hir_id()).iter().filter(|a| sess.check_name(a, sym::link_args))
+ {
if let Some(linkarg) = m.value_str() {
self.add_link_args(linkarg);
}
diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs
index b66c6cf..39a3991 100644
--- a/compiler/rustc_metadata/src/locator.rs
+++ b/compiler/rustc_metadata/src/locator.rs
@@ -728,7 +728,7 @@
}
/// A trivial wrapper for `Mmap` that implements `StableDeref`.
-struct StableDerefMmap(memmap::Mmap);
+struct StableDerefMmap(memmap2::Mmap);
impl Deref for StableDerefMmap {
type Target = [u8];
@@ -779,7 +779,7 @@
// mmap the file, because only a small fraction of it is read.
let file = std::fs::File::open(filename)
.map_err(|_| format!("failed to open rmeta metadata: '{}'", filename.display()))?;
- let mmap = unsafe { memmap::Mmap::map(&file) };
+ let mmap = unsafe { memmap2::Mmap::map(&file) };
let mmap = mmap
.map_err(|_| format!("failed to mmap rmeta metadata: '{}'", filename.display()))?;
@@ -888,6 +888,7 @@
MultipleMatchingCrates(Symbol, FxHashMap<Svh, Library>),
SymbolConflictsCurrent(Symbol),
SymbolConflictsOthers(Symbol),
+ StableCrateIdCollision(Symbol, Symbol),
DlOpen(String),
DlSym(String),
LocatorCombined(CombinedLocatorError),
@@ -970,6 +971,13 @@
`-C metadata`. This will result in symbol conflicts between the two.",
root_name,
),
+ CrateError::StableCrateIdCollision(crate_name0, crate_name1) => {
+ let msg = format!(
+ "found crates (`{}` and `{}`) with colliding StableCrateId values.",
+ crate_name0, crate_name1
+ );
+ sess.struct_span_err(span, &msg)
+ }
CrateError::DlOpen(s) | CrateError::DlSym(s) => sess.struct_span_err(span, &s),
CrateError::LocatorCombined(locator) => {
let crate_name = locator.crate_name;
diff --git a/compiler/rustc_metadata/src/native_libs.rs b/compiler/rustc_metadata/src/native_libs.rs
index 8d09943..523e016 100644
--- a/compiler/rustc_metadata/src/native_libs.rs
+++ b/compiler/rustc_metadata/src/native_libs.rs
@@ -44,7 +44,8 @@
// Process all of the #[link(..)]-style arguments
let sess = &self.tcx.sess;
- for m in it.attrs.iter().filter(|a| sess.check_name(a, sym::link)) {
+ for m in self.tcx.hir().attrs(it.hir_id()).iter().filter(|a| sess.check_name(a, sym::link))
+ {
let items = match m.meta_item_list() {
Some(item) => item,
None => continue,
@@ -53,7 +54,7 @@
name: None,
kind: NativeLibKind::Unspecified,
cfg: None,
- foreign_module: Some(self.tcx.hir().local_def_id(it.hir_id).to_def_id()),
+ foreign_module: Some(it.def_id.to_def_id()),
wasm_import_module: None,
};
let mut kind_specified = false;
diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs
index e3c3539..e9b8388 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder.rs
@@ -635,6 +635,10 @@
self.hash
}
+ crate fn stable_crate_id(&self) -> StableCrateId {
+ self.stable_crate_id
+ }
+
crate fn triple(&self) -> &TargetTriple {
&self.triple
}
diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
index 828c025..0f860d1 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
@@ -468,6 +468,10 @@
pub fn num_def_ids(&self, cnum: CrateNum) -> usize {
self.get_crate_data(cnum).num_def_ids()
}
+
+ pub fn item_attrs(&self, def_id: DefId, sess: &Session) -> Vec<ast::Attribute> {
+ self.get_crate_data(def_id.krate).get_item_attrs(def_id.index, sess).collect()
+ }
}
impl CrateStore for CStore {
diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs
index dd6a6fe..254954c 100644
--- a/compiler/rustc_metadata/src/rmeta/encoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/encoder.rs
@@ -7,7 +7,9 @@
use rustc_data_structures::sync::{join, par_iter, Lrc, ParallelIterator};
use rustc_hir as hir;
use rustc_hir::def::{CtorOf, DefKind};
-use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
+use rustc_hir::def_id::{
+ CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_ID, CRATE_DEF_INDEX, LOCAL_CRATE,
+};
use rustc_hir::definitions::DefPathData;
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_hir::itemlikevisit::ItemLikeVisitor;
@@ -431,7 +433,7 @@
fn encode_info_for_items(&mut self) {
let krate = self.tcx.hir().krate();
- self.encode_info_for_mod(hir::CRATE_HIR_ID, &krate.item.module);
+ self.encode_info_for_mod(CRATE_DEF_ID, &krate.item.module);
// Proc-macro crates only export proc-macro items, which are looked
// up using `proc_macro_data`
@@ -572,10 +574,14 @@
let tcx = self.tcx;
+ // Encode MIR.
+ i = self.position();
+ self.encode_mir();
+ let mir_bytes = self.position() - i;
+
// Encode the items.
i = self.position();
self.encode_def_ids();
- self.encode_mir();
self.encode_info_for_items();
let item_bytes = self.position() - i;
@@ -647,6 +653,7 @@
triple: tcx.sess.opts.target_triple.clone(),
hash: tcx.crate_hash(LOCAL_CRATE),
disambiguator: tcx.sess.local_crate_disambiguator(),
+ stable_crate_id: tcx.def_path_hash(LOCAL_CRATE.as_def_id()).stable_crate_id(),
panic_strategy: tcx.sess.panic_strategy(),
edition: tcx.sess.edition(),
has_global_allocator: tcx.has_global_allocator(LOCAL_CRATE),
@@ -689,22 +696,23 @@
}
}
- println!("metadata stats:");
- println!(" dep bytes: {}", dep_bytes);
- println!(" lib feature bytes: {}", lib_feature_bytes);
- println!(" lang item bytes: {}", lang_item_bytes);
- println!(" diagnostic item bytes: {}", diagnostic_item_bytes);
- println!(" native bytes: {}", native_lib_bytes);
- println!(" source_map bytes: {}", source_map_bytes);
- println!(" impl bytes: {}", impl_bytes);
- println!(" exp. symbols bytes: {}", exported_symbols_bytes);
- println!(" def-path table bytes: {}", def_path_table_bytes);
- println!(" proc-macro-data-bytes: {}", proc_macro_data_bytes);
- println!(" item bytes: {}", item_bytes);
- println!(" table bytes: {}", tables_bytes);
- println!(" hygiene bytes: {}", hygiene_bytes);
- println!(" zero bytes: {}", zero_bytes);
- println!(" total bytes: {}", total_bytes);
+ eprintln!("metadata stats:");
+ eprintln!(" dep bytes: {}", dep_bytes);
+ eprintln!(" lib feature bytes: {}", lib_feature_bytes);
+ eprintln!(" lang item bytes: {}", lang_item_bytes);
+ eprintln!(" diagnostic item bytes: {}", diagnostic_item_bytes);
+ eprintln!(" native bytes: {}", native_lib_bytes);
+ eprintln!(" source_map bytes: {}", source_map_bytes);
+ eprintln!(" impl bytes: {}", impl_bytes);
+ eprintln!(" exp. symbols bytes: {}", exported_symbols_bytes);
+ eprintln!(" def-path table bytes: {}", def_path_table_bytes);
+ eprintln!(" proc-macro-data-bytes: {}", proc_macro_data_bytes);
+ eprintln!(" mir bytes: {}", mir_bytes);
+ eprintln!(" item bytes: {}", item_bytes);
+ eprintln!(" table bytes: {}", tables_bytes);
+ eprintln!(" hygiene bytes: {}", hygiene_bytes);
+ eprintln!(" zero bytes: {}", zero_bytes);
+ eprintln!(" total bytes: {}", total_bytes);
}
root
@@ -828,6 +836,76 @@
}
}
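+/// Whether `variances_of` should be encoded in metadata for this kind of definition.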
+fn should_encode_variances(def_kind: DefKind) -> bool {
+ match def_kind {
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Fn
+ | DefKind::Ctor(..)
+ | DefKind::AssocFn => true,
+ DefKind::Mod
+ | DefKind::Field
+ | DefKind::AssocTy
+ | DefKind::AssocConst
+ | DefKind::TyParam
+ | DefKind::ConstParam
+ | DefKind::Static
+ | DefKind::Const
+ | DefKind::ForeignMod
+ | DefKind::TyAlias
+ | DefKind::OpaqueTy
+ | DefKind::Impl
+ | DefKind::Trait
+ | DefKind::TraitAlias
+ | DefKind::Macro(..)
+ | DefKind::ForeignTy
+ | DefKind::Use
+ | DefKind::LifetimeParam
+ | DefKind::AnonConst
+ | DefKind::GlobalAsm
+ | DefKind::Closure
+ | DefKind::Generator
+ | DefKind::ExternCrate => false,
+ }
+}
+
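+/// Whether generics and predicates should be encoded in metadata for this kind of definition.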
+fn should_encode_generics(def_kind: DefKind) -> bool {
+ match def_kind {
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Trait
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy
+ | DefKind::Fn
+ | DefKind::Const
+ | DefKind::Static
+ | DefKind::Ctor(..)
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::AnonConst
+ | DefKind::OpaqueTy
+ | DefKind::Impl
+ | DefKind::Closure
+ | DefKind::Generator => true,
+ DefKind::Mod
+ | DefKind::Field
+ | DefKind::ForeignMod
+ | DefKind::TyParam
+ | DefKind::ConstParam
+ | DefKind::Macro(..)
+ | DefKind::Use
+ | DefKind::LifetimeParam
+ | DefKind::GlobalAsm
+ | DefKind::ExternCrate => false,
+ }
+}
+
impl EncodeContext<'a, 'tcx> {
fn encode_def_ids(&mut self) {
if self.is_proc_macro {
@@ -856,12 +934,34 @@
self.encode_const_stability(def_id);
self.encode_deprecation(def_id);
}
+ if should_encode_variances(def_kind) {
+ let v = self.tcx.variances_of(def_id);
+ record!(self.tables.variances[def_id] <- v);
+ }
+ if should_encode_generics(def_kind) {
+ let g = tcx.generics_of(def_id);
+ record!(self.tables.generics[def_id] <- g);
+ record!(self.tables.explicit_predicates[def_id] <- self.tcx.explicit_predicates_of(def_id));
+ let inferred_outlives = self.tcx.inferred_outlives_of(def_id);
+ if !inferred_outlives.is_empty() {
+ record!(self.tables.inferred_outlives[def_id] <- inferred_outlives);
+ }
+ }
+ if let DefKind::Trait | DefKind::TraitAlias = def_kind {
+ record!(self.tables.super_predicates[def_id] <- self.tcx.super_predicates_of(def_id));
+ }
}
- }
-
- fn encode_variances_of(&mut self, def_id: DefId) {
- debug!("EncodeContext::encode_variances_of({:?})", def_id);
- record!(self.tables.variances[def_id] <- &self.tcx.variances_of(def_id)[..]);
+ let inherent_impls = tcx.crate_inherent_impls(LOCAL_CRATE);
+ for (def_id, implementations) in inherent_impls.inherent_impls.iter() {
+ assert!(def_id.is_local());
+ if implementations.is_empty() {
+ continue;
+ }
+ record!(self.tables.inherent_impls[def_id] <- implementations.iter().map(|&def_id| {
+ assert!(def_id.is_local());
+ def_id.index
+ }));
+ }
}
fn encode_item_type(&mut self, def_id: DefId) {
@@ -894,12 +994,7 @@
if let Some(ctor_def_id) = variant.ctor_def_id {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(ctor_def_id));
}
- // FIXME(eddyb) is this ever used?
- self.encode_variances_of(def_id);
}
- self.encode_generics(def_id);
- self.encode_explicit_predicates(def_id);
- self.encode_inferred_outlives(def_id);
}
fn encode_enum_variant_ctor(&mut self, def: &ty::AdtDef, index: VariantIdx) {
@@ -920,16 +1015,11 @@
self.encode_item_type(def_id);
if variant.ctor_kind == CtorKind::Fn {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
- self.encode_variances_of(def_id);
}
- self.encode_generics(def_id);
- self.encode_explicit_predicates(def_id);
- self.encode_inferred_outlives(def_id);
}
- fn encode_info_for_mod(&mut self, id: hir::HirId, md: &hir::Mod<'_>) {
+ fn encode_info_for_mod(&mut self, local_def_id: LocalDefId, md: &hir::Mod<'_>) {
let tcx = self.tcx;
- let local_def_id = tcx.hir().local_def_id(id);
let def_id = local_def_id.to_def_id();
debug!("EncodeContext::encode_info_for_mod({:?})", def_id);
@@ -964,7 +1054,7 @@
record!(self.tables.children[def_id] <- &[]);
} else {
record!(self.tables.children[def_id] <- md.item_ids.iter().map(|item_id| {
- tcx.hir().local_def_id(item_id.id).local_def_index
+ item_id.def_id.local_def_index
}));
}
}
@@ -984,9 +1074,6 @@
record!(self.tables.kind[def_id] <- EntryKind::Field);
self.encode_ident_span(def_id, field.ident);
self.encode_item_type(def_id);
- self.encode_generics(def_id);
- self.encode_explicit_predicates(def_id);
- self.encode_inferred_outlives(def_id);
}
fn encode_struct_ctor(&mut self, adt_def: &ty::AdtDef, def_id: DefId) {
@@ -1005,35 +1092,7 @@
self.encode_item_type(def_id);
if variant.ctor_kind == CtorKind::Fn {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
- self.encode_variances_of(def_id);
}
- self.encode_generics(def_id);
- self.encode_explicit_predicates(def_id);
- self.encode_inferred_outlives(def_id);
- }
-
- fn encode_generics(&mut self, def_id: DefId) {
- debug!("EncodeContext::encode_generics({:?})", def_id);
- record!(self.tables.generics[def_id] <- self.tcx.generics_of(def_id));
- }
-
- fn encode_explicit_predicates(&mut self, def_id: DefId) {
- debug!("EncodeContext::encode_explicit_predicates({:?})", def_id);
- record!(self.tables.explicit_predicates[def_id] <-
- self.tcx.explicit_predicates_of(def_id));
- }
-
- fn encode_inferred_outlives(&mut self, def_id: DefId) {
- debug!("EncodeContext::encode_inferred_outlives({:?})", def_id);
- let inferred_outlives = self.tcx.inferred_outlives_of(def_id);
- if !inferred_outlives.is_empty() {
- record!(self.tables.inferred_outlives[def_id] <- inferred_outlives);
- }
- }
-
- fn encode_super_predicates(&mut self, def_id: DefId) {
- debug!("EncodeContext::encode_super_predicates({:?})", def_id);
- record!(self.tables.super_predicates[def_id] <- self.tcx.super_predicates_of(def_id));
}
fn encode_explicit_item_bounds(&mut self, def_id: DefId) {
@@ -1110,11 +1169,7 @@
}
if trait_item.kind == ty::AssocKind::Fn {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
- self.encode_variances_of(def_id);
}
- self.encode_generics(def_id);
- self.encode_explicit_predicates(def_id);
- self.encode_inferred_outlives(def_id);
}
fn encode_info_for_impl_item(&mut self, def_id: DefId) {
@@ -1171,11 +1226,7 @@
self.encode_item_type(def_id);
if impl_item.kind == ty::AssocKind::Fn {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
- self.encode_variances_of(def_id);
}
- self.encode_generics(def_id);
- self.encode_explicit_predicates(def_id);
- self.encode_inferred_outlives(def_id);
}
fn encode_fn_param_names_for_body(&mut self, body_id: hir::BodyId) -> Lazy<[Ident]> {
@@ -1230,18 +1281,6 @@
}
}
- // Encodes the inherent implementations of a structure, enumeration, or trait.
- fn encode_inherent_implementations(&mut self, def_id: DefId) {
- debug!("EncodeContext::encode_inherent_implementations({:?})", def_id);
- let implementations = self.tcx.inherent_impls(def_id);
- if !implementations.is_empty() {
- record!(self.tables.inherent_impls[def_id] <- implementations.iter().map(|&def_id| {
- assert!(def_id.is_local());
- def_id.index
- }));
- }
- }
-
fn encode_stability(&mut self, def_id: DefId) {
debug!("EncodeContext::encode_stability({:?})", def_id);
@@ -1307,7 +1346,7 @@
EntryKind::Fn(self.lazy(data))
}
hir::ItemKind::Mod(ref m) => {
- return self.encode_info_for_mod(item.hir_id, m);
+ return self.encode_info_for_mod(item.def_id, m);
}
hir::ItemKind::ForeignMod { .. } => EntryKind::ForeignMod,
hir::ItemKind::GlobalAsm(..) => EntryKind::GlobalAsm,
@@ -1405,8 +1444,7 @@
hir::ItemKind::ForeignMod { items, .. } => record!(self.tables.children[def_id] <-
items
.iter()
- .map(|foreign_item| tcx.hir().local_def_id(
- foreign_item.id.hir_id).local_def_index)
+ .map(|foreign_item| foreign_item.id.def_id.local_def_index)
),
hir::ItemKind::Enum(..) => record!(self.tables.children[def_id] <-
self.tcx.adt_def(def_id).variants.iter().map(|v| {
@@ -1453,43 +1491,11 @@
record!(self.tables.impl_trait_ref[def_id] <- trait_ref);
}
}
- self.encode_inherent_implementations(def_id);
- match item.kind {
- hir::ItemKind::Enum(..)
- | hir::ItemKind::Struct(..)
- | hir::ItemKind::Union(..)
- | hir::ItemKind::Fn(..) => self.encode_variances_of(def_id),
- _ => {}
- }
- match item.kind {
- hir::ItemKind::Static(..)
- | hir::ItemKind::Const(..)
- | hir::ItemKind::Fn(..)
- | hir::ItemKind::TyAlias(..)
- | hir::ItemKind::Enum(..)
- | hir::ItemKind::Struct(..)
- | hir::ItemKind::Union(..)
- | hir::ItemKind::Impl { .. }
- | hir::ItemKind::OpaqueTy(..)
- | hir::ItemKind::Trait(..)
- | hir::ItemKind::TraitAlias(..) => {
- self.encode_generics(def_id);
- self.encode_explicit_predicates(def_id);
- self.encode_inferred_outlives(def_id);
- }
- _ => {}
- }
- match item.kind {
- hir::ItemKind::Trait(..) | hir::ItemKind::TraitAlias(..) => {
- self.encode_super_predicates(def_id);
- }
- _ => {}
- }
}
/// Serialize the text of exported macros
fn encode_info_for_macro_def(&mut self, macro_def: &hir::MacroDef<'_>) {
- let def_id = self.tcx.hir().local_def_id(macro_def.hir_id).to_def_id();
+ let def_id = macro_def.def_id.to_def_id();
record!(self.tables.kind[def_id] <- EntryKind::MacroDef(self.lazy(macro_def.ast.clone())));
self.encode_ident_span(def_id, macro_def.ident);
}
@@ -1525,7 +1531,6 @@
if let ty::Closure(def_id, substs) = *ty.kind() {
record!(self.tables.fn_sig[def_id] <- substs.as_closure().sig());
}
- self.encode_generics(def_id.to_def_id());
}
fn encode_info_for_anon_const(&mut self, def_id: LocalDefId) {
@@ -1537,9 +1542,6 @@
record!(self.tables.kind[def_id.to_def_id()] <- EntryKind::AnonConst(qualifs, const_data));
self.encode_item_type(def_id.to_def_id());
- self.encode_generics(def_id.to_def_id());
- self.encode_explicit_predicates(def_id.to_def_id());
- self.encode_inferred_outlives(def_id.to_def_id());
}
fn encode_native_libraries(&mut self) -> Lazy<[NativeLib]> {
@@ -1816,14 +1818,9 @@
}
self.encode_ident_span(def_id, nitem.ident);
self.encode_item_type(def_id);
- self.encode_inherent_implementations(def_id);
if let hir::ForeignItemKind::Fn(..) = nitem.kind {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
- self.encode_variances_of(def_id);
}
- self.encode_generics(def_id);
- self.encode_explicit_predicates(def_id);
- self.encode_inferred_outlives(def_id);
}
}
@@ -1845,17 +1842,15 @@
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
intravisit::walk_item(self, item);
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
match item.kind {
hir::ItemKind::ExternCrate(_) | hir::ItemKind::Use(..) => {} // ignore these
- _ => self.encode_info_for_item(def_id.to_def_id(), item),
+ _ => self.encode_info_for_item(item.def_id.to_def_id(), item),
}
self.encode_addl_info_for_item(item);
}
fn visit_foreign_item(&mut self, ni: &'tcx hir::ForeignItem<'tcx>) {
intravisit::walk_foreign_item(self, ni);
- let def_id = self.tcx.hir().local_def_id(ni.hir_id);
- self.encode_info_for_foreign_item(def_id.to_def_id(), ni);
+ self.encode_info_for_foreign_item(ni.def_id.to_def_id(), ni);
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
intravisit::walk_generics(self, generics);
@@ -1915,7 +1910,6 @@
/// so it's easier to do that here than to wait until we would encounter them
/// normally in the visitor walk.
fn encode_addl_info_for_item(&mut self, item: &hir::Item<'_>) {
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
match item.kind {
hir::ItemKind::Static(..)
| hir::ItemKind::Const(..)
@@ -1931,7 +1925,7 @@
// no sub-item recording needed in these cases
}
hir::ItemKind::Enum(..) => {
- let def = self.tcx.adt_def(def_id.to_def_id());
+ let def = self.tcx.adt_def(item.def_id.to_def_id());
self.encode_fields(def);
for (i, variant) in def.variants.iter_enumerated() {
@@ -1943,7 +1937,7 @@
}
}
hir::ItemKind::Struct(ref struct_def, _) => {
- let def = self.tcx.adt_def(def_id.to_def_id());
+ let def = self.tcx.adt_def(item.def_id.to_def_id());
self.encode_fields(def);
// If the struct has a constructor, encode it.
@@ -1953,18 +1947,19 @@
}
}
hir::ItemKind::Union(..) => {
- let def = self.tcx.adt_def(def_id.to_def_id());
+ let def = self.tcx.adt_def(item.def_id.to_def_id());
self.encode_fields(def);
}
hir::ItemKind::Impl { .. } => {
for &trait_item_def_id in
- self.tcx.associated_item_def_ids(def_id.to_def_id()).iter()
+ self.tcx.associated_item_def_ids(item.def_id.to_def_id()).iter()
{
self.encode_info_for_impl_item(trait_item_def_id);
}
}
hir::ItemKind::Trait(..) => {
- for &item_def_id in self.tcx.associated_item_def_ids(def_id.to_def_id()).iter() {
+ for &item_def_id in self.tcx.associated_item_def_ids(item.def_id.to_def_id()).iter()
+ {
self.encode_info_for_trait_item(item_def_id);
}
}
@@ -1980,15 +1975,14 @@
impl<'tcx, 'v> ItemLikeVisitor<'v> for ImplVisitor<'tcx> {
fn visit_item(&mut self, item: &hir::Item<'_>) {
if let hir::ItemKind::Impl { .. } = item.kind {
- let impl_id = self.tcx.hir().local_def_id(item.hir_id);
- if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_id.to_def_id()) {
+ if let Some(trait_ref) = self.tcx.impl_trait_ref(item.def_id.to_def_id()) {
let simplified_self_ty =
ty::fast_reject::simplify_type(self.tcx, trait_ref.self_ty(), false);
self.impls
.entry(trait_ref.def_id)
.or_default()
- .push((impl_id.local_def_index, simplified_self_ty));
+ .push((item.def_id.local_def_index, simplified_self_ty));
}
}
}
diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs
index b44c3bf..6105289 100644
--- a/compiler/rustc_metadata/src/rmeta/mod.rs
+++ b/compiler/rustc_metadata/src/rmeta/mod.rs
@@ -7,7 +7,7 @@
use rustc_data_structures::sync::MetadataRef;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, DefKind};
-use rustc_hir::def_id::{DefId, DefIndex, DefPathHash};
+use rustc_hir::def_id::{DefId, DefIndex, DefPathHash, StableCrateId};
use rustc_hir::definitions::DefKey;
use rustc_hir::lang_items;
use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
@@ -203,6 +203,7 @@
extra_filename: String,
hash: Svh,
disambiguator: CrateDisambiguator,
+ stable_crate_id: StableCrateId,
panic_strategy: PanicStrategy,
edition: Edition,
has_global_allocator: bool,
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
index d33aad3..8cb30e7 100644
--- a/compiler/rustc_middle/Cargo.toml
+++ b/compiler/rustc_middle/Cargo.toml
@@ -11,7 +11,7 @@
rustc_arena = { path = "../rustc_arena" }
bitflags = "1.2.1"
tracing = "0.1"
-rustc-rayon-core = "0.3.0"
+rustc-rayon-core = "0.3.1"
polonius-engine = "0.12.0"
rustc_apfloat = { path = "../rustc_apfloat" }
rustc_attr = { path = "../rustc_attr" }
@@ -27,7 +27,7 @@
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
chalk-ir = "0.55.0"
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
-measureme = "9.0.0"
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
+measureme = "9.1.0"
rustc_session = { path = "../rustc_session" }
rustc_type_ir = { path = "../rustc_type_ir" }
diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs
index 1cb7575..ba9d0a4 100644
--- a/compiler/rustc_middle/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -62,7 +62,6 @@
use rustc_hir::definitions::DefPathHash;
use rustc_hir::HirId;
use rustc_span::symbol::Symbol;
-use rustc_span::DUMMY_SP;
use std::hash::Hash;
pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
@@ -91,53 +90,6 @@
// FIXME: Make this a simple boolean once DepNodeParams::can_reconstruct_query_key
// can be made a specialized associated const.
can_reconstruct_query_key: fn() -> bool,
-
- /// The red/green evaluation system will try to mark a specific DepNode in the
- /// dependency graph as green by recursively trying to mark the dependencies of
- /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
- /// where we don't know if it is red or green and we therefore actually have
- /// to recompute its value in order to find out. Since the only piece of
- /// information that we have at that point is the `DepNode` we are trying to
- /// re-evaluate, we need some way to re-run a query from just that. This is what
- /// `force_from_dep_node()` implements.
- ///
- /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
- /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
- /// is usually constructed by computing a stable hash of the query-key that the
- /// `DepNode` corresponds to. Consequently, it is not in general possible to go
- /// back from hash to query-key (since hash functions are not reversible). For
- /// this reason `force_from_dep_node()` is expected to fail from time to time
- /// because we just cannot find out, from the `DepNode` alone, what the
- /// corresponding query-key is and therefore cannot re-run the query.
- ///
- /// The system deals with this case letting `try_mark_green` fail which forces
- /// the root query to be re-evaluated.
- ///
- /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
- /// Fortunately, we can use some contextual information that will allow us to
- /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
- /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
- /// valid `DefPathHash`. Since we also always build a huge table that maps every
- /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
- /// everything we need to re-run the query.
- ///
- /// Take the `mir_promoted` query as an example. Like many other queries, it
- /// just has a single parameter: the `DefId` of the item it will compute the
- /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
- /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
- /// is actually a `DefPathHash`, and can therefore just look up the corresponding
- /// `DefId` in `tcx.def_path_hash_to_def_id`.
- ///
- /// When you implement a new query, it will likely have a corresponding new
- /// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As
- /// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter,
- /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
- /// add it to the "We don't have enough information to reconstruct..." group in
- /// the match below.
- pub(super) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
-
- /// Invoke a query to put the on-disk cached value in memory.
- pub(super) try_load_from_on_disk_cache: fn(TyCtxt<'_>, &DepNode),
}
impl std::ops::Deref for DepKind {
@@ -196,8 +148,7 @@
#[allow(non_upper_case_globals)]
pub mod dep_kind {
use super::*;
- use crate::ty::query::{queries, query_keys};
- use rustc_query_system::query::{force_query, QueryDescription};
+ use crate::ty::query::query_keys;
// We use this for most things when incr. comp. is turned off.
pub const Null: DepKindStruct = DepKindStruct {
@@ -206,8 +157,6 @@
is_eval_always: false,
can_reconstruct_query_key: || true,
- force_from_dep_node: |_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node),
- try_load_from_on_disk_cache: |_, _| {},
};
pub const TraitSelect: DepKindStruct = DepKindStruct {
@@ -216,8 +165,6 @@
is_eval_always: false,
can_reconstruct_query_key: || true,
- force_from_dep_node: |_, _| false,
- try_load_from_on_disk_cache: |_, _| {},
};
pub const CompileCodegenUnit: DepKindStruct = DepKindStruct {
@@ -226,8 +173,6 @@
is_eval_always: false,
can_reconstruct_query_key: || false,
- force_from_dep_node: |_, _| false,
- try_load_from_on_disk_cache: |_, _| {},
};
macro_rules! define_query_dep_kinds {
@@ -246,59 +191,11 @@
::can_reconstruct_query_key()
}
- fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<query_keys::$variant<'tcx>> {
- <query_keys::$variant<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, dep_node)
- }
-
- fn force_from_dep_node(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool {
- if is_anon {
- return false;
- }
-
- if !can_reconstruct_query_key() {
- return false;
- }
-
- if let Some(key) = recover(tcx, dep_node) {
- force_query::<queries::$variant<'_>, _>(
- tcx,
- key,
- DUMMY_SP,
- *dep_node
- );
- return true;
- }
-
- false
- }
-
- fn try_load_from_on_disk_cache(tcx: TyCtxt<'_>, dep_node: &DepNode) {
- if is_anon {
- return
- }
-
- if !can_reconstruct_query_key() {
- return
- }
-
- debug_assert!(tcx.dep_graph
- .node_color(dep_node)
- .map(|c| c.is_green())
- .unwrap_or(false));
-
- let key = recover(tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
- if queries::$variant::cache_on_disk(tcx, &key, None) {
- let _ = tcx.$variant(key);
- }
- }
-
DepKindStruct {
has_params,
is_anon,
is_eval_always,
can_reconstruct_query_key,
- force_from_dep_node,
- try_load_from_on_disk_cache,
}
};)*
);
@@ -314,7 +211,12 @@
$variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
,)*
) => (
- static DEP_KINDS: &[DepKindStruct] = &[ $(dep_kind::$variant),* ];
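+ // Expose the full list of variants so other crates can build their own
+ // tables indexed by `DepKind`.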
+ #[macro_export]
+ macro_rules! make_dep_kind_array {
+ ($mod:ident) => {[ $(($mod::$variant),)* ]};
+ }
+
+ static DEP_KINDS: &[DepKindStruct] = &make_dep_kind_array!(dep_kind);
/// This enum serves as an index into the `DEP_KINDS` array.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
@@ -414,10 +316,7 @@
/// has been removed.
fn extract_def_id(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
if self.kind.can_reconstruct_query_key() {
- tcx.queries
- .on_disk_cache
- .as_ref()?
- .def_path_hash_to_def_id(tcx, DefPathHash(self.hash.into()))
+ tcx.on_disk_cache.as_ref()?.def_path_hash_to_def_id(tcx, DefPathHash(self.hash.into()))
} else {
None
}
@@ -472,7 +371,7 @@
// we will use the old DefIndex as an initial guess for
// a lookup into the crate metadata.
if !self.is_local() {
- if let Some(cache) = &tcx.queries.on_disk_cache {
+ if let Some(cache) = &tcx.on_disk_cache {
cache.store_foreign_def_id_hash(*self, hash);
}
}
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
index b88ffa2..c688b23 100644
--- a/compiler/rustc_middle/src/dep_graph/mod.rs
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -2,10 +2,9 @@
use crate::ty::{self, TyCtxt};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::Diagnostic;
-use rustc_hir::def_id::LocalDefId;
+use rustc_session::Session;
+#[macro_use]
mod dep_node;
pub use rustc_query_system::dep_graph::{
@@ -94,7 +93,7 @@
type StableHashingContext = StableHashingContext<'tcx>;
fn register_reused_dep_node(&self, dep_node: &DepNode) {
- if let Some(cache) = self.queries.on_disk_cache.as_ref() {
+ if let Some(cache) = self.on_disk_cache.as_ref() {
cache.register_reused_dep_node(*self, dep_node)
}
}
@@ -103,112 +102,18 @@
TyCtxt::create_stable_hashing_context(*self)
}
- fn debug_dep_tasks(&self) -> bool {
- self.sess.opts.debugging_opts.dep_tasks
- }
- fn debug_dep_node(&self) -> bool {
- self.sess.opts.debugging_opts.incremental_info
- || self.sess.opts.debugging_opts.query_dep_graph
+ #[inline]
+ fn dep_graph(&self) -> &DepGraph {
+ &self.dep_graph
}
- fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
- // FIXME: This match is just a workaround for incremental bugs and should
- // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
- // bug that must be fixed before removing this.
- match dep_node.kind {
- DepKind::hir_owner | DepKind::hir_owner_nodes => {
- if let Some(def_id) = dep_node.extract_def_id(*self) {
- if !def_id_corresponds_to_hir_dep_node(*self, def_id.expect_local()) {
- // This `DefPath` does not have a
- // corresponding `DepNode` (e.g. a
- // struct field), and the ` DefPath`
- // collided with the `DefPath` of a
- // proper item that existed in the
- // previous compilation session.
- //
- // Since the given `DefPath` does not
- // denote the item that previously
- // existed, we just fail to mark green.
- return false;
- }
- } else {
- // If the node does not exist anymore, we
- // just fail to mark green.
- return false;
- }
- }
- _ => {
- // For other kinds of nodes it's OK to be
- // forced.
- }
- }
-
- debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
- // We must avoid ever having to call `force_from_dep_node()` for a
- // `DepNode::codegen_unit`:
- // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
- // would always end up having to evaluate the first caller of the
- // `codegen_unit` query that *is* reconstructible. This might very well be
- // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
- // to re-trigger calling the `codegen_unit` query with the right key. At
- // that point we would already have re-done all the work we are trying to
- // avoid doing in the first place.
- // The solution is simple: Just explicitly call the `codegen_unit` query for
- // each CGU, right after partitioning. This way `try_mark_green` will always
- // hit the cache instead of having to go through `force_from_dep_node`.
- // This assertion makes sure, we actually keep applying the solution above.
- debug_assert!(
- dep_node.kind != DepKind::codegen_unit,
- "calling force_from_dep_node() on DepKind::codegen_unit"
- );
-
- (dep_node.kind.force_from_dep_node)(*self, dep_node)
- }
-
- fn has_errors_or_delayed_span_bugs(&self) -> bool {
- self.sess.has_errors_or_delayed_span_bugs()
- }
-
- fn diagnostic(&self) -> &rustc_errors::Handler {
- self.sess.diagnostic()
- }
-
- // Interactions with on_disk_cache
- fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
- (dep_node.kind.try_load_from_on_disk_cache)(*self, dep_node)
- }
-
- fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
- self.queries
- .on_disk_cache
- .as_ref()
- .map(|c| c.load_diagnostics(*self, prev_dep_node_index))
- .unwrap_or_default()
- }
-
- fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
- if let Some(c) = self.queries.on_disk_cache.as_ref() {
- c.store_diagnostics(dep_node_index, diagnostics)
- }
- }
-
- fn store_diagnostics_for_anon_node(
- &self,
- dep_node_index: DepNodeIndex,
- diagnostics: ThinVec<Diagnostic>,
- ) {
- if let Some(c) = self.queries.on_disk_cache.as_ref() {
- c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
- }
- }
-
+ #[inline(always)]
fn profiler(&self) -> &SelfProfilerRef {
&self.prof
}
-}
-fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- def_id == hir_id.owner
+ #[inline(always)]
+ fn sess(&self) -> &Session {
+ self.sess
+ }
}
diff --git a/compiler/rustc_middle/src/hir/map/blocks.rs b/compiler/rustc_middle/src/hir/map/blocks.rs
index 9d392c7..706c790 100644
--- a/compiler/rustc_middle/src/hir/map/blocks.rs
+++ b/compiler/rustc_middle/src/hir/map/blocks.rs
@@ -12,7 +12,6 @@
//! for the `Code` associated with a particular NodeId.
use crate::hir::map::Map;
-use rustc_ast::Attribute;
use rustc_hir as hir;
use rustc_hir::intravisit::FnKind;
use rustc_hir::{Expr, FnDecl, Node};
@@ -105,7 +104,6 @@
body: hir::BodyId,
id: hir::HirId,
span: Span,
- attrs: &'a [Attribute],
}
/// These are all the components one can extract from a closure expr
@@ -115,18 +113,11 @@
body: hir::BodyId,
id: hir::HirId,
span: Span,
- attrs: &'a [Attribute],
}
impl<'a> ClosureParts<'a> {
- fn new(
- d: &'a FnDecl<'a>,
- b: hir::BodyId,
- id: hir::HirId,
- s: Span,
- attrs: &'a [Attribute],
- ) -> Self {
- ClosureParts { decl: d, body: b, id, span: s, attrs }
+ fn new(d: &'a FnDecl<'a>, b: hir::BodyId, id: hir::HirId, s: Span) -> Self {
+ ClosureParts { decl: d, body: b, id, span: s }
}
}
@@ -146,7 +137,7 @@
pub fn body(self) -> hir::BodyId {
self.handle(
|i: ItemFnParts<'a>| i.body,
- |_, _, _: &'a hir::FnSig<'a>, _, body: hir::BodyId, _, _| body,
+ |_, _, _: &'a hir::FnSig<'a>, _, body: hir::BodyId, _| body,
|c: ClosureParts<'a>| c.body,
)
}
@@ -154,7 +145,7 @@
pub fn decl(self) -> &'a FnDecl<'a> {
self.handle(
|i: ItemFnParts<'a>| &*i.decl,
- |_, _, sig: &'a hir::FnSig<'a>, _, _, _, _| &sig.decl,
+ |_, _, sig: &'a hir::FnSig<'a>, _, _, _| &sig.decl,
|c: ClosureParts<'a>| c.decl,
)
}
@@ -162,7 +153,7 @@
pub fn span(self) -> Span {
self.handle(
|i: ItemFnParts<'_>| i.span,
- |_, _, _: &'a hir::FnSig<'a>, _, _, span, _| span,
+ |_, _, _: &'a hir::FnSig<'a>, _, _, span| span,
|c: ClosureParts<'_>| c.span,
)
}
@@ -170,7 +161,7 @@
pub fn id(self) -> hir::HirId {
self.handle(
|i: ItemFnParts<'_>| i.id,
- |id, _, _: &'a hir::FnSig<'a>, _, _, _, _| id,
+ |id, _, _: &'a hir::FnSig<'a>, _, _, _| id,
|c: ClosureParts<'_>| c.id,
)
}
@@ -189,12 +180,11 @@
pub fn kind(self) -> FnKind<'a> {
let item = |p: ItemFnParts<'a>| -> FnKind<'a> {
- FnKind::ItemFn(p.ident, p.generics, p.header, p.vis, p.attrs)
+ FnKind::ItemFn(p.ident, p.generics, p.header, p.vis)
};
- let closure = |c: ClosureParts<'a>| FnKind::Closure(c.attrs);
- let method = |_, ident: Ident, sig: &'a hir::FnSig<'a>, vis, _, _, attrs| {
- FnKind::Method(ident, sig, vis, attrs)
- };
+ let closure = |_: ClosureParts<'a>| FnKind::Closure;
+ let method =
+ |_, ident: Ident, sig: &'a hir::FnSig<'a>, vis, _, _| FnKind::Method(ident, sig, vis);
self.handle(item, method, closure)
}
@@ -208,20 +198,18 @@
Option<&'a hir::Visibility<'a>>,
hir::BodyId,
Span,
- &'a [Attribute],
) -> A,
C: FnOnce(ClosureParts<'a>) -> A,
{
match self.node {
Node::Item(i) => match i.kind {
hir::ItemKind::Fn(ref sig, ref generics, block) => item_fn(ItemFnParts {
- id: i.hir_id,
+ id: i.hir_id(),
ident: i.ident,
decl: &sig.decl,
body: block,
vis: &i.vis,
span: i.span,
- attrs: &i.attrs,
header: sig.header,
generics,
}),
@@ -229,19 +217,19 @@
},
Node::TraitItem(ti) => match ti.kind {
hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => {
- method(ti.hir_id, ti.ident, sig, None, body, ti.span, &ti.attrs)
+ method(ti.hir_id(), ti.ident, sig, None, body, ti.span)
}
_ => bug!("trait method FnLikeNode that is not fn-like"),
},
Node::ImplItem(ii) => match ii.kind {
hir::ImplItemKind::Fn(ref sig, body) => {
- method(ii.hir_id, ii.ident, sig, Some(&ii.vis), body, ii.span, &ii.attrs)
+ method(ii.hir_id(), ii.ident, sig, Some(&ii.vis), body, ii.span)
}
_ => bug!("impl method FnLikeNode that is not fn-like"),
},
Node::Expr(e) => match e.kind {
hir::ExprKind::Closure(_, ref decl, block, _fn_decl_span, _gen) => {
- closure(ClosureParts::new(&decl, block, e.hir_id, e.span, &e.attrs))
+ closure(ClosureParts::new(&decl, block, e.hir_id, e.span))
}
_ => bug!("expr FnLikeNode that is not fn-like"),
},
diff --git a/compiler/rustc_middle/src/hir/map/collector.rs b/compiler/rustc_middle/src/hir/map/collector.rs
index 872fcb0..a3d891f 100644
--- a/compiler/rustc_middle/src/hir/map/collector.rs
+++ b/compiler/rustc_middle/src/hir/map/collector.rs
@@ -52,25 +52,23 @@
if i >= len {
map.extend(repeat(None).take(i - len + 1));
}
+ debug_assert!(map[k].is_none());
map[k] = Some(v);
}
-fn hash(
- hcx: &mut StableHashingContext<'_>,
- input: impl for<'a> HashStable<StableHashingContext<'a>>,
-) -> Fingerprint {
- let mut stable_hasher = StableHasher::new();
- input.hash_stable(hcx, &mut stable_hasher);
- stable_hasher.finish()
-}
-
fn hash_body(
hcx: &mut StableHashingContext<'_>,
def_path_hash: DefPathHash,
item_like: impl for<'a> HashStable<StableHashingContext<'a>>,
hir_body_nodes: &mut Vec<(DefPathHash, Fingerprint)>,
) -> Fingerprint {
- let hash = hash(hcx, HirItemLike { item_like: &item_like });
+ let hash = {
+ let mut stable_hasher = StableHasher::new();
+ hcx.while_hashing_hir_bodies(true, |hcx| {
+ item_like.hash_stable(hcx, &mut stable_hasher);
+ });
+ stable_hasher.finish()
+ };
hir_body_nodes.push((def_path_hash, hash));
hash
}
@@ -119,6 +117,7 @@
modules: _,
proc_macros: _,
trait_map: _,
+ attrs: _,
} = *krate;
hash_body(&mut hcx, root_mod_def_path_hash, item, &mut hir_body_nodes)
@@ -218,11 +217,21 @@
// Overwrite the dummy hash with the real HIR owner hash.
nodes.hash = hash;
- // FIXME: feature(impl_trait_in_bindings) broken and trigger this assert
- //assert!(data.signature.is_none());
-
+ debug_assert!(data.signature.is_none());
data.signature =
Some(self.arena.alloc(Owner { parent: entry.parent, node: entry.node }));
+
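+ // Sanity check: the owner's `DefKey` parent should agree with the HIR
+ // parent stored in the entry.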
+ let dk_parent = self.definitions.def_key(id.owner).parent;
+ if let Some(dk_parent) = dk_parent {
+ let dk_parent = LocalDefId { local_def_index: dk_parent };
+ let dk_parent = self.definitions.local_def_id_to_hir_id(dk_parent);
+ if dk_parent.owner != entry.parent.owner {
+ panic!(
+ "Different parents for {:?} => dk_parent={:?} actual={:?}",
+ id.owner, dk_parent, entry.parent,
+ )
+ }
+ }
} else {
assert_eq!(entry.parent.owner, id.owner);
insert_vec_map(
@@ -309,7 +318,7 @@
fn visit_nested_item(&mut self, item: ItemId) {
debug!("visit_nested_item: {:?}", item);
- self.visit_item(self.krate.item(item.id));
+ self.visit_item(self.krate.item(item));
}
fn visit_nested_trait_item(&mut self, item_id: TraitItemId) {
@@ -338,13 +347,10 @@
fn visit_item(&mut self, i: &'hir Item<'hir>) {
debug!("visit_item: {:?}", i);
- debug_assert_eq!(
- i.hir_id.owner,
- self.definitions.opt_hir_id_to_local_def_id(i.hir_id).unwrap()
- );
- self.with_dep_node_owner(i.hir_id.owner, i, |this, hash| {
- this.insert_with_hash(i.span, i.hir_id, Node::Item(i), hash);
- this.with_parent(i.hir_id, |this| {
+ self.with_dep_node_owner(i.def_id, i, |this, hash| {
+ let hir_id = i.hir_id();
+ this.insert_with_hash(i.span, hir_id, Node::Item(i), hash);
+ this.with_parent(hir_id, |this| {
if let ItemKind::Struct(ref struct_def, _) = i.kind {
// If this is a tuple or unit-like struct, register the constructor.
if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
@@ -357,14 +363,10 @@
}
fn visit_foreign_item(&mut self, fi: &'hir ForeignItem<'hir>) {
- debug_assert_eq!(
- fi.hir_id.owner,
- self.definitions.opt_hir_id_to_local_def_id(fi.hir_id).unwrap()
- );
- self.with_dep_node_owner(fi.hir_id.owner, fi, |this, hash| {
- this.insert_with_hash(fi.span, fi.hir_id, Node::ForeignItem(fi), hash);
+ self.with_dep_node_owner(fi.def_id, fi, |this, hash| {
+ this.insert_with_hash(fi.span, fi.hir_id(), Node::ForeignItem(fi), hash);
- this.with_parent(fi.hir_id, |this| {
+ this.with_parent(fi.hir_id(), |this| {
intravisit::walk_foreign_item(this, fi);
});
});
@@ -394,28 +396,20 @@
}
fn visit_trait_item(&mut self, ti: &'hir TraitItem<'hir>) {
- debug_assert_eq!(
- ti.hir_id.owner,
- self.definitions.opt_hir_id_to_local_def_id(ti.hir_id).unwrap()
- );
- self.with_dep_node_owner(ti.hir_id.owner, ti, |this, hash| {
- this.insert_with_hash(ti.span, ti.hir_id, Node::TraitItem(ti), hash);
+ self.with_dep_node_owner(ti.def_id, ti, |this, hash| {
+ this.insert_with_hash(ti.span, ti.hir_id(), Node::TraitItem(ti), hash);
- this.with_parent(ti.hir_id, |this| {
+ this.with_parent(ti.hir_id(), |this| {
intravisit::walk_trait_item(this, ti);
});
});
}
fn visit_impl_item(&mut self, ii: &'hir ImplItem<'hir>) {
- debug_assert_eq!(
- ii.hir_id.owner,
- self.definitions.opt_hir_id_to_local_def_id(ii.hir_id).unwrap()
- );
- self.with_dep_node_owner(ii.hir_id.owner, ii, |this, hash| {
- this.insert_with_hash(ii.span, ii.hir_id, Node::ImplItem(ii), hash);
+ self.with_dep_node_owner(ii.def_id, ii, |this, hash| {
+ this.insert_with_hash(ii.span, ii.hir_id(), Node::ImplItem(ii), hash);
- this.with_parent(ii.hir_id, |this| {
+ this.with_parent(ii.hir_id(), |this| {
intravisit::walk_impl_item(this, ii);
});
});
@@ -532,15 +526,15 @@
// Exported macros are visited directly from the crate root,
// so they do not have `parent_node` set.
// Find the correct enclosing module from their DefKey.
- let def_key = self.definitions.def_key(macro_def.hir_id.owner);
+ let def_key = self.definitions.def_key(macro_def.def_id);
let parent = def_key.parent.map_or(hir::CRATE_HIR_ID, |local_def_index| {
self.definitions.local_def_id_to_hir_id(LocalDefId { local_def_index })
});
self.with_parent(parent, |this| {
- this.with_dep_node_owner(macro_def.hir_id.owner, macro_def, |this, hash| {
+ this.with_dep_node_owner(macro_def.def_id, macro_def, |this, hash| {
this.insert_with_hash(
macro_def.span,
- macro_def.hir_id,
+ macro_def.hir_id(),
Node::MacroDef(macro_def),
hash,
);
@@ -559,10 +553,10 @@
});
}
- fn visit_struct_field(&mut self, field: &'hir StructField<'hir>) {
+ fn visit_field_def(&mut self, field: &'hir FieldDef<'hir>) {
self.insert(field.span, field.hir_id, Node::Field(field));
self.with_parent(field.hir_id, |this| {
- intravisit::walk_struct_field(this, field);
+ intravisit::walk_field_def(this, field);
});
}
@@ -590,18 +584,3 @@
self.visit_nested_foreign_item(id);
}
}
-
-struct HirItemLike<T> {
- item_like: T,
-}
-
-impl<'hir, T> HashStable<StableHashingContext<'hir>> for HirItemLike<T>
-where
- T: HashStable<StableHashingContext<'hir>>,
-{
- fn hash_stable(&self, hcx: &mut StableHashingContext<'hir>, hasher: &mut StableHasher) {
- hcx.while_hashing_hir_bodies(true, |hcx| {
- self.item_like.hash_stable(hcx, hasher);
- });
- }
-}
diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs
index ee12c0e..9d00f07 100644
--- a/compiler/rustc_middle/src/hir/map/mod.rs
+++ b/compiler/rustc_middle/src/hir/map/mod.rs
@@ -180,11 +180,6 @@
self.tcx.definitions.local_def_id_to_hir_id(def_id)
}
- #[inline]
- pub fn opt_local_def_id_to_hir_id(&self, def_id: LocalDefId) -> Option<HirId> {
- self.tcx.definitions.opt_local_def_id_to_hir_id(def_id)
- }
-
pub fn iter_local_def_id(&self) -> impl Iterator<Item = LocalDefId> + '_ {
self.tcx.definitions.iter_local_def_id()
}
@@ -285,7 +280,7 @@
let owner = self.tcx.hir_owner_nodes(id.owner);
owner.and_then(|owner| {
let node = owner.nodes[id.local_id].as_ref();
- // FIXME(eddyb) use a single generic type insted of having both
+ // FIXME(eddyb) use a single generic type instead of having both
// `Entry` and `ParentedNode`, which are effectively the same.
// Alternatively, rewrite code using `Entry` to use `ParentedNode`.
node.map(|node| Entry {
@@ -300,29 +295,29 @@
self.find_entry(id).unwrap()
}
- pub fn item(&self, id: HirId) -> &'hir Item<'hir> {
- match self.find(id).unwrap() {
+ pub fn item(&self, id: ItemId) -> &'hir Item<'hir> {
+ match self.find(id.hir_id()).unwrap() {
Node::Item(item) => item,
_ => bug!(),
}
}
pub fn trait_item(&self, id: TraitItemId) -> &'hir TraitItem<'hir> {
- match self.find(id.hir_id).unwrap() {
+ match self.find(id.hir_id()).unwrap() {
Node::TraitItem(item) => item,
_ => bug!(),
}
}
pub fn impl_item(&self, id: ImplItemId) -> &'hir ImplItem<'hir> {
- match self.find(id.hir_id).unwrap() {
+ match self.find(id.hir_id()).unwrap() {
Node::ImplItem(item) => item,
_ => bug!(),
}
}
pub fn foreign_item(&self, id: ForeignItemId) -> &'hir ForeignItem<'hir> {
- match self.find(id.hir_id).unwrap() {
+ match self.find(id.hir_id()).unwrap() {
Node::ForeignItem(item) => item,
_ => bug!(),
}
@@ -449,7 +444,7 @@
}
}
- pub fn trait_impls(&self, trait_did: DefId) -> &'hir [HirId] {
+ pub fn trait_impls(&self, trait_did: DefId) -> &'hir [LocalDefId] {
self.tcx.all_local_trait_impls(LOCAL_CRATE).get(&trait_did).map_or(&[], |xs| &xs[..])
}
@@ -457,10 +452,7 @@
/// invoking `krate.attrs` because it registers a tighter
/// dep-graph access.
pub fn krate_attrs(&self) -> &'hir [ast::Attribute] {
- match self.get_entry(CRATE_HIR_ID).node {
- Node::Crate(item) => item.attrs,
- _ => bug!(),
- }
+ self.attrs(CRATE_HIR_ID)
}
pub fn get_module(&self, module: LocalDefId) -> (&'hir Mod<'hir>, Span, HirId) {
@@ -479,19 +471,19 @@
let module = self.tcx.hir_module_items(module);
for id in &module.items {
- visitor.visit_item(self.expect_item(*id));
+ visitor.visit_item(self.item(*id));
}
for id in &module.trait_items {
- visitor.visit_trait_item(self.expect_trait_item(id.hir_id));
+ visitor.visit_trait_item(self.trait_item(*id));
}
for id in &module.impl_items {
- visitor.visit_impl_item(self.expect_impl_item(id.hir_id));
+ visitor.visit_impl_item(self.impl_item(*id));
}
for id in &module.foreign_items {
- visitor.visit_foreign_item(self.expect_foreign_item(id.hir_id));
+ visitor.visit_foreign_item(self.foreign_item(*id));
}
}
@@ -500,7 +492,7 @@
V: Visitor<'hir>,
{
for id in self.krate().exported_macros {
- visitor.visit_macro_def(self.expect_macro_def(id.hir_id));
+ visitor.visit_macro_def(self.expect_macro_def(id.hir_id()));
}
}
@@ -853,34 +845,7 @@
/// Given a node ID, gets a list of attributes associated with the AST
/// corresponding to the node ID.
pub fn attrs(&self, id: HirId) -> &'hir [ast::Attribute] {
- self.find_entry(id).map_or(&[], |entry| match entry.node {
- Node::Param(a) => &a.attrs[..],
- Node::Local(l) => &l.attrs[..],
- Node::Item(i) => &i.attrs[..],
- Node::ForeignItem(fi) => &fi.attrs[..],
- Node::TraitItem(ref ti) => &ti.attrs[..],
- Node::ImplItem(ref ii) => &ii.attrs[..],
- Node::Variant(ref v) => &v.attrs[..],
- Node::Field(ref f) => &f.attrs[..],
- Node::Expr(ref e) => &*e.attrs,
- Node::Stmt(ref s) => s.kind.attrs(|id| self.item(id.id)),
- Node::Arm(ref a) => &*a.attrs,
- Node::GenericParam(param) => ¶m.attrs[..],
- // Unit/tuple structs/variants take the attributes straight from
- // the struct/variant definition.
- Node::Ctor(..) => self.attrs(self.get_parent_item(id)),
- Node::Crate(item) => &item.attrs[..],
- Node::MacroDef(def) => def.attrs,
- Node::AnonConst(..)
- | Node::PathSegment(..)
- | Node::Ty(..)
- | Node::Pat(..)
- | Node::Binding(..)
- | Node::TraitRef(..)
- | Node::Block(..)
- | Node::Lifetime(..)
- | Node::Visibility(..) => &[],
- })
+ self.tcx.hir_attrs(id.owner).get(id.local_id)
}
/// Gets the span of the definition of the specified HIR node.
@@ -977,7 +942,7 @@
self.body(id)
}
- fn item(&self, id: HirId) -> &'hir Item<'hir> {
+ fn item(&self, id: ItemId) -> &'hir Item<'hir> {
self.item(id)
}
@@ -994,47 +959,6 @@
}
}
-trait Named {
- fn name(&self) -> Symbol;
-}
-
-impl<T: Named> Named for Spanned<T> {
- fn name(&self) -> Symbol {
- self.node.name()
- }
-}
-
-impl Named for Item<'_> {
- fn name(&self) -> Symbol {
- self.ident.name
- }
-}
-impl Named for ForeignItem<'_> {
- fn name(&self) -> Symbol {
- self.ident.name
- }
-}
-impl Named for Variant<'_> {
- fn name(&self) -> Symbol {
- self.ident.name
- }
-}
-impl Named for StructField<'_> {
- fn name(&self) -> Symbol {
- self.ident.name
- }
-}
-impl Named for TraitItem<'_> {
- fn name(&self) -> Symbol {
- self.ident.name
- }
-}
-impl Named for ImplItem<'_> {
- fn name(&self) -> Symbol {
- self.ident.name
- }
-}
-
pub(super) fn index_hir<'tcx>(tcx: TyCtxt<'tcx>, cnum: CrateNum) -> &'tcx IndexedHir<'tcx> {
assert_eq!(cnum, LOCAL_CRATE);
diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs
index 6934e06..cf4e473 100644
--- a/compiler/rustc_middle/src/hir/mod.rs
+++ b/compiler/rustc_middle/src/hir/mod.rs
@@ -9,6 +9,7 @@
use crate::ich::StableHashingContext;
use crate::ty::query::Providers;
use crate::ty::TyCtxt;
+use rustc_ast::Attribute;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@@ -16,6 +17,7 @@
use rustc_hir::*;
use rustc_index::vec::IndexVec;
use rustc_span::DUMMY_SP;
+use std::collections::BTreeMap;
#[derive(Debug)]
pub struct Owner<'tcx> {
@@ -55,6 +57,48 @@
}
}
+#[derive(Copy, Clone)]
+pub struct AttributeMap<'tcx> {
+ map: &'tcx BTreeMap<HirId, &'tcx [Attribute]>,
+ prefix: LocalDefId,
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for AttributeMap<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let range = self.range();
+
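+ // Hash the number of entries first, then each (HirId, attribute slice) pair, so
+ // that the map's encoding in the hash stream is unambiguous.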
+ range.clone().count().hash_stable(hcx, hasher);
+ for (key, value) in range {
+ key.hash_stable(hcx, hasher);
+ value.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+impl<'tcx> std::fmt::Debug for AttributeMap<'tcx> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("AttributeMap")
+ .field("prefix", &self.prefix)
+ .field("range", &&self.range().collect::<Vec<_>>()[..])
+ .finish()
+ }
+}
+
+impl<'tcx> AttributeMap<'tcx> {
+ fn get(&self, id: ItemLocalId) -> &'tcx [Attribute] {
+ self.map.get(&HirId { owner: self.prefix, local_id: id }).copied().unwrap_or(&[])
+ }
+
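+ // All entries of the crate-wide attribute map that belong to the owner `self.prefix`,
+ // i.e. the half-open `HirId` key range from local id 0 of this owner (inclusive) up to
+ // local id 0 of the next `LocalDefId` (exclusive).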
+ fn range(&self) -> std::collections::btree_map::Range<'_, rustc_hir::HirId, &[Attribute]> {
+ let local_zero = ItemLocalId::from_u32(0);
+ let range = HirId { owner: self.prefix, local_id: local_zero }..HirId {
+ owner: LocalDefId { local_def_index: self.prefix.local_def_index + 1 },
+ local_id: local_zero,
+ };
+ self.map.range(range)
+ }
+}
+
impl<'tcx> TyCtxt<'tcx> {
#[inline(always)]
pub fn hir(self) -> map::Map<'tcx> {
@@ -73,13 +117,10 @@
};
providers.hir_crate = |tcx, _| tcx.untracked_crate;
providers.index_hir = map::index_hir;
- providers.hir_module_items = |tcx, id| {
- let hir = tcx.hir();
- let module = hir.local_def_id_to_hir_id(id);
- &tcx.untracked_crate.modules[&module]
- };
+ providers.hir_module_items = |tcx, id| &tcx.untracked_crate.modules[&id];
providers.hir_owner = |tcx, id| tcx.index_hir(LOCAL_CRATE).map[id].signature;
providers.hir_owner_nodes = |tcx, id| tcx.index_hir(LOCAL_CRATE).map[id].with_bodies.as_deref();
+ providers.hir_attrs = |tcx, id| AttributeMap { map: &tcx.untracked_crate.attrs, prefix: id };
providers.def_span = |tcx, def_id| tcx.hir().span_if_local(def_id).unwrap_or(DUMMY_SP);
providers.fn_arg_names = |tcx, id| {
let hir = tcx.hir();
diff --git a/compiler/rustc_middle/src/ich/hcx.rs b/compiler/rustc_middle/src/ich/hcx.rs
index 51b650e..cf29d21 100644
--- a/compiler/rustc_middle/src/ich/hcx.rs
+++ b/compiler/rustc_middle/src/ich/hcx.rs
@@ -250,13 +250,6 @@
&CACHE
}
- fn byte_pos_to_line_and_col(
- &mut self,
- byte: BytePos,
- ) -> Option<(Lrc<SourceFile>, usize, BytePos)> {
- self.source_map().byte_pos_to_line_and_col(byte)
- }
-
fn span_data_to_lines_and_cols(
&mut self,
span: &SpanData,
diff --git a/compiler/rustc_middle/src/ich/impls_hir.rs b/compiler/rustc_middle/src/ich/impls_hir.rs
index d6c6cef..abf5683 100644
--- a/compiler/rustc_middle/src/ich/impls_hir.rs
+++ b/compiler/rustc_middle/src/ich/impls_hir.rs
@@ -55,8 +55,7 @@
let item_ids_hash = item_ids
.iter()
.map(|id| {
- let (def_path_hash, local_id) = id.id.to_stable_hash_key(hcx);
- debug_assert_eq!(local_id, hir::ItemLocalId::from_u32(0));
+ let def_path_hash = id.to_stable_hash_key(hcx);
def_path_hash.0
})
.fold(Fingerprint::ZERO, |a, b| a.combine_commutative(b));
@@ -67,11 +66,10 @@
fn hash_hir_expr(&mut self, expr: &hir::Expr<'_>, hasher: &mut StableHasher) {
self.while_hashing_hir_bodies(true, |hcx| {
- let hir::Expr { hir_id: _, ref span, ref kind, ref attrs } = *expr;
+ let hir::Expr { hir_id: _, ref span, ref kind } = *expr;
span.hash_stable(hcx, hasher);
kind.hash_stable(hcx, hasher);
- attrs.hash_stable(hcx, hasher);
})
}
diff --git a/compiler/rustc_middle/src/ich/impls_syntax.rs b/compiler/rustc_middle/src/ich/impls_syntax.rs
index bfbe157..aacec86 100644
--- a/compiler/rustc_middle/src/ich/impls_syntax.rs
+++ b/compiler/rustc_middle/src/ich/impls_syntax.rs
@@ -45,7 +45,7 @@
item.hash_stable(self, hasher);
style.hash_stable(self, hasher);
span.hash_stable(self, hasher);
- tokens.as_ref().expect_none("Tokens should have been removed during lowering!");
+ assert!(tokens.as_ref().is_none(), "Tokens should have been removed during lowering!");
} else {
unreachable!();
}
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
index 6ae83a7..4976516 100644
--- a/compiler/rustc_middle/src/lib.rs
+++ b/compiler/rustc_middle/src/lib.rs
@@ -24,7 +24,6 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(array_windows)]
-#![feature(assoc_char_funcs)]
#![feature(backtrace)]
#![feature(bool_to_option)]
#![feature(box_patterns)]
@@ -38,7 +37,6 @@
#![feature(extern_types)]
#![feature(nll)]
#![feature(once_cell)]
-#![feature(option_expect_none)]
#![feature(or_patterns)]
#![feature(min_specialization)]
#![feature(trusted_len)]
@@ -76,6 +74,7 @@
#[macro_use]
pub mod arena;
+#[macro_use]
pub mod dep_graph;
pub mod hir;
pub mod ich;
diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs
index ca73481..26e61ec 100644
--- a/compiler/rustc_middle/src/lint.rs
+++ b/compiler/rustc_middle/src/lint.rs
@@ -353,12 +353,12 @@
it will become a hard error";
let explanation = if lint_id == LintId::of(builtin::UNSTABLE_NAME_COLLISIONS) {
- "once this method is added to the standard library, \
- the ambiguity may cause an error or change in behavior!"
+ "once this associated item is added to the standard library, the ambiguity may \
+ cause an error or change in behavior!"
.to_owned()
} else if lint_id == LintId::of(builtin::MUTABLE_BORROW_RESERVATION_CONFLICT) {
- "this borrowing pattern was not meant to be accepted, \
- and may become a hard error in the future"
+ "this borrowing pattern was not meant to be accepted, and may become a hard error \
+ in the future"
.to_owned()
} else if let Some(edition) = future_incompatible.edition {
format!("{} in the {} edition!", STANDARD_MESSAGE, edition)
diff --git a/compiler/rustc_middle/src/mir/coverage.rs b/compiler/rustc_middle/src/mir/coverage.rs
index 95096d0..eae02a8 100644
--- a/compiler/rustc_middle/src/mir/coverage.rs
+++ b/compiler/rustc_middle/src/mir/coverage.rs
@@ -92,7 +92,7 @@
}
}
-#[derive(Clone, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable)]
pub enum CoverageKind {
Counter {
function_source_hash: u64,
@@ -148,7 +148,18 @@
}
}
-#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(
+ Clone,
+ TyEncodable,
+ TyDecodable,
+ Hash,
+ HashStable,
+ TypeFoldable,
+ PartialEq,
+ Eq,
+ PartialOrd,
+ Ord
+)]
pub struct CodeRegion {
pub file_name: Symbol,
pub start_line: u32,
@@ -167,7 +178,7 @@
}
}
-#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable)]
pub enum Op {
Subtract,
Add,
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index 5ebe38b..898c375 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -266,7 +266,7 @@
let range = self.check_bounds(ptr.offset, size);
self.mark_init(ptr, size, true);
- self.clear_relocations(cx, ptr, size)?;
+ self.clear_relocations(cx, ptr, size);
AllocationExtra::memory_written(self, ptr, size)?;
@@ -339,7 +339,7 @@
for dest in bytes {
*dest = src.next().expect("iterator was shorter than it said it would be");
}
- src.next().expect_none("iterator was longer than it said it would be");
+ assert!(src.next().is_none(), "iterator was longer than it said it would be");
Ok(())
}
@@ -484,18 +484,13 @@
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
- fn clear_relocations(
- &mut self,
- cx: &impl HasDataLayout,
- ptr: Pointer<Tag>,
- size: Size,
- ) -> InterpResult<'tcx> {
+ fn clear_relocations(&mut self, cx: &impl HasDataLayout, ptr: Pointer<Tag>, size: Size) {
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
// Find all relocations overlapping the given range.
let relocations = self.get_relocations(cx, ptr, size);
if relocations.is_empty() {
- return Ok(());
+ return;
}
(
@@ -517,8 +512,6 @@
// Forget all the relocations.
self.relocations.remove_range(first..last);
-
- Ok(())
}
/// Errors if there are relocations overlapping with the edges of the
@@ -550,12 +543,12 @@
/// error which will report the first range of bytes which is uninitialized.
fn check_init(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
self.is_init(ptr, size).or_else(|idx_range| {
- throw_ub!(InvalidUninitBytes(Some(Box::new(UninitBytesAccess {
+ throw_ub!(InvalidUninitBytes(Some(UninitBytesAccess {
access_ptr: ptr.erase_tag(),
access_size: size,
uninit_ptr: Pointer::new(ptr.alloc_id, idx_range.start),
uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
- }))))
+ })))
})
}
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index cf931ec..b2b969e 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -9,7 +9,7 @@
use rustc_session::CtfeBacktrace;
use rustc_span::def_id::DefId;
use rustc_target::abi::{Align, Size};
-use std::{any::Any, backtrace::Backtrace, fmt, mem};
+use std::{any::Any, backtrace::Backtrace, fmt};
#[derive(Debug, Copy, Clone, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
pub enum ErrorHandled {
@@ -40,29 +40,45 @@
struct_span_err!(tcx.sess, tcx.span, E0080, "{}", msg)
}
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(InterpErrorInfo<'_>, 8);
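+// `InterpErrorInfo` stays a single pointer wide because the kind and backtrace are
+// boxed in `InterpErrorInfoInner` below, which helps keep `InterpResult<'tcx, T>` small
+// in the common non-error case.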
+
/// Packages the kind of error we got from the const code interpreter
/// up with a Rust-level backtrace of where the error occurred.
/// These should always be constructed by calling `.into()` on
/// an `InterpError`. In `librustc_mir::interpret`, we have `throw_err_*`
/// macros for this.
#[derive(Debug)]
-pub struct InterpErrorInfo<'tcx> {
- pub kind: InterpError<'tcx>,
+pub struct InterpErrorInfo<'tcx>(Box<InterpErrorInfoInner<'tcx>>);
+
+#[derive(Debug)]
+struct InterpErrorInfoInner<'tcx> {
+ kind: InterpError<'tcx>,
backtrace: Option<Box<Backtrace>>,
}
impl fmt::Display for InterpErrorInfo<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "{}", self.kind)
+ write!(f, "{}", self.0.kind)
}
}
-impl InterpErrorInfo<'_> {
+impl InterpErrorInfo<'tcx> {
pub fn print_backtrace(&self) {
- if let Some(backtrace) = self.backtrace.as_ref() {
+ if let Some(backtrace) = self.0.backtrace.as_ref() {
print_backtrace(backtrace);
}
}
+
+ pub fn into_kind(self) -> InterpError<'tcx> {
+ let InterpErrorInfo(box InterpErrorInfoInner { kind, .. }) = self;
+ kind
+ }
+
+ #[inline]
+ pub fn kind(&self) -> &InterpError<'tcx> {
+ &self.0.kind
+ }
}
fn print_backtrace(backtrace: &Backtrace) {
@@ -108,7 +124,7 @@
}
};
- InterpErrorInfo { kind, backtrace }
+ InterpErrorInfo(Box::new(InterpErrorInfoInner { kind, backtrace }))
}
}
@@ -247,7 +263,7 @@
/// Using a string that is not valid UTF-8,
InvalidStr(std::str::Utf8Error),
/// Using uninitialized data where it is not allowed.
- InvalidUninitBytes(Option<Box<UninitBytesAccess>>),
+ InvalidUninitBytes(Option<UninitBytesAccess>),
/// Working with a local that is not currently live.
DeadLocal,
/// Data size is not equal to target size.
@@ -428,8 +444,8 @@
}
}
-#[cfg(target_arch = "x86_64")]
-static_assert_size!(InterpError<'_>, 40);
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(InterpError<'_>, 72);
pub enum InterpError<'tcx> {
/// The program caused undefined behavior.
@@ -470,19 +486,14 @@
}
impl InterpError<'_> {
- /// Some errors allocate to be created as they contain free-form strings.
- /// And sometimes we want to be sure that did not happen as it is a
- /// waste of resources.
- pub fn allocates(&self) -> bool {
+ /// Some errors do string formatting even if the error is never printed.
+ /// To avoid performance issues, there are places where we want to be sure to never raise these formatting errors,
+ /// so this method lets us detect them and `bug!` on unexpected errors.
+ pub fn formatted_string(&self) -> bool {
match self {
- // Zero-sized boxes do not allocate.
- InterpError::MachineStop(b) => mem::size_of_val::<dyn MachineStopType>(&**b) > 0,
InterpError::Unsupported(UnsupportedOpInfo::Unsupported(_))
| InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ValidationFailure(_))
- | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_))
- | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::InvalidUninitBytes(Some(_))) => {
- true
- }
+ | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_)) => true,
_ => false,
}
}
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
index 0517ec5..3e7b93b 100644
--- a/compiler/rustc_middle/src/mir/interpret/queries.rs
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -31,6 +31,7 @@
/// constant `bar::<T>()` requires a substitution for `T`, if the substitution for `T` is still
/// too generic for the constant to be evaluated then `Err(ErrorHandled::TooGeneric)` is
/// returned.
+ #[instrument(level = "debug", skip(self))]
pub fn const_eval_resolve(
self,
param_env: ty::ParamEnv<'tcx>,
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 4bb39fe..5172dfd 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -1,4 +1,4 @@
-use std::convert::TryFrom;
+use std::convert::{TryFrom, TryInto};
use std::fmt;
use rustc_apfloat::{
@@ -8,12 +8,12 @@
use rustc_macros::HashStable;
use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
-use crate::ty::{ParamEnv, ScalarInt, Ty, TyCtxt};
+use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt};
use super::{AllocId, Allocation, InterpResult, Pointer, PointerArithmetic};
/// Represents the result of const evaluation via the `eval_to_allocation` query.
-#[derive(Clone, HashStable, TyEncodable, TyDecodable, Debug)]
+#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
pub struct ConstAlloc<'tcx> {
// the value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory`
// (so you can use `AllocMap::unwrap_memory`).
@@ -44,9 +44,30 @@
},
}
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ConstValue<'_>, 32);
+impl From<Scalar> for ConstValue<'tcx> {
+ fn from(s: Scalar) -> Self {
+ Self::Scalar(s)
+ }
+}
+
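+// Lifting lets callers holding a `ConstValue` with a shorter lifetime obtain a `'tcx`
+// version of it, e.g. `pretty_print_const_value` in `mir/mod.rs` calls `tcx.lift(val)`
+// before printing.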
+impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
+ type Lifted = ConstValue<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ConstValue<'tcx>> {
+ Some(match self {
+ ConstValue::Scalar(s) => ConstValue::Scalar(s),
+ ConstValue::Slice { data, start, end } => {
+ ConstValue::Slice { data: tcx.lift(data)?, start, end }
+ }
+ ConstValue::ByRef { alloc, offset } => {
+ ConstValue::ByRef { alloc: tcx.lift(alloc)?, offset }
+ }
+ })
+ }
+}
+
impl<'tcx> ConstValue<'tcx> {
#[inline]
pub fn try_to_scalar(&self) -> Option<Scalar> {
@@ -56,20 +77,20 @@
}
}
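+ /// Like `try_to_scalar`, but additionally asserts (via `assert_int`) that the scalar
+ /// carries plain data rather than a pointer.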
+ pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
+ Some(self.try_to_scalar()?.assert_int())
+ }
+
pub fn try_to_bits(&self, size: Size) -> Option<u128> {
- self.try_to_scalar()?.to_bits(size).ok()
+ self.try_to_scalar_int()?.to_bits(size).ok()
}
pub fn try_to_bool(&self) -> Option<bool> {
- match self.try_to_bits(Size::from_bytes(1))? {
- 0 => Some(false),
- 1 => Some(true),
- _ => None,
- }
+ self.try_to_scalar_int()?.try_into().ok()
}
pub fn try_to_machine_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
- Some(self.try_to_bits(tcx.data_layout.pointer_size)? as u64)
+ self.try_to_scalar_int()?.try_to_machine_usize(tcx).ok()
}
pub fn try_to_bits_for_ty(
@@ -111,7 +132,7 @@
Ptr(Pointer<Tag>),
}
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(Scalar, 24);
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
@@ -503,13 +524,20 @@
}
}
+impl<Tag> From<ScalarInt> for Scalar<Tag> {
+ #[inline(always)]
+ fn from(ptr: ScalarInt) -> Self {
+ Scalar::Int(ptr)
+ }
+}
+
#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
pub enum ScalarMaybeUninit<Tag = ()> {
Scalar(Scalar<Tag>),
Uninit,
}
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ScalarMaybeUninit, 24);
impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> {
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 718e81c..90fda9e 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -11,12 +11,12 @@
use crate::ty::print::{FmtPrinter, Printer};
use crate::ty::subst::{Subst, SubstsRef};
use crate::ty::{self, List, Ty, TyCtxt};
-use crate::ty::{AdtDef, InstanceDef, Region, UserTypeAnnotationIndex};
+use crate::ty::{AdtDef, InstanceDef, Region, ScalarInt, UserTypeAnnotationIndex};
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, Namespace};
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX};
use rustc_hir::{self, GeneratorKind};
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{Size, VariantIdx};
use polonius_engine::Atom;
pub use rustc_ast::Mutability;
@@ -30,6 +30,7 @@
use rustc_span::{Span, DUMMY_SP};
use rustc_target::asm::InlineAsmRegOrRegClass;
use std::borrow::Cow;
+use std::convert::TryInto;
use std::fmt::{self, Debug, Display, Formatter, Write};
use std::ops::{ControlFlow, Index, IndexMut};
use std::slice;
@@ -61,12 +62,14 @@
}
impl<'tcx> HasLocalDecls<'tcx> for LocalDecls<'tcx> {
+ #[inline]
fn local_decls(&self) -> &LocalDecls<'tcx> {
self
}
}
impl<'tcx> HasLocalDecls<'tcx> for Body<'tcx> {
+ #[inline]
fn local_decls(&self) -> &LocalDecls<'tcx> {
&self.local_decls
}
@@ -144,6 +147,22 @@
}
}
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable)]
+pub struct GeneratorInfo<'tcx> {
+ /// The yield type of the function, if it is a generator.
+ pub yield_ty: Option<Ty<'tcx>>,
+
+ /// Generator drop glue.
+ pub generator_drop: Option<Body<'tcx>>,
+
+ /// The layout of a generator. Produced by the state transformation.
+ pub generator_layout: Option<GeneratorLayout<'tcx>>,
+
+ /// If this is a generator then record the type of source expression that caused this generator
+ /// to be created.
+ pub generator_kind: GeneratorKind,
+}
+
/// The lowered representation of a single function.
#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable)]
pub struct Body<'tcx> {
@@ -164,18 +183,7 @@
/// and used for debuginfo. Indexed by a `SourceScope`.
pub source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
- /// The yield type of the function, if it is a generator.
- pub yield_ty: Option<Ty<'tcx>>,
-
- /// Generator drop glue.
- pub generator_drop: Option<Box<Body<'tcx>>>,
-
- /// The layout of a generator. Produced by the state transformation.
- pub generator_layout: Option<GeneratorLayout<'tcx>>,
-
- /// If this is a generator then record the type of source expression that caused this generator
- /// to be created.
- pub generator_kind: Option<GeneratorKind>,
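+ /// All generator-specific information (yield type, drop glue, layout, and generator
+ /// kind), boxed behind an `Option` so that non-generator bodies don't pay for it.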
+ pub generator: Option<Box<GeneratorInfo<'tcx>>>,
/// Declarations of locals.
///
@@ -257,10 +265,14 @@
source,
basic_blocks,
source_scopes,
- yield_ty: None,
- generator_drop: None,
- generator_layout: None,
- generator_kind,
+ generator: generator_kind.map(|generator_kind| {
+ Box::new(GeneratorInfo {
+ yield_ty: None,
+ generator_drop: None,
+ generator_layout: None,
+ generator_kind,
+ })
+ }),
local_decls,
user_type_annotations,
arg_count,
@@ -287,16 +299,13 @@
source: MirSource::item(DefId::local(CRATE_DEF_INDEX)),
basic_blocks,
source_scopes: IndexVec::new(),
- yield_ty: None,
- generator_drop: None,
- generator_layout: None,
+ generator: None,
local_decls: IndexVec::new(),
user_type_annotations: IndexVec::new(),
arg_count: 0,
spread_arg: None,
span: DUMMY_SP,
required_consts: Vec::new(),
- generator_kind: None,
var_debug_info: Vec::new(),
is_polymorphic: false,
predecessor_cache: PredecessorCache::new(),
@@ -478,6 +487,26 @@
pub fn dominators(&self) -> Dominators<BasicBlock> {
dominators(self)
}
+
+ #[inline]
+ pub fn yield_ty(&self) -> Option<Ty<'tcx>> {
+ self.generator.as_ref().and_then(|generator| generator.yield_ty)
+ }
+
+ #[inline]
+ pub fn generator_layout(&self) -> Option<&GeneratorLayout<'tcx>> {
+ self.generator.as_ref().and_then(|generator| generator.generator_layout.as_ref())
+ }
+
+ #[inline]
+ pub fn generator_drop(&self) -> Option<&Body<'tcx>> {
+ self.generator.as_ref().and_then(|generator| generator.generator_drop.as_ref())
+ }
+
+ #[inline]
+ pub fn generator_kind(&self) -> Option<GeneratorKind> {
+ self.generator.as_ref().map(|generator| generator.generator_kind)
+ }
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, TyEncodable, TyDecodable, HashStable)]
@@ -594,7 +623,7 @@
// Borrow kinds
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
-#[derive(HashStable)]
+#[derive(Hash, HashStable)]
pub enum BorrowKind {
/// Data must be immutable and is aliasable.
Shared,
@@ -923,7 +952,7 @@
}
// `LocalDecl` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(LocalDecl<'_>, 56);
/// Extra information about some locals that's used for diagnostics and for
@@ -1163,7 +1192,7 @@
}
/// Information about an assertion failure.
-#[derive(Clone, TyEncodable, TyDecodable, HashStable, PartialEq)]
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
pub enum AssertKind<O> {
BoundsCheck { len: O, index: O },
Overflow(BinOp, O, O),
@@ -1174,7 +1203,17 @@
ResumedAfterPanic(GeneratorKind),
}
-#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+#[derive(
+ Clone,
+ Debug,
+ PartialEq,
+ PartialOrd,
+ TyEncodable,
+ TyDecodable,
+ Hash,
+ HashStable,
+ TypeFoldable
+)]
pub enum InlineAsmOperand<'tcx> {
In {
reg: InlineAsmRegOrRegClass,
@@ -1430,7 +1469,7 @@
}
// `Statement` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(Statement<'_>, 32);
impl Statement<'_> {
@@ -1449,7 +1488,7 @@
}
}
-#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable)]
pub enum StatementKind<'tcx> {
/// Write the RHS Rvalue to the LHS Place.
Assign(Box<(Place<'tcx>, Rvalue<'tcx>)>),
@@ -1503,12 +1542,24 @@
/// counter variable at runtime, each time the code region is executed.
Coverage(Box<Coverage>),
+ /// Denotes a call to the intrinsic function `copy_nonoverlapping`, where `src` and `dst`
+ /// denote the memory being read from and written to, and `count`
+ /// indicates how many elements are being copied over.
+ CopyNonOverlapping(Box<CopyNonOverlapping<'tcx>>),
+
/// No-op. Useful for deleting instructions without affecting statement indices.
Nop,
}
impl<'tcx> StatementKind<'tcx> {
- pub fn as_assign_mut(&mut self) -> Option<&mut Box<(Place<'tcx>, Rvalue<'tcx>)>> {
+ pub fn as_assign_mut(&mut self) -> Option<&mut (Place<'tcx>, Rvalue<'tcx>)> {
+ match self {
+ StatementKind::Assign(x) => Some(x),
+ _ => None,
+ }
+ }
+
+ pub fn as_assign(&self) -> Option<&(Place<'tcx>, Rvalue<'tcx>)> {
match self {
StatementKind::Assign(x) => Some(x),
_ => None,
@@ -1517,7 +1568,7 @@
}
/// Describes what kind of retag is to be performed.
-#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, HashStable)]
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, Hash, HashStable)]
pub enum RetagKind {
/// The initial retag when entering a function.
FnEntry,
@@ -1530,7 +1581,7 @@
}
/// The `FakeReadCause` describes the kind of pattern for which a `FakeRead` statement exists.
-#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, HashStable, PartialEq)]
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, Hash, HashStable, PartialEq)]
pub enum FakeReadCause {
/// Inject a fake read of the borrowed input at the end of each guard's
/// code.
@@ -1572,7 +1623,7 @@
ForIndex,
}
-#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable)]
pub struct LlvmInlineAsm<'tcx> {
pub asm: hir::LlvmInlineAsmInner,
pub outputs: Box<[Place<'tcx>]>,
@@ -1614,17 +1665,32 @@
write!(fmt, "Coverage::{:?}", coverage.kind)
}
}
+ CopyNonOverlapping(box crate::mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ write!(fmt, "copy_nonoverlapping(src={:?}, dst={:?}, count={:?})", src, dst, count)
+ }
Nop => write!(fmt, "nop"),
}
}
}
-#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable)]
pub struct Coverage {
pub kind: CoverageKind,
pub code_region: Option<CodeRegion>,
}
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable)]
+pub struct CopyNonOverlapping<'tcx> {
+ pub src: Operand<'tcx>,
+ pub dst: Operand<'tcx>,
+ /// Number of elements to copy from `src` to `dst`, not bytes.
+ pub count: Operand<'tcx>,
+}
+
///////////////////////////////////////////////////////////////////////////
// Places
@@ -1638,6 +1704,9 @@
pub projection: &'tcx List<PlaceElem<'tcx>>,
}
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(Place<'_>, 16);
+
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[derive(TyEncodable, TyDecodable, HashStable)]
pub enum ProjectionElem<V, T> {
@@ -1707,7 +1776,7 @@
pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>;
// At least on 64 bit systems, `PlaceElem` should not be larger than two pointers.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(PlaceElem<'_>, 24);
/// Alias for projections as they appear in `UserTypeProjection`, where we
@@ -1755,6 +1824,7 @@
self.as_ref().as_local()
}
+ #[inline]
pub fn as_ref(&self) -> PlaceRef<'tcx> {
PlaceRef { local: self.local, projection: &self.projection }
}
@@ -1766,6 +1836,7 @@
/// - (a.b, .c)
///
/// Given a place without projections, the iterator is empty.
+ #[inline]
pub fn iter_projections(
self,
) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
@@ -1915,7 +1986,7 @@
/// These are values that can appear inside an rvalue. They are intentionally
/// limited to prevent rvalues from being nested in one another.
-#[derive(Clone, PartialEq, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, PartialEq, PartialOrd, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum Operand<'tcx> {
/// Copy: The value must be available for use afterwards.
///
@@ -1934,6 +2005,9 @@
Constant(Box<Constant<'tcx>>),
}
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(Operand<'_>, 24);
+
impl<'tcx> Debug for Operand<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
use self::Operand::*;
@@ -1959,7 +2033,7 @@
Operand::Constant(box Constant {
span,
user_ty: None,
- literal: ty::Const::zero_sized(tcx, ty),
+ literal: ConstantKind::Ty(ty::Const::zero_sized(tcx, ty)),
})
}
@@ -1990,7 +2064,7 @@
Operand::Constant(box Constant {
span,
user_ty: None,
- literal: ty::Const::from_scalar(tcx, val, ty),
+ literal: ConstantKind::Val(val.into(), ty),
})
}
@@ -2023,7 +2097,7 @@
///////////////////////////////////////////////////////////////////////////
/// Rvalues
-#[derive(Clone, TyEncodable, TyDecodable, HashStable, PartialEq)]
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
pub enum Rvalue<'tcx> {
/// x (either a move or copy, depending on type of x)
Use(Operand<'tcx>),
@@ -2049,8 +2123,8 @@
Cast(CastKind, Operand<'tcx>, Ty<'tcx>),
- BinaryOp(BinOp, Operand<'tcx>, Operand<'tcx>),
- CheckedBinaryOp(BinOp, Operand<'tcx>, Operand<'tcx>),
+ BinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),
+ CheckedBinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),
NullaryOp(NullOp, Ty<'tcx>),
UnaryOp(UnOp, Operand<'tcx>),
@@ -2069,13 +2143,16 @@
Aggregate(Box<AggregateKind<'tcx>>, Vec<Operand<'tcx>>),
}
-#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(Rvalue<'_>, 40);
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum CastKind {
Misc,
Pointer(PointerCast),
}
-#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum AggregateKind<'tcx> {
/// The type is of the element
Array(Ty<'tcx>),
@@ -2092,7 +2169,10 @@
Generator(DefId, SubstsRef<'tcx>, hir::Movability),
}
-#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(AggregateKind<'_>, 48);
+
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum BinOp {
/// The `+` operator (addition)
Add,
@@ -2137,7 +2217,7 @@
}
}
-#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum NullOp {
/// Returns the size of a value of that type
SizeOf,
@@ -2145,7 +2225,7 @@
Box,
}
-#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum UnOp {
/// The `!` operator for logical inversion
Not,
@@ -2168,8 +2248,8 @@
Cast(ref kind, ref place, ref ty) => {
write!(fmt, "{:?} as {:?} ({:?})", place, ty, kind)
}
- BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
- CheckedBinaryOp(ref op, ref a, ref b) => {
+ BinaryOp(ref op, box (ref a, ref b)) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
+ CheckedBinaryOp(ref op, box (ref a, ref b)) => {
write!(fmt, "Checked{:?}({:?}, {:?})", op, a, b)
}
UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a),
@@ -2315,7 +2395,7 @@
/// this does not necessarily mean that they are `==` in Rust. In
/// particular, one must be wary of `NaN`!
-#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, Copy, PartialEq, PartialOrd, TyEncodable, TyDecodable, Hash, HashStable)]
pub struct Constant<'tcx> {
pub span: Span,
@@ -2326,12 +2406,21 @@
/// Needed for NLL to impose user-given type constraints.
pub user_ty: Option<UserTypeAnnotationIndex>,
- pub literal: &'tcx ty::Const<'tcx>,
+ pub literal: ConstantKind<'tcx>,
+}
+
+#[derive(Clone, Copy, PartialEq, PartialOrd, TyEncodable, TyDecodable, Hash, HashStable, Debug)]
+pub enum ConstantKind<'tcx> {
+ /// This constant came from the type system
+ Ty(&'tcx ty::Const<'tcx>),
+ /// This constant cannot go back into the type system, as it represents
+ /// something the type system cannot handle (e.g. pointers).
+ Val(interpret::ConstValue<'tcx>, Ty<'tcx>),
}
impl Constant<'tcx> {
pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
- match self.literal.val.try_to_scalar() {
+ match self.literal.const_for_ty()?.val.try_to_scalar() {
Some(Scalar::Ptr(ptr)) => match tcx.global_alloc(ptr.alloc_id) {
GlobalAlloc::Static(def_id) => {
assert!(!tcx.is_thread_local_static(def_id));
@@ -2342,6 +2431,94 @@
_ => None,
}
}
+ pub fn ty(&self) -> Ty<'tcx> {
+ self.literal.ty()
+ }
+}
+
+impl From<&'tcx ty::Const<'tcx>> for ConstantKind<'tcx> {
+ fn from(ct: &'tcx ty::Const<'tcx>) -> Self {
+ Self::Ty(ct)
+ }
+}
+
+impl ConstantKind<'tcx> {
+ /// Returns `None` if the constant is not trivially safe for use in the type system.
+ pub fn const_for_ty(&self) -> Option<&'tcx ty::Const<'tcx>> {
+ match self {
+ ConstantKind::Ty(c) => Some(c),
+ ConstantKind::Val(..) => None,
+ }
+ }
+
+ pub fn ty(&self) -> Ty<'tcx> {
+ match self {
+ ConstantKind::Ty(c) => c.ty,
+ ConstantKind::Val(_, ty) => ty,
+ }
+ }
+
+ #[inline]
+ pub fn try_to_value(self) -> Option<interpret::ConstValue<'tcx>> {
+ match self {
+ ConstantKind::Ty(c) => c.val.try_to_value(),
+ ConstantKind::Val(val, _) => Some(val),
+ }
+ }
+
+ #[inline]
+ pub fn try_to_scalar(self) -> Option<Scalar> {
+ self.try_to_value()?.try_to_scalar()
+ }
+
+ #[inline]
+ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
+ Some(self.try_to_value()?.try_to_scalar()?.assert_int())
+ }
+
+ #[inline]
+ pub fn try_to_bits(self, size: Size) -> Option<u128> {
+ self.try_to_scalar_int()?.to_bits(size).ok()
+ }
+
+ #[inline]
+ pub fn try_to_bool(self) -> Option<bool> {
+ self.try_to_scalar_int()?.try_into().ok()
+ }
+
+ #[inline]
+ pub fn try_eval_bits(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Option<u128> {
+ match self {
+ Self::Ty(ct) => ct.try_eval_bits(tcx, param_env, ty),
+ Self::Val(val, t) => {
+ assert_eq!(*t, ty);
+ let size =
+ tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+ val.try_to_bits(size)
+ }
+ }
+ }
+
+ #[inline]
+ pub fn try_eval_bool(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<bool> {
+ match self {
+ Self::Ty(ct) => ct.try_eval_bool(tcx, param_env),
+ Self::Val(val, _) => val.try_to_bool(),
+ }
+ }
+
+ #[inline]
+ pub fn try_eval_usize(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<u64> {
+ match self {
+ Self::Ty(ct) => ct.try_eval_usize(tcx, param_env),
+ Self::Val(val, _) => val.try_to_machine_usize(tcx),
+ }
+ }
}
/// A collection of projections into user types.
@@ -2449,7 +2626,7 @@
/// * `let (x, _): T = ...` -- here, the `projs` vector would contain
/// `field[0]` (aka `.0`), indicating that the type of `s` is
/// determined by finding the type of the `.0` field from `T`.
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, PartialEq)]
+#[derive(Clone, Debug, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
pub struct UserTypeProjection {
pub base: UserTypeAnnotationIndex,
pub projs: Vec<ProjectionKind>,
@@ -2527,11 +2704,14 @@
impl<'tcx> Display for Constant<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- match self.literal.ty.kind() {
+ match self.ty().kind() {
ty::FnDef(..) => {}
_ => write!(fmt, "const ")?,
}
- pretty_print_const(self.literal, fmt, true)
+ match self.literal {
+ ConstantKind::Ty(c) => pretty_print_const(c, fmt, true),
+ ConstantKind::Val(val, ty) => pretty_print_const_value(val, ty, fmt, true),
+ }
}
}
@@ -2550,6 +2730,23 @@
})
}
+fn pretty_print_const_value(
+ val: interpret::ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ fmt: &mut Formatter<'_>,
+ print_types: bool,
+) -> fmt::Result {
+ use crate::ty::print::PrettyPrinter;
+ ty::tls::with(|tcx| {
+ let val = tcx.lift(val).unwrap();
+ let ty = tcx.lift(ty).unwrap();
+ let mut cx = FmtPrinter::new(tcx, fmt, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ cx.pretty_print_const_value(val, ty, print_types)?;
+ Ok(())
+ })
+}
+
impl<'tcx> graph::DirectedGraph for Body<'tcx> {
type Node = BasicBlock;
}
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
index eb13c89..6c2468b 100644
--- a/compiler/rustc_middle/src/mir/mono.rs
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -7,7 +7,7 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
-use rustc_hir::HirId;
+use rustc_hir::{HirId, ItemId};
use rustc_session::config::OptLevel;
use rustc_span::source_map::Span;
use rustc_span::symbol::Symbol;
@@ -43,7 +43,7 @@
pub enum MonoItem<'tcx> {
Fn(Instance<'tcx>),
Static(DefId),
- GlobalAsm(HirId),
+ GlobalAsm(ItemId),
}
impl<'tcx> MonoItem<'tcx> {
@@ -71,9 +71,8 @@
match *self {
MonoItem::Fn(instance) => tcx.symbol_name(instance),
MonoItem::Static(def_id) => tcx.symbol_name(Instance::mono(tcx, def_id)),
- MonoItem::GlobalAsm(hir_id) => {
- let def_id = tcx.hir().local_def_id(hir_id);
- SymbolName::new(tcx, &format!("global_asm_{:?}", def_id))
+ MonoItem::GlobalAsm(item_id) => {
+ SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.def_id))
}
}
}
@@ -178,7 +177,7 @@
MonoItem::Static(def_id) => {
def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
}
- MonoItem::GlobalAsm(hir_id) => Some(hir_id),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.hir_id()),
}
.map(|hir_id| tcx.hir().span(hir_id))
}
@@ -195,9 +194,9 @@
MonoItem::Static(def_id) => {
def_id.hash_stable(hcx, hasher);
}
- MonoItem::GlobalAsm(node_id) => {
+ MonoItem::GlobalAsm(item_id) => {
hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
- node_id.hash_stable(hcx, hasher);
+ item_id.hash_stable(hcx, hasher);
})
}
}
@@ -351,7 +350,7 @@
MonoItem::Static(def_id) => {
def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
}
- MonoItem::GlobalAsm(hir_id) => Some(hir_id),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.hir_id()),
},
item.symbol_name(tcx),
)
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
index c293fbe..bde4801 100644
--- a/compiler/rustc_middle/src/mir/query.rs
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -28,11 +28,9 @@
BorrowPacked,
/// Unsafe operation in an `unsafe fn` but outside an `unsafe` block.
/// Has to be handled as a lint for backwards compatibility.
- /// Should stay gated under `#![feature(unsafe_block_in_unsafe_fn)]`.
UnsafeFn,
/// Borrow of packed field in an `unsafe fn` but outside an `unsafe` block.
/// Has to be handled as a lint for backwards compatibility.
- /// Should stay gated under `#![feature(unsafe_block_in_unsafe_fn)]`.
UnsafeFnBorrowPacked,
}
@@ -439,18 +437,6 @@
}
#[inline]
- pub fn optimized_mir_or_const_arg_mir(
- self,
- def: ty::WithOptConstParam<DefId>,
- ) -> &'tcx Body<'tcx> {
- if let Some((did, param_did)) = def.as_const_arg() {
- self.mir_for_ctfe_of_const_arg((did, param_did))
- } else {
- self.optimized_mir(def.did)
- }
- }
-
- #[inline]
pub fn mir_for_ctfe_opt_const_arg(self, def: ty::WithOptConstParam<DefId>) -> &'tcx Body<'tcx> {
if let Some((did, param_did)) = def.as_const_arg() {
self.mir_for_ctfe_of_const_arg((did, param_did))
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
index 1b2c107..6e81914 100644
--- a/compiler/rustc_middle/src/mir/tcx.rs
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -17,10 +17,11 @@
}
// At least on 64 bit systems, `PlaceTy` should not be larger than two or three pointers.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(PlaceTy<'_>, 16);
impl<'tcx> PlaceTy<'tcx> {
+ #[inline]
pub fn from_ty(ty: Ty<'tcx>) -> PlaceTy<'tcx> {
PlaceTy { ty, variant_index: None }
}
@@ -181,12 +182,12 @@
}
Rvalue::Len(..) => tcx.types.usize,
Rvalue::Cast(.., ty) => ty,
- Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
+ Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
let lhs_ty = lhs.ty(local_decls, tcx);
let rhs_ty = rhs.ty(local_decls, tcx);
op.ty(tcx, lhs_ty, rhs_ty)
}
- Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
+ Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
let lhs_ty = lhs.ty(local_decls, tcx);
let rhs_ty = rhs.ty(local_decls, tcx);
let ty = op.ty(tcx, lhs_ty, rhs_ty);
@@ -226,7 +227,7 @@
{
match self {
&Operand::Copy(ref l) | &Operand::Move(ref l) => l.ty(local_decls, tcx).ty,
- &Operand::Constant(ref c) => c.literal.ty,
+ &Operand::Constant(ref c) => c.literal.ty(),
}
}
}
diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs
index 709ffc3..887dbef 100644
--- a/compiler/rustc_middle/src/mir/terminator.rs
+++ b/compiler/rustc_middle/src/mir/terminator.rs
@@ -17,7 +17,7 @@
pub use super::query::*;
-#[derive(Debug, Clone, TyEncodable, TyDecodable, HashStable, PartialEq)]
+#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
pub struct SwitchTargets {
/// Possible values. The locations to branch to in each case
/// are found in the corresponding indices from the `targets` vector.
@@ -98,7 +98,7 @@
impl<'a> ExactSizeIterator for SwitchTargetsIter<'a> {}
-#[derive(Clone, TyEncodable, TyDecodable, HashStable, PartialEq)]
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
pub enum TerminatorKind<'tcx> {
/// Block should have one successor in the graph; we jump there.
Goto { target: BasicBlock },
@@ -407,6 +407,22 @@
| TerminatorKind::FalseUnwind { ref mut unwind, .. } => Some(unwind),
}
}
+
+ pub fn as_switch(&self) -> Option<(&Operand<'tcx>, Ty<'tcx>, &SwitchTargets)> {
+ match self {
+ TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
+ Some((discr, switch_ty, targets))
+ }
+ _ => None,
+ }
+ }
+
+ pub fn as_goto(&self) -> Option<BasicBlock> {
+ match self {
+ TerminatorKind::Goto { target } => Some(*target),
+ _ => None,
+ }
+ }
}
impl<'tcx> Debug for TerminatorKind<'tcx> {
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
index da8e189..cb59927 100644
--- a/compiler/rustc_middle/src/mir/type_foldable.rs
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -181,9 +181,11 @@
AddressOf(mutability, place) => AddressOf(mutability, place.fold_with(folder)),
Len(place) => Len(place.fold_with(folder)),
Cast(kind, op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)),
- BinaryOp(op, rhs, lhs) => BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
- CheckedBinaryOp(op, rhs, lhs) => {
- CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder))
+ BinaryOp(op, box (rhs, lhs)) => {
+ BinaryOp(op, box (rhs.fold_with(folder), lhs.fold_with(folder)))
+ }
+ CheckedBinaryOp(op, box (rhs, lhs)) => {
+ CheckedBinaryOp(op, box (rhs.fold_with(folder), lhs.fold_with(folder)))
}
UnaryOp(op, val) => UnaryOp(op, val.fold_with(folder)),
Discriminant(place) => Discriminant(place.fold_with(folder)),
@@ -227,7 +229,7 @@
op.visit_with(visitor)?;
ty.visit_with(visitor)
}
- BinaryOp(_, ref rhs, ref lhs) | CheckedBinaryOp(_, ref rhs, ref lhs) => {
+ BinaryOp(_, box (ref rhs, ref lhs)) | CheckedBinaryOp(_, box (ref rhs, ref lhs)) => {
rhs.visit_with(visitor)?;
lhs.visit_with(visitor)
}
@@ -340,6 +342,23 @@
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.literal.visit_with(visitor)
+ self.literal.visit_with(visitor)?;
+ self.user_ty.visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ConstantKind<'tcx> {
+ fn super_fold_with<F: TypeFolder<'tcx>>(self, folder: &mut F) -> Self {
+ match self {
+ ConstantKind::Ty(c) => ConstantKind::Ty(c.fold_with(folder)),
+ ConstantKind::Val(v, t) => ConstantKind::Val(v, t.fold_with(folder)),
+ }
+ }
+
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match *self {
+ ConstantKind::Ty(c) => c.visit_with(visitor),
+ ConstantKind::Val(_, t) => t.visit_with(visitor),
+ }
}
}
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index 023555d..be248cc 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -241,11 +241,13 @@
body: &$($mutability)? Body<'tcx>,
) {
let span = body.span;
- if let Some(yield_ty) = &$($mutability)? body.yield_ty {
- self.visit_ty(
- yield_ty,
- TyContext::YieldTy(SourceInfo::outermost(span))
- );
+ if let Some(gen) = &$($mutability)? body.generator {
+ if let Some(yield_ty) = &$($mutability)? gen.yield_ty {
+ self.visit_ty(
+ yield_ty,
+ TyContext::YieldTy(SourceInfo::outermost(span))
+ );
+ }
}
// for best performance, we want to use an iterator rather
@@ -434,6 +436,15 @@
location
)
}
+ StatementKind::CopyNonOverlapping(box crate::mir::CopyNonOverlapping{
+ ref $($mutability)? src,
+ ref $($mutability)? dst,
+ ref $($mutability)? count,
+ }) => {
+ self.visit_operand(src, location);
+ self.visit_operand(dst, location);
+ self.visit_operand(count, location)
+ }
StatementKind::Nop => {}
}
}
@@ -685,8 +696,8 @@
self.visit_ty(ty, TyContext::Location(location));
}
- Rvalue::BinaryOp(_bin_op, lhs, rhs)
- | Rvalue::CheckedBinaryOp(_bin_op, lhs, rhs) => {
+ Rvalue::BinaryOp(_bin_op, box(lhs, rhs))
+ | Rvalue::CheckedBinaryOp(_bin_op, box(lhs, rhs)) => {
self.visit_operand(lhs, location);
self.visit_operand(rhs, location);
}
@@ -860,7 +871,10 @@
self.visit_span(span);
drop(user_ty); // no visit method for this
- self.visit_const(literal, location);
+ match literal {
+ ConstantKind::Ty(ct) => self.visit_const(ct, location),
+ ConstantKind::Val(_, t) => self.visit_ty(t, TyContext::Location(location)),
+ }
}
fn super_span(&mut self, _span: & $($mutability)? Span) {
@@ -998,12 +1012,11 @@
() => {
fn visit_projection(
&mut self,
- local: Local,
- projection: &[PlaceElem<'tcx>],
+ place_ref: PlaceRef<'tcx>,
context: PlaceContext,
location: Location,
) {
- self.super_projection(local, projection, context, location);
+ self.super_projection(place_ref, context, location);
}
fn visit_projection_elem(
@@ -1033,20 +1046,20 @@
self.visit_local(&place.local, context, location);
- self.visit_projection(place.local, &place.projection, context, location);
+ self.visit_projection(place.as_ref(), context, location);
}
fn super_projection(
&mut self,
- local: Local,
- projection: &[PlaceElem<'tcx>],
+ place_ref: PlaceRef<'tcx>,
context: PlaceContext,
location: Location,
) {
- let mut cursor = projection;
+ // FIXME: Use PlaceRef::iter_projections, once that exists.
+ let mut cursor = place_ref.projection;
while let &[ref proj_base @ .., elem] = cursor {
cursor = proj_base;
- self.visit_projection_elem(local, cursor, elem, context, location);
+ self.visit_projection_elem(place_ref.local, cursor, elem, context, location);
}
}
@@ -1202,6 +1215,7 @@
impl PlaceContext {
/// Returns `true` if this place context represents a drop.
+ #[inline]
pub fn is_drop(&self) -> bool {
matches!(self, PlaceContext::MutatingUse(MutatingUseContext::Drop))
}
@@ -1219,6 +1233,7 @@
}
/// Returns `true` if this place context represents a storage live or storage dead marker.
+ #[inline]
pub fn is_storage_marker(&self) -> bool {
matches!(
self,
@@ -1227,16 +1242,19 @@
}
/// Returns `true` if this place context represents a use that potentially changes the value.
+ #[inline]
pub fn is_mutating_use(&self) -> bool {
matches!(self, PlaceContext::MutatingUse(..))
}
/// Returns `true` if this place context represents a use that does not change the value.
+ #[inline]
pub fn is_nonmutating_use(&self) -> bool {
matches!(self, PlaceContext::NonMutatingUse(..))
}
/// Returns `true` if this place context represents a use.
+ #[inline]
pub fn is_use(&self) -> bool {
!matches!(self, PlaceContext::NonUse(..))
}
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index ca528b2..ae367db 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -1,28 +1,3 @@
-use crate::dep_graph::SerializedDepNodeIndex;
-use crate::mir::interpret::{GlobalId, LitToConstInput};
-use crate::traits;
-use crate::traits::query::{
- CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
- CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
- CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal,
-};
-use crate::ty::query::queries;
-use crate::ty::subst::{GenericArg, SubstsRef};
-use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt};
-use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
-use rustc_query_system::query::QueryDescription;
-
-use rustc_span::symbol::Symbol;
-use std::borrow::Cow;
-
-fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
- if def_id.is_top_level_module() {
- "top-level module".to_string()
- } else {
- format!("module `{}`", tcx.def_path_str(def_id.to_def_id()))
- }
-}
-
// Each of these queries corresponds to a function pointer field in the
// `Providers` struct for requesting a value of that type, and a method
// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
@@ -86,6 +61,15 @@
desc { |tcx| "HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) }
}
+ /// Gives access to the HIR attributes inside the HIR owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query hir_attrs(key: LocalDefId) -> rustc_middle::hir::AttributeMap<'tcx> {
+ eval_always
+ desc { |tcx| "HIR owner attributes in `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
/// Computes the `DefId` of the corresponding const parameter in case the `key` is a
/// const argument and returns `None` otherwise.
///
@@ -126,11 +110,6 @@
desc { |tcx| "computing generics of `{}`", tcx.def_path_str(key) }
storage(ArenaCacheSelector<'tcx>)
cache_on_disk_if { key.is_local() }
- load_cached(tcx, id) {
- let generics: Option<ty::Generics> = tcx.queries.on_disk_cache.as_ref()
- .and_then(|c| c.try_load_query_result(tcx, id));
- generics
- }
}
/// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
@@ -443,12 +422,23 @@
/// full predicates are available (note that supertraits have
/// additional acyclicity requirements).
query super_predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
- desc { |tcx| "computing the supertraits of `{}`", tcx.def_path_str(key) }
+ desc { |tcx| "computing the super predicates of `{}`", tcx.def_path_str(key) }
+ }
+
+ /// The `Option<Ident>` is the name of an associated type. If it is `None`, then this query
+ /// returns the full set of predicates. If `Some<Ident>`, then the query returns only the
+ /// subset of super-predicates that reference traits that define the given associated type.
+ /// This is used to avoid cycles in resolving types like `T::Item`.
+ query super_predicates_that_define_assoc_type(key: (DefId, Option<rustc_span::symbol::Ident>)) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing the super traits of `{}`{}",
+ tcx.def_path_str(key.0),
+ if let Some(assoc_name) = key.1 { format!(" with associated type name `{}`", assoc_name) } else { "".to_string() },
+ }
}
/// To avoid cycles within the predicates of a single item we compute
/// per-type-parameter predicates for resolving `T::AssocTy`.
- query type_param_predicates(key: (DefId, LocalDefId)) -> ty::GenericPredicates<'tcx> {
+ query type_param_predicates(key: (DefId, LocalDefId, rustc_span::symbol::Ident)) -> ty::GenericPredicates<'tcx> {
desc { |tcx| "computing the bounds for type parameter `{}`", {
let id = tcx.hir().local_def_id_to_hir_id(key.1);
tcx.hir().ty_param_name(id)
@@ -692,8 +682,8 @@
cache_on_disk_if { true }
load_cached(tcx, id) {
let typeck_results: Option<ty::TypeckResults<'tcx>> = tcx
- .queries.on_disk_cache.as_ref()
- .and_then(|c| c.try_load_query_result(tcx, id));
+ .on_disk_cache.as_ref()
+ .and_then(|c| c.try_load_query_result(*tcx, id));
typeck_results.map(|x| &*tcx.arena.alloc(x))
}
@@ -795,6 +785,14 @@
cache_on_disk_if { true }
}
+ /// Convert an evaluated constant to a type level constant or
+ /// return `None` if that is not possible.
+ query const_to_valtree(
+ key: ty::ParamEnvAnd<'tcx, ConstAlloc<'tcx>>
+ ) -> Option<ty::ValTree<'tcx>> {
+ desc { "destructure constant" }
+ }
+
/// Destructure a constant ADT or array into its variant index and its
/// field values.
query destructure_const(
@@ -946,7 +944,7 @@
/// Passing in any other crate will cause an ICE.
///
/// [`LOCAL_CRATE`]: rustc_hir::def_id::LOCAL_CRATE
- query all_local_trait_impls(local_crate: CrateNum) -> &'tcx BTreeMap<DefId, Vec<hir::HirId>> {
+ query all_local_trait_impls(local_crate: CrateNum) -> &'tcx BTreeMap<DefId, Vec<LocalDefId>> {
desc { "local trait impls" }
}
@@ -975,7 +973,7 @@
desc { |tcx| "computing normalized predicates of `{}`", tcx.def_path_str(def_id) }
}
- /// Like `param_env`, but returns the `ParamEnv in `Reveal::All` mode.
+ /// Like `param_env`, but returns the `ParamEnv` in `Reveal::All` mode.
/// Prefer this over `tcx.param_env(def_id).with_reveal_all_normalized(tcx)`,
/// as this method is more efficient.
query param_env_reveal_all_normalized(def_id: DefId) -> ty::ParamEnv<'tcx> {
@@ -1288,6 +1286,8 @@
desc { |tcx| "collecting child items of `{}`", tcx.def_path_str(def_id) }
}
query extern_mod_stmt_cnum(def_id: LocalDefId) -> Option<CrateNum> {
+ // This depends on untracked global state (`tcx.extern_crate_map`)
+ eval_always
desc { |tcx| "computing crate imported by `{}`", tcx.def_path_str(def_id.to_def_id()) }
}
@@ -1407,6 +1407,14 @@
query is_codegened_item(def_id: DefId) -> bool {
desc { |tcx| "determining whether `{}` needs codegen", tcx.def_path_str(def_id) }
}
+
+ /// All items participating in code generation together with items inlined into them.
+ query codegened_and_inlined_items(_: CrateNum)
+ -> &'tcx DefIdSet {
+ eval_always
+ desc { "codegened_and_inlined_items" }
+ }
+
query codegen_unit(_: Symbol) -> &'tcx CodegenUnit<'tcx> {
desc { "codegen_unit" }
}
@@ -1622,4 +1630,14 @@
query normalize_opaque_types(key: &'tcx ty::List<ty::Predicate<'tcx>>) -> &'tcx ty::List<ty::Predicate<'tcx>> {
desc { "normalizing opaque types in {:?}", key }
}
+
+ /// Checks whether a type is definitely uninhabited. This is
+ /// conservative: for some types that are uninhabited we return `false`,
+ /// but we only return `true` for types that are definitely uninhabited.
+ /// `ty.conservative_is_privately_uninhabited` implies that any value of type `ty`
+ /// will be `Abi::Uninhabited`. (Note that uninhabited types may have nonzero
+ /// size, to account for partial initialisation. See #49298 for details.)
+ query conservative_is_privately_uninhabited(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "conservatively checking if {:?} is privately uninhabited", key }
+ }
}
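// A standalone sketch of the "uninhabited types may have nonzero size" point in the
// `conservative_is_privately_uninhabited` docs above (ordinary Rust, not compiler code):
// `(u32, Void)` can never be constructed, yet its layout still reserves space for the
// `u32` field to account for partial initialisation.
enum Void {}

fn main() {
    assert_eq!(std::mem::size_of::<Void>(), 0);
    assert!(std::mem::size_of::<(u32, Void)>() >= 4);
}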
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
index 163b400..0bd0a70 100644
--- a/compiler/rustc_middle/src/traits/mod.rs
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -228,7 +228,10 @@
/// Inline asm operand type must be `Sized`.
InlineAsmSized,
/// `[T, ..n]` implies that `T` must be `Copy`.
- RepeatVec,
+ /// If the function in the array repeat expression is a `const fn`,
+ /// display a help message suggesting that the function call be moved to a
+ /// new `const` item, while noting that `T` doesn't implement `Copy`.
+ RepeatVec(bool),
/// Types of fields (other than the last, except for packed structs) in a struct must be sized.
FieldSized {
@@ -337,7 +340,7 @@
}
// `ObligationCauseCode` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ObligationCauseCode<'_>, 32);
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
@@ -476,6 +479,9 @@
/// ImplSource for a builtin `DeterminantKind` trait implementation.
DiscriminantKind(ImplSourceDiscriminantKindData),
+ /// ImplSource for a builtin `Pointee` trait implementation.
+ Pointee(ImplSourcePointeeData),
+
/// ImplSource automatically generated for a generator.
Generator(ImplSourceGeneratorData<'tcx, N>),
@@ -494,7 +500,8 @@
ImplSource::Generator(c) => c.nested,
ImplSource::Object(d) => d.nested,
ImplSource::FnPointer(d) => d.nested,
- ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData) => Vec::new(),
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
+ | ImplSource::Pointee(ImplSourcePointeeData) => Vec::new(),
ImplSource::TraitAlias(d) => d.nested,
}
}
@@ -509,7 +516,8 @@
ImplSource::Generator(c) => &c.nested[..],
ImplSource::Object(d) => &d.nested[..],
ImplSource::FnPointer(d) => &d.nested[..],
- ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData) => &[],
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
+ | ImplSource::Pointee(ImplSourcePointeeData) => &[],
ImplSource::TraitAlias(d) => &d.nested[..],
}
}
@@ -554,6 +562,9 @@
ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData) => {
ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
}
+ ImplSource::Pointee(ImplSourcePointeeData) => {
+ ImplSource::Pointee(ImplSourcePointeeData)
+ }
ImplSource::TraitAlias(d) => ImplSource::TraitAlias(ImplSourceTraitAliasData {
alias_def_id: d.alias_def_id,
substs: d.substs,
@@ -632,6 +643,9 @@
#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
pub struct ImplSourceDiscriminantKindData;
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub struct ImplSourcePointeeData;
+
#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
pub struct ImplSourceTraitAliasData<'tcx, N> {
pub alias_def_id: DefId,
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
index e056240..ab08517 100644
--- a/compiler/rustc_middle/src/traits/select.rs
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -125,6 +125,9 @@
/// Builtin implementation of `DiscriminantKind`.
DiscriminantKindCandidate,
+ /// Builtin implementation of `Pointee`.
+ PointeeCandidate,
+
TraitAliasCandidate(DefId),
/// Matching `dyn Trait` with a supertrait of `Trait`. The index is the
diff --git a/compiler/rustc_middle/src/traits/structural_impls.rs b/compiler/rustc_middle/src/traits/structural_impls.rs
index 5a17d38..4f978e6 100644
--- a/compiler/rustc_middle/src/traits/structural_impls.rs
+++ b/compiler/rustc_middle/src/traits/structural_impls.rs
@@ -19,6 +19,8 @@
super::ImplSource::DiscriminantKind(ref d) => write!(f, "{:?}", d),
+ super::ImplSource::Pointee(ref d) => write!(f, "{:?}", d),
+
super::ImplSource::Object(ref d) => write!(f, "{:?}", d),
super::ImplSource::Param(ref n, ct) => {
@@ -110,4 +112,5 @@
TrivialTypeFoldableAndLiftImpls! {
super::IfExpressionCause,
super::ImplSourceDiscriminantKindData,
+ super::ImplSourcePointeeData,
}
diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs
index 89d0e13..a50dda6 100644
--- a/compiler/rustc_middle/src/ty/adjustment.rs
+++ b/compiler/rustc_middle/src/ty/adjustment.rs
@@ -6,7 +6,7 @@
use rustc_macros::HashStable;
use rustc_span::Span;
-#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum PointerCast {
/// Go from a fn-item type to a fn-pointer type.
ReifyFnPointer,
diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs
new file mode 100644
index 0000000..95159ea
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/adt.rs
@@ -0,0 +1,482 @@
+use crate::ich::StableHashingContext;
+use crate::mir::interpret::ErrorHandled;
+use crate::ty;
+use crate::ty::util::{Discr, IntTypeExt};
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::ErrorReported;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_serialize::{self, Encodable, Encoder};
+use rustc_session::DataTypeKind;
+use rustc_span::symbol::sym;
+use rustc_target::abi::VariantIdx;
+
+use std::cell::RefCell;
+use std::cmp::Ordering;
+use std::hash::{Hash, Hasher};
+use std::ops::Range;
+use std::{ptr, str};
+
+use super::{
+ Destructor, FieldDef, GenericPredicates, ReprOptions, Ty, TyCtxt, VariantDef, VariantDiscr,
+};
+
+#[derive(Clone, HashStable, Debug)]
+pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]);
+
+bitflags! {
+ #[derive(HashStable)]
+ pub struct AdtFlags: u32 {
+ const NO_ADT_FLAGS = 0;
+ /// Indicates whether the ADT is an enum.
+ const IS_ENUM = 1 << 0;
+ /// Indicates whether the ADT is a union.
+ const IS_UNION = 1 << 1;
+ /// Indicates whether the ADT is a struct.
+ const IS_STRUCT = 1 << 2;
+ /// Indicates whether the ADT is a struct and has a constructor.
+ const HAS_CTOR = 1 << 3;
+ /// Indicates whether the type is `PhantomData`.
+ const IS_PHANTOM_DATA = 1 << 4;
+ /// Indicates whether the type has a `#[fundamental]` attribute.
+ const IS_FUNDAMENTAL = 1 << 5;
+ /// Indicates whether the type is `Box`.
+ const IS_BOX = 1 << 6;
+ /// Indicates whether the type is `ManuallyDrop`.
+ const IS_MANUALLY_DROP = 1 << 7;
+ /// Indicates whether the variant list of this ADT is `#[non_exhaustive]`.
+ /// (i.e., this flag is never set unless this ADT is an enum).
+ const IS_VARIANT_LIST_NON_EXHAUSTIVE = 1 << 8;
+ }
+}
+
+/// The definition of a user-defined type, e.g., a `struct`, `enum`, or `union`.
+///
+/// These are all interned (by `alloc_adt_def`) into the global arena.
+///
+/// The initialism *ADT* stands for an [*algebraic data type (ADT)*][adt].
+/// This is slightly wrong because `union`s are not ADTs.
+/// Moreover, Rust only allows recursive data types through indirection.
+///
+/// [adt]: https://en.wikipedia.org/wiki/Algebraic_data_type
+pub struct AdtDef {
+ /// The `DefId` of the struct, enum or union item.
+ pub did: DefId,
+ /// Variants of the ADT. If this is a struct or union, then there will be a single variant.
+ pub variants: IndexVec<VariantIdx, VariantDef>,
+ /// Flags of the ADT (e.g., is this a struct? is this non-exhaustive?).
+ flags: AdtFlags,
+ /// Repr options provided by the user.
+ pub repr: ReprOptions,
+}
+
+impl PartialOrd for AdtDef {
+ fn partial_cmp(&self, other: &AdtDef) -> Option<Ordering> {
+ Some(self.cmp(&other))
+ }
+}
+
+/// There should be only one AdtDef for each `did`, therefore
+/// it is fine to implement `Ord` only based on `did`.
+impl Ord for AdtDef {
+ fn cmp(&self, other: &AdtDef) -> Ordering {
+ self.did.cmp(&other.did)
+ }
+}
+
+impl PartialEq for AdtDef {
+ // `AdtDef`s are always interned, and this is part of `TyS` equality.
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ ptr::eq(self, other)
+ }
+}
+
+impl Eq for AdtDef {}
+
+impl Hash for AdtDef {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ (self as *const AdtDef).hash(s)
+ }
+}
+
+impl<S: Encoder> Encodable<S> for AdtDef {
+ fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+ self.did.encode(s)
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for AdtDef {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ thread_local! {
+ static CACHE: RefCell<FxHashMap<usize, Fingerprint>> = Default::default();
+ }
+
+ let hash: Fingerprint = CACHE.with(|cache| {
+ let addr = self as *const AdtDef as usize;
+ *cache.borrow_mut().entry(addr).or_insert_with(|| {
+ let ty::AdtDef { did, ref variants, ref flags, ref repr } = *self;
+
+ let mut hasher = StableHasher::new();
+ did.hash_stable(hcx, &mut hasher);
+ variants.hash_stable(hcx, &mut hasher);
+ flags.hash_stable(hcx, &mut hasher);
+ repr.hash_stable(hcx, &mut hasher);
+
+ hasher.finish()
+ })
+ });
+
+ hash.hash_stable(hcx, hasher);
+ }
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
+pub enum AdtKind {
+ Struct,
+ Union,
+ Enum,
+}
+
+impl Into<DataTypeKind> for AdtKind {
+ fn into(self) -> DataTypeKind {
+ match self {
+ AdtKind::Struct => DataTypeKind::Struct,
+ AdtKind::Union => DataTypeKind::Union,
+ AdtKind::Enum => DataTypeKind::Enum,
+ }
+ }
+}
+
+impl<'tcx> AdtDef {
+ /// Creates a new `AdtDef`.
+ pub(super) fn new(
+ tcx: TyCtxt<'_>,
+ did: DefId,
+ kind: AdtKind,
+ variants: IndexVec<VariantIdx, VariantDef>,
+ repr: ReprOptions,
+ ) -> Self {
+ debug!("AdtDef::new({:?}, {:?}, {:?}, {:?})", did, kind, variants, repr);
+ let mut flags = AdtFlags::NO_ADT_FLAGS;
+
+ if kind == AdtKind::Enum && tcx.has_attr(did, sym::non_exhaustive) {
+ debug!("found non-exhaustive variant list for {:?}", did);
+ flags = flags | AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE;
+ }
+
+ flags |= match kind {
+ AdtKind::Enum => AdtFlags::IS_ENUM,
+ AdtKind::Union => AdtFlags::IS_UNION,
+ AdtKind::Struct => AdtFlags::IS_STRUCT,
+ };
+
+ if kind == AdtKind::Struct && variants[VariantIdx::new(0)].ctor_def_id.is_some() {
+ flags |= AdtFlags::HAS_CTOR;
+ }
+
+ let attrs = tcx.get_attrs(did);
+ if tcx.sess.contains_name(&attrs, sym::fundamental) {
+ flags |= AdtFlags::IS_FUNDAMENTAL;
+ }
+ if Some(did) == tcx.lang_items().phantom_data() {
+ flags |= AdtFlags::IS_PHANTOM_DATA;
+ }
+ if Some(did) == tcx.lang_items().owned_box() {
+ flags |= AdtFlags::IS_BOX;
+ }
+ if Some(did) == tcx.lang_items().manually_drop() {
+ flags |= AdtFlags::IS_MANUALLY_DROP;
+ }
+
+ AdtDef { did, variants, flags, repr }
+ }
+
+ /// Returns `true` if this is a struct.
+ #[inline]
+ pub fn is_struct(&self) -> bool {
+ self.flags.contains(AdtFlags::IS_STRUCT)
+ }
+
+ /// Returns `true` if this is a union.
+ #[inline]
+ pub fn is_union(&self) -> bool {
+ self.flags.contains(AdtFlags::IS_UNION)
+ }
+
+ /// Returns `true` if this is an enum.
+ #[inline]
+ pub fn is_enum(&self) -> bool {
+ self.flags.contains(AdtFlags::IS_ENUM)
+ }
+
+ /// Returns `true` if the variant list of this ADT is `#[non_exhaustive]`.
+ #[inline]
+ pub fn is_variant_list_non_exhaustive(&self) -> bool {
+ self.flags.contains(AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE)
+ }
+
+ /// Returns the kind of the ADT.
+ #[inline]
+ pub fn adt_kind(&self) -> AdtKind {
+ if self.is_enum() {
+ AdtKind::Enum
+ } else if self.is_union() {
+ AdtKind::Union
+ } else {
+ AdtKind::Struct
+ }
+ }
+
+ /// Returns a description of this algebraic data type.
+ pub fn descr(&self) -> &'static str {
+ match self.adt_kind() {
+ AdtKind::Struct => "struct",
+ AdtKind::Union => "union",
+ AdtKind::Enum => "enum",
+ }
+ }
+
+ /// Returns a description of a variant of this algebraic data type.
+ #[inline]
+ pub fn variant_descr(&self) -> &'static str {
+ match self.adt_kind() {
+ AdtKind::Struct => "struct",
+ AdtKind::Union => "union",
+ AdtKind::Enum => "variant",
+ }
+ }
+
+ /// If this function returns `true`, it implies that `is_struct` must return `true`.
+ #[inline]
+ pub fn has_ctor(&self) -> bool {
+ self.flags.contains(AdtFlags::HAS_CTOR)
+ }
+
+ /// Returns `true` if this type is `#[fundamental]` for the purposes
+ /// of coherence checking.
+ #[inline]
+ pub fn is_fundamental(&self) -> bool {
+ self.flags.contains(AdtFlags::IS_FUNDAMENTAL)
+ }
+
+ /// Returns `true` if this is `PhantomData<T>`.
+ #[inline]
+ pub fn is_phantom_data(&self) -> bool {
+ self.flags.contains(AdtFlags::IS_PHANTOM_DATA)
+ }
+
+ /// Returns `true` if this is `Box<T>`.
+ #[inline]
+ pub fn is_box(&self) -> bool {
+ self.flags.contains(AdtFlags::IS_BOX)
+ }
+
+ /// Returns `true` if this is `ManuallyDrop<T>`.
+ #[inline]
+ pub fn is_manually_drop(&self) -> bool {
+ self.flags.contains(AdtFlags::IS_MANUALLY_DROP)
+ }
+
+ /// Returns `true` if this type has a destructor.
+ pub fn has_dtor(&self, tcx: TyCtxt<'tcx>) -> bool {
+ self.destructor(tcx).is_some()
+ }
+
+ /// Asserts this is a struct or union and returns its unique variant.
+ pub fn non_enum_variant(&self) -> &VariantDef {
+ assert!(self.is_struct() || self.is_union());
+ &self.variants[VariantIdx::new(0)]
+ }
+
+ #[inline]
+ pub fn predicates(&self, tcx: TyCtxt<'tcx>) -> GenericPredicates<'tcx> {
+ tcx.predicates_of(self.did)
+ }
+
+ /// Returns an iterator over all fields contained
+ /// by this ADT.
+ #[inline]
+ pub fn all_fields(&self) -> impl Iterator<Item = &FieldDef> + Clone {
+ self.variants.iter().flat_map(|v| v.fields.iter())
+ }
+
+ /// Whether the ADT lacks fields. Note that this includes uninhabited enums,
+ /// e.g., `enum Void {}` is considered payload free as well.
+ pub fn is_payloadfree(&self) -> bool {
+ self.variants.iter().all(|v| v.fields.is_empty())
+ }
+
+ /// Return a `VariantDef` given a variant id.
+ pub fn variant_with_id(&self, vid: DefId) -> &VariantDef {
+ self.variants.iter().find(|v| v.def_id == vid).expect("variant_with_id: unknown variant")
+ }
+
+ /// Return a `VariantDef` given a constructor id.
+ pub fn variant_with_ctor_id(&self, cid: DefId) -> &VariantDef {
+ self.variants
+ .iter()
+ .find(|v| v.ctor_def_id == Some(cid))
+ .expect("variant_with_ctor_id: unknown variant")
+ }
+
+ /// Return the index of `VariantDef` given a variant id.
+ pub fn variant_index_with_id(&self, vid: DefId) -> VariantIdx {
+ self.variants
+ .iter_enumerated()
+ .find(|(_, v)| v.def_id == vid)
+ .expect("variant_index_with_id: unknown variant")
+ .0
+ }
+
+ /// Return the index of `VariantDef` given a constructor id.
+ pub fn variant_index_with_ctor_id(&self, cid: DefId) -> VariantIdx {
+ self.variants
+ .iter_enumerated()
+ .find(|(_, v)| v.ctor_def_id == Some(cid))
+ .expect("variant_index_with_ctor_id: unknown variant")
+ .0
+ }
+
+ pub fn variant_of_res(&self, res: Res) -> &VariantDef {
+ match res {
+ Res::Def(DefKind::Variant, vid) => self.variant_with_id(vid),
+ Res::Def(DefKind::Ctor(..), cid) => self.variant_with_ctor_id(cid),
+ Res::Def(DefKind::Struct, _)
+ | Res::Def(DefKind::Union, _)
+ | Res::Def(DefKind::TyAlias, _)
+ | Res::Def(DefKind::AssocTy, _)
+ | Res::SelfTy(..)
+ | Res::SelfCtor(..) => self.non_enum_variant(),
+ _ => bug!("unexpected res {:?} in variant_of_res", res),
+ }
+ }
+
+ #[inline]
+ pub fn eval_explicit_discr(&self, tcx: TyCtxt<'tcx>, expr_did: DefId) -> Option<Discr<'tcx>> {
+ assert!(self.is_enum());
+ let param_env = tcx.param_env(expr_did);
+ let repr_type = self.repr.discr_type();
+ match tcx.const_eval_poly(expr_did) {
+ Ok(val) => {
+ let ty = repr_type.to_ty(tcx);
+ if let Some(b) = val.try_to_bits_for_ty(tcx, param_env, ty) {
+ trace!("discriminants: {} ({:?})", b, repr_type);
+ Some(Discr { val: b, ty })
+ } else {
+ info!("invalid enum discriminant: {:#?}", val);
+ crate::mir::interpret::struct_error(
+ tcx.at(tcx.def_span(expr_did)),
+ "constant evaluation of enum discriminant resulted in non-integer",
+ )
+ .emit();
+ None
+ }
+ }
+ Err(err) => {
+ let msg = match err {
+ ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
+ "enum discriminant evaluation failed"
+ }
+ ErrorHandled::TooGeneric => "enum discriminant depends on generics",
+ };
+ tcx.sess.delay_span_bug(tcx.def_span(expr_did), msg);
+ None
+ }
+ }
+ }
+
+ #[inline]
+ pub fn discriminants(
+ &'tcx self,
+ tcx: TyCtxt<'tcx>,
+ ) -> impl Iterator<Item = (VariantIdx, Discr<'tcx>)> + Captures<'tcx> {
+ assert!(self.is_enum());
+ let repr_type = self.repr.discr_type();
+ let initial = repr_type.initial_discriminant(tcx);
+ let mut prev_discr = None::<Discr<'tcx>>;
+ self.variants.iter_enumerated().map(move |(i, v)| {
+ let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
+ if let VariantDiscr::Explicit(expr_did) = v.discr {
+ if let Some(new_discr) = self.eval_explicit_discr(tcx, expr_did) {
+ discr = new_discr;
+ }
+ }
+ prev_discr = Some(discr);
+
+ (i, discr)
+ })
+ }
+
+ #[inline]
+ pub fn variant_range(&self) -> Range<VariantIdx> {
+ VariantIdx::new(0)..VariantIdx::new(self.variants.len())
+ }
+
+ /// Computes the discriminant value used by a specific variant.
+ /// Unlike `discriminants`, this is (amortized) constant-time,
+ /// only doing at most one query for evaluating an explicit
+ /// discriminant (the last one before the requested variant),
+ /// assuming there are no constant-evaluation errors there.
+ #[inline]
+ pub fn discriminant_for_variant(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Discr<'tcx> {
+ assert!(self.is_enum());
+ let (val, offset) = self.discriminant_def_for_variant(variant_index);
+ let explicit_value = val
+ .and_then(|expr_did| self.eval_explicit_discr(tcx, expr_did))
+ .unwrap_or_else(|| self.repr.discr_type().initial_discriminant(tcx));
+ explicit_value.checked_add(tcx, offset as u128).0
+ }
+
+ /// Yields the `DefId` of the closest preceding explicit discriminant expression (if any),
+ /// together with an offset to add to it to obtain this variant's discriminant.
+ /// If there is no explicit discriminant, the `DefId` is `None` and the offset is taken
+ /// from the initial (inferred) discriminant.
+ pub fn discriminant_def_for_variant(&self, variant_index: VariantIdx) -> (Option<DefId>, u32) {
+ assert!(!self.variants.is_empty());
+ let mut explicit_index = variant_index.as_u32();
+ let expr_did;
+ loop {
+ match self.variants[VariantIdx::from_u32(explicit_index)].discr {
+ ty::VariantDiscr::Relative(0) => {
+ expr_did = None;
+ break;
+ }
+ ty::VariantDiscr::Relative(distance) => {
+ explicit_index -= distance;
+ }
+ ty::VariantDiscr::Explicit(did) => {
+ expr_did = Some(did);
+ break;
+ }
+ }
+ }
+ (expr_did, variant_index.as_u32() - explicit_index)
+ }
+
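// A standalone sketch of the walk performed by `discriminant_def_for_variant` above, using
// simplified stand-in types rather than the compiler's: step backwards over `Relative`
// discriminants until an `Explicit` one (or the start) is found, and report the distance
// walked as the offset.
#[derive(Clone, Copy)]
enum SimpleDiscr {
    Relative(u32),
    Explicit(u64), // stands in for the `DefId` of an explicit discriminant expression
}

fn discr_def_for_variant(variants: &[SimpleDiscr], index: u32) -> (Option<u64>, u32) {
    let mut explicit_index = index;
    let expr;
    loop {
        match variants[explicit_index as usize] {
            SimpleDiscr::Relative(0) => {
                expr = None;
                break;
            }
            SimpleDiscr::Relative(distance) => explicit_index -= distance,
            SimpleDiscr::Explicit(id) => {
                expr = Some(id);
                break;
            }
        }
    }
    (expr, index - explicit_index)
}

fn main() {
    // enum E { A, B = 10, C } => C is "B's explicit discriminant plus 1".
    let e = [SimpleDiscr::Relative(0), SimpleDiscr::Explicit(1), SimpleDiscr::Relative(1)];
    assert_eq!(discr_def_for_variant(&e, 2), (Some(1), 1));
    assert_eq!(discr_def_for_variant(&e, 0), (None, 0));
}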
+ pub fn destructor(&self, tcx: TyCtxt<'tcx>) -> Option<Destructor> {
+ tcx.adt_destructor(self.did)
+ }
+
+ /// Returns a list of types such that `Self: Sized` holds if and only if every type
+ /// in the list is `Sized`, or `TyErr` if this type is recursive.
+ ///
+ /// Oddly enough, checking that the sized-constraint is `Sized` is
+ /// actually more expressive than checking all members:
+ /// the `Sized` trait is inductive, so an associated type that references
+ /// `Self` would prevent its containing ADT from being `Sized`.
+ ///
+ /// Due to normalization being eager, this applies even if
+ /// the associated type is behind a pointer (e.g., issue #31299).
+ pub fn sized_constraint(&self, tcx: TyCtxt<'tcx>) -> &'tcx [Ty<'tcx>] {
+ tcx.adt_sized_constraint(self.did).0
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs
new file mode 100644
index 0000000..d3770fa
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/assoc.rs
@@ -0,0 +1,170 @@
+pub use self::AssocItemContainer::*;
+
+use crate::ty;
+use rustc_data_structures::sorted_map::SortedIndexMultiMap;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Namespace};
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::{Ident, Symbol};
+
+use super::{TyCtxt, Visibility};
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable, Hash)]
+pub enum AssocItemContainer {
+ TraitContainer(DefId),
+ ImplContainer(DefId),
+}
+
+impl AssocItemContainer {
+ /// Asserts that this is the container of an associated item declared in a trait,
+ /// and returns that trait's `DefId`.
+ pub fn assert_trait(&self) -> DefId {
+ match *self {
+ TraitContainer(id) => id,
+ _ => bug!("associated item has wrong container type: {:?}", self),
+ }
+ }
+
+ pub fn id(&self) -> DefId {
+ match *self {
+ TraitContainer(id) => id,
+ ImplContainer(id) => id,
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, HashStable, Eq, Hash)]
+pub struct AssocItem {
+ pub def_id: DefId,
+ #[stable_hasher(project(name))]
+ pub ident: Ident,
+ pub kind: AssocKind,
+ pub vis: Visibility,
+ pub defaultness: hir::Defaultness,
+ pub container: AssocItemContainer,
+
+ /// Whether this is a method with an explicit self
+ /// as its first parameter, allowing method calls.
+ pub fn_has_self_parameter: bool,
+}
+
+impl AssocItem {
+ pub fn signature(&self, tcx: TyCtxt<'_>) -> String {
+ match self.kind {
+ ty::AssocKind::Fn => {
+ // We skip the binder here because the binder would deanonymize all
+ // late-bound regions, and we don't want method signatures to show up
+ // as `for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound
+ // regions just fine, showing `fn(&MyType)`.
+ tcx.fn_sig(self.def_id).skip_binder().to_string()
+ }
+ ty::AssocKind::Type => format!("type {};", self.ident),
+ ty::AssocKind::Const => {
+ format!("const {}: {:?};", self.ident, tcx.type_of(self.def_id))
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, HashStable, Eq, Hash)]
+pub enum AssocKind {
+ Const,
+ Fn,
+ Type,
+}
+
+impl AssocKind {
+ pub fn namespace(&self) -> Namespace {
+ match *self {
+ ty::AssocKind::Type => Namespace::TypeNS,
+ ty::AssocKind::Const | ty::AssocKind::Fn => Namespace::ValueNS,
+ }
+ }
+
+ pub fn as_def_kind(&self) -> DefKind {
+ match self {
+ AssocKind::Const => DefKind::AssocConst,
+ AssocKind::Fn => DefKind::AssocFn,
+ AssocKind::Type => DefKind::AssocTy,
+ }
+ }
+}
+
+/// A list of `ty::AssocItem`s in definition order that allows for efficient lookup by name.
+///
+/// When doing lookup by name, we try to postpone hygienic comparison for as long as possible since
+/// it is relatively expensive. Instead, items are indexed by `Symbol` and hygienic comparison is
+/// done only on items with the same name.
+#[derive(Debug, Clone, PartialEq, HashStable)]
+pub struct AssociatedItems<'tcx> {
+ pub(super) items: SortedIndexMultiMap<u32, Symbol, &'tcx ty::AssocItem>,
+}
+
+impl<'tcx> AssociatedItems<'tcx> {
+ /// Constructs an `AssociatedItems` map from a series of `ty::AssocItem`s in definition order.
+ pub fn new(items_in_def_order: impl IntoIterator<Item = &'tcx ty::AssocItem>) -> Self {
+ let items = items_in_def_order.into_iter().map(|item| (item.ident.name, item)).collect();
+ AssociatedItems { items }
+ }
+
+ /// Returns a slice of associated items in the order they were defined.
+ ///
+ /// New code should avoid relying on definition order. If you need a particular associated item
+ /// for a known trait, make that trait a lang item instead of indexing this array.
+ pub fn in_definition_order(&self) -> impl '_ + Iterator<Item = &ty::AssocItem> {
+ self.items.iter().map(|(_, v)| *v)
+ }
+
+ pub fn len(&self) -> usize {
+ self.items.len()
+ }
+
+ /// Returns an iterator over all associated items with the given name, ignoring hygiene.
+ pub fn filter_by_name_unhygienic(
+ &self,
+ name: Symbol,
+ ) -> impl '_ + Iterator<Item = &ty::AssocItem> {
+ self.items.get_by_key(&name).copied()
+ }
+
+ /// Returns an iterator over all associated items with the given name.
+ ///
+ /// Multiple items may have the same name if they are in different `Namespace`s. For example,
+ /// an associated type can have the same name as a method. Use one of the `find_by_name_and_*`
+ /// methods below if you know which item you are looking for.
+ pub fn filter_by_name(
+ &'a self,
+ tcx: TyCtxt<'a>,
+ ident: Ident,
+ parent_def_id: DefId,
+ ) -> impl 'a + Iterator<Item = &'a ty::AssocItem> {
+ self.filter_by_name_unhygienic(ident.name)
+ .filter(move |item| tcx.hygienic_eq(ident, item.ident, parent_def_id))
+ }
+
+ /// Returns the associated item with the given name and `AssocKind`, if one exists.
+ pub fn find_by_name_and_kind(
+ &self,
+ tcx: TyCtxt<'_>,
+ ident: Ident,
+ kind: AssocKind,
+ parent_def_id: DefId,
+ ) -> Option<&ty::AssocItem> {
+ self.filter_by_name_unhygienic(ident.name)
+ .filter(|item| item.kind == kind)
+ .find(|item| tcx.hygienic_eq(ident, item.ident, parent_def_id))
+ }
+
+ /// Returns the associated item with the given name in the given `Namespace`, if one exists.
+ pub fn find_by_name_and_namespace(
+ &self,
+ tcx: TyCtxt<'_>,
+ ident: Ident,
+ ns: Namespace,
+ parent_def_id: DefId,
+ ) -> Option<&ty::AssocItem> {
+ self.filter_by_name_unhygienic(ident.name)
+ .filter(|item| item.kind.namespace() == ns)
+ .find(|item| tcx.hygienic_eq(ident, item.ident, parent_def_id))
+ }
+}
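// A standalone sketch of the two-phase lookup strategy described above, using simplified
// stand-in types rather than the compiler's: index items by their plain name first, and run
// the more expensive hygienic comparison only on the candidates that share that name.
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
struct Item {
    ctxt: u32, // stands in for the hygiene information carried by an `Ident`
}

fn find<'a>(
    by_name: &'a HashMap<&'static str, Vec<Item>>,
    name: &str,
    ctxt: u32,
) -> Option<&'a Item> {
    by_name
        .get(name)? // cheap: plain symbol lookup
        .iter()
        .find(|item| item.ctxt == ctxt) // expensive comparison, done only on same-name items
}

fn main() {
    let mut by_name = HashMap::new();
    by_name.insert("len", vec![Item { ctxt: 0 }, Item { ctxt: 1 }]);
    assert_eq!(find(&by_name, "len", 1), Some(&Item { ctxt: 1 }));
    assert_eq!(find(&by_name, "push", 0), None);
}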
diff --git a/compiler/rustc_middle/src/ty/closure.rs b/compiler/rustc_middle/src/ty/closure.rs
new file mode 100644
index 0000000..c31a882
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/closure.rs
@@ -0,0 +1,388 @@
+use crate::hir::place::{
+ Place as HirPlace, PlaceBase as HirPlaceBase, ProjectionKind as HirProjectionKind,
+};
+use crate::ty;
+
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_span::Span;
+
+use super::{Ty, TyCtxt};
+
+use self::BorrowKind::*;
+
+#[derive(
+ Clone,
+ Copy,
+ Debug,
+ PartialEq,
+ Eq,
+ Hash,
+ TyEncodable,
+ TyDecodable,
+ TypeFoldable,
+ HashStable
+)]
+pub struct UpvarPath {
+ pub hir_id: hir::HirId,
+}
+
+/// Upvars do not get their own `NodeId`. Instead, we use the pair of
+/// the original var ID (that is, the root variable that is referenced
+/// by the upvar) and the ID of the closure expression.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
+pub struct UpvarId {
+ pub var_path: UpvarPath,
+ pub closure_expr_id: LocalDefId,
+}
+
+impl UpvarId {
+ pub fn new(var_hir_id: hir::HirId, closure_def_id: LocalDefId) -> UpvarId {
+ UpvarId { var_path: UpvarPath { hir_id: var_hir_id }, closure_expr_id: closure_def_id }
+ }
+}
+
+/// Information describing the capture of an upvar. This is computed
+/// during `typeck`, specifically by `regionck`.
+#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
+pub enum UpvarCapture<'tcx> {
+ /// Upvar is captured by value. This is always true when the
+ /// closure is labeled `move`, but can also be true in other cases
+ /// depending on inference.
+ ///
+ /// If the upvar was inferred to be captured by value (e.g. `move`
+ /// was not used), then the `Span` points to a usage that
+ /// required it. There may be more than one such usage
+ /// (e.g. `|| { a; a; }`), in which case we pick an
+ /// arbitrary one.
+ ByValue(Option<Span>),
+
+ /// Upvar is captured by reference.
+ ByRef(UpvarBorrow<'tcx>),
+}
+
+#[derive(PartialEq, Clone, Copy, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
+pub struct UpvarBorrow<'tcx> {
+ /// The kind of borrow: by-ref upvars have access to shared
+ /// immutable borrows, which are not part of the normal language
+ /// syntax.
+ pub kind: BorrowKind,
+
+ /// Region of the resulting reference.
+ pub region: ty::Region<'tcx>,
+}
+
+pub type UpvarListMap = FxHashMap<DefId, FxIndexMap<hir::HirId, UpvarId>>;
+pub type UpvarCaptureMap<'tcx> = FxHashMap<UpvarId, UpvarCapture<'tcx>>;
+
+/// Given the closure `DefId`, this map provides a map of root variables to the minimum
+/// set of `CapturedPlace`s that need to be tracked to support all captures of that closure.
+pub type MinCaptureInformationMap<'tcx> = FxHashMap<DefId, RootVariableMinCaptureList<'tcx>>;
+
+/// Part of `MinCaptureInformationMap`; maps a root variable to the list of `CapturedPlace`s.
+/// Used to track the minimum set of `Place`s that need to be captured to support all
+/// `Place`s captured by the closure starting at a given root variable.
+///
+/// This provides a convenient and quick way of checking if a variable being used within
+/// a closure is a capture of a local variable.
+pub type RootVariableMinCaptureList<'tcx> = FxIndexMap<hir::HirId, MinCaptureList<'tcx>>;
+
+/// Part of `MinCaptureInformationMap`; List of `CapturePlace`s.
+pub type MinCaptureList<'tcx> = Vec<CapturedPlace<'tcx>>;
+
+/// Represents the various closure traits in the language. This
+/// will determine the type of the environment (`self`, in the
+/// desugaring) argument that the closure expects.
+///
+/// You can get the environment type of a closure using
+/// `tcx.closure_env_ty()`.
+#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum ClosureKind {
+ // Warning: Ordering is significant here! The ordering is chosen
+ // because `Fn` is a subtrait of `FnMut`, which in turn is a subtrait of `FnOnce`;
+ // hence we order them so that Fn < FnMut < FnOnce.
+ Fn,
+ FnMut,
+ FnOnce,
+}
+
+impl<'tcx> ClosureKind {
+ // This is the initial value used when doing upvar inference.
+ pub const LATTICE_BOTTOM: ClosureKind = ClosureKind::Fn;
+
+ pub fn trait_did(&self, tcx: TyCtxt<'tcx>) -> DefId {
+ match *self {
+ ClosureKind::Fn => tcx.require_lang_item(LangItem::Fn, None),
+ ClosureKind::FnMut => tcx.require_lang_item(LangItem::FnMut, None),
+ ClosureKind::FnOnce => tcx.require_lang_item(LangItem::FnOnce, None),
+ }
+ }
+
+ /// Returns `true` if a type that impls this closure kind
+ /// must also implement `other`.
+ pub fn extends(self, other: ty::ClosureKind) -> bool {
+ matches!(
+ (self, other),
+ (ClosureKind::Fn, ClosureKind::Fn)
+ | (ClosureKind::Fn, ClosureKind::FnMut)
+ | (ClosureKind::Fn, ClosureKind::FnOnce)
+ | (ClosureKind::FnMut, ClosureKind::FnMut)
+ | (ClosureKind::FnMut, ClosureKind::FnOnce)
+ | (ClosureKind::FnOnce, ClosureKind::FnOnce)
+ )
+ }
+
+ /// Returns the representative scalar type for this closure kind.
+ /// See `TyS::to_opt_closure_kind` for more details.
+ pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match self {
+ ty::ClosureKind::Fn => tcx.types.i8,
+ ty::ClosureKind::FnMut => tcx.types.i16,
+ ty::ClosureKind::FnOnce => tcx.types.i32,
+ }
+ }
+}
+
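// A standalone sketch (simplified stand-in enum, not the compiler's) of why the declaration
// order above matters: with `Fn < FnMut < FnOnce`, "kind A extends kind B" is the same
// question as `A <= B`, which is what upvar inference relies on when it starts at
// `LATTICE_BOTTOM` (`Fn`) and only ever moves a closure's kind upwards.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Kind {
    Fn,
    FnMut,
    FnOnce,
}

fn extends(a: Kind, b: Kind) -> bool {
    a <= b
}

fn main() {
    assert!(extends(Kind::Fn, Kind::FnOnce)); // an `Fn` closure also implements `FnOnce`
    assert!(extends(Kind::FnMut, Kind::FnOnce));
    assert!(!extends(Kind::FnOnce, Kind::Fn)); // but not the other way around
}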
+/// A composite describing a `Place` that is captured by a closure.
+#[derive(PartialEq, Clone, Debug, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
+pub struct CapturedPlace<'tcx> {
+ /// The `Place` that is captured.
+ pub place: HirPlace<'tcx>,
+
+ /// `CaptureKind` and expression(s) that resulted in such capture of `place`.
+ pub info: CaptureInfo<'tcx>,
+
+ /// Represents if `place` can be mutated or not.
+ pub mutability: hir::Mutability,
+}
+
+impl CapturedPlace<'tcx> {
+ /// Returns the `HirId` of the root variable for the captured place,
+ /// e.g., if `a.b.c` was captured, this returns the `HirId` of `a`.
+ pub fn get_root_variable(&self) -> hir::HirId {
+ match self.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected upvar, found={:?}", base),
+ }
+ }
+
+ /// Returns the `LocalDefId` of the closure that captured this `Place`.
+ pub fn get_closure_local_def_id(&self) -> LocalDefId {
+ match self.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.closure_expr_id,
+ base => bug!("expected upvar, found={:?}", base),
+ }
+ }
+
+ /// Returns the span pointing to the use that resulted in selecting the current capture kind.
+ pub fn get_capture_kind_span(&self, tcx: TyCtxt<'tcx>) -> Span {
+ if let Some(capture_kind_expr_id) = self.info.capture_kind_expr_id {
+ tcx.hir().span(capture_kind_expr_id)
+ } else if let Some(path_expr_id) = self.info.path_expr_id {
+ tcx.hir().span(path_expr_id)
+ } else {
+ // Fall back on the upvars mentioned if neither a path nor a capture-kind expr id was recorded
+
+ // Safe to unwrap since we know this place is captured by the closure, therefore the closure must have upvars.
+ tcx.upvars_mentioned(self.get_closure_local_def_id()).unwrap()
+ [&self.get_root_variable()]
+ .span
+ }
+ }
+}
+
+/// Returns `true` if `proj_possible_ancestor` represents an ancestor path
+/// of `proj_capture`, or if `proj_possible_ancestor` is the same as `proj_capture`,
+/// assuming they both start off of the same root variable.
+///
+/// **Note:** It's the caller's responsibility to ensure that both lists of projections
+/// start off of the same root variable.
+///
+/// E.g.: 1. `foo.x`, which is represented using `projections=[Field(x)]`, is an ancestor of
+/// `foo.x.y`, which is represented using `projections=[Field(x), Field(y)]`.
+/// Note that both `foo.x` and `foo.x.y` start off of the same root variable `foo`.
+/// 2. Since we only look at the projections, this function will return `bar.x` as a valid
+/// ancestor of `foo.x.y`. It's the caller's responsibility to ensure that both projection
+/// lists are applied to the same root variable.
+pub fn is_ancestor_or_same_capture(
+ proj_possible_ancestor: &[HirProjectionKind],
+ proj_capture: &[HirProjectionKind],
+) -> bool {
+ // We want `is_ancestor_or_same_capture("x.0.0", "x.0")` to return false.
+ // Therefore we can't just check whether all the projections are the same in the zipped iterator below.
+ if proj_possible_ancestor.len() > proj_capture.len() {
+ return false;
+ }
+
+ proj_possible_ancestor.iter().zip(proj_capture).all(|(a, b)| a == b)
+}
+
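// A standalone sketch of the prefix check above, with a simplified projection type rather
// than the compiler's `HirProjectionKind`: an ancestor capture is exactly a (possibly equal)
// prefix of the other projection list, assuming both start from the same root variable.
#[derive(PartialEq)]
enum Proj {
    Field(u32),
}

fn is_ancestor_or_same(ancestor: &[Proj], capture: &[Proj]) -> bool {
    if ancestor.len() > capture.len() {
        return false;
    }
    ancestor.iter().zip(capture).all(|(a, b)| a == b)
}

fn main() {
    // `foo.0` is an ancestor of `foo.0.1`...
    assert!(is_ancestor_or_same(&[Proj::Field(0)], &[Proj::Field(0), Proj::Field(1)]));
    // ...but `foo.0.1` is not an ancestor of `foo.0` (it is longer)...
    assert!(!is_ancestor_or_same(&[Proj::Field(0), Proj::Field(1)], &[Proj::Field(0)]));
    // ...and a sibling field such as `foo.1` is not an ancestor either.
    assert!(!is_ancestor_or_same(&[Proj::Field(1)], &[Proj::Field(0), Proj::Field(1)]));
}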
+/// Part of `MinCaptureInformationMap`; describes the capture kind (&, &mut, move)
+/// for a particular capture as well as identifying the part of the source code
+/// that triggered this capture to occur.
+#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
+pub struct CaptureInfo<'tcx> {
+ /// Expr id pointing to the use that resulted in selecting the current capture kind
+ ///
+ /// Eg:
+ /// ```rust,no_run
+ /// let mut t = (0,1);
+ ///
+ /// let c = || {
+ /// println!("{}",t); // L1
+ /// t.1 = 4; // L2
+ /// };
+ /// ```
+ /// `capture_kind_expr_id` will point to the use on L2 and `path_expr_id` will point to the
+ /// use on L1.
+ ///
+ /// If the user doesn't enable the `capture_disjoint_fields` feature (RFC 2229), then it is
+ /// possible that we don't see the use of a particular place, resulting in `capture_kind_expr_id`
+ /// being `None`. In such a case we fall back on `upvars_mentioned` for the span.
+ ///
+ /// Eg:
+ /// ```rust,no_run
+ /// let x = 5;
+ ///
+ /// let c = || {
+ /// let _ = x
+ /// };
+ /// ```
+ ///
+ /// In this example, if `capture_disjoint_fields` is **not** set, then `x` will be captured,
+ /// but we won't see it being used during capture analysis, since it's essentially a discard.
+ pub capture_kind_expr_id: Option<hir::HirId>,
+ /// Expr id pointing to the use that resulted in the corresponding place being captured
+ ///
+ /// See `capture_kind_expr_id` for an example.
+ ///
+ pub path_expr_id: Option<hir::HirId>,
+
+ /// Capture mode that was selected
+ pub capture_kind: UpvarCapture<'tcx>,
+}
+
+pub fn place_to_string_for_capture(tcx: TyCtxt<'tcx>, place: &HirPlace<'tcx>) -> String {
+ let name = match place.base {
+ HirPlaceBase::Upvar(upvar_id) => tcx.hir().name(upvar_id.var_path.hir_id).to_string(),
+ _ => bug!("Capture_information should only contain upvars"),
+ };
+ let mut curr_string = name;
+
+ for (i, proj) in place.projections.iter().enumerate() {
+ match proj.kind {
+ HirProjectionKind::Deref => {
+ curr_string = format!("*{}", curr_string);
+ }
+ HirProjectionKind::Field(idx, variant) => match place.ty_before_projection(i).kind() {
+ ty::Adt(def, ..) => {
+ curr_string = format!(
+ "{}.{}",
+ curr_string,
+ def.variants[variant].fields[idx as usize].ident.name.as_str()
+ );
+ }
+ ty::Tuple(_) => {
+ curr_string = format!("{}.{}", curr_string, idx);
+ }
+ _ => {
+ bug!(
+ "Field projection applied to a type other than Adt or Tuple: {:?}.",
+ place.ty_before_projection(i).kind()
+ )
+ }
+ },
+ proj => bug!("{:?} unexpected because it isn't captured", proj),
+ }
+ }
+
+ curr_string.to_string()
+}
+
+#[derive(Clone, PartialEq, Debug, TyEncodable, TyDecodable, TypeFoldable, Copy, HashStable)]
+pub enum BorrowKind {
+ /// Data must be immutable and is aliasable.
+ ImmBorrow,
+
+ /// Data must be immutable but not aliasable. This kind of borrow
+ /// cannot currently be expressed by the user and is used only in
+ /// implicit closure bindings. It is needed when the closure
+ /// is borrowing or mutating a mutable referent, e.g.:
+ ///
+ /// ```
+ /// let x: &mut isize = ...;
+ /// let y = || *x += 5;
+ /// ```
+ ///
+ /// If we were to try to translate this closure into a more explicit
+ /// form, we'd encounter an error with the code as written:
+ ///
+ /// ```
+ /// struct Env { x: & &mut isize }
+ /// let x: &mut isize = ...;
+ /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ ///
+ /// This is then illegal because you cannot mutate a `&mut` found
+ /// in an aliasable location. To solve, you'd have to translate with
+ /// an `&mut` borrow:
+ ///
+ /// ```
+ /// struct Env { x: & &mut isize }
+ /// let x: &mut isize = ...;
+ /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ ///
+ /// Now the assignment to `**env.x` is legal, but creating a
+ /// mutable pointer to `x` is not because `x` is not mutable. We
+ /// could fix this by declaring `x` as `let mut x`. This is ok in
+ /// user code, if awkward, but extra weird for closures, since the
+ /// borrow is hidden.
+ ///
+ /// So we introduce a "unique imm" borrow -- the referent is
+ /// immutable, but not aliasable. This solves the problem. For
+ /// simplicity, we don't give users a way to express this
+ /// borrow; it's just used when translating closures.
+ UniqueImmBorrow,
+
+ /// Data is mutable and not aliasable.
+ MutBorrow,
+}
+
+impl BorrowKind {
+ pub fn from_mutbl(m: hir::Mutability) -> BorrowKind {
+ match m {
+ hir::Mutability::Mut => MutBorrow,
+ hir::Mutability::Not => ImmBorrow,
+ }
+ }
+
+ /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
+ /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
+ /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
+ /// question.
+ pub fn to_mutbl_lossy(self) -> hir::Mutability {
+ match self {
+ MutBorrow => hir::Mutability::Mut,
+ ImmBorrow => hir::Mutability::Not,
+
+ // We have no type corresponding to a unique imm borrow, so
+ // use `&mut`. It gives all the capabilities of an `&uniq`
+ // and hence is a safe "over approximation".
+ UniqueImmBorrow => hir::Mutability::Mut,
+ }
+ }
+
+ pub fn to_user_str(&self) -> &'static str {
+ match *self {
+ MutBorrow => "mutable",
+ ImmBorrow => "immutable",
+ UniqueImmBorrow => "uniquely immutable",
+ }
+ }
+}
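// A standalone sketch (simplified stand-in enums, not the compiler's) of the "lossy"
// direction documented in `to_mutbl_lossy` above: both `MutBorrow` and `UniqueImmBorrow`
// collapse to `Mut`, so a mutability alone cannot tell the two borrow kinds apart.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Mutability {
    Not,
    Mut,
}

#[derive(Clone, Copy)]
enum Borrow {
    ImmBorrow,
    UniqueImmBorrow,
    MutBorrow,
}

fn to_mutbl_lossy(b: Borrow) -> Mutability {
    match b {
        Borrow::ImmBorrow => Mutability::Not,
        // There is no surface syntax for a unique immutable borrow, so over-approximate
        // it with `&mut`.
        Borrow::UniqueImmBorrow | Borrow::MutBorrow => Mutability::Mut,
    }
}

fn main() {
    assert_eq!(to_mutbl_lossy(Borrow::ImmBorrow), Mutability::Not);
    assert_eq!(to_mutbl_lossy(Borrow::UniqueImmBorrow), to_mutbl_lossy(Borrow::MutBorrow));
}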
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
index 0dad5df..f796534 100644
--- a/compiler/rustc_middle/src/ty/codec.rs
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -253,7 +253,7 @@
fn decode(decoder: &mut D) -> Result<Self, D::Error> {
let len = decoder.read_usize()?;
let tcx = decoder.tcx();
- Ok(tcx.mk_substs((0..len).map(|_| Decodable::decode(decoder)))?)
+ tcx.mk_substs((0..len).map(|_| Decodable::decode(decoder)))
}
}
@@ -314,7 +314,7 @@
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<Ty<'tcx>> {
fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
let len = decoder.read_usize()?;
- Ok(decoder.tcx().mk_type_list((0..len).map(|_| Decodable::decode(decoder)))?)
+ decoder.tcx().mk_type_list((0..len).map(|_| Decodable::decode(decoder)))
}
}
@@ -323,9 +323,7 @@
{
fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
let len = decoder.read_usize()?;
- Ok(decoder
- .tcx()
- .mk_poly_existential_predicates((0..len).map(|_| Decodable::decode(decoder)))?)
+ decoder.tcx().mk_poly_existential_predicates((0..len).map(|_| Decodable::decode(decoder)))
}
}
@@ -335,6 +333,16 @@
}
}
+impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [ty::ValTree<'tcx>] {
+ fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
+ Ok(decoder.tcx().arena.alloc_from_iter(
+ (0..decoder.read_usize()?)
+ .map(|_| Decodable::decode(decoder))
+ .collect::<Result<Vec<_>, _>>()?,
+ ))
+ }
+}
+
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for Allocation {
fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
Ok(decoder.tcx().intern_const_alloc(Decodable::decode(decoder)?))
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
index 041c040..622f8e8 100644
--- a/compiler/rustc_middle/src/ty/consts.rs
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -10,9 +10,11 @@
mod int;
mod kind;
+mod valtree;
pub use int::*;
pub use kind::*;
+pub use valtree::*;
/// Typed constant value.
#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)]
@@ -23,7 +25,7 @@
pub val: ConstKind<'tcx>,
}
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(Const<'_>, 48);
impl<'tcx> Const<'tcx> {
@@ -55,7 +57,7 @@
let lit_input = match expr.kind {
hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }),
- hir::ExprKind::Unary(hir::UnOp::UnNeg, ref expr) => match expr.kind {
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => match expr.kind {
hir::ExprKind::Lit(ref lit) => {
Some(LitToConstInput { lit: &lit.node, ty, neg: true })
}
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
index 63e95f2..8ed8ea6 100644
--- a/compiler/rustc_middle/src/ty/consts/int.rs
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -5,6 +5,8 @@
use std::convert::{TryFrom, TryInto};
use std::fmt;
+use crate::ty::TyCtxt;
+
#[derive(Copy, Clone)]
/// A type for representing any integer. Only used for printing.
pub struct ConstInt {
@@ -239,6 +241,11 @@
Err(self.size())
}
}
+
+ #[inline]
+ pub fn try_to_machine_usize(&self, tcx: TyCtxt<'tcx>) -> Result<u64, Size> {
+ Ok(self.to_bits(tcx.data_layout.pointer_size)? as u64)
+ }
}
macro_rules! from {
@@ -277,6 +284,18 @@
from!(u8, u16, u32, u64, u128, bool);
try_from!(u8, u16, u32, u64, u128);
+impl TryFrom<ScalarInt> for bool {
+ type Error = Size;
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Size> {
+ int.to_bits(Size::from_bytes(1)).and_then(|u| match u {
+ 0 => Ok(false),
+ 1 => Ok(true),
+ _ => Err(Size::from_bytes(1)),
+ })
+ }
+}
+
impl From<char> for ScalarInt {
#[inline]
fn from(c: char) -> Self {
diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs
index a2638d8..43e22ce 100644
--- a/compiler/rustc_middle/src/ty/consts/kind.rs
+++ b/compiler/rustc_middle/src/ty/consts/kind.rs
@@ -1,3 +1,5 @@
+use std::convert::TryInto;
+
use crate::mir::interpret::ConstValue;
use crate::mir::interpret::Scalar;
use crate::mir::Promoted;
@@ -9,6 +11,8 @@
use rustc_macros::HashStable;
use rustc_target::abi::Size;
+use super::ScalarInt;
+
/// Represents a constant in Rust.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)]
@@ -37,7 +41,7 @@
Error(ty::DelaySpanBugEmitted),
}
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ConstKind<'_>, 40);
impl<'tcx> ConstKind<'tcx> {
@@ -52,13 +56,18 @@
}
#[inline]
+ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
+ Some(self.try_to_value()?.try_to_scalar()?.assert_int())
+ }
+
+ #[inline]
pub fn try_to_bits(self, size: Size) -> Option<u128> {
- self.try_to_value()?.try_to_bits(size)
+ self.try_to_scalar_int()?.to_bits(size).ok()
}
#[inline]
pub fn try_to_bool(self) -> Option<bool> {
- self.try_to_value()?.try_to_bool()
+ self.try_to_scalar_int()?.try_into().ok()
}
#[inline]
diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs
new file mode 100644
index 0000000..f1b78c8
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts/valtree.rs
@@ -0,0 +1,34 @@
+use super::ScalarInt;
+use rustc_macros::HashStable;
+
+#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(HashStable)]
+/// This data structure is used to represent the value of constants used in the type system.
+///
+/// We explicitly choose a representation different from the way values are processed within
+/// CTFE, as in the type system equal values (according to their `PartialEq`) must also have
+/// equal representation (`==` on the rustc data structure, e.g. `ValTree`) and vice versa.
+/// Since CTFE uses `AllocId` to represent pointers, it often happens that two different
+/// `AllocId`s point to equal values. So we may end up with different representations for
+/// two constants whose value is `&42`. Furthermore, any kind of struct that has padding will
+/// have arbitrary values within that padding, even if the values of the struct are the same.
+///
+/// `ValTree` does not have this problem with representation, as it only contains integers or
+/// lists of (nested) `ValTree`.
+pub enum ValTree<'tcx> {
+ /// ZSTs, integers, `bool`, `char` are represented as scalars.
+ /// See the `ScalarInt` documentation for how `ScalarInt` guarantees that equal values
+ /// of these types have the same representation.
+ Leaf(ScalarInt),
+ /// The fields of any kind of aggregate. Structs, tuples and arrays are represented by
+ /// listing their fields' values in order.
+ /// Enums are represented by storing their discriminant as a field, followed by all
+ /// the fields of the variant.
+ Branch(&'tcx [ValTree<'tcx>]),
+}
+
+impl ValTree<'tcx> {
+ pub fn zst() -> Self {
+ Self::Branch(&[])
+ }
+}
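// A standalone sketch of the representation described above, using plain `u128` leaves
// instead of `ScalarInt` (so this is not the compiler's type): the value `(1u8, (2u8, 3u8))`
// becomes a tree whose structure -- not addresses or padding -- is what gets compared.
#[derive(Debug, PartialEq)]
enum Tree<'a> {
    Leaf(u128),
    Branch(&'a [Tree<'a>]),
}

fn main() {
    let inner = [Tree::Leaf(2), Tree::Leaf(3)];
    let outer = [Tree::Leaf(1), Tree::Branch(&inner)];
    let value = Tree::Branch(&outer);

    // Structural equality is plain `==`, unlike `AllocId`-based CTFE values where two
    // different allocations may hold the same bytes.
    let inner2 = [Tree::Leaf(2), Tree::Leaf(3)];
    let outer2 = [Tree::Leaf(1), Tree::Branch(&inner2)];
    assert_eq!(value, Tree::Branch(&outer2));
}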
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 1255302..41a8bc1 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -14,7 +14,7 @@
use crate::mir::interpret::{self, Allocation, ConstValue, Scalar};
use crate::mir::{Body, Field, Local, Place, PlaceElem, ProjectionKind, Promoted};
use crate::traits;
-use crate::ty::query::{self, TyCtxtAt};
+use crate::ty::query::{self, OnDiskCache, TyCtxtAt};
use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts};
use crate::ty::TyKind::*;
use crate::ty::{
@@ -30,9 +30,7 @@
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
-use rustc_data_structures::stable_hasher::{
- hash_stable_hashmap, HashStable, StableHasher, StableVec,
-};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableVec};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{self, Lock, Lrc, WorkerLocal};
use rustc_errors::ErrorReported;
@@ -47,12 +45,13 @@
};
use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable;
+use rustc_middle::mir::FakeReadCause;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::config::{BorrowckMode, CrateType, OutputFilenames};
use rustc_session::lint::{Level, Lint};
use rustc_session::Session;
use rustc_span::source_map::MultiSpan;
-use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::{Layout, TargetDataLayout, VariantIdx};
use rustc_target::spec::abi;
@@ -94,6 +93,8 @@
projs: InternedSet<'tcx, List<ProjectionKind>>,
place_elems: InternedSet<'tcx, List<PlaceElem<'tcx>>>,
const_: InternedSet<'tcx, Const<'tcx>>,
+ /// Const allocations.
+ allocation: InternedSet<'tcx, Allocation>,
}
impl<'tcx> CtxtInterners<'tcx> {
@@ -111,6 +112,7 @@
projs: Default::default(),
place_elems: Default::default(),
const_: Default::default(),
+ allocation: Default::default(),
}
}
@@ -206,19 +208,26 @@
/// would be in a different frame of reference and using its `local_id`
/// would result in lookup errors, or worse, in silently wrong data being
/// stored/returned.
+#[inline]
fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
if hir_id.owner != hir_owner {
- ty::tls::with(|tcx| {
- bug!(
- "node {} with HirId::owner {:?} cannot be placed in TypeckResults with hir_owner {:?}",
- tcx.hir().node_to_string(hir_id),
- hir_id.owner,
- hir_owner
- )
- });
+ invalid_hir_id_for_typeck_results(hir_owner, hir_id);
}
}
+#[cold]
+#[inline(never)]
+fn invalid_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+ ty::tls::with(|tcx| {
+ bug!(
+ "node {} with HirId::owner {:?} cannot be placed in TypeckResults with hir_owner {:?}",
+ tcx.hir().node_to_string(hir_id),
+ hir_id.owner,
+ hir_owner
+ )
+ });
+}
+
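// A standalone sketch of the hot/cold split used above (generic example, not compiler
// code): keep the almost-always-taken check small enough to inline at every call site, and
// outline the failure path into a `#[cold]`, never-inlined function so it does not bloat
// callers or pollute the hot path.
#[inline]
fn validate(len: usize, idx: usize) {
    if idx >= len {
        out_of_bounds(len, idx);
    }
}

#[cold]
#[inline(never)]
fn out_of_bounds(len: usize, idx: usize) {
    panic!("index {} out of bounds for length {}", idx, len);
}

fn main() {
    validate(4, 2); // fine; the cold path is never taken
}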
impl<'a, V> LocalTableInContext<'a, V> {
pub fn contains_key(&self, id: hir::HirId) -> bool {
validate_hir_id_for_typeck_results(self.hir_owner, id);
@@ -375,9 +384,6 @@
/// <https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions>
pat_adjustments: ItemLocalMap<Vec<Ty<'tcx>>>,
- /// Borrows
- pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>,
-
/// Records the reasons that we picked the kind of each closure;
/// not all closures are present in the map.
closure_kind_origins: ItemLocalMap<(Span, HirPlace<'tcx>)>,
@@ -413,16 +419,34 @@
/// by this function.
pub concrete_opaque_types: FxHashMap<DefId, ResolvedOpaqueTy<'tcx>>,
- /// Given the closure ID this map provides the list of UpvarIDs used by it.
- /// The upvarID contains the HIR node ID and it also contains the full path
- /// leading to the member of the struct or tuple that is used instead of the
- /// entire variable.
- pub closure_captures: ty::UpvarListMap,
-
/// Tracks the minimum captures required for a closure;
/// see `MinCaptureInformationMap` for more details.
pub closure_min_captures: ty::MinCaptureInformationMap<'tcx>,
+ /// Tracks the fake reads required for a closure and the reason for the fake read.
+ /// When performing pattern matching for closures, there are times we don't end up
+ /// reading places that are mentioned in a closure (because of _ patterns). However,
+ /// to ensure the places are initialized, we introduce fake reads.
+ /// Consider these two examples:
+ /// ``` (discriminant matching with only wildcard arm)
+ /// let x: u8;
+ /// let c = || match x { _ => () };
+ /// ```
+ /// In this example, we don't need to actually read/borrow `x` in `c`, and so we don't
+ /// want to capture it. However, we do still want an error here, because `x` should have
+ /// to be initialized at the point where `c` is created. Therefore, we add a "fake read"
+ /// instead.
+ /// ``` (destructured assignments)
+ /// let c = || {
+ /// let (t1, t2) = t;
+ /// };
+ /// ```
+ /// In the second example, we capture the disjoint fields of `t` (`t.0` & `t.1`), but
+ /// we never capture `t`. This becomes an issue when we build MIR as we require
+ /// information on `t` in order to create place `t.0` and `t.1`. We can solve this
+ /// issue by fake reading `t`.
+ pub closure_fake_reads: FxHashMap<DefId, Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>>,
+
/// Stores the type, expression, span and optional scope span of all types
/// that are live across the yield of this generator (if a generator).
pub generator_interior_types: ty::Binder<Vec<GeneratorInteriorTypeCause<'tcx>>>,
@@ -447,7 +471,6 @@
adjustments: Default::default(),
pat_binding_modes: Default::default(),
pat_adjustments: Default::default(),
- upvar_capture_map: Default::default(),
closure_kind_origins: Default::default(),
liberated_fn_sigs: Default::default(),
fru_field_types: Default::default(),
@@ -455,8 +478,8 @@
used_trait_imports: Lrc::new(Default::default()),
tainted_by_errors: None,
concrete_opaque_types: Default::default(),
- closure_captures: Default::default(),
closure_min_captures: Default::default(),
+ closure_fake_reads: Default::default(),
generator_interior_types: ty::Binder::dummy(Default::default()),
treat_byte_string_as_slice: Default::default(),
}
@@ -639,10 +662,6 @@
.flatten()
}
- pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> ty::UpvarCapture<'tcx> {
- self.upvar_capture_map[&upvar_id]
- }
-
pub fn closure_kind_origins(&self) -> LocalTableInContext<'_, (Span, HirPlace<'tcx>)> {
LocalTableInContext { hir_owner: self.hir_owner, data: &self.closure_kind_origins }
}
@@ -696,23 +715,22 @@
ref adjustments,
ref pat_binding_modes,
ref pat_adjustments,
- ref upvar_capture_map,
ref closure_kind_origins,
ref liberated_fn_sigs,
ref fru_field_types,
-
ref coercion_casts,
-
ref used_trait_imports,
tainted_by_errors,
ref concrete_opaque_types,
- ref closure_captures,
ref closure_min_captures,
+ ref closure_fake_reads,
ref generator_interior_types,
ref treat_byte_string_as_slice,
} = *self;
hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+ hcx.local_def_path_hash(hir_owner);
+
type_dependent_defs.hash_stable(hcx, hasher);
field_indices.hash_stable(hcx, hasher);
user_provided_types.hash_stable(hcx, hasher);
@@ -722,17 +740,6 @@
adjustments.hash_stable(hcx, hasher);
pat_binding_modes.hash_stable(hcx, hasher);
pat_adjustments.hash_stable(hcx, hasher);
- hash_stable_hashmap(hcx, hasher, upvar_capture_map, |up_var_id, hcx| {
- let ty::UpvarId { var_path, closure_expr_id } = *up_var_id;
-
- assert_eq!(var_path.hir_id.owner, hir_owner);
-
- (
- hcx.local_def_path_hash(var_path.hir_id.owner),
- var_path.hir_id.local_id,
- hcx.local_def_path_hash(closure_expr_id),
- )
- });
closure_kind_origins.hash_stable(hcx, hasher);
liberated_fn_sigs.hash_stable(hcx, hasher);
@@ -741,8 +748,8 @@
used_trait_imports.hash_stable(hcx, hasher);
tainted_by_errors.hash_stable(hcx, hasher);
concrete_opaque_types.hash_stable(hcx, hasher);
- closure_captures.hash_stable(hcx, hasher);
closure_min_captures.hash_stable(hcx, hasher);
+ closure_fake_reads.hash_stable(hcx, hasher);
generator_interior_types.hash_stable(hcx, hasher);
treat_byte_string_as_slice.hash_stable(hcx, hasher);
})
@@ -962,7 +969,14 @@
pub(crate) untracked_crate: &'tcx hir::Crate<'tcx>,
pub(crate) definitions: &'tcx Definitions,
- pub queries: query::Queries<'tcx>,
+ /// This provides access to the incremental compilation on-disk cache for query results.
+ /// Do not access this directly. It is only meant to be used by
+ /// `DepGraph::try_mark_green()` and the query infrastructure.
+ /// This is `None` if we are not in incremental compilation mode.
+ pub on_disk_cache: Option<OnDiskCache<'tcx>>,
+
+ pub queries: &'tcx dyn query::QueryEngine<'tcx>,
+ pub query_caches: query::QueryCaches<'tcx>,
maybe_unused_trait_imports: FxHashSet<LocalDefId>,
maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
@@ -999,9 +1013,6 @@
/// `#[rustc_const_stable]` and `#[rustc_const_unstable]` attributes
const_stability_interner: ShardedHashMap<&'tcx attr::ConstStability, ()>,
- /// Stores the value of constants (and deduplicates the actual memory)
- allocation_interner: ShardedHashMap<&'tcx Allocation, ()>,
-
/// Stores memory for globals (statics/consts).
pub(crate) alloc_map: Lock<interpret::AllocMap<'tcx>>,
@@ -1044,7 +1055,10 @@
}
pub fn intern_const_alloc(self, alloc: Allocation) -> &'tcx Allocation {
- self.allocation_interner.intern(alloc, |alloc| self.arena.alloc(alloc))
+ self.interners
+ .allocation
+ .intern(alloc, |alloc| Interned(self.interners.arena.alloc(alloc)))
+ .0
}
/// Allocates a read-only byte or string literal for `mir::interpret`.
@@ -1077,13 +1091,16 @@
None => return Bound::Unbounded,
};
debug!("layout_scalar_valid_range: attr={:?}", attr);
- for meta in attr.meta_item_list().expect("rustc_layout_scalar_valid_range takes args") {
- match meta.literal().expect("attribute takes lit").kind {
- ast::LitKind::Int(a, _) => return Bound::Included(a),
- _ => span_bug!(attr.span, "rustc_layout_scalar_valid_range expects int arg"),
- }
+ if let Some(
+ &[ast::NestedMetaItem::Literal(ast::Lit { kind: ast::LitKind::Int(a, _), .. })],
+ ) = attr.meta_item_list().as_deref()
+ {
+ Bound::Included(a)
+ } else {
+ self.sess
+ .delay_span_bug(attr.span, "invalid rustc_layout_scalar_valid_range attribute");
+ Bound::Unbounded
}
- span_bug!(attr.span, "no arguments to `rustc_layout_scalar_valid_range` attribute");
};
(
get(sym::rustc_layout_scalar_valid_range_start),
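Editor's note: for context on what the hunk above parses, here is an illustrative use of the perma-unstable `rustc_layout_scalar_valid_range_start` attribute (nightly-only; normally used inside the standard library, e.g. for the `NonZero*` types). The attribute takes a single integer literal, which is exactly the shape the new `if let` pattern accepts; malformed forms now produce a delayed bug instead of an outright panic.

```rust
#![feature(rustc_attrs)] // internal attribute; user crates normally never use this

#[rustc_layout_scalar_valid_range_start(1)]
#[repr(transparent)]
struct NonZero(u32);

fn main() {
    // Constructing a value is `unsafe`: the caller promises the invariant
    // (here, value >= 1) that the attribute declares.
    let n = unsafe { NonZero(5) };
    let _ = n;
    // The declared niche is what lets `Option<NonZero>` stay 4 bytes:
    // `0` encodes `None`.
    assert_eq!(std::mem::size_of::<Option<NonZero>>(), 4);
}
```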
@@ -1102,14 +1119,13 @@
pub fn create_global_ctxt(
s: &'tcx Session,
lint_store: Lrc<dyn Any + sync::Send + sync::Sync>,
- local_providers: ty::query::Providers,
- extern_providers: ty::query::Providers,
arena: &'tcx WorkerLocal<Arena<'tcx>>,
resolutions: ty::ResolverOutputs,
krate: &'tcx hir::Crate<'tcx>,
definitions: &'tcx Definitions,
dep_graph: DepGraph,
- on_disk_query_result_cache: Option<query::OnDiskCache<'tcx>>,
+ on_disk_cache: Option<query::OnDiskCache<'tcx>>,
+ queries: &'tcx dyn query::QueryEngine<'tcx>,
crate_name: &str,
output_filenames: &OutputFilenames,
) -> GlobalCtxt<'tcx> {
@@ -1121,10 +1137,6 @@
let common_lifetimes = CommonLifetimes::new(&interners);
let common_consts = CommonConsts::new(&interners, &common_types);
let cstore = resolutions.cstore;
- let crates = cstore.crates_untracked();
- let max_cnum = crates.iter().map(|c| c.as_usize()).max().unwrap_or(0);
- let mut providers = IndexVec::from_elem_n(extern_providers, max_cnum + 1);
- providers[LOCAL_CRATE] = local_providers;
let mut trait_map: FxHashMap<_, FxHashMap<_, _>> = FxHashMap::default();
for (hir_id, v) in krate.trait_map.iter() {
@@ -1153,7 +1165,9 @@
extern_prelude: resolutions.extern_prelude,
untracked_crate: krate,
definitions,
- queries: query::Queries::new(providers, extern_providers, on_disk_query_result_cache),
+ on_disk_cache,
+ queries,
+ query_caches: query::QueryCaches::default(),
ty_rcache: Default::default(),
pred_rcache: Default::default(),
selection_cache: Default::default(),
@@ -1163,7 +1177,6 @@
layout_interner: Default::default(),
stability_interner: Default::default(),
const_stability_interner: Default::default(),
- allocation_interner: Default::default(),
alloc_map: Lock::new(interpret::AllocMap::new()),
output_filenames: Arc::new(output_filenames.clone()),
}
@@ -1318,7 +1331,7 @@
}
pub fn serialize_query_result_cache(self, encoder: &mut FileEncoder) -> FileEncodeResult {
- self.queries.on_disk_cache.as_ref().map_or(Ok(()), |c| c.serialize(self, encoder))
+ self.on_disk_cache.as_ref().map_or(Ok(()), |c| c.serialize(self, encoder))
}
/// If `true`, we should use the MIR-based borrowck, but also
@@ -1599,6 +1612,7 @@
nop_lift! {type_; Ty<'a> => Ty<'tcx>}
nop_lift! {region; Region<'a> => Region<'tcx>}
nop_lift! {const_; &'a Const<'a> => &'tcx Const<'tcx>}
+nop_lift! {allocation; &'a Allocation => &'tcx Allocation}
nop_lift! {predicate; &'a PredicateInner<'a> => &'tcx PredicateInner<'tcx>}
nop_list_lift! {type_list; Ty<'a> => Ty<'tcx>}
@@ -1889,7 +1903,7 @@
"Const Stability interner: #{}",
self.0.const_stability_interner.len()
)?;
- writeln!(fmt, "Allocation interner: #{}", self.0.allocation_interner.len())?;
+ writeln!(fmt, "Allocation interner: #{}", self.0.interners.allocation.len())?;
writeln!(fmt, "Layout interner: #{}", self.0.layout_interner.len())?;
Ok(())
@@ -1990,6 +2004,26 @@
}
}
+impl<'tcx> Borrow<Allocation> for Interned<'tcx, Allocation> {
+ fn borrow<'a>(&'a self) -> &'a Allocation {
+ &self.0
+ }
+}
+
+impl<'tcx> PartialEq for Interned<'tcx, Allocation> {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+}
+
+impl<'tcx> Eq for Interned<'tcx, Allocation> {}
+
+impl<'tcx> Hash for Interned<'tcx, Allocation> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ self.0.hash(s)
+ }
+}
+
macro_rules! direct_interners {
($($name:ident: $method:ident($ty:ty),)+) => {
$(impl<'tcx> PartialEq for Interned<'tcx, $ty> {
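Editor's note: a self-contained sketch of the interning pattern the `Interned<'tcx, Allocation>` impls above enable, heavily simplified (`String` standing in for `Allocation`, `Box::leak` standing in for the arena, a plain `HashSet` instead of a sharded map). The point is that `Borrow`, `Eq` and `Hash` on the wrapper must all agree with the pointee, so the set can be probed with the candidate value before allocating a new entry.

```rust
use std::borrow::Borrow;
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Interned(&'static String);

impl Borrow<String> for Interned {
    fn borrow(&self) -> &String {
        self.0
    }
}

fn intern(set: &mut HashSet<Interned>, value: String) -> &'static String {
    // Probe by value, thanks to `Borrow<String>` + matching `Eq`/`Hash`.
    if let Some(existing) = set.get(&value) {
        return existing.0;
    }
    let leaked: &'static String = Box::leak(Box::new(value));
    set.insert(Interned(leaked));
    leaked
}

fn main() {
    let mut set = HashSet::new();
    let a = intern(&mut set, "hello".to_string());
    let b = intern(&mut set, "hello".to_string());
    // Deduplicated: both calls return the same allocation.
    assert!(std::ptr::eq(a, b));
}
```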
@@ -2053,6 +2087,42 @@
self.mk_fn_ptr(sig.map_bound(|sig| ty::FnSig { unsafety: hir::Unsafety::Unsafe, ..sig }))
}
+ /// Given the `DefId` of a trait (`trait_def_id`) and the name of an associated item
+ /// (`assoc_name`), returns `true` if `trait_def_id` defines an associated item with that name.
+ pub fn trait_may_define_assoc_type(self, trait_def_id: DefId, assoc_name: Ident) -> bool {
+ self.super_traits_of(trait_def_id).any(|trait_did| {
+ self.associated_items(trait_did)
+ .find_by_name_and_kind(self, assoc_name, ty::AssocKind::Type, trait_did)
+ .is_some()
+ })
+ }
+
+ /// Computes the def-ids of the transitive super-traits of `trait_def_id`. This (intentionally)
+ /// does not compute the full elaborated super-predicates but just the set of def-ids. It is used
+ /// to identify which traits may define a given associated type to help avoid cycle errors.
+ /// Returns a `DefId` iterator.
+ fn super_traits_of(self, trait_def_id: DefId) -> impl Iterator<Item = DefId> + 'tcx {
+ let mut set = FxHashSet::default();
+ let mut stack = vec![trait_def_id];
+
+ set.insert(trait_def_id);
+
+ iter::from_fn(move || -> Option<DefId> {
+ let trait_did = stack.pop()?;
+ let generic_predicates = self.super_predicates_of(trait_did);
+
+ for (predicate, _) in generic_predicates.predicates {
+ if let ty::PredicateKind::Trait(data, _) = predicate.kind().skip_binder() {
+ if set.insert(data.def_id()) {
+ stack.push(data.def_id());
+ }
+ }
+ }
+
+ Some(trait_did)
+ })
+ }
+
/// Given a closure signature, returns an equivalent fn signature. Detuples
/// and so forth -- so e.g., if we have a sig with `Fn<(u32, i32)>` then
/// you would get a `fn(u32, i32)`.
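Editor's note: a standalone sketch of the traversal shape used by `super_traits_of` above: a lazy, deduplicated walk over a super-trait DAG, driven by `iter::from_fn` so callers such as `trait_may_define_assoc_type` can stop early via `.any(..)`. The trait names and edge map here are illustrative only.

```rust
use std::collections::{HashMap, HashSet};
use std::iter;

fn super_traits_of<'a>(
    edges: &'a HashMap<&'a str, Vec<&'a str>>,
    root: &'a str,
) -> impl Iterator<Item = &'a str> + 'a {
    let mut seen: HashSet<&str> = HashSet::new();
    let mut stack = vec![root];
    seen.insert(root);

    iter::from_fn(move || {
        let next = stack.pop()?;
        // Push unseen super-traits; `seen` prevents cycles and duplicates.
        for &parent in edges.get(next).into_iter().flatten() {
            if seen.insert(parent) {
                stack.push(parent);
            }
        }
        Some(next)
    })
}

fn main() {
    let mut edges = HashMap::new();
    edges.insert("Copy", vec!["Clone"]);
    edges.insert("Ord", vec!["Eq", "PartialOrd"]);
    let found: Vec<_> = super_traits_of(&edges, "Ord").collect();
    assert!(found.contains(&"Eq") && found.contains(&"PartialOrd"));
}
```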
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
index 4a131a4..982c8a3 100644
--- a/compiler/rustc_middle/src/ty/diagnostics.rs
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -75,6 +75,36 @@
}
}
+pub fn suggest_arbitrary_trait_bound(
+ generics: &hir::Generics<'_>,
+ err: &mut DiagnosticBuilder<'_>,
+ param_name: &str,
+ constraint: &str,
+) -> bool {
+ let param = generics.params.iter().find(|p| p.name.ident().as_str() == param_name);
+ match (param, param_name) {
+ (Some(_), "Self") => return false,
+ _ => {}
+ }
+ // Suggest a where clause bound for a non-type parameter.
+ let (action, prefix) = if generics.where_clause.predicates.is_empty() {
+ ("introducing a", " where ")
+ } else {
+ ("extending the", ", ")
+ };
+ err.span_suggestion_verbose(
+ generics.where_clause.tail_span_for_suggestion(),
+ &format!(
+ "consider {} `where` bound, but there might be an alternative better way to express \
+ this requirement",
+ action,
+ ),
+ format!("{}{}: {}", prefix, param_name, constraint),
+ Applicability::MaybeIncorrect,
+ );
+ true
+}
+
/// Suggest restricting a type param with a new bound.
pub fn suggest_constraining_type_param(
tcx: TyCtxt<'_>,
@@ -284,12 +314,13 @@
hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Static,
..
},
+ _,
) => {
self.0.push(ty);
}
hir::TyKind::OpaqueDef(item_id, _) => {
self.0.push(ty);
- let item = self.1.expect_item(item_id.id);
+ let item = self.1.item(item_id);
hir::intravisit::walk_item(self, item);
}
_ => {}
diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs
index 1669c59..f19cc99 100644
--- a/compiler/rustc_middle/src/ty/error.rs
+++ b/compiler/rustc_middle/src/ty/error.rs
@@ -1,5 +1,6 @@
use crate::traits::{ObligationCause, ObligationCauseCode};
use crate::ty::diagnostics::suggest_constraining_type_param;
+use crate::ty::print::{FmtPrinter, Printer};
use crate::ty::{self, BoundRegionKind, Region, Ty, TyCtxt};
use rustc_errors::Applicability::{MachineApplicable, MaybeIncorrect};
use rustc_errors::{pluralize, DiagnosticBuilder};
@@ -11,7 +12,6 @@
use std::borrow::Cow;
use std::fmt;
-use std::ops::Deref;
#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable)]
pub struct ExpectedFound<T> {
@@ -228,12 +228,17 @@
ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did)).into(),
ty::Foreign(def_id) => format!("extern type `{}`", tcx.def_path_str(def_id)).into(),
ty::Array(t, n) => {
- let n = tcx.lift(n).unwrap();
- match n.try_eval_usize(tcx, ty::ParamEnv::empty()) {
- _ if t.is_simple_ty() => format!("array `{}`", self).into(),
- Some(n) => format!("array of {} element{}", n, pluralize!(n)).into(),
- None => "array".into(),
+ if t.is_simple_ty() {
+ return format!("array `{}`", self).into();
}
+
+ let n = tcx.lift(n).unwrap();
+ if let ty::ConstKind::Value(v) = n.val {
+ if let Some(n) = v.try_to_machine_usize(tcx) {
+ return format!("array of {} element{}", n, pluralize!(n)).into();
+ }
+ }
+ "array".into()
}
ty::Slice(ty) if ty.is_simple_ty() => format!("slice `{}`", self).into(),
ty::Slice(_) => "slice".into(),
@@ -264,7 +269,7 @@
}
}
ty::Closure(..) => "closure".into(),
- ty::Generator(..) => "generator".into(),
+ ty::Generator(def_id, ..) => tcx.generator_kind(def_id).unwrap().descr().into(),
ty::GeneratorWitness(..) => "generator witness".into(),
ty::Tuple(..) => "tuple".into(),
ty::Infer(ty::TyVar(_)) => "inferred type".into(),
@@ -282,7 +287,7 @@
}
}
- pub fn prefix_string(&self) -> Cow<'static, str> {
+ pub fn prefix_string(&self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
match *self.kind() {
ty::Infer(_)
| ty::Error(_)
@@ -308,7 +313,7 @@
ty::FnPtr(_) => "fn pointer".into(),
ty::Dynamic(..) => "trait object".into(),
ty::Closure(..) => "closure".into(),
- ty::Generator(..) => "generator".into(),
+ ty::Generator(def_id, ..) => tcx.generator_kind(def_id).unwrap().descr().into(),
ty::GeneratorWitness(..) => "generator witness".into(),
ty::Tuple(..) => "tuple".into(),
ty::Placeholder(..) => "higher-ranked type".into(),
@@ -400,14 +405,22 @@
{
// Synthesize the associated type restriction `Add<Output = Expected>`.
// FIXME: extract this logic for use in other diagnostics.
- let trait_ref = proj.trait_ref(self);
+ let (trait_ref, assoc_substs) = proj.trait_ref_and_own_substs(self);
let path =
self.def_path_str_with_substs(trait_ref.def_id, trait_ref.substs);
let item_name = self.item_name(proj.item_def_id);
+ let item_args = self.format_generic_args(assoc_substs);
+
let path = if path.ends_with('>') {
- format!("{}, {} = {}>", &path[..path.len() - 1], item_name, p)
+ format!(
+ "{}, {}{} = {}>",
+ &path[..path.len() - 1],
+ item_name,
+ item_args,
+ p
+ )
} else {
- format!("{}<{} = {}>", path, item_name, p)
+ format!("{}<{}{} = {}>", path, item_name, item_args, p)
};
note = !suggest_constraining_type_param(
self,
@@ -534,7 +547,6 @@
TargetFeatureCast(def_id) => {
let attrs = self.get_attrs(*def_id);
let target_spans = attrs
- .deref()
.iter()
.filter(|attr| attr.has_name(sym::target_feature))
.map(|attr| attr.span);
@@ -556,7 +568,7 @@
ty: Ty<'tcx>,
) -> bool {
let assoc = self.associated_item(proj_ty.item_def_id);
- let trait_ref = proj_ty.trait_ref(self);
+ let (trait_ref, assoc_substs) = proj_ty.trait_ref_and_own_substs(self);
if let Some(item) = self.hir().get_if_local(body_owner_def_id) {
if let Some(hir_generics) = item.generics() {
// Get the `DefId` for the type parameter corresponding to `A` in `<A as T>::Foo`.
@@ -590,6 +602,7 @@
&trait_ref,
pred.bounds,
&assoc,
+ assoc_substs,
ty,
msg,
) {
@@ -607,6 +620,7 @@
&trait_ref,
param.bounds,
&assoc,
+ assoc_substs,
ty,
msg,
);
@@ -692,6 +706,7 @@
db,
self.def_span(def_id),
&assoc,
+ proj_ty.trait_ref_and_own_substs(self).1,
values.found,
&msg,
) {
@@ -816,7 +831,7 @@
// an assoc type as a return type (#72076).
if let hir::Defaultness::Default { has_value: true } = item.defaultness
{
- if self.type_of(self.hir().local_def_id(item.id.hir_id)) == found {
+ if self.type_of(item.id.def_id) == found {
db.span_label(
item.span,
"associated type defaults can't be assumed inside the \
@@ -836,7 +851,7 @@
})) => {
for item in &items[..] {
if let hir::AssocItemKind::Type = item.kind {
- if self.type_of(self.hir().local_def_id(item.id.hir_id)) == found {
+ if self.type_of(item.id.def_id) == found {
db.span_label(item.span, "expected this associated type");
return true;
}
@@ -856,6 +871,7 @@
trait_ref: &ty::TraitRef<'tcx>,
bounds: hir::GenericBounds<'_>,
assoc: &ty::AssocItem,
+ assoc_substs: &[ty::GenericArg<'tcx>],
ty: Ty<'tcx>,
msg: &str,
) -> bool {
@@ -865,7 +881,12 @@
// Relate the type param against `T` in `<A as T>::Foo`.
ptr.trait_ref.trait_def_id() == Some(trait_ref.def_id)
&& self.constrain_associated_type_structured_suggestion(
- db, ptr.span, assoc, ty, msg,
+ db,
+ ptr.span,
+ assoc,
+ assoc_substs,
+ ty,
+ msg,
)
}
_ => false,
@@ -879,6 +900,7 @@
db: &mut DiagnosticBuilder<'_>,
span: Span,
assoc: &ty::AssocItem,
+ assoc_substs: &[ty::GenericArg<'tcx>],
ty: Ty<'tcx>,
msg: &str,
) -> bool {
@@ -890,11 +912,20 @@
let span = Span::new(pos, pos, span.ctxt());
(span, format!(", {} = {}", assoc.ident, ty))
} else {
- (span.shrink_to_hi(), format!("<{} = {}>", assoc.ident, ty))
+ let item_args = self.format_generic_args(assoc_substs);
+ (span.shrink_to_hi(), format!("<{}{} = {}>", assoc.ident, item_args, ty))
};
db.span_suggestion_verbose(span, msg, sugg, MaybeIncorrect);
return true;
}
false
}
+
+ fn format_generic_args(self, args: &[ty::GenericArg<'tcx>]) -> String {
+ let mut item_args = String::new();
+ FmtPrinter::new(self, &mut item_args, hir::def::Namespace::TypeNS)
+ .path_generic_args(Ok, args)
+ .expect("could not write to `String`.");
+ item_args
+ }
}
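Editor's note: a small standalone sketch of the suggestion string surgery in the hunks above, which splice `Item<Args> = Found` into a trait path, reusing the trailing `>` when the path already carries generic arguments. Names and inputs are illustrative.

```rust
fn constrain_assoc_type(path: &str, item: &str, item_args: &str, found: &str) -> String {
    if path.ends_with('>') {
        // Re-open the existing argument list and append the binding.
        format!("{}, {}{} = {}>", &path[..path.len() - 1], item, item_args, found)
    } else {
        // No arguments yet: start a fresh list.
        format!("{}<{}{} = {}>", path, item, item_args, found)
    }
}

fn main() {
    assert_eq!(
        constrain_assoc_type("Add<u32>", "Output", "", "u64"),
        "Add<u32, Output = u64>"
    );
    assert_eq!(
        constrain_assoc_type("Iterator", "Item", "", "u8"),
        "Iterator<Item = u8>"
    );
}
```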
diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs
index 382f370..a6a1d1f 100644
--- a/compiler/rustc_middle/src/ty/fold.rs
+++ b/compiler/rustc_middle/src/ty/fold.rs
@@ -439,18 +439,18 @@
/// the ones we have visited.
current_index: ty::DebruijnIndex,
- fld_r: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a),
- fld_t: &'a mut (dyn FnMut(ty::BoundTy) -> Ty<'tcx> + 'a),
- fld_c: &'a mut (dyn FnMut(ty::BoundVar, Ty<'tcx>) -> &'tcx ty::Const<'tcx> + 'a),
+ fld_r: Option<&'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a)>,
+ fld_t: Option<&'a mut (dyn FnMut(ty::BoundTy) -> Ty<'tcx> + 'a)>,
+ fld_c: Option<&'a mut (dyn FnMut(ty::BoundVar, Ty<'tcx>) -> &'tcx ty::Const<'tcx> + 'a)>,
}
impl<'a, 'tcx> BoundVarReplacer<'a, 'tcx> {
- fn new<F, G, H>(tcx: TyCtxt<'tcx>, fld_r: &'a mut F, fld_t: &'a mut G, fld_c: &'a mut H) -> Self
- where
- F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
- G: FnMut(ty::BoundTy) -> Ty<'tcx>,
- H: FnMut(ty::BoundVar, Ty<'tcx>) -> &'tcx ty::Const<'tcx>,
- {
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ fld_r: Option<&'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a)>,
+ fld_t: Option<&'a mut (dyn FnMut(ty::BoundTy) -> Ty<'tcx> + 'a)>,
+ fld_c: Option<&'a mut (dyn FnMut(ty::BoundVar, Ty<'tcx>) -> &'tcx ty::Const<'tcx> + 'a)>,
+ ) -> Self {
BoundVarReplacer { tcx, current_index: ty::INNERMOST, fld_r, fld_t, fld_c }
}
}
@@ -469,63 +469,58 @@
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
match *t.kind() {
- ty::Bound(debruijn, bound_ty) => {
- if debruijn == self.current_index {
- let fld_t = &mut self.fld_t;
+ ty::Bound(debruijn, bound_ty) if debruijn == self.current_index => {
+ if let Some(fld_t) = self.fld_t.as_mut() {
let ty = fld_t(bound_ty);
- ty::fold::shift_vars(self.tcx, &ty, self.current_index.as_u32())
- } else {
- t
+ return ty::fold::shift_vars(self.tcx, &ty, self.current_index.as_u32());
}
}
- _ => {
- if !t.has_vars_bound_at_or_above(self.current_index) {
- // Nothing more to substitute.
- t
- } else {
- t.super_fold_with(self)
- }
+ _ if t.has_vars_bound_at_or_above(self.current_index) => {
+ return t.super_fold_with(self);
}
+ _ => {}
}
+ t
}
fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
match *r {
ty::ReLateBound(debruijn, br) if debruijn == self.current_index => {
- let fld_r = &mut self.fld_r;
- let region = fld_r(br);
- if let ty::ReLateBound(debruijn1, br) = *region {
- // If the callback returns a late-bound region,
- // that region should always use the INNERMOST
- // debruijn index. Then we adjust it to the
- // correct depth.
- assert_eq!(debruijn1, ty::INNERMOST);
- self.tcx.mk_region(ty::ReLateBound(debruijn, br))
- } else {
- region
+ if let Some(fld_r) = self.fld_r.as_mut() {
+ let region = fld_r(br);
+ return if let ty::ReLateBound(debruijn1, br) = *region {
+ // If the callback returns a late-bound region,
+ // that region should always use the INNERMOST
+ // debruijn index. Then we adjust it to the
+ // correct depth.
+ assert_eq!(debruijn1, ty::INNERMOST);
+ self.tcx.mk_region(ty::ReLateBound(debruijn, br))
+ } else {
+ region
+ };
}
}
- _ => r,
+ _ => {}
}
+ r
}
fn fold_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
- if let ty::Const { val: ty::ConstKind::Bound(debruijn, bound_const), ty } = *ct {
- if debruijn == self.current_index {
- let fld_c = &mut self.fld_c;
- let ct = fld_c(bound_const, ty);
- ty::fold::shift_vars(self.tcx, &ct, self.current_index.as_u32())
- } else {
- ct
+ match *ct {
+ ty::Const { val: ty::ConstKind::Bound(debruijn, bound_const), ty }
+ if debruijn == self.current_index =>
+ {
+ if let Some(fld_c) = self.fld_c.as_mut() {
+ let ct = fld_c(bound_const, ty);
+ return ty::fold::shift_vars(self.tcx, &ct, self.current_index.as_u32());
+ }
}
- } else {
- if !ct.has_vars_bound_at_or_above(self.current_index) {
- // Nothing more to substitute.
- ct
- } else {
- ct.super_fold_with(self)
+ _ if ct.has_vars_bound_at_or_above(self.current_index) => {
+ return ct.super_fold_with(self);
}
+ _ => {}
}
+ ct
}
}
@@ -550,14 +545,16 @@
F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
T: TypeFoldable<'tcx>,
{
- // identity for bound types and consts
- let fld_t = |bound_ty| self.mk_ty(ty::Bound(ty::INNERMOST, bound_ty));
- let fld_c = |bound_ct, ty| {
- self.mk_const(ty::Const { val: ty::ConstKind::Bound(ty::INNERMOST, bound_ct), ty })
- };
let mut region_map = BTreeMap::new();
- let real_fld_r = |br: ty::BoundRegion| *region_map.entry(br).or_insert_with(|| fld_r(br));
- let value = self.replace_escaping_bound_vars(value.skip_binder(), real_fld_r, fld_t, fld_c);
+ let mut real_fld_r =
+ |br: ty::BoundRegion| *region_map.entry(br).or_insert_with(|| fld_r(br));
+ let value = value.skip_binder();
+ let value = if !value.has_escaping_bound_vars() {
+ value
+ } else {
+ let mut replacer = BoundVarReplacer::new(self, Some(&mut real_fld_r), None, None);
+ value.fold_with(&mut replacer)
+ };
(value, region_map)
}
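Editor's note: a simplified sketch (plain `i64` values, not types/regions/consts) of the refactor above. Wrapping the callbacks in `Option<&mut dyn FnMut..>` lets callers such as `replace_late_bound_regions` supply only the folder they need, with the replacer falling back to identity instead of allocating no-op closures.

```rust
struct Replacer<'a> {
    map_num: Option<&'a mut (dyn FnMut(i64) -> i64 + 'a)>,
}

impl<'a> Replacer<'a> {
    fn fold(&mut self, v: i64) -> i64 {
        match self.map_num.as_mut() {
            Some(f) => f(v),
            None => v, // identity without a dummy callback
        }
    }
}

fn main() {
    let mut double = |x: i64| x * 2;
    let mut with = Replacer { map_num: Some(&mut double) };
    let mut without = Replacer { map_num: None };
    assert_eq!(with.fold(3), 6);
    assert_eq!(without.fold(3), 3);
}
```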
@@ -580,7 +577,8 @@
if !value.has_escaping_bound_vars() {
value
} else {
- let mut replacer = BoundVarReplacer::new(self, &mut fld_r, &mut fld_t, &mut fld_c);
+ let mut replacer =
+ BoundVarReplacer::new(self, Some(&mut fld_r), Some(&mut fld_t), Some(&mut fld_c));
value.fold_with(&mut replacer)
}
}
@@ -837,6 +835,7 @@
result
}
+ #[inline]
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
// If the outer-exclusive-binder is *strictly greater* than
// `outer_index`, that means that `t` contains some content
@@ -850,6 +849,7 @@
}
}
+ #[inline]
fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
// If the region is bound by `outer_index` or anything outside
// of outer index, then it escapes the binders we have
@@ -875,6 +875,7 @@
}
}
+ #[inline]
fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
if predicate.inner.outer_exclusive_binder > self.outer_index {
ControlFlow::Break(FoundEscapingVars)
@@ -895,6 +896,7 @@
impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
type BreakTy = FoundFlags;
+ #[inline]
fn visit_ty(&mut self, t: Ty<'_>) -> ControlFlow<Self::BreakTy> {
debug!(
"HasTypeFlagsVisitor: t={:?} t.flags={:?} self.flags={:?}",
@@ -909,6 +911,7 @@
}
}
+ #[inline]
fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
let flags = r.type_flags();
debug!("HasTypeFlagsVisitor: r={:?} r.flags={:?} self.flags={:?}", r, flags, self.flags);
@@ -919,6 +922,7 @@
}
}
+ #[inline]
fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
let flags = FlagComputation::for_const(c);
debug!("HasTypeFlagsVisitor: c={:?} c.flags={:?} self.flags={:?}", c, flags, self.flags);
@@ -929,6 +933,7 @@
}
}
+ #[inline]
fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
debug!(
"HasTypeFlagsVisitor: predicate={:?} predicate.flags={:?} self.flags={:?}",
diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs
new file mode 100644
index 0000000..79cd26f
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/generics.rs
@@ -0,0 +1,257 @@
+use crate::middle::resolve_lifetime::ObjectLifetimeDefault;
+use crate::ty;
+use crate::ty::subst::{Subst, SubstsRef};
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+
+use super::{EarlyBoundRegion, InstantiatedPredicates, ParamConst, ParamTy, Predicate, TyCtxt};
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum GenericParamDefKind {
+ Lifetime,
+ Type {
+ has_default: bool,
+ object_lifetime_default: ObjectLifetimeDefault,
+ synthetic: Option<hir::SyntheticTyParamKind>,
+ },
+ Const,
+}
+
+impl GenericParamDefKind {
+ pub fn descr(&self) -> &'static str {
+ match self {
+ GenericParamDefKind::Lifetime => "lifetime",
+ GenericParamDefKind::Type { .. } => "type",
+ GenericParamDefKind::Const => "constant",
+ }
+ }
+ pub fn to_ord(&self, tcx: TyCtxt<'_>) -> ast::ParamKindOrd {
+ match self {
+ GenericParamDefKind::Lifetime => ast::ParamKindOrd::Lifetime,
+ GenericParamDefKind::Type { .. } => ast::ParamKindOrd::Type,
+ GenericParamDefKind::Const => {
+ ast::ParamKindOrd::Const { unordered: tcx.features().const_generics }
+ }
+ }
+ }
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct GenericParamDef {
+ pub name: Symbol,
+ pub def_id: DefId,
+ pub index: u32,
+
+ /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute
+ /// on generic parameter `'a`/`T`, asserts data behind the parameter
+ /// `'a`/`T` won't be accessed during the parent type's `Drop` impl.
+ pub pure_wrt_drop: bool,
+
+ pub kind: GenericParamDefKind,
+}
+
+impl GenericParamDef {
+ pub fn to_early_bound_region_data(&self) -> ty::EarlyBoundRegion {
+ if let GenericParamDefKind::Lifetime = self.kind {
+ ty::EarlyBoundRegion { def_id: self.def_id, index: self.index, name: self.name }
+ } else {
+ bug!("cannot convert a non-lifetime parameter def to an early bound region")
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct GenericParamCount {
+ pub lifetimes: usize,
+ pub types: usize,
+ pub consts: usize,
+}
+
+/// Information about the formal type/lifetime parameters associated
+/// with an item or method. Analogous to `hir::Generics`.
+///
+/// The ordering of parameters is the same as in `Subst` (excluding child generics):
+/// `Self` (optionally), `Lifetime` params..., `Type` params...
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct Generics {
+ pub parent: Option<DefId>,
+ pub parent_count: usize,
+ pub params: Vec<GenericParamDef>,
+
+ /// Reverse map to the `index` field of each `GenericParamDef`.
+ #[stable_hasher(ignore)]
+ pub param_def_id_to_index: FxHashMap<DefId, u32>,
+
+ pub has_self: bool,
+ pub has_late_bound_regions: Option<Span>,
+}
+
+impl<'tcx> Generics {
+ pub fn count(&self) -> usize {
+ self.parent_count + self.params.len()
+ }
+
+ pub fn own_counts(&self) -> GenericParamCount {
+ // We could cache this as a property of `GenericParamCount`, but
+ // the aim is to refactor this away entirely eventually and the
+ // presence of this method will be a constant reminder.
+ let mut own_counts = GenericParamCount::default();
+
+ for param in &self.params {
+ match param.kind {
+ GenericParamDefKind::Lifetime => own_counts.lifetimes += 1,
+ GenericParamDefKind::Type { .. } => own_counts.types += 1,
+ GenericParamDefKind::Const => own_counts.consts += 1,
+ }
+ }
+
+ own_counts
+ }
+
+ pub fn own_defaults(&self) -> GenericParamCount {
+ let mut own_defaults = GenericParamCount::default();
+
+ for param in &self.params {
+ match param.kind {
+ GenericParamDefKind::Lifetime => (),
+ GenericParamDefKind::Type { has_default, .. } => {
+ own_defaults.types += has_default as usize;
+ }
+ GenericParamDefKind::Const => {
+ // FIXME(const_generics:defaults)
+ }
+ }
+ }
+
+ own_defaults
+ }
+
+ pub fn requires_monomorphization(&self, tcx: TyCtxt<'tcx>) -> bool {
+ if self.own_requires_monomorphization() {
+ return true;
+ }
+
+ if let Some(parent_def_id) = self.parent {
+ let parent = tcx.generics_of(parent_def_id);
+ parent.requires_monomorphization(tcx)
+ } else {
+ false
+ }
+ }
+
+ pub fn own_requires_monomorphization(&self) -> bool {
+ for param in &self.params {
+ match param.kind {
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const => return true,
+ GenericParamDefKind::Lifetime => {}
+ }
+ }
+ false
+ }
+
+ /// Returns the `GenericParamDef` with the given index.
+ pub fn param_at(&'tcx self, param_index: usize, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
+ if let Some(index) = param_index.checked_sub(self.parent_count) {
+ &self.params[index]
+ } else {
+ tcx.generics_of(self.parent.expect("parent_count > 0 but no parent?"))
+ .param_at(param_index, tcx)
+ }
+ }
+
+ /// Returns the `GenericParamDef` associated with this `EarlyBoundRegion`.
+ pub fn region_param(
+ &'tcx self,
+ param: &EarlyBoundRegion,
+ tcx: TyCtxt<'tcx>,
+ ) -> &'tcx GenericParamDef {
+ let param = self.param_at(param.index as usize, tcx);
+ match param.kind {
+ GenericParamDefKind::Lifetime => param,
+ _ => bug!("expected lifetime parameter, but found another generic parameter"),
+ }
+ }
+
+ /// Returns the `GenericParamDef` associated with this `ParamTy`.
+ pub fn type_param(&'tcx self, param: &ParamTy, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
+ let param = self.param_at(param.index as usize, tcx);
+ match param.kind {
+ GenericParamDefKind::Type { .. } => param,
+ _ => bug!("expected type parameter, but found another generic parameter"),
+ }
+ }
+
+ /// Returns the `GenericParamDef` associated with this `ParamConst`.
+ pub fn const_param(&'tcx self, param: &ParamConst, tcx: TyCtxt<'tcx>) -> &GenericParamDef {
+ let param = self.param_at(param.index as usize, tcx);
+ match param.kind {
+ GenericParamDefKind::Const => param,
+ _ => bug!("expected const parameter, but found another generic parameter"),
+ }
+ }
+}
+
+/// Bounds on generics.
+#[derive(Copy, Clone, Default, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct GenericPredicates<'tcx> {
+ pub parent: Option<DefId>,
+ pub predicates: &'tcx [(Predicate<'tcx>, Span)],
+}
+
+impl<'tcx> GenericPredicates<'tcx> {
+ pub fn instantiate(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> InstantiatedPredicates<'tcx> {
+ let mut instantiated = InstantiatedPredicates::empty();
+ self.instantiate_into(tcx, &mut instantiated, substs);
+ instantiated
+ }
+
+ pub fn instantiate_own(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> InstantiatedPredicates<'tcx> {
+ InstantiatedPredicates {
+ predicates: self.predicates.iter().map(|(p, _)| p.subst(tcx, substs)).collect(),
+ spans: self.predicates.iter().map(|(_, sp)| *sp).collect(),
+ }
+ }
+
+ fn instantiate_into(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ instantiated: &mut InstantiatedPredicates<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) {
+ if let Some(def_id) = self.parent {
+ tcx.predicates_of(def_id).instantiate_into(tcx, instantiated, substs);
+ }
+ instantiated.predicates.extend(self.predicates.iter().map(|(p, _)| p.subst(tcx, substs)));
+ instantiated.spans.extend(self.predicates.iter().map(|(_, sp)| *sp));
+ }
+
+ pub fn instantiate_identity(&self, tcx: TyCtxt<'tcx>) -> InstantiatedPredicates<'tcx> {
+ let mut instantiated = InstantiatedPredicates::empty();
+ self.instantiate_identity_into(tcx, &mut instantiated);
+ instantiated
+ }
+
+ fn instantiate_identity_into(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ instantiated: &mut InstantiatedPredicates<'tcx>,
+ ) {
+ if let Some(def_id) = self.parent {
+ tcx.predicates_of(def_id).instantiate_identity_into(tcx, instantiated);
+ }
+ instantiated.predicates.extend(self.predicates.iter().map(|(p, _)| p));
+ instantiated.spans.extend(self.predicates.iter().map(|(_, s)| s));
+ }
+}
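Editor's note: a toy sketch (plain strings instead of `Predicate<'tcx>`, no substitution) of the parent-first ordering in `instantiate_into` above: predicates inherited from a parent item are appended before the item's own.

```rust
#[derive(Clone, Debug, PartialEq)]
struct Predicates {
    parent: Option<Box<Predicates>>,
    own: Vec<&'static str>,
}

impl Predicates {
    fn instantiate_into(&self, out: &mut Vec<&'static str>) {
        // Recurse into the parent first, then add our own predicates.
        if let Some(parent) = &self.parent {
            parent.instantiate_into(out);
        }
        out.extend(self.own.iter().copied());
    }
}

fn main() {
    let parent = Predicates { parent: None, own: vec!["T: Sized"] };
    let child = Predicates { parent: Some(Box::new(parent)), own: vec!["T: Clone"] };
    let mut all = Vec::new();
    child.instantiate_into(&mut all);
    assert_eq!(all, ["T: Sized", "T: Clone"]); // parent predicates come first
}
```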
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
index 6ca5dcc..23cedfd 100644
--- a/compiler/rustc_middle/src/ty/instance.rs
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -347,6 +347,7 @@
}
// This should be kept up to date with `resolve`.
+ #[instrument(level = "debug", skip(tcx))]
pub fn resolve_opt_const_arg(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -498,7 +499,7 @@
}
/// Returns a new `Instance` where generic parameters in `instance.substs` are replaced by
- /// identify parameters if they are determined to be unused in `instance.def`.
+ /// identity parameters if they are determined to be unused in `instance.def`.
pub fn polymorphize(self, tcx: TyCtxt<'tcx>) -> Self {
debug!("polymorphize: running polymorphization analysis");
if !tcx.sess.opts.debugging_opts.polymorphize {
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 596e4f6..814581a 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -130,6 +130,7 @@
if repr.c() {
match &tcx.sess.target.arch[..] {
+ "hexagon" => min_from_extern = Some(I8),
// WARNING: the ARM EABI has two variants; the one corresponding
// to `at_least == I32` appears to be used on Linux and NetBSD,
// but some systems may use the variant corresponding to no
@@ -188,6 +189,13 @@
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
+/// The maximum supported number of lanes in a SIMD vector.
+///
+/// This value is selected based on backend support:
+/// * LLVM does not appear to have a vector width limit.
+/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
+pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
+
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
Unknown(Ty<'tcx>),
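Editor's note: a quick arithmetic check of the constant above. Cranelift's 4-bit field can hold a base-2 log of at most `0xF` = 15, so the largest representable lane count is 2^15 = 32768; lengths above that are rejected by the check added further down.

```rust
const MAX_SIMD_LANES: u64 = 1 << 0xF;

fn main() {
    assert_eq!(MAX_SIMD_LANES, 32_768);
    assert_eq!(MAX_SIMD_LANES, 2u64.pow(15));
}
```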
@@ -224,7 +232,7 @@
let layout = cx.layout_raw_uncached(ty);
// Type-level uninhabitedness should always imply ABI uninhabitedness.
if let Ok(layout) = layout {
- if ty.conservative_is_privately_uninhabited(tcx) {
+ if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
assert!(layout.abi.is_uninhabited());
}
}
@@ -576,11 +584,12 @@
let size =
element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
- let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
- Abi::Uninhabited
- } else {
- Abi::Aggregate { sized: true }
- };
+ let abi =
+ if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
+ Abi::Uninhabited
+ } else {
+ Abi::Aggregate { sized: true }
+ };
let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
@@ -717,10 +726,17 @@
};
// SIMD vectors of zero length are not supported.
+ // Additionally, lengths are capped at `MAX_SIMD_LANES` (2^15) as a fixed maximum
+ // backends must support.
//
// Can't be caught in typeck if the array length is generic.
if e_len == 0 {
tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
+ } else if e_len > MAX_SIMD_LANES {
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` of length greater than {}",
+ ty, MAX_SIMD_LANES,
+ ));
}
// Compute the ABI of the element type:
@@ -2546,6 +2562,7 @@
panic_strategy: PanicStrategy,
codegen_fn_attr_flags: CodegenFnAttrFlags,
call_conv: Conv,
+ abi: SpecAbi,
) -> bool {
if panic_strategy != PanicStrategy::Unwind {
// In panic=abort mode we assume nothing can unwind anywhere, so
@@ -2570,17 +2587,34 @@
//
// 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
//
- // Foreign items (case 1) are assumed to not unwind; it is
- // UB otherwise. (At least for now; see also
- // rust-lang/rust#63909 and Rust RFC 2753.)
- //
- // Items defined in Rust with non-Rust ABIs (case 2) are also
- // not supposed to unwind. Whether this should be enforced
- // (versus stating it is UB) and *how* it would be enforced
- // is currently under discussion; see rust-lang/rust#58794.
- //
- // In either case, we mark item as explicitly nounwind.
- false
+ // In both of these cases, we should refer to the ABI to determine whether or not we
+ // should unwind. See Rust RFC 2945 for more information on this behavior, here:
+ // https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
+ use SpecAbi::*;
+ match abi {
+ C { unwind } | Stdcall { unwind } | System { unwind } | Thiscall { unwind } => {
+ unwind
+ }
+ Cdecl
+ | Fastcall
+ | Vectorcall
+ | Aapcs
+ | Win64
+ | SysV64
+ | PtxKernel
+ | Msp430Interrupt
+ | X86Interrupt
+ | AmdGpuKernel
+ | EfiApi
+ | AvrInterrupt
+ | AvrNonBlockingInterrupt
+ | CCmseNonSecureCall
+ | RustIntrinsic
+ | PlatformIntrinsic
+ | Unadjusted => false,
+ // In the `if` above, we checked for functions with the Rust calling convention.
+ Rust | RustCall => unreachable!(),
+ }
}
}
}
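Editor's note: an illustrative user-level counterpart of the ABI table above, per RFC 2945. The plain ABI strings are treated as nounwind, while the `-unwind` variants may propagate a Rust panic to a caller using the same unwinding mechanism. The `c_unwind` feature gate was still nightly-only at this point (it was stabilized much later).

```rust
#![feature(c_unwind)] // nightly-only when this change landed

extern "C" fn nounwind_cb() {
    // A panic escaping here is not permitted (`unwind: false` above).
}

extern "C-unwind" fn unwind_cb() {
    // A panic is allowed to unwind out of this function.
    panic!("propagated across the ABI boundary");
}

fn main() {
    nounwind_cb();
    let result = std::panic::catch_unwind(|| unwind_cb());
    assert!(result.is_err());
}
```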
@@ -2638,14 +2672,14 @@
RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
// It's the ABI's job to select this, not ours.
- System => bug!("system abi should be selected elsewhere"),
+ System { .. } => bug!("system abi should be selected elsewhere"),
EfiApi => bug!("eficall abi should be selected elsewhere"),
- Stdcall => Conv::X86Stdcall,
+ Stdcall { .. } => Conv::X86Stdcall,
Fastcall => Conv::X86Fastcall,
Vectorcall => Conv::X86VectorCall,
- Thiscall => Conv::X86ThisCall,
- C => Conv::C,
+ Thiscall { .. } => Conv::X86ThisCall,
+ C { .. } => Conv::C,
Unadjusted => Conv::C,
Win64 => Conv::X86_64Win64,
SysV64 => Conv::X86_64SysV,
@@ -2807,7 +2841,12 @@
c_variadic: sig.c_variadic,
fixed_count: inputs.len(),
conv,
- can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
+ can_unwind: fn_can_unwind(
+ cx.tcx().sess.panic_strategy(),
+ codegen_fn_attr_flags,
+ conv,
+ sig.abi,
+ ),
};
fn_abi.adjust_for_abi(cx, sig.abi);
debug!("FnAbi::new_internal = {:?}", fn_abi);
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index babab00..5bbf7b3 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -9,107 +9,78 @@
//!
//! ["The `ty` module: representing types"]: https://rustc-dev-guide.rust-lang.org/ty.html
-// ignore-tidy-filelength
pub use self::fold::{TypeFoldable, TypeFolder, TypeVisitor};
pub use self::AssocItemContainer::*;
pub use self::BorrowKind::*;
pub use self::IntVarValue::*;
pub use self::Variance::*;
+pub use adt::*;
+pub use assoc::*;
+pub use closure::*;
+pub use generics::*;
use crate::hir::exports::ExportMap;
-use crate::hir::place::{
- Place as HirPlace, PlaceBase as HirPlaceBase, ProjectionKind as HirProjectionKind,
-};
use crate::ich::StableHashingContext;
use crate::middle::cstore::CrateStoreDyn;
-use crate::middle::resolve_lifetime::ObjectLifetimeDefault;
-use crate::mir::interpret::ErrorHandled;
-use crate::mir::Body;
-use crate::mir::GeneratorLayout;
+use crate::mir::{Body, GeneratorLayout};
use crate::traits::{self, Reveal};
use crate::ty;
use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
-use crate::ty::util::{Discr, IntTypeExt};
+use crate::ty::util::Discr;
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::captures::Captures;
-use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::fx::FxHashSet;
-use rustc_data_structures::fx::FxIndexMap;
-use rustc_data_structures::sorted_map::SortedIndexMultiMap;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{self, par_iter, ParallelIterator};
use rustc_data_structures::tagged_ptr::CopyTaggedPtr;
-use rustc_errors::ErrorReported;
use rustc_hir as hir;
-use rustc_hir::def::{CtorKind, CtorOf, DefKind, Namespace, Res};
+use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, CRATE_DEF_INDEX};
-use rustc_hir::lang_items::LangItem;
use rustc_hir::{Constness, Node};
-use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable;
-use rustc_serialize::{self, Encodable, Encoder};
-use rustc_session::DataTypeKind;
use rustc_span::hygiene::ExpnId;
-use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::symbol::{kw, Ident, Symbol};
use rustc_span::Span;
-use rustc_target::abi::{Align, VariantIdx};
+use rustc_target::abi::Align;
-use std::cell::RefCell;
use std::cmp::Ordering;
-use std::fmt;
use std::hash::{Hash, Hasher};
-use std::ops::{ControlFlow, Range};
-use std::ptr;
-use std::str;
+use std::ops::ControlFlow;
+use std::{fmt, ptr, str};
-pub use self::sty::BoundRegionKind::*;
-pub use self::sty::RegionKind;
-pub use self::sty::RegionKind::*;
-pub use self::sty::TyKind::*;
-pub use self::sty::{Binder, BoundTy, BoundTyKind, BoundVar};
-pub use self::sty::{BoundRegion, BoundRegionKind, EarlyBoundRegion, FreeRegion, Region};
-pub use self::sty::{CanonicalPolyFnSig, FnSig, GenSig, PolyFnSig, PolyGenSig};
-pub use self::sty::{ClosureSubsts, GeneratorSubsts, TypeAndMut, UpvarSubsts};
-pub use self::sty::{ClosureSubstsParts, GeneratorSubstsParts};
-pub use self::sty::{ConstVid, RegionVid};
-pub use self::sty::{ExistentialPredicate, ParamConst, ParamTy, ProjectionTy};
-pub use self::sty::{ExistentialProjection, PolyExistentialProjection};
-pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef};
-pub use self::sty::{PolyTraitRef, TraitRef, TyKind};
pub use crate::ty::diagnostics::*;
pub use rustc_type_ir::InferTy::*;
pub use rustc_type_ir::*;
pub use self::binding::BindingMode;
pub use self::binding::BindingMode::*;
-
-pub use self::context::{tls, FreeRegionInfo, TyCtxt};
+pub use self::consts::{Const, ConstInt, ConstKind, InferConst, ScalarInt, ValTree};
pub use self::context::{
- CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations,
- DelaySpanBugEmitted, ResolvedOpaqueTy, UserType, UserTypeAnnotationIndex,
+ tls, CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations,
+ CtxtInterners, DelaySpanBugEmitted, FreeRegionInfo, GeneratorInteriorTypeCause, GlobalCtxt,
+ Lift, ResolvedOpaqueTy, TyCtxt, TypeckResults, UserType, UserTypeAnnotationIndex,
};
-pub use self::context::{
- CtxtInterners, GeneratorInteriorTypeCause, GlobalCtxt, Lift, TypeckResults,
-};
-
pub use self::instance::{Instance, InstanceDef};
-
pub use self::list::List;
-
+pub use self::sty::BoundRegionKind::*;
+pub use self::sty::RegionKind::*;
+pub use self::sty::TyKind::*;
+pub use self::sty::{
+ Binder, BoundRegion, BoundRegionKind, BoundTy, BoundTyKind, BoundVar, CanonicalPolyFnSig,
+ ClosureSubsts, ClosureSubstsParts, ConstVid, EarlyBoundRegion, ExistentialPredicate,
+ ExistentialProjection, ExistentialTraitRef, FnSig, FreeRegion, GenSig, GeneratorSubsts,
+ GeneratorSubstsParts, ParamConst, ParamTy, PolyExistentialProjection, PolyExistentialTraitRef,
+ PolyFnSig, PolyGenSig, PolyTraitRef, ProjectionTy, Region, RegionKind, RegionVid, TraitRef,
+ TyKind, TypeAndMut, UpvarSubsts,
+};
pub use self::trait_def::TraitDef;
-pub use self::query::queries;
-
-pub use self::consts::{Const, ConstInt, ConstKind, InferConst, ScalarInt};
-
pub mod _match;
pub mod adjustment;
pub mod binding;
pub mod cast;
pub mod codec;
-mod erase_regions;
pub mod error;
pub mod fast_reject;
pub mod flags;
@@ -126,9 +97,14 @@
pub mod util;
pub mod walk;
+mod adt;
+mod assoc;
+mod closure;
mod consts;
mod context;
mod diagnostics;
+mod erase_regions;
+mod generics;
mod instance;
mod list;
mod structural_impls;
@@ -150,30 +126,6 @@
pub extern_prelude: FxHashMap<Symbol, bool>,
}
-#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable, Hash)]
-pub enum AssocItemContainer {
- TraitContainer(DefId),
- ImplContainer(DefId),
-}
-
-impl AssocItemContainer {
- /// Asserts that this is the `DefId` of an associated item declared
- /// in a trait, and returns the trait `DefId`.
- pub fn assert_trait(&self) -> DefId {
- match *self {
- TraitContainer(id) => id,
- _ => bug!("associated item has wrong container type: {:?}", self),
- }
- }
-
- pub fn id(&self) -> DefId {
- match *self {
- TraitContainer(id) => id,
- ImplContainer(id) => id,
- }
- }
-}
-
/// The "header" of an impl is everything outside the body: a Self type, a trait
/// ref (in the case of a trait impl), and a set of predicates (from the
/// bounds / where-clauses).
@@ -198,142 +150,6 @@
Reservation,
}
-#[derive(Copy, Clone, Debug, PartialEq, HashStable, Eq, Hash)]
-pub struct AssocItem {
- pub def_id: DefId,
- #[stable_hasher(project(name))]
- pub ident: Ident,
- pub kind: AssocKind,
- pub vis: Visibility,
- pub defaultness: hir::Defaultness,
- pub container: AssocItemContainer,
-
- /// Whether this is a method with an explicit self
- /// as its first parameter, allowing method calls.
- pub fn_has_self_parameter: bool,
-}
-
-#[derive(Copy, Clone, PartialEq, Debug, HashStable, Eq, Hash)]
-pub enum AssocKind {
- Const,
- Fn,
- Type,
-}
-
-impl AssocKind {
- pub fn namespace(&self) -> Namespace {
- match *self {
- ty::AssocKind::Type => Namespace::TypeNS,
- ty::AssocKind::Const | ty::AssocKind::Fn => Namespace::ValueNS,
- }
- }
-
- pub fn as_def_kind(&self) -> DefKind {
- match self {
- AssocKind::Const => DefKind::AssocConst,
- AssocKind::Fn => DefKind::AssocFn,
- AssocKind::Type => DefKind::AssocTy,
- }
- }
-}
-
-impl AssocItem {
- pub fn signature(&self, tcx: TyCtxt<'_>) -> String {
- match self.kind {
- ty::AssocKind::Fn => {
- // We skip the binder here because the binder would deanonymize all
- // late-bound regions, and we don't want method signatures to show up
- // `as for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound
- // regions just fine, showing `fn(&MyType)`.
- tcx.fn_sig(self.def_id).skip_binder().to_string()
- }
- ty::AssocKind::Type => format!("type {};", self.ident),
- ty::AssocKind::Const => {
- format!("const {}: {:?};", self.ident, tcx.type_of(self.def_id))
- }
- }
- }
-}
-
-/// A list of `ty::AssocItem`s in definition order that allows for efficient lookup by name.
-///
-/// When doing lookup by name, we try to postpone hygienic comparison for as long as possible since
-/// it is relatively expensive. Instead, items are indexed by `Symbol` and hygienic comparison is
-/// done only on items with the same name.
-#[derive(Debug, Clone, PartialEq, HashStable)]
-pub struct AssociatedItems<'tcx> {
- items: SortedIndexMultiMap<u32, Symbol, &'tcx ty::AssocItem>,
-}
-
-impl<'tcx> AssociatedItems<'tcx> {
- /// Constructs an `AssociatedItems` map from a series of `ty::AssocItem`s in definition order.
- pub fn new(items_in_def_order: impl IntoIterator<Item = &'tcx ty::AssocItem>) -> Self {
- let items = items_in_def_order.into_iter().map(|item| (item.ident.name, item)).collect();
- AssociatedItems { items }
- }
-
- /// Returns a slice of associated items in the order they were defined.
- ///
- /// New code should avoid relying on definition order. If you need a particular associated item
- /// for a known trait, make that trait a lang item instead of indexing this array.
- pub fn in_definition_order(&self) -> impl '_ + Iterator<Item = &ty::AssocItem> {
- self.items.iter().map(|(_, v)| *v)
- }
-
- pub fn len(&self) -> usize {
- self.items.len()
- }
-
- /// Returns an iterator over all associated items with the given name, ignoring hygiene.
- pub fn filter_by_name_unhygienic(
- &self,
- name: Symbol,
- ) -> impl '_ + Iterator<Item = &ty::AssocItem> {
- self.items.get_by_key(&name).copied()
- }
-
- /// Returns an iterator over all associated items with the given name.
- ///
- /// Multiple items may have the same name if they are in different `Namespace`s. For example,
- /// an associated type can have the same name as a method. Use one of the `find_by_name_and_*`
- /// methods below if you know which item you are looking for.
- pub fn filter_by_name(
- &'a self,
- tcx: TyCtxt<'a>,
- ident: Ident,
- parent_def_id: DefId,
- ) -> impl 'a + Iterator<Item = &'a ty::AssocItem> {
- self.filter_by_name_unhygienic(ident.name)
- .filter(move |item| tcx.hygienic_eq(ident, item.ident, parent_def_id))
- }
-
- /// Returns the associated item with the given name and `AssocKind`, if one exists.
- pub fn find_by_name_and_kind(
- &self,
- tcx: TyCtxt<'_>,
- ident: Ident,
- kind: AssocKind,
- parent_def_id: DefId,
- ) -> Option<&ty::AssocItem> {
- self.filter_by_name_unhygienic(ident.name)
- .filter(|item| item.kind == kind)
- .find(|item| tcx.hygienic_eq(ident, item.ident, parent_def_id))
- }
-
- /// Returns the associated item with the given name in the given `Namespace`, if one exists.
- pub fn find_by_name_and_namespace(
- &self,
- tcx: TyCtxt<'_>,
- ident: Ident,
- ns: Namespace,
- parent_def_id: DefId,
- ) -> Option<&ty::AssocItem> {
- self.filter_by_name_unhygienic(ident.name)
- .filter(|item| item.kind.namespace() == ns)
- .find(|item| tcx.hygienic_eq(ident, item.ident, parent_def_id))
- }
-}
-
#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, TyEncodable, TyDecodable, HashStable)]
pub enum Visibility {
/// Visible everywhere (including in other crates).
@@ -485,7 +301,7 @@
}
// `TyS` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(TyS<'_>, 32);
impl<'tcx> Ord for TyS<'tcx> {
@@ -533,243 +349,6 @@
#[rustc_diagnostic_item = "Ty"]
pub type Ty<'tcx> = &'tcx TyS<'tcx>;
-#[derive(
- Clone,
- Copy,
- Debug,
- PartialEq,
- Eq,
- Hash,
- TyEncodable,
- TyDecodable,
- TypeFoldable,
- HashStable
-)]
-pub struct UpvarPath {
- pub hir_id: hir::HirId,
-}
-
-/// Upvars do not get their own `NodeId`. Instead, we use the pair of
-/// the original var ID (that is, the root variable that is referenced
-/// by the upvar) and the ID of the closure expression.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
-pub struct UpvarId {
- pub var_path: UpvarPath,
- pub closure_expr_id: LocalDefId,
-}
-
-impl UpvarId {
- pub fn new(var_hir_id: hir::HirId, closure_def_id: LocalDefId) -> UpvarId {
- UpvarId { var_path: UpvarPath { hir_id: var_hir_id }, closure_expr_id: closure_def_id }
- }
-}
-
-#[derive(Clone, PartialEq, Debug, TyEncodable, TyDecodable, TypeFoldable, Copy, HashStable)]
-pub enum BorrowKind {
- /// Data must be immutable and is aliasable.
- ImmBorrow,
-
- /// Data must be immutable but not aliasable. This kind of borrow
- /// cannot currently be expressed by the user and is used only in
- /// implicit closure bindings. It is needed when the closure
- /// is borrowing or mutating a mutable referent, e.g.:
- ///
- /// ```
- /// let x: &mut isize = ...;
- /// let y = || *x += 5;
- /// ```
- ///
- /// If we were to try to translate this closure into a more explicit
- /// form, we'd encounter an error with the code as written:
- ///
- /// ```
- /// struct Env { x: & &mut isize }
- /// let x: &mut isize = ...;
- /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn
- /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
- /// ```
- ///
- /// This is then illegal because you cannot mutate a `&mut` found
- /// in an aliasable location. To solve, you'd have to translate with
- /// an `&mut` borrow:
- ///
- /// ```
- /// struct Env { x: & &mut isize }
- /// let x: &mut isize = ...;
- /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x
- /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
- /// ```
- ///
- /// Now the assignment to `**env.x` is legal, but creating a
- /// mutable pointer to `x` is not because `x` is not mutable. We
- /// could fix this by declaring `x` as `let mut x`. This is ok in
- /// user code, if awkward, but extra weird for closures, since the
- /// borrow is hidden.
- ///
- /// So we introduce a "unique imm" borrow -- the referent is
- /// immutable, but not aliasable. This solves the problem. For
- /// simplicity, we don't give users the way to express this
- /// borrow, it's just used when translating closures.
- UniqueImmBorrow,
-
- /// Data is mutable and not aliasable.
- MutBorrow,
-}
-
-/// Information describing the capture of an upvar. This is computed
-/// during `typeck`, specifically by `regionck`.
-#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
-pub enum UpvarCapture<'tcx> {
- /// Upvar is captured by value. This is always true when the
- /// closure is labeled `move`, but can also be true in other cases
- /// depending on inference.
- ///
- /// If the upvar was inferred to be captured by value (e.g. `move`
- /// was not used), then the `Span` points to a usage that
- /// required it. There may be more than one such usage
- /// (e.g. `|| { a; a; }`), in which case we pick an
- /// arbitrary one.
- ByValue(Option<Span>),
-
- /// Upvar is captured by reference.
- ByRef(UpvarBorrow<'tcx>),
-}
-
-#[derive(PartialEq, Clone, Copy, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
-pub struct UpvarBorrow<'tcx> {
- /// The kind of borrow: by-ref upvars have access to shared
- /// immutable borrows, which are not part of the normal language
- /// syntax.
- pub kind: BorrowKind,
-
- /// Region of the resulting reference.
- pub region: ty::Region<'tcx>,
-}
-
-/// Given the closure DefId this map provides a map of root variables to minimum
-/// set of `CapturedPlace`s that need to be tracked to support all captures of that closure.
-pub type MinCaptureInformationMap<'tcx> = FxHashMap<DefId, RootVariableMinCaptureList<'tcx>>;
-
-/// Part of `MinCaptureInformationMap`; Maps a root variable to the list of `CapturedPlace`.
-/// Used to track the minimum set of `Place`s that need to be captured to support all
-/// Places captured by the closure starting at a given root variable.
-///
-/// This provides a convenient and quick way of checking if a variable being used within
-/// a closure is a capture of a local variable.
-pub type RootVariableMinCaptureList<'tcx> = FxIndexMap<hir::HirId, MinCaptureList<'tcx>>;
-
-/// Part of `MinCaptureInformationMap`; List of `CapturePlace`s.
-pub type MinCaptureList<'tcx> = Vec<CapturedPlace<'tcx>>;
-
-/// A composite describing a `Place` that is captured by a closure.
-#[derive(PartialEq, Clone, Debug, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
-pub struct CapturedPlace<'tcx> {
- /// The `Place` that is captured.
- pub place: HirPlace<'tcx>,
-
- /// `CaptureKind` and expression(s) that resulted in such capture of `place`.
- pub info: CaptureInfo<'tcx>,
-
- /// Represents if `place` can be mutated or not.
- pub mutability: hir::Mutability,
-}
-
-impl CapturedPlace<'tcx> {
- /// Returns the hir-id of the root variable for the captured place.
- /// e.g., if `a.b.c` was captured, would return the hir-id for `a`.
- pub fn get_root_variable(&self) -> hir::HirId {
- match self.place.base {
- HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
- base => bug!("Expected upvar, found={:?}", base),
- }
- }
-}
-
-pub fn place_to_string_for_capture(tcx: TyCtxt<'tcx>, place: &HirPlace<'tcx>) -> String {
- let name = match place.base {
- HirPlaceBase::Upvar(upvar_id) => tcx.hir().name(upvar_id.var_path.hir_id).to_string(),
- _ => bug!("Capture_information should only contain upvars"),
- };
- let mut curr_string = name;
-
- for (i, proj) in place.projections.iter().enumerate() {
- match proj.kind {
- HirProjectionKind::Deref => {
- curr_string = format!("*{}", curr_string);
- }
- HirProjectionKind::Field(idx, variant) => match place.ty_before_projection(i).kind() {
- ty::Adt(def, ..) => {
- curr_string = format!(
- "{}.{}",
- curr_string,
- def.variants[variant].fields[idx as usize].ident.name.as_str()
- );
- }
- ty::Tuple(_) => {
- curr_string = format!("{}.{}", curr_string, idx);
- }
- _ => {
- bug!(
- "Field projection applied to a type other than Adt or Tuple: {:?}.",
- place.ty_before_projection(i).kind()
- )
- }
- },
- proj => bug!("{:?} unexpected because it isn't captured", proj),
- }
- }
-
- curr_string.to_string()
-}
-
-/// Part of `MinCaptureInformationMap`; describes the capture kind (&, &mut, move)
-/// for a particular capture as well as identifying the part of the source code
-/// that triggered this capture to occur.
-#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
-pub struct CaptureInfo<'tcx> {
- /// Expr Id pointing to use that resulted in selecting the current capture kind
- ///
- /// Eg:
- /// ```rust,no_run
- /// let mut t = (0,1);
- ///
- /// let c = || {
- /// println!("{}",t); // L1
- /// t.1 = 4; // L2
- /// };
- /// ```
- /// `capture_kind_expr_id` will point to the use on L2 and `path_expr_id` will point to the
- /// use on L1.
- ///
- /// If the user doesn't enable feature `capture_disjoint_fields` (RFC 2229) then, it is
- /// possible that we don't see the use of a particular place resulting in capture_kind_expr_id being
- /// None. In such case we fallback on uvpars_mentioned for span.
- ///
- /// Eg:
- /// ```rust,no_run
- /// let x = 5;
- ///
- /// let c = || {
- /// let _ = x
- /// };
- /// ```
- ///
- /// In this example, if `capture_disjoint_fields` is **not** set, then x will be captured,
- /// but we won't see it being used during capture analysis, since it's essentially a discard.
- pub capture_kind_expr_id: Option<hir::HirId>,
- /// Expr Id pointing to use that resulted the corresponding place being captured
- ///
- /// See `capture_kind_expr_id` for example.
- ///
- pub path_expr_id: Option<hir::HirId>,
-
- /// Capture mode that was selected
- pub capture_kind: UpvarCapture<'tcx>,
-}
-
-pub type UpvarListMap = FxHashMap<DefId, FxIndexMap<hir::HirId, UpvarId>>;
-pub type UpvarCaptureMap<'tcx> = FxHashMap<UpvarId, UpvarCapture<'tcx>>;
-
impl ty::EarlyBoundRegion {
/// Does this early bound region have a name? Early bound regions normally
/// always have names except when using anonymous lifetimes (`'_`).
@@ -778,252 +357,6 @@
}
}
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
-pub enum GenericParamDefKind {
- Lifetime,
- Type {
- has_default: bool,
- object_lifetime_default: ObjectLifetimeDefault,
- synthetic: Option<hir::SyntheticTyParamKind>,
- },
- Const,
-}
-
-impl GenericParamDefKind {
- pub fn descr(&self) -> &'static str {
- match self {
- GenericParamDefKind::Lifetime => "lifetime",
- GenericParamDefKind::Type { .. } => "type",
- GenericParamDefKind::Const => "constant",
- }
- }
- pub fn to_ord(&self, tcx: TyCtxt<'_>) -> ast::ParamKindOrd {
- match self {
- GenericParamDefKind::Lifetime => ast::ParamKindOrd::Lifetime,
- GenericParamDefKind::Type { .. } => ast::ParamKindOrd::Type,
- GenericParamDefKind::Const => {
- ast::ParamKindOrd::Const { unordered: tcx.features().const_generics }
- }
- }
- }
-}
-
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
-pub struct GenericParamDef {
- pub name: Symbol,
- pub def_id: DefId,
- pub index: u32,
-
- /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute
- /// on generic parameter `'a`/`T`, asserts data behind the parameter
- /// `'a`/`T` won't be accessed during the parent type's `Drop` impl.
- pub pure_wrt_drop: bool,
-
- pub kind: GenericParamDefKind,
-}
-
-impl GenericParamDef {
- pub fn to_early_bound_region_data(&self) -> ty::EarlyBoundRegion {
- if let GenericParamDefKind::Lifetime = self.kind {
- ty::EarlyBoundRegion { def_id: self.def_id, index: self.index, name: self.name }
- } else {
- bug!("cannot convert a non-lifetime parameter def to an early bound region")
- }
- }
-}
-
-#[derive(Default)]
-pub struct GenericParamCount {
- pub lifetimes: usize,
- pub types: usize,
- pub consts: usize,
-}
-
-/// Information about the formal type/lifetime parameters associated
-/// with an item or method. Analogous to `hir::Generics`.
-///
-/// The ordering of parameters is the same as in `Subst` (excluding child generics):
-/// `Self` (optionally), `Lifetime` params..., `Type` params...
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
-pub struct Generics {
- pub parent: Option<DefId>,
- pub parent_count: usize,
- pub params: Vec<GenericParamDef>,
-
- /// Reverse map to the `index` field of each `GenericParamDef`.
- #[stable_hasher(ignore)]
- pub param_def_id_to_index: FxHashMap<DefId, u32>,
-
- pub has_self: bool,
- pub has_late_bound_regions: Option<Span>,
-}
-
-impl<'tcx> Generics {
- pub fn count(&self) -> usize {
- self.parent_count + self.params.len()
- }
-
- pub fn own_counts(&self) -> GenericParamCount {
- // We could cache this as a property of `GenericParamCount`, but
- // the aim is to refactor this away entirely eventually and the
- // presence of this method will be a constant reminder.
- let mut own_counts = GenericParamCount::default();
-
- for param in &self.params {
- match param.kind {
- GenericParamDefKind::Lifetime => own_counts.lifetimes += 1,
- GenericParamDefKind::Type { .. } => own_counts.types += 1,
- GenericParamDefKind::Const => own_counts.consts += 1,
- }
- }
-
- own_counts
- }
-
- pub fn own_defaults(&self) -> GenericParamCount {
- let mut own_defaults = GenericParamCount::default();
-
- for param in &self.params {
- match param.kind {
- GenericParamDefKind::Lifetime => (),
- GenericParamDefKind::Type { has_default, .. } => {
- own_defaults.types += has_default as usize;
- }
- GenericParamDefKind::Const => {
- // FIXME(const_generics:defaults)
- }
- }
- }
-
- own_defaults
- }
-
- pub fn requires_monomorphization(&self, tcx: TyCtxt<'tcx>) -> bool {
- if self.own_requires_monomorphization() {
- return true;
- }
-
- if let Some(parent_def_id) = self.parent {
- let parent = tcx.generics_of(parent_def_id);
- parent.requires_monomorphization(tcx)
- } else {
- false
- }
- }
-
- pub fn own_requires_monomorphization(&self) -> bool {
- for param in &self.params {
- match param.kind {
- GenericParamDefKind::Type { .. } | GenericParamDefKind::Const => return true,
- GenericParamDefKind::Lifetime => {}
- }
- }
- false
- }
-
- /// Returns the `GenericParamDef` with the given index.
- pub fn param_at(&'tcx self, param_index: usize, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
- if let Some(index) = param_index.checked_sub(self.parent_count) {
- &self.params[index]
- } else {
- tcx.generics_of(self.parent.expect("parent_count > 0 but no parent?"))
- .param_at(param_index, tcx)
- }
- }
-
- /// Returns the `GenericParamDef` associated with this `EarlyBoundRegion`.
- pub fn region_param(
- &'tcx self,
- param: &EarlyBoundRegion,
- tcx: TyCtxt<'tcx>,
- ) -> &'tcx GenericParamDef {
- let param = self.param_at(param.index as usize, tcx);
- match param.kind {
- GenericParamDefKind::Lifetime => param,
- _ => bug!("expected lifetime parameter, but found another generic parameter"),
- }
- }
-
- /// Returns the `GenericParamDef` associated with this `ParamTy`.
- pub fn type_param(&'tcx self, param: &ParamTy, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
- let param = self.param_at(param.index as usize, tcx);
- match param.kind {
- GenericParamDefKind::Type { .. } => param,
- _ => bug!("expected type parameter, but found another generic parameter"),
- }
- }
-
- /// Returns the `GenericParamDef` associated with this `ParamConst`.
- pub fn const_param(&'tcx self, param: &ParamConst, tcx: TyCtxt<'tcx>) -> &GenericParamDef {
- let param = self.param_at(param.index as usize, tcx);
- match param.kind {
- GenericParamDefKind::Const => param,
- _ => bug!("expected const parameter, but found another generic parameter"),
- }
- }
-}
-
-/// Bounds on generics.
-#[derive(Copy, Clone, Default, Debug, TyEncodable, TyDecodable, HashStable)]
-pub struct GenericPredicates<'tcx> {
- pub parent: Option<DefId>,
- pub predicates: &'tcx [(Predicate<'tcx>, Span)],
-}
-
-impl<'tcx> GenericPredicates<'tcx> {
- pub fn instantiate(
- &self,
- tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
- ) -> InstantiatedPredicates<'tcx> {
- let mut instantiated = InstantiatedPredicates::empty();
- self.instantiate_into(tcx, &mut instantiated, substs);
- instantiated
- }
-
- pub fn instantiate_own(
- &self,
- tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
- ) -> InstantiatedPredicates<'tcx> {
- InstantiatedPredicates {
- predicates: self.predicates.iter().map(|(p, _)| p.subst(tcx, substs)).collect(),
- spans: self.predicates.iter().map(|(_, sp)| *sp).collect(),
- }
- }
-
- fn instantiate_into(
- &self,
- tcx: TyCtxt<'tcx>,
- instantiated: &mut InstantiatedPredicates<'tcx>,
- substs: SubstsRef<'tcx>,
- ) {
- if let Some(def_id) = self.parent {
- tcx.predicates_of(def_id).instantiate_into(tcx, instantiated, substs);
- }
- instantiated.predicates.extend(self.predicates.iter().map(|(p, _)| p.subst(tcx, substs)));
- instantiated.spans.extend(self.predicates.iter().map(|(_, sp)| *sp));
- }
-
- pub fn instantiate_identity(&self, tcx: TyCtxt<'tcx>) -> InstantiatedPredicates<'tcx> {
- let mut instantiated = InstantiatedPredicates::empty();
- self.instantiate_identity_into(tcx, &mut instantiated);
- instantiated
- }
-
- fn instantiate_identity_into(
- &self,
- tcx: TyCtxt<'tcx>,
- instantiated: &mut InstantiatedPredicates<'tcx>,
- ) {
- if let Some(def_id) = self.parent {
- tcx.predicates_of(def_id).instantiate_identity_into(tcx, instantiated);
- }
- instantiated.predicates.extend(self.predicates.iter().map(|(p, _)| p));
- instantiated.spans.extend(self.predicates.iter().map(|(_, s)| s));
- }
-}
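The deleted `instantiate_into`/`instantiate_identity_into` helpers above walk the `parent` chain first, so a child's instantiated predicate list always starts with its ancestors' predicates and ends with its own. A minimal, self-contained sketch of that collection order, using invented stand-in types rather than rustc's real API:

```rust
use std::collections::HashMap;

/// Stand-in for `GenericPredicates`: an optional parent plus "own" predicates.
struct Predicates {
    parent: Option<u32>,    // stand-in for `Option<DefId>`
    own: Vec<&'static str>, // stand-in for `&[(Predicate, Span)]`
}

/// Mirrors the shape of `instantiate_into`: recurse into the parent first,
/// then append this item's own predicates.
fn instantiate_into(all: &HashMap<u32, Predicates>, id: u32, out: &mut Vec<&'static str>) {
    let preds = &all[&id];
    if let Some(parent) = preds.parent {
        instantiate_into(all, parent, out);
    }
    out.extend(preds.own.iter().copied());
}

fn main() {
    let mut all = HashMap::new();
    all.insert(0, Predicates { parent: None, own: vec!["T: Sized"] });
    all.insert(1, Predicates { parent: Some(0), own: vec!["U: Clone"] });

    let mut collected = Vec::new();
    instantiate_into(&all, 1, &mut collected);
    // Parent predicates come first, then the child's own.
    assert_eq!(collected, ["T: Sized", "U: Clone"]);
}
```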
-
#[derive(Debug)]
crate struct PredicateInner<'tcx> {
kind: Binder<PredicateKind<'tcx>>,
@@ -1032,7 +365,7 @@
outer_exclusive_binder: ty::DebruijnIndex,
}
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(PredicateInner<'_>, 40);
#[derive(Clone, Copy, Lift)]
@@ -1057,6 +390,7 @@
impl<'tcx> Predicate<'tcx> {
/// Gets the inner `Binder<PredicateKind<'tcx>>`.
+ #[inline]
pub fn kind(self) -> Binder<PredicateKind<'tcx>> {
self.inner.kind
}
@@ -1289,8 +623,22 @@
self.skip_binder().projection_ty.item_def_id
}
+ /// Returns the `DefId` of the trait of the associated item being projected.
#[inline]
- pub fn to_poly_trait_ref(&self, tcx: TyCtxt<'tcx>) -> PolyTraitRef<'tcx> {
+ pub fn trait_def_id(&self, tcx: TyCtxt<'tcx>) -> DefId {
+ self.skip_binder().projection_ty.trait_def_id(tcx)
+ }
+
+ #[inline]
+ pub fn projection_self_ty(&self) -> Binder<Ty<'tcx>> {
+ self.map_bound(|predicate| predicate.projection_ty.self_ty())
+ }
+
+ /// Get the [PolyTraitRef] required for this projection to be well formed.
+ /// Note that for generic associated types the predicates of the associated
+ /// type also need to be checked.
+ #[inline]
+ pub fn required_poly_trait_ref(&self, tcx: TyCtxt<'tcx>) -> PolyTraitRef<'tcx> {
// Note: unlike with `TraitRef::to_poly_trait_ref()`,
// `self.0.trait_ref` is permitted to have escaping regions.
// This is because here `self` has a `Binder` and so does our
@@ -1919,32 +1267,6 @@
bitflags! {
#[derive(HashStable)]
- pub struct AdtFlags: u32 {
- const NO_ADT_FLAGS = 0;
- /// Indicates whether the ADT is an enum.
- const IS_ENUM = 1 << 0;
- /// Indicates whether the ADT is a union.
- const IS_UNION = 1 << 1;
- /// Indicates whether the ADT is a struct.
- const IS_STRUCT = 1 << 2;
- /// Indicates whether the ADT is a struct and has a constructor.
- const HAS_CTOR = 1 << 3;
- /// Indicates whether the type is `PhantomData`.
- const IS_PHANTOM_DATA = 1 << 4;
- /// Indicates whether the type has a `#[fundamental]` attribute.
- const IS_FUNDAMENTAL = 1 << 5;
- /// Indicates whether the type is `Box`.
- const IS_BOX = 1 << 6;
- /// Indicates whether the type is `ManuallyDrop`.
- const IS_MANUALLY_DROP = 1 << 7;
- /// Indicates whether the variant list of this ADT is `#[non_exhaustive]`.
- /// (i.e., this flag is never set unless this ADT is an enum).
- const IS_VARIANT_LIST_NON_EXHAUSTIVE = 1 << 8;
- }
-}
-
-bitflags! {
- #[derive(HashStable)]
pub struct VariantFlags: u32 {
const NO_VARIANT_FLAGS = 0;
/// Indicates whether the field list of this variant is `#[non_exhaustive]`.
@@ -2066,105 +1388,6 @@
pub vis: Visibility,
}
-/// The definition of a user-defined type, e.g., a `struct`, `enum`, or `union`.
-///
-/// These are all interned (by `alloc_adt_def`) into the global arena.
-///
-/// The initialism *ADT* stands for an [*algebraic data type (ADT)*][adt].
-/// This is slightly wrong because `union`s are not ADTs.
-/// Moreover, Rust only allows recursive data types through indirection.
-///
-/// [adt]: https://en.wikipedia.org/wiki/Algebraic_data_type
-pub struct AdtDef {
- /// The `DefId` of the struct, enum or union item.
- pub did: DefId,
- /// Variants of the ADT. If this is a struct or union, then there will be a single variant.
- pub variants: IndexVec<VariantIdx, VariantDef>,
- /// Flags of the ADT (e.g., is this a struct? is this non-exhaustive?).
- flags: AdtFlags,
- /// Repr options provided by the user.
- pub repr: ReprOptions,
-}
-
-impl PartialOrd for AdtDef {
- fn partial_cmp(&self, other: &AdtDef) -> Option<Ordering> {
- Some(self.cmp(&other))
- }
-}
-
-/// There should be only one `AdtDef` for each `did`, so it is fine
-/// to implement `Ord` based only on `did`.
-impl Ord for AdtDef {
- fn cmp(&self, other: &AdtDef) -> Ordering {
- self.did.cmp(&other.did)
- }
-}
-
-impl PartialEq for AdtDef {
- // `AdtDef`s are always interned, and this is part of `TyS` equality.
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- ptr::eq(self, other)
- }
-}
-
-impl Eq for AdtDef {}
-
-impl Hash for AdtDef {
- #[inline]
- fn hash<H: Hasher>(&self, s: &mut H) {
- (self as *const AdtDef).hash(s)
- }
-}
-
-impl<S: Encoder> Encodable<S> for AdtDef {
- fn encode(&self, s: &mut S) -> Result<(), S::Error> {
- self.did.encode(s)
- }
-}
-
-impl<'a> HashStable<StableHashingContext<'a>> for AdtDef {
- fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
- thread_local! {
- static CACHE: RefCell<FxHashMap<usize, Fingerprint>> = Default::default();
- }
-
- let hash: Fingerprint = CACHE.with(|cache| {
- let addr = self as *const AdtDef as usize;
- *cache.borrow_mut().entry(addr).or_insert_with(|| {
- let ty::AdtDef { did, ref variants, ref flags, ref repr } = *self;
-
- let mut hasher = StableHasher::new();
- did.hash_stable(hcx, &mut hasher);
- variants.hash_stable(hcx, &mut hasher);
- flags.hash_stable(hcx, &mut hasher);
- repr.hash_stable(hcx, &mut hasher);
-
- hasher.finish()
- })
- });
-
- hash.hash_stable(hcx, hasher);
- }
-}
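The deleted `PartialEq`/`Hash`/`Ord` impls above all lean on the fact that `AdtDef`s are interned: there is exactly one allocation per `did`, so equality and hashing can use the pointer address while ordering can defer to the stable id. A rough, hypothetical sketch of that identity scheme with made-up types (not the real interner):

```rust
use std::cmp::Ordering;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

/// Stand-in for an interned definition such as `AdtDef`.
struct Interned {
    id: u32, // stand-in for `DefId`
}

/// Stand-in for the arena/interner: at most one `Interned` per id.
#[derive(Default)]
struct Interner {
    map: HashMap<u32, &'static Interned>,
}

impl Interner {
    fn intern(&mut self, id: u32) -> &'static Interned {
        let entry = self.map.entry(id).or_insert_with(|| {
            // Leak one allocation per distinct id so the reference lives forever.
            let leaked: &'static Interned = Box::leak(Box::new(Interned { id }));
            leaked
        });
        *entry
    }
}

impl PartialEq for Interned {
    fn eq(&self, other: &Self) -> bool {
        // One allocation per id, so pointer identity is equality.
        std::ptr::eq(self, other)
    }
}
impl Eq for Interned {}

impl Hash for Interned {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (self as *const Interned).hash(state)
    }
}

impl PartialOrd for Interned {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Interned {
    fn cmp(&self, other: &Self) -> Ordering {
        // With a single value per id, ordering by id is consistent with `Eq`.
        self.id.cmp(&other.id)
    }
}

fn main() {
    let mut interner = Interner::default();
    let a = interner.intern(7);
    let b = interner.intern(7);
    assert!(std::ptr::eq(a, b));
    assert_eq!(a, b);
    assert_eq!(a.id, 7);
}
```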
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
-pub enum AdtKind {
- Struct,
- Union,
- Enum,
-}
-
-impl Into<DataTypeKind> for AdtKind {
- fn into(self) -> DataTypeKind {
- match self {
- AdtKind::Struct => DataTypeKind::Struct,
- AdtKind::Union => DataTypeKind::Union,
- AdtKind::Enum => DataTypeKind::Enum,
- }
- }
-}
-
bitflags! {
#[derive(TyEncodable, TyDecodable, Default, HashStable)]
pub struct ReprFlags: u8 {
@@ -2287,334 +1510,6 @@
}
}
-impl<'tcx> AdtDef {
- /// Creates a new `AdtDef`.
- fn new(
- tcx: TyCtxt<'_>,
- did: DefId,
- kind: AdtKind,
- variants: IndexVec<VariantIdx, VariantDef>,
- repr: ReprOptions,
- ) -> Self {
- debug!("AdtDef::new({:?}, {:?}, {:?}, {:?})", did, kind, variants, repr);
- let mut flags = AdtFlags::NO_ADT_FLAGS;
-
- if kind == AdtKind::Enum && tcx.has_attr(did, sym::non_exhaustive) {
- debug!("found non-exhaustive variant list for {:?}", did);
- flags = flags | AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE;
- }
-
- flags |= match kind {
- AdtKind::Enum => AdtFlags::IS_ENUM,
- AdtKind::Union => AdtFlags::IS_UNION,
- AdtKind::Struct => AdtFlags::IS_STRUCT,
- };
-
- if kind == AdtKind::Struct && variants[VariantIdx::new(0)].ctor_def_id.is_some() {
- flags |= AdtFlags::HAS_CTOR;
- }
-
- let attrs = tcx.get_attrs(did);
- if tcx.sess.contains_name(&attrs, sym::fundamental) {
- flags |= AdtFlags::IS_FUNDAMENTAL;
- }
- if Some(did) == tcx.lang_items().phantom_data() {
- flags |= AdtFlags::IS_PHANTOM_DATA;
- }
- if Some(did) == tcx.lang_items().owned_box() {
- flags |= AdtFlags::IS_BOX;
- }
- if Some(did) == tcx.lang_items().manually_drop() {
- flags |= AdtFlags::IS_MANUALLY_DROP;
- }
-
- AdtDef { did, variants, flags, repr }
- }
-
- /// Returns `true` if this is a struct.
- #[inline]
- pub fn is_struct(&self) -> bool {
- self.flags.contains(AdtFlags::IS_STRUCT)
- }
-
- /// Returns `true` if this is a union.
- #[inline]
- pub fn is_union(&self) -> bool {
- self.flags.contains(AdtFlags::IS_UNION)
- }
-
- /// Returns `true` if this is an enum.
- #[inline]
- pub fn is_enum(&self) -> bool {
- self.flags.contains(AdtFlags::IS_ENUM)
- }
-
- /// Returns `true` if the variant list of this ADT is `#[non_exhaustive]`.
- #[inline]
- pub fn is_variant_list_non_exhaustive(&self) -> bool {
- self.flags.contains(AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE)
- }
-
- /// Returns the kind of the ADT.
- #[inline]
- pub fn adt_kind(&self) -> AdtKind {
- if self.is_enum() {
- AdtKind::Enum
- } else if self.is_union() {
- AdtKind::Union
- } else {
- AdtKind::Struct
- }
- }
-
- /// Returns a description of this abstract data type.
- pub fn descr(&self) -> &'static str {
- match self.adt_kind() {
- AdtKind::Struct => "struct",
- AdtKind::Union => "union",
- AdtKind::Enum => "enum",
- }
- }
-
- /// Returns a description of a variant of this abstract data type.
- #[inline]
- pub fn variant_descr(&self) -> &'static str {
- match self.adt_kind() {
- AdtKind::Struct => "struct",
- AdtKind::Union => "union",
- AdtKind::Enum => "variant",
- }
- }
-
- /// If this function returns `true`, it implies that `is_struct` must return `true`.
- #[inline]
- pub fn has_ctor(&self) -> bool {
- self.flags.contains(AdtFlags::HAS_CTOR)
- }
-
- /// Returns `true` if this type is `#[fundamental]` for the purposes
- /// of coherence checking.
- #[inline]
- pub fn is_fundamental(&self) -> bool {
- self.flags.contains(AdtFlags::IS_FUNDAMENTAL)
- }
-
- /// Returns `true` if this is `PhantomData<T>`.
- #[inline]
- pub fn is_phantom_data(&self) -> bool {
- self.flags.contains(AdtFlags::IS_PHANTOM_DATA)
- }
-
- /// Returns `true` if this is `Box<T>`.
- #[inline]
- pub fn is_box(&self) -> bool {
- self.flags.contains(AdtFlags::IS_BOX)
- }
-
- /// Returns `true` if this is `ManuallyDrop<T>`.
- #[inline]
- pub fn is_manually_drop(&self) -> bool {
- self.flags.contains(AdtFlags::IS_MANUALLY_DROP)
- }
-
- /// Returns `true` if this type has a destructor.
- pub fn has_dtor(&self, tcx: TyCtxt<'tcx>) -> bool {
- self.destructor(tcx).is_some()
- }
-
- /// Asserts this is a struct or union and returns its unique variant.
- pub fn non_enum_variant(&self) -> &VariantDef {
- assert!(self.is_struct() || self.is_union());
- &self.variants[VariantIdx::new(0)]
- }
-
- #[inline]
- pub fn predicates(&self, tcx: TyCtxt<'tcx>) -> GenericPredicates<'tcx> {
- tcx.predicates_of(self.did)
- }
-
- /// Returns an iterator over all fields contained
- /// by this ADT.
- #[inline]
- pub fn all_fields(&self) -> impl Iterator<Item = &FieldDef> + Clone {
- self.variants.iter().flat_map(|v| v.fields.iter())
- }
-
- /// Whether the ADT lacks fields. Note that this includes uninhabited enums,
- /// e.g., `enum Void {}` is considered payload free as well.
- pub fn is_payloadfree(&self) -> bool {
- self.variants.iter().all(|v| v.fields.is_empty())
- }
-
- /// Return a `VariantDef` given a variant id.
- pub fn variant_with_id(&self, vid: DefId) -> &VariantDef {
- self.variants.iter().find(|v| v.def_id == vid).expect("variant_with_id: unknown variant")
- }
-
- /// Return a `VariantDef` given a constructor id.
- pub fn variant_with_ctor_id(&self, cid: DefId) -> &VariantDef {
- self.variants
- .iter()
- .find(|v| v.ctor_def_id == Some(cid))
- .expect("variant_with_ctor_id: unknown variant")
- }
-
- /// Return the index of `VariantDef` given a variant id.
- pub fn variant_index_with_id(&self, vid: DefId) -> VariantIdx {
- self.variants
- .iter_enumerated()
- .find(|(_, v)| v.def_id == vid)
- .expect("variant_index_with_id: unknown variant")
- .0
- }
-
- /// Return the index of `VariantDef` given a constructor id.
- pub fn variant_index_with_ctor_id(&self, cid: DefId) -> VariantIdx {
- self.variants
- .iter_enumerated()
- .find(|(_, v)| v.ctor_def_id == Some(cid))
- .expect("variant_index_with_ctor_id: unknown variant")
- .0
- }
-
- pub fn variant_of_res(&self, res: Res) -> &VariantDef {
- match res {
- Res::Def(DefKind::Variant, vid) => self.variant_with_id(vid),
- Res::Def(DefKind::Ctor(..), cid) => self.variant_with_ctor_id(cid),
- Res::Def(DefKind::Struct, _)
- | Res::Def(DefKind::Union, _)
- | Res::Def(DefKind::TyAlias, _)
- | Res::Def(DefKind::AssocTy, _)
- | Res::SelfTy(..)
- | Res::SelfCtor(..) => self.non_enum_variant(),
- _ => bug!("unexpected res {:?} in variant_of_res", res),
- }
- }
-
- #[inline]
- pub fn eval_explicit_discr(&self, tcx: TyCtxt<'tcx>, expr_did: DefId) -> Option<Discr<'tcx>> {
- assert!(self.is_enum());
- let param_env = tcx.param_env(expr_did);
- let repr_type = self.repr.discr_type();
- match tcx.const_eval_poly(expr_did) {
- Ok(val) => {
- let ty = repr_type.to_ty(tcx);
- if let Some(b) = val.try_to_bits_for_ty(tcx, param_env, ty) {
- trace!("discriminants: {} ({:?})", b, repr_type);
- Some(Discr { val: b, ty })
- } else {
- info!("invalid enum discriminant: {:#?}", val);
- crate::mir::interpret::struct_error(
- tcx.at(tcx.def_span(expr_did)),
- "constant evaluation of enum discriminant resulted in non-integer",
- )
- .emit();
- None
- }
- }
- Err(err) => {
- let msg = match err {
- ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
- "enum discriminant evaluation failed"
- }
- ErrorHandled::TooGeneric => "enum discriminant depends on generics",
- };
- tcx.sess.delay_span_bug(tcx.def_span(expr_did), msg);
- None
- }
- }
- }
-
- #[inline]
- pub fn discriminants(
- &'tcx self,
- tcx: TyCtxt<'tcx>,
- ) -> impl Iterator<Item = (VariantIdx, Discr<'tcx>)> + Captures<'tcx> {
- assert!(self.is_enum());
- let repr_type = self.repr.discr_type();
- let initial = repr_type.initial_discriminant(tcx);
- let mut prev_discr = None::<Discr<'tcx>>;
- self.variants.iter_enumerated().map(move |(i, v)| {
- let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
- if let VariantDiscr::Explicit(expr_did) = v.discr {
- if let Some(new_discr) = self.eval_explicit_discr(tcx, expr_did) {
- discr = new_discr;
- }
- }
- prev_discr = Some(discr);
-
- (i, discr)
- })
- }
-
- #[inline]
- pub fn variant_range(&self) -> Range<VariantIdx> {
- VariantIdx::new(0)..VariantIdx::new(self.variants.len())
- }
-
- /// Computes the discriminant value used by a specific variant.
- /// Unlike `discriminants`, this is (amortized) constant-time,
- /// only doing at most one query for evaluating an explicit
- /// discriminant (the last one before the requested variant),
- /// assuming there are no constant-evaluation errors there.
- #[inline]
- pub fn discriminant_for_variant(
- &self,
- tcx: TyCtxt<'tcx>,
- variant_index: VariantIdx,
- ) -> Discr<'tcx> {
- assert!(self.is_enum());
- let (val, offset) = self.discriminant_def_for_variant(variant_index);
- let explicit_value = val
- .and_then(|expr_did| self.eval_explicit_discr(tcx, expr_did))
- .unwrap_or_else(|| self.repr.discr_type().initial_discriminant(tcx));
- explicit_value.checked_add(tcx, offset as u128).0
- }
-
- /// Yields a `DefId` for the discriminant and an offset to add to it.
- /// Alternatively, if there is no explicit discriminant, returns the
- /// inferred discriminant directly.
- pub fn discriminant_def_for_variant(&self, variant_index: VariantIdx) -> (Option<DefId>, u32) {
- assert!(!self.variants.is_empty());
- let mut explicit_index = variant_index.as_u32();
- let expr_did;
- loop {
- match self.variants[VariantIdx::from_u32(explicit_index)].discr {
- ty::VariantDiscr::Relative(0) => {
- expr_did = None;
- break;
- }
- ty::VariantDiscr::Relative(distance) => {
- explicit_index -= distance;
- }
- ty::VariantDiscr::Explicit(did) => {
- expr_did = Some(did);
- break;
- }
- }
- }
- (expr_did, variant_index.as_u32() - explicit_index)
- }
-
- pub fn destructor(&self, tcx: TyCtxt<'tcx>) -> Option<Destructor> {
- tcx.adt_destructor(self.did)
- }
-
- /// Returns a list of types such that `Self: Sized` holds if and only
- /// if each of those types is `Sized`, or `TyErr` if this type is recursive.
- ///
- /// Oddly enough, checking that the sized-constraint is `Sized` is
- /// actually more expressive than checking all members:
- /// the `Sized` trait is inductive, so an associated type that references
- /// `Self` would prevent its containing ADT from being `Sized`.
- ///
- /// Due to normalization being eager, this applies even if
- /// the associated type is behind a pointer (e.g., issue #31299).
- pub fn sized_constraint(&self, tcx: TyCtxt<'tcx>) -> &'tcx [Ty<'tcx>] {
- tcx.adt_sized_constraint(self.did).0
- }
-}
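As a reading aid for the deleted discriminant code above: `discriminant_def_for_variant` walks backwards through `Relative(distance)` entries until it reaches the governing `Explicit` discriminant (or the start of the enum), and `discriminant_for_variant` then adds the remaining offset. A simplified, hypothetical model where the explicit case stores the evaluated value directly (the real code stores a `DefId` and const-evaluates it):

```rust
/// Simplified stand-in for `ty::VariantDiscr`.
#[derive(Clone, Copy)]
enum VariantDiscr {
    Explicit(i128),    // stand-in for `Explicit(DefId)` plus const evaluation
    Relative(u32),     // distance from the previous explicit/initial discriminant
}

fn discriminant_for_variant(variants: &[VariantDiscr], variant_index: usize) -> i128 {
    let mut explicit_index = variant_index as u32;
    // Walk back to the variant that owns the governing discriminant.
    let base = loop {
        match variants[explicit_index as usize] {
            VariantDiscr::Explicit(val) => break val,
            VariantDiscr::Relative(0) => break 0, // initial discriminant
            VariantDiscr::Relative(distance) => explicit_index -= distance,
        }
    };
    // Add back the distance from the governing discriminant to the requested variant.
    base + (variant_index as u32 - explicit_index) as i128
}

fn main() {
    // enum E { A, B = 10, C, D }  =>  discriminants 0, 10, 11, 12
    let variants = [
        VariantDiscr::Relative(0),
        VariantDiscr::Explicit(10),
        VariantDiscr::Relative(1),
        VariantDiscr::Relative(2),
    ];
    assert_eq!(discriminant_for_variant(&variants, 0), 0);
    assert_eq!(discriminant_for_variant(&variants, 2), 11);
    assert_eq!(discriminant_for_variant(&variants, 3), 12);
}
```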
-
impl<'tcx> FieldDef {
/// Returns the type of this field. The `subst` is typically obtained
/// via the second field of `TyKind::AdtDef`.
@@ -2623,93 +1518,6 @@
}
}
-/// Represents the various closure traits in the language. This
-/// will determine the type of the environment (`self`, in the
-/// desugaring) argument that the closure expects.
-///
-/// You can get the environment type of a closure using
-/// `tcx.closure_env_ty()`.
-#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
-#[derive(HashStable)]
-pub enum ClosureKind {
- // Warning: Ordering is significant here! The ordering is chosen because
- // the trait `Fn` is a subtrait of `FnMut`, which is in turn a subtrait of
- // `FnOnce`; hence we order the variants so that Fn < FnMut < FnOnce.
- Fn,
- FnMut,
- FnOnce,
-}
-
-impl<'tcx> ClosureKind {
- // This is the initial value used when doing upvar inference.
- pub const LATTICE_BOTTOM: ClosureKind = ClosureKind::Fn;
-
- pub fn trait_did(&self, tcx: TyCtxt<'tcx>) -> DefId {
- match *self {
- ClosureKind::Fn => tcx.require_lang_item(LangItem::Fn, None),
- ClosureKind::FnMut => tcx.require_lang_item(LangItem::FnMut, None),
- ClosureKind::FnOnce => tcx.require_lang_item(LangItem::FnOnce, None),
- }
- }
-
- /// Returns `true` if a type that impls this closure kind
- /// must also implement `other`.
- pub fn extends(self, other: ty::ClosureKind) -> bool {
- matches!(
- (self, other),
- (ClosureKind::Fn, ClosureKind::Fn)
- | (ClosureKind::Fn, ClosureKind::FnMut)
- | (ClosureKind::Fn, ClosureKind::FnOnce)
- | (ClosureKind::FnMut, ClosureKind::FnMut)
- | (ClosureKind::FnMut, ClosureKind::FnOnce)
- | (ClosureKind::FnOnce, ClosureKind::FnOnce)
- )
- }
-
- /// Returns the representative scalar type for this closure kind.
- /// See `TyS::to_opt_closure_kind` for more details.
- pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
- match self {
- ty::ClosureKind::Fn => tcx.types.i8,
- ty::ClosureKind::FnMut => tcx.types.i16,
- ty::ClosureKind::FnOnce => tcx.types.i32,
- }
- }
-}
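The ordering warning in the deleted `ClosureKind` above is what makes `extends` work: `Fn` is a subtrait of `FnMut`, which is in turn a subtrait of `FnOnce`, so with the variants declared in that order the relation could also be phrased as a plain comparison instead of the explicit `matches!` table. A small stand-alone sketch of that idea (not the rustc type itself):

```rust
// Variants are declared in subtrait order, so the derived `Ord`
// gives Fn < FnMut < FnOnce.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum ClosureKind {
    Fn,
    FnMut,
    FnOnce,
}

impl ClosureKind {
    /// Returns `true` if a type implementing `self` must also implement `other`.
    fn extends(self, other: ClosureKind) -> bool {
        self <= other
    }
}

fn main() {
    assert!(ClosureKind::Fn.extends(ClosureKind::FnOnce));
    assert!(ClosureKind::FnMut.extends(ClosureKind::FnOnce));
    assert!(!ClosureKind::FnOnce.extends(ClosureKind::FnMut));
}
```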
-
-impl BorrowKind {
- pub fn from_mutbl(m: hir::Mutability) -> BorrowKind {
- match m {
- hir::Mutability::Mut => MutBorrow,
- hir::Mutability::Not => ImmBorrow,
- }
- }
-
- /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
- /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
- /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
- /// question.
- pub fn to_mutbl_lossy(self) -> hir::Mutability {
- match self {
- MutBorrow => hir::Mutability::Mut,
- ImmBorrow => hir::Mutability::Not,
-
- // We have no type corresponding to a unique imm borrow, so
- // use `&mut`. It gives all the capabilities of an `&uniq`
- // and hence is a safe "over-approximation".
- UniqueImmBorrow => hir::Mutability::Mut,
- }
- }
-
- pub fn to_user_str(&self) -> &'static str {
- match *self {
- MutBorrow => "mutable",
- ImmBorrow => "immutable",
- UniqueImmBorrow => "uniquely immutable",
- }
- }
-}
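A tiny illustration of the "lossy" mapping documented above, using stand-in types rather than rustc's: borrow kinds are richer than surface mutabilities, so the unique-immutable case has to be over-approximated as mutable.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Mutability { Not, Mut }

#[derive(Clone, Copy, Debug)]
enum BorrowKind { ImmBorrow, UniqueImmBorrow, MutBorrow }

fn to_mutbl_lossy(kind: BorrowKind) -> Mutability {
    match kind {
        BorrowKind::ImmBorrow => Mutability::Not,
        // There is no surface syntax for a unique immutable borrow; `&mut`
        // grants a superset of its capabilities, so it is a safe
        // over-approximation.
        BorrowKind::UniqueImmBorrow | BorrowKind::MutBorrow => Mutability::Mut,
    }
}

fn main() {
    assert_eq!(to_mutbl_lossy(BorrowKind::UniqueImmBorrow), Mutability::Mut);
}
```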
-
pub type Attributes<'tcx> = &'tcx [ast::Attribute];
#[derive(Debug, PartialEq, Eq)]
@@ -2950,7 +1758,10 @@
| DefKind::AnonConst => self.mir_for_ctfe_opt_const_arg(def),
// If the caller wants `mir_for_ctfe` of a function they should not be using
// `instance_mir`, so we'll assume const fn also wants the optimized version.
- _ => self.optimized_mir_or_const_arg_mir(def),
+ _ => {
+ assert_eq!(def.const_param_did, None);
+ self.optimized_mir(def.did)
+ }
},
ty::InstanceDef::VtableShim(..)
| ty::InstanceDef::ReifyShim(..)
@@ -2985,7 +1796,7 @@
/// Returns layout of a generator. Layout might be unavailable if the
/// generator is tainted by errors.
pub fn generator_layout(self, def_id: DefId) -> Option<&'tcx GeneratorLayout<'tcx>> {
- self.optimized_mir(def_id).generator_layout.as_ref()
+ self.optimized_mir(def_id).generator_layout()
}
/// Given the `DefId` of an impl, returns the `DefId` of the trait it implements.
@@ -3064,9 +1875,6 @@
}
}
-#[derive(Clone, HashStable, Debug)]
-pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]);
-
/// Yields the parent function's `DefId` if `def_id` is an `impl Trait` definition.
pub fn is_impl_trait_defn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<DefId> {
if let Some(def_id) = def_id.as_local() {
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 4937fdd..7946d17 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -607,7 +607,7 @@
return Ok(self);
}
- return Ok(with_no_queries(|| {
+ return with_no_queries(|| {
let def_key = self.tcx().def_key(def_id);
if let Some(name) = def_key.disambiguated_data.data.get_opt_name() {
p!(write("{}", name));
@@ -649,7 +649,7 @@
p!(" Sized");
}
Ok(self)
- })?);
+ });
}
ty::Str => p!("str"),
ty::Generator(did, substs, movability) => {
@@ -956,32 +956,40 @@
}
fn pretty_print_const_scalar(
- mut self,
+ self,
scalar: Scalar,
ty: Ty<'tcx>,
print_ty: bool,
) -> Result<Self::Const, Self::Error> {
+ match scalar {
+ Scalar::Ptr(ptr) => self.pretty_print_const_scalar_ptr(ptr, ty, print_ty),
+ Scalar::Int(int) => self.pretty_print_const_scalar_int(int, ty, print_ty),
+ }
+ }
+
+ fn pretty_print_const_scalar_ptr(
+ mut self,
+ ptr: Pointer,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
define_scoped_cx!(self);
- match (scalar, &ty.kind()) {
+ match ty.kind() {
// Byte strings (&[u8; N])
- (
- Scalar::Ptr(ptr),
- ty::Ref(
- _,
- ty::TyS {
- kind:
- ty::Array(
- ty::TyS { kind: ty::Uint(ty::UintTy::U8), .. },
- ty::Const {
- val: ty::ConstKind::Value(ConstValue::Scalar(int)),
- ..
- },
- ),
- ..
- },
- _,
- ),
+ ty::Ref(
+ _,
+ ty::TyS {
+ kind:
+ ty::Array(
+ ty::TyS { kind: ty::Uint(ty::UintTy::U8), .. },
+ ty::Const {
+ val: ty::ConstKind::Value(ConstValue::Scalar(int)), ..
+ },
+ ),
+ ..
+ },
+ _,
) => match self.tcx().get_global_alloc(ptr.alloc_id) {
Some(GlobalAlloc::Memory(alloc)) => {
let bytes = int.assert_bits(self.tcx().data_layout.pointer_size);
@@ -997,28 +1005,59 @@
Some(GlobalAlloc::Function(_)) => p!("<function>"),
None => p!("<dangling pointer>"),
},
+ ty::FnPtr(_) => {
+ // FIXME: We should probably have a helper method to share code with the "Byte strings"
+ // printing above (which also has to handle pointers to all sorts of things).
+ match self.tcx().get_global_alloc(ptr.alloc_id) {
+ Some(GlobalAlloc::Function(instance)) => {
+ self = self.typed_value(
+ |this| this.print_value_path(instance.def_id(), instance.substs),
+ |this| this.print_type(ty),
+ " as ",
+ )?;
+ }
+ _ => self = self.pretty_print_const_pointer(ptr, ty, print_ty)?,
+ }
+ }
+ // Any pointer values not covered by a branch above
+ _ => {
+ self = self.pretty_print_const_pointer(ptr, ty, print_ty)?;
+ }
+ }
+ Ok(self)
+ }
+
+ fn pretty_print_const_scalar_int(
+ mut self,
+ int: ScalarInt,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+
+ match ty.kind() {
// Bool
- (Scalar::Int(int), ty::Bool) if int == ScalarInt::FALSE => p!("false"),
- (Scalar::Int(int), ty::Bool) if int == ScalarInt::TRUE => p!("true"),
+ ty::Bool if int == ScalarInt::FALSE => p!("false"),
+ ty::Bool if int == ScalarInt::TRUE => p!("true"),
// Float
- (Scalar::Int(int), ty::Float(ty::FloatTy::F32)) => {
+ ty::Float(ty::FloatTy::F32) => {
p!(write("{}f32", Single::try_from(int).unwrap()))
}
- (Scalar::Int(int), ty::Float(ty::FloatTy::F64)) => {
+ ty::Float(ty::FloatTy::F64) => {
p!(write("{}f64", Double::try_from(int).unwrap()))
}
// Int
- (Scalar::Int(int), ty::Uint(_) | ty::Int(_)) => {
+ ty::Uint(_) | ty::Int(_) => {
let int =
ConstInt::new(int, matches!(ty.kind(), ty::Int(_)), ty.is_ptr_sized_integral());
if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
}
// Char
- (Scalar::Int(int), ty::Char) if char::try_from(int).is_ok() => {
+ ty::Char if char::try_from(int).is_ok() => {
p!(write("{:?}", char::try_from(int).unwrap()))
}
// Raw pointers
- (Scalar::Int(int), ty::RawPtr(_)) => {
+ ty::RawPtr(_) | ty::FnPtr(_) => {
let data = int.assert_bits(self.tcx().data_layout.pointer_size);
self = self.typed_value(
|mut this| {
@@ -1029,23 +1068,12 @@
" as ",
)?;
}
- (Scalar::Ptr(ptr), ty::FnPtr(_)) => {
- // FIXME: this can ICE when the ptr is dangling or points to a non-function.
- // We should probably have a helper method to share code with the "Byte strings"
- // printing above (which also has to handle pointers to all sorts of things).
- let instance = self.tcx().global_alloc(ptr.alloc_id).unwrap_fn();
- self = self.typed_value(
- |this| this.print_value_path(instance.def_id(), instance.substs),
- |this| this.print_type(ty),
- " as ",
- )?;
- }
// For function type zsts just printing the path is enough
- (Scalar::Int(int), ty::FnDef(d, s)) if int == ScalarInt::ZST => {
+ ty::FnDef(d, s) if int == ScalarInt::ZST => {
p!(print_value_path(*d, s))
}
// Nontrivial types with scalar bit representation
- (Scalar::Int(int), _) => {
+ _ => {
let print = |mut this: Self| {
if int.size() == Size::ZERO {
write!(this, "transmute(())")?;
@@ -1060,10 +1088,6 @@
print(self)?
};
}
- // Any pointer values not covered by a branch above
- (Scalar::Ptr(p), _) => {
- self = self.pretty_print_const_pointer(p, ty, print_ty)?;
- }
}
Ok(self)
}
@@ -2107,11 +2131,9 @@
continue;
}
- if let Some(local_def_id) = hir.definitions().opt_hir_id_to_local_def_id(item.hir_id) {
- let def_id = local_def_id.to_def_id();
- let ns = tcx.def_kind(def_id).ns().unwrap_or(Namespace::TypeNS);
- collect_fn(&item.ident, ns, def_id);
- }
+ let def_id = item.def_id.to_def_id();
+ let ns = tcx.def_kind(def_id).ns().unwrap_or(Namespace::TypeNS);
+ collect_fn(&item.ident, ns, def_id);
}
// Now take care of extern crate items.
@@ -2143,6 +2165,7 @@
match child.res {
def::Res::Def(DefKind::AssocTy, _) => {}
+ def::Res::Def(DefKind::TyAlias, _) => {}
def::Res::Def(defkind, def_id) => {
if let Some(ns) = defkind.ns() {
collect_fn(&child.ident, ns, def_id);
diff --git a/compiler/rustc_middle/src/ty/query/job.rs b/compiler/rustc_middle/src/ty/query/job.rs
deleted file mode 100644
index bd2e774..0000000
--- a/compiler/rustc_middle/src/ty/query/job.rs
+++ /dev/null
@@ -1,26 +0,0 @@
-use crate::ty::tls;
-
-use rustc_query_system::query::deadlock;
-use rustc_rayon_core as rayon_core;
-use std::thread;
-
-/// Creates a new thread and forwards information in thread locals to it.
-/// The new thread runs the deadlock handler.
-/// Must only be called when a deadlock is about to happen.
-pub unsafe fn handle_deadlock() {
- let registry = rayon_core::Registry::current();
-
- let context = tls::get_tlv();
- assert!(context != 0);
- rustc_data_structures::sync::assert_sync::<tls::ImplicitCtxt<'_, '_>>();
- let icx: &tls::ImplicitCtxt<'_, '_> = &*(context as *const tls::ImplicitCtxt<'_, '_>);
-
- let session_globals = rustc_span::SESSION_GLOBALS.with(|sg| sg as *const _);
- let session_globals = &*session_globals;
- thread::spawn(move || {
- tls::enter_context(icx, |_| {
- rustc_span::SESSION_GLOBALS
- .set(session_globals, || tls::with(|tcx| deadlock(tcx, ®istry)))
- })
- });
-}
diff --git a/compiler/rustc_middle/src/ty/query/mod.rs b/compiler/rustc_middle/src/ty/query/mod.rs
index f580cb1..48e777f 100644
--- a/compiler/rustc_middle/src/ty/query/mod.rs
+++ b/compiler/rustc_middle/src/ty/query/mod.rs
@@ -14,8 +14,8 @@
use crate::middle::stability::{self, DeprecationEntry};
use crate::mir;
use crate::mir::interpret::GlobalId;
+use crate::mir::interpret::{ConstAlloc, LitToConstError, LitToConstInput};
use crate::mir::interpret::{ConstValue, EvalToAllocationRawResult, EvalToConstValueResult};
-use crate::mir::interpret::{LitToConstError, LitToConstInput};
use crate::mir::mono::CodegenUnit;
use crate::traits::query::{
CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
@@ -31,19 +31,19 @@
use crate::ty::subst::{GenericArg, SubstsRef};
use crate::ty::util::AlwaysRequiresDrop;
use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
-use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_data_structures::stable_hasher::StableVec;
use rustc_data_structures::steal::Steal;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::Lrc;
-use rustc_errors::ErrorReported;
+use rustc_errors::{ErrorReported, Handler};
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId};
use rustc_hir::lang_items::{LangItem, LanguageItems};
use rustc_hir::{Crate, ItemLocalId, TraitCandidate};
use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
+use rustc_serialize::opaque;
use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
use rustc_session::utils::NativeLibKind;
use rustc_session::CrateDisambiguator;
@@ -53,41 +53,216 @@
use rustc_attr as attr;
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
-use std::borrow::Cow;
use std::collections::BTreeMap;
use std::ops::Deref;
use std::path::PathBuf;
use std::sync::Arc;
-#[macro_use]
-mod plumbing;
-pub(crate) use rustc_query_system::query::CycleError;
+pub(crate) use rustc_query_system::query::QueryJobId;
use rustc_query_system::query::*;
-mod stats;
-pub use self::stats::print_stats;
-
-#[cfg(parallel_compiler)]
-mod job;
-#[cfg(parallel_compiler)]
-pub use self::job::handle_deadlock;
-pub use rustc_query_system::query::{QueryInfo, QueryJob, QueryJobId};
-
-mod keys;
-use self::keys::Key;
-
-mod values;
-use self::values::Value;
-
-use rustc_query_system::query::QueryAccessors;
-pub use rustc_query_system::query::QueryConfig;
-pub(crate) use rustc_query_system::query::QueryDescription;
-
-mod on_disk_cache;
+pub mod on_disk_cache;
pub use self::on_disk_cache::OnDiskCache;
-mod profiling_support;
-pub use self::profiling_support::{IntoSelfProfilingString, QueryKeyStringBuilder};
+#[derive(Copy, Clone)]
+pub struct TyCtxtAt<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub span: Span,
+}
+
+impl Deref for TyCtxtAt<'tcx> {
+ type Target = TyCtxt<'tcx>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ &self.tcx
+ }
+}
+
+#[derive(Copy, Clone)]
+pub struct TyCtxtEnsure<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+}
+
+impl TyCtxt<'tcx> {
+ /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
+ /// are executed instead of just returning their results.
+ #[inline(always)]
+ pub fn ensure(self) -> TyCtxtEnsure<'tcx> {
+ TyCtxtEnsure { tcx: self }
+ }
+
+ /// Returns a transparent wrapper for `TyCtxt` which uses
+ /// `span` as the location of queries performed through it.
+ #[inline(always)]
+ pub fn at(self, span: Span) -> TyCtxtAt<'tcx> {
+ TyCtxtAt { tcx: self, span }
+ }
+
+ pub fn try_mark_green(self, dep_node: &dep_graph::DepNode) -> bool {
+ self.queries.try_mark_green(self, dep_node)
+ }
+}
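The new `TyCtxtAt`/`TyCtxtEnsure` types added here follow a copyable-wrapper-plus-`Deref` pattern: the wrapper carries extra state (a span, or the "ensure" mode) while still exposing everything on the underlying context. A self-contained sketch of just that pattern, with invented names for illustration only:

```rust
use std::ops::Deref;

#[derive(Clone, Copy)]
struct Ctxt {
    crate_name: &'static str,
}

impl Ctxt {
    /// Analogous in spirit to `TyCtxt::at`: pair the context with a location.
    fn at(self, span: usize) -> CtxtAt {
        CtxtAt { ctxt: self, span }
    }
}

#[derive(Clone, Copy)]
struct CtxtAt {
    ctxt: Ctxt,
    span: usize,
}

impl Deref for CtxtAt {
    type Target = Ctxt;
    fn deref(&self) -> &Ctxt {
        &self.ctxt
    }
}

fn main() {
    let cx = Ctxt { crate_name: "demo" };
    let at = cx.at(42);
    // Fields and methods of `Ctxt` stay reachable through the wrapper...
    assert_eq!(at.crate_name, "demo");
    // ...while the wrapper carries the extra state used for provenance.
    assert_eq!(at.span, 42);
}
```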
+
+macro_rules! query_helper_param_ty {
+ (DefId) => { impl IntoQueryParam<DefId> };
+ ($K:ty) => { $K };
+}
+
+macro_rules! query_storage {
+ ([][$K:ty, $V:ty]) => {
+ <DefaultCacheSelector as CacheSelector<$K, $V>>::Cache
+ };
+ ([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
+ <$ty as CacheSelector<$K, $V>>::Cache
+ };
+ ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
+ query_storage!([$($($modifiers)*)*][$($args)*])
+ };
+}
+
+macro_rules! define_callbacks {
+ (<$tcx:tt>
+ $($(#[$attr:meta])*
+ [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
+
+ // HACK(eddyb) this is like the `impl QueryConfig for queries::$name`
+ // below, but using type aliases instead of associated types, to bypass
+ // the limitations around normalizing under HRTB - for example, this:
+ // `for<'tcx> fn(...) -> <queries::$name<'tcx> as QueryConfig<TyCtxt<'tcx>>>::Value`
+ // doesn't currently normalize to `for<'tcx> fn(...) -> query_values::$name<'tcx>`.
+ // This is primarily used by the `provide!` macro in `rustc_metadata`.
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_keys {
+ use super::*;
+
+ $(pub type $name<$tcx> = $($K)*;)*
+ }
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_values {
+ use super::*;
+
+ $(pub type $name<$tcx> = $V;)*
+ }
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_storage {
+ use super::*;
+
+ $(pub type $name<$tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)*
+ }
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_stored {
+ use super::*;
+
+ $(pub type $name<$tcx> = <query_storage::$name<$tcx> as QueryStorage>::Stored;)*
+ }
+
+ #[derive(Default)]
+ pub struct QueryCaches<$tcx> {
+ $($(#[$attr])* pub $name: QueryCacheStore<query_storage::$name<$tcx>>,)*
+ }
+
+ impl TyCtxtEnsure<$tcx> {
+ $($(#[$attr])*
+ #[inline(always)]
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
+ let key = key.into_query_param();
+ let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |_| {});
+
+ let lookup = match cached {
+ Ok(()) => return,
+ Err(lookup) => lookup,
+ };
+
+ self.tcx.queries.$name(self.tcx, DUMMY_SP, key, lookup, QueryMode::Ensure);
+ })*
+ }
+
+ impl TyCtxt<$tcx> {
+ $($(#[$attr])*
+ #[inline(always)]
+ #[must_use]
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+ {
+ self.at(DUMMY_SP).$name(key)
+ })*
+ }
+
+ impl TyCtxtAt<$tcx> {
+ $($(#[$attr])*
+ #[inline(always)]
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+ {
+ let key = key.into_query_param();
+ let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |value| {
+ value.clone()
+ });
+
+ let lookup = match cached {
+ Ok(value) => return value,
+ Err(lookup) => lookup,
+ };
+
+ self.tcx.queries.$name(self.tcx, self.span, key, lookup, QueryMode::Get).unwrap()
+ })*
+ }
+
+ pub struct Providers {
+ $(pub $name: for<'tcx> fn(
+ TyCtxt<'tcx>,
+ query_keys::$name<'tcx>,
+ ) -> query_values::$name<'tcx>,)*
+ }
+
+ impl Default for Providers {
+ fn default() -> Self {
+ Providers {
+ $($name: |_, key| bug!(
+ "`tcx.{}({:?})` unsupported by its crate",
+ stringify!($name), key
+ ),)*
+ }
+ }
+ }
+
+ impl Copy for Providers {}
+ impl Clone for Providers {
+ fn clone(&self) -> Self { *self }
+ }
+
+ pub trait QueryEngine<'tcx>: rustc_data_structures::sync::Sync {
+ unsafe fn deadlock(&'tcx self, tcx: TyCtxt<'tcx>, registry: &rustc_rayon_core::Registry);
+
+ fn encode_query_results(
+ &'tcx self,
+ tcx: TyCtxt<'tcx>,
+ encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>,
+ query_result_index: &mut on_disk_cache::EncodedQueryResultIndex,
+ ) -> opaque::FileEncodeResult;
+
+ fn exec_cache_promotions(&'tcx self, tcx: TyCtxt<'tcx>);
+
+ fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool;
+
+ fn try_print_query_stack(
+ &'tcx self,
+ tcx: TyCtxt<'tcx>,
+ query: Option<QueryJobId<dep_graph::DepKind>>,
+ handler: &Handler,
+ num_frames: Option<usize>,
+ ) -> usize;
+
+ $($(#[$attr])*
+ fn $name(
+ &'tcx self,
+ tcx: TyCtxt<$tcx>,
+ span: Span,
+ key: query_keys::$name<$tcx>,
+ lookup: QueryLookup,
+ mode: QueryMode,
+ ) -> Option<query_stored::$name<$tcx>>;)*
+ }
+ };
+}
// Each of these queries corresponds to a function pointer field in the
// `Providers` struct for requesting a value of that type, and a method
@@ -101,7 +276,7 @@
// Queries marked with `fatal_cycle` do not need the latter implementation,
// as they will raise a fatal error on query cycles instead.
-rustc_query_append! { [define_queries!][<'tcx>] }
+rustc_query_append! { [define_callbacks!][<'tcx>] }
mod sealed {
use super::{DefId, LocalDefId};
diff --git a/compiler/rustc_middle/src/ty/query/on_disk_cache.rs b/compiler/rustc_middle/src/ty/query/on_disk_cache.rs
index cfe4700..78193ac 100644
--- a/compiler/rustc_middle/src/ty/query/on_disk_cache.rs
+++ b/compiler/rustc_middle/src/ty/query/on_disk_cache.rs
@@ -14,6 +14,8 @@
use rustc_hir::definitions::DefPathHash;
use rustc_hir::definitions::Definitions;
use rustc_index::vec::{Idx, IndexVec};
+use rustc_query_system::dep_graph::DepContext;
+use rustc_query_system::query::QueryContext;
use rustc_serialize::{
opaque::{self, FileEncodeResult, FileEncoder},
Decodable, Decoder, Encodable, Encoder,
@@ -132,7 +134,7 @@
foreign_def_path_hashes: UnhashMap<DefPathHash, RawDefId>,
}
-type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
+pub type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
type EncodedDiagnosticsIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
type EncodedDiagnostics = Vec<Diagnostic>;
@@ -140,7 +142,7 @@
struct SourceFileIndex(u32);
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Encodable, Decodable)]
-struct AbsoluteBytePos(u32);
+pub struct AbsoluteBytePos(u32);
impl AbsoluteBytePos {
fn new(pos: usize) -> AbsoluteBytePos {
@@ -284,7 +286,7 @@
// Do this *before* we clone 'latest_foreign_def_path_hashes', since
// loading existing queries may cause us to create new DepNodes, which
// may in turn end up invoking `store_foreign_def_id_hash`
- tcx.dep_graph.exec_cache_promotions(tcx);
+ tcx.queries.exec_cache_promotions(tcx);
let latest_foreign_def_path_hashes = self.latest_foreign_def_path_hashes.lock().clone();
let hygiene_encode_context = HygieneEncodeContext::default();
@@ -307,22 +309,7 @@
tcx.sess.time("encode_query_results", || -> FileEncodeResult {
let enc = &mut encoder;
let qri = &mut query_result_index;
-
- macro_rules! encode_queries {
- ($($query:ident,)*) => {
- $(
- encode_query_results::<ty::query::queries::$query<'_>>(
- tcx,
- enc,
- qri
- )?;
- )*
- }
- }
-
- rustc_cached_queries!(encode_queries!);
-
- Ok(())
+ tcx.queries.encode_query_results(tcx, enc, qri)
})?;
// Encode diagnostics.
@@ -427,7 +414,7 @@
fn sorted_cnums_including_local_crate(tcx: TyCtxt<'_>) -> Vec<CrateNum> {
let mut cnums = vec![LOCAL_CRATE];
- cnums.extend_from_slice(&tcx.crates()[..]);
+ cnums.extend_from_slice(tcx.crates());
cnums.sort_unstable();
// Just to be sure...
cnums.dedup();
@@ -515,7 +502,7 @@
/// Returns the cached query result if there is something in the cache for
/// the given `SerializedDepNodeIndex`; otherwise returns `None`.
- crate fn try_load_query_result<'tcx, T>(
+ pub fn try_load_query_result<'tcx, T>(
&self,
tcx: TyCtxt<'tcx>,
dep_node_index: SerializedDepNodeIndex,
@@ -678,7 +665,7 @@
/// A decoder that can read from the incremental compilation cache. It is similar to the one
/// we use for crate metadata decoding in that it can rebase spans and eventually
/// will also handle things that contain `Ty` instances.
-crate struct CacheDecoder<'a, 'tcx> {
+pub struct CacheDecoder<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
opaque: opaque::Decoder<'a>,
source_map: &'a SourceMap,
@@ -918,7 +905,6 @@
// which means that the definition with this hash is guaranteed to
// still exist in the current compilation session.
Ok(d.tcx()
- .queries
.on_disk_cache
.as_ref()
.unwrap()
@@ -973,7 +959,7 @@
//- ENCODING -------------------------------------------------------------------
-trait OpaqueEncoder: Encoder {
+pub trait OpaqueEncoder: Encoder {
fn position(&self) -> usize;
}
@@ -985,7 +971,7 @@
}
/// An encoder that can write to the incremental compilation cache.
-struct CacheEncoder<'a, 'tcx, E: OpaqueEncoder> {
+pub struct CacheEncoder<'a, 'tcx, E: OpaqueEncoder> {
tcx: TyCtxt<'tcx>,
encoder: &'a mut E,
type_shorthands: FxHashMap<Ty<'tcx>, usize>,
@@ -1059,12 +1045,12 @@
E: 'a + OpaqueEncoder,
{
fn encode(&self, s: &mut CacheEncoder<'a, 'tcx, E>) -> Result<(), E::Error> {
- if *self == DUMMY_SP {
+ let span_data = self.data();
+ if self.is_dummy() {
TAG_PARTIAL_SPAN.encode(s)?;
- return SyntaxContext::root().encode(s);
+ return span_data.ctxt.encode(s);
}
- let span_data = self.data();
let pos = s.source_map.byte_pos_to_line_and_col(span_data.lo);
let partial_span = match &pos {
Some((file_lo, _, _)) => !file_lo.contains(span_data.hi),
@@ -1230,24 +1216,24 @@
}
}
-fn encode_query_results<'a, 'tcx, Q>(
- tcx: TyCtxt<'tcx>,
+pub fn encode_query_results<'a, 'tcx, CTX, Q>(
+ tcx: CTX,
encoder: &mut CacheEncoder<'a, 'tcx, FileEncoder>,
query_result_index: &mut EncodedQueryResultIndex,
) -> FileEncodeResult
where
- Q: super::QueryDescription<TyCtxt<'tcx>> + super::QueryAccessors<TyCtxt<'tcx>>,
+ CTX: QueryContext + 'tcx,
+ Q: super::QueryDescription<CTX> + super::QueryAccessors<CTX>,
Q::Value: Encodable<CacheEncoder<'a, 'tcx, FileEncoder>>,
{
let _timer = tcx
- .sess
- .prof
+ .dep_context()
+ .profiler()
.extra_verbose_generic_activity("encode_query_results_for", std::any::type_name::<Q>());
- let state = Q::query_state(tcx);
- assert!(state.all_inactive());
-
- state.iter_results(|results| {
+ assert!(Q::query_state(tcx).all_inactive());
+ let cache = Q::query_cache(tcx);
+ cache.iter_results(|results| {
for (key, value, dep_node) in results {
if Q::cache_on_disk(tcx, &key, Some(value)) {
let dep_node = SerializedDepNodeIndex::new(dep_node.index());
diff --git a/compiler/rustc_middle/src/ty/query/plumbing.rs b/compiler/rustc_middle/src/ty/query/plumbing.rs
deleted file mode 100644
index d0730bd..0000000
--- a/compiler/rustc_middle/src/ty/query/plumbing.rs
+++ /dev/null
@@ -1,576 +0,0 @@
-//! The implementation of the query system itself. This defines the macros that
-//! generate the actual methods on tcx which find and execute the provider,
-//! manage the caches, and so forth.
-
-use crate::dep_graph::DepGraph;
-use crate::ty::query::Query;
-use crate::ty::tls::{self, ImplicitCtxt};
-use crate::ty::{self, TyCtxt};
-use rustc_query_system::query::QueryContext;
-use rustc_query_system::query::{CycleError, QueryJobId, QueryJobInfo};
-
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
-use rustc_span::def_id::DefId;
-use rustc_span::Span;
-
-impl QueryContext for TyCtxt<'tcx> {
- type Query = Query<'tcx>;
-
- fn incremental_verify_ich(&self) -> bool {
- self.sess.opts.debugging_opts.incremental_verify_ich
- }
- fn verbose(&self) -> bool {
- self.sess.verbose()
- }
-
- fn def_path_str(&self, def_id: DefId) -> String {
- TyCtxt::def_path_str(*self, def_id)
- }
-
- fn dep_graph(&self) -> &DepGraph {
- &self.dep_graph
- }
-
- fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
- tls::with_related_context(*self, |icx| icx.query)
- }
-
- fn try_collect_active_jobs(
- &self,
- ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self::DepKind, Self::Query>>>
- {
- self.queries.try_collect_active_jobs()
- }
-
- /// Executes a job by changing the `ImplicitCtxt` to point to the
- /// new query job while it executes. It returns the diagnostics
- /// captured during execution and the actual result.
- #[inline(always)]
- fn start_query<R>(
- &self,
- token: QueryJobId<Self::DepKind>,
- diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
- compute: impl FnOnce(Self) -> R,
- ) -> R {
- // The `TyCtxt` stored in TLS has the same global interner lifetime
- // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
- // when accessing the `ImplicitCtxt`.
- tls::with_related_context(*self, move |current_icx| {
- // Update the `ImplicitCtxt` to point to our new query job.
- let new_icx = ImplicitCtxt {
- tcx: *self,
- query: Some(token),
- diagnostics,
- layout_depth: current_icx.layout_depth,
- task_deps: current_icx.task_deps,
- };
-
- // Use the `ImplicitCtxt` while we execute the query.
- tls::enter_context(&new_icx, |_| {
- rustc_data_structures::stack::ensure_sufficient_stack(|| compute(*self))
- })
- })
- }
-}
-
-impl<'tcx> TyCtxt<'tcx> {
- #[inline(never)]
- #[cold]
- pub(super) fn report_cycle(
- self,
- CycleError { usage, cycle: stack }: CycleError<Query<'tcx>>,
- ) -> DiagnosticBuilder<'tcx> {
- assert!(!stack.is_empty());
-
- let fix_span = |span: Span, query: &Query<'tcx>| {
- self.sess.source_map().guess_head_span(query.default_span(self, span))
- };
-
- // Disable naming impls with types in this path, since that
- // sometimes cycles itself, leading to extra cycle errors.
- // (And cycle errors around impls tend to occur during the
- // collect/coherence phases anyhow.)
- ty::print::with_forced_impl_filename_line(|| {
- let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
- let mut err = struct_span_err!(
- self.sess,
- span,
- E0391,
- "cycle detected when {}",
- stack[0].query.describe(self)
- );
-
- for i in 1..stack.len() {
- let query = &stack[i].query;
- let span = fix_span(stack[(i + 1) % stack.len()].span, query);
- err.span_note(span, &format!("...which requires {}...", query.describe(self)));
- }
-
- err.note(&format!(
- "...which again requires {}, completing the cycle",
- stack[0].query.describe(self)
- ));
-
- if let Some((span, query)) = usage {
- err.span_note(
- fix_span(span, &query),
- &format!("cycle used when {}", query.describe(self)),
- );
- }
-
- err
- })
- }
-
- pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
- eprintln!("query stack during panic:");
-
- // Be careful relying on global state here: this code is called from
- // a panic hook, which means that the global `Handler` may be in a weird
- // state if it was responsible for triggering the panic.
- let mut i = 0;
- ty::tls::with_context_opt(|icx| {
- if let Some(icx) = icx {
- let query_map = icx.tcx.queries.try_collect_active_jobs();
-
- let mut current_query = icx.query;
-
- while let Some(query) = current_query {
- if Some(i) == num_frames {
- break;
- }
- let query_info =
- if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
- info
- } else {
- break;
- };
- let mut diag = Diagnostic::new(
- Level::FailureNote,
- &format!(
- "#{} [{}] {}",
- i,
- query_info.info.query.name(),
- query_info.info.query.describe(icx.tcx)
- ),
- );
- diag.span =
- icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into();
- handler.force_print_diagnostic(diag);
-
- current_query = query_info.job.parent;
- i += 1;
- }
- }
- });
-
- if num_frames == None || num_frames >= Some(i) {
- eprintln!("end of query stack");
- } else {
- eprintln!("we're just showing a limited slice of the query stack");
- }
- }
-}
-
-macro_rules! handle_cycle_error {
- ([][$tcx: expr, $error:expr]) => {{
- $tcx.report_cycle($error).emit();
- Value::from_cycle_error($tcx)
- }};
- ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
- $tcx.report_cycle($error).emit();
- $tcx.sess.abort_if_errors();
- unreachable!()
- }};
- ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
- $tcx.report_cycle($error).delay_as_bug();
- Value::from_cycle_error($tcx)
- }};
- ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
- handle_cycle_error!([$($($modifiers)*)*][$($args)*])
- };
-}
-
-macro_rules! is_anon {
- ([]) => {{
- false
- }};
- ([anon $($rest:tt)*]) => {{
- true
- }};
- ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
- is_anon!([$($($modifiers)*)*])
- };
-}
-
-macro_rules! is_eval_always {
- ([]) => {{
- false
- }};
- ([eval_always $($rest:tt)*]) => {{
- true
- }};
- ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
- is_eval_always!([$($($modifiers)*)*])
- };
-}
-
-macro_rules! query_storage {
- ([][$K:ty, $V:ty]) => {
- <<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache
- };
- ([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
- <$ty as CacheSelector<$K, $V>>::Cache
- };
- ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
- query_storage!([$($($modifiers)*)*][$($args)*])
- };
-}
-
-macro_rules! hash_result {
- ([][$hcx:expr, $result:expr]) => {{
- dep_graph::hash_result($hcx, &$result)
- }};
- ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
- None
- }};
- ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
- hash_result!([$($($modifiers)*)*][$($args)*])
- };
-}
-
-macro_rules! query_helper_param_ty {
- (DefId) => { impl IntoQueryParam<DefId> };
- ($K:ty) => { $K };
-}
-
-macro_rules! define_queries {
- (<$tcx:tt>
- $($(#[$attr:meta])*
- [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
-
- use std::mem;
- use crate::{
- rustc_data_structures::stable_hasher::HashStable,
- rustc_data_structures::stable_hasher::StableHasher,
- ich::StableHashingContext
- };
-
- define_queries_struct! {
- tcx: $tcx,
- input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
- }
-
- #[allow(nonstandard_style)]
- #[derive(Clone, Debug)]
- pub enum Query<$tcx> {
- $($(#[$attr])* $name($($K)*)),*
- }
-
- impl<$tcx> Query<$tcx> {
- pub fn name(&self) -> &'static str {
- match *self {
- $(Query::$name(_) => stringify!($name),)*
- }
- }
-
- pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> {
- let (r, name) = match *self {
- $(Query::$name(key) => {
- (queries::$name::describe(tcx, key), stringify!($name))
- })*
- };
- if tcx.sess.verbose() {
- format!("{} [{}]", r, name).into()
- } else {
- r
- }
- }
-
- // FIXME(eddyb) Get more valid `Span`s on queries.
- pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
- if !span.is_dummy() {
- return span;
- }
- // The `def_span` query is used to calculate `default_span`,
- // so exit to avoid infinite recursion.
- if let Query::def_span(..) = *self {
- return span
- }
- match *self {
- $(Query::$name(key) => key.default_span(tcx),)*
- }
- }
- }
-
- impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
- fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
- mem::discriminant(self).hash_stable(hcx, hasher);
- match *self {
- $(Query::$name(key) => key.hash_stable(hcx, hasher),)*
- }
- }
- }
-
- #[allow(nonstandard_style)]
- pub mod queries {
- use std::marker::PhantomData;
-
- $(pub struct $name<$tcx> {
- data: PhantomData<&$tcx ()>
- })*
- }
-
- // HACK(eddyb) this is like the `impl QueryConfig for queries::$name`
- // below, but using type aliases instead of associated types, to bypass
- // the limitations around normalizing under HRTB - for example, this:
- // `for<'tcx> fn(...) -> <queries::$name<'tcx> as QueryConfig<TyCtxt<'tcx>>>::Value`
- // doesn't currently normalize to `for<'tcx> fn(...) -> query_values::$name<'tcx>`.
- // This is primarily used by the `provide!` macro in `rustc_metadata`.
- #[allow(nonstandard_style, unused_lifetimes)]
- pub mod query_keys {
- use super::*;
-
- $(pub type $name<$tcx> = $($K)*;)*
- }
- #[allow(nonstandard_style, unused_lifetimes)]
- pub mod query_values {
- use super::*;
-
- $(pub type $name<$tcx> = $V;)*
- }
-
- $(impl<$tcx> QueryConfig for queries::$name<$tcx> {
- type Key = $($K)*;
- type Value = $V;
- type Stored = <
- query_storage!([$($modifiers)*][$($K)*, $V])
- as QueryStorage
- >::Stored;
- const NAME: &'static str = stringify!($name);
- }
-
- impl<$tcx> QueryAccessors<TyCtxt<$tcx>> for queries::$name<$tcx> {
- const ANON: bool = is_anon!([$($modifiers)*]);
- const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
- const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$name;
-
- type Cache = query_storage!([$($modifiers)*][$($K)*, $V]);
-
- #[inline(always)]
- fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query, Self::Cache> {
- &tcx.queries.$name
- }
-
- #[inline]
- fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
- let provider = tcx.queries.providers.get(key.query_crate())
- // HACK(eddyb) it's possible crates may be loaded after
- // the query engine is created, and because crate loading
- // is not yet integrated with the query engine, such crates
- // would be missing appropriate entries in `providers`.
- .unwrap_or(&tcx.queries.fallback_extern_providers)
- .$name;
- provider(tcx, key)
- }
-
- fn hash_result(
- _hcx: &mut StableHashingContext<'_>,
- _result: &Self::Value
- ) -> Option<Fingerprint> {
- hash_result!([$($modifiers)*][_hcx, _result])
- }
-
- fn handle_cycle_error(
- tcx: TyCtxt<'tcx>,
- error: CycleError<Query<'tcx>>
- ) -> Self::Value {
- handle_cycle_error!([$($modifiers)*][tcx, error])
- }
- })*
-
- #[derive(Copy, Clone)]
- pub struct TyCtxtEnsure<'tcx> {
- pub tcx: TyCtxt<'tcx>,
- }
-
- impl TyCtxtEnsure<$tcx> {
- $($(#[$attr])*
- #[inline(always)]
- pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
- ensure_query::<queries::$name<'_>, _>(self.tcx, key.into_query_param())
- })*
- }
-
- #[derive(Copy, Clone)]
- pub struct TyCtxtAt<'tcx> {
- pub tcx: TyCtxt<'tcx>,
- pub span: Span,
- }
-
- impl Deref for TyCtxtAt<'tcx> {
- type Target = TyCtxt<'tcx>;
- #[inline(always)]
- fn deref(&self) -> &Self::Target {
- &self.tcx
- }
- }
-
- impl TyCtxt<$tcx> {
- /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
- /// are executed instead of just returning their results.
- #[inline(always)]
- pub fn ensure(self) -> TyCtxtEnsure<$tcx> {
- TyCtxtEnsure {
- tcx: self,
- }
- }
-
- /// Returns a transparent wrapper for `TyCtxt` which uses
- /// `span` as the location of queries performed through it.
- #[inline(always)]
- pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
- TyCtxtAt {
- tcx: self,
- span
- }
- }
-
- $($(#[$attr])*
- #[inline(always)]
- #[must_use]
- pub fn $name(self, key: query_helper_param_ty!($($K)*))
- -> <queries::$name<$tcx> as QueryConfig>::Stored
- {
- self.at(DUMMY_SP).$name(key.into_query_param())
- })*
-
- /// All self-profiling events generated by the query engine use
- /// virtual `StringId`s for their `event_id`. This method makes all
- /// those virtual `StringId`s point to actual strings.
- ///
- /// If we are recording only summary data, the ids will point to
- /// just the query names. If we are recording query keys too, we
- /// allocate the corresponding strings here.
- pub fn alloc_self_profile_query_strings(self) {
- use crate::ty::query::profiling_support::{
- alloc_self_profile_query_strings_for_query_cache,
- QueryKeyStringCache,
- };
-
- if !self.prof.enabled() {
- return;
- }
-
- let mut string_cache = QueryKeyStringCache::new();
-
- $({
- alloc_self_profile_query_strings_for_query_cache(
- self,
- stringify!($name),
- &self.queries.$name,
- &mut string_cache,
- );
- })*
- }
- }
-
- impl TyCtxtAt<$tcx> {
- $($(#[$attr])*
- #[inline(always)]
- pub fn $name(self, key: query_helper_param_ty!($($K)*))
- -> <queries::$name<$tcx> as QueryConfig>::Stored
- {
- get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param())
- })*
- }
-
- define_provider_struct! {
- tcx: $tcx,
- input: ($(([$($modifiers)*] [$name] [$($K)*] [$V]))*)
- }
-
- impl Copy for Providers {}
- impl Clone for Providers {
- fn clone(&self) -> Self { *self }
- }
- }
-}
-
-// FIXME(eddyb) this macro (and others?) use `$tcx` and `'tcx` interchangeably.
-// We should either not take `$tcx` at all and use `'tcx` everywhere, or use
-// `$tcx` everywhere (even if that isn't necessary due to lack of hygiene).
-macro_rules! define_queries_struct {
- (tcx: $tcx:tt,
- input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
- pub struct Queries<$tcx> {
- /// This provides access to the incremental compilation on-disk cache for query results.
- /// Do not access this directly. It is only meant to be used by
- /// `DepGraph::try_mark_green()` and the query infrastructure.
- /// This is `None` if we are not incremental compilation mode
- pub(crate) on_disk_cache: Option<OnDiskCache<'tcx>>,
-
- providers: IndexVec<CrateNum, Providers>,
- fallback_extern_providers: Box<Providers>,
-
- $($(#[$attr])* $name: QueryState<
- crate::dep_graph::DepKind,
- <TyCtxt<$tcx> as QueryContext>::Query,
- <queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
- >,)*
- }
-
- impl<$tcx> Queries<$tcx> {
- pub(crate) fn new(
- providers: IndexVec<CrateNum, Providers>,
- fallback_extern_providers: Providers,
- on_disk_cache: Option<OnDiskCache<'tcx>>,
- ) -> Self {
- Queries {
- providers,
- fallback_extern_providers: Box::new(fallback_extern_providers),
- on_disk_cache,
- $($name: Default::default()),*
- }
- }
-
- pub(crate) fn try_collect_active_jobs(
- &self
- ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query>>> {
- let mut jobs = FxHashMap::default();
-
- $(
- self.$name.try_collect_active_jobs(
- <queries::$name<'tcx> as QueryAccessors<TyCtxt<'tcx>>>::DEP_KIND,
- Query::$name,
- &mut jobs,
- )?;
- )*
-
- Some(jobs)
- }
- }
- };
-}
-
-macro_rules! define_provider_struct {
- (tcx: $tcx:tt,
- input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => {
- pub struct Providers {
- $(pub $name: for<$tcx> fn(TyCtxt<$tcx>, $K) -> $R,)*
- }
-
- impl Default for Providers {
- fn default() -> Self {
- $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R {
- bug!("`tcx.{}({:?})` unsupported by its crate",
- stringify!($name), key);
- })*
- Providers { $($name),* }
- }
- }
- };
-}
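
The removed `define_queries*` macros generate, for every query, a per-query cache plus the `TyCtxt::$name`, `TyCtxt::at(span).$name`, and `TyCtxt::ensure().$name` entry points that all funnel into the same memoised provider. As a rough, hypothetical sketch (names are illustrative, not rustc's API), the pattern they expand to is keyed memoisation over provider functions; the real engine additionally tracks dependency edges, cycles, and on-disk caching, which this toy cache omits:

```rust
use std::collections::HashMap;
use std::hash::Hash;

// Hypothetical toy cache: each "query" maps a key to a result that is computed
// at most once by a provider function, then served from the cache.
struct QueryCache<K, V> {
    cache: HashMap<K, V>,
}

impl<K: Hash + Eq, V: Clone> QueryCache<K, V> {
    fn new() -> Self {
        Self { cache: HashMap::new() }
    }

    fn get_or_compute(&mut self, key: K, provider: impl FnOnce(&K) -> V) -> V {
        if let Some(cached) = self.cache.get(&key) {
            return cached.clone();
        }
        let value = provider(&key);
        self.cache.insert(key, value.clone());
        value
    }
}

fn main() {
    let mut type_of: QueryCache<&str, String> = QueryCache::new();
    // The closure plays the role of a `Providers` entry; a second call with the
    // same key would hit the cache instead of running the provider again.
    let ty = type_of.get_or_compute("std::vec::Vec", |_| "struct".to_string());
    assert_eq!(ty, "struct");
}
```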
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
index 293b3c6..b41bf70 100644
--- a/compiler/rustc_middle/src/ty/relate.rs
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -4,7 +4,7 @@
//! types or regions but can be other things. Examples of type relations are
//! subtyping, type equality, etc.
-use crate::mir::interpret::{get_slice_bytes, ConstValue};
+use crate::mir::interpret::{get_slice_bytes, ConstValue, GlobalAlloc, Scalar};
use crate::ty::error::{ExpectedFound, TypeError};
use crate::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable};
@@ -154,7 +154,7 @@
relation.relate_with_variance(variance, a, b)
});
- Ok(tcx.mk_substs(params)?)
+ tcx.mk_substs(params)
}
impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> {
@@ -421,12 +421,14 @@
let t = relation.relate(a_t, b_t)?;
match relation.relate(sz_a, sz_b) {
Ok(sz) => Ok(tcx.mk_ty(ty::Array(t, sz))),
- // FIXME(#72219) Implement improved diagnostics for mismatched array
- // length?
- Err(err) if relation.tcx().lazy_normalization() => Err(err),
Err(err) => {
// Check whether the lengths are both concrete/known values,
// but are unequal, for better diagnostics.
+ //
+            // It might seem dubious to eagerly evaluate these constants here;
+            // however, we cannot end up with errors in `Relate` during both
+            // `type_of` and `predicates_of`, which means that evaluating the
+            // constants should not cause cycle errors here.
let sz_a = sz_a.try_eval_usize(tcx, relation.param_env());
let sz_b = sz_b.try_eval_usize(tcx, relation.param_env());
match (sz_a, sz_b) {
@@ -496,104 +498,44 @@
debug!("{}.super_relate_consts(a = {:?}, b = {:?})", relation.tag(), a, b);
let tcx = relation.tcx();
- let eagerly_eval = |x: &'tcx ty::Const<'tcx>| x.eval(tcx, relation.param_env()).val;
+ // FIXME(oli-obk): once const generics can have generic types, this assertion
+ // will likely get triggered. Move to `normalize_erasing_regions` at that point.
+ let a_ty = tcx.erase_regions(a.ty);
+ let b_ty = tcx.erase_regions(b.ty);
+ if a_ty != b_ty {
+ relation.tcx().sess.delay_span_bug(
+ DUMMY_SP,
+ &format!("cannot relate constants of different types: {} != {}", a_ty, b_ty),
+ );
+ }
- // FIXME(eddyb) doesn't look like everything below checks that `a.ty == b.ty`.
- // We could probably always assert it early, as const generic parameters
- // are not allowed to depend on other generic parameters, i.e. are concrete.
- // (although there could be normalization differences)
+ let eagerly_eval = |x: &'tcx ty::Const<'tcx>| x.eval(tcx, relation.param_env());
+ let a = eagerly_eval(a);
+ let b = eagerly_eval(b);
// Currently, the values that can be unified are primitive types,
// and those that derive both `PartialEq` and `Eq`, corresponding
// to structural-match types.
- let new_const_val = match (eagerly_eval(a), eagerly_eval(b)) {
+ let is_match = match (a.val, b.val) {
(ty::ConstKind::Infer(_), _) | (_, ty::ConstKind::Infer(_)) => {
// The caller should handle these cases!
bug!("var types encountered in super_relate_consts: {:?} {:?}", a, b)
}
- (ty::ConstKind::Error(d), _) | (_, ty::ConstKind::Error(d)) => Ok(ty::ConstKind::Error(d)),
+ (ty::ConstKind::Error(_), _) => return Ok(a),
+ (_, ty::ConstKind::Error(_)) => return Ok(b),
- (ty::ConstKind::Param(a_p), ty::ConstKind::Param(b_p)) if a_p.index == b_p.index => {
- return Ok(a);
- }
- (ty::ConstKind::Placeholder(p1), ty::ConstKind::Placeholder(p2)) if p1 == p2 => {
- return Ok(a);
- }
+ (ty::ConstKind::Param(a_p), ty::ConstKind::Param(b_p)) => a_p.index == b_p.index,
+ (ty::ConstKind::Placeholder(p1), ty::ConstKind::Placeholder(p2)) => p1 == p2,
(ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => {
- let new_val = match (a_val, b_val) {
- (ConstValue::Scalar(a_val), ConstValue::Scalar(b_val)) if a.ty == b.ty => {
- if a_val == b_val {
- Ok(ConstValue::Scalar(a_val))
- } else if let ty::FnPtr(_) = a.ty.kind() {
- let a_instance = tcx.global_alloc(a_val.assert_ptr().alloc_id).unwrap_fn();
- let b_instance = tcx.global_alloc(b_val.assert_ptr().alloc_id).unwrap_fn();
- if a_instance == b_instance {
- Ok(ConstValue::Scalar(a_val))
- } else {
- Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
- }
- } else {
- Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
- }
- }
-
- (ConstValue::Slice { .. }, ConstValue::Slice { .. }) => {
- let a_bytes = get_slice_bytes(&tcx, a_val);
- let b_bytes = get_slice_bytes(&tcx, b_val);
- if a_bytes == b_bytes {
- Ok(a_val)
- } else {
- Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
- }
- }
-
- (ConstValue::ByRef { .. }, ConstValue::ByRef { .. }) => {
- match a.ty.kind() {
- ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => {
- let a_destructured = tcx.destructure_const(relation.param_env().and(a));
- let b_destructured = tcx.destructure_const(relation.param_env().and(b));
-
- // Both the variant and each field have to be equal.
- if a_destructured.variant == b_destructured.variant {
- for (a_field, b_field) in
- a_destructured.fields.iter().zip(b_destructured.fields.iter())
- {
- relation.consts(a_field, b_field)?;
- }
-
- Ok(a_val)
- } else {
- Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
- }
- }
- // FIXME(const_generics): There are probably some `TyKind`s
- // which should be handled here.
- _ => {
- tcx.sess.delay_span_bug(
- DUMMY_SP,
- &format!("unexpected consts: a: {:?}, b: {:?}", a, b),
- );
- Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
- }
- }
- }
-
- _ => Err(TypeError::ConstMismatch(expected_found(relation, a, b))),
- };
-
- new_val.map(ty::ConstKind::Value)
+ check_const_value_eq(relation, a_val, b_val, a, b)?
}
(
ty::ConstKind::Unevaluated(a_def, a_substs, None),
ty::ConstKind::Unevaluated(b_def, b_substs, None),
) if tcx.features().const_evaluatable_checked && !relation.visit_ct_substs() => {
- if tcx.try_unify_abstract_consts(((a_def, a_substs), (b_def, b_substs))) {
- Ok(a.val)
- } else {
- Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
- }
+ tcx.try_unify_abstract_consts(((a_def, a_substs), (b_def, b_substs)))
}
// While this is slightly incorrect, it shouldn't matter for `min_const_generics`
@@ -605,11 +547,64 @@
) if a_def == b_def && a_promoted == b_promoted => {
let substs =
relation.relate_with_variance(ty::Variance::Invariant, a_substs, b_substs)?;
- Ok(ty::ConstKind::Unevaluated(a_def, substs, a_promoted))
+ return Ok(tcx.mk_const(ty::Const {
+ val: ty::ConstKind::Unevaluated(a_def, substs, a_promoted),
+ ty: a.ty,
+ }));
}
- _ => Err(TypeError::ConstMismatch(expected_found(relation, a, b))),
+ _ => false,
};
- new_const_val.map(|val| tcx.mk_const(ty::Const { val, ty: a.ty }))
+ if is_match { Ok(a) } else { Err(TypeError::ConstMismatch(expected_found(relation, a, b))) }
+}
+
+fn check_const_value_eq<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a_val: ConstValue<'tcx>,
+ b_val: ConstValue<'tcx>,
+ // FIXME(oli-obk): these arguments should go away with valtrees
+ a: &'tcx ty::Const<'tcx>,
+ b: &'tcx ty::Const<'tcx>,
+ // FIXME(oli-obk): this should just be `bool` with valtrees
+) -> RelateResult<'tcx, bool> {
+ let tcx = relation.tcx();
+ Ok(match (a_val, b_val) {
+ (ConstValue::Scalar(Scalar::Int(a_val)), ConstValue::Scalar(Scalar::Int(b_val))) => {
+ a_val == b_val
+ }
+ (ConstValue::Scalar(Scalar::Ptr(a_val)), ConstValue::Scalar(Scalar::Ptr(b_val))) => {
+ a_val == b_val
+ || match (tcx.global_alloc(a_val.alloc_id), tcx.global_alloc(b_val.alloc_id)) {
+ (GlobalAlloc::Function(a_instance), GlobalAlloc::Function(b_instance)) => {
+ a_instance == b_instance
+ }
+ _ => false,
+ }
+ }
+
+ (ConstValue::Slice { .. }, ConstValue::Slice { .. }) => {
+ get_slice_bytes(&tcx, a_val) == get_slice_bytes(&tcx, b_val)
+ }
+
+ (ConstValue::ByRef { .. }, ConstValue::ByRef { .. }) => {
+ let a_destructured = tcx.destructure_const(relation.param_env().and(a));
+ let b_destructured = tcx.destructure_const(relation.param_env().and(b));
+
+ // Both the variant and each field have to be equal.
+ if a_destructured.variant == b_destructured.variant {
+ for (a_field, b_field) in
+ a_destructured.fields.iter().zip(b_destructured.fields.iter())
+ {
+ relation.consts(a_field, b_field)?;
+ }
+
+ true
+ } else {
+ false
+ }
+ }
+
+ _ => false,
+ })
}
impl<'tcx> Relate<'tcx> for &'tcx ty::List<ty::Binder<ty::ExistentialPredicate<'tcx>>> {
@@ -647,7 +642,7 @@
_ => Err(TypeError::ExistentialMismatch(expected_found(relation, a, b))),
}
});
- Ok(tcx.mk_poly_existential_predicates(v)?)
+ tcx.mk_poly_existential_predicates(v)
}
}
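
After this refactor, `super_relate_consts` computes a single `is_match: bool` and the new `check_const_value_eq` helper performs a plain structural comparison: scalars and slices compare by value, `ByRef` aggregates compare variant-by-variant and field-by-field. A minimal, self-contained sketch of that comparison shape over a toy value type (purely illustrative, not the compiler's types):

```rust
// Toy model of structural equality over evaluated constant values: scalars and
// byte slices compare directly, by-ref aggregates compare variant and fields.
#[derive(Clone)]
enum Value {
    Scalar(u128),
    Bytes(Vec<u8>),
    Aggregate { variant: usize, fields: Vec<Value> },
}

fn values_eq(a: &Value, b: &Value) -> bool {
    match (a, b) {
        (Value::Scalar(x), Value::Scalar(y)) => x == y,
        (Value::Bytes(x), Value::Bytes(y)) => x == y,
        (
            Value::Aggregate { variant: va, fields: fa },
            Value::Aggregate { variant: vb, fields: fb },
        ) => {
            // Both the variant and each field have to be equal.
            va == vb && fa.len() == fb.len() && fa.iter().zip(fb).all(|(x, y)| values_eq(x, y))
        }
        // Mismatched kinds never unify.
        _ => false,
    }
}

fn main() {
    let a = Value::Aggregate {
        variant: 0,
        fields: vec![Value::Scalar(1), Value::Bytes(vec![2, 3])],
    };
    let b = a.clone();
    assert!(values_eq(&a, &b));
}
```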
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
index c1fa84d..2cd969d 100644
--- a/compiler/rustc_middle/src/ty/sty.rs
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -17,7 +17,7 @@
use rustc_hir::def_id::DefId;
use rustc_index::vec::Idx;
use rustc_macros::HashStable;
-use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_span::symbol::{kw, Symbol};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi;
use std::borrow::Cow;
@@ -231,7 +231,7 @@
}
// `TyKind` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(TyKind<'_>, 24);
/// A closure can be modeled as a struct that looks like:
@@ -1112,36 +1112,35 @@
}
impl<'tcx> ProjectionTy<'tcx> {
- /// Construct a `ProjectionTy` by searching the trait from `trait_ref` for the
- /// associated item named `item_name`.
- pub fn from_ref_and_name(
- tcx: TyCtxt<'_>,
- trait_ref: ty::TraitRef<'tcx>,
- item_name: Ident,
- ) -> ProjectionTy<'tcx> {
- let item_def_id = tcx
- .associated_items(trait_ref.def_id)
- .find_by_name_and_kind(tcx, item_name, ty::AssocKind::Type, trait_ref.def_id)
- .unwrap()
- .def_id;
+ pub fn trait_def_id(&self, tcx: TyCtxt<'tcx>) -> DefId {
+ tcx.associated_item(self.item_def_id).container.id()
+ }
- ProjectionTy { substs: trait_ref.substs, item_def_id }
+ /// Extracts the underlying trait reference and own substs from this projection.
+ /// For example, if this is a projection of `<T as StreamingIterator>::Item<'a>`,
+    /// then this function would return a `T: StreamingIterator` trait reference and `['a]` as the own substs.
+ pub fn trait_ref_and_own_substs(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ ) -> (ty::TraitRef<'tcx>, &'tcx [ty::GenericArg<'tcx>]) {
+ let def_id = tcx.associated_item(self.item_def_id).container.id();
+ let trait_generics = tcx.generics_of(def_id);
+ (
+ ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, trait_generics) },
+ &self.substs[trait_generics.count()..],
+ )
}
/// Extracts the underlying trait reference from this projection.
/// For example, if this is a projection of `<T as Iterator>::Item`,
/// then this function would return a `T: Iterator` trait reference.
+ ///
+    /// WARNING: This will drop the substs for generic associated types;
+    /// consider calling [Self::trait_ref_and_own_substs] to get those
+ /// as well.
pub fn trait_ref(&self, tcx: TyCtxt<'tcx>) -> ty::TraitRef<'tcx> {
- // FIXME: This method probably shouldn't exist at all, since it's not
- // clear what this method really intends to do. Be careful when
- // using this method since the resulting TraitRef additionally
- // contains the substs for the assoc_item, which strictly speaking
- // is not correct
- let def_id = tcx.associated_item(self.item_def_id).container.id();
- // Include substitutions for generic arguments of associated types
- let assoc_item = tcx.associated_item(self.item_def_id);
- let substs_assoc_item = self.substs.truncate_to(tcx, tcx.generics_of(assoc_item.def_id));
- ty::TraitRef { def_id, substs: substs_assoc_item }
+ let def_id = self.trait_def_id(tcx);
+ ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, tcx.generics_of(def_id)) }
}
pub fn self_ty(&self) -> Ty<'tcx> {
@@ -1257,6 +1256,7 @@
ParamTy::new(def.index, def.name)
}
+ #[inline]
pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
tcx.mk_ty_param(self.index, self.name)
}
@@ -1493,12 +1493,11 @@
/// For example, if this is a projection of `exists T. <T as Iterator>::Item == X`,
/// then this function would return a `exists T. T: Iterator` existential trait
/// reference.
- pub fn trait_ref(&self, tcx: TyCtxt<'_>) -> ty::ExistentialTraitRef<'tcx> {
- // FIXME(generic_associated_types): substs is the substs of the
- // associated type, which should be truncated to get the correct substs
- // for the trait.
+ pub fn trait_ref(&self, tcx: TyCtxt<'tcx>) -> ty::ExistentialTraitRef<'tcx> {
let def_id = tcx.associated_item(self.item_def_id).container.id();
- ty::ExistentialTraitRef { def_id, substs: self.substs }
+ let subst_count = tcx.generics_of(def_id).count() - 1;
+ let substs = tcx.intern_substs(&self.substs[..subst_count]);
+ ty::ExistentialTraitRef { def_id, substs }
}
pub fn with_self_ty(
@@ -1517,6 +1516,20 @@
ty: self.ty,
}
}
+
+ pub fn erase_self_ty(
+ tcx: TyCtxt<'tcx>,
+ projection_predicate: ty::ProjectionPredicate<'tcx>,
+ ) -> Self {
+ // Assert there is a Self.
+ projection_predicate.projection_ty.substs.type_at(0);
+
+ Self {
+ item_def_id: projection_predicate.projection_ty.item_def_id,
+ substs: tcx.intern_substs(&projection_predicate.projection_ty.substs[1..]),
+ ty: projection_predicate.ty,
+ }
+ }
}
impl<'tcx> PolyExistentialProjection<'tcx> {
@@ -1549,14 +1562,17 @@
}
}
+ #[inline]
pub fn is_late_bound(&self) -> bool {
matches!(*self, ty::ReLateBound(..))
}
+ #[inline]
pub fn is_placeholder(&self) -> bool {
matches!(*self, ty::RePlaceholder(..))
}
+ #[inline]
pub fn bound_at_or_above_binder(&self, index: ty::DebruijnIndex) -> bool {
match *self {
ty::ReLateBound(debruijn, _) => debruijn >= index,
@@ -1685,53 +1701,6 @@
matches!(self.kind(), Never)
}
- /// Checks whether a type is definitely uninhabited. This is
- /// conservative: for some types that are uninhabited we return `false`,
- /// but we only return `true` for types that are definitely uninhabited.
- /// `ty.conservative_is_privately_uninhabited` implies that any value of type `ty`
- /// will be `Abi::Uninhabited`. (Note that uninhabited types may have nonzero
- /// size, to account for partial initialisation. See #49298 for details.)
- pub fn conservative_is_privately_uninhabited(&self, tcx: TyCtxt<'tcx>) -> bool {
- // FIXME(varkor): we can make this less conversative by substituting concrete
- // type arguments.
- match self.kind() {
- ty::Never => true,
- ty::Adt(def, _) if def.is_union() => {
- // For now, `union`s are never considered uninhabited.
- false
- }
- ty::Adt(def, _) => {
- // Any ADT is uninhabited if either:
- // (a) It has no variants (i.e. an empty `enum`);
- // (b) Each of its variants (a single one in the case of a `struct`) has at least
- // one uninhabited field.
- def.variants.iter().all(|var| {
- var.fields.iter().any(|field| {
- tcx.type_of(field.did).conservative_is_privately_uninhabited(tcx)
- })
- })
- }
- ty::Tuple(..) => {
- self.tuple_fields().any(|ty| ty.conservative_is_privately_uninhabited(tcx))
- }
- ty::Array(ty, len) => {
- match len.try_eval_usize(tcx, ParamEnv::empty()) {
- Some(0) | None => false,
- // If the array is definitely non-empty, it's uninhabited if
- // the type of its elements is uninhabited.
- Some(1..) => ty.conservative_is_privately_uninhabited(tcx),
- }
- }
- ty::Ref(..) => {
- // References to uninitialised memory is valid for any type, including
- // uninhabited types, in unsafe code, so we treat all references as
- // inhabited.
- false
- }
- _ => false,
- }
- }
-
#[inline]
pub fn is_primitive(&self) -> bool {
self.kind().is_primitive()
@@ -1845,6 +1814,15 @@
)
}
+ /// Get the mutability of the reference or `None` when not a reference
+ #[inline]
+ pub fn ref_mutability(&self) -> Option<hir::Mutability> {
+ match self.kind() {
+ Ref(_, _, mutability) => Some(*mutability),
+ _ => None,
+ }
+ }
+
#[inline]
pub fn is_unsafe_ptr(&self) -> bool {
matches!(self.kind(), RawPtr(_))
@@ -2124,6 +2102,54 @@
}
}
+ /// Returns the type of metadata for (potentially fat) pointers to this type.
+ pub fn ptr_metadata_ty(&'tcx self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ // FIXME: should this normalize?
+ let tail = tcx.struct_tail_without_normalization(self);
+ match tail.kind() {
+ // Sized types
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(..)
+ | ty::Char
+ | ty::Ref(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Array(..)
+ | ty::Closure(..)
+ | ty::Never
+ | ty::Error(_)
+ | ty::Foreign(..)
+ // If returned by `struct_tail_without_normalization` this is a unit struct
+ // without any fields, or not a struct, and therefore is Sized.
+ | ty::Adt(..)
+ // If returned by `struct_tail_without_normalization` this is the empty tuple,
+ // a.k.a. unit type, which is Sized
+ | ty::Tuple(..) => tcx.types.unit,
+
+ ty::Str | ty::Slice(_) => tcx.types.usize,
+ ty::Dynamic(..) => {
+ let dyn_metadata = tcx.lang_items().dyn_metadata().unwrap();
+ tcx.type_of(dyn_metadata).subst(tcx, &[tail.into()])
+ },
+
+ ty::Projection(_)
+ | ty::Param(_)
+ | ty::Opaque(..)
+ | ty::Infer(ty::TyVar(_))
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`ptr_metadata_ty` applied to unexpected type: {:?}", tail)
+ }
+ }
+ }
+
/// When we create a closure, we record its kind (i.e., what trait
/// it implements) into its `ClosureSubsts` using a type
/// parameter. This is kind of a phantom type, except that the
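
The new `ptr_metadata_ty` maps a pointee type to the type of its pointer metadata: `()` for types with a `Sized` tail, `usize` for `str` and slices, and the `DynMetadata` lang item for trait objects. The effect is observable on stable Rust through pointer sizes (a rough illustration; inspecting the metadata types themselves needs the unstable `ptr_metadata` feature):

```rust
use std::mem::size_of;

fn main() {
    // Sized pointee: no metadata, a reference is one word.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
    // `str` and slices carry a `usize` length, so references to them are two words.
    assert_eq!(size_of::<&str>(), 2 * size_of::<usize>());
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
    // Trait objects carry a vtable pointer as their metadata.
    assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>());
}
```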
diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs
index f4d7eac..ce17a72 100644
--- a/compiler/rustc_middle/src/ty/trait_def.rs
+++ b/compiler/rustc_middle/src/ty/trait_def.rs
@@ -4,9 +4,8 @@
use crate::ty::fold::TypeFoldable;
use crate::ty::{Ty, TyCtxt};
use rustc_hir as hir;
-use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
use rustc_hir::definitions::DefPathHash;
-use rustc_hir::HirId;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@@ -201,7 +200,7 @@
pub(super) fn all_local_trait_impls<'tcx>(
tcx: TyCtxt<'tcx>,
krate: CrateNum,
-) -> &'tcx BTreeMap<DefId, Vec<HirId>> {
+) -> &'tcx BTreeMap<DefId, Vec<LocalDefId>> {
&tcx.hir_crate(krate).trait_impls
}
@@ -229,8 +228,8 @@
}
}
- for &hir_id in tcx.hir().trait_impls(trait_id) {
- let impl_def_id = tcx.hir().local_def_id(hir_id).to_def_id();
+ for &impl_def_id in tcx.hir().trait_impls(trait_id) {
+ let impl_def_id = impl_def_id.to_def_id();
let impl_self_ty = tcx.type_of(impl_def_id);
if impl_self_ty.references_error() {
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs
index 357a0dd..bb7fc66 100644
--- a/compiler/rustc_middle/src/ty/walk.rs
+++ b/compiler/rustc_middle/src/ty/walk.rs
@@ -13,7 +13,7 @@
pub struct TypeWalker<'tcx> {
stack: TypeWalkerStack<'tcx>,
last_subtree: usize,
- visited: SsoHashSet<GenericArg<'tcx>>,
+ pub visited: SsoHashSet<GenericArg<'tcx>>,
}
/// An iterator for walking the type tree.
diff --git a/compiler/rustc_mir/Cargo.toml b/compiler/rustc_mir/Cargo.toml
index 10dbf35..59a0c9a 100644
--- a/compiler/rustc_mir/Cargo.toml
+++ b/compiler/rustc_mir/Cargo.toml
@@ -31,7 +31,7 @@
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
rustc_apfloat = { path = "../rustc_apfloat" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
[dev-dependencies]
coverage_test_macros = { path = "src/transform/coverage/test_macros" }
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs
index cd16a88..eb942b1 100644
--- a/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs
@@ -8,19 +8,20 @@
use rustc_middle::mir::{
self, AggregateKind, BindingForm, BorrowKind, ClearCrossCrate, ConstraintCategory,
FakeReadCause, Local, LocalDecl, LocalInfo, LocalKind, Location, Operand, Place, PlaceRef,
- ProjectionElem, Rvalue, Statement, StatementKind, TerminatorKind, VarBindingForm,
+ ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, VarBindingForm,
};
-use rustc_middle::ty::{self, suggest_constraining_type_param, Ty};
+use rustc_middle::ty::{self, suggest_constraining_type_param, Ty, TypeFoldable};
use rustc_span::source_map::DesugaringKind;
-use rustc_span::Span;
+use rustc_span::symbol::sym;
+use rustc_span::{Span, DUMMY_SP};
use crate::dataflow::drop_flag_effects;
use crate::dataflow::indexes::{MoveOutIndex, MovePathIndex};
use crate::util::borrowck_errors;
use crate::borrow_check::{
- borrow_set::BorrowData, prefixes::IsPrefixOf, InitializationRequiringAction, MirBorrowckCtxt,
- PrefixSet, WriteKind,
+ borrow_set::BorrowData, diagnostics::Instance, prefixes::IsPrefixOf,
+ InitializationRequiringAction, MirBorrowckCtxt, PrefixSet, WriteKind,
};
use super::{
@@ -215,12 +216,13 @@
);
}
// Avoid pointing to the same function in multiple different
- // error messages
- if self.fn_self_span_reported.insert(self_arg.span) {
+ // error messages.
+ if span != DUMMY_SP && self.fn_self_span_reported.insert(self_arg.span)
+ {
err.span_note(
- self_arg.span,
- &format!("this function takes ownership of the receiver `self`, which moves {}", place_name)
- );
+ self_arg.span,
+ &format!("this function takes ownership of the receiver `self`, which moves {}", place_name)
+ );
}
}
// Deref::deref takes &self, which cannot cause a move
@@ -1268,6 +1270,29 @@
if return_span != borrow_span {
err.span_label(borrow_span, note);
+
+ let tcx = self.infcx.tcx;
+ let ty_params = ty::List::empty();
+
+ let return_ty = self.regioncx.universal_regions().unnormalized_output_ty;
+ let return_ty = tcx.erase_regions(return_ty);
+
+ // to avoid panics
+ if !return_ty.has_infer_types() {
+ if let Some(iter_trait) = tcx.get_diagnostic_item(sym::Iterator) {
+ if tcx.type_implements_trait((iter_trait, return_ty, ty_params, self.param_env))
+ {
+ if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(return_span) {
+ err.span_suggestion_hidden(
+ return_span,
+ "use `.collect()` to allocate the iterator",
+ format!("{}{}", snippet, ".collect::<Vec<_>>()"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
}
Some(err)
@@ -1543,9 +1568,43 @@
None,
);
+ self.explain_deref_coercion(loan, &mut err);
+
err.buffer(&mut self.errors_buffer);
}
+ fn explain_deref_coercion(&mut self, loan: &BorrowData<'tcx>, err: &mut DiagnosticBuilder<'_>) {
+ let tcx = self.infcx.tcx;
+ if let (
+ Some(Terminator { kind: TerminatorKind::Call { from_hir_call: false, .. }, .. }),
+ Some((method_did, method_substs)),
+ ) = (
+ &self.body[loan.reserve_location.block].terminator,
+ crate::util::find_self_call(
+ tcx,
+ self.body,
+ loan.assigned_place.local,
+ loan.reserve_location.block,
+ ),
+ ) {
+ if tcx.is_diagnostic_item(sym::deref_method, method_did) {
+ let deref_target =
+ tcx.get_diagnostic_item(sym::deref_target).and_then(|deref_target| {
+ Instance::resolve(tcx, self.param_env, deref_target, method_substs)
+ .transpose()
+ });
+ if let Some(Ok(instance)) = deref_target {
+ let deref_target_ty = instance.ty(tcx, self.param_env);
+ err.note(&format!(
+ "borrow occurs due to deref coercion to `{}`",
+ deref_target_ty
+ ));
+ err.span_note(tcx.def_span(instance.def_id()), "deref defined here");
+ }
+ }
+ }
+ }
+
/// Reports an illegal reassignment; for example, an assignment to
/// (part of) a non-`mut` local that occurs potentially after that
/// local has already been initialized. `place` is the path being
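
The added diagnostic checks whether the function's return type implements `Iterator` and, if so, suggests `.collect::<Vec<_>>()` when the returned iterator would borrow a local that is about to be dropped. A small sketch of the scenario with the suggested fix already applied (illustrative user code, not compiler code):

```rust
// Returning `v.iter().map(..)` directly would hand out an iterator that borrows
// the local `v`, which is dropped when the function returns; collecting into an
// owned `Vec` (the suggested fix) sidesteps the borrow.
fn doubled(v: Vec<u32>) -> Vec<u32> {
    v.iter().map(|x| x * 2).collect::<Vec<_>>()
}

fn main() {
    assert_eq!(doubled(vec![1, 2, 3]), vec![2, 4, 6]);
}
```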
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs
index 04ea3cb..ec561fa 100644
--- a/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs
@@ -81,12 +81,12 @@
let terminator = self.body[location.block].terminator();
debug!("add_moved_or_invoked_closure_note: terminator={:?}", terminator);
if let TerminatorKind::Call {
- func: Operand::Constant(box Constant { literal: ty::Const { ty: const_ty, .. }, .. }),
+ func: Operand::Constant(box Constant { literal, .. }),
args,
..
} = &terminator.kind
{
- if let ty::FnDef(id, _) = *const_ty.kind() {
+ if let ty::FnDef(id, _) = *literal.ty().kind() {
debug!("add_moved_or_invoked_closure_note: id={:?}", id);
if self.infcx.tcx.parent(id) == self.infcx.tcx.lang_items().fn_once_trait() {
let closure = match args.first() {
@@ -388,10 +388,14 @@
// so it's safe to call `expect_local`.
//
// We know the field exists so it's safe to call operator[] and `unwrap` here.
- let (&var_id, _) =
- self.infcx.tcx.typeck(def_id.expect_local()).closure_captures[&def_id]
- .get_index(field.index())
- .unwrap();
+ let var_id = self
+ .infcx
+ .tcx
+ .typeck(def_id.expect_local())
+ .closure_min_captures_flattened(def_id)
+ .nth(field.index())
+ .unwrap()
+ .get_root_variable();
self.infcx.tcx.hir().name(var_id).to_string()
}
@@ -966,12 +970,16 @@
let expr = &self.infcx.tcx.hir().expect_expr(hir_id).kind;
debug!("closure_span: hir_id={:?} expr={:?}", hir_id, expr);
if let hir::ExprKind::Closure(.., body_id, args_span, _) = expr {
- for (upvar_hir_id, place) in
- self.infcx.tcx.typeck(def_id.expect_local()).closure_captures[&def_id]
- .keys()
- .zip(places)
+ for (captured_place, place) in self
+ .infcx
+ .tcx
+ .typeck(def_id.expect_local())
+ .closure_min_captures_flattened(def_id)
+ .zip(places)
{
- let span = self.infcx.tcx.upvars_mentioned(local_did)?[upvar_hir_id].span;
+ let upvar_hir_id = captured_place.get_root_variable();
+ //FIXME(project-rfc-2229#8): Use better span from captured_place
+ let span = self.infcx.tcx.upvars_mentioned(local_did)?[&upvar_hir_id].span;
match place {
Operand::Copy(place) | Operand::Move(place)
if target_place == place.as_ref() =>
@@ -979,10 +987,6 @@
debug!("closure_span: found captured local {:?}", place);
let body = self.infcx.tcx.hir().body(*body_id);
let generator_kind = body.generator_kind();
- let upvar_id = ty::UpvarId {
- var_path: ty::UpvarPath { hir_id: *upvar_hir_id },
- closure_expr_id: local_did,
- };
// If we have a more specific span available, point to that.
// We do this even though this span might be part of a borrow error
@@ -990,11 +994,11 @@
// to a span that shows why the upvar is used in the closure,
// so a move-related span is as good as any (and potentially better,
// if the overall error is due to a move of the upvar).
- let usage_span =
- match self.infcx.tcx.typeck(local_did).upvar_capture(upvar_id) {
- ty::UpvarCapture::ByValue(Some(span)) => span,
- _ => span,
- };
+
+ let usage_span = match captured_place.info.capture_kind {
+ ty::UpvarCapture::ByValue(Some(span)) => span,
+ _ => span,
+ };
return Some((*args_span, generator_kind, usage_span));
}
_ => {}
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs
index 333ac07..d1fb999e 100644
--- a/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs
@@ -1,6 +1,7 @@
use rustc_hir as hir;
use rustc_hir::Node;
use rustc_index::vec::Idx;
+use rustc_middle::hir::map::Map;
use rustc_middle::mir::{Mutability, Place, PlaceRef, ProjectionElem};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{
@@ -376,15 +377,18 @@
opt_assignment_rhs_span.and_then(|span| span.desugaring_kind());
match opt_desugaring_kind {
// on for loops, RHS points to the iterator part
- Some(DesugaringKind::ForLoop(_)) => Some((
- false,
- opt_assignment_rhs_span.unwrap(),
- format!(
- "this iterator yields `{SIGIL}` {DESC}s",
- SIGIL = pointer_sigil,
- DESC = pointer_desc
- ),
- )),
+ Some(DesugaringKind::ForLoop(_)) => {
+ self.suggest_similar_mut_method_for_for_loop(&mut err);
+ Some((
+ false,
+ opt_assignment_rhs_span.unwrap(),
+ format!(
+ "this iterator yields `{SIGIL}` {DESC}s",
+ SIGIL = pointer_sigil,
+ DESC = pointer_desc
+ ),
+ ))
+ }
// don't create labels for compiler-generated spans
Some(_) => None,
None => {
@@ -506,35 +510,153 @@
the_place_err: PlaceRef<'tcx>,
err: &mut DiagnosticBuilder<'_>,
) {
- let id = id.expect_local();
- let tables = tcx.typeck(id);
- let hir_id = tcx.hir().local_def_id_to_hir_id(id);
- let (span, place) = &tables.closure_kind_origins()[hir_id];
- let reason = if let PlaceBase::Upvar(upvar_id) = place.base {
- let upvar = ty::place_to_string_for_capture(tcx, place);
- match tables.upvar_capture(upvar_id) {
- ty::UpvarCapture::ByRef(ty::UpvarBorrow {
- kind: ty::BorrowKind::MutBorrow | ty::BorrowKind::UniqueImmBorrow,
- ..
- }) => {
- format!("mutable borrow of `{}`", upvar)
+ let closure_local_def_id = id.expect_local();
+ let tables = tcx.typeck(closure_local_def_id);
+ let closure_hir_id = tcx.hir().local_def_id_to_hir_id(closure_local_def_id);
+ if let Some((span, closure_kind_origin)) =
+ &tables.closure_kind_origins().get(closure_hir_id)
+ {
+ let reason = if let PlaceBase::Upvar(upvar_id) = closure_kind_origin.base {
+ let upvar = ty::place_to_string_for_capture(tcx, closure_kind_origin);
+ let root_hir_id = upvar_id.var_path.hir_id;
+                // we have an origin for this closure kind starting at this root variable, so it's safe to unwrap here
+ let captured_places = tables.closure_min_captures[id].get(&root_hir_id).unwrap();
+
+ let origin_projection = closure_kind_origin
+ .projections
+ .iter()
+ .map(|proj| proj.kind)
+ .collect::<Vec<_>>();
+ let mut capture_reason = String::new();
+ for captured_place in captured_places {
+ let captured_place_kinds = captured_place
+ .place
+ .projections
+ .iter()
+ .map(|proj| proj.kind)
+ .collect::<Vec<_>>();
+ if rustc_middle::ty::is_ancestor_or_same_capture(
+ &captured_place_kinds,
+ &origin_projection,
+ ) {
+ match captured_place.info.capture_kind {
+ ty::UpvarCapture::ByRef(ty::UpvarBorrow {
+ kind: ty::BorrowKind::MutBorrow | ty::BorrowKind::UniqueImmBorrow,
+ ..
+ }) => {
+ capture_reason = format!("mutable borrow of `{}`", upvar);
+ }
+ ty::UpvarCapture::ByValue(_) => {
+ capture_reason = format!("possible mutation of `{}`", upvar);
+ }
+ _ => bug!("upvar `{}` borrowed, but not mutably", upvar),
+ }
+ break;
+ }
}
- ty::UpvarCapture::ByValue(_) => {
- format!("possible mutation of `{}`", upvar)
+ if capture_reason.is_empty() {
+ bug!("upvar `{}` borrowed, but cannot find reason", upvar);
}
- val => bug!("upvar `{}` borrowed, but not mutably: {:?}", upvar, val),
- }
- } else {
- bug!("not an upvar")
+ capture_reason
+ } else {
+ bug!("not an upvar")
+ };
+ err.span_label(
+ *span,
+ format!(
+ "calling `{}` requires mutable binding due to {}",
+ self.describe_place(the_place_err).unwrap(),
+ reason
+ ),
+ );
+ }
+ }
+
+    // Attempt to search for similar mutable associated items to suggest.
+    // In the future this could be attempted on all paths, but initially only for the RHS of a for-loop.
+ fn suggest_similar_mut_method_for_for_loop(&self, err: &mut DiagnosticBuilder<'_>) {
+ use hir::{
+ BodyId, Expr,
+ ExprKind::{Block, Call, DropTemps, Match, MethodCall},
+ HirId, ImplItem, ImplItemKind, Item, ItemKind,
};
- err.span_label(
- *span,
- format!(
- "calling `{}` requires mutable binding due to {}",
- self.describe_place(the_place_err).unwrap(),
- reason
- ),
- );
+
+ fn maybe_body_id_of_fn(hir_map: &Map<'tcx>, id: HirId) -> Option<BodyId> {
+ match hir_map.find(id) {
+ Some(Node::Item(Item { kind: ItemKind::Fn(_, _, body_id), .. }))
+ | Some(Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(_, body_id), .. })) => {
+ Some(*body_id)
+ }
+ _ => None,
+ }
+ }
+ let hir_map = self.infcx.tcx.hir();
+ let mir_body_hir_id = self.mir_hir_id();
+ if let Some(fn_body_id) = maybe_body_id_of_fn(&hir_map, mir_body_hir_id) {
+ if let Block(
+ hir::Block {
+ expr:
+ Some(Expr {
+ kind:
+ DropTemps(Expr {
+ kind:
+ Match(
+ Expr {
+ kind:
+ Call(
+ _,
+ [Expr {
+ kind: MethodCall(path_segment, ..),
+ hir_id,
+ ..
+ }, ..],
+ ),
+ ..
+ },
+ ..,
+ ),
+ ..
+ }),
+ ..
+ }),
+ ..
+ },
+ _,
+ ) = hir_map.body(fn_body_id).value.kind
+ {
+ let opt_suggestions = path_segment
+ .hir_id
+ .map(|path_hir_id| self.infcx.tcx.typeck(path_hir_id.owner))
+ .and_then(|typeck| typeck.type_dependent_def_id(*hir_id))
+ .and_then(|def_id| self.infcx.tcx.impl_of_method(def_id))
+ .map(|def_id| self.infcx.tcx.associated_items(def_id))
+ .map(|assoc_items| {
+ assoc_items
+ .in_definition_order()
+ .map(|assoc_item_def| assoc_item_def.ident)
+ .filter(|&ident| {
+ let original_method_ident = path_segment.ident;
+ original_method_ident != ident
+ && ident
+ .as_str()
+ .starts_with(&original_method_ident.name.to_string())
+ })
+ .map(|ident| format!("{}()", ident))
+ .peekable()
+ });
+
+ if let Some(mut suggestions) = opt_suggestions {
+ if suggestions.peek().is_some() {
+ err.span_suggestions(
+ path_segment.ident.span,
+ &format!("use mutable method"),
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ };
}
/// Targeted error when encountering an `FnMut` closure where an `Fn` closure was expected.
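
`suggest_similar_mut_method_for_for_loop` walks the desugared for-loop HIR, finds the method called on the iterated expression, and offers sibling associated methods whose names extend it (for example `iter` leading to `iter_mut`). The kind of code it targets, shown here with the suggested mutable method already in place (illustrative only):

```rust
fn main() {
    let mut values = vec![1, 2, 3];
    // With `values.iter()` the loop yields `&i32`, so `*v += 1` is rejected and
    // the diagnostic proposes the similarly named mutable method `iter_mut()`.
    for v in values.iter_mut() {
        *v += 1;
    }
    assert_eq!(values, vec![2, 3, 4]);
}
```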
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs
index cbca012..03738f1 100644
--- a/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs
@@ -634,14 +634,11 @@
| GenericArgKind::Const(_),
_,
) => {
- // I *think* that HIR lowering should ensure this
- // doesn't happen, even in erroneous
- // programs. Else we should use delay-span-bug.
- span_bug!(
+ // HIR lowering sometimes doesn't catch this in erroneous
+ // programs, so we need to use delay_span_bug here. See #82126.
+ self.infcx.tcx.sess.delay_span_bug(
hir_arg.span(),
- "unmatched subst and hir arg: found {:?} vs {:?}",
- kind,
- hir_arg,
+ &format!("unmatched subst and hir arg: found {:?} vs {:?}", kind, hir_arg),
);
}
}
@@ -767,7 +764,7 @@
let hir = self.infcx.tcx.hir();
if let hir::TyKind::OpaqueDef(id, _) = hir_ty.kind {
- let opaque_ty = hir.item(id.id);
+ let opaque_ty = hir.item(id);
if let hir::ItemKind::OpaqueTy(hir::OpaqueTy {
bounds:
[hir::GenericBound::LangItemTrait(
diff --git a/compiler/rustc_mir/src/borrow_check/invalidation.rs b/compiler/rustc_mir/src/borrow_check/invalidation.rs
index 8c05e6f..17c4f3c 100644
--- a/compiler/rustc_mir/src/borrow_check/invalidation.rs
+++ b/compiler/rustc_mir/src/borrow_check/invalidation.rs
@@ -92,6 +92,15 @@
self.consume_operand(location, input);
}
}
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ self.consume_operand(location, src);
+ self.consume_operand(location, dst);
+ self.consume_operand(location, count);
+ }
StatementKind::Nop
| StatementKind::Coverage(..)
| StatementKind::AscribeUserType(..)
@@ -165,7 +174,7 @@
self.consume_operand(location, value);
// Invalidate all borrows of local places
- let borrow_set = self.borrow_set.clone();
+ let borrow_set = self.borrow_set;
let resume = self.location_table.start_index(resume.start_location());
for (i, data) in borrow_set.iter_enumerated() {
if borrow_of_local_data(data.borrowed_place) {
@@ -177,7 +186,7 @@
}
TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
// Invalidate all borrows of local places
- let borrow_set = self.borrow_set.clone();
+ let borrow_set = self.borrow_set;
let start = self.location_table.start_index(location);
for (i, data) in borrow_set.iter_enumerated() {
if borrow_of_local_data(data.borrowed_place) {
@@ -326,8 +335,8 @@
);
}
- Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2)
- | Rvalue::CheckedBinaryOp(_bin_op, ref operand1, ref operand2) => {
+ Rvalue::BinaryOp(_bin_op, box (ref operand1, ref operand2))
+ | Rvalue::CheckedBinaryOp(_bin_op, box (ref operand1, ref operand2)) => {
self.consume_operand(location, operand1);
self.consume_operand(location, operand2);
}
@@ -369,7 +378,7 @@
);
let tcx = self.tcx;
let body = self.body;
- let borrow_set = self.borrow_set.clone();
+ let borrow_set = self.borrow_set;
let indices = self.borrow_set.indices();
each_borrow_involving_path(
self,
@@ -377,7 +386,7 @@
body,
location,
(sd, place),
- &borrow_set.clone(),
+ borrow_set,
indices,
|this, borrow_index, borrow| {
match (rw, borrow.kind) {
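
`StatementKind::CopyNonOverlapping` is the MIR statement that, as the bug messages elsewhere in this patch note, should only appear after the `lower_intrinsics` pass; invalidation now consumes its `src`, `dst`, and `count` operands. The surface API it models is the raw non-overlapping memory copy, roughly:

```rust
fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    // Source pointer, destination pointer and element count: the same three
    // operands the MIR statement carries and the borrow checker now consumes.
    unsafe {
        std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    }
    assert_eq!(src, dst);
}
```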
diff --git a/compiler/rustc_mir/src/borrow_check/mod.rs b/compiler/rustc_mir/src/borrow_check/mod.rs
index 5db52db..5b8bb72 100644
--- a/compiler/rustc_mir/src/borrow_check/mod.rs
+++ b/compiler/rustc_mir/src/borrow_check/mod.rs
@@ -243,7 +243,7 @@
let regioncx = Rc::new(regioncx);
- let flow_borrows = Borrows::new(tcx, &body, regioncx.clone(), &borrow_set)
+    let flow_borrows = Borrows::new(tcx, &body, &regioncx, &borrow_set)
.into_engine(tcx, &body)
.pass_name("borrowck")
.iterate_to_fixpoint();
@@ -266,7 +266,6 @@
for (idx, move_data_results) in promoted_errors {
let promoted_body = &promoted[idx];
- let dominators = promoted_body.dominators();
if let Err((move_data, move_errors)) = move_data_results {
let mut promoted_mbcx = MirBorrowckCtxt {
@@ -274,7 +273,7 @@
param_env,
body: promoted_body,
move_data: &move_data,
- location_table: &LocationTable::new(promoted_body),
+ location_table, // no need to create a real one for the promoted, it is not used
movable_generator,
fn_self_span_reported: Default::default(),
locals_are_invalidated_at_exit,
@@ -287,8 +286,8 @@
regioncx: regioncx.clone(),
used_mut: Default::default(),
used_mut_upvars: SmallVec::new(),
- borrow_set: borrow_set.clone(),
- dominators,
+ borrow_set: Rc::clone(&borrow_set),
+ dominators: Dominators::dummy(), // not used
upvars: Vec::new(),
local_names: IndexVec::from_elem(None, &promoted_body.local_decls),
region_names: RefCell::default(),
@@ -317,10 +316,10 @@
move_error_reported: BTreeMap::new(),
uninitialized_error_reported: Default::default(),
errors_buffer,
- regioncx,
+            regioncx: Rc::clone(&regioncx),
used_mut: Default::default(),
used_mut_upvars: SmallVec::new(),
- borrow_set,
+ borrow_set: Rc::clone(&borrow_set),
dominators,
upvars,
local_names,
@@ -627,6 +626,15 @@
self.consume_operand(location, (input, span), flow_state);
}
}
+
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ ..
+ }) => {
+ span_bug!(
+ span,
+ "Unexpected CopyNonOverlapping, should only appear after lower_intrinsics",
+ )
+ }
StatementKind::Nop
| StatementKind::Coverage(..)
| StatementKind::AscribeUserType(..)
@@ -1317,8 +1325,8 @@
);
}
- Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2)
- | Rvalue::CheckedBinaryOp(_bin_op, ref operand1, ref operand2) => {
+ Rvalue::BinaryOp(_bin_op, box (ref operand1, ref operand2))
+ | Rvalue::CheckedBinaryOp(_bin_op, box (ref operand1, ref operand2)) => {
self.consume_operand(location, (operand1, span), flow_state);
self.consume_operand(location, (operand2, span), flow_state);
}
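
Several `Rc` fields now use the explicit `Rc::clone(&x)` form rather than `x.clone()`. The behaviour is identical, but the explicit form signals that only a reference count is incremented and no data is duplicated, as this small standalone example shows:

```rust
use std::rc::Rc;

fn main() {
    let data = Rc::new(vec![1, 2, 3]);
    // `Rc::clone` only bumps the reference count; both handles point at the
    // same allocation, nothing in the Vec is copied.
    let shared = Rc::clone(&data);
    assert_eq!(Rc::strong_count(&data), 2);
    assert!(Rc::ptr_eq(&data, &shared));
}
```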
diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs b/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs
index f7c9023..0d1d255 100644
--- a/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs
+++ b/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs
@@ -47,6 +47,7 @@
/// Calling `universal_upper_bound` for such a region gives `fr_fn_body`,
/// which has no `external_name` in which case we use `'empty` as the
/// region to pass to `infer_opaque_definition_from_instantiation`.
+ #[instrument(skip(self, infcx))]
pub(in crate::borrow_check) fn infer_opaque_types(
&self,
infcx: &InferCtxt<'_, 'tcx>,
@@ -56,10 +57,7 @@
opaque_ty_decls
.into_iter()
.map(|(opaque_def_id, ty::ResolvedOpaqueTy { concrete_type, substs })| {
- debug!(
- "infer_opaque_types(concrete_type = {:?}, substs = {:?})",
- concrete_type, substs
- );
+ debug!(?concrete_type, ?substs);
let mut subst_regions = vec![self.universal_regions.fr_static];
let universal_substs =
@@ -110,10 +108,7 @@
}
});
- debug!(
- "infer_opaque_types(universal_concrete_type = {:?}, universal_substs = {:?})",
- universal_concrete_type, universal_substs
- );
+ debug!(?universal_concrete_type, ?universal_substs);
let remapped_type = infcx.infer_opaque_definition_from_instantiation(
opaque_def_id,
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs
index 157959b..77d9136 100644
--- a/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs
+++ b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs
@@ -103,11 +103,8 @@
}
}
- assert!(
- body.yield_ty.is_some() && universal_regions.yield_ty.is_some()
- || body.yield_ty.is_none() && universal_regions.yield_ty.is_none()
- );
- if let Some(mir_yield_ty) = body.yield_ty {
+ assert!(body.yield_ty().is_some() == universal_regions.yield_ty.is_some());
+ if let Some(mir_yield_ty) = body.yield_ty() {
let ur_yield_ty = universal_regions.yield_ty.unwrap();
let yield_span = body.local_decls[RETURN_PLACE].source_info.span;
self.equate_normalized_input_or_output(ur_yield_ty, mir_yield_ty, yield_span);
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/mod.rs b/compiler/rustc_mir/src/borrow_check/type_check/mod.rs
index 3ba06bd..cce1549 100644
--- a/compiler/rustc_mir/src/borrow_check/type_check/mod.rs
+++ b/compiler/rustc_mir/src/borrow_check/type_check/mod.rs
@@ -43,6 +43,9 @@
use crate::dataflow::impls::MaybeInitializedPlaces;
use crate::dataflow::move_paths::MoveData;
use crate::dataflow::ResultsCursor;
+use crate::transform::{
+ check_consts::ConstCx, promote_consts::is_const_fn_in_array_repeat_expression,
+};
use crate::borrow_check::{
borrow_set::BorrowSet,
@@ -279,7 +282,7 @@
fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
self.super_constant(constant, location);
- let ty = self.sanitize_type(constant, constant.literal.ty);
+ let ty = self.sanitize_type(constant, constant.literal.ty());
self.cx.infcx.tcx.for_each_free_region(&ty, |live_region| {
let live_region_vid =
@@ -293,7 +296,7 @@
if let Some(annotation_index) = constant.user_ty {
if let Err(terr) = self.cx.relate_type_and_user_type(
- constant.literal.ty,
+ constant.literal.ty(),
ty::Variance::Invariant,
&UserTypeProjection { base: annotation_index, projs: vec![] },
location.to_locations(),
@@ -305,13 +308,22 @@
constant,
"bad constant user type {:?} vs {:?}: {:?}",
annotation,
- constant.literal.ty,
+ constant.literal.ty(),
terr,
);
}
} else {
let tcx = self.tcx();
- if let ty::ConstKind::Unevaluated(def, substs, promoted) = constant.literal.val {
+ let maybe_uneval = match constant.literal {
+ ConstantKind::Ty(ct) => match ct.val {
+ ty::ConstKind::Unevaluated(def, substs, promoted) => {
+ Some((def, substs, promoted))
+ }
+ _ => None,
+ },
+ _ => None,
+ };
+ if let Some((def, substs, promoted)) = maybe_uneval {
if let Some(promoted) = promoted {
let check_err = |verifier: &mut TypeVerifier<'a, 'b, 'tcx>,
promoted: &Body<'tcx>,
@@ -346,7 +358,7 @@
location.to_locations(),
ConstraintCategory::Boring,
self.cx.param_env.and(type_op::ascribe_user_type::AscribeUserType::new(
- constant.literal.ty,
+ constant.literal.ty(),
def.did,
UserSubsts { substs, user_self_ty: None },
)),
@@ -364,7 +376,7 @@
let unnormalized_ty = tcx.type_of(static_def_id);
let locations = location.to_locations();
let normalized_ty = self.cx.normalize(unnormalized_ty, locations);
- let literal_ty = constant.literal.ty.builtin_deref(true).unwrap().ty;
+ let literal_ty = constant.literal.ty().builtin_deref(true).unwrap().ty;
if let Err(terr) = self.cx.eq_types(
normalized_ty,
@@ -376,7 +388,7 @@
}
}
- if let ty::FnDef(def_id, substs) = *constant.literal.ty.kind() {
+ if let ty::FnDef(def_id, substs) = *constant.literal.ty().kind() {
let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, substs);
self.cx.normalize_and_prove_instantiated_predicates(
instantiated_predicates,
@@ -1098,6 +1110,7 @@
) -> Fallible<()> {
relate_tys::relate_types(
self.infcx,
+ self.param_env,
a,
v,
b,
@@ -1516,6 +1529,12 @@
);
}
}
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ ..
+ }) => span_bug!(
+ stmt.source_info.span,
+ "Unexpected StatementKind::CopyNonOverlapping, should only appear after lowering_intrinsics",
+ ),
StatementKind::FakeRead(..)
| StatementKind::StorageLive(..)
| StatementKind::StorageDead(..)
@@ -1647,7 +1666,7 @@
}
TerminatorKind::Yield { ref value, .. } => {
let value_ty = value.ty(body, tcx);
- match body.yield_ty {
+ match body.yield_ty() {
None => span_mirbug!(self, term, "yield in non-generator"),
Some(ty) => {
if let Err(terr) = self.sub_types(
@@ -1730,7 +1749,10 @@
}
}
None => {
- if !sig.output().conservative_is_privately_uninhabited(self.tcx()) {
+ if !self
+ .tcx()
+ .conservative_is_privately_uninhabited(self.param_env.and(sig.output()))
+ {
span_mirbug!(self, term, "call to converging function {:?} w/o dest", sig);
}
}
@@ -1988,18 +2010,24 @@
Operand::Copy(..) | Operand::Constant(..) => {
// These are always okay: direct use of a const, or a value that can evidently be copied.
}
- Operand::Move(_) => {
+ Operand::Move(place) => {
// Make sure that repeated elements implement `Copy`.
let span = body.source_info(location).span;
let ty = operand.ty(body, tcx);
if !self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span) {
+ let ccx = ConstCx::new_with_param_env(tcx, body, self.param_env);
+ let is_const_fn =
+ is_const_fn_in_array_repeat_expression(&ccx, &place, &body);
+
+ debug!("check_rvalue: is_const_fn={:?}", is_const_fn);
+
let def_id = body.source.def_id().expect_local();
self.infcx.report_selection_error(
&traits::Obligation::new(
ObligationCause::new(
span,
self.tcx().hir().local_def_id_to_hir_id(def_id),
- traits::ObligationCauseCode::RepeatVec,
+ traits::ObligationCauseCode::RepeatVec(is_const_fn),
),
self.param_env,
ty::Binder::bind(ty::TraitRef::new(
@@ -2191,19 +2219,18 @@
CastKind::Pointer(PointerCast::ArrayToPointer) => {
let ty_from = op.ty(body, tcx);
- let opt_ty_elem = match ty_from.kind() {
- ty::RawPtr(ty::TypeAndMut {
- mutbl: hir::Mutability::Not,
- ty: array_ty,
- }) => match array_ty.kind() {
- ty::Array(ty_elem, _) => Some(ty_elem),
- _ => None,
- },
+ let opt_ty_elem_mut = match ty_from.kind() {
+ ty::RawPtr(ty::TypeAndMut { mutbl: array_mut, ty: array_ty }) => {
+ match array_ty.kind() {
+ ty::Array(ty_elem, _) => Some((ty_elem, *array_mut)),
+ _ => None,
+ }
+ }
_ => None,
};
- let ty_elem = match opt_ty_elem {
- Some(ty_elem) => ty_elem,
+ let (ty_elem, ty_mut) = match opt_ty_elem_mut {
+ Some(ty_elem_mut) => ty_elem_mut,
None => {
span_mirbug!(
self,
@@ -2215,11 +2242,10 @@
}
};
- let ty_to = match ty.kind() {
- ty::RawPtr(ty::TypeAndMut {
- mutbl: hir::Mutability::Not,
- ty: ty_to,
- }) => ty_to,
+ let (ty_to, ty_to_mut) = match ty.kind() {
+ ty::RawPtr(ty::TypeAndMut { mutbl: ty_to_mut, ty: ty_to }) => {
+ (ty_to, *ty_to_mut)
+ }
_ => {
span_mirbug!(
self,
@@ -2231,6 +2257,17 @@
}
};
+ if ty_to_mut == Mutability::Mut && ty_mut == Mutability::Not {
+ span_mirbug!(
+ self,
+ rvalue,
+ "ArrayToPointer cast from const {:?} to mut {:?}",
+ ty,
+ ty_to
+ );
+ return;
+ }
+
if let Err(terr) = self.sub_types(
ty_elem,
ty_to,
@@ -2277,8 +2314,7 @@
Rvalue::BinaryOp(
BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge,
- left,
- right,
+ box (left, right),
) => {
let ty_left = left.ty(body, tcx);
match ty_left.kind() {
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/relate_tys.rs b/compiler/rustc_mir/src/borrow_check/type_check/relate_tys.rs
index 6665eb5..249945f 100644
--- a/compiler/rustc_mir/src/borrow_check/type_check/relate_tys.rs
+++ b/compiler/rustc_mir/src/borrow_check/type_check/relate_tys.rs
@@ -18,6 +18,7 @@
/// variables, but not the type `b`.
pub(super) fn relate_types<'tcx>(
infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
a: Ty<'tcx>,
v: ty::Variance,
b: Ty<'tcx>,
@@ -28,7 +29,7 @@
debug!("relate_types(a={:?}, v={:?}, b={:?}, locations={:?})", a, v, b, locations);
TypeRelating::new(
infcx,
- NllTypeRelatingDelegate::new(infcx, borrowck_context, locations, category),
+ NllTypeRelatingDelegate::new(infcx, borrowck_context, param_env, locations, category),
v,
)
.relate(a, b)?;
@@ -39,6 +40,8 @@
infcx: &'me InferCtxt<'me, 'tcx>,
borrowck_context: Option<&'me mut BorrowCheckContext<'bccx, 'tcx>>,
+ param_env: ty::ParamEnv<'tcx>,
+
/// Where (and why) is this relation taking place?
locations: Locations,
@@ -50,14 +53,19 @@
fn new(
infcx: &'me InferCtxt<'me, 'tcx>,
borrowck_context: Option<&'me mut BorrowCheckContext<'bccx, 'tcx>>,
+ param_env: ty::ParamEnv<'tcx>,
locations: Locations,
category: ConstraintCategory,
) -> Self {
- Self { infcx, borrowck_context, locations, category }
+ Self { infcx, borrowck_context, param_env, locations, category }
}
}
impl TypeRelatingDelegate<'tcx> for NllTypeRelatingDelegate<'_, '_, 'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+
fn create_next_universe(&mut self) -> ty::UniverseIndex {
self.infcx.create_next_universe()
}
diff --git a/compiler/rustc_mir/src/const_eval/error.rs b/compiler/rustc_mir/src/const_eval/error.rs
index 0e610e3..754ed0b 100644
--- a/compiler/rustc_mir/src/const_eval/error.rs
+++ b/compiler/rustc_mir/src/const_eval/error.rs
@@ -16,6 +16,7 @@
#[derive(Clone, Debug)]
pub enum ConstEvalErrKind {
NeedsRfc(String),
+ PtrToIntCast,
ConstAccessesStatic,
ModifiedGlobal,
AssertFailure(AssertKind<ConstInt>),
@@ -39,6 +40,12 @@
NeedsRfc(ref msg) => {
write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
}
+ PtrToIntCast => {
+ write!(
+ f,
+ "cannot cast pointer to integer because it was not created by cast from integer"
+ )
+ }
ConstAccessesStatic => write!(f, "constant accesses static"),
ModifiedGlobal => {
write!(f, "modifying a static's initial value from another static's initializer")
@@ -77,7 +84,11 @@
{
error.print_backtrace();
let stacktrace = ecx.generate_stacktrace();
- ConstEvalErr { error: error.kind, stacktrace, span: span.unwrap_or_else(|| ecx.cur_span()) }
+ ConstEvalErr {
+ error: error.into_kind(),
+ stacktrace,
+ span: span.unwrap_or_else(|| ecx.cur_span()),
+ }
}
pub fn struct_error(
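
The new `PtrToIntCast` error kind reports pointer-to-integer casts during const evaluation, where an allocation has no concrete address to cast. For contrast, the same cast is perfectly fine at runtime (a hypothetical illustration, not compiler code):

```rust
fn main() {
    let x = 42u8;
    // At runtime, turning a reference into an address is allowed:
    let addr = &x as *const u8 as usize;
    println!("{:#x}", addr);
    // In a `const` or `static` initializer the equivalent cast is rejected during
    // const evaluation, since the allocation has no concrete address at that point.
}
```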
diff --git a/compiler/rustc_mir/src/const_eval/eval_queries.rs b/compiler/rustc_mir/src/const_eval/eval_queries.rs
index 252f5e7..fa234ff 100644
--- a/compiler/rustc_mir/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_mir/src/const_eval/eval_queries.rs
@@ -50,13 +50,13 @@
let name =
with_no_trimmed_paths(|| ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id())));
- let prom = cid.promoted.map_or(String::new(), |p| format!("::promoted[{:?}]", p));
+ let prom = cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p));
trace!("eval_body_using_ecx: pushing stack frame for global: {}{}", name, prom);
ecx.push_stack_frame(
cid.instance,
body,
- Some(ret.into()),
+ Some(&ret.into()),
StackPopCleanup::None { cleanup: false },
)?;
@@ -72,7 +72,7 @@
None => InternKind::Constant,
}
};
- intern_const_alloc_recursive(ecx, intern_kind, ret)?;
+ intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
debug!("eval_body_using_ecx done: {:?}", *ret);
Ok(ret)
@@ -105,7 +105,7 @@
/// type system.
pub(super) fn op_to_const<'tcx>(
ecx: &CompileTimeEvalContext<'_, 'tcx>,
- op: OpTy<'tcx>,
+ op: &OpTy<'tcx>,
) -> ConstValue<'tcx> {
// We do not have value optimizations for everything.
// Only scalars and slices, since they are very common.
@@ -137,7 +137,7 @@
op.try_as_mplace(ecx)
};
- let to_const_value = |mplace: MPlaceTy<'_>| match mplace.ptr {
+ let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr {
Scalar::Ptr(ptr) => {
let alloc = ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory();
ConstValue::ByRef { alloc, offset: ptr.offset }
@@ -155,12 +155,12 @@
}
};
match immediate {
- Ok(mplace) => to_const_value(mplace),
+ Ok(ref mplace) => to_const_value(mplace),
// see comment on `let try_as_immediate` above
Err(imm) => match *imm {
Immediate::Scalar(x) => match x {
ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
- ScalarMaybeUninit::Uninit => to_const_value(op.assert_mem_place(ecx)),
+ ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place(ecx)),
},
Immediate::ScalarPair(a, b) => {
let (data, start) = match a.check_init().unwrap() {
@@ -201,14 +201,14 @@
"the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead"
);
// Turn this into a proper constant.
- op_to_const(&ecx, mplace.into())
+ op_to_const(&ecx, &mplace.into())
}
pub fn eval_to_const_value_raw_provider<'tcx>(
tcx: TyCtxt<'tcx>,
key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
- // see comment in const_eval_raw_provider for what we're doing here
+ // see comment in eval_to_allocation_raw_provider for what we're doing here
if key.param_env.reveal() == Reveal::All {
let mut key = key;
key.param_env = key.param_env.with_user_facing();
@@ -230,7 +230,7 @@
};
return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
let span = tcx.def_span(def_id);
- let error = ConstEvalErr { error: error.kind, stacktrace: vec![], span };
+ let error = ConstEvalErr { error: error.into_kind(), stacktrace: vec![], span };
error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
});
}
@@ -348,7 +348,7 @@
Some(_) => CtfeValidationMode::Regular, // a `static`
None => CtfeValidationMode::Const { inner, allow_static_ptrs: false },
};
- ecx.const_validate_operand(mplace.into(), path, &mut ref_tracking, mode)?;
+ ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
inner = true;
}
};
diff --git a/compiler/rustc_mir/src/const_eval/machine.rs b/compiler/rustc_mir/src/const_eval/machine.rs
index 49126cf..61785a5 100644
--- a/compiler/rustc_mir/src/const_eval/machine.rs
+++ b/compiler/rustc_mir/src/const_eval/machine.rs
@@ -39,8 +39,8 @@
// &str
assert!(args.len() == 1);
- let msg_place = self.deref_operand(args[0])?;
- let msg = Symbol::intern(self.read_str(msg_place)?);
+ let msg_place = self.deref_operand(&args[0])?;
+ let msg = Symbol::intern(self.read_str(&msg_place)?);
let span = self.find_closest_untracked_caller_location();
let (file, line, col) = self.location_triple_for_span(span);
Err(ConstEvalErrKind::Panic { msg, file, line, col }.into())
@@ -222,7 +222,7 @@
instance: ty::Instance<'tcx>,
_abi: Abi,
args: &[OpTy<'tcx>],
- _ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+ _ret: Option<(&PlaceTy<'tcx>, mir::BasicBlock)>,
_unwind: Option<mir::BasicBlock>, // unwinding is not supported in consts
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
debug!("find_mir_or_eval_fn: {:?}", instance);
@@ -245,8 +245,8 @@
Ok(Some(match ecx.load_mir(instance.def, None) {
Ok(body) => body,
Err(err) => {
- if let err_unsup!(NoMirFor(did)) = err.kind {
- let path = ecx.tcx.def_path_str(did);
+ if let err_unsup!(NoMirFor(did)) = err.kind() {
+ let path = ecx.tcx.def_path_str(*did);
return Err(ConstEvalErrKind::NeedsRfc(format!(
"calling extern function `{}`",
path
@@ -262,7 +262,7 @@
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
- ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+ ret: Option<(&PlaceTy<'tcx>, mir::BasicBlock)>,
_unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
// Shared intrinsics.
@@ -284,8 +284,8 @@
};
match intrinsic_name {
sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
- let a = ecx.read_immediate(args[0])?.to_scalar()?;
- let b = ecx.read_immediate(args[1])?.to_scalar()?;
+ let a = ecx.read_immediate(&args[0])?.to_scalar()?;
+ let b = ecx.read_immediate(&args[1])?.to_scalar()?;
let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
ecx.guaranteed_eq(a, b)
} else {
@@ -294,8 +294,8 @@
ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
}
sym::const_allocate => {
- let size = ecx.read_scalar(args[0])?.to_machine_usize(ecx)?;
- let align = ecx.read_scalar(args[1])?.to_machine_usize(ecx)?;
+ let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
+ let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
let align = match Align::from_bytes(align) {
Ok(a) => a,
@@ -330,7 +330,7 @@
use rustc_middle::mir::AssertKind::*;
// Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
let eval_to_int =
- |op| ecx.read_immediate(ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
+ |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
let err = match msg {
BoundsCheck { ref len, ref index } => {
let len = eval_to_int(len)?;
@@ -352,21 +352,21 @@
}
fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
- Err(ConstEvalErrKind::NeedsRfc("pointer-to-integer cast".to_string()).into())
+ Err(ConstEvalErrKind::PtrToIntCast.into())
}
fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: mir::BinOp,
- _left: ImmTy<'tcx>,
- _right: ImmTy<'tcx>,
+ _left: &ImmTy<'tcx>,
+ _right: &ImmTy<'tcx>,
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
}
fn box_alloc(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
- _dest: PlaceTy<'tcx>,
+ _dest: &PlaceTy<'tcx>,
) -> InterpResult<'tcx> {
Err(ConstEvalErrKind::NeedsRfc("heap allocations via `box` keyword".to_string()).into())
}
diff --git a/compiler/rustc_mir/src/const_eval/mod.rs b/compiler/rustc_mir/src/const_eval/mod.rs
index 9dd2a85..77531ae 100644
--- a/compiler/rustc_mir/src/const_eval/mod.rs
+++ b/compiler/rustc_mir/src/const_eval/mod.rs
@@ -3,12 +3,15 @@
use std::convert::TryFrom;
use rustc_hir::Mutability;
-use rustc_middle::mir;
use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::{
+ mir::{self, interpret::ConstAlloc},
+ ty::ScalarInt,
+};
use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
use crate::interpret::{
- intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, MemPlaceMeta, Scalar,
+ intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, MPlaceTy, MemPlaceMeta, Scalar,
};
mod error;
@@ -29,12 +32,106 @@
let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false);
let loc_place = ecx.alloc_caller_location(file, line, col);
- if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, loc_place).is_err() {
+ if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
bug!("intern_const_alloc_recursive should not error in this case")
}
ConstValue::Scalar(loc_place.ptr)
}
+/// Convert an evaluated constant to a type level constant
+pub(crate) fn const_to_valtree<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ raw: ConstAlloc<'tcx>,
+) -> Option<ty::ValTree<'tcx>> {
+ let ecx = mk_eval_cx(
+ tcx, DUMMY_SP, param_env,
+ // It is absolutely crucial for soundness that
+ // we do not read from static items or other mutable memory.
+ false,
+ );
+ let place = ecx.raw_const_to_mplace(raw).unwrap();
+ const_to_valtree_inner(&ecx, &place)
+}
+
+fn const_to_valtree_inner<'tcx>(
+ ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &MPlaceTy<'tcx>,
+) -> Option<ty::ValTree<'tcx>> {
+ let branches = |n, variant| {
+ let place = match variant {
+ Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
+ None => *place,
+ };
+ let variant =
+ variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
+ let fields = (0..n).map(|i| {
+ let field = ecx.mplace_field(&place, i).unwrap();
+ const_to_valtree_inner(ecx, &field)
+ });
+    // For enums, we prepend their variant index before the variant's fields so we can figure out
+ // the variant again when just seeing a valtree.
+ let branches = variant.into_iter().chain(fields);
+ Some(ty::ValTree::Branch(
+ ecx.tcx.arena.alloc_from_iter(branches.collect::<Option<Vec<_>>>()?),
+ ))
+ };
+ match place.layout.ty.kind() {
+ ty::FnDef(..) => Some(ty::ValTree::zst()),
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
+ let val = ecx.read_immediate(&place.into()).unwrap();
+ let val = val.to_scalar().unwrap();
+ Some(ty::ValTree::Leaf(val.assert_int()))
+ }
+
+ // Raw pointers are not allowed in type level constants, as we cannot properly test them for
+ // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
+ // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
+ // agree with runtime equality tests.
+ ty::FnPtr(_) | ty::RawPtr(_) => None,
+ ty::Ref(..) => unimplemented!("need to use deref_const"),
+
+ // Trait objects are not allowed in type level constants, as we have no concept for
+ // resolving their backing type, even if we can do that at const eval time. We may
+ // hypothetically be able to allow `dyn StructuralEq` trait objects in the future,
+ // but it is unclear if this is useful.
+ ty::Dynamic(..) => None,
+
+ ty::Slice(_) | ty::Str => {
+ unimplemented!("need to find the backing data of the slice/str and recurse on that")
+ }
+ ty::Tuple(substs) => branches(substs.len(), None),
+ ty::Array(_, len) => branches(usize::try_from(len.eval_usize(ecx.tcx.tcx, ecx.param_env)).unwrap(), None),
+
+ ty::Adt(def, _) => {
+ if def.variants.is_empty() {
+ bug!("uninhabited types should have errored and never gotten converted to valtree")
+ }
+
+ let variant = ecx.read_discriminant(&place.into()).unwrap().1;
+
+ branches(def.variants[variant].fields.len(), Some(variant))
+ }
+
+ ty::Never
+ | ty::Error(_)
+ | ty::Foreign(..)
+ | ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ // FIXME(oli-obk): we could look behind opaque types
+ | ty::Opaque(..)
+ | ty::Infer(_)
+ // FIXME(oli-obk): we can probably encode closures just like structs
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..) => None,
+ }
+}
+
/// This function uses `unwrap` copiously, because an already validated constant
/// must have valid fields and can thus never fail outside of compiler bugs. However, it is
/// invoked from the pretty printer, where it can receive enums with no variants and e.g.
@@ -55,8 +152,8 @@
return mir::DestructuredConst { variant: None, fields: &[] };
}
ty::Adt(def, _) => {
- let variant = ecx.read_discriminant(op).unwrap().1;
- let down = ecx.operand_downcast(op, variant).unwrap();
+ let variant = ecx.read_discriminant(&op).unwrap().1;
+ let down = ecx.operand_downcast(&op, variant).unwrap();
(def.variants[variant].fields.len(), Some(variant), down)
}
ty::Tuple(substs) => (substs.len(), None, op),
@@ -64,8 +161,8 @@
};
let fields_iter = (0..field_count).map(|i| {
- let field_op = ecx.operand_field(down, i).unwrap();
- let val = op_to_const(&ecx, field_op);
+ let field_op = ecx.operand_field(&down, i).unwrap();
+ let val = op_to_const(&ecx, &field_op);
ty::Const::from_value(tcx, val, field_op.layout.ty)
});
let fields = tcx.arena.alloc_from_iter(fields_iter);
@@ -81,7 +178,7 @@
trace!("deref_const: {:?}", val);
let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
let op = ecx.const_to_op(val, None).unwrap();
- let mplace = ecx.deref_operand(op).unwrap();
+ let mplace = ecx.deref_operand(&op).unwrap();
if let Scalar::Ptr(ptr) = mplace.ptr {
assert_eq!(
ecx.memory.get_raw(ptr.alloc_id).unwrap().mutability,
@@ -106,5 +203,5 @@
},
};
- tcx.mk_const(ty::Const { val: ty::ConstKind::Value(op_to_const(&ecx, mplace.into())), ty })
+ tcx.mk_const(ty::Const { val: ty::ConstKind::Value(op_to_const(&ecx, &mplace.into())), ty })
}
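Note: `const_to_valtree_inner` above encodes aggregates as nested `ValTree::Branch` nodes, and for enums it prepends the variant index as a leaf before the fields so the variant can be recovered from the tree alone. A rough sketch of the resulting shape for a simple enum value, using the constructors named in the patch purely for illustration:

    // Illustration only: the conceptual valtree for `Some(3u8)` — variant
    // index 1 of `Option<u8>` followed by its single field.
    //
    //     ValTree::Branch(&[
    //         ValTree::Leaf(ScalarInt::from(1u32)), // variant index, prepended
    //         ValTree::Leaf(ScalarInt::from(3u8)),  // the field `3u8`
    //     ])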
diff --git a/compiler/rustc_mir/src/dataflow/framework/mod.rs b/compiler/rustc_mir/src/dataflow/framework/mod.rs
index 524ad0a..3f7808c 100644
--- a/compiler/rustc_mir/src/dataflow/framework/mod.rs
+++ b/compiler/rustc_mir/src/dataflow/framework/mod.rs
@@ -10,7 +10,7 @@
//! fixpoint solution to your dataflow problem, or implement the `ResultsVisitor` interface and use
//! `visit_results`. The following example uses the `ResultsCursor` approach.
//!
-//! ```ignore(cross-crate-imports)
+//! ```ignore (cross-crate-imports)
//! use rustc_mir::dataflow::Analysis; // Makes `into_engine` available.
//!
//! fn do_my_analysis(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
@@ -211,7 +211,7 @@
/// default impl and the one for all `A: GenKillAnalysis` will do the right thing.
/// Its purpose is to enable method chaining like so:
///
- /// ```ignore(cross-crate-imports)
+ /// ```ignore (cross-crate-imports)
/// let results = MyAnalysis::new(tcx, body)
/// .into_engine(tcx, body, def_id)
/// .iterate_to_fixpoint()
diff --git a/compiler/rustc_mir/src/dataflow/impls/borrows.rs b/compiler/rustc_mir/src/dataflow/impls/borrows.rs
index 6b7889c..f24d0f0 100644
--- a/compiler/rustc_mir/src/dataflow/impls/borrows.rs
+++ b/compiler/rustc_mir/src/dataflow/impls/borrows.rs
@@ -11,7 +11,6 @@
use crate::dataflow::{self, fmt::DebugWithContext, GenKill};
use std::fmt;
-use std::rc::Rc;
rustc_index::newtype_index! {
pub struct BorrowIndex {
@@ -30,101 +29,113 @@
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
- borrow_set: Rc<BorrowSet<'tcx>>,
+ borrow_set: &'a BorrowSet<'tcx>,
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
-
- /// NLL region inference context with which NLL queries should be resolved
- _nonlexical_regioncx: Rc<RegionInferenceContext<'tcx>>,
}
struct StackEntry {
bb: mir::BasicBlock,
lo: usize,
hi: usize,
- first_part_only: bool,
}
-fn precompute_borrows_out_of_scope<'tcx>(
- body: &Body<'tcx>,
- regioncx: &Rc<RegionInferenceContext<'tcx>>,
- borrows_out_of_scope_at_location: &mut FxHashMap<Location, Vec<BorrowIndex>>,
- borrow_index: BorrowIndex,
- borrow_region: RegionVid,
- location: Location,
-) {
- // We visit one BB at a time. The complication is that we may start in the
- // middle of the first BB visited (the one containing `location`), in which
- // case we may have to later on process the first part of that BB if there
- // is a path back to its start.
+struct OutOfScopePrecomputer<'a, 'tcx> {
+ visited: BitSet<mir::BasicBlock>,
+ visit_stack: Vec<StackEntry>,
+ body: &'a Body<'tcx>,
+ regioncx: &'a RegionInferenceContext<'tcx>,
+ borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
+}
- // For visited BBs, we record the index of the first statement processed.
- // (In fully processed BBs this index is 0.) Note also that we add BBs to
- // `visited` once they are added to `stack`, before they are actually
- // processed, because this avoids the need to look them up again on
- // completion.
- let mut visited = FxHashMap::default();
- visited.insert(location.block, location.statement_index);
-
- let mut stack = vec![];
- stack.push(StackEntry {
- bb: location.block,
- lo: location.statement_index,
- hi: body[location.block].statements.len(),
- first_part_only: false,
- });
-
- while let Some(StackEntry { bb, lo, hi, first_part_only }) = stack.pop() {
- let mut finished_early = first_part_only;
- for i in lo..=hi {
- let location = Location { block: bb, statement_index: i };
- // If region does not contain a point at the location, then add to list and skip
- // successor locations.
- if !regioncx.region_contains(borrow_region, location) {
- debug!("borrow {:?} gets killed at {:?}", borrow_index, location);
- borrows_out_of_scope_at_location.entry(location).or_default().push(borrow_index);
- finished_early = true;
- break;
- }
+impl<'a, 'tcx> OutOfScopePrecomputer<'a, 'tcx> {
+ fn new(body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>) -> Self {
+ OutOfScopePrecomputer {
+ visited: BitSet::new_empty(body.basic_blocks().len()),
+ visit_stack: vec![],
+ body,
+ regioncx,
+ borrows_out_of_scope_at_location: FxHashMap::default(),
}
+ }
+}
- if !finished_early {
- // Add successor BBs to the work list, if necessary.
- let bb_data = &body[bb];
- assert!(hi == bb_data.statements.len());
- for &succ_bb in bb_data.terminator().successors() {
- visited
- .entry(succ_bb)
- .and_modify(|lo| {
- // `succ_bb` has been seen before. If it wasn't
- // fully processed, add its first part to `stack`
- // for processing.
- if *lo > 0 {
- stack.push(StackEntry {
+impl<'tcx> OutOfScopePrecomputer<'_, 'tcx> {
+ fn precompute_borrows_out_of_scope(
+ &mut self,
+ borrow_index: BorrowIndex,
+ borrow_region: RegionVid,
+ location: Location,
+ ) {
+ // We visit one BB at a time. The complication is that we may start in the
+ // middle of the first BB visited (the one containing `location`), in which
+ // case we may have to later on process the first part of that BB if there
+ // is a path back to its start.
+
+ // For visited BBs, we record the index of the first statement processed.
+ // (In fully processed BBs this index is 0.) Note also that we add BBs to
+ // `visited` once they are added to `stack`, before they are actually
+ // processed, because this avoids the need to look them up again on
+ // completion.
+ self.visited.insert(location.block);
+
+ let mut first_lo = location.statement_index;
+ let first_hi = self.body[location.block].statements.len();
+
+ self.visit_stack.push(StackEntry { bb: location.block, lo: first_lo, hi: first_hi });
+
+ while let Some(StackEntry { bb, lo, hi }) = self.visit_stack.pop() {
+ // If we process the first part of the first basic block (i.e. we encounter that block
+ // for the second time), we no longer have to visit its successors again.
+ let mut finished_early = bb == location.block && hi != first_hi;
+ for i in lo..=hi {
+ let location = Location { block: bb, statement_index: i };
+ // If region does not contain a point at the location, then add to list and skip
+ // successor locations.
+ if !self.regioncx.region_contains(borrow_region, location) {
+ debug!("borrow {:?} gets killed at {:?}", borrow_index, location);
+ self.borrows_out_of_scope_at_location
+ .entry(location)
+ .or_default()
+ .push(borrow_index);
+ finished_early = true;
+ break;
+ }
+ }
+
+ if !finished_early {
+ // Add successor BBs to the work list, if necessary.
+ let bb_data = &self.body[bb];
+ debug_assert!(hi == bb_data.statements.len());
+ for &succ_bb in bb_data.terminator().successors() {
+ if self.visited.insert(succ_bb) == false {
+ if succ_bb == location.block && first_lo > 0 {
+ // `succ_bb` has been seen before. If it wasn't
+ // fully processed, add its first part to `stack`
+ // for processing.
+ self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
- hi: *lo - 1,
- first_part_only: true,
+ hi: first_lo - 1,
});
+
+ // And update this entry with 0, to represent the
+ // whole BB being processed.
+ first_lo = 0;
}
- // And update this entry with 0, to represent the
- // whole BB being processed.
- *lo = 0;
- })
- .or_insert_with(|| {
+ } else {
// succ_bb hasn't been seen before. Add it to
// `stack` for processing.
- stack.push(StackEntry {
+ self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
- hi: body[succ_bb].statements.len(),
- first_part_only: false,
+ hi: self.body[succ_bb].statements.len(),
});
- // Insert 0 for this BB, to represent the whole BB
- // being processed.
- 0
- });
+ }
+ }
}
}
+
+ self.visited.clear();
}
}
@@ -132,30 +143,22 @@
crate fn new(
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
- nonlexical_regioncx: Rc<RegionInferenceContext<'tcx>>,
- borrow_set: &Rc<BorrowSet<'tcx>>,
+ nonlexical_regioncx: &'a RegionInferenceContext<'tcx>,
+ borrow_set: &'a BorrowSet<'tcx>,
) -> Self {
- let mut borrows_out_of_scope_at_location = FxHashMap::default();
+ let mut prec = OutOfScopePrecomputer::new(body, nonlexical_regioncx);
for (borrow_index, borrow_data) in borrow_set.iter_enumerated() {
let borrow_region = borrow_data.region.to_region_vid();
let location = borrow_data.reserve_location;
- precompute_borrows_out_of_scope(
- body,
- &nonlexical_regioncx,
- &mut borrows_out_of_scope_at_location,
- borrow_index,
- borrow_region,
- location,
- );
+ prec.precompute_borrows_out_of_scope(borrow_index, borrow_region, location);
}
Borrows {
tcx,
body,
- borrow_set: borrow_set.clone(),
- borrows_out_of_scope_at_location,
- _nonlexical_regioncx: nonlexical_regioncx,
+ borrow_set,
+ borrows_out_of_scope_at_location: prec.borrows_out_of_scope_at_location,
}
}
@@ -302,6 +305,7 @@
| mir::StatementKind::Retag { .. }
| mir::StatementKind::AscribeUserType(..)
| mir::StatementKind::Coverage(..)
+ | mir::StatementKind::CopyNonOverlapping(..)
| mir::StatementKind::Nop => {}
}
}
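Note: the `OutOfScopePrecomputer` refactor above replaces a per-borrow `FxHashMap` of visited blocks with a single `BitSet` that is reused across borrows and cleared after each traversal. A minimal, self-contained sketch of that pattern (a plain `Vec<bool>` standing in for the bitset, no rustc types):

    // Reuse one visited buffer across traversals instead of allocating per call.
    struct Precomputer {
        visited: Vec<bool>, // one slot per node, allocated once
    }

    impl Precomputer {
        fn new(num_nodes: usize) -> Self {
            Precomputer { visited: vec![false; num_nodes] }
        }

        // Walks from `start`, then clears the marks so the next call starts fresh.
        fn traverse(&mut self, edges: &[Vec<usize>], start: usize, mut visit: impl FnMut(usize)) {
            let mut stack = vec![start];
            self.visited[start] = true;
            while let Some(node) = stack.pop() {
                visit(node);
                for &succ in &edges[node] {
                    if !self.visited[succ] {
                        self.visited[succ] = true;
                        stack.push(succ);
                    }
                }
            }
            for v in &mut self.visited {
                *v = false;
            }
        }
    }

    fn main() {
        // Two nodes with an edge 0 -> 1; both traversals reuse the same buffer.
        let edges = vec![vec![1], vec![]];
        let mut prec = Precomputer::new(edges.len());
        prec.traverse(&edges, 0, |n| println!("visited {}", n));
        prec.traverse(&edges, 1, |n| println!("visited {}", n));
    }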
diff --git a/compiler/rustc_mir/src/dataflow/impls/liveness.rs b/compiler/rustc_mir/src/dataflow/impls/liveness.rs
index 85aaff5..2d20f0d 100644
--- a/compiler/rustc_mir/src/dataflow/impls/liveness.rs
+++ b/compiler/rustc_mir/src/dataflow/impls/liveness.rs
@@ -95,7 +95,7 @@
// We purposefully do not call `super_place` here to avoid calling `visit_local` for this
// place with one of the `Projection` variants of `PlaceContext`.
- self.visit_projection(local, projection, context, location);
+ self.visit_projection(place.as_ref(), context, location);
match DefUse::for_place(context) {
// Treat derefs as a use of the base local. `*p = 4` is not a def of `p` but a use.
diff --git a/compiler/rustc_mir/src/dataflow/impls/storage_liveness.rs b/compiler/rustc_mir/src/dataflow/impls/storage_liveness.rs
index 9250cd4..7926645 100644
--- a/compiler/rustc_mir/src/dataflow/impls/storage_liveness.rs
+++ b/compiler/rustc_mir/src/dataflow/impls/storage_liveness.rs
@@ -149,6 +149,7 @@
| StatementKind::FakeRead(..)
| StatementKind::Nop
| StatementKind::Retag(..)
+ | StatementKind::CopyNonOverlapping(..)
| StatementKind::StorageLive(..) => {}
}
}
diff --git a/compiler/rustc_mir/src/dataflow/move_paths/builder.rs b/compiler/rustc_mir/src/dataflow/move_paths/builder.rs
index ee78ff0..1ddd81e 100644
--- a/compiler/rustc_mir/src/dataflow/move_paths/builder.rs
+++ b/compiler/rustc_mir/src/dataflow/move_paths/builder.rs
@@ -318,6 +318,7 @@
StatementKind::Retag { .. }
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
| StatementKind::Nop => {}
}
}
@@ -329,8 +330,8 @@
| Rvalue::Repeat(ref operand, _)
| Rvalue::Cast(_, ref operand, _)
| Rvalue::UnaryOp(_, ref operand) => self.gather_operand(operand),
- Rvalue::BinaryOp(ref _binop, ref lhs, ref rhs)
- | Rvalue::CheckedBinaryOp(ref _binop, ref lhs, ref rhs) => {
+ Rvalue::BinaryOp(ref _binop, box (ref lhs, ref rhs))
+ | Rvalue::CheckedBinaryOp(ref _binop, box (ref lhs, ref rhs)) => {
self.gather_operand(lhs);
self.gather_operand(rhs);
}
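Note: the `box (ref lhs, ref rhs)` patterns above track a MIR change in which the operands of `Rvalue::BinaryOp` and `Rvalue::CheckedBinaryOp` are stored behind a `Box`. `box` patterns are a nightly-only feature; a stand-alone sketch of how they destructure through the allocation (illustration only, not part of the patch):

    #![feature(box_patterns)]

    fn sum(pair: Box<(i32, i32)>) -> i32 {
        match pair {
            // `box` patterns match through the Box and bind the tuple's
            // fields directly, like the Rvalue::BinaryOp arms above.
            box (lhs, rhs) => lhs + rhs,
        }
    }

    fn main() {
        assert_eq!(sum(Box::new((2, 3))), 5);
    }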
diff --git a/compiler/rustc_mir/src/interpret/cast.rs b/compiler/rustc_mir/src/interpret/cast.rs
index 128d8cf..2d9e6df 100644
--- a/compiler/rustc_mir/src/interpret/cast.rs
+++ b/compiler/rustc_mir/src/interpret/cast.rs
@@ -17,10 +17,10 @@
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn cast(
&mut self,
- src: OpTy<'tcx, M::PointerTag>,
+ src: &OpTy<'tcx, M::PointerTag>,
cast_kind: CastKind,
cast_ty: Ty<'tcx>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
use rustc_middle::mir::CastKind::*;
// FIXME: In which cases should we trigger UB when the source is uninit?
@@ -32,7 +32,7 @@
Misc => {
let src = self.read_immediate(src)?;
- let res = self.misc_cast(src, cast_ty)?;
+ let res = self.misc_cast(&src, cast_ty)?;
self.write_immediate(res, dest)?;
}
@@ -107,7 +107,7 @@
fn misc_cast(
&self,
- src: ImmTy<'tcx, M::PointerTag>,
+ src: &ImmTy<'tcx, M::PointerTag>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
use rustc_middle::ty::TyKind::*;
@@ -158,13 +158,13 @@
let dest_layout = self.layout_of(cast_ty)?;
if dest_layout.size == src.layout.size {
            // Thin or fat pointer that just has the ptr kind of the target type changed.
- return Ok(*src);
+ return Ok(**src);
} else {
// Casting the metadata away from a fat ptr.
assert_eq!(src.layout.size, 2 * self.memory.pointer_size());
assert_eq!(dest_layout.size, self.memory.pointer_size());
assert!(src.layout.ty.is_unsafe_ptr());
- return match *src {
+ return match **src {
Immediate::ScalarPair(data, _) => Ok(data.into()),
Immediate::Scalar(..) => span_bug!(
self.cur_span(),
@@ -259,8 +259,8 @@
fn unsize_into_ptr(
&mut self,
- src: OpTy<'tcx, M::PointerTag>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ src: &OpTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
// The pointee types
source_ty: Ty<'tcx>,
cast_ty: Ty<'tcx>,
@@ -300,9 +300,9 @@
fn unsize_into(
&mut self,
- src: OpTy<'tcx, M::PointerTag>,
+ src: &OpTy<'tcx, M::PointerTag>,
cast_ty: TyAndLayout<'tcx>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
@@ -340,9 +340,9 @@
let src_field = self.operand_field(src, i)?;
let dst_field = self.place_field(dest, i)?;
if src_field.layout.ty == cast_ty_field.ty {
- self.copy_op(src_field, dst_field)?;
+ self.copy_op(&src_field, &dst_field)?;
} else {
- self.unsize_into(src_field, cast_ty_field, dst_field)?;
+ self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
}
}
Ok(())
diff --git a/compiler/rustc_mir/src/interpret/eval_context.rs b/compiler/rustc_mir/src/interpret/eval_context.rs
index 7e9594d..149a9f8 100644
--- a/compiler/rustc_mir/src/interpret/eval_context.rs
+++ b/compiler/rustc_mir/src/interpret/eval_context.rs
@@ -226,6 +226,16 @@
}
impl<'mir, 'tcx, Tag, Extra> Frame<'mir, 'tcx, Tag, Extra> {
+ /// Get the current location within the Frame.
+ ///
+ /// If this is `Err`, we are not currently executing any particular statement in
+ /// this frame (can happen e.g. during frame initialization, and during unwinding on
+ /// frames without cleanup code).
+ /// We basically abuse `Result` as `Either`.
+ pub fn current_loc(&self) -> Result<mir::Location, Span> {
+ self.loc
+ }
+
/// Return the `SourceInfo` of the current instruction.
pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
self.loc.ok().map(|loc| self.body.source_info(loc))
@@ -548,8 +558,8 @@
/// This can fail to provide an answer for extern types.
pub(super) fn size_and_align_of(
&self,
- metadata: MemPlaceMeta<M::PointerTag>,
- layout: TyAndLayout<'tcx>,
+ metadata: &MemPlaceMeta<M::PointerTag>,
+ layout: &TyAndLayout<'tcx>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
if !layout.is_unsized() {
return Ok(Some((layout.size, layout.align.abi)));
@@ -577,24 +587,25 @@
// the last field). Can't have foreign types here, how would we
// adjust alignment and size for them?
let field = layout.field(self, layout.fields.count() - 1)?;
- let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? {
- Some(size_and_align) => size_and_align,
- None => {
- // A field with extern type. If this field is at offset 0, we behave
- // like the underlying extern type.
- // FIXME: Once we have made decisions for how to handle size and alignment
- // of `extern type`, this should be adapted. It is just a temporary hack
- // to get some code to work that probably ought to work.
- if sized_size == Size::ZERO {
- return Ok(None);
- } else {
- span_bug!(
- self.cur_span(),
- "Fields cannot be extern types, unless they are at offset 0"
- )
+ let (unsized_size, unsized_align) =
+ match self.size_and_align_of(metadata, &field)? {
+ Some(size_and_align) => size_and_align,
+ None => {
+ // A field with extern type. If this field is at offset 0, we behave
+ // like the underlying extern type.
+ // FIXME: Once we have made decisions for how to handle size and alignment
+ // of `extern type`, this should be adapted. It is just a temporary hack
+ // to get some code to work that probably ought to work.
+ if sized_size == Size::ZERO {
+ return Ok(None);
+ } else {
+ span_bug!(
+ self.cur_span(),
+ "Fields cannot be extern types, unless they are at offset 0"
+ )
+ }
}
- }
- };
+ };
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
@@ -645,16 +656,16 @@
#[inline]
pub fn size_and_align_of_mplace(
&self,
- mplace: MPlaceTy<'tcx, M::PointerTag>,
+ mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
- self.size_and_align_of(mplace.meta, mplace.layout)
+ self.size_and_align_of(&mplace.meta, &mplace.layout)
}
pub fn push_stack_frame(
&mut self,
instance: ty::Instance<'tcx>,
body: &'mir mir::Body<'tcx>,
- return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
+ return_place: Option<&PlaceTy<'tcx, M::PointerTag>>,
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
// first push a stack frame so we have access to the local substs
@@ -662,7 +673,7 @@
body,
loc: Err(body.span), // Span used for errors caused during preamble.
return_to_block,
- return_place,
+ return_place: return_place.copied(),
// empty local array, we fill it in below, after we are inside the stack frame and
// all methods actually know about the frame
locals: IndexVec::new(),
@@ -678,7 +689,7 @@
let span = const_.span;
let const_ =
self.subst_from_current_frame_and_normalize_erasing_regions(const_.literal);
- self.const_to_op(const_, None).map_err(|err| {
+ self.mir_const_to_op(&const_, None).map_err(|err| {
// If there was an error, set the span of the current frame to this constant.
// Avoiding doing this when evaluation succeeds.
self.frame_mut().loc = Err(span);
@@ -777,10 +788,10 @@
if !unwinding {
// Copy the return value to the caller's stack frame.
- if let Some(return_place) = frame.return_place {
+ if let Some(ref return_place) = frame.return_place {
let op = self.access_local(&frame, mir::RETURN_PLACE, None)?;
- self.copy_op_transmute(op, return_place)?;
- trace!("{:?}", self.dump_place(*return_place));
+ self.copy_op_transmute(&op, return_place)?;
+ trace!("{:?}", self.dump_place(**return_place));
} else {
throw_ub!(Unreachable);
}
diff --git a/compiler/rustc_mir/src/interpret/intern.rs b/compiler/rustc_mir/src/interpret/intern.rs
index 01d58c4..95464da 100644
--- a/compiler/rustc_mir/src/interpret/intern.rs
+++ b/compiler/rustc_mir/src/interpret/intern.rs
@@ -167,7 +167,7 @@
fn visit_aggregate(
&mut self,
- mplace: MPlaceTy<'tcx>,
+ mplace: &MPlaceTy<'tcx>,
fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
) -> InterpResult<'tcx> {
// ZSTs cannot contain pointers, so we can skip them.
@@ -191,14 +191,14 @@
self.walk_aggregate(mplace, fields)
}
- fn visit_value(&mut self, mplace: MPlaceTy<'tcx>) -> InterpResult<'tcx> {
+ fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
// Handle Reference types, as these are the only relocations supported by const eval.
// Raw pointers (and boxes) are handled by the `leftover_relocations` logic.
let tcx = self.ecx.tcx;
let ty = mplace.layout.ty;
if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
- let value = self.ecx.read_immediate(mplace.into())?;
- let mplace = self.ecx.ref_to_mplace(value)?;
+ let value = self.ecx.read_immediate(&(*mplace).into())?;
+ let mplace = self.ecx.ref_to_mplace(&value)?;
assert_eq!(mplace.layout.ty, referenced_ty);
// Handle trait object vtables.
if let ty::Dynamic(..) =
@@ -292,11 +292,11 @@
/// tracks where in the value we are and thus can show much better error messages.
/// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
/// are hard errors.
-#[tracing::instrument(skip(ecx))]
+#[tracing::instrument(level = "debug", skip(ecx))]
pub fn intern_const_alloc_recursive<M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>(
ecx: &mut InterpCx<'mir, 'tcx, M>,
intern_kind: InternKind,
- ret: MPlaceTy<'tcx>,
+ ret: &MPlaceTy<'tcx>,
) -> Result<(), ErrorReported>
where
'tcx: 'mir,
@@ -328,7 +328,7 @@
Some(ret.layout.ty),
);
- ref_tracking.track((ret, base_intern_mode), || ());
+ ref_tracking.track((*ret, base_intern_mode), || ());
while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
let res = InternVisitor {
@@ -338,7 +338,7 @@
leftover_allocations,
inside_unsafe_cell: false,
}
- .visit_value(mplace);
+ .visit_value(&mplace);
// We deliberately *ignore* interpreter errors here. When there is a problem, the remaining
// references are "leftover"-interned, and later validation will show a proper error
// and point at the right part of the value causing the problem.
@@ -352,14 +352,6 @@
error
),
);
- // Some errors shouldn't come up because creating them causes
- // an allocation, which we should avoid. When that happens,
- // dedicated error variants should be introduced instead.
- assert!(
- !error.kind.allocates(),
- "interning encountered allocating error: {}",
- error
- );
}
}
}
@@ -435,11 +427,11 @@
layout: TyAndLayout<'tcx>,
f: impl FnOnce(
&mut InterpCx<'mir, 'tcx, M>,
- MPlaceTy<'tcx, M::PointerTag>,
+ &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ()>,
) -> InterpResult<'tcx, &'tcx Allocation> {
let dest = self.allocate(layout, MemoryKind::Stack);
- f(self, dest)?;
+ f(self, &dest)?;
let ptr = dest.ptr.assert_ptr();
assert_eq!(ptr.offset, Size::ZERO);
let mut alloc = self.memory.alloc_map.remove(&ptr.alloc_id).unwrap().1;
diff --git a/compiler/rustc_mir/src/interpret/intrinsics.rs b/compiler/rustc_mir/src/interpret/intrinsics.rs
index f4309c9..25c3c2c 100644
--- a/compiler/rustc_mir/src/interpret/intrinsics.rs
+++ b/compiler/rustc_mir/src/interpret/intrinsics.rs
@@ -23,11 +23,7 @@
mod caller_location;
mod type_name;
-fn numeric_intrinsic<'tcx, Tag>(
- name: Symbol,
- bits: u128,
- kind: Primitive,
-) -> InterpResult<'tcx, Scalar<Tag>> {
+fn numeric_intrinsic<Tag>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Tag> {
let size = match kind {
Primitive::Int(integer, _) => integer.size(),
_ => bug!("invalid `{}` argument: {:?}", name, bits),
@@ -41,7 +37,7 @@
sym::bitreverse => (bits << extra).reverse_bits(),
_ => bug!("not a numeric intrinsic: {}", name),
};
- Ok(Scalar::from_uint(bits_out, size))
+ Scalar::from_uint(bits_out, size)
}
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
@@ -115,7 +111,7 @@
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, M::PointerTag>],
- ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
+ ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
) -> InterpResult<'tcx, bool> {
let substs = instance.substs;
let intrinsic_name = self.tcx.item_name(instance.def_id());
@@ -143,9 +139,9 @@
sym::min_align_of_val | sym::size_of_val => {
// Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
            // dereferenceable!
- let place = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+ let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
let (size, align) = self
- .size_and_align_of_mplace(place)?
+ .size_and_align_of_mplace(&place)?
.ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;
let result = match intrinsic_name {
@@ -177,7 +173,7 @@
self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
let const_ = ty::Const { val: ty::ConstKind::Value(val), ty };
let val = self.const_to_op(&const_, None)?;
- self.copy_op(val, dest)?;
+ self.copy_op(&val, dest)?;
}
sym::ctpop
@@ -189,7 +185,7 @@
| sym::bitreverse => {
let ty = substs.type_at(0);
let layout_of = self.layout_of(ty)?;
- let val = self.read_scalar(args[0])?.check_init()?;
+ let val = self.read_scalar(&args[0])?.check_init()?;
let bits = self.force_bits(val, layout_of.size)?;
let kind = match layout_of.abi {
Abi::Scalar(ref scalar) => scalar.value,
@@ -208,26 +204,29 @@
if nonzero && bits == 0 {
throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
}
- let out_val = numeric_intrinsic(intrinsic_name, bits, kind)?;
+ let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
self.write_scalar(out_val, dest)?;
}
sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
- let lhs = self.read_immediate(args[0])?;
- let rhs = self.read_immediate(args[1])?;
+ let lhs = self.read_immediate(&args[0])?;
+ let rhs = self.read_immediate(&args[1])?;
let bin_op = match intrinsic_name {
sym::add_with_overflow => BinOp::Add,
sym::sub_with_overflow => BinOp::Sub,
sym::mul_with_overflow => BinOp::Mul,
_ => bug!("Already checked for int ops"),
};
- self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
+ self.binop_with_overflow(bin_op, &lhs, &rhs, dest)?;
}
sym::saturating_add | sym::saturating_sub => {
- let l = self.read_immediate(args[0])?;
- let r = self.read_immediate(args[1])?;
+ let l = self.read_immediate(&args[0])?;
+ let r = self.read_immediate(&args[1])?;
let is_add = intrinsic_name == sym::saturating_add;
- let (val, overflowed, _ty) =
- self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, l, r)?;
+ let (val, overflowed, _ty) = self.overflowing_binary_op(
+ if is_add { BinOp::Add } else { BinOp::Sub },
+ &l,
+ &r,
+ )?;
let val = if overflowed {
let num_bits = l.layout.size.bits();
if l.layout.abi.is_signed() {
@@ -269,8 +268,8 @@
self.write_scalar(val, dest)?;
}
sym::discriminant_value => {
- let place = self.deref_operand(args[0])?;
- let discr_val = self.read_discriminant(place.into())?.0;
+ let place = self.deref_operand(&args[0])?;
+ let discr_val = self.read_discriminant(&place.into())?.0;
self.write_scalar(discr_val, dest)?;
}
sym::unchecked_shl
@@ -280,8 +279,8 @@
| sym::unchecked_mul
| sym::unchecked_div
| sym::unchecked_rem => {
- let l = self.read_immediate(args[0])?;
- let r = self.read_immediate(args[1])?;
+ let l = self.read_immediate(&args[0])?;
+ let r = self.read_immediate(&args[1])?;
let bin_op = match intrinsic_name {
sym::unchecked_shl => BinOp::Shl,
sym::unchecked_shr => BinOp::Shr,
@@ -292,7 +291,7 @@
sym::unchecked_rem => BinOp::Rem,
_ => bug!("Already checked for int ops"),
};
- let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
+ let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
if overflowed {
let layout = self.layout_of(substs.type_at(0))?;
let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
@@ -308,9 +307,9 @@
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
let layout = self.layout_of(substs.type_at(0))?;
- let val = self.read_scalar(args[0])?.check_init()?;
+ let val = self.read_scalar(&args[0])?.check_init()?;
let val_bits = self.force_bits(val, layout.size)?;
- let raw_shift = self.read_scalar(args[1])?.check_init()?;
+ let raw_shift = self.read_scalar(&args[1])?.check_init()?;
let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
let width_bits = u128::from(layout.size.bits());
let shift_bits = raw_shift_bits % width_bits;
@@ -324,40 +323,20 @@
let result = Scalar::from_uint(truncated_bits, layout.size);
self.write_scalar(result, dest)?;
}
- sym::copy | sym::copy_nonoverlapping => {
- let elem_ty = instance.substs.type_at(0);
- let elem_layout = self.layout_of(elem_ty)?;
- let count = self.read_scalar(args[2])?.to_machine_usize(self)?;
- let elem_align = elem_layout.align.abi;
-
- let size = elem_layout.size.checked_mul(count, self).ok_or_else(|| {
- err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
- })?;
- let src = self.read_scalar(args[0])?.check_init()?;
- let src = self.memory.check_ptr_access(src, size, elem_align)?;
- let dest = self.read_scalar(args[1])?.check_init()?;
- let dest = self.memory.check_ptr_access(dest, size, elem_align)?;
-
- if let (Some(src), Some(dest)) = (src, dest) {
- self.memory.copy(
- src,
- dest,
- size,
- intrinsic_name == sym::copy_nonoverlapping,
- )?;
- }
+ sym::copy => {
+ self.copy(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
}
sym::offset => {
- let ptr = self.read_scalar(args[0])?.check_init()?;
- let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
+ let ptr = self.read_scalar(&args[0])?.check_init()?;
+ let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
let pointee_ty = substs.type_at(0);
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
self.write_scalar(offset_ptr, dest)?;
}
sym::arith_offset => {
- let ptr = self.read_scalar(args[0])?.check_init()?;
- let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
+ let ptr = self.read_scalar(&args[0])?.check_init()?;
+ let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
let pointee_ty = substs.type_at(0);
let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
@@ -366,8 +345,8 @@
self.write_scalar(offset_ptr, dest)?;
}
sym::ptr_offset_from => {
- let a = self.read_immediate(args[0])?.to_scalar()?;
- let b = self.read_immediate(args[1])?.to_scalar()?;
+ let a = self.read_immediate(&args[0])?.to_scalar()?;
+ let b = self.read_immediate(&args[1])?.to_scalar()?;
// Special case: if both scalars are *equal integers*
// and not NULL, we pretend there is an allocation of size 0 right there,
@@ -406,16 +385,16 @@
let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
let (val, _overflowed, _ty) =
- self.overflowing_binary_op(BinOp::Sub, a_offset, b_offset)?;
+ self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
let pointee_layout = self.layout_of(substs.type_at(0))?;
let val = ImmTy::from_scalar(val, isize_layout);
let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
- self.exact_div(val, size, dest)?;
+ self.exact_div(&val, &size, dest)?;
}
}
sym::transmute => {
- self.copy_op_transmute(args[0], dest)?;
+ self.copy_op_transmute(&args[0], dest)?;
}
sym::assert_inhabited => {
let ty = instance.substs.type_at(0);
@@ -434,9 +413,9 @@
}
}
sym::simd_insert => {
- let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
- let elem = args[2];
- let input = args[0];
+ let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
+ let elem = &args[2];
+ let input = &args[0];
let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
assert!(
index < len,
@@ -458,12 +437,12 @@
for i in 0..len {
let place = self.place_index(dest, i)?;
- let value = if i == index { elem } else { self.operand_index(input, i)? };
- self.copy_op(value, place)?;
+ let value = if i == index { *elem } else { self.operand_index(input, i)? };
+ self.copy_op(&value, &place)?;
}
}
sym::simd_extract => {
- let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
+ let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
assert!(
index < len,
@@ -477,14 +456,14 @@
"Return type `{}` must match vector element type `{}`",
dest.layout.ty, e_ty
);
- self.copy_op(self.operand_index(args[0], index)?, dest)?;
+ self.copy_op(&self.operand_index(&args[0], index)?, dest)?;
}
sym::likely | sym::unlikely => {
// These just return their argument
- self.copy_op(args[0], dest)?;
+ self.copy_op(&args[0], dest)?;
}
sym::assume => {
- let cond = self.read_scalar(args[0])?.check_init()?.to_bool()?;
+ let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
if !cond {
throw_ub_format!("`assume` intrinsic called with `false`");
}
@@ -492,21 +471,21 @@
_ => return Ok(false),
}
- trace!("{:?}", self.dump_place(*dest));
+ trace!("{:?}", self.dump_place(**dest));
self.go_to_block(ret);
Ok(true)
}
pub fn exact_div(
&mut self,
- a: ImmTy<'tcx, M::PointerTag>,
- b: ImmTy<'tcx, M::PointerTag>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ a: &ImmTy<'tcx, M::PointerTag>,
+ b: &ImmTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
// First, check x % y != 0 (or if that computation overflows).
- let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
+ let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
if overflow || res.assert_bits(a.layout.size) != 0 {
// Then, check if `b` is -1, which is the "MIN / -1" case.
let minus1 = Scalar::from_int(-1, dest.layout.size);
@@ -518,7 +497,7 @@
}
}
// `Rem` says this is all right, so we can let `Div` do its job.
- self.binop_ignore_overflow(BinOp::Div, a, b, dest)
+ self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
}
/// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
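Note: `exact_div` above rejects, as undefined behavior, exactly the cases its comment names: a nonzero remainder, division by zero, and the `MIN / -1` overflow. A small stand-alone sketch of those preconditions, using `i32` in place of interpreter scalars (illustration only):

    // Returns Some(x / y) only when the division is exact and well defined,
    // mirroring the checks `exact_div` performs before delegating to Div.
    fn exact_div_checked(x: i32, y: i32) -> Option<i32> {
        if y == 0 || (x == i32::MIN && y == -1) || x % y != 0 {
            None // the cases the intrinsic treats as undefined behavior
        } else {
            Some(x / y)
        }
    }

    fn main() {
        assert_eq!(exact_div_checked(10, 2), Some(5));
        assert_eq!(exact_div_checked(10, 3), None);        // nonzero remainder
        assert_eq!(exact_div_checked(i32::MIN, -1), None); // overflow case
    }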
diff --git a/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs
index 5c917f0..4dfdc08 100644
--- a/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs
@@ -92,11 +92,11 @@
let location = self.allocate(loc_layout, MemoryKind::CallerLocation);
// Initialize fields.
- self.write_immediate(file.to_ref(), self.mplace_field(location, 0).unwrap().into())
+ self.write_immediate(file.to_ref(), &self.mplace_field(&location, 0).unwrap().into())
.expect("writing to memory we just allocated cannot fail");
- self.write_scalar(line, self.mplace_field(location, 1).unwrap().into())
+ self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
.expect("writing to memory we just allocated cannot fail");
- self.write_scalar(col, self.mplace_field(location, 2).unwrap().into())
+ self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into())
.expect("writing to memory we just allocated cannot fail");
location
diff --git a/compiler/rustc_mir/src/interpret/machine.rs b/compiler/rustc_mir/src/interpret/machine.rs
index 53ac62d..65869f9 100644
--- a/compiler/rustc_mir/src/interpret/machine.rs
+++ b/compiler/rustc_mir/src/interpret/machine.rs
@@ -157,7 +157,7 @@
instance: ty::Instance<'tcx>,
abi: Abi,
args: &[OpTy<'tcx, Self::PointerTag>],
- ret: Option<(PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
+ ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>>;
@@ -168,7 +168,7 @@
fn_val: Self::ExtraFnVal,
abi: Abi,
args: &[OpTy<'tcx, Self::PointerTag>],
- ret: Option<(PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
+ ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx>;
@@ -178,7 +178,7 @@
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Self::PointerTag>],
- ret: Option<(PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
+ ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx>;
@@ -200,14 +200,14 @@
fn binary_ptr_op(
ecx: &InterpCx<'mir, 'tcx, Self>,
bin_op: mir::BinOp,
- left: ImmTy<'tcx, Self::PointerTag>,
- right: ImmTy<'tcx, Self::PointerTag>,
+ left: &ImmTy<'tcx, Self::PointerTag>,
+ right: &ImmTy<'tcx, Self::PointerTag>,
) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool, Ty<'tcx>)>;
/// Heap allocations via the `box` keyword.
fn box_alloc(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
- dest: PlaceTy<'tcx, Self::PointerTag>,
+ dest: &PlaceTy<'tcx, Self::PointerTag>,
) -> InterpResult<'tcx>;
/// Called to read the specified `local` from the `frame`.
@@ -327,7 +327,7 @@
fn retag(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_kind: mir::RetagKind,
- _place: PlaceTy<'tcx, Self::PointerTag>,
+ _place: &PlaceTy<'tcx, Self::PointerTag>,
) -> InterpResult<'tcx> {
Ok(())
}
@@ -420,7 +420,7 @@
fn_val: !,
_abi: Abi,
_args: &[OpTy<$tcx>],
- _ret: Option<(PlaceTy<$tcx>, mir::BasicBlock)>,
+ _ret: Option<(&PlaceTy<$tcx>, mir::BasicBlock)>,
_unwind: Option<mir::BasicBlock>,
) -> InterpResult<$tcx> {
match fn_val {}
diff --git a/compiler/rustc_mir/src/interpret/memory.rs b/compiler/rustc_mir/src/interpret/memory.rs
index f3e3738..3648748 100644
--- a/compiler/rustc_mir/src/interpret/memory.rs
+++ b/compiler/rustc_mir/src/interpret/memory.rs
@@ -854,7 +854,7 @@
Some(ptr) => ptr,
None => {
// zero-sized access
- src.next().expect_none("iterator said it was empty but returned an element");
+ assert!(src.next().is_none(), "iterator said it was empty but returned an element");
return Ok(());
}
};
@@ -880,7 +880,7 @@
Some(ptr) => ptr,
None => {
// zero-sized access
- src.next().expect_none("iterator said it was empty but returned an element");
+ assert!(src.next().is_none(), "iterator said it was empty but returned an element");
return Ok(());
}
};
@@ -894,7 +894,7 @@
let offset_ptr = ptr.offset(Size::from_bytes(idx) * 2, &tcx)?; // `Size` multiplication
allocation.write_scalar(&tcx, offset_ptr, val.into(), Size::from_bytes(2))?;
}
- src.next().expect_none("iterator was longer than it said it would be");
+ assert!(src.next().is_none(), "iterator was longer than it said it would be");
Ok(())
}
diff --git a/compiler/rustc_mir/src/interpret/operand.rs b/compiler/rustc_mir/src/interpret/operand.rs
index 8823645..2893349 100644
--- a/compiler/rustc_mir/src/interpret/operand.rs
+++ b/compiler/rustc_mir/src/interpret/operand.rs
@@ -32,6 +32,9 @@
ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
}
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(Immediate, 56);
+
impl<Tag> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
#[inline(always)]
fn from(val: ScalarMaybeUninit<Tag>) -> Self {
@@ -92,6 +95,9 @@
pub layout: TyAndLayout<'tcx>,
}
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
+
impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
/// Helper function for printing a scalar to a FmtPrinter
@@ -106,7 +112,7 @@
}
ScalarMaybeUninit::Uninit => cx.typed_value(
|mut this| {
- this.write_str("{uninit ")?;
+ this.write_str("uninit ")?;
Ok(this)
},
|this| this.print_type(ty),
@@ -156,6 +162,9 @@
pub layout: TyAndLayout<'tcx>,
}
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(OpTy<'_, ()>, 80);
+
impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> {
type Target = Operand<Tag>;
#[inline(always)]
@@ -171,6 +180,13 @@
}
}
+impl<'tcx, Tag: Copy> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+ #[inline(always)]
+ fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
+ OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout }
+ }
+}
+
impl<'tcx, Tag> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
#[inline(always)]
fn from(val: ImmTy<'tcx, Tag>) -> Self {
@@ -222,7 +238,7 @@
#[inline]
pub fn force_op_ptr(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
match op.try_as_mplace(self) {
Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
@@ -234,7 +250,7 @@
/// Returns `None` if the layout does not permit loading this as a value.
fn try_read_immediate_from_mplace(
&self,
- mplace: MPlaceTy<'tcx, M::PointerTag>,
+ mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
@@ -295,14 +311,14 @@
/// in a `Immediate`, not on which data is stored there currently.
pub(crate) fn try_read_immediate(
&self,
- src: OpTy<'tcx, M::PointerTag>,
+ src: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
Ok(match src.try_as_mplace(self) {
- Ok(mplace) => {
+ Ok(ref mplace) => {
if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
Ok(val)
} else {
- Err(mplace)
+ Err(*mplace)
}
}
Err(val) => Ok(val),
@@ -313,7 +329,7 @@
#[inline(always)]
pub fn read_immediate(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
if let Ok(imm) = self.try_read_immediate(op)? {
Ok(imm)
@@ -325,13 +341,13 @@
/// Read a scalar from a place
pub fn read_scalar(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
Ok(self.read_immediate(op)?.to_scalar_or_uninit())
}
// Turn the wide MPlace into a string (must already be dereferenced!)
- pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
+ pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
@@ -341,11 +357,11 @@
/// Projection functions
pub fn operand_field(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
field: usize,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let base = match op.try_as_mplace(self) {
- Ok(mplace) => {
+ Ok(ref mplace) => {
// We can reuse the mplace field computation logic for indirect operands.
let field = self.mplace_field(mplace, field)?;
return Ok(field.into());
@@ -379,7 +395,7 @@
pub fn operand_index(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
index: u64,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
if let Ok(index) = usize::try_from(index) {
@@ -388,28 +404,28 @@
} else {
// Indexing into a big array. This must be an mplace.
let mplace = op.assert_mem_place(self);
- Ok(self.mplace_index(mplace, index)?.into())
+ Ok(self.mplace_index(&mplace, index)?.into())
}
}
pub fn operand_downcast(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
variant: VariantIdx,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
Ok(match op.try_as_mplace(self) {
- Ok(mplace) => self.mplace_downcast(mplace, variant)?.into(),
+ Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
Err(..) => {
let layout = op.layout.for_variant(self, variant);
- OpTy { layout, ..op }
+ OpTy { layout, ..*op }
}
})
}
pub fn operand_projection(
&self,
- base: OpTy<'tcx, M::PointerTag>,
+ base: &OpTy<'tcx, M::PointerTag>,
proj_elem: mir::PlaceElem<'tcx>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc_middle::mir::ProjectionElem::*;
@@ -421,7 +437,7 @@
// The rest should only occur as mplace, we do not use Immediates for types
// allowing such operations. This matches place_projection forcing an allocation.
let mplace = base.assert_mem_place(self);
- self.mplace_projection(mplace, proj_elem)?.into()
+ self.mplace_projection(&mplace, proj_elem)?.into()
}
})
}
@@ -453,9 +469,9 @@
#[inline(always)]
pub fn place_to_op(
&self,
- place: PlaceTy<'tcx, M::PointerTag>,
+ place: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
- let op = match *place {
+ let op = match **place {
Place::Ptr(mplace) => Operand::Indirect(mplace),
Place::Local { frame, local } => {
*self.access_local(&self.stack()[frame], local, None)?
@@ -480,7 +496,7 @@
let op = place
.projection
.iter()
- .try_fold(base_op, |op, elem| self.operand_projection(op, elem))?;
+ .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;
trace!("eval_place_to_op: got {:?}", *op);
// Sanity-check the type we ended up with.
@@ -498,6 +514,7 @@
/// Evaluate the operand, returning a place where you can then find the data.
/// If you already know the layout, you can save two table lookups
/// by passing it in here.
+ #[inline]
pub fn eval_operand(
&self,
mir_op: &mir::Operand<'tcx>,
@@ -515,7 +532,8 @@
// * During ConstProp, with `TooGeneric` or since the `requried_consts` were not all
// checked yet.
// * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
- self.const_to_op(val, layout)?
+
+ self.mir_const_to_op(&val, layout)?
}
};
trace!("{:?}: {:?}", mir_op, *op);
@@ -539,28 +557,45 @@
val: &ty::Const<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ match val.val {
+ ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
+ ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorReported)),
+ ty::ConstKind::Unevaluated(def, substs, promoted) => {
+ let instance = self.resolve(def, substs)?;
+ Ok(self.eval_to_allocation(GlobalId { instance, promoted })?.into())
+ }
+ ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
+ span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val)
+ }
+ ty::ConstKind::Value(val_val) => self.const_val_to_op(val_val, val.ty, layout),
+ }
+ }
+
+ crate fn mir_const_to_op(
+ &self,
+ val: &mir::ConstantKind<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ match val {
+ mir::ConstantKind::Ty(ct) => self.const_to_op(ct, layout),
+ mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, ty, None),
+ }
+ }
+
+ crate fn const_val_to_op(
+ &self,
+ val_val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ // Other cases need layout.
let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
Ok(match scalar {
Scalar::Ptr(ptr) => Scalar::Ptr(self.global_base_pointer(ptr)?),
Scalar::Int(int) => Scalar::Int(int),
})
};
- // Early-return cases.
- let val_val = match val.val {
- ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
- ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorReported)),
- ty::ConstKind::Unevaluated(def, substs, promoted) => {
- let instance = self.resolve(def, substs)?;
- return Ok(self.eval_to_allocation(GlobalId { instance, promoted })?.into());
- }
- ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
- span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val)
- }
- ty::ConstKind::Value(val_val) => val_val,
- };
- // Other cases need layout.
- let layout =
- from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(val.ty))?;
+ let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
let op = match val_val {
ConstValue::ByRef { alloc, offset } => {
let id = self.tcx.create_memory_alloc(alloc);
@@ -590,7 +625,7 @@
/// Read discriminant, return the runtime value as well as the variant index.
pub fn read_discriminant(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
trace!("read_discriminant_value {:#?}", op.layout);
// Get type and layout of the discriminant.
@@ -636,7 +671,7 @@
let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;
// Read tag and sanity-check `tag_layout`.
- let tag_val = self.read_immediate(self.operand_field(op, tag_field)?)?;
+ let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size);
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
let tag_val = tag_val.to_scalar()?;
@@ -690,7 +725,7 @@
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
- self.binary_op(mir::BinOp::Sub, tag_val, niche_start_val)?;
+ self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative = variant_index_relative_val
.to_scalar()?
.assert_bits(tag_val.layout.size);
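The hunks above split constant lowering into `const_to_op`, `mir_const_to_op`, and `const_val_to_op`. The following standalone sketch uses toy enums and a plain `u64` as a stand-in for an operand (none of these are rustc's real types); it only shows the dispatch shape: MIR constants either wrap a type-system constant or carry a value directly, and both paths bottom out in the value-to-operand step.

#[derive(Debug)]
enum ConstKind {
    Value(u64),
    Unevaluated,
    Param,
}

#[derive(Debug)]
enum ConstantKind {
    Ty(ConstKind),
    Val(u64),
}

#[derive(Debug, PartialEq)]
enum EvalError {
    TooGeneric,
}

// Leaf case: turn an already-evaluated value into an "operand".
fn const_val_to_op(val: u64) -> Result<u64, EvalError> {
    Ok(val)
}

// Type-system constants: evaluate if needed, bail out if still generic.
fn const_to_op(ct: &ConstKind) -> Result<u64, EvalError> {
    match ct {
        ConstKind::Value(v) => const_val_to_op(*v),
        ConstKind::Unevaluated => const_val_to_op(0), // stands in for eval_to_allocation
        ConstKind::Param => Err(EvalError::TooGeneric),
    }
}

// MIR constants: either wrap a type-system constant or carry a value directly.
fn mir_const_to_op(val: &ConstantKind) -> Result<u64, EvalError> {
    match val {
        ConstantKind::Ty(ct) => const_to_op(ct),
        ConstantKind::Val(v) => const_val_to_op(*v),
    }
}

fn main() {
    assert_eq!(mir_const_to_op(&ConstantKind::Val(7)), Ok(7));
    assert_eq!(mir_const_to_op(&ConstantKind::Ty(ConstKind::Value(3))), Ok(3));
    assert_eq!(const_to_op(&ConstKind::Unevaluated), Ok(0));
    assert_eq!(const_to_op(&ConstKind::Param), Err(EvalError::TooGeneric));
}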
diff --git a/compiler/rustc_mir/src/interpret/operator.rs b/compiler/rustc_mir/src/interpret/operator.rs
index f508165..3737f87 100644
--- a/compiler/rustc_mir/src/interpret/operator.rs
+++ b/compiler/rustc_mir/src/interpret/operator.rs
@@ -14,11 +14,11 @@
pub fn binop_with_overflow(
&mut self,
op: mir::BinOp,
- left: ImmTy<'tcx, M::PointerTag>,
- right: ImmTy<'tcx, M::PointerTag>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ left: &ImmTy<'tcx, M::PointerTag>,
+ right: &ImmTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
- let (val, overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
+ let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
debug_assert_eq!(
self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
dest.layout.ty,
@@ -34,9 +34,9 @@
pub fn binop_ignore_overflow(
&mut self,
op: mir::BinOp,
- left: ImmTy<'tcx, M::PointerTag>,
- right: ImmTy<'tcx, M::PointerTag>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ left: &ImmTy<'tcx, M::PointerTag>,
+ right: &ImmTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
@@ -269,8 +269,8 @@
pub fn overflowing_binary_op(
&self,
bin_op: mir::BinOp,
- left: ImmTy<'tcx, M::PointerTag>,
- right: ImmTy<'tcx, M::PointerTag>,
+ left: &ImmTy<'tcx, M::PointerTag>,
+ right: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
trace!(
"Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
@@ -347,8 +347,8 @@
pub fn binary_op(
&self,
bin_op: mir::BinOp,
- left: ImmTy<'tcx, M::PointerTag>,
- right: ImmTy<'tcx, M::PointerTag>,
+ left: &ImmTy<'tcx, M::PointerTag>,
+ right: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
@@ -359,7 +359,7 @@
pub fn overflowing_unary_op(
&self,
un_op: mir::UnOp,
- val: ImmTy<'tcx, M::PointerTag>,
+ val: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
use rustc_middle::mir::UnOp::*;
@@ -409,7 +409,7 @@
pub fn unary_op(
&self,
un_op: mir::UnOp,
- val: ImmTy<'tcx, M::PointerTag>,
+ val: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
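The operator hunks above change `binary_op`, `unary_op`, and their overflowing variants to borrow their `ImmTy` arguments. A minimal sketch with a made-up `ImmTy` (just a `u128` wrapper, not rustc's type) shows the practical effect for callers: the same immediate can be reused across several calls without moving or cloning it.

#[derive(Clone, Copy, Debug)]
struct ImmTy {
    bits: u128,
}

// Takes both operands by shared reference, mirroring the new signatures above.
fn binary_add(left: &ImmTy, right: &ImmTy) -> ImmTy {
    ImmTy { bits: left.bits.wrapping_add(right.bits) }
}

fn main() {
    let a = ImmTy { bits: 2 };
    let b = ImmTy { bits: 3 };
    let sum = binary_add(&a, &b);
    // `a`, `b`, and `sum` are all still usable; nothing was moved or cloned.
    let twice = binary_add(&sum, &sum);
    assert_eq!(twice.bits, 10);
}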
diff --git a/compiler/rustc_mir/src/interpret/place.rs b/compiler/rustc_mir/src/interpret/place.rs
index efde7fe..699b531 100644
--- a/compiler/rustc_mir/src/interpret/place.rs
+++ b/compiler/rustc_mir/src/interpret/place.rs
@@ -33,6 +33,9 @@
Poison,
}
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
+
impl<Tag> MemPlaceMeta<Tag> {
pub fn unwrap_meta(self) -> Scalar<Tag> {
match self {
@@ -71,6 +74,9 @@
pub meta: MemPlaceMeta<Tag>,
}
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(MemPlace, 56);
+
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
pub enum Place<Tag = ()> {
/// A place referring to a value allocated in the `Memory` system.
@@ -81,12 +87,18 @@
Local { frame: usize, local: mir::Local },
}
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(Place, 64);
+
#[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx, Tag = ()> {
place: Place<Tag>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>,
}
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(PlaceTy<'_>, 80);
+
impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> {
type Target = Place<Tag>;
#[inline(always)]
@@ -102,6 +114,9 @@
pub layout: TyAndLayout<'tcx>,
}
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 72);
+
impl<'tcx, Tag> std::ops::Deref for MPlaceTy<'tcx, Tag> {
type Target = MemPlace<Tag>;
#[inline(always)]
@@ -168,7 +183,7 @@
}
}
-impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
+impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> {
/// Produces a MemPlace that works for ZST but nothing else
#[inline]
pub fn dangling(layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
@@ -180,13 +195,13 @@
/// Replace ptr tag, maintain vtable tag (if any)
#[inline]
- pub fn replace_tag(self, new_tag: Tag) -> Self {
+ pub fn replace_tag(&self, new_tag: Tag) -> Self {
MPlaceTy { mplace: self.mplace.replace_tag(new_tag), layout: self.layout }
}
#[inline]
pub fn offset(
- self,
+ &self,
offset: Size,
meta: MemPlaceMeta<Tag>,
layout: TyAndLayout<'tcx>,
@@ -201,7 +216,7 @@
}
#[inline]
- pub(super) fn len(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+ pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
if self.layout.is_unsized() {
// We need to consult `meta` metadata
match self.layout.ty.kind() {
@@ -219,7 +234,7 @@
}
#[inline]
- pub(super) fn vtable(self) -> Scalar<Tag> {
+ pub(super) fn vtable(&self) -> Scalar<Tag> {
match self.layout.ty.kind() {
ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
_ => bug!("vtable not supported on type {:?}", self.layout.ty),
@@ -233,10 +248,10 @@
/// Note: do not call `as_ref` on the resulting place. This function should only be used to
/// read from the resulting mplace, not to get its address back.
pub fn try_as_mplace(
- self,
+ &self,
cx: &impl HasDataLayout,
) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
- match *self {
+ match **self {
Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
Operand::Immediate(_) if self.layout.is_zst() => {
Ok(MPlaceTy::dangling(self.layout, cx))
@@ -248,7 +263,7 @@
#[inline(always)]
/// Note: do not call `as_ref` on the resulting place. This function should only be used to
/// read from the resulting mplace, not to get its address back.
- pub fn assert_mem_place(self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> {
+ pub fn assert_mem_place(&self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> {
self.try_as_mplace(cx).unwrap()
}
}
@@ -288,12 +303,12 @@
/// Generally prefer `deref_operand`.
pub fn ref_to_mplace(
&self,
- val: ImmTy<'tcx, M::PointerTag>,
+ val: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let pointee_type =
val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
let layout = self.layout_of(pointee_type)?;
- let (ptr, meta) = match *val {
+ let (ptr, meta) = match **val {
Immediate::Scalar(ptr) => (ptr.check_init()?, MemPlaceMeta::None),
Immediate::ScalarPair(ptr, meta) => {
(ptr.check_init()?, MemPlaceMeta::Meta(meta.check_init()?))
@@ -316,11 +331,11 @@
/// will always be a MemPlace. Lives in `place.rs` because it creates a place.
pub fn deref_operand(
&self,
- src: OpTy<'tcx, M::PointerTag>,
+ src: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
- let place = self.ref_to_mplace(val)?;
+ let place = self.ref_to_mplace(&val)?;
self.mplace_access_checked(place, None)
}
@@ -333,7 +348,7 @@
#[inline]
pub(super) fn check_mplace_access(
&self,
- place: MPlaceTy<'tcx, M::PointerTag>,
+ place: &MPlaceTy<'tcx, M::PointerTag>,
size: Option<Size>,
) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
let size = size.unwrap_or_else(|| {
@@ -355,13 +370,13 @@
force_align: Option<Align>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let (size, align) = self
- .size_and_align_of_mplace(place)?
+ .size_and_align_of_mplace(&place)?
.unwrap_or((place.layout.size, place.layout.align.abi));
assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?");
// Check (stricter) dynamic alignment, unless forced otherwise.
place.mplace.align = force_align.unwrap_or(align);
// When dereferencing a pointer, it must be non-NULL, aligned, and live.
- if let Some(ptr) = self.check_mplace_access(place, Some(size))? {
+ if let Some(ptr) = self.check_mplace_access(&place, Some(size))? {
place.mplace.ptr = ptr.into();
}
Ok(place)
@@ -386,7 +401,7 @@
#[inline(always)]
pub fn mplace_field(
&self,
- base: MPlaceTy<'tcx, M::PointerTag>,
+ base: &MPlaceTy<'tcx, M::PointerTag>,
field: usize,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let offset = base.layout.fields.offset(field);
@@ -397,7 +412,7 @@
// Re-use parent metadata to determine dynamic field layout.
// With custom DSTs, this *will* execute user-defined code, but the same
// happens at run-time so that's okay.
- let align = match self.size_and_align_of(base.meta, field_layout)? {
+ let align = match self.size_and_align_of(&base.meta, &field_layout)? {
Some((_, align)) => align,
None if offset == Size::ZERO => {
// An extern type at offset 0, we fall back to its static alignment.
@@ -427,7 +442,7 @@
#[inline(always)]
pub fn mplace_index(
&self,
- base: MPlaceTy<'tcx, M::PointerTag>,
+ base: &MPlaceTy<'tcx, M::PointerTag>,
index: u64,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// Not using the layout method because we want to compute on u64
@@ -457,8 +472,8 @@
// same by repeatedly calling `mplace_array`.
pub(super) fn mplace_array_fields(
&self,
- base: MPlaceTy<'tcx, Tag>,
- ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'tcx>
+ base: &'a MPlaceTy<'tcx, Tag>,
+ ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
{
let len = base.len(self)?; // also asserts that we have a type where this makes sense
let stride = match base.layout.fields {
@@ -473,7 +488,7 @@
fn mplace_subslice(
&self,
- base: MPlaceTy<'tcx, M::PointerTag>,
+ base: &MPlaceTy<'tcx, M::PointerTag>,
from: u64,
to: u64,
from_end: bool,
@@ -516,32 +531,32 @@
base.offset(from_offset, meta, layout, self)
}
- pub(super) fn mplace_downcast(
+ pub(crate) fn mplace_downcast(
&self,
- base: MPlaceTy<'tcx, M::PointerTag>,
+ base: &MPlaceTy<'tcx, M::PointerTag>,
variant: VariantIdx,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
assert!(!base.meta.has_meta());
- Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base })
+ Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..*base })
}
/// Project into an mplace
pub(super) fn mplace_projection(
&self,
- base: MPlaceTy<'tcx, M::PointerTag>,
+ base: &MPlaceTy<'tcx, M::PointerTag>,
proj_elem: mir::PlaceElem<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
Field(field, _) => self.mplace_field(base, field.index())?,
Downcast(_, variant) => self.mplace_downcast(base, variant)?,
- Deref => self.deref_operand(base.into())?,
+ Deref => self.deref_operand(&base.into())?,
Index(local) => {
let layout = self.layout_of(self.tcx.types.usize)?;
let n = self.access_local(self.frame(), local, Some(layout))?;
- let n = self.read_scalar(n)?;
+ let n = self.read_scalar(&n)?;
let n = u64::try_from(
self.force_bits(n.check_init()?, self.tcx.data_layout.pointer_size)?,
)
@@ -577,37 +592,37 @@
/// into the field of a local `ScalarPair`, we have to first allocate it.
pub fn place_field(
&mut self,
- base: PlaceTy<'tcx, M::PointerTag>,
+ base: &PlaceTy<'tcx, M::PointerTag>,
field: usize,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
// FIXME: We could try to be smarter and avoid allocation for fields that span the
// entire place.
let mplace = self.force_allocation(base)?;
- Ok(self.mplace_field(mplace, field)?.into())
+ Ok(self.mplace_field(&mplace, field)?.into())
}
pub fn place_index(
&mut self,
- base: PlaceTy<'tcx, M::PointerTag>,
+ base: &PlaceTy<'tcx, M::PointerTag>,
index: u64,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
let mplace = self.force_allocation(base)?;
- Ok(self.mplace_index(mplace, index)?.into())
+ Ok(self.mplace_index(&mplace, index)?.into())
}
pub fn place_downcast(
&self,
- base: PlaceTy<'tcx, M::PointerTag>,
+ base: &PlaceTy<'tcx, M::PointerTag>,
variant: VariantIdx,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
// Downcast just changes the layout
Ok(match base.place {
Place::Ptr(mplace) => {
- self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into()
+ self.mplace_downcast(&MPlaceTy { mplace, layout: base.layout }, variant)?.into()
}
Place::Local { .. } => {
let layout = base.layout.for_variant(self, variant);
- PlaceTy { layout, ..base }
+ PlaceTy { layout, ..*base }
}
})
}
@@ -615,19 +630,19 @@
/// Projects into a place.
pub fn place_projection(
&mut self,
- base: PlaceTy<'tcx, M::PointerTag>,
+ base: &PlaceTy<'tcx, M::PointerTag>,
&proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
Field(field, _) => self.place_field(base, field.index())?,
Downcast(_, variant) => self.place_downcast(base, variant)?,
- Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
+ Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
// For the other variants, we have to force an allocation.
// This matches `operand_projection`.
Subslice { .. } | ConstantIndex { .. } | Index(_) => {
let mplace = self.force_allocation(base)?;
- self.mplace_projection(mplace, proj_elem)?.into()
+ self.mplace_projection(&mplace, proj_elem)?.into()
}
})
}
@@ -645,7 +660,7 @@
};
for elem in place.projection.iter() {
- place_ty = self.place_projection(place_ty, &elem)?
+ place_ty = self.place_projection(&place_ty, &elem)?
}
trace!("{:?}", self.dump_place(place_ty.place));
@@ -666,7 +681,7 @@
pub fn write_scalar(
&mut self,
val: impl Into<ScalarMaybeUninit<M::PointerTag>>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
self.write_immediate(Immediate::Scalar(val.into()), dest)
}
@@ -676,13 +691,13 @@
pub fn write_immediate(
&mut self,
src: Immediate<M::PointerTag>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
self.write_immediate_no_validate(src, dest)?;
if M::enforce_validity(self) {
// Data got changed, better make sure it matches the type!
- self.validate_operand(self.place_to_op(dest)?)?;
+ self.validate_operand(&self.place_to_op(dest)?)?;
}
Ok(())
@@ -693,13 +708,13 @@
pub fn write_immediate_to_mplace(
&mut self,
src: Immediate<M::PointerTag>,
- dest: MPlaceTy<'tcx, M::PointerTag>,
+ dest: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
self.write_immediate_to_mplace_no_validate(src, dest)?;
if M::enforce_validity(self) {
// Data got changed, better make sure it matches the type!
- self.validate_operand(dest.into())?;
+ self.validate_operand(&dest.into())?;
}
Ok(())
@@ -711,7 +726,7 @@
fn write_immediate_no_validate(
&mut self,
src: Immediate<M::PointerTag>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
if cfg!(debug_assertions) {
// This is a very common path, avoid some checks in release mode
@@ -754,7 +769,7 @@
let dest = MPlaceTy { mplace, layout: dest.layout };
// This is already in memory, write there.
- self.write_immediate_to_mplace_no_validate(src, dest)
+ self.write_immediate_to_mplace_no_validate(src, &dest)
}
/// Write an immediate to memory.
@@ -763,7 +778,7 @@
fn write_immediate_to_mplace_no_validate(
&mut self,
value: Immediate<M::PointerTag>,
- dest: MPlaceTy<'tcx, M::PointerTag>,
+ dest: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
// Note that it is really important that the type here is the right one, and matches the
// type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
@@ -828,14 +843,14 @@
#[inline(always)]
pub fn copy_op(
&mut self,
- src: OpTy<'tcx, M::PointerTag>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ src: &OpTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
self.copy_op_no_validate(src, dest)?;
if M::enforce_validity(self) {
// Data got changed, better make sure it matches the type!
- self.validate_operand(self.place_to_op(dest)?)?;
+ self.validate_operand(&self.place_to_op(dest)?)?;
}
Ok(())
@@ -847,8 +862,8 @@
/// right type.
fn copy_op_no_validate(
&mut self,
- src: OpTy<'tcx, M::PointerTag>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ src: &OpTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
@@ -888,10 +903,10 @@
assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
let src = self
- .check_mplace_access(src, Some(size))
+ .check_mplace_access(&src, Some(size))
.expect("places should be checked on creation");
let dest = self
- .check_mplace_access(dest, Some(size))
+ .check_mplace_access(&dest, Some(size))
.expect("places should be checked on creation");
let (src_ptr, dest_ptr) = match (src, dest) {
(Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr),
@@ -906,8 +921,8 @@
/// have the same size.
pub fn copy_op_transmute(
&mut self,
- src: OpTy<'tcx, M::PointerTag>,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ src: &OpTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
// Fast path: Just use normal `copy_op`
@@ -944,12 +959,12 @@
let dest = self.force_allocation(dest)?;
self.copy_op_no_validate(
src,
- PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }),
+ &PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }),
)?;
if M::enforce_validity(self) {
// Data got changed, better make sure it matches the type!
- self.validate_operand(dest.into())?;
+ self.validate_operand(&dest.into())?;
}
Ok(())
@@ -965,7 +980,7 @@
/// version.
pub fn force_allocation_maybe_sized(
&mut self,
- place: PlaceTy<'tcx, M::PointerTag>,
+ place: &PlaceTy<'tcx, M::PointerTag>,
meta: MemPlaceMeta<M::PointerTag>,
) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
let (mplace, size) = match place.place {
@@ -981,7 +996,7 @@
self.layout_of_local(&self.stack()[frame], local, None)?;
// We also need to support unsized types, and hence cannot use `allocate`.
let (size, align) = self
- .size_and_align_of(meta, local_layout)?
+ .size_and_align_of(&meta, &local_layout)?
.expect("Cannot allocate for non-dyn-sized type");
let ptr = self.memory.allocate(size, align, MemoryKind::Stack);
let mplace = MemPlace { ptr: ptr.into(), align, meta };
@@ -990,7 +1005,7 @@
// We don't have to validate as we can assume the local
// was already valid for its type.
let mplace = MPlaceTy { mplace, layout: local_layout };
- self.write_immediate_to_mplace_no_validate(value, mplace)?;
+ self.write_immediate_to_mplace_no_validate(value, &mplace)?;
}
// Now we can call `access_mut` again, asserting it goes well,
// and actually overwrite things.
@@ -1010,7 +1025,7 @@
#[inline(always)]
pub fn force_allocation(
&mut self,
- place: PlaceTy<'tcx, M::PointerTag>,
+ place: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0)
}
@@ -1046,7 +1061,7 @@
pub fn write_discriminant(
&mut self,
variant_index: VariantIdx,
- dest: PlaceTy<'tcx, M::PointerTag>,
+ dest: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
// Layout computation excludes uninhabited variants from consideration
// therefore there's no way to represent those variants in the given layout.
@@ -1077,7 +1092,7 @@
let tag_val = size.truncate(discr_val);
let tag_dest = self.place_field(dest, tag_field)?;
- self.write_scalar(Scalar::from_uint(tag_val, size), tag_dest)?;
+ self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
}
Variants::Multiple {
tag_encoding:
@@ -1103,12 +1118,12 @@
ImmTy::from_uint(variant_index_relative, tag_layout);
let tag_val = self.binary_op(
mir::BinOp::Add,
- variant_index_relative_val,
- niche_start_val,
+ &variant_index_relative_val,
+ &niche_start_val,
)?;
// Write result.
let niche_dest = self.place_field(dest, tag_field)?;
- self.write_immediate(*tag_val, niche_dest)?;
+ self.write_immediate(*tag_val, &niche_dest)?;
}
}
}
@@ -1131,7 +1146,7 @@
/// Also return some more information so drop doesn't have to run the same code twice.
pub(super) fn unpack_dyn_trait(
&self,
- mplace: MPlaceTy<'tcx, M::PointerTag>,
+ mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
let vtable = mplace.vtable(); // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
@@ -1145,7 +1160,7 @@
assert_eq!(align, layout.align.abi);
}
- let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..*mplace }, layout };
+ let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace }, layout };
Ok((instance, mplace))
}
}
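place.rs now gains `static_assert_size!` checks for `MemPlaceMeta`, `MemPlace`, `Place`, `PlaceTy`, and `MPlaceTy`. Below is a self-contained sketch of that compile-time size-check pattern; `MemPlaceDemo` and its 32-byte figure are made up for illustration, and the real macro lives in rustc_data_structures.

// An array whose length must equal the type's size: if the size drifts,
// the crate stops compiling.
macro_rules! static_assert_size {
    ($ty:ty, $size:expr) => {
        const _: [(); $size] = [(); std::mem::size_of::<$ty>()];
    };
}

struct MemPlaceDemo {
    ptr: usize,          // 8 bytes on a 64-bit target
    align: u64,          // 8 bytes
    meta: Option<usize>, // 16 bytes (usize has no niche for the discriminant)
}

#[cfg(target_pointer_width = "64")]
static_assert_size!(MemPlaceDemo, 32);

fn main() {
    println!("MemPlaceDemo is {} bytes", std::mem::size_of::<MemPlaceDemo>());
}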
diff --git a/compiler/rustc_mir/src/interpret/step.rs b/compiler/rustc_mir/src/interpret/step.rs
index fbc72ad..6084f67 100644
--- a/compiler/rustc_mir/src/interpret/step.rs
+++ b/compiler/rustc_mir/src/interpret/step.rs
@@ -2,6 +2,7 @@
//!
//! The main entry point is the `step` method.
+use crate::interpret::OpTy;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;
@@ -90,7 +91,7 @@
SetDiscriminant { place, variant_index } => {
let dest = self.eval_place(**place)?;
- self.write_discriminant(*variant_index, dest)?;
+ self.write_discriminant(*variant_index, &dest)?;
}
// Mark locals as alive
@@ -110,7 +111,15 @@
// Stacked Borrows.
Retag(kind, place) => {
let dest = self.eval_place(**place)?;
- M::retag(self, *kind, dest)?;
+ M::retag(self, *kind, &dest)?;
+ }
+
+ // Call CopyNonOverlapping
+ CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => {
+ let src = self.eval_operand(src, None)?;
+ let dst = self.eval_operand(dst, None)?;
+ let count = self.eval_operand(count, None)?;
+ self.copy(&src, &dst, &count, /* nonoverlapping */ true)?;
}
// Statements we do not track.
@@ -140,6 +149,37 @@
Ok(())
}
+ pub(crate) fn copy(
+ &mut self,
+ src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+ dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+ count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+ nonoverlapping: bool,
+ ) -> InterpResult<'tcx> {
+ let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+ let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
+ let (size, align) = (layout.size, layout.align.abi);
+ let size = size.checked_mul(count, self).ok_or_else(|| {
+ err_ub_format!(
+ "overflow computing total size of `{}`",
+ if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
+ )
+ })?;
+
+        // Make sure we check both pointers for an access of the total size and alignment,
+ // *even if* the total size is 0.
+ let src =
+ self.memory.check_ptr_access(self.read_scalar(&src)?.check_init()?, size, align)?;
+
+ let dst =
+ self.memory.check_ptr_access(self.read_scalar(&dst)?.check_init()?, size, align)?;
+
+ if let (Some(src), Some(dst)) = (src, dst) {
+ self.memory.copy(src, dst, size, nonoverlapping)?;
+ }
+ Ok(())
+ }
+
/// Evaluate an assignment statement.
///
/// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
@@ -156,45 +196,45 @@
ThreadLocalRef(did) => {
let id = M::thread_local_static_alloc_id(self, did)?;
let val = self.global_base_pointer(id.into())?;
- self.write_scalar(val, dest)?;
+ self.write_scalar(val, &dest)?;
}
Use(ref operand) => {
// Avoid recomputing the layout
let op = self.eval_operand(operand, Some(dest.layout))?;
- self.copy_op(op, dest)?;
+ self.copy_op(&op, &dest)?;
}
- BinaryOp(bin_op, ref left, ref right) => {
+ BinaryOp(bin_op, box (ref left, ref right)) => {
let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
- let left = self.read_immediate(self.eval_operand(left, layout)?)?;
+ let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
- let right = self.read_immediate(self.eval_operand(right, layout)?)?;
- self.binop_ignore_overflow(bin_op, left, right, dest)?;
+ let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+ self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
}
- CheckedBinaryOp(bin_op, ref left, ref right) => {
+ CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
// Due to the extra boolean in the result, we can never reuse the `dest.layout`.
- let left = self.read_immediate(self.eval_operand(left, None)?)?;
+ let left = self.read_immediate(&self.eval_operand(left, None)?)?;
let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
- let right = self.read_immediate(self.eval_operand(right, layout)?)?;
- self.binop_with_overflow(bin_op, left, right, dest)?;
+ let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+ self.binop_with_overflow(bin_op, &left, &right, &dest)?;
}
UnaryOp(un_op, ref operand) => {
// The operand always has the same type as the result.
- let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
- let val = self.unary_op(un_op, val)?;
+ let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
+ let val = self.unary_op(un_op, &val)?;
assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
- self.write_immediate(*val, dest)?;
+ self.write_immediate(*val, &dest)?;
}
Aggregate(ref kind, ref operands) => {
let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
- self.write_discriminant(variant_index, dest)?;
+ self.write_discriminant(variant_index, &dest)?;
if adt_def.is_enum() {
- (self.place_downcast(dest, variant_index)?, active_field_index)
+ (self.place_downcast(&dest, variant_index)?, active_field_index)
} else {
(dest, active_field_index)
}
@@ -207,21 +247,21 @@
// Ignore zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
- let field_dest = self.place_field(dest, field_index)?;
- self.copy_op(op, field_dest)?;
+ let field_dest = self.place_field(&dest, field_index)?;
+ self.copy_op(&op, &field_dest)?;
}
}
}
Repeat(ref operand, _) => {
let op = self.eval_operand(operand, None)?;
- let dest = self.force_allocation(dest)?;
+ let dest = self.force_allocation(&dest)?;
let length = dest.len(self)?;
- if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
+ if let Some(first_ptr) = self.check_mplace_access(&dest, None)? {
// Write the first.
- let first = self.mplace_field(dest, 0)?;
- self.copy_op(op, first.into())?;
+ let first = self.mplace_field(&dest, 0)?;
+ self.copy_op(&op, &first.into())?;
if length > 1 {
let elem_size = first.layout.size;
@@ -242,23 +282,23 @@
Len(place) => {
// FIXME(CTFE): don't allow computing the length of arrays in const eval
let src = self.eval_place(place)?;
- let mplace = self.force_allocation(src)?;
+ let mplace = self.force_allocation(&src)?;
let len = mplace.len(self)?;
- self.write_scalar(Scalar::from_machine_usize(len, self), dest)?;
+ self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
}
AddressOf(_, place) | Ref(_, _, place) => {
let src = self.eval_place(place)?;
- let place = self.force_allocation(src)?;
+ let place = self.force_allocation(&src)?;
if place.layout.size.bytes() > 0 {
// definitely not a ZST
assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
}
- self.write_immediate(place.to_ref(), dest)?;
+ self.write_immediate(place.to_ref(), &dest)?;
}
NullaryOp(mir::NullOp::Box, _) => {
- M::box_alloc(self, dest)?;
+ M::box_alloc(self, &dest)?;
}
NullaryOp(mir::NullOp::SizeOf, ty) => {
@@ -272,19 +312,19 @@
);
throw_inval!(SizeOfUnsizedType(ty));
}
- self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), dest)?;
+ self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), &dest)?;
}
Cast(cast_kind, ref operand, cast_ty) => {
let src = self.eval_operand(operand, None)?;
let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
- self.cast(src, cast_kind, cast_ty, dest)?;
+ self.cast(&src, cast_kind, cast_ty, &dest)?;
}
Discriminant(place) => {
let op = self.eval_place_to_op(place, None)?;
- let discr_val = self.read_discriminant(op)?.0;
- self.write_scalar(discr_val, dest)?;
+ let discr_val = self.read_discriminant(&op)?.0;
+ self.write_scalar(discr_val, &dest)?;
}
}
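The new `copy` helper in step.rs computes the total byte count as element size times count with `checked_mul`, so an absurd `count` becomes an interpreter error rather than a wrapping multiply. A tiny standalone sketch of just that size computation (hypothetical function name, plain `u64`s instead of rustc's `Size`):

// Hypothetical helper; `u64` stands in for rustc's `Size`.
fn total_copy_size(elem_size: u64, count: u64) -> Result<u64, String> {
    elem_size
        .checked_mul(count)
        .ok_or_else(|| "overflow computing total size of `copy_nonoverlapping`".to_string())
}

fn main() {
    assert_eq!(total_copy_size(8, 4), Ok(32));
    assert!(total_copy_size(u64::MAX, 2).is_err());
}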
diff --git a/compiler/rustc_mir/src/interpret/terminator.rs b/compiler/rustc_mir/src/interpret/terminator.rs
index 575667f..4aa1360 100644
--- a/compiler/rustc_mir/src/interpret/terminator.rs
+++ b/compiler/rustc_mir/src/interpret/terminator.rs
@@ -25,7 +25,7 @@
Goto { target } => self.go_to_block(target),
SwitchInt { ref discr, ref targets, switch_ty } => {
- let discr = self.read_immediate(self.eval_operand(discr, None)?)?;
+ let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
trace!("SwitchInt({:?})", *discr);
assert_eq!(discr.layout.ty, switch_ty);
@@ -38,8 +38,8 @@
let res = self
.overflowing_binary_op(
mir::BinOp::Eq,
- discr,
- ImmTy::from_uint(const_int, discr.layout),
+ &discr,
+ &ImmTy::from_uint(const_int, discr.layout),
)?
.0;
if res.to_bool()? {
@@ -58,7 +58,7 @@
let (fn_val, abi) = match *func.layout.ty.kind() {
ty::FnPtr(sig) => {
let caller_abi = sig.abi();
- let fn_ptr = self.read_scalar(func)?.check_init()?;
+ let fn_ptr = self.read_scalar(&func)?.check_init()?;
let fn_val = self.memory.get_fn(fn_ptr)?;
(fn_val, caller_abi)
}
@@ -78,8 +78,12 @@
),
};
let args = self.eval_operands(args)?;
+ let dest_place;
let ret = match destination {
- Some((dest, ret)) => Some((self.eval_place(dest)?, ret)),
+ Some((dest, ret)) => {
+ dest_place = self.eval_place(dest)?;
+ Some((&dest_place, ret))
+ }
None => None,
};
self.eval_fn_call(fn_val, abi, &args[..], ret, *cleanup)?;
@@ -96,12 +100,12 @@
trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
- self.drop_in_place(place, instance, target, unwind)?;
+ self.drop_in_place(&place, instance, target, unwind)?;
}
Assert { ref cond, expected, ref msg, target, cleanup } => {
let cond_val =
- self.read_immediate(self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
+ self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
if expected == cond_val {
self.go_to_block(target);
} else {
@@ -180,7 +184,7 @@
&mut self,
rust_abi: bool,
caller_arg: &mut impl Iterator<Item = OpTy<'tcx, M::PointerTag>>,
- callee_arg: PlaceTy<'tcx, M::PointerTag>,
+ callee_arg: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
if rust_abi && callee_arg.layout.is_zst() {
// Nothing to do.
@@ -202,7 +206,7 @@
)
}
// We allow some transmutes here
- self.copy_op_transmute(caller_arg, callee_arg)
+ self.copy_op_transmute(&caller_arg, callee_arg)
}
/// Call this function -- pushing the stack frame and initializing the arguments.
@@ -211,7 +215,7 @@
fn_val: FnVal<'tcx, M::ExtraFnVal>,
caller_abi: Abi,
args: &[OpTy<'tcx, M::PointerTag>],
- ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
+ ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
trace!("eval_fn_call: {:#?}", fn_val);
@@ -244,9 +248,9 @@
};
if normalize_abi(caller_abi) != normalize_abi(callee_abi) {
throw_ub_format!(
- "calling a function with ABI {:?} using caller ABI {:?}",
- callee_abi,
- caller_abi
+ "calling a function with ABI {} using caller ABI {}",
+ callee_abi.name(),
+ caller_abi.name()
)
}
}
@@ -314,7 +318,7 @@
let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
if caller_abi == Abi::RustCall && !args.is_empty() {
// Untuple
- let (&untuple_arg, args) = args.split_last().unwrap();
+ let (untuple_arg, args) = args.split_last().unwrap();
trace!("eval_fn_call: Will pass last argument by untupling");
Cow::from(
args.iter()
@@ -344,12 +348,12 @@
if Some(local) == body.spread_arg {
// Must be a tuple
for i in 0..dest.layout.fields.count() {
- let dest = self.place_field(dest, i)?;
- self.pass_argument(rust_abi, &mut caller_iter, dest)?;
+ let dest = self.place_field(&dest, i)?;
+ self.pass_argument(rust_abi, &mut caller_iter, &dest)?;
}
} else {
// Normal argument
- self.pass_argument(rust_abi, &mut caller_iter, dest)?;
+ self.pass_argument(rust_abi, &mut caller_iter, &dest)?;
}
}
// Now we should have no more caller args
@@ -397,7 +401,7 @@
let receiver_place = match args[0].layout.ty.builtin_deref(true) {
Some(_) => {
// Built-in pointer.
- self.deref_operand(args[0])?
+ self.deref_operand(&args[0])?
}
None => {
// Unsized self.
@@ -426,7 +430,7 @@
fn drop_in_place(
&mut self,
- place: PlaceTy<'tcx, M::PointerTag>,
+ place: &PlaceTy<'tcx, M::PointerTag>,
instance: ty::Instance<'tcx>,
target: mir::BasicBlock,
unwind: Option<mir::BasicBlock>,
@@ -440,7 +444,7 @@
let (instance, place) = match place.layout.ty.kind() {
ty::Dynamic(..) => {
// Dropping a trait object.
- self.unpack_dyn_trait(place)?
+ self.unpack_dyn_trait(&place)?
}
_ => (instance, place),
};
@@ -457,7 +461,7 @@
FnVal::Instance(instance),
Abi::Rust,
&[arg.into()],
- Some((dest.into(), target)),
+ Some((&dest.into(), target)),
unwind,
)
}
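The `Call` arm above introduces a `dest_place` binding declared before the `match` so a reference to the freshly evaluated place can be stored in `ret` and outlive the match. A self-contained sketch of that deferred-initialization pattern, with made-up types standing in for the interpreter's:

// Stand-in for `self.eval_place(dest)?` in the real code.
fn eval_place() -> String {
    "destination place".to_string()
}

fn main() {
    let destination = Some(7u32); // a basic-block index in the real code
    let dest_place; // declared first so the borrow below can outlive the match
    let ret = match destination {
        Some(block) => {
            dest_place = eval_place();
            Some((&dest_place, block))
        }
        None => None,
    };
    if let Some((place, block)) = ret {
        println!("write return value to {:?}, then jump to block {}", place, block);
    }
}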
diff --git a/compiler/rustc_mir/src/interpret/traits.rs b/compiler/rustc_mir/src/interpret/traits.rs
index 09ce6bc..50603bd 100644
--- a/compiler/rustc_mir/src/interpret/traits.rs
+++ b/compiler/rustc_mir/src/interpret/traits.rs
@@ -118,7 +118,7 @@
.get_raw(vtable_slot.alloc_id)?
.read_ptr_sized(self, vtable_slot)?
.check_init()?;
- Ok(self.memory.get_fn(fn_ptr)?)
+ self.memory.get_fn(fn_ptr)
}
/// Returns the drop fn instance as well as the actual dynamic type.
diff --git a/compiler/rustc_mir/src/interpret/validity.rs b/compiler/rustc_mir/src/interpret/validity.rs
index 0b74926..062ef7d 100644
--- a/compiler/rustc_mir/src/interpret/validity.rs
+++ b/compiler/rustc_mir/src/interpret/validity.rs
@@ -11,7 +11,7 @@
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
-use rustc_middle::mir::interpret::{InterpError, InterpErrorInfo};
+use rustc_middle::mir::interpret::InterpError;
use rustc_middle::ty;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_span::symbol::{sym, Symbol};
@@ -21,7 +21,7 @@
use super::{
CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
- ValueVisitor,
+ ScalarMaybeUninit, ValueVisitor,
};
macro_rules! throw_validation_failure {
@@ -77,20 +77,23 @@
///
macro_rules! try_validation {
($e:expr, $where:expr,
- $( $( $p:pat )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)?
+ $( $( $p:pat )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)?
) => {{
match $e {
Ok(x) => x,
// We catch the error and turn it into a validation failure. We are okay with
// allocation here as this can only slow down builds that fail anyway.
- $( $( Err(InterpErrorInfo { kind: $p, .. }) )|+ =>
- throw_validation_failure!(
- $where,
- { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
- ),
- )+
- #[allow(unreachable_patterns)]
- Err(e) => Err::<!, _>(e)?,
+ Err(e) => match e.kind() {
+ $(
+ $($p)|+ =>
+ throw_validation_failure!(
+ $where,
+ { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
+ )
+ ),+,
+ #[allow(unreachable_patterns)]
+ _ => Err::<!, _>(e)?,
+ }
}
}};
}
@@ -241,17 +244,20 @@
// generators and closures.
ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
let mut name = None;
- if let Some(def_id) = def_id.as_local() {
- let tables = self.ecx.tcx.typeck(def_id);
- if let Some(upvars) = tables.closure_captures.get(&def_id.to_def_id()) {
+ // FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
+ // https://github.com/rust-lang/project-rfc-2229/issues/46
+ if let Some(local_def_id) = def_id.as_local() {
+ let tables = self.ecx.tcx.typeck(local_def_id);
+ if let Some(captured_place) =
+ tables.closure_min_captures_flattened(*def_id).nth(field)
+ {
// Sometimes the index is beyond the number of upvars (seen
// for a generator).
- if let Some((&var_hir_id, _)) = upvars.get_index(field) {
- let node = self.ecx.tcx.hir().get(var_hir_id);
- if let hir::Node::Binding(pat) = node {
- if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
- name = Some(ident.name);
- }
+ let var_hir_id = captured_place.get_root_variable();
+ let node = self.ecx.tcx.hir().get(var_hir_id);
+ if let hir::Node::Binding(pat) = node {
+ if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
+ name = Some(ident.name);
}
}
}
@@ -375,14 +381,18 @@
/// Check a reference or `Box`.
fn check_safe_pointer(
&mut self,
- value: OpTy<'tcx, M::PointerTag>,
+ value: &OpTy<'tcx, M::PointerTag>,
kind: &str,
) -> InterpResult<'tcx> {
- let value = self.ecx.read_immediate(value)?;
+ let value = try_validation!(
+ self.ecx.read_immediate(value),
+ self.path,
+ err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
+ );
// Handle wide pointers.
// Check metadata early, for better diagnostics
let place = try_validation!(
- self.ecx.ref_to_mplace(value),
+ self.ecx.ref_to_mplace(&value),
self.path,
err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind },
);
@@ -391,7 +401,7 @@
}
// Make sure this is dereferenceable and all.
let size_and_align = try_validation!(
- self.ecx.size_and_align_of_mplace(place),
+ self.ecx.size_and_align_of_mplace(&place),
self.path,
err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg },
);
@@ -485,17 +495,28 @@
Ok(())
}
+ fn read_scalar(
+ &self,
+ op: &OpTy<'tcx, M::PointerTag>,
+ ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
+ Ok(try_validation!(
+ self.ecx.read_scalar(op),
+ self.path,
+ err_unsup!(ReadPointerAsBytes) => { "(potentially part of) a pointer" } expected { "plain (non-pointer) bytes" },
+ ))
+ }
+
/// Check if this is a value of primitive type, and if yes check the validity of the value
/// at that type. Return `true` if the type is indeed primitive.
fn try_visit_primitive(
&mut self,
- value: OpTy<'tcx, M::PointerTag>,
+ value: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, bool> {
// Go over all the primitive types
let ty = value.layout.ty;
match ty.kind() {
ty::Bool => {
- let value = self.ecx.read_scalar(value)?;
+ let value = self.read_scalar(value)?;
try_validation!(
value.to_bool(),
self.path,
@@ -505,7 +526,7 @@
Ok(true)
}
ty::Char => {
- let value = self.ecx.read_scalar(value)?;
+ let value = self.read_scalar(value)?;
try_validation!(
value.to_char(),
self.path,
@@ -515,11 +536,7 @@
Ok(true)
}
ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
- let value = try_validation!(
- self.ecx.read_scalar(value),
- self.path,
- err_unsup!(ReadPointerAsBytes) => { "read of part of a pointer" },
- );
+ let value = self.read_scalar(value)?;
// NOTE: Keep this in sync with the array optimization for int/float
// types below!
if self.ctfe_mode.is_some() {
@@ -541,9 +558,10 @@
// actually enforce the strict rules for raw pointers (mostly because
// that lets us re-use `ref_to_mplace`).
let place = try_validation!(
- self.ecx.ref_to_mplace(self.ecx.read_immediate(value)?),
+ self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)),
self.path,
err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" },
+ err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
);
if place.layout.is_unsized() {
self.check_wide_ptr_meta(place.meta, place.layout)?;
@@ -569,7 +587,14 @@
Ok(true)
}
ty::FnPtr(_sig) => {
- let value = self.ecx.read_scalar(value)?;
+ let value = try_validation!(
+ self.ecx.read_immediate(value),
+ self.path,
+ err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
+ );
+ // Make sure we print a `ScalarMaybeUninit` (and not an `ImmTy`) in the error
+ // message below.
+ let value = value.to_scalar_or_uninit();
let _fn = try_validation!(
value.check_init().and_then(|ptr| self.ecx.memory.get_fn(ptr)),
self.path,
@@ -612,10 +637,10 @@
fn visit_scalar(
&mut self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
scalar_layout: &Scalar,
) -> InterpResult<'tcx> {
- let value = self.ecx.read_scalar(op)?;
+ let value = self.read_scalar(op)?;
let valid_range = &scalar_layout.valid_range;
let (lo, hi) = valid_range.clone().into_inner();
// Determine the allowed range
@@ -686,7 +711,7 @@
fn read_discriminant(
&mut self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, VariantIdx> {
self.with_elem(PathElem::EnumTag, move |this| {
Ok(try_validation!(
@@ -706,9 +731,9 @@
#[inline]
fn visit_field(
&mut self,
- old_op: OpTy<'tcx, M::PointerTag>,
+ old_op: &OpTy<'tcx, M::PointerTag>,
field: usize,
- new_op: OpTy<'tcx, M::PointerTag>,
+ new_op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
let elem = self.aggregate_field_path_elem(old_op.layout, field);
self.with_elem(elem, move |this| this.visit_value(new_op))
@@ -717,9 +742,9 @@
#[inline]
fn visit_variant(
&mut self,
- old_op: OpTy<'tcx, M::PointerTag>,
+ old_op: &OpTy<'tcx, M::PointerTag>,
variant_id: VariantIdx,
- new_op: OpTy<'tcx, M::PointerTag>,
+ new_op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
let name = match old_op.layout.ty.kind() {
ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name),
@@ -733,14 +758,14 @@
#[inline(always)]
fn visit_union(
&mut self,
- _op: OpTy<'tcx, M::PointerTag>,
+ _op: &OpTy<'tcx, M::PointerTag>,
_fields: NonZeroUsize,
) -> InterpResult<'tcx> {
Ok(())
}
#[inline]
- fn visit_value(&mut self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
+ fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
trace!("visit_value: {:?}, {:?}", *op, op.layout);
// Check primitive types -- the leafs of our recursive descend.
@@ -797,7 +822,7 @@
fn visit_aggregate(
&mut self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
) -> InterpResult<'tcx> {
match op.layout.ty.kind() {
@@ -858,7 +883,7 @@
Err(err) => {
// For some errors we might be able to provide extra information.
// (This custom logic does not fit the `try_validation!` macro.)
- match err.kind {
+ match err.kind() {
err_ub!(InvalidUninitBytes(Some(access))) => {
// Some byte was uninitialized, determine which
// element that byte belongs to so we can
@@ -899,7 +924,7 @@
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
fn validate_operand_internal(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
path: Vec<PathElem>,
ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
ctfe_mode: Option<CtfeValidationMode>,
@@ -910,16 +935,16 @@
let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
// Try to cast to ptr *once* instead of all the time.
- let op = self.force_op_ptr(op).unwrap_or(op);
+ let op = self.force_op_ptr(&op).unwrap_or(*op);
// Run it.
- match visitor.visit_value(op) {
+ match visitor.visit_value(&op) {
Ok(()) => Ok(()),
// Pass through validation failures.
- Err(err) if matches!(err.kind, err_ub!(ValidationFailure { .. })) => Err(err),
+ Err(err) if matches!(err.kind(), err_ub!(ValidationFailure { .. })) => Err(err),
// Also pass through InvalidProgram, those just indicate that we could not
// validate and each caller will know best what to do with them.
- Err(err) if matches!(err.kind, InterpError::InvalidProgram(_)) => Err(err),
+ Err(err) if matches!(err.kind(), InterpError::InvalidProgram(_)) => Err(err),
// Avoid other errors as those do not show *where* in the value the issue lies.
Err(err) => {
err.print_backtrace();
@@ -941,7 +966,7 @@
#[inline(always)]
pub fn const_validate_operand(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
path: Vec<PathElem>,
ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>,
ctfe_mode: CtfeValidationMode,
@@ -953,7 +978,7 @@
/// `op` is assumed to cover valid memory if it is an indirect operand.
/// It will error if the bits at the destination do not match the ones described by the layout.
#[inline(always)]
- pub fn validate_operand(&self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
+ pub fn validate_operand(&self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
self.validate_operand_internal(op, vec![], None, None)
}
}
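The reworked `try_validation!` macro and the `err.kind()` call sites above inspect interpreter errors through an accessor, turning selected kinds into validation failures and re-raising everything else. A simplified sketch, with made-up error types rather than the real `InterpErrorInfo`, of what such an expansion boils down to:

#[allow(dead_code)]
#[derive(Debug)]
enum ErrorKind {
    ReadPointerAsBytes,
    Other,
}

#[derive(Debug)]
struct InterpError {
    kind: ErrorKind,
}

impl InterpError {
    // The accessor the reworked macro goes through instead of destructuring
    // the error struct directly.
    fn kind(&self) -> &ErrorKind {
        &self.kind
    }
}

fn read_scalar(fail: bool) -> Result<u64, InterpError> {
    if fail {
        Err(InterpError { kind: ErrorKind::ReadPointerAsBytes })
    } else {
        Ok(1)
    }
}

fn validate(fail: bool) -> Result<u64, String> {
    match read_scalar(fail) {
        Ok(x) => Ok(x),
        // Selected kinds become validation failures; everything else is re-raised.
        Err(e) => match e.kind() {
            ErrorKind::ReadPointerAsBytes => {
                Err("expected plain (non-pointer) bytes".to_string())
            }
            _ => Err(format!("unhandled interpreter error: {:?}", e)),
        },
    }
}

fn main() {
    assert_eq!(validate(false), Ok(1));
    assert!(validate(true).is_err());
}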
diff --git a/compiler/rustc_mir/src/interpret/visitor.rs b/compiler/rustc_mir/src/interpret/visitor.rs
index 097b9ae..32edca6 100644
--- a/compiler/rustc_mir/src/interpret/visitor.rs
+++ b/compiler/rustc_mir/src/interpret/visitor.rs
@@ -18,21 +18,25 @@
fn layout(&self) -> TyAndLayout<'tcx>;
/// Makes this into an `OpTy`.
- fn to_op(self, ecx: &InterpCx<'mir, 'tcx, M>) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
+ fn to_op(&self, ecx: &InterpCx<'mir, 'tcx, M>)
+ -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
/// Creates this from an `MPlaceTy`.
fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self;
/// Projects to the given enum variant.
fn project_downcast(
- self,
+ &self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self>;
/// Projects to the n-th field.
- fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize)
- -> InterpResult<'tcx, Self>;
+ fn project_field(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self>;
}
// Operands and memory-places are both values.
@@ -45,10 +49,10 @@
#[inline(always)]
fn to_op(
- self,
+ &self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
- Ok(self)
+ Ok(*self)
}
#[inline(always)]
@@ -58,7 +62,7 @@
#[inline(always)]
fn project_downcast(
- self,
+ &self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self> {
@@ -67,7 +71,7 @@
#[inline(always)]
fn project_field(
- self,
+ &self,
ecx: &InterpCx<'mir, 'tcx, M>,
field: usize,
) -> InterpResult<'tcx, Self> {
@@ -85,10 +89,10 @@
#[inline(always)]
fn to_op(
- self,
+ &self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
- Ok(self.into())
+ Ok((*self).into())
}
#[inline(always)]
@@ -98,7 +102,7 @@
#[inline(always)]
fn project_downcast(
- self,
+ &self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self> {
@@ -107,7 +111,7 @@
#[inline(always)]
fn project_field(
- self,
+ &self,
ecx: &InterpCx<'mir, 'tcx, M>,
field: usize,
) -> InterpResult<'tcx, Self> {
@@ -129,7 +133,7 @@
#[inline(always)]
fn read_discriminant(
&mut self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, VariantIdx> {
Ok(self.ecx().read_discriminant(op)?.1)
}
@@ -137,13 +141,13 @@
// Recursive actions, ready to be overloaded.
/// Visits the given value, dispatching as appropriate to more specialized visitors.
#[inline(always)]
- fn visit_value(&mut self, v: Self::V) -> InterpResult<'tcx>
+ fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
{
self.walk_value(v)
}
/// Visits the given value as a union. No automatic recursion can happen here.
#[inline(always)]
- fn visit_union(&mut self, _v: Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
+ fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
{
Ok(())
}
@@ -153,7 +157,7 @@
#[inline(always)]
fn visit_aggregate(
&mut self,
- v: Self::V,
+ v: &Self::V,
fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
) -> InterpResult<'tcx> {
self.walk_aggregate(v, fields)
@@ -167,9 +171,9 @@
#[inline(always)]
fn visit_field(
&mut self,
- _old_val: Self::V,
+ _old_val: &Self::V,
_field: usize,
- new_val: Self::V,
+ new_val: &Self::V,
) -> InterpResult<'tcx> {
self.visit_value(new_val)
}
@@ -179,9 +183,9 @@
#[inline(always)]
fn visit_variant(
&mut self,
- _old_val: Self::V,
+ _old_val: &Self::V,
_variant: VariantIdx,
- new_val: Self::V,
+ new_val: &Self::V,
) -> InterpResult<'tcx> {
self.visit_value(new_val)
}
@@ -189,16 +193,16 @@
// Default recursors. Not meant to be overloaded.
fn walk_aggregate(
&mut self,
- v: Self::V,
+ v: &Self::V,
fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
) -> InterpResult<'tcx> {
// Now iterate over it.
for (idx, field_val) in fields.enumerate() {
- self.visit_field(v, idx, field_val?)?;
+ self.visit_field(v, idx, &field_val?)?;
}
Ok(())
}
- fn walk_value(&mut self, v: Self::V) -> InterpResult<'tcx>
+ fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
{
trace!("walk_value: type: {}", v.layout().ty);
@@ -208,10 +212,10 @@
ty::Dynamic(..) => {
// immediate trait objects are not a thing
let dest = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
- let inner = self.ecx().unpack_dyn_trait(dest)?.1;
+ let inner = self.ecx().unpack_dyn_trait(&dest)?.1;
trace!("walk_value: dyn object layout: {:#?}", inner.layout);
// recurse with the inner type
- return self.visit_field(v, 0, Value::from_mem_place(inner));
+ return self.visit_field(&v, 0, &Value::from_mem_place(inner));
},
// Slices do not need special handling here: they have `Array` field
// placement with length 0, so we enter the `Array` case below which
@@ -241,7 +245,7 @@
// Now we can go over all the fields.
// This uses the *run-time length*, i.e., if we are a slice,
// the dynamic info from the metadata is used.
- let iter = self.ecx().mplace_array_fields(mplace)?
+ let iter = self.ecx().mplace_array_fields(&mplace)?
.map(|f| f.and_then(|f| {
Ok(Value::from_mem_place(f))
}));
@@ -254,11 +258,11 @@
// with *its* fields.
Variants::Multiple { .. } => {
let op = v.to_op(self.ecx())?;
- let idx = self.read_discriminant(op)?;
+ let idx = self.read_discriminant(&op)?;
let inner = v.project_downcast(self.ecx(), idx)?;
trace!("walk_value: variant layout: {:#?}", inner.layout());
// recurse with the inner type
- self.visit_variant(v, idx, inner)
+ self.visit_variant(v, idx, &inner)
}
// For single-variant layouts, we already did anything there is to do.
Variants::Single { .. } => Ok(())
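The visitor trait above now threads values by shared reference: `visit_field` and `visit_variant` receive `&Self::V` and forward to `visit_value`, which defaults to `walk_value`. A toy, self-contained version of that shape, with a bare `u64` standing in for `Self::V`:

trait ValueVisitor {
    // Fields are handed to the visitor by reference now.
    fn visit_field(&mut self, _old: &u64, _field: usize, new: &u64) {
        self.visit_value(new);
    }
    fn visit_value(&mut self, v: &u64) {
        self.walk_value(v);
    }
    fn walk_value(&mut self, v: &u64);
}

struct Summer {
    total: u64,
}

impl ValueVisitor for Summer {
    fn walk_value(&mut self, v: &u64) {
        self.total += *v;
    }
}

fn main() {
    let mut visitor = Summer { total: 0 };
    let parent = 10u64;
    for (idx, field) in [1u64, 2, 3].iter().enumerate() {
        // The caller keeps ownership of both the parent and the field values.
        visitor.visit_field(&parent, idx, field);
    }
    assert_eq!(visitor.total, 6);
}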
diff --git a/compiler/rustc_mir/src/lib.rs b/compiler/rustc_mir/src/lib.rs
index 8b3881e..bf4eeb4 100644
--- a/compiler/rustc_mir/src/lib.rs
+++ b/compiler/rustc_mir/src/lib.rs
@@ -18,17 +18,17 @@
#![feature(exact_size_is_empty)]
#![feature(exhaustive_patterns)]
#![feature(never_type)]
+#![feature(map_try_insert)]
#![feature(min_specialization)]
#![feature(trusted_len)]
#![feature(try_blocks)]
#![feature(associated_type_defaults)]
#![feature(stmt_expr_attributes)]
#![feature(trait_alias)]
-#![feature(option_expect_none)]
+#![feature(option_get_or_insert_default)]
#![feature(or_patterns)]
#![feature(once_cell)]
#![feature(control_flow_enum)]
-#![feature(str_split_once)]
#![recursion_limit = "256"]
#[macro_use]
@@ -63,6 +63,10 @@
let (param_env, value) = param_env_and_value.into_parts();
const_eval::destructure_const(tcx, param_env, value)
};
+ providers.const_to_valtree = |tcx, param_env_and_value| {
+ let (param_env, raw) = param_env_and_value.into_parts();
+ const_eval::const_to_valtree(tcx, param_env, raw)
+ };
providers.deref_const = |tcx, param_env_and_value| {
let (param_env, value) = param_env_and_value.into_parts();
const_eval::deref_const(tcx, param_env, value)
diff --git a/compiler/rustc_mir/src/monomorphize/collector.rs b/compiler/rustc_mir/src/monomorphize/collector.rs
index 75f80f6..911224d 100644
--- a/compiler/rustc_mir/src/monomorphize/collector.rs
+++ b/compiler/rustc_mir/src/monomorphize/collector.rs
@@ -684,7 +684,7 @@
for op in operands {
match *op {
mir::InlineAsmOperand::SymFn { ref value } => {
- let fn_ty = self.monomorphize(value.literal.ty);
+ let fn_ty = self.monomorphize(value.literal.ty());
visit_fn_use(self.tcx, fn_ty, false, source, &mut self.output);
}
mir::InlineAsmOperand::SymStatic { def_id } => {
@@ -1013,13 +1013,12 @@
| hir::ItemKind::Union(_, ref generics) => {
if generics.params.is_empty() {
if self.mode == MonoItemCollectionMode::Eager {
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
debug!(
"RootCollector: ADT drop-glue for {}",
- self.tcx.def_path_str(def_id.to_def_id())
+ self.tcx.def_path_str(item.def_id.to_def_id())
);
- let ty = Instance::new(def_id.to_def_id(), InternalSubsts::empty())
+ let ty = Instance::new(item.def_id.to_def_id(), InternalSubsts::empty())
.ty(self.tcx, ty::ParamEnv::reveal_all());
visit_drop_use(self.tcx, ty, true, DUMMY_SP, self.output);
}
@@ -1028,29 +1027,28 @@
hir::ItemKind::GlobalAsm(..) => {
debug!(
"RootCollector: ItemKind::GlobalAsm({})",
- self.tcx.def_path_str(self.tcx.hir().local_def_id(item.hir_id).to_def_id())
+ self.tcx.def_path_str(item.def_id.to_def_id())
);
- self.output.push(dummy_spanned(MonoItem::GlobalAsm(item.hir_id)));
+ self.output.push(dummy_spanned(MonoItem::GlobalAsm(item.item_id())));
}
hir::ItemKind::Static(..) => {
- let def_id = self.tcx.hir().local_def_id(item.hir_id).to_def_id();
- debug!("RootCollector: ItemKind::Static({})", self.tcx.def_path_str(def_id));
- self.output.push(dummy_spanned(MonoItem::Static(def_id)));
+ debug!(
+ "RootCollector: ItemKind::Static({})",
+ self.tcx.def_path_str(item.def_id.to_def_id())
+ );
+ self.output.push(dummy_spanned(MonoItem::Static(item.def_id.to_def_id())));
}
hir::ItemKind::Const(..) => {
// const items only generate mono items if they are
// actually used somewhere. Just declaring them is insufficient.
// but even just declaring them must collect the items they refer to
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
-
- if let Ok(val) = self.tcx.const_eval_poly(def_id.to_def_id()) {
+ if let Ok(val) = self.tcx.const_eval_poly(item.def_id.to_def_id()) {
collect_const_value(self.tcx, val, &mut self.output);
}
}
hir::ItemKind::Fn(..) => {
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
- self.push_if_root(def_id);
+ self.push_if_root(item.def_id);
}
}
}
@@ -1062,8 +1060,7 @@
fn visit_impl_item(&mut self, ii: &'v hir::ImplItem<'v>) {
if let hir::ImplItemKind::Fn(hir::FnSig { .. }, _) = ii.kind {
- let def_id = self.tcx.hir().local_def_id(ii.hir_id);
- self.push_if_root(def_id);
+ self.push_if_root(ii.def_id);
}
}
@@ -1156,14 +1153,12 @@
}
}
- let impl_def_id = tcx.hir().local_def_id(item.hir_id);
-
debug!(
"create_mono_items_for_default_impls(item={})",
- tcx.def_path_str(impl_def_id.to_def_id())
+ tcx.def_path_str(item.def_id.to_def_id())
);
- if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) {
+ if let Some(trait_ref) = tcx.impl_trait_ref(item.def_id) {
let param_env = ty::ParamEnv::reveal_all();
let trait_ref = tcx.normalize_erasing_regions(param_env, trait_ref);
let overridden_methods: FxHashSet<_> =
diff --git a/compiler/rustc_mir/src/monomorphize/partitioning/default.rs b/compiler/rustc_mir/src/monomorphize/partitioning/default.rs
index d5a845d..edd4631 100644
--- a/compiler/rustc_mir/src/monomorphize/partitioning/default.rs
+++ b/compiler/rustc_mir/src/monomorphize/partitioning/default.rs
@@ -314,7 +314,7 @@
Some(def_id)
}
MonoItem::Static(def_id) => Some(def_id),
- MonoItem::GlobalAsm(hir_id) => Some(tcx.hir().local_def_id(hir_id).to_def_id()),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.def_id.to_def_id()),
}
}
@@ -405,11 +405,10 @@
Visibility::Hidden
};
}
- MonoItem::GlobalAsm(hir_id) => {
- let def_id = tcx.hir().local_def_id(*hir_id);
- return if tcx.is_reachable_non_generic(def_id) {
+ MonoItem::GlobalAsm(item_id) => {
+ return if tcx.is_reachable_non_generic(item_id.def_id) {
*can_be_internalized = false;
- default_visibility(tcx, def_id.to_def_id(), false)
+ default_visibility(tcx, item_id.def_id.to_def_id(), false)
} else {
Visibility::Hidden
};
diff --git a/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs b/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs
index b9fcd32..dc2379f 100644
--- a/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs
+++ b/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs
@@ -239,17 +239,22 @@
I: Iterator<Item = &'a CodegenUnit<'tcx>>,
'tcx: 'a,
{
- if cfg!(debug_assertions) {
- debug!("{}", label);
+ let dump = move || {
+ use std::fmt::Write;
+
+ let s = &mut String::new();
+ let _ = writeln!(s, "{}", label);
for cgu in cgus {
- debug!("CodegenUnit {} estimated size {} :", cgu.name(), cgu.size_estimate());
+ let _ =
+ writeln!(s, "CodegenUnit {} estimated size {} :", cgu.name(), cgu.size_estimate());
for (mono_item, linkage) in cgu.items() {
let symbol_name = mono_item.symbol_name(tcx).name;
let symbol_hash_start = symbol_name.rfind('h');
let symbol_hash = symbol_hash_start.map_or("<no hash>", |i| &symbol_name[i..]);
- debug!(
+ let _ = writeln!(
+ s,
" - {} [{:?}] [{}] estimated size {}",
mono_item,
linkage,
@@ -258,9 +263,13 @@
);
}
- debug!("");
+ let _ = writeln!(s, "");
}
- }
+
+ std::mem::take(s)
+ };
+
+ debug!("{}", dump());
}
#[inline(never)] // give this a place in the profiler
@@ -415,8 +424,33 @@
(tcx.arena.alloc(mono_items), codegen_units)
}
+fn codegened_and_inlined_items<'tcx>(tcx: TyCtxt<'tcx>, cnum: CrateNum) -> &'tcx DefIdSet {
+ let (items, cgus) = tcx.collect_and_partition_mono_items(cnum);
+ let mut visited = DefIdSet::default();
+ let mut result = items.clone();
+
+ for cgu in cgus {
+ for (item, _) in cgu.items() {
+ if let MonoItem::Fn(ref instance) = item {
+ let did = instance.def_id();
+ if !visited.insert(did) {
+ continue;
+ }
+ for scope in &tcx.instance_mir(instance.def).source_scopes {
+ if let Some((ref inlined, _)) = scope.inlined {
+ result.insert(inlined.def_id());
+ }
+ }
+ }
+ }
+ }
+
+ tcx.arena.alloc(result)
+}
+
pub fn provide(providers: &mut Providers) {
providers.collect_and_partition_mono_items = collect_and_partition_mono_items;
+ providers.codegened_and_inlined_items = codegened_and_inlined_items;
providers.is_codegened_item = |tcx, def_id| {
let (all_mono_items, _) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
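The new `debug_dump` body above assembles the whole per-CGU report into a `String` via `std::fmt::Write` and emits it as a single `debug!` event instead of one event per line. A small standalone sketch of that string-building pattern (names and data are illustrative):

```rust
use std::fmt::Write;

// Builds a multi-line report; writing into a String cannot fail, so the
// io-style Results from writeln! are deliberately discarded with `let _ =`.
fn dump_report(label: &str, cgus: &[(&str, usize)]) -> String {
    let s = &mut String::new();
    let _ = writeln!(s, "{}", label);
    for (name, size) in cgus {
        let _ = writeln!(s, "CodegenUnit {} estimated size {} :", name, size);
    }
    std::mem::take(s)
}

fn main() {
    let report = dump_report("INITIAL PARTITIONING:", &[("cgu.0", 1024), ("cgu.1", 256)]);
    println!("{}", report);
}
```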
diff --git a/compiler/rustc_mir/src/monomorphize/polymorphize.rs b/compiler/rustc_mir/src/monomorphize/polymorphize.rs
index 4ad71ab..05b0e3a 100644
--- a/compiler/rustc_mir/src/monomorphize/polymorphize.rs
+++ b/compiler/rustc_mir/src/monomorphize/polymorphize.rs
@@ -30,9 +30,8 @@
/// Determine which generic parameters are used by the function/method/closure represented by
/// `def_id`. Returns a bitset where bits representing unused parameters are set (`is_empty`
/// indicates all parameters are used).
+#[instrument(skip(tcx))]
fn unused_generic_params(tcx: TyCtxt<'_>, def_id: DefId) -> FiniteBitSet<u32> {
- debug!("unused_generic_params({:?})", def_id);
-
if !tcx.sess.opts.debugging_opts.polymorphize {
// If polymorphization disabled, then all parameters are used.
return FiniteBitSet::new_empty();
@@ -46,7 +45,7 @@
}
let generics = tcx.generics_of(def_id);
- debug!("unused_generic_params: generics={:?}", generics);
+ debug!(?generics);
// Exit early when there are no parameters to be unused.
if generics.count() == 0 {
@@ -57,11 +56,11 @@
let context = tcx.hir().body_const_context(def_id.expect_local());
match context {
Some(ConstContext::ConstFn) | None if !tcx.is_mir_available(def_id) => {
- debug!("unused_generic_params: (no mir available) def_id={:?}", def_id);
+ debug!("no mir available");
return FiniteBitSet::new_empty();
}
Some(_) if !tcx.is_ctfe_mir_available(def_id) => {
- debug!("unused_generic_params: (no ctfe mir available) def_id={:?}", def_id);
+ debug!("no ctfe mir available");
return FiniteBitSet::new_empty();
}
_ => {}
@@ -72,9 +71,9 @@
generics.count().try_into().expect("more generic parameters than can fit into a `u32`");
let mut unused_parameters = FiniteBitSet::<u32>::new_empty();
unused_parameters.set_range(0..generics_count);
- debug!("unused_generic_params: (start) unused_parameters={:?}", unused_parameters);
+ debug!(?unused_parameters, "(start)");
mark_used_by_default_parameters(tcx, def_id, generics, &mut unused_parameters);
- debug!("unused_generic_params: (after default) unused_parameters={:?}", unused_parameters);
+ debug!(?unused_parameters, "(after default)");
// Visit MIR and accumulate used generic parameters.
let body = match context {
@@ -85,10 +84,10 @@
};
let mut vis = MarkUsedGenericParams { tcx, def_id, unused_parameters: &mut unused_parameters };
vis.visit_body(body);
- debug!("unused_generic_params: (after visitor) unused_parameters={:?}", unused_parameters);
+ debug!(?unused_parameters, "(after visitor)");
mark_used_by_predicates(tcx, def_id, &mut unused_parameters);
- debug!("unused_generic_params: (end) unused_parameters={:?}", unused_parameters);
+ debug!(?unused_parameters, "(end)");
// Emit errors for debugging and testing if enabled.
if !unused_parameters.is_empty() {
@@ -101,24 +100,55 @@
/// Some parameters are considered used-by-default, such as non-generic parameters and the dummy
/// generic parameters from closures; this function marks them as used. `leaf_is_closure` should
/// be `true` if the item that `unused_generic_params` was invoked on is a closure.
+#[instrument(skip(tcx, def_id, generics, unused_parameters))]
fn mark_used_by_default_parameters<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
generics: &'tcx ty::Generics,
unused_parameters: &mut FiniteBitSet<u32>,
) {
- if !tcx.is_trait(def_id) && (tcx.is_closure(def_id) || tcx.type_of(def_id).is_generator()) {
- for param in &generics.params {
- debug!("mark_used_by_default_parameters: (closure/gen) param={:?}", param);
- unused_parameters.clear(param.index);
- }
- } else {
- for param in &generics.params {
- debug!("mark_used_by_default_parameters: (other) param={:?}", param);
- if let ty::GenericParamDefKind::Lifetime = param.kind {
+ match tcx.def_kind(def_id) {
+ DefKind::Closure | DefKind::Generator => {
+ for param in &generics.params {
+ debug!(?param, "(closure/gen)");
unused_parameters.clear(param.index);
}
}
+ DefKind::Mod
+ | DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Trait
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy
+ | DefKind::TyParam
+ | DefKind::Fn
+ | DefKind::Const
+ | DefKind::ConstParam
+ | DefKind::Static
+ | DefKind::Ctor(_, _)
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::Macro(_)
+ | DefKind::ExternCrate
+ | DefKind::Use
+ | DefKind::ForeignMod
+ | DefKind::AnonConst
+ | DefKind::OpaqueTy
+ | DefKind::Field
+ | DefKind::LifetimeParam
+ | DefKind::GlobalAsm
+ | DefKind::Impl => {
+ for param in &generics.params {
+ debug!(?param, "(other)");
+ if let ty::GenericParamDefKind::Lifetime = param.kind {
+ unused_parameters.clear(param.index);
+ }
+ }
+ }
}
if let Some(parent) = generics.parent {
@@ -128,6 +158,7 @@
/// Search the predicates on used generic parameters for any unused generic parameters, and mark
/// those as used.
+#[instrument(skip(tcx, def_id))]
fn mark_used_by_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
@@ -135,16 +166,12 @@
) {
let def_id = tcx.closure_base_def_id(def_id);
let predicates = tcx.explicit_predicates_of(def_id);
- debug!("mark_used_by_predicates: predicates_of={:?}", predicates);
let mut current_unused_parameters = FiniteBitSet::new_empty();
// Run to a fixed point to support `where T: Trait<U>, U: Trait<V>`, starting with an empty
// bit set so that this is skipped if all parameters are already used.
while current_unused_parameters != *unused_parameters {
- debug!(
- "mark_used_by_predicates: current_unused_parameters={:?} = unused_parameters={:?}",
- current_unused_parameters, unused_parameters
- );
+ debug!(?current_unused_parameters, ?unused_parameters);
current_unused_parameters = *unused_parameters;
for (predicate, _) in predicates.predicates {
@@ -169,13 +196,13 @@
/// Emit errors for the function annotated by `#[rustc_polymorphize_error]`, labelling each generic
/// parameter which was unused.
+#[instrument(skip(tcx, generics))]
fn emit_unused_generic_params_error<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
generics: &'tcx ty::Generics,
unused_parameters: &FiniteBitSet<u32>,
) {
- debug!("emit_unused_generic_params_error: def_id={:?}", def_id);
let base_def_id = tcx.closure_base_def_id(def_id);
if !tcx
.get_attrs(base_def_id)
@@ -185,7 +212,6 @@
return;
}
- debug!("emit_unused_generic_params_error: unused_parameters={:?}", unused_parameters);
let fn_span = match tcx.opt_item_name(def_id) {
Some(ident) => ident.span,
_ => tcx.def_span(def_id),
@@ -197,7 +223,7 @@
while let Some(generics) = next_generics {
for param in &generics.params {
if unused_parameters.contains(param.index).unwrap_or(false) {
- debug!("emit_unused_generic_params_error: param={:?}", param);
+ debug!(?param);
let def_span = tcx.def_span(param.def_id);
err.span_label(def_span, &format!("generic parameter `{}` is unused", param.name));
}
@@ -219,25 +245,23 @@
impl<'a, 'tcx> MarkUsedGenericParams<'a, 'tcx> {
/// Invoke `unused_generic_params` on a body contained within the current item (e.g.
/// a closure, generator or constant).
+ #[instrument(skip(self, def_id, substs))]
fn visit_child_body(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) {
let unused = self.tcx.unused_generic_params(def_id);
- debug!(
- "visit_child_body: unused_parameters={:?} unused={:?}",
- self.unused_parameters, unused
- );
+ debug!(?self.unused_parameters, ?unused);
for (i, arg) in substs.iter().enumerate() {
let i = i.try_into().unwrap();
if !unused.contains(i).unwrap_or(false) {
arg.visit_with(self);
}
}
- debug!("visit_child_body: unused_parameters={:?}", self.unused_parameters);
+ debug!(?self.unused_parameters);
}
}
impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
+ #[instrument(skip(self, local))]
fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
- debug!("visit_local_decl: local_decl={:?}", local_decl);
if local == Local::from_usize(1) {
let def_kind = self.tcx.def_kind(self.def_id);
if matches!(def_kind, DefKind::Closure | DefKind::Generator) {
@@ -245,7 +269,7 @@
// happens because the first argument to the closure is a reference to itself and
// that will call `visit_substs`, resulting in each generic parameter captured being
// considered used by default.
- debug!("visit_local_decl: skipping closure substs");
+ debug!("skipping closure substs");
return;
}
}
@@ -263,15 +287,15 @@
}
impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
+ #[instrument(skip(self))]
fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> ControlFlow<Self::BreakTy> {
- debug!("visit_const: c={:?}", c);
if !c.has_param_types_or_consts() {
return ControlFlow::CONTINUE;
}
match c.val {
ty::ConstKind::Param(param) => {
- debug!("visit_const: param={:?}", param);
+ debug!(?param);
self.unused_parameters.clear(param.index);
ControlFlow::CONTINUE
}
@@ -296,15 +320,15 @@
}
}
+ #[instrument(skip(self))]
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- debug!("visit_ty: ty={:?}", ty);
if !ty.has_param_types_or_consts() {
return ControlFlow::CONTINUE;
}
match *ty.kind() {
ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => {
- debug!("visit_ty: def_id={:?}", def_id);
+ debug!(?def_id);
// Avoid cycle errors with generators.
if def_id == self.def_id {
return ControlFlow::CONTINUE;
@@ -316,7 +340,7 @@
ControlFlow::CONTINUE
}
ty::Param(param) => {
- debug!("visit_ty: param={:?}", param);
+ debug!(?param);
self.unused_parameters.clear(param.index);
ControlFlow::CONTINUE
}
@@ -333,8 +357,8 @@
impl<'a, 'tcx> TypeVisitor<'tcx> for HasUsedGenericParams<'a> {
type BreakTy = ();
+ #[instrument(skip(self))]
fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> ControlFlow<Self::BreakTy> {
- debug!("visit_const: c={:?}", c);
if !c.has_param_types_or_consts() {
return ControlFlow::CONTINUE;
}
@@ -351,8 +375,8 @@
}
}
+ #[instrument(skip(self))]
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- debug!("visit_ty: ty={:?}", ty);
if !ty.has_param_types_or_consts() {
return ControlFlow::CONTINUE;
}
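The polymorphize changes above swap hand-formatted `debug!("fn: x={:?}", x)` strings for `#[instrument]` spans plus the `?field` shorthand from the `tracing` crate. A self-contained sketch of both idioms; the dependency versions (`tracing = "0.1"`, `tracing-subscriber = "0.2"`) are assumptions, not taken from this import:

```rust
use tracing::{debug, instrument};

// The span created by #[instrument] records `id` automatically; `data` is
// skipped because it would be noisy, mirroring `skip(tcx)` in the diff.
#[instrument(skip(data))]
fn mark_used(id: u32, data: &[u32]) -> usize {
    let used = data.iter().filter(|&&d| d == id).count();
    // `?used` attaches the value with its Debug impl, replacing the old
    // "mark_used: used={:?}" style format strings.
    debug!(?used, "finished scan");
    used
}

fn main() {
    tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();
    mark_used(3, &[1, 3, 3, 7]);
}
```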
diff --git a/compiler/rustc_mir/src/shim.rs b/compiler/rustc_mir/src/shim.rs
index 6aaf27b..796d024 100644
--- a/compiler/rustc_mir/src/shim.rs
+++ b/compiler/rustc_mir/src/shim.rs
@@ -134,7 +134,7 @@
// Check if this is a generator, if so, return the drop glue for it
if let Some(&ty::Generator(gen_def_id, substs, _)) = ty.map(|ty| ty.kind()) {
- let body = &**tcx.optimized_mir(gen_def_id).generator_drop.as_ref().unwrap();
+ let body = tcx.optimized_mir(gen_def_id).generator_drop().unwrap();
return body.clone().subst(tcx, substs);
}
@@ -421,7 +421,7 @@
let func = Operand::Constant(box Constant {
span: self.span,
user_ty: None,
- literal: ty::Const::zero_sized(tcx, func_ty),
+ literal: ty::Const::zero_sized(tcx, func_ty).into(),
});
let ref_loc = self.make_place(
@@ -463,7 +463,7 @@
let cond = self.make_place(Mutability::Mut, tcx.types.bool);
let compute_cond = self.make_statement(StatementKind::Assign(box (
cond,
- Rvalue::BinaryOp(BinOp::Ne, Operand::Copy(end), Operand::Copy(beg)),
+ Rvalue::BinaryOp(BinOp::Ne, box (Operand::Copy(end), Operand::Copy(beg))),
)));
// `if end != beg { goto loop_body; } else { goto loop_end; }`
@@ -478,7 +478,7 @@
box Constant {
span: self.span,
user_ty: None,
- literal: ty::Const::from_usize(self.tcx, value),
+ literal: ty::Const::from_usize(self.tcx, value).into(),
}
}
@@ -509,7 +509,7 @@
Rvalue::Use(Operand::Constant(box Constant {
span: self.span,
user_ty: None,
- literal: len,
+ literal: len.into(),
})),
))),
];
@@ -536,8 +536,7 @@
Place::from(beg),
Rvalue::BinaryOp(
BinOp::Add,
- Operand::Copy(Place::from(beg)),
- Operand::Constant(self.make_usize(1)),
+ box (Operand::Copy(Place::from(beg)), Operand::Constant(self.make_usize(1))),
),
)))];
self.block(statements, TerminatorKind::Goto { target: BasicBlock::new(1) }, false);
@@ -590,8 +589,7 @@
Place::from(beg),
Rvalue::BinaryOp(
BinOp::Add,
- Operand::Copy(Place::from(beg)),
- Operand::Constant(self.make_usize(1)),
+ box (Operand::Copy(Place::from(beg)), Operand::Constant(self.make_usize(1))),
),
)));
self.block(vec![statement], TerminatorKind::Goto { target: BasicBlock::new(6) }, true);
@@ -770,7 +768,7 @@
Operand::Constant(box Constant {
span,
user_ty: None,
- literal: ty::Const::zero_sized(tcx, ty),
+ literal: ty::Const::zero_sized(tcx, ty).into(),
}),
rcvr.into_iter().collect::<Vec<_>>(),
)
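Several hunks in this file (and in const_prop.rs below) switch `Rvalue::BinaryOp`/`CheckedBinaryOp` to carry a boxed operand pair, matched with `box (lhs, rhs)` patterns. A toy sketch of that shape outside rustc, using the nightly `box_patterns` feature; the enum is a simplified stand-in, not the real MIR type:

```rust
#![feature(box_patterns)]

enum Rvalue {
    BinaryOp(char, Box<(i32, i32)>),
    Use(i32),
}

fn eval(rv: Rvalue) -> i32 {
    match rv {
        // `box (lhs, rhs)` destructures through the Box, mirroring the
        // `Rvalue::BinaryOp(op, box (lhs, rhs))` arms in the diff.
        Rvalue::BinaryOp(op, box (lhs, rhs)) => match op {
            '+' => lhs + rhs,
            '*' => lhs * rhs,
            _ => unimplemented!("operator {}", op),
        },
        Rvalue::Use(v) => v,
    }
}

fn main() {
    assert_eq!(eval(Rvalue::BinaryOp('+', Box::new((2, 3)))), 5);
    assert_eq!(eval(Rvalue::Use(7)), 7);
}
```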
diff --git a/compiler/rustc_mir/src/transform/check_consts/mod.rs b/compiler/rustc_mir/src/transform/check_consts/mod.rs
index ba7bea4..19aee03 100644
--- a/compiler/rustc_mir/src/transform/check_consts/mod.rs
+++ b/compiler/rustc_mir/src/transform/check_consts/mod.rs
@@ -85,8 +85,7 @@
feature_gate: Symbol,
) -> bool {
let attrs = tcx.get_attrs(def_id);
- attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs)
- .map_or(false, |mut features| features.any(|name| name == feature_gate))
+ attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs).any(|name| name == feature_gate)
}
// Returns `true` if the given `const fn` is "const-stable".
diff --git a/compiler/rustc_mir/src/transform/check_consts/ops.rs b/compiler/rustc_mir/src/transform/check_consts/ops.rs
index 6f98760..a18c1f7 100644
--- a/compiler/rustc_mir/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_mir/src/transform/check_consts/ops.rs
@@ -377,6 +377,18 @@
}
}
+/// A call to a `panic()` lang item where the first argument is _not_ a `&str`.
+#[derive(Debug)]
+pub struct PanicNonStr;
+impl NonConstOp for PanicNonStr {
+ fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+ ccx.tcx.sess.struct_span_err(
+ span,
+ "argument to `panic!()` in a const context must have type `&str`",
+ )
+ }
+}
+
#[derive(Debug)]
pub struct RawPtrComparison;
impl NonConstOp for RawPtrComparison {
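The `PanicNonStr` op added above fires on `panic!` calls in const contexts whose payload is not a `&str`. A hypothetical illustration of the user-facing pattern it targets, assuming the 2018 edition and the unstable `const_panic` feature of this era; the exact diagnostic flow is not verified here:

```rust
#![feature(const_panic)]

const fn non_negative(x: i32) -> i32 {
    if x < 0 {
        // `panic!` with a non-string payload lowers to `begin_panic` with a
        // non-`&str` argument, which the const checker now rejects with
        // "argument to `panic!()` in a const context must have type `&str`".
        panic!(x)
    } else {
        x
    }
}

fn main() {
    assert_eq!(non_negative(3), 3);
}
```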
diff --git a/compiler/rustc_mir/src/transform/check_consts/qualifs.rs b/compiler/rustc_mir/src/transform/check_consts/qualifs.rs
index 0ce1980..748f65c 100644
--- a/compiler/rustc_mir/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_mir/src/transform/check_consts/qualifs.rs
@@ -168,7 +168,7 @@
| Rvalue::UnaryOp(_, operand)
| Rvalue::Cast(_, operand, _) => in_operand::<Q, _>(cx, in_local, operand),
- Rvalue::BinaryOp(_, lhs, rhs) | Rvalue::CheckedBinaryOp(_, lhs, rhs) => {
+ Rvalue::BinaryOp(_, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(_, box (lhs, rhs)) => {
in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)
}
@@ -246,25 +246,27 @@
};
// Check the qualifs of the value of `const` items.
- if let ty::ConstKind::Unevaluated(def, _, promoted) = constant.literal.val {
- assert!(promoted.is_none());
- // Don't peek inside trait associated constants.
- if cx.tcx.trait_of_item(def.did).is_none() {
- let qualifs = if let Some((did, param_did)) = def.as_const_arg() {
- cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did))
- } else {
- cx.tcx.at(constant.span).mir_const_qualif(def.did)
- };
+ if let Some(ct) = constant.literal.const_for_ty() {
+ if let ty::ConstKind::Unevaluated(def, _, promoted) = ct.val {
+ assert!(promoted.is_none());
+ // Don't peek inside trait associated constants.
+ if cx.tcx.trait_of_item(def.did).is_none() {
+ let qualifs = if let Some((did, param_did)) = def.as_const_arg() {
+ cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did))
+ } else {
+ cx.tcx.at(constant.span).mir_const_qualif(def.did)
+ };
- if !Q::in_qualifs(&qualifs) {
- return false;
+ if !Q::in_qualifs(&qualifs) {
+ return false;
+ }
+
+ // Just in case the type is more specific than
+ // the definition, e.g., impl associated const
+ // with type parameters, take it into account.
}
-
- // Just in case the type is more specific than
- // the definition, e.g., impl associated const
- // with type parameters, take it into account.
}
}
// Otherwise use the qualifs of the type.
- Q::in_any_value_of_ty(cx, constant.literal.ty)
+ Q::in_any_value_of_ty(cx, constant.literal.ty())
}
diff --git a/compiler/rustc_mir/src/transform/check_consts/validation.rs b/compiler/rustc_mir/src/transform/check_consts/validation.rs
index a92997d..1ad7b8f 100644
--- a/compiler/rustc_mir/src/transform/check_consts/validation.rs
+++ b/compiler/rustc_mir/src/transform/check_consts/validation.rs
@@ -222,7 +222,7 @@
// `async` functions cannot be `const fn`. This is checked during AST lowering, so there's
// no need to emit duplicate errors here.
- if is_async_fn(self.ccx) || body.generator_kind.is_some() {
+ if is_async_fn(self.ccx) || body.generator.is_some() {
tcx.sess.delay_span_bug(body.span, "`async` functions cannot be `const fn`");
return;
}
@@ -515,7 +515,7 @@
// Special-case reborrows to be more like a copy of a reference.
match *rvalue {
Rvalue::Ref(_, kind, place) => {
- if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, self.body, place) {
+ if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
let ctx = match kind {
BorrowKind::Shared => {
PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
@@ -530,21 +530,21 @@
PlaceContext::MutatingUse(MutatingUseContext::Borrow)
}
};
- self.visit_local(&place.local, ctx, location);
- self.visit_projection(place.local, reborrowed_proj, ctx, location);
+ self.visit_local(&reborrowed_place_ref.local, ctx, location);
+ self.visit_projection(reborrowed_place_ref, ctx, location);
return;
}
}
Rvalue::AddressOf(mutbl, place) => {
- if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, self.body, place) {
+ if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
let ctx = match mutbl {
Mutability::Not => {
PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
}
Mutability::Mut => PlaceContext::MutatingUse(MutatingUseContext::AddressOf),
};
- self.visit_local(&place.local, ctx, location);
- self.visit_projection(place.local, reborrowed_proj, ctx, location);
+ self.visit_local(&reborrowed_place_ref.local, ctx, location);
+ self.visit_projection(reborrowed_place_ref, ctx, location);
return;
}
}
@@ -684,8 +684,8 @@
}
}
- Rvalue::BinaryOp(op, ref lhs, ref rhs)
- | Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
+ Rvalue::BinaryOp(op, box (ref lhs, ref rhs))
+ | Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
let lhs_ty = lhs.ty(self.body, self.tcx);
let rhs_ty = rhs.ty(self.body, self.tcx);
@@ -808,18 +808,19 @@
| StatementKind::Retag { .. }
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
| StatementKind::Nop => {}
}
}
- #[instrument(skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
use rustc_target::spec::abi::Abi::RustIntrinsic;
self.super_terminator(terminator, location);
match &terminator.kind {
- TerminatorKind::Call { func, .. } => {
+ TerminatorKind::Call { func, args, .. } => {
let ConstCx { tcx, body, param_env, .. } = *self.ccx;
let caller = self.def_id().to_def_id();
@@ -881,9 +882,17 @@
}
// At this point, we are calling a function, `callee`, whose `DefId` is known...
-
if is_lang_panic_fn(tcx, callee) {
self.check_op(ops::Panic);
+
+ // const-eval of the `begin_panic` fn assumes the argument is `&str`
+ if Some(callee) == tcx.lang_items().begin_panic_fn() {
+ match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+ ty::Ref(_, ty, _) if ty.is_str() => (),
+ _ => self.check_op(ops::PanicNonStr),
+ }
+ }
+
return;
}
@@ -1039,7 +1048,7 @@
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
place: Place<'tcx>,
-) -> Option<&'a [PlaceElem<'tcx>]> {
+) -> Option<PlaceRef<'tcx>> {
match place.as_ref().last_projection() {
Some((place_base, ProjectionElem::Deref)) => {
// A borrow of a `static` also looks like `&(*_1)` in the MIR, but `_1` is a `const`
@@ -1048,13 +1057,14 @@
None
} else {
// Ensure the type being derefed is a reference and not a raw pointer.
- //
// This is sufficient to prevent an access to a `static mut` from being marked as a
// reborrow, even if the check above were to disappear.
let inner_ty = place_base.ty(body, tcx).ty;
- match inner_ty.kind() {
- ty::Ref(..) => Some(place_base.projection),
- _ => None,
+
+ if let ty::Ref(..) = inner_ty.kind() {
+ return Some(place_base);
+ } else {
+ return None;
}
}
}
diff --git a/compiler/rustc_mir/src/transform/check_unsafety.rs b/compiler/rustc_mir/src/transform/check_unsafety.rs
index f047275..532d201 100644
--- a/compiler/rustc_mir/src/transform/check_unsafety.rs
+++ b/compiler/rustc_mir/src/transform/check_unsafety.rs
@@ -123,6 +123,7 @@
UnsafetyViolationKind::General,
UnsafetyViolationDetails::UseOfInlineAssembly,
),
+ StatementKind::CopyNonOverlapping(..) => unreachable!(),
}
self.super_statement(statement, location);
}
@@ -340,7 +341,7 @@
false
}
// With the RFC 2585, no longer allow `unsafe` operations in `unsafe fn`s
- Safety::FnUnsafe if self.tcx.features().unsafe_block_in_unsafe_fn => {
+ Safety::FnUnsafe => {
for violation in violations {
let mut violation = *violation;
@@ -355,8 +356,7 @@
}
false
}
- // `unsafe` function bodies allow unsafe without additional unsafe blocks (before RFC 2585)
- Safety::BuiltinUnsafe | Safety::FnUnsafe => true,
+ Safety::BuiltinUnsafe => true,
Safety::ExplicitUnsafe(hir_id) => {
// mark unsafe block as used if there are any unsafe operations inside
if !violations.is_empty() {
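The `check_unsafety` hunk above removes the RFC 2585 feature gate, so `unsafe fn` bodies are treated like ordinary code whenever the `unsafe_op_in_unsafe_fn` lint is raised. A minimal sketch of the user-visible behavior (whether it warns or rejects depends on the lint level):

```rust
#![deny(unsafe_op_in_unsafe_fn)]

/// With RFC 2585 semantics, the body of an `unsafe fn` no longer grants an
/// implicit unsafe context; the raw-pointer dereference needs its own block.
unsafe fn read(ptr: *const u8) -> u8 {
    unsafe { *ptr }
}

fn main() {
    let x = 42u8;
    let v = unsafe { read(&x) };
    assert_eq!(v, 42);
}
```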
diff --git a/compiler/rustc_mir/src/transform/const_goto.rs b/compiler/rustc_mir/src/transform/const_goto.rs
new file mode 100644
index 0000000..b5c8b4b
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/const_goto.rs
@@ -0,0 +1,122 @@
+//! This pass optimizes the following sequence
+//! ```rust,ignore (example)
+//! bb2: {
+//! _2 = const true;
+//! goto -> bb3;
+//! }
+//!
+//! bb3: {
+//! switchInt(_2) -> [false: bb4, otherwise: bb5];
+//! }
+//! ```
+//! into
+//! ```rust,ignore (example)
+//! bb2: {
+//! _2 = const true;
+//! goto -> bb5;
+//! }
+//! ```
+
+use crate::transform::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::{mir::visit::Visitor, ty::ParamEnv};
+
+use super::simplify::{simplify_cfg, simplify_locals};
+
+pub struct ConstGoto;
+
+impl<'tcx> MirPass<'tcx> for ConstGoto {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ if tcx.sess.mir_opt_level() < 4 {
+ return;
+ }
+ trace!("Running ConstGoto on {:?}", body.source);
+ let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
+ let mut opt_finder =
+ ConstGotoOptimizationFinder { tcx, body, optimizations: vec![], param_env };
+ opt_finder.visit_body(body);
+ let should_simplify = !opt_finder.optimizations.is_empty();
+ for opt in opt_finder.optimizations {
+ let terminator = body.basic_blocks_mut()[opt.bb_with_goto].terminator_mut();
+ let new_goto = TerminatorKind::Goto { target: opt.target_to_use_in_goto };
+ debug!("SUCCESS: replacing `{:?}` with `{:?}`", terminator.kind, new_goto);
+ terminator.kind = new_goto;
+ }
+
+ // If we applied optimizations, we may have some CFG cleanup to do to
+ // make it easier for further passes.
+ if should_simplify {
+ simplify_cfg(body);
+ simplify_locals(body, tcx);
+ }
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for ConstGotoOptimizationFinder<'a, 'tcx> {
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ let _: Option<_> = try {
+ let target = terminator.kind.as_goto()?;
+ // We only apply this optimization if the last statement is a const assignment
+ let last_statement = self.body.basic_blocks()[location.block].statements.last()?;
+
+ if let (place, Rvalue::Use(Operand::Constant(_const))) =
+ last_statement.kind.as_assign()?
+ {
+ // We found a constant being assigned to `place`.
+ // Now check that the target of this Goto switches on this place.
+ let target_bb = &self.body.basic_blocks()[target];
+
+ // FIXME(simonvandel): We are conservative here when we don't allow
+ // any statements in the target basic block.
+ // This could probably be relaxed to allow `StorageDead`s which could be
+ // copied to the predecessor of this block.
+ if !target_bb.statements.is_empty() {
+ None?
+ }
+
+ let target_bb_terminator = target_bb.terminator();
+ let (discr, switch_ty, targets) = target_bb_terminator.kind.as_switch()?;
+ if discr.place() == Some(*place) {
+ // We now know that the Switch matches on the const place, and it is statementless
+ // Now find which value in the Switch matches the const value.
+ let const_value =
+ _const.literal.try_eval_bits(self.tcx, self.param_env, switch_ty)?;
+ let found_value_idx_option = targets
+ .iter()
+ .enumerate()
+ .find(|(_, (value, _))| const_value == *value)
+ .map(|(idx, _)| idx);
+
+ let target_to_use_in_goto =
+ if let Some(found_value_idx) = found_value_idx_option {
+ targets.iter().nth(found_value_idx).unwrap().1
+ } else {
+ // If we did not find the const value in values, it must be the otherwise case
+ targets.otherwise()
+ };
+
+ self.optimizations.push(OptimizationToApply {
+ bb_with_goto: location.block,
+ target_to_use_in_goto,
+ });
+ }
+ }
+ Some(())
+ };
+
+ self.super_terminator(terminator, location);
+ }
+}
+
+struct OptimizationToApply {
+ bb_with_goto: BasicBlock,
+ target_to_use_in_goto: BasicBlock,
+}
+
+pub struct ConstGotoOptimizationFinder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ optimizations: Vec<OptimizationToApply>,
+}
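For a sense of where the const-assign-then-switch shape in the module docs comes from, here is a hypothetical source function whose unoptimized MIR can contain exactly that pattern (the precise MIR depends on the build, so this is illustrative only):

```rust
fn classify(x: i32) -> &'static str {
    // One arm assigns `const true` to the join-point local and jumps to a
    // block that only does `switchInt` on it; ConstGoto can retarget that
    // goto straight at the `true` successor.
    let cond = if x > 0 { true } else { x == 0 };
    if cond { "non-negative" } else { "negative" }
}

fn main() {
    assert_eq!(classify(1), "non-negative");
    assert_eq!(classify(-1), "negative");
}
```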
diff --git a/compiler/rustc_mir/src/transform/const_prop.rs b/compiler/rustc_mir/src/transform/const_prop.rs
index fd5c223..cc8669d 100644
--- a/compiler/rustc_mir/src/transform/const_prop.rs
+++ b/compiler/rustc_mir/src/transform/const_prop.rs
@@ -13,9 +13,9 @@
MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor,
};
use rustc_middle::mir::{
- AssertKind, BasicBlock, BinOp, Body, ClearCrossCrate, Constant, Local, LocalDecl, LocalKind,
- Location, Operand, Place, Rvalue, SourceInfo, SourceScope, SourceScopeData, Statement,
- StatementKind, Terminator, TerminatorKind, UnOp, RETURN_PLACE,
+ AssertKind, BasicBlock, BinOp, Body, ClearCrossCrate, Constant, ConstantKind, Local, LocalDecl,
+ LocalKind, Location, Operand, Place, Rvalue, SourceInfo, SourceScope, SourceScopeData,
+ Statement, StatementKind, Terminator, TerminatorKind, UnOp, RETURN_PLACE,
};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutError, TyAndLayout};
use rustc_middle::ty::subst::{InternalSubsts, Subst};
@@ -140,7 +140,7 @@
body.arg_count,
Default::default(),
body.span,
- body.generator_kind,
+ body.generator_kind(),
);
// FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold
@@ -197,7 +197,7 @@
_instance: ty::Instance<'tcx>,
_abi: Abi,
_args: &[OpTy<'tcx>],
- _ret: Option<(PlaceTy<'tcx>, BasicBlock)>,
+ _ret: Option<(&PlaceTy<'tcx>, BasicBlock)>,
_unwind: Option<BasicBlock>,
) -> InterpResult<'tcx, Option<&'mir Body<'tcx>>> {
Ok(None)
@@ -207,7 +207,7 @@
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
_args: &[OpTy<'tcx>],
- _ret: Option<(PlaceTy<'tcx>, BasicBlock)>,
+ _ret: Option<(&PlaceTy<'tcx>, BasicBlock)>,
_unwind: Option<BasicBlock>,
) -> InterpResult<'tcx> {
throw_machine_stop_str!("calling intrinsics isn't supported in ConstProp")
@@ -228,8 +228,8 @@
fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: BinOp,
- _left: ImmTy<'tcx>,
- _right: ImmTy<'tcx>,
+ _left: &ImmTy<'tcx>,
+ _right: &ImmTy<'tcx>,
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
// We can't do this because aliasing of memory can differ between const eval and llvm
throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
@@ -237,7 +237,7 @@
fn box_alloc(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
- _dest: PlaceTy<'tcx>,
+ _dest: &PlaceTy<'tcx>,
) -> InterpResult<'tcx> {
throw_machine_stop_str!("can't const prop heap allocations")
}
@@ -392,12 +392,12 @@
.filter(|ret_layout| {
!ret_layout.is_zst() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
})
- .map(|ret_layout| ecx.allocate(ret_layout, MemoryKind::Stack));
+ .map(|ret_layout| ecx.allocate(ret_layout, MemoryKind::Stack).into());
ecx.push_stack_frame(
Instance::new(def_id, substs),
dummy_body,
- ret.map(Into::into),
+ ret.as_ref(),
StackPopCleanup::None { cleanup: false },
)
.expect("failed to push initial stack frame");
@@ -426,7 +426,7 @@
// Try to read the local as an immediate so that if it is representable as a scalar, we can
// handle it as such, but otherwise, just return the value as is.
- Some(match self.ecx.try_read_immediate(op) {
+ Some(match self.ecx.try_read_immediate(&op) {
Ok(Ok(imm)) => imm.into(),
_ => op,
})
@@ -466,8 +466,8 @@
// an allocation, which we should avoid. When that happens,
// dedicated error variants should be introduced instead.
assert!(
- !error.kind.allocates(),
- "const-prop encountered allocating error: {}",
+ !error.kind().formatted_string(),
+ "const-prop encountered formatting error: {}",
error
);
None
@@ -482,18 +482,21 @@
return None;
}
- match self.ecx.const_to_op(c.literal, None) {
+ match self.ecx.mir_const_to_op(&c.literal, None) {
Ok(op) => Some(op),
Err(error) => {
let tcx = self.ecx.tcx.at(c.span);
let err = ConstEvalErr::new(&self.ecx, error, Some(c.span));
if let Some(lint_root) = self.lint_root(source_info) {
- let lint_only = match c.literal.val {
- // Promoteds must lint and not error as the user didn't ask for them
- ConstKind::Unevaluated(_, _, Some(_)) => true,
- // Out of backwards compatibility we cannot report hard errors in unused
- // generic functions using associated constants of the generic parameters.
- _ => c.literal.needs_subst(),
+ let lint_only = match c.literal {
+ ConstantKind::Ty(ct) => match ct.val {
+ // Promoteds must lint and not error as the user didn't ask for them
+ ConstKind::Unevaluated(_, _, Some(_)) => true,
+ // Out of backwards compatibility we cannot report hard errors in unused
+ // generic functions using associated constants of the generic parameters.
+ _ => c.literal.needs_subst(),
+ },
+ ConstantKind::Val(_, ty) => ty.needs_subst(),
};
if lint_only {
// Out of backwards compatibility we cannot report hard errors in unused
@@ -548,8 +551,8 @@
source_info: SourceInfo,
) -> Option<()> {
if let (val, true) = self.use_ecx(|this| {
- let val = this.ecx.read_immediate(this.ecx.eval_operand(arg, None)?)?;
- let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, val)?;
+ let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
+ let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?;
Ok((val, overflow))
})? {
// `AssertKind` only has an `OverflowNeg` variant, so make sure that is
@@ -573,8 +576,8 @@
right: &Operand<'tcx>,
source_info: SourceInfo,
) -> Option<()> {
- let r = self.use_ecx(|this| this.ecx.read_immediate(this.ecx.eval_operand(right, None)?));
- let l = self.use_ecx(|this| this.ecx.read_immediate(this.ecx.eval_operand(left, None)?));
+ let r = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(right, None)?));
+ let l = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(left, None)?));
// Check for exceeding shifts *even if* we cannot evaluate the LHS.
if op == BinOp::Shr || op == BinOp::Shl {
let r = r?;
@@ -609,7 +612,7 @@
}
}
- if let (Some(l), Some(r)) = (l, r) {
+ if let (Some(l), Some(r)) = (&l, &r) {
// The remaining operators are handled through `overflowing_binary_op`.
if self.use_ecx(|this| {
let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, l, r)?;
@@ -630,7 +633,7 @@
match *operand {
Operand::Copy(l) | Operand::Move(l) => {
if let Some(value) = self.get_const(l) {
- if self.should_const_prop(value) {
+ if self.should_const_prop(&value) {
// FIXME(felix91gr): this code only handles `Scalar` cases.
// For now, we're not handling `ScalarPair` cases because
// doing so here would require a lot of code duplication.
@@ -676,11 +679,11 @@
trace!("checking UnaryOp(op = {:?}, arg = {:?})", op, arg);
self.check_unary_op(*op, arg, source_info)?;
}
- Rvalue::BinaryOp(op, left, right) => {
+ Rvalue::BinaryOp(op, box (left, right)) => {
trace!("checking BinaryOp(op = {:?}, left = {:?}, right = {:?})", op, left, right);
self.check_binary_op(*op, left, right, source_info)?;
}
- Rvalue::CheckedBinaryOp(op, left, right) => {
+ Rvalue::CheckedBinaryOp(op, box (left, right)) => {
trace!(
"checking CheckedBinaryOp(op = {:?}, left = {:?}, right = {:?})",
op,
@@ -725,7 +728,7 @@
return None;
}
- if self.tcx.sess.opts.debugging_opts.mir_opt_level >= 3 {
+ if self.tcx.sess.mir_opt_level() >= 4 {
self.eval_rvalue_with_identities(rvalue, place)
} else {
self.use_ecx(|this| this.ecx.eval_rvalue_into_place(rvalue, place))
@@ -740,12 +743,13 @@
) -> Option<()> {
self.use_ecx(|this| {
match rvalue {
- Rvalue::BinaryOp(op, left, right) | Rvalue::CheckedBinaryOp(op, left, right) => {
+ Rvalue::BinaryOp(op, box (left, right))
+ | Rvalue::CheckedBinaryOp(op, box (left, right)) => {
let l = this.ecx.eval_operand(left, None);
let r = this.ecx.eval_operand(right, None);
let const_arg = match (l, r) {
- (Ok(x), Err(_)) | (Err(_), Ok(x)) => this.ecx.read_immediate(x)?,
+ (Ok(ref x), Err(_)) | (Err(_), Ok(ref x)) => this.ecx.read_immediate(x)?,
(Err(e), Err(_)) => return Err(e),
(Ok(_), Ok(_)) => {
this.ecx.eval_rvalue_into_place(rvalue, place)?;
@@ -760,26 +764,26 @@
match op {
BinOp::BitAnd => {
if arg_value == 0 {
- this.ecx.write_immediate(*const_arg, dest)?;
+ this.ecx.write_immediate(*const_arg, &dest)?;
}
}
BinOp::BitOr => {
if arg_value == const_arg.layout.size.truncate(u128::MAX)
|| (const_arg.layout.ty.is_bool() && arg_value == 1)
{
- this.ecx.write_immediate(*const_arg, dest)?;
+ this.ecx.write_immediate(*const_arg, &dest)?;
}
}
BinOp::Mul => {
if const_arg.layout.ty.is_integral() && arg_value == 0 {
- if let Rvalue::CheckedBinaryOp(_, _, _) = rvalue {
+ if let Rvalue::CheckedBinaryOp(_, _) = rvalue {
let val = Immediate::ScalarPair(
const_arg.to_scalar()?.into(),
Scalar::from_bool(false).into(),
);
- this.ecx.write_immediate(val, dest)?;
+ this.ecx.write_immediate(val, &dest)?;
} else {
- this.ecx.write_immediate(*const_arg, dest)?;
+ this.ecx.write_immediate(*const_arg, &dest)?;
}
}
}
@@ -802,20 +806,23 @@
Operand::Constant(Box::new(Constant {
span,
user_ty: None,
- literal: ty::Const::from_scalar(self.tcx, scalar, ty),
+ literal: ty::Const::from_scalar(self.tcx, scalar, ty).into(),
}))
}
fn replace_with_const(
&mut self,
rval: &mut Rvalue<'tcx>,
- value: OpTy<'tcx>,
+ value: &OpTy<'tcx>,
source_info: SourceInfo,
) {
if let Rvalue::Use(Operand::Constant(c)) = rval {
- if !matches!(c.literal.val, ConstKind::Unevaluated(..)) {
- trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
- return;
+ match c.literal {
+ ConstantKind::Ty(c) if matches!(c.val, ConstKind::Unevaluated(..)) => {}
+ _ => {
+ trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
+ return;
+ }
}
}
@@ -882,13 +889,17 @@
*rval = Rvalue::Use(Operand::Constant(Box::new(Constant {
span: source_info.span,
user_ty: None,
- literal: self.ecx.tcx.mk_const(ty::Const {
- ty,
- val: ty::ConstKind::Value(ConstValue::ByRef {
- alloc,
- offset: Size::ZERO,
- }),
- }),
+ literal: self
+ .ecx
+ .tcx
+ .mk_const(ty::Const {
+ ty,
+ val: ty::ConstKind::Value(ConstValue::ByRef {
+ alloc,
+ offset: Size::ZERO,
+ }),
+ })
+ .into(),
})));
}
}
@@ -902,8 +913,8 @@
}
/// Returns `true` if and only if this `op` should be const-propagated into.
- fn should_const_prop(&mut self, op: OpTy<'tcx>) -> bool {
- let mir_opt_level = self.tcx.sess.opts.debugging_opts.mir_opt_level;
+ fn should_const_prop(&mut self, op: &OpTy<'tcx>) -> bool {
+ let mir_opt_level = self.tcx.sess.mir_opt_level();
if mir_opt_level == 0 {
return false;
@@ -913,7 +924,7 @@
return false;
}
- match *op {
+ match **op {
interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
s.is_bits()
}
@@ -1071,9 +1082,9 @@
fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
self.super_operand(operand, location);
- // Only const prop copies and moves on `mir_opt_level=2` as doing so
+ // Only const prop copies and moves on `mir_opt_level=3` as doing so
// currently slightly increases compile time in some cases.
- if self.tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
+ if self.tcx.sess.mir_opt_level() >= 3 {
self.propagate_operand(operand)
}
}
@@ -1094,7 +1105,7 @@
// This will return None if the above `const_prop` invocation only "wrote" a
// type whose creation requires no write. E.g. a generator whose initial state
// consists solely of uninitialized memory (so it doesn't capture any locals).
- if let Some(value) = self.get_const(place) {
+ if let Some(ref value) = self.get_const(place) {
if self.should_const_prop(value) {
trace!("replacing {:?} with {:?}", rval, value);
self.replace_with_const(rval, value, source_info);
@@ -1177,10 +1188,10 @@
self.super_terminator(terminator, location);
match &mut terminator.kind {
TerminatorKind::Assert { expected, ref msg, ref mut cond, .. } => {
- if let Some(value) = self.eval_operand(&cond, source_info) {
+ if let Some(ref value) = self.eval_operand(&cond, source_info) {
trace!("assertion on {:?} should be {:?}", value, expected);
let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
- let value_const = self.ecx.read_scalar(value).unwrap();
+ let value_const = self.ecx.read_scalar(&value).unwrap();
if expected != value_const {
enum DbgVal<T> {
Val(T),
@@ -1198,9 +1209,9 @@
// This can be `None` if the lhs wasn't const propagated and we just
// triggered the assert on the value of the rhs.
match self.eval_operand(op, source_info) {
- Some(op) => {
- DbgVal::Val(self.ecx.read_immediate(op).unwrap().to_const_int())
- }
+ Some(op) => DbgVal::Val(
+ self.ecx.read_immediate(&op).unwrap().to_const_int(),
+ ),
None => DbgVal::Underscore,
}
};
@@ -1253,7 +1264,7 @@
TerminatorKind::SwitchInt { ref mut discr, .. } => {
// FIXME: This is currently redundant with `visit_operand`, but sadly
// always visiting operands currently causes a perf regression in LLVM codegen, so
- // `visit_operand` currently only runs for propagates places for `mir_opt_level=3`.
+ // `visit_operand` currently only propagates places for `mir_opt_level=4`.
self.propagate_operand(discr)
}
// None of these have Operands to const-propagate.
@@ -1272,7 +1283,7 @@
// Every argument in our function calls has already been propagated in `visit_operand`.
//
// NOTE: because LLVM codegen gives slight performance regressions with it, this is
- // gated on `mir_opt_level=2`.
+ // gated on `mir_opt_level=3`.
TerminatorKind::Call { .. } => {}
}
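The `eval_rvalue_with_identities` path above folds a binary op when only one side is a known constant, relying on identities that hold for any value of the other side. A plain-Rust reminder of those identities (illustrative, not compiler code):

```rust
fn main() {
    // `x` is deliberately not a compile-time constant.
    let x: u32 = std::env::args().count() as u32;

    // The three cases special-cased above: `&` with 0, `|` with all-ones, `*` with 0.
    assert_eq!(x & 0, 0);
    assert_eq!(x | u32::MAX, u32::MAX);
    assert_eq!(x * 0, 0);
}
```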
diff --git a/compiler/rustc_mir/src/transform/coverage/debug.rs b/compiler/rustc_mir/src/transform/coverage/debug.rs
index 2cd0dc6..aabfee5 100644
--- a/compiler/rustc_mir/src/transform/coverage/debug.rs
+++ b/compiler/rustc_mir/src/transform/coverage/debug.rs
@@ -285,10 +285,8 @@
),
};
counters
- .insert(id, DebugCounter::new(counter_kind.clone(), some_block_label))
- .expect_none(
- "attempt to add the same counter_kind to DebugCounters more than once",
- );
+ .try_insert(id, DebugCounter::new(counter_kind.clone(), some_block_label))
+ .expect("attempt to add the same counter_kind to DebugCounters more than once");
}
}
@@ -479,9 +477,9 @@
counter_kind: &CoverageKind,
) {
if let Some(edge_to_counter) = self.some_edge_to_counter.as_mut() {
- edge_to_counter.insert((from_bcb, to_bb), counter_kind.clone()).expect_none(
- "invalid attempt to insert more than one edge counter for the same edge",
- );
+ edge_to_counter
+ .try_insert((from_bcb, to_bb), counter_kind.clone())
+ .expect("invalid attempt to insert more than one edge counter for the same edge");
}
}
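The coverage debug code above replaces `insert(..).expect_none(..)` (whose `option_expect_none` feature was dropped from the crate roots) with the nightly `HashMap::try_insert` from `map_try_insert`. A standalone sketch of that API, nightly toolchain assumed:

```rust
#![feature(map_try_insert)]

use std::collections::HashMap;

fn main() {
    let mut counters: HashMap<u32, &str> = HashMap::new();

    // `try_insert` returns Ok(&mut V) for a fresh key...
    counters.try_insert(1, "bb0").expect("fresh key");

    // ...and Err(OccupiedError) instead of silently overwriting, so a double
    // registration becomes a hard error rather than a lost value.
    assert!(counters.try_insert(1, "bb1").is_err());
    assert_eq!(counters[&1], "bb0");
}
```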
diff --git a/compiler/rustc_mir/src/transform/coverage/graph.rs b/compiler/rustc_mir/src/transform/coverage/graph.rs
index e58b915..6f5fa85 100644
--- a/compiler/rustc_mir/src/transform/coverage/graph.rs
+++ b/compiler/rustc_mir/src/transform/coverage/graph.rs
@@ -392,10 +392,8 @@
}
}
let operand = counter_kind.as_operand_id();
- if let Some(replaced) = self
- .edge_from_bcbs
- .get_or_insert_with(FxHashMap::default)
- .insert(from_bcb, counter_kind)
+ if let Some(replaced) =
+ self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind)
{
Error::from_string(format!(
"attempt to set an edge counter more than once; from_bcb: \
diff --git a/compiler/rustc_mir/src/transform/coverage/query.rs b/compiler/rustc_mir/src/transform/coverage/query.rs
index 4b455a6..de8447f 100644
--- a/compiler/rustc_mir/src/transform/coverage/query.rs
+++ b/compiler/rustc_mir/src/transform/coverage/query.rs
@@ -1,8 +1,7 @@
use super::*;
use rustc_middle::mir::coverage::*;
-use rustc_middle::mir::visit::Visitor;
-use rustc_middle::mir::{self, Coverage, CoverageInfo, Location};
+use rustc_middle::mir::{self, Body, Coverage, CoverageInfo};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::def_id::DefId;
@@ -85,10 +84,21 @@
}
}
}
-}
-impl Visitor<'_> for CoverageVisitor {
- fn visit_coverage(&mut self, coverage: &Coverage, _location: Location) {
+ fn visit_body(&mut self, body: &Body<'_>) {
+ for bb_data in body.basic_blocks().iter() {
+ for statement in bb_data.statements.iter() {
+ if let StatementKind::Coverage(box ref coverage) = statement.kind {
+ if is_inlined(body, statement) {
+ continue;
+ }
+ self.visit_coverage(coverage);
+ }
+ }
+ }
+ }
+
+ fn visit_coverage(&mut self, coverage: &Coverage) {
if self.add_missing_operands {
match coverage.kind {
CoverageKind::Expression { lhs, rhs, .. } => {
@@ -129,10 +139,14 @@
}
fn covered_file_name<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Option<Symbol> {
- for bb_data in mir_body(tcx, def_id).basic_blocks().iter() {
+ let body = mir_body(tcx, def_id);
+ for bb_data in body.basic_blocks().iter() {
for statement in bb_data.statements.iter() {
if let StatementKind::Coverage(box ref coverage) = statement.kind {
if let Some(code_region) = coverage.code_region.as_ref() {
+ if is_inlined(body, statement) {
+ continue;
+ }
return Some(code_region.file_name);
}
}
@@ -151,13 +165,17 @@
}
fn covered_code_regions<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Vec<&'tcx CodeRegion> {
- mir_body(tcx, def_id)
- .basic_blocks()
+ let body = mir_body(tcx, def_id);
+ body.basic_blocks()
.iter()
.map(|data| {
data.statements.iter().filter_map(|statement| match statement.kind {
StatementKind::Coverage(box ref coverage) => {
- coverage.code_region.as_ref() // may be None
+ if is_inlined(body, statement) {
+ None
+ } else {
+ coverage.code_region.as_ref() // may be None
+ }
}
_ => None,
})
@@ -165,3 +183,8 @@
.flatten()
.collect()
}
+
+fn is_inlined(body: &Body<'_>, statement: &Statement<'_>) -> bool {
+ let scope_data = &body.source_scopes[statement.source_info.scope];
+ scope_data.inlined.is_some() || scope_data.inlined_parent_scope.is_some()
+}
diff --git a/compiler/rustc_mir/src/transform/coverage/spans.rs b/compiler/rustc_mir/src/transform/coverage/spans.rs
index fd3e782..e7097ce 100644
--- a/compiler/rustc_mir/src/transform/coverage/spans.rs
+++ b/compiler/rustc_mir/src/transform/coverage/spans.rs
@@ -687,6 +687,7 @@
// Retain spans from all other statements
StatementKind::FakeRead(_, _) // Not including `ForGuardBinding`
+ | StatementKind::CopyNonOverlapping(..)
| StatementKind::Assign(_)
| StatementKind::SetDiscriminant { .. }
| StatementKind::LlvmInlineAsm(_)
diff --git a/compiler/rustc_mir/src/transform/coverage/tests.rs b/compiler/rustc_mir/src/transform/coverage/tests.rs
index d36f1b8..7a9bfaa 100644
--- a/compiler/rustc_mir/src/transform/coverage/tests.rs
+++ b/compiler/rustc_mir/src/transform/coverage/tests.rs
@@ -327,7 +327,7 @@
fn test_covgraph_goto_switchint() {
let mir_body = goto_switchint();
if false {
- println!("basic_blocks = {}", debug_basic_blocks(&mir_body));
+ eprintln!("basic_blocks = {}", debug_basic_blocks(&mir_body));
}
let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
print_coverage_graphviz("covgraph_goto_switchint ", &mir_body, &basic_coverage_blocks);
@@ -583,11 +583,11 @@
let mir_body = goto_switchint();
let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
if false {
- println!(
+ eprintln!(
"basic_coverage_blocks = {:?}",
basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
);
- println!("successors = {:?}", basic_coverage_blocks.successors);
+ eprintln!("successors = {:?}", basic_coverage_blocks.successors);
}
let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
assert_eq!(
diff --git a/compiler/rustc_mir/src/transform/deduplicate_blocks.rs b/compiler/rustc_mir/src/transform/deduplicate_blocks.rs
new file mode 100644
index 0000000..e102512
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/deduplicate_blocks.rs
@@ -0,0 +1,193 @@
+//! This pass finds basic blocks that are completely equal,
+//! and replaces all uses with just one of them.
+
+use std::{collections::hash_map::Entry, hash::Hash, hash::Hasher};
+
+use crate::transform::MirPass;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::mir::visit::MutVisitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+use super::simplify::simplify_cfg;
+
+pub struct DeduplicateBlocks;
+
+impl<'tcx> MirPass<'tcx> for DeduplicateBlocks {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ if tcx.sess.mir_opt_level() < 4 {
+ return;
+ }
+ debug!("Running DeduplicateBlocks on `{:?}`", body.source);
+ let duplicates = find_duplicates(body);
+ let has_opts_to_apply = !duplicates.is_empty();
+
+ if has_opts_to_apply {
+ let mut opt_applier = OptApplier { tcx, duplicates };
+ opt_applier.visit_body(body);
+ simplify_cfg(body);
+ }
+ }
+}
+
+struct OptApplier<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ duplicates: FxHashMap<BasicBlock, BasicBlock>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for OptApplier<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
+ for target in terminator.successors_mut() {
+ if let Some(replacement) = self.duplicates.get(target) {
+ debug!("SUCCESS: Replacing: `{:?}` with `{:?}`", target, replacement);
+ *target = *replacement;
+ }
+ }
+
+ self.super_terminator(terminator, location);
+ }
+}
+
+fn find_duplicates<'a, 'tcx>(body: &'a Body<'tcx>) -> FxHashMap<BasicBlock, BasicBlock> {
+ let mut duplicates = FxHashMap::default();
+
+ let bbs_to_go_through =
+ body.basic_blocks().iter_enumerated().filter(|(_, bbd)| !bbd.is_cleanup).count();
+
+ let mut same_hashes =
+ FxHashMap::with_capacity_and_hasher(bbs_to_go_through, Default::default());
+
+ // Go through the basic blocks backwards. This means that in case of duplicates,
+ // we can use the basic block with the highest index as the replacement for all lower ones.
+ // For example, if bb1, bb2 and bb3 are duplicates, we will first insert bb3 in same_hashes.
+ // Then we will see that bb2 is a duplicate of bb3,
+ // and insert bb2 with the replacement bb3 in the duplicates list.
+ // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the duplicates list
+ // with replacement bb3.
+ // When the duplicates are removed, we will end up with only bb3.
+ for (bb, bbd) in body.basic_blocks().iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup)
+ {
+ // Basic blocks can get really big, so to avoid checking for duplicates in basic blocks
+ // that are unlikely to have duplicates, we stop early. The early bail number has been
+ // found experimentally by eprintln while compiling the crates in the rustc-perf suite.
+ if bbd.statements.len() > 10 {
+ continue;
+ }
+
+ let to_hash = BasicBlockHashable { basic_block_data: bbd };
+ let entry = same_hashes.entry(to_hash);
+ match entry {
+ Entry::Occupied(occupied) => {
+ // The basic block was already in the hashmap, which means we have a duplicate
+ let value = *occupied.get();
+ debug!("Inserting {:?} -> {:?}", bb, value);
+ duplicates.try_insert(bb, value).expect("key was already inserted");
+ }
+ Entry::Vacant(vacant) => {
+ vacant.insert(bb);
+ }
+ }
+ }
+
+ duplicates
+}
+
+struct BasicBlockHashable<'tcx, 'a> {
+ basic_block_data: &'a BasicBlockData<'tcx>,
+}
+
+impl<'tcx, 'a> Hash for BasicBlockHashable<'tcx, 'a> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ hash_statements(state, self.basic_block_data.statements.iter());
+ // Note that since we only hash the kind, we lose span information if we deduplicate the blocks
+ self.basic_block_data.terminator().kind.hash(state);
+ }
+}
+
+impl<'tcx, 'a> Eq for BasicBlockHashable<'tcx, 'a> {}
+
+impl<'tcx, 'a> PartialEq for BasicBlockHashable<'tcx, 'a> {
+ fn eq(&self, other: &Self) -> bool {
+ self.basic_block_data.statements.len() == other.basic_block_data.statements.len()
+ && &self.basic_block_data.terminator().kind == &other.basic_block_data.terminator().kind
+ && self
+ .basic_block_data
+ .statements
+ .iter()
+ .zip(&other.basic_block_data.statements)
+ .all(|(x, y)| statement_eq(&x.kind, &y.kind))
+ }
+}
+
+fn hash_statements<'a, 'tcx, H: Hasher>(
+ hasher: &mut H,
+ iter: impl Iterator<Item = &'a Statement<'tcx>>,
+) where
+ 'tcx: 'a,
+{
+ for stmt in iter {
+ statement_hash(hasher, &stmt.kind);
+ }
+}
+
+fn statement_hash<'tcx, H: Hasher>(hasher: &mut H, stmt: &StatementKind<'tcx>) {
+ match stmt {
+ StatementKind::Assign(box (place, rvalue)) => {
+ place.hash(hasher);
+ rvalue_hash(hasher, rvalue)
+ }
+ x => x.hash(hasher),
+ };
+}
+
+fn rvalue_hash<H: Hasher>(hasher: &mut H, rvalue: &Rvalue<'tcx>) {
+ match rvalue {
+ Rvalue::Use(op) => operand_hash(hasher, op),
+ x => x.hash(hasher),
+ };
+}
+
+fn operand_hash<H: Hasher>(hasher: &mut H, operand: &Operand<'tcx>) {
+ match operand {
+ Operand::Constant(box Constant { user_ty: _, literal, span: _ }) => literal.hash(hasher),
+ x => x.hash(hasher),
+ };
+}
+
+fn statement_eq<'tcx>(lhs: &StatementKind<'tcx>, rhs: &StatementKind<'tcx>) -> bool {
+ let res = match (lhs, rhs) {
+ (
+ StatementKind::Assign(box (place, rvalue)),
+ StatementKind::Assign(box (place2, rvalue2)),
+ ) => place == place2 && rvalue_eq(rvalue, rvalue2),
+ (x, y) => x == y,
+ };
+ debug!("statement_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
+ res
+}
+
+fn rvalue_eq(lhs: &Rvalue<'tcx>, rhs: &Rvalue<'tcx>) -> bool {
+ let res = match (lhs, rhs) {
+ (Rvalue::Use(op1), Rvalue::Use(op2)) => operand_eq(op1, op2),
+ (x, y) => x == y,
+ };
+ debug!("rvalue_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
+ res
+}
+
+fn operand_eq(lhs: &Operand<'tcx>, rhs: &Operand<'tcx>) -> bool {
+ let res = match (lhs, rhs) {
+ (
+ Operand::Constant(box Constant { user_ty: _, literal, span: _ }),
+ Operand::Constant(box Constant { user_ty: _, literal: literal2, span: _ }),
+ ) => literal == literal2,
+ (x, y) => x == y,
+ };
+ debug!("operand_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
+ res
+}
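Outside of MIR, the strategy in `find_duplicates` above boils down to: hash items through a wrapper that ignores irrelevant fields, walk in reverse so the highest index becomes canonical, and record replacements via the map's `Entry` API. A simplified standalone sketch with stand-in types:

```rust
use std::collections::hash_map::{Entry, HashMap};
use std::hash::{Hash, Hasher};

#[derive(Debug)]
struct Block {
    label: String,
    body: Vec<u8>,
}

// Wrapper whose Hash/Eq deliberately ignore `label`, just as the compiler's
// BasicBlockHashable ignores spans by hashing only statement kinds.
struct Key<'a>(&'a Block);

impl Hash for Key<'_> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.body.hash(state);
    }
}
impl PartialEq for Key<'_> {
    fn eq(&self, other: &Self) -> bool {
        self.0.body == other.0.body
    }
}
impl Eq for Key<'_> {}

fn find_duplicates(blocks: &[Block]) -> HashMap<usize, usize> {
    let mut canonical: HashMap<Key<'_>, usize> = HashMap::new();
    let mut replacements = HashMap::new();
    // Reverse order: the highest-indexed duplicate is kept as the canonical copy.
    for (idx, block) in blocks.iter().enumerate().rev() {
        match canonical.entry(Key(block)) {
            Entry::Occupied(occupied) => {
                replacements.insert(idx, *occupied.get());
            }
            Entry::Vacant(vacant) => {
                vacant.insert(idx);
            }
        }
    }
    replacements
}

fn main() {
    let blocks = vec![
        Block { label: "bb0".into(), body: vec![1, 2] },
        Block { label: "bb1".into(), body: vec![1, 2] },
    ];
    let dups = find_duplicates(&blocks);
    assert_eq!(dups.get(&0), Some(&1)); // bb0 is redirected to the later bb1
}
```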
diff --git a/compiler/rustc_mir/src/transform/dest_prop.rs b/compiler/rustc_mir/src/transform/dest_prop.rs
index 46de5db..6656dea 100644
--- a/compiler/rustc_mir/src/transform/dest_prop.rs
+++ b/compiler/rustc_mir/src/transform/dest_prop.rs
@@ -127,9 +127,14 @@
impl<'tcx> MirPass<'tcx> for DestinationPropagation {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- // Only run at mir-opt-level=2 or higher for now (we don't fix up debuginfo and remove
+ // FIXME(#79191, #82678)
+ if !tcx.sess.opts.debugging_opts.unsound_mir_opts {
+ return;
+ }
+
+ // Only run at mir-opt-level=3 or higher for now (we don't fix up debuginfo and remove
// storage statements at the moment).
- if tcx.sess.opts.debugging_opts.mir_opt_level <= 1 {
+ if tcx.sess.mir_opt_level() < 3 {
return;
}
@@ -582,6 +587,7 @@
| StatementKind::FakeRead(..)
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
| StatementKind::Nop => {}
}
}
diff --git a/compiler/rustc_mir/src/transform/early_otherwise_branch.rs b/compiler/rustc_mir/src/transform/early_otherwise_branch.rs
index b16a99d..f7ea9fa 100644
--- a/compiler/rustc_mir/src/transform/early_otherwise_branch.rs
+++ b/compiler/rustc_mir/src/transform/early_otherwise_branch.rs
@@ -26,7 +26,12 @@
impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- if tcx.sess.opts.debugging_opts.mir_opt_level < 2 {
+ // FIXME(#78496)
+ if !tcx.sess.opts.debugging_opts.unsound_mir_opts {
+ return;
+ }
+
+ if tcx.sess.mir_opt_level() < 3 {
return;
}
trace!("running EarlyOtherwiseBranch on {:?}", body.source);
@@ -91,8 +96,10 @@
opt_to_apply.infos[0].first_switch_info.discr_used_in_switch;
let not_equal_rvalue = Rvalue::BinaryOp(
not_equal,
- Operand::Copy(Place::from(second_discriminant_temp)),
- Operand::Copy(first_descriminant_place),
+ box (
+ Operand::Copy(Place::from(second_discriminant_temp)),
+ Operand::Copy(first_descriminant_place),
+ ),
);
patch.add_statement(
end_of_block_location,
diff --git a/compiler/rustc_mir/src/transform/elaborate_drops.rs b/compiler/rustc_mir/src/transform/elaborate_drops.rs
index 3d435f6..c0fcfb6 100644
--- a/compiler/rustc_mir/src/transform/elaborate_drops.rs
+++ b/compiler/rustc_mir/src/transform/elaborate_drops.rs
@@ -471,7 +471,7 @@
Rvalue::Use(Operand::Constant(Box::new(Constant {
span,
user_ty: None,
- literal: ty::Const::from_bool(self.tcx, val),
+ literal: ty::Const::from_bool(self.tcx, val).into(),
})))
}
diff --git a/compiler/rustc_mir/src/transform/generator.rs b/compiler/rustc_mir/src/transform/generator.rs
index dc413f8..c85e9b9 100644
--- a/compiler/rustc_mir/src/transform/generator.rs
+++ b/compiler/rustc_mir/src/transform/generator.rs
@@ -989,7 +989,7 @@
cond: Operand::Constant(box Constant {
span: body.span,
user_ty: None,
- literal: ty::Const::from_bool(tcx, false),
+ literal: ty::Const::from_bool(tcx, false).into(),
}),
expected: true,
msg: message,
@@ -1007,9 +1007,9 @@
assert_block
}
-fn can_return<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
+fn can_return<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
// Returning from a function with an uninhabited return type is undefined behavior.
- if body.return_ty().conservative_is_privately_uninhabited(tcx) {
+ if tcx.conservative_is_privately_uninhabited(param_env.and(body.return_ty())) {
return false;
}
@@ -1111,7 +1111,7 @@
cases.insert(0, (UNRESUMED, BasicBlock::new(0)));
// Panic when resumed on the returned or poisoned state
- let generator_kind = body.generator_kind.unwrap();
+ let generator_kind = body.generator_kind().unwrap();
if can_unwind {
cases.insert(
@@ -1236,14 +1236,14 @@
impl<'tcx> MirPass<'tcx> for StateTransform {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- let yield_ty = if let Some(yield_ty) = body.yield_ty {
+ let yield_ty = if let Some(yield_ty) = body.yield_ty() {
yield_ty
} else {
// This only applies to generators
return;
};
- assert!(body.generator_drop.is_none());
+ assert!(body.generator_drop().is_none());
// The first argument is the generator type passed by value
let gen_ty = body.local_decls.raw[1].ty;
@@ -1320,7 +1320,7 @@
// `storage_liveness` tells us which locals have live storage at suspension points
let (remap, layout, storage_liveness) = compute_layout(liveness_info, body);
- let can_return = can_return(tcx, body);
+ let can_return = can_return(tcx, body, tcx.param_env(body.source.def_id()));
// Run the transformation which converts Places from Local to generator struct
// accesses for locals in `remap`.
@@ -1340,10 +1340,11 @@
transform.visit_body(body);
// Update our MIR struct to reflect the changes we've made
- body.yield_ty = None;
body.arg_count = 2; // self, resume arg
body.spread_arg = None;
- body.generator_layout = Some(layout);
+
+ body.generator.as_mut().unwrap().yield_ty = None;
+ body.generator.as_mut().unwrap().generator_layout = Some(layout);
// Insert `drop(generator_struct)` which is used to drop upvars for generators in
// the unresumed state.
@@ -1362,7 +1363,7 @@
// Create a copy of our MIR and use it to create the drop shim for the generator
let drop_shim = create_generator_drop_shim(tcx, &transform, gen_ty, body, drop_clean);
- body.generator_drop = Some(box drop_shim);
+ body.generator.as_mut().unwrap().generator_drop = Some(drop_shim);
// Create the Generator::resume function
create_generator_resume_function(tcx, transform, body, can_return);
@@ -1453,6 +1454,7 @@
| StatementKind::Retag(..)
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
| StatementKind::Nop => {}
}
}
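
The generator changes above stop touching loose fields on Body (yield_ty, generator_layout, generator_drop) and instead go through accessor methods backed by a single optional generator sub-struct. A rough sketch of that shape; the field and type names below are illustrative stand-ins, not the actual rustc_middle definitions:

// Illustrative stand-ins only; the real GeneratorInfo/Body carry far more state.
#[derive(Debug, Clone, Copy, PartialEq)]
enum GeneratorKind { Async, Gen }

struct GeneratorInfo {
    yield_ty: Option<&'static str>,
    generator_kind: Option<GeneratorKind>,
    generator_layout: Option<String>,
}

struct Body {
    // Present only for generator bodies; None for ordinary functions.
    generator: Option<Box<GeneratorInfo>>,
}

impl Body {
    fn yield_ty(&self) -> Option<&'static str> {
        self.generator.as_ref().and_then(|g| g.yield_ty)
    }
    fn generator_kind(&self) -> Option<GeneratorKind> {
        self.generator.as_ref().and_then(|g| g.generator_kind)
    }
    fn generator_layout(&self) -> Option<&String> {
        self.generator.as_ref().and_then(|g| g.generator_layout.as_ref())
    }
}

fn main() {
    let mut body = Body {
        generator: Some(Box::new(GeneratorInfo {
            yield_ty: Some("i32"),
            generator_kind: Some(GeneratorKind::Gen),
            generator_layout: None,
        })),
    };
    assert_eq!(body.yield_ty(), Some("i32"));
    assert_eq!(body.generator_kind(), Some(GeneratorKind::Gen));
    assert!(body.generator_layout().is_none());

    // The transform clears the yield type once lowering is done, mirroring
    // `body.generator.as_mut().unwrap().yield_ty = None;` in the hunk above.
    body.generator.as_mut().unwrap().yield_ty = None;
    assert_eq!(body.yield_ty(), None);
}
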
diff --git a/compiler/rustc_mir/src/transform/inline.rs b/compiler/rustc_mir/src/transform/inline.rs
index 1635a95..12fdbd6 100644
--- a/compiler/rustc_mir/src/transform/inline.rs
+++ b/compiler/rustc_mir/src/transform/inline.rs
@@ -1,6 +1,6 @@
//! Inlining pass for MIR functions
-use rustc_attr as attr;
+use rustc_attr::InlineAttr;
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::Idx;
@@ -37,21 +37,18 @@
source_info: SourceInfo,
}
+/// Returns true if MIR inlining is enabled in the current compilation session.
+crate fn is_enabled(tcx: TyCtxt<'_>) -> bool {
+ if let Some(enabled) = tcx.sess.opts.debugging_opts.inline_mir {
+ return enabled;
+ }
+
+ tcx.sess.mir_opt_level() >= 3
+}
+
impl<'tcx> MirPass<'tcx> for Inline {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- // If you change this optimization level, also change the level in
- // `mir_drops_elaborated_and_const_checked` for the call to `mir_inliner_callees`.
- // Otherwise you will get an ICE about stolen MIR.
- if tcx.sess.opts.debugging_opts.mir_opt_level < 2 {
- return;
- }
-
- if tcx.sess.opts.debugging_opts.instrument_coverage {
- // Since `Inline` happens after `InstrumentCoverage`, the function-specific coverage
- // counters can be invalidated, such as by merging coverage counter statements from
- // a pre-inlined function into a different function. This kind of change is invalid,
- // so inlining must be skipped. Note: This check is performed here so inlining can
- // be disabled without preventing other optimizations (regardless of `mir_opt_level`).
+ if !is_enabled(tcx) {
return;
}
@@ -106,72 +103,90 @@
impl Inliner<'tcx> {
fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) {
for bb in blocks {
- let callsite = match self.get_valid_function_call(bb, &caller_body[bb], caller_body) {
+ let bb_data = &caller_body[bb];
+ if bb_data.is_cleanup {
+ continue;
+ }
+
+ let callsite = match self.resolve_callsite(caller_body, bb, bb_data) {
None => continue,
Some(it) => it,
};
+
let span = trace_span!("process_blocks", %callsite.callee, ?bb);
let _guard = span.enter();
- trace!(
- "checking for self recursion ({:?} vs body_source: {:?})",
- callsite.callee.def_id(),
- caller_body.source.def_id()
- );
- if callsite.callee.def_id() == caller_body.source.def_id() {
- debug!("Not inlining a function into itself");
- continue;
+ match self.try_inlining(caller_body, &callsite) {
+ Err(reason) => {
+ debug!("not-inlined {} [{}]", callsite.callee, reason);
+ continue;
+ }
+ Ok(new_blocks) => {
+ debug!("inlined {}", callsite.callee);
+ self.changed = true;
+ self.history.push(callsite.callee);
+ self.process_blocks(caller_body, new_blocks);
+ self.history.pop();
+ }
}
-
- if !self.is_mir_available(callsite.callee, caller_body) {
- debug!("MIR unavailable {}", callsite.callee);
- continue;
- }
-
- let span = trace_span!("instance_mir", %callsite.callee);
- let instance_mir_guard = span.enter();
- let callee_body = self.tcx.instance_mir(callsite.callee.def);
- drop(instance_mir_guard);
- if !self.should_inline(callsite, callee_body) {
- continue;
- }
-
- if !self.tcx.consider_optimizing(|| {
- format!("Inline {:?} into {}", callee_body.span, callsite.callee)
- }) {
- return;
- }
-
- let callee_body = callsite.callee.subst_mir_and_normalize_erasing_regions(
- self.tcx,
- self.param_env,
- callee_body.clone(),
- );
-
- let old_blocks = caller_body.basic_blocks().next_index();
- self.inline_call(callsite, caller_body, callee_body);
- let new_blocks = old_blocks..caller_body.basic_blocks().next_index();
- self.changed = true;
-
- self.history.push(callsite.callee);
- self.process_blocks(caller_body, new_blocks);
- self.history.pop();
}
}
- #[instrument(skip(self, caller_body))]
- fn is_mir_available(&self, callee: Instance<'tcx>, caller_body: &Body<'tcx>) -> bool {
+ /// Attempts to inline a callsite into the caller body. When successful returns basic blocks
+ /// containing the inlined body. Otherwise returns an error describing why inlining didn't take
+ /// place.
+ fn try_inlining(
+ &self,
+ caller_body: &mut Body<'tcx>,
+ callsite: &CallSite<'tcx>,
+ ) -> Result<std::ops::Range<BasicBlock>, &'static str> {
+ let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id());
+ self.check_codegen_attributes(callsite, callee_attrs)?;
+ self.check_mir_is_available(caller_body, &callsite.callee)?;
+ let callee_body = self.tcx.instance_mir(callsite.callee.def);
+ self.check_mir_body(callsite, callee_body, callee_attrs)?;
+
+ if !self.tcx.consider_optimizing(|| {
+ format!("Inline {:?} into {}", callee_body.span, callsite.callee)
+ }) {
+ return Err("optimization fuel exhausted");
+ }
+
+ let callee_body = callsite.callee.subst_mir_and_normalize_erasing_regions(
+ self.tcx,
+ self.param_env,
+ callee_body.clone(),
+ );
+
+ let old_blocks = caller_body.basic_blocks().next_index();
+ self.inline_call(caller_body, &callsite, callee_body);
+ let new_blocks = old_blocks..caller_body.basic_blocks().next_index();
+
+ Ok(new_blocks)
+ }
+
+ fn check_mir_is_available(
+ &self,
+ caller_body: &Body<'tcx>,
+ callee: &Instance<'tcx>,
+ ) -> Result<(), &'static str> {
+ if callee.def_id() == caller_body.source.def_id() {
+ return Err("self-recursion");
+ }
+
match callee.def {
InstanceDef::Item(_) => {
// If there is no MIR available (either because it was not in metadata or
// because it has no MIR because it's an extern function), then the inliner
// won't cause cycles on this.
if !self.tcx.is_mir_available(callee.def_id()) {
- return false;
+ return Err("item MIR unavailable");
}
}
// These have no own callable MIR.
- InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => return false,
+ InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => {
+ return Err("instance without MIR (intrinsic / virtual)");
+ }
// This cannot result in an immediate cycle since the callee MIR is a shim, which does
// not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
// do not need to catch this here, we can wait until the inliner decides to continue
@@ -181,13 +196,13 @@
| InstanceDef::FnPtrShim(..)
| InstanceDef::ClosureOnceShim { .. }
| InstanceDef::DropGlue(..)
- | InstanceDef::CloneShim(..) => return true,
+ | InstanceDef::CloneShim(..) => return Ok(()),
}
if self.tcx.is_constructor(callee.def_id()) {
trace!("constructors always have MIR");
// Constructor functions cannot cause a query cycle.
- return true;
+ return Ok(());
}
if let Some(callee_def_id) = callee.def_id().as_local() {
@@ -196,39 +211,44 @@
// since their `optimized_mir` is used for layout computation, which can
// create a cycle, even when no attempt is made to inline the function
// in the other direction.
- caller_body.generator_kind.is_none()
- && (
- // Avoid a cycle here by only using `instance_mir` only if we have
- // a lower `HirId` than the callee. This ensures that the callee will
- // not inline us. This trick only works without incremental compilation.
- // So don't do it if that is enabled.
- !self.tcx.dep_graph.is_fully_enabled()
- && self.hir_id < callee_hir_id
- // If we know for sure that the function we're calling will itself try to
- // call us, then we avoid inlining that function.
- || !self.tcx.mir_callgraph_reachable((callee, caller_body.source.def_id().expect_local()))
- )
+ if caller_body.generator.is_some() {
+ return Err("local generator (query cycle avoidance)");
+ }
+
+ // Avoid a cycle here by only using `instance_mir` only if we have
+ // a lower `HirId` than the callee. This ensures that the callee will
+ // not inline us. This trick only works without incremental compilation.
+ // So don't do it if that is enabled.
+ if !self.tcx.dep_graph.is_fully_enabled() && self.hir_id < callee_hir_id {
+ return Ok(());
+ }
+
+ // If we know for sure that the function we're calling will itself try to
+ // call us, then we avoid inlining that function.
+ if self
+ .tcx
+ .mir_callgraph_reachable((*callee, caller_body.source.def_id().expect_local()))
+ {
+ return Err("caller might be reachable from callee (query cycle avoidance)");
+ }
+
+ Ok(())
} else {
// This cannot result in an immediate cycle since the callee MIR is from another crate
// and is already optimized. Any subsequent inlining may cause cycles, but we do
// not need to catch this here, we can wait until the inliner decides to continue
// inlining a second time.
trace!("functions from other crates always have MIR");
- true
+ Ok(())
}
}
- fn get_valid_function_call(
+ fn resolve_callsite(
&self,
+ caller_body: &Body<'tcx>,
bb: BasicBlock,
bb_data: &BasicBlockData<'tcx>,
- caller_body: &Body<'tcx>,
) -> Option<CallSite<'tcx>> {
- // Don't inline calls that are in cleanup blocks.
- if bb_data.is_cleanup {
- return None;
- }
-
// Only consider direct calls to functions
let terminator = bb_data.terminator();
if let TerminatorKind::Call { ref func, ref destination, .. } = terminator.kind {
@@ -258,73 +278,73 @@
None
}
- #[instrument(skip(self, callee_body))]
- fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
- let tcx = self.tcx;
-
- if callsite.fn_sig.c_variadic() {
- debug!("callee is variadic - not inlining");
- return false;
+ /// Returns an error if inlining is not possible based on codegen attributes alone. A success
+ /// indicates that inlining decision should be based on other criteria.
+ fn check_codegen_attributes(
+ &self,
+ callsite: &CallSite<'tcx>,
+ callee_attrs: &CodegenFnAttrs,
+ ) -> Result<(), &'static str> {
+ if let InlineAttr::Never = callee_attrs.inline {
+ return Err("never inline hint");
}
- let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee.def_id());
-
- let self_features = &self.codegen_fn_attrs.target_features;
- let callee_features = &codegen_fn_attrs.target_features;
- if callee_features.iter().any(|feature| !self_features.contains(feature)) {
- debug!("`callee has extra target features - not inlining");
- return false;
- }
-
- if self.codegen_fn_attrs.no_sanitize != codegen_fn_attrs.no_sanitize {
- debug!("`callee has incompatible no_sanitize attribute - not inlining");
- return false;
- }
-
- if self.codegen_fn_attrs.instruction_set != codegen_fn_attrs.instruction_set {
- debug!("`callee has incompatible instruction set - not inlining");
- return false;
- }
-
- let hinted = match codegen_fn_attrs.inline {
- // Just treat inline(always) as a hint for now,
- // there are cases that prevent inlining that we
- // need to check for first.
- attr::InlineAttr::Always => true,
- attr::InlineAttr::Never => {
- debug!("`#[inline(never)]` present - not inlining");
- return false;
- }
- attr::InlineAttr::Hint => true,
- attr::InlineAttr::None => false,
- };
-
// Only inline local functions if they would be eligible for cross-crate
// inlining. This is to ensure that the final crate doesn't have MIR that
// reference unexported symbols
if callsite.callee.def_id().is_local() {
- if callsite.callee.substs.non_erasable_generics().count() == 0 && !hinted {
- debug!(" callee is an exported function - not inlining");
- return false;
+ let is_generic = callsite.callee.substs.non_erasable_generics().next().is_some();
+ if !is_generic && !callee_attrs.requests_inline() {
+ return Err("not exported");
}
}
- let mut threshold = if hinted {
- self.tcx.sess.opts.debugging_opts.inline_mir_hint_threshold
+ if callsite.fn_sig.c_variadic() {
+ return Err("C variadic");
+ }
+
+ if callee_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ return Err("naked");
+ }
+
+ if callee_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
+ return Err("cold");
+ }
+
+ if callee_attrs.no_sanitize != self.codegen_fn_attrs.no_sanitize {
+ return Err("incompatible sanitizer set");
+ }
+
+ if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set {
+ return Err("incompatible instruction set");
+ }
+
+ for feature in &callee_attrs.target_features {
+ if !self.codegen_fn_attrs.target_features.contains(feature) {
+ return Err("incompatible target feature");
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Returns inlining decision that is based on the examination of callee MIR body.
+ /// Assumes that codegen attributes have been checked for compatibility already.
+ #[instrument(level = "debug", skip(self, callee_body))]
+ fn check_mir_body(
+ &self,
+ callsite: &CallSite<'tcx>,
+ callee_body: &Body<'tcx>,
+ callee_attrs: &CodegenFnAttrs,
+ ) -> Result<(), &'static str> {
+ let tcx = self.tcx;
+
+ let mut threshold = if callee_attrs.requests_inline() {
+ self.tcx.sess.opts.debugging_opts.inline_mir_hint_threshold.unwrap_or(100)
} else {
- self.tcx.sess.opts.debugging_opts.inline_mir_threshold
+ self.tcx.sess.opts.debugging_opts.inline_mir_threshold.unwrap_or(50)
};
- if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
- debug!("#[naked] present - not inlining");
- return false;
- }
-
- if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
- debug!("#[cold] present - not inlining");
- return false;
- }
-
// Give a bonus to functions with a small number of blocks,
// We normally have two or three blocks for even
// very small functions.
@@ -387,17 +407,16 @@
TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
if let ty::FnDef(def_id, substs) =
- *callsite.callee.subst_mir(self.tcx, &f.literal.ty).kind()
+ *callsite.callee.subst_mir(self.tcx, &f.literal.ty()).kind()
{
let substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
if let Ok(Some(instance)) =
Instance::resolve(self.tcx, self.param_env, def_id, substs)
{
- if callsite.callee.def_id() == instance.def_id()
- || self.history.contains(&instance)
- {
- debug!("`callee is recursive - not inlining");
- return false;
+ if callsite.callee.def_id() == instance.def_id() {
+ return Err("self-recursion");
+ } else if self.history.contains(&instance) {
+ return Err("already inlined");
}
}
// Don't give intrinsics the extra penalty for calls
@@ -450,24 +469,24 @@
}
}
- if let attr::InlineAttr::Always = codegen_fn_attrs.inline {
+ if let InlineAttr::Always = callee_attrs.inline {
debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
- true
+ Ok(())
} else {
if cost <= threshold {
debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
- true
+ Ok(())
} else {
debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
- false
+ Err("cost above threshold")
}
}
}
fn inline_call(
&self,
- callsite: CallSite<'tcx>,
caller_body: &mut Body<'tcx>,
+ callsite: &CallSite<'tcx>,
mut callee_body: Body<'tcx>,
) {
let terminator = caller_body[callsite.block].terminator.take().unwrap();
@@ -609,8 +628,11 @@
// `required_consts`, here we may not only have `ConstKind::Unevaluated`
// because we are calling `subst_and_normalize_erasing_regions`.
caller_body.required_consts.extend(
- callee_body.required_consts.iter().copied().filter(|&constant| {
- matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _))
+ callee_body.required_consts.iter().copied().filter(|&ct| {
+ match ct.literal.const_for_ty() {
+ Some(ct) => matches!(ct.val, ConstKind::Unevaluated(_, _, _)),
+ None => true,
+ }
}),
);
}
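
One pattern worth calling out in the inliner rewrite above: every rejection is threaded through Result<_, &'static str>, so try_inlining can bail out with ? and the caller logs a single "not-inlined ... [reason]" line. A minimal, self-contained sketch of that decision-pipeline shape; the concrete checks and the threshold value are invented placeholders, not the real heuristics:

// Invented stand-in for a callee under consideration.
struct Callee {
    inline_never: bool,
    is_variadic: bool,
    cost: u32,
}

fn check_attributes(c: &Callee) -> Result<(), &'static str> {
    if c.inline_never {
        return Err("never inline hint");
    }
    if c.is_variadic {
        return Err("C variadic");
    }
    Ok(())
}

fn check_body(c: &Callee, threshold: u32) -> Result<(), &'static str> {
    if c.cost > threshold {
        return Err("cost above threshold");
    }
    Ok(())
}

// Mirrors the shape of `try_inlining`: each check either passes or yields a
// human-readable reason that the caller can log.
fn try_inlining(c: &Callee) -> Result<(), &'static str> {
    check_attributes(c)?;
    check_body(c, 50)?;
    Ok(())
}

fn main() {
    let callee = Callee { inline_never: false, is_variadic: false, cost: 120 };
    match try_inlining(&callee) {
        Ok(()) => println!("inlined"),
        Err(reason) => println!("not-inlined [{}]", reason),
    }
}
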
diff --git a/compiler/rustc_mir/src/transform/inline/cycle.rs b/compiler/rustc_mir/src/transform/inline/cycle.rs
index e4d403f..295f3ec 100644
--- a/compiler/rustc_mir/src/transform/inline/cycle.rs
+++ b/compiler/rustc_mir/src/transform/inline/cycle.rs
@@ -1,4 +1,5 @@
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sso::SsoHashSet;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::mir::TerminatorKind;
@@ -7,7 +8,7 @@
// FIXME: check whether it is cheaper to precompute the entire call graph instead of invoking
// this query ridiculously often.
-#[instrument(skip(tcx, root, target))]
+#[instrument(level = "debug", skip(tcx, root, target))]
crate fn mir_callgraph_reachable(
tcx: TyCtxt<'tcx>,
(root, target): (ty::Instance<'tcx>, LocalDefId),
@@ -27,7 +28,10 @@
!tcx.is_constructor(root.def_id()),
"you should not call `mir_callgraph_reachable` on enum/struct constructor functions"
);
- #[instrument(skip(tcx, param_env, target, stack, seen, recursion_limiter, caller))]
+ #[instrument(
+ level = "debug",
+ skip(tcx, param_env, target, stack, seen, recursion_limiter, caller)
+ )]
fn process(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -137,7 +141,7 @@
// Functions from other crates and MIR shims
_ => tcx.instance_mir(instance),
};
- let mut calls = Vec::new();
+ let mut calls = SsoHashSet::new();
for bb_data in body.basic_blocks() {
let terminator = bb_data.terminator();
if let TerminatorKind::Call { func, .. } = &terminator.kind {
@@ -146,12 +150,8 @@
ty::FnDef(def_id, substs) => (*def_id, *substs),
_ => continue,
};
- // We've seen this before
- if calls.contains(&call) {
- continue;
- }
- calls.push(call);
+ calls.insert(call);
}
}
- tcx.arena.alloc_slice(&calls)
+ tcx.arena.alloc_from_iter(calls.iter().copied())
}
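
The cycle.rs change above swaps a Vec plus contains() check for a set, turning per-callee deduplication from quadratic to roughly linear. SsoHashSet is the small-size-optimized set from rustc_data_structures; with only the standard library the same before/after pattern looks like this (sketch, data invented):

use std::collections::HashSet;

fn main() {
    // Pretend these are the (def_id, substs) pairs found in call terminators;
    // duplicates are common when the same function is called several times.
    let raw_calls = [(1, "A"), (2, "B"), (1, "A"), (3, "C"), (2, "B")];

    // Old shape: Vec::contains before every push -> O(n^2) overall.
    let mut calls_vec: Vec<(i32, &str)> = Vec::new();
    for &call in raw_calls.iter() {
        if !calls_vec.contains(&call) {
            calls_vec.push(call);
        }
    }

    // New shape: a hash set deduplicates in amortized O(1) per insert.
    let mut calls_set: HashSet<(i32, &str)> = HashSet::new();
    for &call in raw_calls.iter() {
        calls_set.insert(call);
    }

    assert_eq!(calls_vec.len(), calls_set.len());
    println!("unique calls: {}", calls_set.len());
}
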
diff --git a/compiler/rustc_mir/src/transform/instcombine.rs b/compiler/rustc_mir/src/transform/instcombine.rs
index 74dadb2..7aaf022 100644
--- a/compiler/rustc_mir/src/transform/instcombine.rs
+++ b/compiler/rustc_mir/src/transform/instcombine.rs
@@ -44,7 +44,7 @@
/// Transform boolean comparisons into logical operations.
fn combine_bool_cmp(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
match rvalue {
- Rvalue::BinaryOp(op @ (BinOp::Eq | BinOp::Ne), a, b) => {
+ Rvalue::BinaryOp(op @ (BinOp::Eq | BinOp::Ne), box (a, b)) => {
let new = match (op, self.try_eval_bool(a), self.try_eval_bool(b)) {
// Transform "Eq(a, true)" ==> "a"
(BinOp::Eq, _, Some(true)) => Some(a.clone()),
@@ -79,7 +79,7 @@
fn try_eval_bool(&self, a: &Operand<'_>) -> Option<bool> {
let a = a.constant()?;
- if a.literal.ty.is_bool() { a.literal.val.try_to_bool() } else { None }
+ if a.literal.ty().is_bool() { a.literal.try_to_bool() } else { None }
}
/// Transform "&(*a)" ==> "a".
@@ -110,12 +110,13 @@
fn combine_len(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
if let Rvalue::Len(ref place) = *rvalue {
let place_ty = place.ty(self.local_decls, self.tcx).ty;
- if let ty::Array(_, len) = place_ty.kind() {
+ if let ty::Array(_, len) = *place_ty.kind() {
if !self.should_combine(source_info, rvalue) {
return;
}
- let constant = Constant { span: source_info.span, literal: len, user_ty: None };
+ let constant =
+ Constant { span: source_info.span, literal: len.into(), user_ty: None };
*rvalue = Rvalue::Use(Operand::Constant(box constant));
}
}
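
Only the Eq(a, true) ==> a rewrite of combine_bool_cmp is visible in the hunk above; the general idea is a small rewrite table keyed on the operator and on which operand is a known boolean constant. A toy sketch of that table, with everything beyond the plain boolean identities invented for illustration ("x" stands for the non-constant operand):

enum BinOp { Eq, Ne }

// lhs_const/rhs_const are Some(..) when that operand is a known bool constant.
fn combine_bool_cmp(op: BinOp, lhs_const: Option<bool>, rhs_const: Option<bool>) -> Option<&'static str> {
    match (op, lhs_const, rhs_const) {
        // Transform "Eq(x, true)" ==> "x" (and the mirrored case).
        (BinOp::Eq, None, Some(true)) | (BinOp::Eq, Some(true), None) => Some("x"),
        // "Ne(x, false)" ==> "x" follows from the same boolean identity.
        (BinOp::Ne, None, Some(false)) | (BinOp::Ne, Some(false), None) => Some("x"),
        // Anything else is left alone in this sketch.
        _ => None,
    }
}

fn main() {
    assert_eq!(combine_bool_cmp(BinOp::Eq, None, Some(true)), Some("x"));
    assert_eq!(combine_bool_cmp(BinOp::Ne, None, Some(false)), Some("x"));
    assert_eq!(combine_bool_cmp(BinOp::Eq, None, None), None);
}
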
diff --git a/compiler/rustc_mir/src/transform/lower_intrinsics.rs b/compiler/rustc_mir/src/transform/lower_intrinsics.rs
index f596853..e6ee474 100644
--- a/compiler/rustc_mir/src/transform/lower_intrinsics.rs
+++ b/compiler/rustc_mir/src/transform/lower_intrinsics.rs
@@ -33,13 +33,34 @@
Rvalue::Use(Operand::Constant(box Constant {
span: terminator.source_info.span,
user_ty: None,
- literal: ty::Const::zero_sized(tcx, tcx.types.unit),
+ literal: ty::Const::zero_sized(tcx, tcx.types.unit).into(),
})),
)),
});
terminator.kind = TerminatorKind::Goto { target };
}
}
+ sym::copy_nonoverlapping => {
+ let target = destination.unwrap().1;
+ let mut args = args.drain(..);
+ block.statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::CopyNonOverlapping(
+ box rustc_middle::mir::CopyNonOverlapping {
+ src: args.next().unwrap(),
+ dst: args.next().unwrap(),
+ count: args.next().unwrap(),
+ },
+ ),
+ });
+ assert_eq!(
+ args.next(),
+ None,
+ "Extra argument for copy_non_overlapping intrinsic"
+ );
+ drop(args);
+ terminator.kind = TerminatorKind::Goto { target };
+ }
sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => {
if let Some((destination, target)) = *destination {
let lhs;
@@ -59,7 +80,7 @@
source_info: terminator.source_info,
kind: StatementKind::Assign(box (
destination,
- Rvalue::BinaryOp(bin_op, lhs, rhs),
+ Rvalue::BinaryOp(bin_op, box (lhs, rhs)),
)),
});
terminator.kind = TerminatorKind::Goto { target };
diff --git a/compiler/rustc_mir/src/transform/match_branches.rs b/compiler/rustc_mir/src/transform/match_branches.rs
index 53eeecc..d04a701 100644
--- a/compiler/rustc_mir/src/transform/match_branches.rs
+++ b/compiler/rustc_mir/src/transform/match_branches.rs
@@ -2,6 +2,8 @@
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
+use super::simplify::simplify_cfg;
+
pub struct MatchBranchSimplification;
/// If a source block is found that switches between two blocks that are exactly
@@ -38,13 +40,15 @@
impl<'tcx> MirPass<'tcx> for MatchBranchSimplification {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- if tcx.sess.opts.debugging_opts.mir_opt_level <= 1 {
+ if tcx.sess.mir_opt_level() < 3 {
return;
}
- let param_env = tcx.param_env(body.source.def_id());
let def_id = body.source.def_id();
+ let param_env = tcx.param_env(def_id);
+
let (bbs, local_decls) = body.basic_blocks_and_local_decls_mut();
+ let mut should_cleanup = false;
'outer: for bb_idx in bbs.indices() {
if !tcx.consider_optimizing(|| format!("MatchBranchSimplification {:?} ", def_id)) {
continue;
@@ -89,8 +93,8 @@
StatementKind::Assign(box (lhs_f, Rvalue::Use(Operand::Constant(f_c)))),
StatementKind::Assign(box (lhs_s, Rvalue::Use(Operand::Constant(s_c)))),
) if lhs_f == lhs_s
- && f_c.literal.ty.is_bool()
- && s_c.literal.ty.is_bool()
+ && f_c.literal.ty().is_bool()
+ && s_c.literal.ty().is_bool()
&& f_c.literal.try_eval_bool(tcx, param_env).is_some()
&& s_c.literal.try_eval_bool(tcx, param_env).is_some() => {}
@@ -135,8 +139,7 @@
let op = if f_b { BinOp::Eq } else { BinOp::Ne };
let rhs = Rvalue::BinaryOp(
op,
- Operand::Copy(Place::from(discr_local)),
- const_cmp,
+ box (Operand::Copy(Place::from(discr_local)), const_cmp),
);
Statement {
source_info: f.source_info,
@@ -159,6 +162,11 @@
from.statements
.push(Statement { source_info, kind: StatementKind::StorageDead(discr_local) });
from.terminator_mut().kind = first.terminator().kind.clone();
+ should_cleanup = true;
+ }
+
+ if should_cleanup {
+ simplify_cfg(body);
}
}
}
diff --git a/compiler/rustc_mir/src/transform/mod.rs b/compiler/rustc_mir/src/transform/mod.rs
index 2786127..1354644 100644
--- a/compiler/rustc_mir/src/transform/mod.rs
+++ b/compiler/rustc_mir/src/transform/mod.rs
@@ -22,9 +22,11 @@
pub mod check_unsafety;
pub mod cleanup_post_borrowck;
pub mod const_debuginfo;
+pub mod const_goto;
pub mod const_prop;
pub mod coverage;
pub mod deaggregator;
+pub mod deduplicate_blocks;
pub mod dest_prop;
pub mod dump_mir;
pub mod early_otherwise_branch;
@@ -40,6 +42,7 @@
pub mod nrvo;
pub mod promote_consts;
pub mod remove_noop_landing_pads;
+pub mod remove_storage_markers;
pub mod remove_unneeded_drops;
pub mod required_consts;
pub mod rustc_peek;
@@ -427,8 +430,7 @@
let def = ty::WithOptConstParam::unknown(did);
// Do not compute the mir call graph without said call graph actually being used.
- // Keep this in sync with the mir inliner's optimization level.
- if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
+ if inline::is_enabled(tcx) {
let _ = tcx.mir_inliner_callees(ty::InstanceDef::Item(def));
}
}
@@ -473,7 +475,7 @@
}
fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- let mir_opt_level = tcx.sess.opts.debugging_opts.mir_opt_level;
+ let mir_opt_level = tcx.sess.mir_opt_level();
// Lowering generator control-flow and variables has to happen before we do anything else
// to them. We run some optimizations before that, because they may be harder to do on the state
@@ -491,6 +493,8 @@
// The main optimizations that we do on MIR.
let optimizations: &[&dyn MirPass<'tcx>] = &[
+ &remove_storage_markers::RemoveStorageMarkers,
+ &const_goto::ConstGoto,
&remove_unneeded_drops::RemoveUnneededDrops,
&match_branches::MatchBranchSimplification,
// inst combine is after MatchBranchSimplification to clean up Ne(_1, false)
@@ -510,6 +514,7 @@
&const_debuginfo::ConstDebugInfo,
&simplify::SimplifyLocals,
&multiple_return_terminators::MultipleReturnTerminators,
+ &deduplicate_blocks::DeduplicateBlocks,
];
// Optimizations to run even if mir optimizations have been disabled.
diff --git a/compiler/rustc_mir/src/transform/multiple_return_terminators.rs b/compiler/rustc_mir/src/transform/multiple_return_terminators.rs
index 6170866..4aaa0ba 100644
--- a/compiler/rustc_mir/src/transform/multiple_return_terminators.rs
+++ b/compiler/rustc_mir/src/transform/multiple_return_terminators.rs
@@ -10,7 +10,7 @@
impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- if tcx.sess.opts.debugging_opts.mir_opt_level < 3 {
+ if tcx.sess.mir_opt_level() < 4 {
return;
}
diff --git a/compiler/rustc_mir/src/transform/nrvo.rs b/compiler/rustc_mir/src/transform/nrvo.rs
index ce02fb2..445dc12 100644
--- a/compiler/rustc_mir/src/transform/nrvo.rs
+++ b/compiler/rustc_mir/src/transform/nrvo.rs
@@ -34,7 +34,7 @@
impl<'tcx> MirPass<'tcx> for RenameReturnPlace {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut mir::Body<'tcx>) {
- if tcx.sess.opts.debugging_opts.mir_opt_level == 0 {
+ if tcx.sess.mir_opt_level() == 0 {
return;
}
diff --git a/compiler/rustc_mir/src/transform/promote_consts.rs b/compiler/rustc_mir/src/transform/promote_consts.rs
index b4504a0..7db790c 100644
--- a/compiler/rustc_mir/src/transform/promote_consts.rs
+++ b/compiler/rustc_mir/src/transform/promote_consts.rs
@@ -643,7 +643,7 @@
self.validate_operand(operand)?;
}
- Rvalue::BinaryOp(op, lhs, rhs) | Rvalue::CheckedBinaryOp(op, lhs, rhs) => {
+ Rvalue::BinaryOp(op, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
let op = *op;
let lhs_ty = lhs.ty(self.body, self.tcx);
@@ -921,7 +921,7 @@
let unit = Rvalue::Use(Operand::Constant(box Constant {
span: statement.source_info.span,
user_ty: None,
- literal: ty::Const::zero_sized(self.tcx, self.tcx.types.unit),
+ literal: ty::Const::zero_sized(self.tcx, self.tcx.types.unit).into(),
}));
mem::replace(rhs, unit)
},
@@ -998,20 +998,22 @@
Operand::Constant(Box::new(Constant {
span,
user_ty: None,
- literal: tcx.mk_const(ty::Const {
- ty,
- val: ty::ConstKind::Unevaluated(
- def,
- InternalSubsts::for_item(tcx, def.did, |param, _| {
- if let ty::GenericParamDefKind::Lifetime = param.kind {
- tcx.lifetimes.re_erased.into()
- } else {
- tcx.mk_param_from_def(param)
- }
- }),
- Some(promoted_id),
- ),
- }),
+ literal: tcx
+ .mk_const(ty::Const {
+ ty,
+ val: ty::ConstKind::Unevaluated(
+ def,
+ InternalSubsts::for_item(tcx, def.did, |param, _| {
+ if let ty::GenericParamDefKind::Lifetime = param.kind {
+ tcx.lifetimes.re_erased.into()
+ } else {
+ tcx.mk_param_from_def(param)
+ }
+ }),
+ Some(promoted_id),
+ ),
+ })
+ .into(),
}))
};
let (blocks, local_decls) = self.source.basic_blocks_and_local_decls_mut();
@@ -1177,7 +1179,7 @@
0,
vec![],
body.span,
- body.generator_kind,
+ body.generator_kind(),
);
let promoter = Promoter {
@@ -1231,3 +1233,38 @@
promotions
}
+
+/// This function returns `true` if the function being called in the array
+/// repeat expression is a `const` function.
+crate fn is_const_fn_in_array_repeat_expression<'tcx>(
+ ccx: &ConstCx<'_, 'tcx>,
+ place: &Place<'tcx>,
+ body: &Body<'tcx>,
+) -> bool {
+ match place.as_local() {
+ // rule out cases such as: `let my_var = some_fn(); [my_var; N]`
+ Some(local) if body.local_decls[local].is_user_variable() => return false,
+ None => return false,
+ _ => {}
+ }
+
+ for block in body.basic_blocks() {
+ if let Some(Terminator { kind: TerminatorKind::Call { func, destination, .. }, .. }) =
+ &block.terminator
+ {
+ if let Operand::Constant(box Constant { literal, .. }) = func {
+ if let ty::FnDef(def_id, _) = *literal.ty().kind() {
+ if let Some((destination_place, _)) = destination {
+ if destination_place == place {
+ if is_const_fn(ccx.tcx, def_id) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ false
+}
diff --git a/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs b/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs
index 31e201c..5347846 100644
--- a/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs
@@ -55,6 +55,7 @@
StatementKind::Assign { .. }
| StatementKind::SetDiscriminant { .. }
| StatementKind::LlvmInlineAsm { .. }
+ | StatementKind::CopyNonOverlapping(..)
| StatementKind::Retag { .. } => {
return false;
}
diff --git a/compiler/rustc_mir/src/transform/remove_storage_markers.rs b/compiler/rustc_mir/src/transform/remove_storage_markers.rs
new file mode 100644
index 0000000..2d529fe
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/remove_storage_markers.rs
@@ -0,0 +1,25 @@
+//! This pass removes storage markers if they won't be emitted during codegen.
+
+use crate::transform::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+pub struct RemoveStorageMarkers;
+
+impl<'tcx> MirPass<'tcx> for RemoveStorageMarkers {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ if tcx.sess.emit_lifetime_markers() {
+ return;
+ }
+
+ trace!("Running RemoveStorageMarkers on {:?}", body.source);
+ for data in body.basic_blocks_mut() {
+ data.statements.retain(|statement| match statement.kind {
+ StatementKind::StorageLive(..)
+ | StatementKind::StorageDead(..)
+ | StatementKind::Nop => false,
+ _ => true,
+ })
+ }
+ }
+}
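
The new pass above is a compact example of the retain-on-statement-kinds idiom: when codegen will not emit lifetime markers anyway, each block simply drops its StorageLive/StorageDead (and Nop) statements in one pass. The same pattern in miniature, with placeholder statement kinds standing in for rustc_middle::mir::StatementKind:

#[derive(Debug, PartialEq)]
enum StatementKind {
    StorageLive(u32),
    StorageDead(u32),
    Assign(u32, i64),
    Nop,
}

fn remove_storage_markers(block: &mut Vec<StatementKind>) {
    // Keep everything that is not a storage marker or a no-op.
    block.retain(|stmt| {
        !matches!(
            stmt,
            StatementKind::StorageLive(..) | StatementKind::StorageDead(..) | StatementKind::Nop
        )
    });
}

fn main() {
    let mut block = vec![
        StatementKind::StorageLive(1),
        StatementKind::Assign(1, 42),
        StatementKind::StorageDead(1),
        StatementKind::Nop,
    ];
    remove_storage_markers(&mut block);
    assert_eq!(block, vec![StatementKind::Assign(1, 42)]);
}
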
diff --git a/compiler/rustc_mir/src/transform/required_consts.rs b/compiler/rustc_mir/src/transform/required_consts.rs
index a63ab30..2b518bd 100644
--- a/compiler/rustc_mir/src/transform/required_consts.rs
+++ b/compiler/rustc_mir/src/transform/required_consts.rs
@@ -14,10 +14,10 @@
impl<'a, 'tcx> Visitor<'tcx> for RequiredConstsVisitor<'a, 'tcx> {
fn visit_constant(&mut self, constant: &Constant<'tcx>, _: Location) {
- let const_kind = constant.literal.val;
-
- if let ConstKind::Unevaluated(_, _, _) = const_kind {
- self.required_consts.push(*constant);
+ if let Some(ct) = constant.literal.const_for_ty() {
+ if let ConstKind::Unevaluated(_, _, _) = ct.val {
+ self.required_consts.push(*constant);
+ }
}
}
}
diff --git a/compiler/rustc_mir/src/transform/rustc_peek.rs b/compiler/rustc_mir/src/transform/rustc_peek.rs
index 7598be4..a6b8f20 100644
--- a/compiler/rustc_mir/src/transform/rustc_peek.rs
+++ b/compiler/rustc_mir/src/transform/rustc_peek.rs
@@ -205,7 +205,7 @@
if let mir::TerminatorKind::Call { func: Operand::Constant(func), args, .. } =
&terminator.kind
{
- if let ty::FnDef(def_id, substs) = *func.literal.ty.kind() {
+ if let ty::FnDef(def_id, substs) = *func.literal.ty().kind() {
let sig = tcx.fn_sig(def_id);
let name = tcx.item_name(def_id);
if sig.abi() != Abi::RustIntrinsic || name != sym::rustc_peek {
diff --git a/compiler/rustc_mir/src/transform/simplify.rs b/compiler/rustc_mir/src/transform/simplify.rs
index 11539d3..d2314a9 100644
--- a/compiler/rustc_mir/src/transform/simplify.rs
+++ b/compiler/rustc_mir/src/transform/simplify.rs
@@ -31,10 +31,10 @@
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
+use rustc_middle::ty::ParamEnv;
use rustc_middle::ty::TyCtxt;
use smallvec::SmallVec;
-use std::borrow::Cow;
-use std::convert::TryInto;
+use std::{borrow::Cow, convert::TryInto};
pub struct SimplifyCfg {
label: String,
@@ -320,80 +320,91 @@
impl<'tcx> MirPass<'tcx> for SimplifyLocals {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
trace!("running SimplifyLocals on {:?}", body.source);
+ simplify_locals(body, tcx);
+ }
+}
- // First, we're going to get a count of *actual* uses for every `Local`.
- let mut used_locals = UsedLocals::new(body);
+pub fn simplify_locals<'tcx>(body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>) {
+ // First, we're going to get a count of *actual* uses for every `Local`.
+ let mut used_locals = UsedLocals::new(body, tcx);
- // Next, we're going to remove any `Local` with zero actual uses. When we remove those
- // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals`
- // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from
- // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a
- // fixedpoint where there are no more unused locals.
- remove_unused_definitions(&mut used_locals, body);
+ // Next, we're going to remove any `Local` with zero actual uses. When we remove those
+ // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals`
+ // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from
+ // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a
+ // fixedpoint where there are no more unused locals.
+ remove_unused_definitions(&mut used_locals, body);
- // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the `Local`s.
- let map = make_local_map(&mut body.local_decls, &used_locals);
+ // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the `Local`s.
+ let arg_count = body.arg_count.try_into().unwrap();
+ let map = make_local_map(&mut body.local_decls, &used_locals, arg_count);
- // Only bother running the `LocalUpdater` if we actually found locals to remove.
- if map.iter().any(Option::is_none) {
- // Update references to all vars and tmps now
- let mut updater = LocalUpdater { map, tcx };
- updater.visit_body(body);
+ // Only bother running the `LocalUpdater` if we actually found locals to remove.
+ if map.iter().any(Option::is_none) {
+ // Update references to all vars and tmps now
+ let mut updater = LocalUpdater { map, tcx };
+ updater.visit_body(body);
- body.local_decls.shrink_to_fit();
- }
+ body.local_decls.shrink_to_fit();
}
}
/// Construct the mapping while swapping out unused stuff out from the `vec`.
-fn make_local_map<V>(
+fn make_local_map<'tcx, V>(
local_decls: &mut IndexVec<Local, V>,
- used_locals: &UsedLocals,
+ used_locals: &UsedLocals<'tcx>,
+ arg_count: u32,
) -> IndexVec<Local, Option<Local>> {
- let mut map: IndexVec<Local, Option<Local>> = IndexVec::from_elem(None, &*local_decls);
+ let mut map: IndexVec<Local, Option<Local>> = IndexVec::from_elem(None, local_decls);
let mut used = Local::new(0);
for alive_index in local_decls.indices() {
- // `is_used` treats the `RETURN_PLACE` and arguments as used.
- if !used_locals.is_used(alive_index) {
- continue;
+ // When creating the local map treat the `RETURN_PLACE` and arguments as used.
+ if alive_index.as_u32() <= arg_count || used_locals.is_used(alive_index) {
+ map[alive_index] = Some(used);
+ if alive_index != used {
+ local_decls.swap(alive_index, used);
+ }
+ used.increment_by(1);
}
-
- map[alive_index] = Some(used);
- if alive_index != used {
- local_decls.swap(alive_index, used);
- }
- used.increment_by(1);
}
local_decls.truncate(used.index());
map
}
/// Keeps track of used & unused locals.
-struct UsedLocals {
+struct UsedLocals<'tcx> {
increment: bool,
- arg_count: u32,
use_count: IndexVec<Local, u32>,
+ is_static: bool,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+ param_env: ParamEnv<'tcx>,
+ tcx: TyCtxt<'tcx>,
}
-impl UsedLocals {
+impl UsedLocals<'tcx> {
/// Determines which locals are used & unused in the given body.
- fn new(body: &Body<'_>) -> Self {
+ fn new(body: &Body<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+ let def_id = body.source.def_id();
+ let is_static = tcx.is_static(def_id);
+ let param_env = tcx.param_env(def_id);
+ let local_decls = body.local_decls.clone();
let mut this = Self {
increment: true,
- arg_count: body.arg_count.try_into().unwrap(),
use_count: IndexVec::from_elem(0, &body.local_decls),
+ is_static,
+ local_decls,
+ param_env,
+ tcx,
};
this.visit_body(body);
this
}
/// Checks if local is used.
- ///
- /// Return place and arguments are always considered used.
fn is_used(&self, local: Local) -> bool {
trace!("is_used({:?}): use_count: {:?}", local, self.use_count[local]);
- local.as_u32() <= self.arg_count || self.use_count[local] != 0
+ self.use_count[local] != 0
}
/// Updates the use counts to reflect the removal of given statement.
@@ -413,8 +424,7 @@
} else {
// A definition. Although, it still might use other locals for indexing.
self.super_projection(
- place.local,
- &place.projection,
+ place.as_ref(),
PlaceContext::MutatingUse(MutatingUseContext::Projection),
location,
);
@@ -422,10 +432,11 @@
}
}
-impl Visitor<'_> for UsedLocals {
+impl Visitor<'tcx> for UsedLocals<'tcx> {
fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
match statement.kind {
StatementKind::LlvmInlineAsm(..)
+ | StatementKind::CopyNonOverlapping(..)
| StatementKind::Retag(..)
| StatementKind::Coverage(..)
| StatementKind::FakeRead(..)
@@ -448,7 +459,21 @@
}
}
- fn visit_local(&mut self, local: &Local, _ctx: PlaceContext, _location: Location) {
+ fn visit_local(&mut self, local: &Local, ctx: PlaceContext, _location: Location) {
+ debug!("local: {:?} is_static: {:?}, ctx: {:?}", local, self.is_static, ctx);
+ // Do not count _0 as a used in `return;` if it is a ZST.
+ let return_place = *local == RETURN_PLACE
+ && matches!(ctx, PlaceContext::NonMutatingUse(visit::NonMutatingUseContext::Move));
+ if !self.is_static && return_place {
+ let ty = self.local_decls[*local].ty;
+ let param_env_and = self.param_env.and(ty);
+ if let Ok(layout) = self.tcx.layout_of(param_env_and) {
+ debug!("layout.is_zst: {:?}", layout.is_zst());
+ if layout.is_zst() {
+ return;
+ }
+ }
+ }
if self.increment {
self.use_count[*local] += 1;
} else {
@@ -459,7 +484,10 @@
}
/// Removes unused definitions. Updates the used locals to reflect the changes made.
-fn remove_unused_definitions<'a, 'tcx>(used_locals: &'a mut UsedLocals, body: &mut Body<'tcx>) {
+fn remove_unused_definitions<'a, 'tcx>(
+ used_locals: &'a mut UsedLocals<'tcx>,
+ body: &mut Body<'tcx>,
+) {
// The use counts are updated as we remove the statements. A local might become unused
// during the retain operation, leading to a temporary inconsistency (storage statements or
// definitions referencing the local might remain). For correctness it is crucial that this
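
The reworked make_local_map above compacts local_decls in place: the return place, the arguments, and every local with a nonzero use count are swapped down to the front, the map records their new indices, and everything past the last kept slot is truncated. A standalone sketch of that swap-and-truncate compaction over a plain Vec (names and the usage predicate are invented):

/// Compacts `decls`, keeping index 0 (the return place), the first `arg_count`
/// arguments, and any index the predicate reports as used.
/// Returns a map from old index to new index.
fn make_local_map<T>(
    decls: &mut Vec<T>,
    arg_count: usize,
    is_used: impl Fn(usize) -> bool,
) -> Vec<Option<usize>> {
    let mut map = vec![None; decls.len()];
    let mut used = 0;
    for alive in 0..decls.len() {
        if alive <= arg_count || is_used(alive) {
            map[alive] = Some(used);
            if alive != used {
                decls.swap(alive, used);
            }
            used += 1;
        }
    }
    decls.truncate(used);
    map
}

fn main() {
    // Locals: _0 (return place), _1 (argument), _2.._4 (temporaries).
    let mut decls = vec!["_0", "_1", "_2", "_3", "_4"];
    let used_temporaries = [3usize]; // only _3 is actually used
    let map = make_local_map(&mut decls, 1, |i| used_temporaries.contains(&i));

    assert_eq!(decls, vec!["_0", "_1", "_3"]);
    assert_eq!(map, vec![Some(0), Some(1), None, Some(2), None]);
}
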
diff --git a/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs b/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs
index ea56080..9f473f3 100644
--- a/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs
+++ b/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs
@@ -80,14 +80,14 @@
// we convert the move in the comparison statement to a copy.
// unwrap is safe as we know this statement is an assign
- let box (_, rhs) = bb.statements[opt.bin_op_stmt_idx].kind.as_assign_mut().unwrap();
+ let (_, rhs) = bb.statements[opt.bin_op_stmt_idx].kind.as_assign_mut().unwrap();
use Operand::*;
match rhs {
- Rvalue::BinaryOp(_, ref mut left @ Move(_), Constant(_)) => {
+ Rvalue::BinaryOp(_, box (ref mut left @ Move(_), Constant(_))) => {
*left = Copy(opt.to_switch_on);
}
- Rvalue::BinaryOp(_, Constant(_), ref mut right @ Move(_)) => {
+ Rvalue::BinaryOp(_, box (Constant(_), ref mut right @ Move(_))) => {
*right = Copy(opt.to_switch_on);
}
_ => (),
@@ -166,7 +166,10 @@
if *lhs == place_switched_on =>
{
match rhs {
- Rvalue::BinaryOp(op @ (BinOp::Eq | BinOp::Ne), left, right) => {
+ Rvalue::BinaryOp(
+ op @ (BinOp::Eq | BinOp::Ne),
+ box (left, right),
+ ) => {
let (branch_value_scalar, branch_value_ty, to_switch_on) =
find_branch_value_info(left, right)?;
@@ -202,12 +205,12 @@
match (left, right) {
(Constant(branch_value), Copy(to_switch_on) | Move(to_switch_on))
| (Copy(to_switch_on) | Move(to_switch_on), Constant(branch_value)) => {
- let branch_value_ty = branch_value.literal.ty;
+ let branch_value_ty = branch_value.literal.ty();
// we only want to apply this optimization if we are matching on integrals (and chars), as it is not possible to switch on floats
if !branch_value_ty.is_integral() && !branch_value_ty.is_char() {
return None;
};
- let branch_value_scalar = branch_value.literal.val.try_to_scalar()?;
+ let branch_value_scalar = branch_value.literal.try_to_scalar()?;
Some((branch_value_scalar, branch_value_ty, *to_switch_on))
}
_ => None,
diff --git a/compiler/rustc_mir/src/transform/simplify_try.rs b/compiler/rustc_mir/src/transform/simplify_try.rs
index 05a8882..b42543c 100644
--- a/compiler/rustc_mir/src/transform/simplify_try.rs
+++ b/compiler/rustc_mir/src/transform/simplify_try.rs
@@ -696,8 +696,8 @@
/// _0 = move _1; // bb2
/// ```
/// In this case the two statements are equal iff
- /// 1: _0 is an enum where the variant index 0 is fieldless, and
- /// 2: bb1 was targeted by a switch where the discriminant of _1 was switched on
+ /// - `_0` is an enum where the variant index 0 is fieldless, and
+ /// - bb1 was targeted by a switch where the discriminant of `_1` was switched on
fn statement_equality(
&self,
adt_matched_on: Place<'tcx>,
diff --git a/compiler/rustc_mir/src/transform/unreachable_prop.rs b/compiler/rustc_mir/src/transform/unreachable_prop.rs
index e39c865..658c6b6 100644
--- a/compiler/rustc_mir/src/transform/unreachable_prop.rs
+++ b/compiler/rustc_mir/src/transform/unreachable_prop.rs
@@ -12,8 +12,8 @@
impl MirPass<'_> for UnreachablePropagation {
fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- if tcx.sess.opts.debugging_opts.mir_opt_level < 3 {
- // Enable only under -Zmir-opt-level=3 as in some cases (check the deeply-nested-opt
+ if tcx.sess.mir_opt_level() < 4 {
+ // Enable only under -Zmir-opt-level=4 as in some cases (check the deeply-nested-opt
// perf benchmark) LLVM may spend quite a lot of time optimizing the generated code.
return;
}
diff --git a/compiler/rustc_mir/src/transform/validate.rs b/compiler/rustc_mir/src/transform/validate.rs
index 29b90bf..d009b0b 100644
--- a/compiler/rustc_mir/src/transform/validate.rs
+++ b/compiler/rustc_mir/src/transform/validate.rs
@@ -294,7 +294,49 @@
);
}
}
- _ => {}
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ let src_ty = src.ty(&self.body.local_decls, self.tcx);
+ let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
+ src_deref.ty
+ } else {
+ self.fail(
+ location,
+ format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
+ );
+ return;
+ };
+ let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
+ let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
+ dst_deref.ty
+ } else {
+ self.fail(
+ location,
+ format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
+ );
+ return;
+ };
+ // since CopyNonOverlapping is parametrized by 1 type,
+ // we only need to check that they are equal and not keep an extra parameter.
+ if op_src_ty != op_dst_ty {
+ self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
+ }
+
+ let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
+ if op_cnt_ty != self.tcx.types.usize {
+ self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
+ }
+ }
+ StatementKind::SetDiscriminant { .. }
+ | StatementKind::StorageLive(..)
+ | StatementKind::StorageDead(..)
+ | StatementKind::LlvmInlineAsm(..)
+ | StatementKind::Retag(_, _)
+ | StatementKind::Coverage(_)
+ | StatementKind::Nop => {}
}
self.super_statement(statement, location);
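
The new CopyNonOverlapping arm in the validator above checks three things: src and dst must be pointers, their pointee types must agree (the statement is parametrized by a single element type), and count must be a usize. The same deref-and-compare shape over a deliberately tiny, made-up type representation:

// Made-up miniature type representation; builtin_deref mimics
// "give me the pointee type if this is a pointer".
#[derive(Debug, PartialEq)]
enum Ty {
    Usize,
    I32,
    RawPtr(Box<Ty>),
}

fn builtin_deref(ty: &Ty) -> Option<&Ty> {
    match ty {
        Ty::RawPtr(inner) => Some(&**inner),
        _ => None,
    }
}

fn check_copy_nonoverlapping(src: &Ty, dst: &Ty, count: &Ty) -> Result<(), String> {
    let src_pointee = builtin_deref(src)
        .ok_or_else(|| format!("expected src to be ptr, got: {:?}", src))?;
    let dst_pointee = builtin_deref(dst)
        .ok_or_else(|| format!("expected dst to be ptr, got: {:?}", dst))?;
    // A single element type parametrizes the copy, so the pointees must match.
    if src_pointee != dst_pointee {
        return Err(format!("bad arg ({:?} != {:?})", src_pointee, dst_pointee));
    }
    if *count != Ty::Usize {
        return Err(format!("bad arg ({:?} != usize)", count));
    }
    Ok(())
}

fn main() {
    let (src, dst) = (Ty::RawPtr(Box::new(Ty::I32)), Ty::RawPtr(Box::new(Ty::I32)));
    assert!(check_copy_nonoverlapping(&src, &dst, &Ty::Usize).is_ok());
    assert!(check_copy_nonoverlapping(&src, &dst, &Ty::I32).is_err());
}
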
diff --git a/compiler/rustc_mir/src/util/elaborate_drops.rs b/compiler/rustc_mir/src/util/elaborate_drops.rs
index 0e2d8e5..e9190d7 100644
--- a/compiler/rustc_mir/src/util/elaborate_drops.rs
+++ b/compiler/rustc_mir/src/util/elaborate_drops.rs
@@ -678,11 +678,14 @@
let one = self.constant_usize(1);
let (ptr_next, cur_next) = if ptr_based {
- (Rvalue::Use(copy(cur.into())), Rvalue::BinaryOp(BinOp::Offset, move_(cur.into()), one))
+ (
+ Rvalue::Use(copy(cur.into())),
+ Rvalue::BinaryOp(BinOp::Offset, box (move_(cur.into()), one)),
+ )
} else {
(
Rvalue::AddressOf(Mutability::Mut, tcx.mk_place_index(self.place, cur)),
- Rvalue::BinaryOp(BinOp::Add, move_(cur.into()), one),
+ Rvalue::BinaryOp(BinOp::Add, box (move_(cur.into()), one)),
)
};
@@ -700,7 +703,7 @@
let loop_block = BasicBlockData {
statements: vec![self.assign(
can_go,
- Rvalue::BinaryOp(BinOp::Eq, copy(Place::from(cur)), copy(length_or_end)),
+ Rvalue::BinaryOp(BinOp::Eq, box (copy(Place::from(cur)), copy(length_or_end))),
)],
is_cleanup: unwind.is_cleanup(),
terminator: Some(Terminator {
@@ -816,7 +819,10 @@
self.assign(cur, Rvalue::Cast(CastKind::Misc, Operand::Move(tmp), iter_ty)),
self.assign(
length_or_end,
- Rvalue::BinaryOp(BinOp::Offset, Operand::Copy(cur), Operand::Move(length)),
+ Rvalue::BinaryOp(
+ BinOp::Offset,
+ box (Operand::Copy(cur), Operand::Move(length)),
+ ),
),
]
} else {
@@ -1029,7 +1035,7 @@
Operand::Constant(box Constant {
span: self.source_info.span,
user_ty: None,
- literal: ty::Const::from_usize(self.tcx(), val.into()),
+ literal: ty::Const::from_usize(self.tcx(), val.into()).into(),
})
}
diff --git a/compiler/rustc_mir/src/util/find_self_call.rs b/compiler/rustc_mir/src/util/find_self_call.rs
index 5b146ee..ddda98d 100644
--- a/compiler/rustc_mir/src/util/find_self_call.rs
+++ b/compiler/rustc_mir/src/util/find_self_call.rs
@@ -17,8 +17,8 @@
&body[block].terminator
{
debug!("find_self_call: func={:?}", func);
- if let Operand::Constant(box Constant { literal: ty::Const { ty, .. }, .. }) = func {
- if let ty::FnDef(def_id, substs) = *ty.kind() {
+ if let Operand::Constant(box Constant { literal, .. }) = func {
+ if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
if let Some(ty::AssocItem { fn_has_self_parameter: true, .. }) =
tcx.opt_associated_item(def_id)
{
diff --git a/compiler/rustc_mir/src/util/generic_graphviz.rs b/compiler/rustc_mir/src/util/generic_graphviz.rs
index fd55a4d..fd41e28 100644
--- a/compiler/rustc_mir/src/util/generic_graphviz.rs
+++ b/compiler/rustc_mir/src/util/generic_graphviz.rs
@@ -6,8 +6,8 @@
pub struct GraphvizWriter<
'a,
G: graph::DirectedGraph + graph::WithSuccessors + graph::WithStartNode + graph::WithNumNodes,
- NodeContentFn: Fn(<G as rustc_data_structures::graph::DirectedGraph>::Node) -> Vec<String>,
- EdgeLabelsFn: Fn(<G as rustc_data_structures::graph::DirectedGraph>::Node) -> Vec<String>,
+ NodeContentFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+ EdgeLabelsFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
> {
graph: &'a G,
is_subgraph: bool,
@@ -20,8 +20,8 @@
impl<
'a,
G: graph::DirectedGraph + graph::WithSuccessors + graph::WithStartNode + graph::WithNumNodes,
- NodeContentFn: Fn(<G as rustc_data_structures::graph::DirectedGraph>::Node) -> Vec<String>,
- EdgeLabelsFn: Fn(<G as rustc_data_structures::graph::DirectedGraph>::Node) -> Vec<String>,
+ NodeContentFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+ EdgeLabelsFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
> GraphvizWriter<'a, G, NodeContentFn, EdgeLabelsFn>
{
pub fn new(
diff --git a/compiler/rustc_mir/src/util/graphviz.rs b/compiler/rustc_mir/src/util/graphviz.rs
index 37498e5..92c7a35 100644
--- a/compiler/rustc_mir/src/util/graphviz.rs
+++ b/compiler/rustc_mir/src/util/graphviz.rs
@@ -2,7 +2,7 @@
use rustc_graphviz as dot;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::*;
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{self, TyCtxt};
use std::fmt::Debug;
use std::io::{self, Write};
@@ -16,14 +16,27 @@
{
let def_ids = dump_mir_def_ids(tcx, single);
- let use_subgraphs = def_ids.len() > 1;
+ let mirs =
+ def_ids
+ .iter()
+ .flat_map(|def_id| {
+ if tcx.is_const_fn_raw(*def_id) {
+ vec![tcx.optimized_mir(*def_id), tcx.mir_for_ctfe(*def_id)]
+ } else {
+ vec![tcx.instance_mir(ty::InstanceDef::Item(ty::WithOptConstParam::unknown(
+ *def_id,
+ )))]
+ }
+ })
+ .collect::<Vec<_>>();
+
+ let use_subgraphs = mirs.len() > 1;
if use_subgraphs {
writeln!(w, "digraph __crate__ {{")?;
}
- for def_id in def_ids {
- let body = &tcx.optimized_mir(def_id);
- write_mir_fn_graphviz(tcx, body, use_subgraphs, w)?;
+ for mir in mirs {
+ write_mir_fn_graphviz(tcx, mir, use_subgraphs, w)?;
}
if use_subgraphs {
diff --git a/compiler/rustc_mir/src/util/pretty.rs b/compiler/rustc_mir/src/util/pretty.rs
index 7fc1c3a..1bf010f 100644
--- a/compiler/rustc_mir/src/util/pretty.rs
+++ b/compiler/rustc_mir/src/util/pretty.rs
@@ -131,7 +131,7 @@
Some(promoted) => write!(file, "::{:?}`", promoted)?,
}
writeln!(file, " {} {}", disambiguator, pass_name)?;
- if let Some(ref layout) = body.generator_layout {
+ if let Some(ref layout) = body.generator_layout() {
writeln!(file, "/* generator_layout = {:#?} */", layout)?;
}
writeln!(file)?;
@@ -289,19 +289,19 @@
}
Ok(())
};
- match tcx.hir().body_const_context(def_id.expect_local()) {
- None => render_body(w, tcx.optimized_mir(def_id))?,
- // For `const fn` we want to render the optimized MIR. If you want the mir used in
- // ctfe, you can dump the MIR after the `Deaggregator` optimization pass.
- Some(rustc_hir::ConstContext::ConstFn) => {
- render_body(w, tcx.optimized_mir(def_id))?;
- writeln!(w)?;
- writeln!(w, "// MIR FOR CTFE")?;
- // Do not use `render_body`, as that would render the promoteds again, but these
- // are shared between mir_for_ctfe and optimized_mir
- write_mir_fn(tcx, tcx.mir_for_ctfe(def_id), &mut |_, _| Ok(()), w)?;
- }
- Some(_) => render_body(w, tcx.mir_for_ctfe(def_id))?,
+
+ // For `const fn` we want to render both the optimized MIR and the MIR for ctfe.
+ if tcx.is_const_fn_raw(def_id) {
+ render_body(w, tcx.optimized_mir(def_id))?;
+ writeln!(w)?;
+ writeln!(w, "// MIR FOR CTFE")?;
+ // Do not use `render_body`, as that would render the promoteds again, but these
+ // are shared between mir_for_ctfe and optimized_mir
+ write_mir_fn(tcx, tcx.mir_for_ctfe(def_id), &mut |_, _| Ok(()), w)?;
+ } else {
+ let instance_mir =
+ tcx.instance_mir(ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)));
+ render_body(w, instance_mir)?;
}
}
Ok(())
@@ -439,7 +439,7 @@
fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
self.super_constant(constant, location);
let Constant { span, user_ty, literal } = constant;
- match literal.ty.kind() {
+ match literal.ty().kind() {
ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char => {}
// Unit type
ty::Tuple(tys) if tys.is_empty() => {}
@@ -449,7 +449,12 @@
if let Some(user_ty) = user_ty {
self.push(&format!("+ user_ty: {:?}", user_ty));
}
- self.push(&format!("+ literal: {:?}", literal));
+ match literal {
+ ConstantKind::Ty(literal) => self.push(&format!("+ literal: {:?}", literal)),
+ ConstantKind::Val(val, ty) => {
+ self.push(&format!("+ literal: {:?}, {}", val, ty))
+ }
+ }
}
}
}
@@ -956,7 +961,7 @@
write!(w, ": {} =", body.return_ty())?;
}
- if let Some(yield_ty) = body.yield_ty {
+ if let Some(yield_ty) = body.yield_ty() {
writeln!(w)?;
writeln!(w, "yields {}", yield_ty)?;
}
diff --git a/compiler/rustc_mir/src/util/spanview.rs b/compiler/rustc_mir/src/util/spanview.rs
index d3ef8c6..a9a30e4 100644
--- a/compiler/rustc_mir/src/util/spanview.rs
+++ b/compiler/rustc_mir/src/util/spanview.rs
@@ -245,6 +245,7 @@
Retag(..) => "Retag",
AscribeUserType(..) => "AscribeUserType",
Coverage(..) => "Coverage",
+ CopyNonOverlapping(..) => "CopyNonOverlapping",
Nop => "Nop",
}
}
diff --git a/compiler/rustc_mir/src/util/storage.rs b/compiler/rustc_mir/src/util/storage.rs
index 4e1696c..18b8ef5 100644
--- a/compiler/rustc_mir/src/util/storage.rs
+++ b/compiler/rustc_mir/src/util/storage.rs
@@ -34,6 +34,7 @@
impl std::ops::Deref for AlwaysLiveLocals {
type Target = BitSet<Local>;
+ #[inline]
fn deref(&self) -> &Self::Target {
&self.0
}
diff --git a/compiler/rustc_mir_build/Cargo.toml b/compiler/rustc_mir_build/Cargo.toml
index 2dd894a..b75221b 100644
--- a/compiler/rustc_mir_build/Cargo.toml
+++ b/compiler/rustc_mir_build/Cargo.toml
@@ -24,4 +24,4 @@
rustc_target = { path = "../rustc_target" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
rustc_ast = { path = "../rustc_ast" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_mir_build/src/build/block.rs b/compiler/rustc_mir_build/src/build/block.rs
index d5f72e6..808c6e3 100644
--- a/compiler/rustc_mir_build/src/build/block.rs
+++ b/compiler/rustc_mir_build/src/build/block.rs
@@ -2,7 +2,6 @@
use crate::build::ForGuard::OutsideGuard;
use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder};
use crate::thir::*;
-use rustc_hir as hir;
use rustc_middle::mir::*;
use rustc_session::lint::builtin::UNSAFE_OP_IN_UNSAFE_FN;
use rustc_session::lint::Level;
@@ -13,7 +12,7 @@
&mut self,
destination: Place<'tcx>,
block: BasicBlock,
- ast_block: &'tcx hir::Block<'tcx>,
+ ast_block: &Block<'_, 'tcx>,
source_info: SourceInfo,
) -> BlockAnd<()> {
let Block {
@@ -24,7 +23,7 @@
expr,
targeted_by_break,
safety_mode,
- } = self.hir.mirror(ast_block);
+ } = *ast_block;
self.in_opt_scope(opt_destruction_scope.map(|de| (de, source_info)), move |this| {
this.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| {
if targeted_by_break {
@@ -50,8 +49,8 @@
destination: Place<'tcx>,
mut block: BasicBlock,
span: Span,
- stmts: Vec<StmtRef<'tcx>>,
- expr: Option<ExprRef<'tcx>>,
+ stmts: &[Stmt<'_, 'tcx>],
+ expr: Option<&Expr<'_, 'tcx>>,
safety_mode: BlockSafety,
) -> BlockAnd<()> {
let this = self;
@@ -79,10 +78,9 @@
this.update_source_scope_for_safety_mode(span, safety_mode);
let source_info = this.source_info(span);
- for stmt in stmts {
- let Stmt { kind, opt_destruction_scope } = this.hir.mirror(stmt);
+ for Stmt { kind, opt_destruction_scope } in stmts {
match kind {
- StmtKind::Expr { scope, expr } => {
+ &StmtKind::Expr { scope, expr } => {
this.block_context.push(BlockFrame::Statement { ignores_expr_result: true });
unpack!(
block = this.in_opt_scope(
@@ -90,7 +88,6 @@
|this| {
let si = (scope, source_info);
this.in_scope(si, LintLevel::Inherited, |this| {
- let expr = this.hir.mirror(expr);
this.stmt_expr(block, expr, Some(scope))
})
}
@@ -102,45 +99,44 @@
this.block_context.push(BlockFrame::Statement { ignores_expr_result });
// Enter the remainder scope, i.e., the bindings' destruction scope.
- this.push_scope((remainder_scope, source_info));
+ this.push_scope((*remainder_scope, source_info));
let_scope_stack.push(remainder_scope);
// Declare the bindings, which may create a source scope.
- let remainder_span =
- remainder_scope.span(this.hir.tcx(), &this.hir.region_scope_tree);
+ let remainder_span = remainder_scope.span(this.tcx, this.region_scope_tree);
let visibility_scope =
Some(this.new_source_scope(remainder_span, LintLevel::Inherited, None));
// Evaluate the initializer, if present.
if let Some(init) = initializer {
- let initializer_span = init.span();
+ let initializer_span = init.span;
unpack!(
block = this.in_opt_scope(
opt_destruction_scope.map(|de| (de, source_info)),
|this| {
- let scope = (init_scope, source_info);
- this.in_scope(scope, lint_level, |this| {
+ let scope = (*init_scope, source_info);
+ this.in_scope(scope, *lint_level, |this| {
this.declare_bindings(
visibility_scope,
remainder_span,
- &pattern,
+ pattern,
ArmHasGuard(false),
Some((None, initializer_span)),
);
- this.expr_into_pattern(block, pattern, init)
+ this.expr_into_pattern(block, pattern.clone(), init)
})
}
)
);
} else {
- let scope = (init_scope, source_info);
- unpack!(this.in_scope(scope, lint_level, |this| {
+ let scope = (*init_scope, source_info);
+ unpack!(this.in_scope(scope, *lint_level, |this| {
this.declare_bindings(
visibility_scope,
remainder_span,
- &pattern,
+ pattern,
ArmHasGuard(false),
None,
);
@@ -171,18 +167,15 @@
// Then, the block may have an optional trailing expression which is a “return” value
// of the block, which is stored into `destination`.
- let tcx = this.hir.tcx();
+ let tcx = this.tcx;
let destination_ty = destination.ty(&this.local_decls, tcx).ty;
if let Some(expr) = expr {
let tail_result_is_ignored =
destination_ty.is_unit() || this.block_context.currently_ignores_tail_results();
- let span = match expr {
- ExprRef::Thir(expr) => expr.span,
- ExprRef::Mirror(ref expr) => expr.span,
- };
- this.block_context.push(BlockFrame::TailExpr { tail_result_is_ignored, span });
+ this.block_context
+ .push(BlockFrame::TailExpr { tail_result_is_ignored, span: expr.span });
- unpack!(block = this.into(destination, block, expr));
+ unpack!(block = this.expr_into_dest(destination, block, expr));
let popped = this.block_context.pop();
assert!(popped.map_or(false, |bf| bf.is_tail_expr()));
@@ -194,13 +187,13 @@
if destination_ty.is_unit() {
// We only want to assign an implicit `()` as the return value of the block if the
// block does not diverge. (Otherwise, we may try to assign a unit to a `!`-type.)
- this.cfg.push_assign_unit(block, source_info, destination, this.hir.tcx());
+ this.cfg.push_assign_unit(block, source_info, destination, this.tcx);
}
}
// Finally, we pop all the let scopes before exiting out from the scope of block
// itself.
for scope in let_scope_stack.into_iter().rev() {
- unpack!(block = this.pop_scope((scope, source_info), block));
+ unpack!(block = this.pop_scope((*scope, source_info), block));
}
// Restore the original source scope.
this.source_scope = outer_source_scope;
@@ -220,7 +213,7 @@
Safety::Safe => {}
// no longer treat `unsafe fn`s as `unsafe` contexts (see RFC #2585)
Safety::FnUnsafe
- if self.hir.tcx().lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, hir_id).0
+ if self.tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, hir_id).0
!= Level::Allow => {}
_ => return,
}
diff --git a/compiler/rustc_mir_build/src/build/cfg.rs b/compiler/rustc_mir_build/src/build/cfg.rs
index 42e2b24..e562e52 100644
--- a/compiler/rustc_mir_build/src/build/cfg.rs
+++ b/compiler/rustc_mir_build/src/build/cfg.rs
@@ -68,7 +68,7 @@
Rvalue::Use(Operand::Constant(box Constant {
span: source_info.span,
user_ty: None,
- literal: ty::Const::zero_sized(tcx, tcx.types.unit),
+ literal: ty::Const::zero_sized(tcx, tcx.types.unit).into(),
})),
);
}
diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
index 3a36ad5..57f56e2 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_constant.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
@@ -8,18 +8,10 @@
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Compile `expr`, yielding a compile-time constant. Assumes that
/// `expr` is a valid compile-time constant!
- crate fn as_constant<M>(&mut self, expr: M) -> Constant<'tcx>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
- let expr = self.hir.mirror(expr);
- self.expr_as_constant(expr)
- }
-
- fn expr_as_constant(&mut self, expr: Expr<'tcx>) -> Constant<'tcx> {
+ crate fn as_constant(&mut self, expr: &Expr<'_, 'tcx>) -> Constant<'tcx> {
let this = self;
- let Expr { ty, temp_lifetime: _, span, kind } = expr;
- match kind {
+ let Expr { ty, temp_lifetime: _, span, ref kind } = *expr;
+ match *kind {
ExprKind::Scope { region_scope: _, lint_level: _, value } => this.as_constant(value),
ExprKind::Literal { literal, user_ty, const_id: _ } => {
let user_ty = user_ty.map(|user_ty| {
@@ -30,10 +22,14 @@
})
});
assert_eq!(literal.ty, ty);
- Constant { span, user_ty, literal }
+ Constant { span, user_ty, literal: literal.into() }
}
- ExprKind::StaticRef { literal, .. } => Constant { span, user_ty: None, literal },
- ExprKind::ConstBlock { value } => Constant { span, user_ty: None, literal: value },
+ ExprKind::StaticRef { literal, .. } => {
+ Constant { span, user_ty: None, literal: literal.into() }
+ }
+ ExprKind::ConstBlock { value } => {
+ Constant { span: span, user_ty: None, literal: value.into() }
+ }
_ => span_bug!(span, "expression is not a valid constant {:?}", kind),
}
}
diff --git a/compiler/rustc_mir_build/src/build/expr/as_operand.rs b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
index 60f8d8c..c393878 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_operand.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
@@ -14,10 +14,11 @@
/// after the current enclosing `ExprKind::Scope` has ended, so
/// please do *not* return it from functions to avoid bad
/// miscompiles.
- crate fn as_local_operand<M>(&mut self, block: BasicBlock, expr: M) -> BlockAnd<Operand<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
+ crate fn as_local_operand(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'_, 'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
let local_scope = self.local_scope();
self.as_operand(block, Some(local_scope), expr)
}
@@ -70,14 +71,11 @@
/// value to the stack.
///
/// See #68034 for more details.
- crate fn as_local_call_operand<M>(
+ crate fn as_local_call_operand(
&mut self,
block: BasicBlock,
- expr: M,
- ) -> BlockAnd<Operand<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
+ expr: &Expr<'_, 'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
let local_scope = self.local_scope();
self.as_call_operand(block, Some(local_scope), expr)
}
@@ -88,41 +86,16 @@
/// this time.
///
/// The operand is known to be live until the end of `scope`.
- crate fn as_operand<M>(
- &mut self,
- block: BasicBlock,
- scope: Option<region::Scope>,
- expr: M,
- ) -> BlockAnd<Operand<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
- let expr = self.hir.mirror(expr);
- self.expr_as_operand(block, scope, expr)
- }
-
+ ///
/// Like `as_local_call_operand`, except that the argument will
/// not be valid once `scope` ends.
- fn as_call_operand<M>(
- &mut self,
- block: BasicBlock,
- scope: Option<region::Scope>,
- expr: M,
- ) -> BlockAnd<Operand<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
- let expr = self.hir.mirror(expr);
- self.expr_as_call_operand(block, scope, expr)
- }
-
- fn expr_as_operand(
+ crate fn as_operand(
&mut self,
mut block: BasicBlock,
scope: Option<region::Scope>,
- expr: Expr<'tcx>,
+ expr: &Expr<'_, 'tcx>,
) -> BlockAnd<Operand<'tcx>> {
- debug!("expr_as_operand(block={:?}, expr={:?})", block, expr);
+ debug!("as_operand(block={:?}, expr={:?})", block, expr);
let this = self;
if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind {
@@ -133,7 +106,7 @@
}
let category = Category::of(&expr.kind).unwrap();
- debug!("expr_as_operand: category={:?} for={:?}", category, expr.kind);
+ debug!("as_operand: category={:?} for={:?}", category, expr.kind);
match category {
Category::Constant => {
let constant = this.as_constant(expr);
@@ -146,13 +119,13 @@
}
}
- fn expr_as_call_operand(
+ crate fn as_call_operand(
&mut self,
mut block: BasicBlock,
scope: Option<region::Scope>,
- expr: Expr<'tcx>,
+ expr: &Expr<'_, 'tcx>,
) -> BlockAnd<Operand<'tcx>> {
- debug!("expr_as_call_operand(block={:?}, expr={:?})", block, expr);
+ debug!("as_call_operand(block={:?}, expr={:?})", block, expr);
let this = self;
if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind {
@@ -163,12 +136,12 @@
});
}
- let tcx = this.hir.tcx();
+ let tcx = this.tcx;
if tcx.features().unsized_fn_params {
let ty = expr.ty;
let span = expr.span;
- let param_env = this.hir.param_env;
+ let param_env = this.param_env;
if !ty.is_sized(tcx.at(span), param_env) {
// !sized means !copy, so this is an unsized move
@@ -176,9 +149,7 @@
// As described above, detect the case where we are passing a value of unsized
// type, and that value is coming from the deref of a box.
- if let ExprKind::Deref { ref arg } = expr.kind {
- let arg = this.hir.mirror(arg.clone());
-
+ if let ExprKind::Deref { arg } = expr.kind {
// Generate let tmp0 = arg0
let operand = unpack!(block = this.as_temp(block, scope, arg, Mutability::Mut));
@@ -193,6 +164,6 @@
}
}
- this.expr_as_operand(block, scope, expr)
+ this.as_operand(block, scope, expr)
}
}
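The signature changes in this file, and in the files that follow, all drop the old `M: Mirror<'tcx, ...>` bound in favour of taking an already-built `&Expr<'_, 'tcx>`, since the THIR no longer has to be mirrored lazily inside the builder. A small sketch of the before/after shape with toy types (the commented-out `Mirror` signature and the types below are illustrative, not the rustc ones):

```
// Toy stand-ins for the THIR expression and the MIR operand.
#[derive(Debug)]
struct Expr {
    kind: String,
    span: (u32, u32),
}

#[derive(Debug)]
struct Operand(String);

struct Builder;

impl Builder {
    // Old shape (sketch): every entry point was generic over a lazy
    // `Mirror` source and began with `let expr = self.hir.mirror(expr);`:
    //
    //     fn as_operand<M: Mirror<'tcx, Output = Expr<'tcx>>>(
    //         &mut self, expr: M) -> Operand { ... }
    //
    // New shape: the expression tree already exists, so the builder just
    // borrows it and lowers it directly.
    fn as_operand(&mut self, expr: &Expr) -> Operand {
        Operand(format!("operand for `{}` at {:?}", expr.kind, expr.span))
    }
}

fn main() {
    let expr = Expr { kind: "1 + 2".to_string(), span: (0, 5) };
    let mut builder = Builder;
    println!("{:?}", builder.as_operand(&expr));
}
```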
diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs
index 3308a24..fbc9c30 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_place.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs
@@ -10,6 +10,7 @@
use rustc_middle::middle::region;
use rustc_middle::mir::AssertKind::BoundsCheck;
use rustc_middle::mir::*;
+use rustc_middle::ty::AdtDef;
use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty, TyCtxt, Variance};
use rustc_span::Span;
use rustc_target::abi::VariantIdx;
@@ -17,7 +18,7 @@
use rustc_index::vec::Idx;
/// The "outermost" place that holds this value.
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug, PartialEq)]
crate enum PlaceBase {
/// Denotes the start of a `Place`.
Local(Local),
@@ -67,7 +68,7 @@
///
/// This is used internally when building a place for an expression like `a.b.c`. The fields `b`
/// and `c` can be progressively pushed onto the place builder that is created when converting `a`.
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
crate struct PlaceBuilder<'tcx> {
base: PlaceBase,
projection: Vec<PlaceElem<'tcx>>,
@@ -83,20 +84,23 @@
mir_projections: &[PlaceElem<'tcx>],
) -> Vec<HirProjectionKind> {
let mut hir_projections = Vec::new();
+ let mut variant = None;
for mir_projection in mir_projections {
let hir_projection = match mir_projection {
ProjectionElem::Deref => HirProjectionKind::Deref,
ProjectionElem::Field(field, _) => {
- // We will never encouter this for multivariant enums,
- // read the comment for `Downcast`.
- HirProjectionKind::Field(field.index() as u32, VariantIdx::new(0))
+ let variant = variant.unwrap_or(VariantIdx::new(0));
+ HirProjectionKind::Field(field.index() as u32, variant)
}
- ProjectionElem::Downcast(..) => {
- // This projections exist only for enums that have
- // multiple variants. Since such enums that are captured
- // completely, we can stop here.
- break;
+ ProjectionElem::Downcast(.., idx) => {
+ // We don't expect to see multi-variant enums here, as earlier
+ // phases will have truncated them already. However, there can
+ // still be downcasts, thanks to single-variant enums.
+ // We keep track of VariantIdx so we can use this information
+ // if the next ProjectionElem is a Field.
+ variant = Some(*idx);
+ continue;
}
ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
@@ -106,7 +110,7 @@
break;
}
};
-
+ variant = None;
hir_projections.push(hir_projection);
}
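The two hunks above make the MIR-to-HIR projection conversion carry a variant index across a `Downcast` instead of bailing out, so a following `Field` projection is attributed to the right variant. A standalone sketch of the same bookkeeping over toy projection elements (`Proj`/`HirProj` are illustrative, not the real `ProjectionElem`/`HirProjectionKind`):

```
#[derive(Debug, Clone, Copy)]
enum Proj {
    Deref,
    Field(u32),
    Downcast(u32), // single-variant downcasts can still occur here
}

#[derive(Debug, PartialEq)]
enum HirProj {
    Deref,
    Field(u32, u32), // (field index, variant index)
}

fn convert(projs: &[Proj]) -> Vec<HirProj> {
    let mut out = Vec::new();
    let mut variant = None;
    for p in projs {
        let hp = match *p {
            Proj::Deref => HirProj::Deref,
            Proj::Field(f) => HirProj::Field(f, variant.unwrap_or(0)),
            Proj::Downcast(v) => {
                // Remember the variant so the next Field projection is
                // attributed to it, then keep scanning.
                variant = Some(v);
                continue;
            }
        };
        variant = None;
        out.push(hp);
    }
    out
}

fn main() {
    // e.g. a field read through a single-variant enum:
    // Deref, Downcast(0), Field(0).
    let projs = [Proj::Deref, Proj::Downcast(0), Proj::Field(0)];
    assert_eq!(convert(&projs), vec![HirProj::Deref, HirProj::Field(0, 0)]);
    println!("{:?}", convert(&projs));
}
```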
@@ -194,12 +198,12 @@
/// Takes a PlaceBuilder and resolves the upvar (if any) within it, so that the
/// `PlaceBuilder` now starts from `PlaceBase::Local`.
///
-/// Returns a Result with the error being the HirId of the Upvar that was not found.
+/// Returns a Result with the error being the PlaceBuilder (`from_builder`) that was not found.
fn to_upvars_resolved_place_builder<'a, 'tcx>(
from_builder: PlaceBuilder<'tcx>,
tcx: TyCtxt<'tcx>,
typeck_results: &'a ty::TypeckResults<'tcx>,
-) -> Result<PlaceBuilder<'tcx>, HirId> {
+) -> Result<PlaceBuilder<'tcx>, PlaceBuilder<'tcx>> {
match from_builder.base {
PlaceBase::Local(_) => Ok(from_builder),
PlaceBase::Upvar { var_hir_id, closure_def_id, closure_kind } => {
@@ -230,13 +234,12 @@
from_builder.projection
)
} else {
- // FIXME(project-rfc-2229#24): Handle this case properly
debug!(
"No associated capture found for {:?}[{:#?}]",
var_hir_id, from_builder.projection,
);
}
- return Err(var_hir_id);
+ return Err(from_builder);
};
let closure_ty = typeck_results
@@ -300,6 +303,25 @@
to_upvars_resolved_place_builder(self, tcx, typeck_results).unwrap()
}
+ /// Attempts to resolve the `PlaceBuilder`.
+ /// On success, it will return the resolved `PlaceBuilder`.
+ /// On failure, it will return itself.
+ ///
+ /// Upvars resolve may fail for a `PlaceBuilder` when attempting to
+ /// resolve a disjoint field whose root variable is not captured
+ /// (destructured assignments) or when attempting to resolve a root
+ /// variable (discriminant matching with only wildcard arm) that is
+ /// not captured. This can happen because the final mir that will be
+ /// generated doesn't require a read for this place. Failures will only
+ /// happen inside closures.
+ crate fn try_upvars_resolved<'a>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> Result<PlaceBuilder<'tcx>, PlaceBuilder<'tcx>> {
+ to_upvars_resolved_place_builder(self, tcx, typeck_results)
+ }
+
crate fn base(&self) -> PlaceBase {
self.base
}
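The new `try_upvars_resolved` returns `Result<PlaceBuilder, PlaceBuilder>`, handing the unresolved builder back to the caller rather than just an error id, so lowering can continue without the resolved place. A minimal sketch of that pattern with an illustrative toy builder:

```
#[derive(Debug)]
struct PlaceSketch {
    captured: bool,
    path: String,
}

// On success, return the resolved builder; on failure, return the input
// unchanged so the caller can keep working with the unresolved one.
fn try_resolve(p: PlaceSketch) -> Result<PlaceSketch, PlaceSketch> {
    if p.captured {
        let resolved = format!("local::{}", p.path);
        Ok(PlaceSketch { captured: true, path: resolved })
    } else {
        Err(p)
    }
}

fn main() {
    let captured = PlaceSketch { captured: true, path: "foo.0".into() };
    let uncaptured = PlaceSketch { captured: false, path: "bar".into() };

    match try_resolve(captured) {
        Ok(p) => println!("resolved to {}", p.path),
        Err(p) => println!("left unresolved: {}", p.path),
    }
    // The failure case does not lose the builder: it comes back in `Err`.
    let back = try_resolve(uncaptured).unwrap_err();
    assert_eq!(back.path, "bar");
}
```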
@@ -308,15 +330,22 @@
self.project(PlaceElem::Field(f, ty))
}
- fn deref(self) -> Self {
+ crate fn deref(self) -> Self {
self.project(PlaceElem::Deref)
}
+ crate fn downcast(self, adt_def: &'tcx AdtDef, variant_index: VariantIdx) -> Self {
+ self.project(PlaceElem::Downcast(
+ Some(adt_def.variants[variant_index].ident.name),
+ variant_index,
+ ))
+ }
+
fn index(self, index: Local) -> Self {
self.project(PlaceElem::Index(index))
}
- fn project(mut self, elem: PlaceElem<'tcx>) -> Self {
+ crate fn project(mut self, elem: PlaceElem<'tcx>) -> Self {
self.projection.push(elem);
self
}
@@ -347,25 +376,22 @@
/// Extra care is needed if any user code is allowed to run between calling
/// this method and using it, as is the case for `match` and index
/// expressions.
- crate fn as_place<M>(&mut self, mut block: BasicBlock, expr: M) -> BlockAnd<Place<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
+ crate fn as_place(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'_, 'tcx>,
+ ) -> BlockAnd<Place<'tcx>> {
let place_builder = unpack!(block = self.as_place_builder(block, expr));
- block.and(place_builder.into_place(self.hir.tcx(), self.hir.typeck_results()))
+ block.and(place_builder.into_place(self.tcx, self.typeck_results))
}
/// This is used when constructing a compound `Place`, so that we can avoid creating
/// intermediate `Place` values until we know the full set of projections.
- crate fn as_place_builder<M>(
+ crate fn as_place_builder(
&mut self,
block: BasicBlock,
- expr: M,
- ) -> BlockAnd<PlaceBuilder<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
- let expr = self.hir.mirror(expr);
+ expr: &Expr<'_, 'tcx>,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
self.expr_as_place(block, expr, Mutability::Mut, None)
}
@@ -374,16 +400,13 @@
/// place. The place itself may or may not be mutable:
/// * If this expr is a place expr like a.b, then we will return that place.
/// * Otherwise, a temporary is created: in that event, it will be an immutable temporary.
- crate fn as_read_only_place<M>(
+ crate fn as_read_only_place(
&mut self,
mut block: BasicBlock,
- expr: M,
- ) -> BlockAnd<Place<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
+ expr: &Expr<'_, 'tcx>,
+ ) -> BlockAnd<Place<'tcx>> {
let place_builder = unpack!(block = self.as_read_only_place_builder(block, expr));
- block.and(place_builder.into_place(self.hir.tcx(), self.hir.typeck_results()))
+ block.and(place_builder.into_place(self.tcx, self.typeck_results))
}
/// This is used when constructing a compound `Place`, so that we can avoid creating
@@ -392,22 +415,18 @@
/// place. The place itself may or may not be mutable:
/// * If this expr is a place expr like a.b, then we will return that place.
/// * Otherwise, a temporary is created: in that event, it will be an immutable temporary.
- fn as_read_only_place_builder<M>(
+ fn as_read_only_place_builder(
&mut self,
block: BasicBlock,
- expr: M,
- ) -> BlockAnd<PlaceBuilder<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
- let expr = self.hir.mirror(expr);
+ expr: &Expr<'_, 'tcx>,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
self.expr_as_place(block, expr, Mutability::Not, None)
}
fn expr_as_place(
&mut self,
mut block: BasicBlock,
- expr: Expr<'tcx>,
+ expr: &Expr<'_, 'tcx>,
mutability: Mutability,
fake_borrow_temps: Option<&mut Vec<Local>>,
) -> BlockAnd<PlaceBuilder<'tcx>> {
@@ -419,18 +438,15 @@
match expr.kind {
ExprKind::Scope { region_scope, lint_level, value } => {
this.in_scope((region_scope, source_info), lint_level, |this| {
- let value = this.hir.mirror(value);
this.expr_as_place(block, value, mutability, fake_borrow_temps)
})
}
ExprKind::Field { lhs, name } => {
- let lhs = this.hir.mirror(lhs);
let place_builder =
unpack!(block = this.expr_as_place(block, lhs, mutability, fake_borrow_temps,));
block.and(place_builder.field(name, expr.ty))
}
ExprKind::Deref { arg } => {
- let arg = this.hir.mirror(arg);
let place_builder =
unpack!(block = this.expr_as_place(block, arg, mutability, fake_borrow_temps,));
block.and(place_builder.deref())
@@ -462,7 +478,6 @@
}
ExprKind::PlaceTypeAscription { source, user_ty } => {
- let source = this.hir.mirror(source);
let place_builder = unpack!(
block = this.expr_as_place(block, source, mutability, fake_borrow_temps,)
);
@@ -474,8 +489,7 @@
inferred_ty: expr.ty,
});
- let place =
- place_builder.clone().into_place(this.hir.tcx(), this.hir.typeck_results());
+ let place = place_builder.clone().into_place(this.tcx, this.typeck_results);
this.cfg.push(
block,
Statement {
@@ -493,7 +507,6 @@
block.and(place_builder)
}
ExprKind::ValueTypeAscription { source, user_ty } => {
- let source = this.hir.mirror(source);
let temp =
unpack!(block = this.as_temp(block, source.temp_lifetime, source, mutability));
if let Some(user_ty) = user_ty {
@@ -570,12 +583,11 @@
upvar_id: ty::UpvarId,
) -> BlockAnd<PlaceBuilder<'tcx>> {
let closure_ty = self
- .hir
- .typeck_results()
- .node_type(self.hir.tcx().hir().local_def_id_to_hir_id(upvar_id.closure_expr_id));
+ .typeck_results
+ .node_type(self.tcx.hir().local_def_id_to_hir_id(upvar_id.closure_expr_id));
let closure_kind = if let ty::Closure(_, closure_substs) = closure_ty.kind() {
- self.hir.infcx().closure_kind(closure_substs).unwrap()
+ self.infcx.closure_kind(closure_substs).unwrap()
} else {
// Generators are considered FnOnce.
ty::ClosureKind::FnOnce
@@ -599,41 +611,32 @@
fn lower_index_expression(
&mut self,
mut block: BasicBlock,
- base: ExprRef<'tcx>,
- index: ExprRef<'tcx>,
+ base: &Expr<'_, 'tcx>,
+ index: &Expr<'_, 'tcx>,
mutability: Mutability,
fake_borrow_temps: Option<&mut Vec<Local>>,
temp_lifetime: Option<region::Scope>,
expr_span: Span,
source_info: SourceInfo,
) -> BlockAnd<PlaceBuilder<'tcx>> {
- let lhs = self.hir.mirror(base);
-
let base_fake_borrow_temps = &mut Vec::new();
let is_outermost_index = fake_borrow_temps.is_none();
let fake_borrow_temps = fake_borrow_temps.unwrap_or(base_fake_borrow_temps);
let mut base_place =
- unpack!(block = self.expr_as_place(block, lhs, mutability, Some(fake_borrow_temps),));
+ unpack!(block = self.expr_as_place(block, base, mutability, Some(fake_borrow_temps),));
// Making this a *fresh* temporary means we do not have to worry about
// the index changing later: Nothing will ever change this temporary.
// The "retagging" transformation (for Stacked Borrows) relies on this.
let idx = unpack!(block = self.as_temp(block, temp_lifetime, index, Mutability::Not,));
- block = self.bounds_check(
- block,
- base_place.clone().into_place(self.hir.tcx(), self.hir.typeck_results()),
- idx,
- expr_span,
- source_info,
- );
+ block = self.bounds_check(block, base_place.clone(), idx, expr_span, source_info);
if is_outermost_index {
self.read_fake_borrows(block, fake_borrow_temps, source_info)
} else {
- base_place =
- base_place.expect_upvars_resolved(self.hir.tcx(), self.hir.typeck_results());
+ base_place = base_place.expect_upvars_resolved(self.tcx, self.typeck_results);
self.add_fake_borrows_of_base(
&base_place,
block,
@@ -649,25 +652,33 @@
fn bounds_check(
&mut self,
block: BasicBlock,
- slice: Place<'tcx>,
+ slice: PlaceBuilder<'tcx>,
index: Local,
expr_span: Span,
source_info: SourceInfo,
) -> BasicBlock {
- let usize_ty = self.hir.usize_ty();
- let bool_ty = self.hir.bool_ty();
+ let usize_ty = self.tcx.types.usize;
+ let bool_ty = self.tcx.types.bool;
// bounds check:
let len = self.temp(usize_ty, expr_span);
let lt = self.temp(bool_ty, expr_span);
// len = len(slice)
- self.cfg.push_assign(block, source_info, len, Rvalue::Len(slice));
+ self.cfg.push_assign(
+ block,
+ source_info,
+ len,
+ Rvalue::Len(slice.into_place(self.tcx, self.typeck_results)),
+ );
// lt = idx < len
self.cfg.push_assign(
block,
source_info,
lt,
- Rvalue::BinaryOp(BinOp::Lt, Operand::Copy(Place::from(index)), Operand::Copy(len)),
+ Rvalue::BinaryOp(
+ BinOp::Lt,
+ box (Operand::Copy(Place::from(index)), Operand::Copy(len)),
+ ),
);
let msg = BoundsCheck { len: Operand::Move(len), index: Operand::Copy(Place::from(index)) };
// assert!(lt, "...")
@@ -682,7 +693,7 @@
expr_span: Span,
source_info: SourceInfo,
) {
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
let local = match base_place.base {
PlaceBase::Local(local) => local,
PlaceBase::Upvar { .. } => bug!("Expected PlacseBase::Local found Upvar"),
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index e602f4d..7f24a41 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -8,6 +8,7 @@
use crate::thir::*;
use rustc_middle::middle::region;
use rustc_middle::mir::AssertKind;
+use rustc_middle::mir::Place;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Ty, UpvarSubsts};
use rustc_span::Span;
@@ -19,33 +20,21 @@
/// The operand returned from this function will *not be valid* after
/// an ExprKind::Scope is passed, so please do *not* return it from
/// functions to avoid bad miscompiles.
- crate fn as_local_rvalue<M>(&mut self, block: BasicBlock, expr: M) -> BlockAnd<Rvalue<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
+ crate fn as_local_rvalue(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'_, 'tcx>,
+ ) -> BlockAnd<Rvalue<'tcx>> {
let local_scope = self.local_scope();
self.as_rvalue(block, Some(local_scope), expr)
}
/// Compile `expr`, yielding an rvalue.
- fn as_rvalue<M>(
- &mut self,
- block: BasicBlock,
- scope: Option<region::Scope>,
- expr: M,
- ) -> BlockAnd<Rvalue<'tcx>>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
- let expr = self.hir.mirror(expr);
- self.expr_as_rvalue(block, scope, expr)
- }
-
- fn expr_as_rvalue(
+ crate fn as_rvalue(
&mut self,
mut block: BasicBlock,
scope: Option<region::Scope>,
- expr: Expr<'tcx>,
+ expr: &Expr<'_, 'tcx>,
) -> BlockAnd<Rvalue<'tcx>> {
debug!("expr_as_rvalue(block={:?}, scope={:?}, expr={:?})", block, scope, expr);
@@ -71,8 +60,8 @@
ExprKind::Unary { op, arg } => {
let arg = unpack!(block = this.as_operand(block, scope, arg));
// Check for -MIN on signed integers
- if this.hir.check_overflow() && op == UnOp::Neg && expr.ty.is_signed() {
- let bool_ty = this.hir.bool_ty();
+ if this.check_overflow && op == UnOp::Neg && expr.ty.is_signed() {
+ let bool_ty = this.tcx.types.bool;
let minval = this.minval_literal(expr_span, expr.ty);
let is_min = this.temp(bool_ty, expr_span);
@@ -81,7 +70,7 @@
block,
source_info,
is_min,
- Rvalue::BinaryOp(BinOp::Eq, arg.to_copy(), minval),
+ Rvalue::BinaryOp(BinOp::Eq, box (arg.to_copy(), minval)),
);
block = this.assert(
@@ -95,7 +84,6 @@
block.and(Rvalue::UnaryOp(op, arg))
}
ExprKind::Box { value } => {
- let value = this.hir.mirror(value);
// The `Box<T>` temporary created here is not a part of the HIR,
// and therefore is not considered during generator auto-trait
// determination. See the comment about `box` at `yield_in_scope`.
@@ -115,8 +103,11 @@
// initialize the box contents:
unpack!(
- block =
- this.into(this.hir.tcx().mk_place_deref(Place::from(result)), block, value)
+ block = this.expr_into_dest(
+ this.tcx.mk_place_deref(Place::from(result)),
+ block,
+ value
+ )
);
block.and(Rvalue::Use(Operand::Move(Place::from(result))))
}
@@ -156,7 +147,7 @@
// to the same MIR as `let x = ();`.
// first process the set of fields
- let el_ty = expr.ty.sequence_element_type(this.hir.tcx());
+ let el_ty = expr.ty.sequence_element_type(this.tcx);
let fields: Vec<_> = fields
.into_iter()
.map(|f| unpack!(block = this.as_operand(block, scope, f)))
@@ -174,12 +165,45 @@
block.and(Rvalue::Aggregate(box AggregateKind::Tuple, fields))
}
- ExprKind::Closure { closure_id, substs, upvars, movability } => {
+ ExprKind::Closure { closure_id, substs, upvars, movability, ref fake_reads } => {
+ // Convert the closure fake reads, if any, from `ExprRef` to mir `Place`
+ // and push the fake reads.
+ // This must come before creating the operands. This is required in case
+ // there is a fake read and a borrow of the same path, since otherwise the
+ // fake read might interfere with the borrow. Consider an example like this
+ // one:
+ // ```
+ // let mut x = 0;
+ // let c = || {
+ // &mut x; // mutable borrow of `x`
+ // match x { _ => () } // fake read of `x`
+ // };
+ // ```
+ // FIXME(RFC2229): Remove feature gate once diagnostics are improved
+ if this.tcx.features().capture_disjoint_fields {
+ for (thir_place, cause, hir_id) in fake_reads.into_iter() {
+ let place_builder =
+ unpack!(block = this.as_place_builder(block, thir_place));
+
+ if let Ok(place_builder_resolved) =
+ place_builder.try_upvars_resolved(this.tcx, this.typeck_results)
+ {
+ let mir_place =
+ place_builder_resolved.into_place(this.tcx, this.typeck_results);
+ this.cfg.push_fake_read(
+ block,
+ this.source_info(this.tcx.hir().span(*hir_id)),
+ *cause,
+ mir_place,
+ );
+ }
+ }
+ }
+
// see (*) above
let operands: Vec<_> = upvars
.into_iter()
.map(|upvar| {
- let upvar = this.hir.mirror(upvar);
match Category::of(&upvar.kind) {
// Use as_place to avoid creating a temporary when
// moving a variable into a closure, so that
@@ -214,6 +238,7 @@
}
})
.collect();
+
let result = match substs {
UpvarSubsts::Generator(substs) => {
// We implicitly set the discriminant to 0. See
@@ -230,7 +255,7 @@
block.and(Rvalue::Use(Operand::Constant(box Constant {
span: expr_span,
user_ty: None,
- literal: ty::Const::zero_sized(this.hir.tcx(), this.hir.tcx().types.unit),
+ literal: ty::Const::zero_sized(this.tcx, this.tcx.types.unit).into(),
})))
}
ExprKind::Yield { .. }
@@ -282,21 +307,21 @@
rhs: Operand<'tcx>,
) -> BlockAnd<Rvalue<'tcx>> {
let source_info = self.source_info(span);
- let bool_ty = self.hir.bool_ty();
- if self.hir.check_overflow() && op.is_checkable() && ty.is_integral() {
- let result_tup = self.hir.tcx().intern_tup(&[ty, bool_ty]);
+ let bool_ty = self.tcx.types.bool;
+ if self.check_overflow && op.is_checkable() && ty.is_integral() {
+ let result_tup = self.tcx.intern_tup(&[ty, bool_ty]);
let result_value = self.temp(result_tup, span);
self.cfg.push_assign(
block,
source_info,
result_value,
- Rvalue::CheckedBinaryOp(op, lhs.to_copy(), rhs.to_copy()),
+ Rvalue::CheckedBinaryOp(op, box (lhs.to_copy(), rhs.to_copy())),
);
let val_fld = Field::new(0);
let of_fld = Field::new(1);
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
let val = tcx.mk_place_field(result_value, val_fld, ty);
let of = tcx.mk_place_field(result_value, of_fld, bool_ty);
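With overflow checks enabled, the code above lowers a checked binary op into a `(result, overflowed)` tuple followed by an assert on the flag. Plain Rust's `overflowing_*` methods yield the same pair; this is a sketch of what the emitted MIR checks, not of the builder API itself:

```
// What `Rvalue::CheckedBinaryOp(Add, ..)` computes: the wrapped result plus
// a bool saying whether the operation overflowed. The builder then asserts
// on the flag with the matching panic message.
fn checked_add_like_mir(lhs: u8, rhs: u8) -> u8 {
    let (val, overflowed) = lhs.overflowing_add(rhs);
    assert!(!overflowed, "attempt to add with overflow");
    val
}

fn main() {
    assert_eq!(checked_add_like_mir(200, 55), 255);
    // `checked_add_like_mir(200, 56)` would panic, mirroring the MIR assert.
    println!("{}", checked_add_like_mir(1, 2));
}
```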
@@ -324,7 +349,7 @@
block,
source_info,
is_zero,
- Rvalue::BinaryOp(BinOp::Eq, rhs.to_copy(), zero),
+ Rvalue::BinaryOp(BinOp::Eq, box (rhs.to_copy(), zero)),
);
block = self.assert(block, Operand::Move(is_zero), false, zero_err, span);
@@ -345,13 +370,13 @@
block,
source_info,
is_neg_1,
- Rvalue::BinaryOp(BinOp::Eq, rhs.to_copy(), neg_1),
+ Rvalue::BinaryOp(BinOp::Eq, box (rhs.to_copy(), neg_1)),
);
self.cfg.push_assign(
block,
source_info,
is_min,
- Rvalue::BinaryOp(BinOp::Eq, lhs.to_copy(), min),
+ Rvalue::BinaryOp(BinOp::Eq, box (lhs.to_copy(), min)),
);
let is_neg_1 = Operand::Move(is_neg_1);
@@ -360,14 +385,14 @@
block,
source_info,
of,
- Rvalue::BinaryOp(BinOp::BitAnd, is_neg_1, is_min),
+ Rvalue::BinaryOp(BinOp::BitAnd, box (is_neg_1, is_min)),
);
block = self.assert(block, Operand::Move(of), false, overflow_err, span);
}
}
- block.and(Rvalue::BinaryOp(op, lhs, rhs))
+ block.and(Rvalue::BinaryOp(op, box (lhs, rhs)))
}
}
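Throughout this file the `BinaryOp`/`CheckedBinaryOp` operands are now passed as one boxed pair, presumably so the pair lives out of line and the `Rvalue` enum itself stays small. A self-contained size comparison with illustrative toy enums (exact byte counts are platform- and layout-dependent; only the relative shrink matters):

```
use std::mem::size_of;

// A stand-in for a largish operand type.
#[allow(dead_code)]
struct Operand([u64; 4]);

// Inline pair: the enum must be large enough to hold both operands.
#[allow(dead_code)]
enum RvalueInline {
    Use(Operand),
    BinaryOp(u8, Operand, Operand),
}

// Boxed pair: the binary-op variant only stores a pointer.
#[allow(dead_code)]
enum RvalueBoxed {
    Use(Operand),
    BinaryOp(u8, Box<(Operand, Operand)>),
}

fn main() {
    println!("inline pair: {} bytes", size_of::<RvalueInline>());
    println!("boxed pair:  {} bytes", size_of::<RvalueBoxed>());
    assert!(size_of::<RvalueBoxed>() < size_of::<RvalueInline>());
}
```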
@@ -377,7 +402,7 @@
upvar_ty: Ty<'tcx>,
temp_lifetime: Option<region::Scope>,
mut block: BasicBlock,
- arg: ExprRef<'tcx>,
+ arg: &Expr<'_, 'tcx>,
) -> BlockAnd<Operand<'tcx>> {
let this = self;
@@ -398,7 +423,7 @@
// is same as that of the capture in the parent closure.
PlaceBase::Upvar { .. } => {
let enclosing_upvars_resolved =
- arg_place_builder.clone().into_place(this.hir.tcx(), this.hir.typeck_results());
+ arg_place_builder.clone().into_place(this.tcx, this.typeck_results);
match enclosing_upvars_resolved.as_ref() {
PlaceRef {
@@ -435,13 +460,13 @@
Mutability::Mut => BorrowKind::Mut { allow_two_phase_borrow: false },
};
- let arg_place = arg_place_builder.into_place(this.hir.tcx(), this.hir.typeck_results());
+ let arg_place = arg_place_builder.into_place(this.tcx, this.typeck_results);
this.cfg.push_assign(
block,
source_info,
Place::from(temp),
- Rvalue::Ref(this.hir.tcx().lifetimes.re_erased, borrow_kind, arg_place),
+ Rvalue::Ref(this.tcx.lifetimes.re_erased, borrow_kind, arg_place),
);
// See the comment in `expr_as_temp` and on the `rvalue_scopes` field for why
@@ -456,9 +481,9 @@
// Helper to get a `-1` value of the appropriate type
fn neg_1_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
let param_ty = ty::ParamEnv::empty().and(ty);
- let bits = self.hir.tcx().layout_of(param_ty).unwrap().size.bits();
+ let bits = self.tcx.layout_of(param_ty).unwrap().size.bits();
let n = (!0u128) >> (128 - bits);
- let literal = ty::Const::from_bits(self.hir.tcx(), n, param_ty);
+ let literal = ty::Const::from_bits(self.tcx, n, param_ty);
self.literal_operand(span, literal)
}
@@ -467,9 +492,9 @@
fn minval_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
assert!(ty.is_signed());
let param_ty = ty::ParamEnv::empty().and(ty);
- let bits = self.hir.tcx().layout_of(param_ty).unwrap().size.bits();
+ let bits = self.tcx.layout_of(param_ty).unwrap().size.bits();
let n = 1 << (bits - 1);
- let literal = ty::Const::from_bits(self.hir.tcx(), n, param_ty);
+ let literal = ty::Const::from_bits(self.tcx, n, param_ty);
self.literal_operand(span, literal)
}
diff --git a/compiler/rustc_mir_build/src/build/expr/as_temp.rs b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
index 9984b52..98b910a 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_temp.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
@@ -4,40 +4,34 @@
use crate::build::{BlockAnd, BlockAndExtension, Builder};
use crate::thir::*;
use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_hir as hir;
use rustc_middle::middle::region;
use rustc_middle::mir::*;
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Compile `expr` into a fresh temporary. This is used when building
/// up rvalues so as to freeze the value that will be consumed.
- crate fn as_temp<M>(
+ crate fn as_temp(
&mut self,
block: BasicBlock,
temp_lifetime: Option<region::Scope>,
- expr: M,
+ expr: &Expr<'_, 'tcx>,
mutability: Mutability,
- ) -> BlockAnd<Local>
- where
- M: Mirror<'tcx, Output = Expr<'tcx>>,
- {
- let expr = self.hir.mirror(expr);
- //
+ ) -> BlockAnd<Local> {
// this is the only place in mir building that we need to truly need to worry about
// infinite recursion. Everything else does recurse, too, but it always gets broken up
// at some point by inserting an intermediate temporary
- ensure_sufficient_stack(|| self.expr_as_temp(block, temp_lifetime, expr, mutability))
+ ensure_sufficient_stack(|| self.as_temp_inner(block, temp_lifetime, expr, mutability))
}
- fn expr_as_temp(
+ fn as_temp_inner(
&mut self,
mut block: BasicBlock,
temp_lifetime: Option<region::Scope>,
- expr: Expr<'tcx>,
+ expr: &Expr<'_, 'tcx>,
mutability: Mutability,
) -> BlockAnd<Local> {
debug!(
- "expr_as_temp(block={:?}, temp_lifetime={:?}, expr={:?}, mutability={:?})",
+ "as_temp(block={:?}, temp_lifetime={:?}, expr={:?}, mutability={:?})",
block, temp_lifetime, expr, mutability
);
let this = self;
@@ -65,13 +59,13 @@
}
match expr.kind {
ExprKind::StaticRef { def_id, .. } => {
- assert!(!this.hir.tcx().is_thread_local_static(def_id));
+ assert!(!this.tcx.is_thread_local_static(def_id));
local_decl.internal = true;
local_decl.local_info =
Some(box LocalInfo::StaticRef { def_id, is_thread_local: false });
}
ExprKind::ThreadLocalRef(def_id) => {
- assert!(this.hir.tcx().is_thread_local_static(def_id));
+ assert!(this.tcx.is_thread_local_static(def_id));
local_decl.internal = true;
local_decl.local_info =
Some(box LocalInfo::StaticRef { def_id, is_thread_local: true });
@@ -89,7 +83,7 @@
// Don't bother with StorageLive and Dead for these temporaries,
// they are never assigned.
ExprKind::Break { .. } | ExprKind::Continue { .. } | ExprKind::Return { .. } => (),
- ExprKind::Block { body: hir::Block { expr: None, targeted_by_break: false, .. } }
+ ExprKind::Block { body: Block { expr: None, targeted_by_break: false, .. } }
if expr_ty.is_never() => {}
_ => {
this.cfg
@@ -114,7 +108,7 @@
}
}
- unpack!(block = this.into(temp_place, block, expr));
+ unpack!(block = this.expr_into_dest(temp_place, block, expr));
if let Some(temp_lifetime) = temp_lifetime {
this.schedule_drop(expr_span, temp_lifetime, temp, DropKind::Value);
diff --git a/compiler/rustc_mir_build/src/build/expr/category.rs b/compiler/rustc_mir_build/src/build/expr/category.rs
index 9320b58..0cadfa2 100644
--- a/compiler/rustc_mir_build/src/build/expr/category.rs
+++ b/compiler/rustc_mir_build/src/build/expr/category.rs
@@ -31,7 +31,7 @@
/// Determines the category for a given expression. Note that scope
/// and paren expressions have no category.
impl Category {
- crate fn of(ek: &ExprKind<'_>) -> Option<Category> {
+ crate fn of(ek: &ExprKind<'_, '_>) -> Option<Category> {
match *ek {
ExprKind::Scope { .. } => None,
diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs
index 235fe14..fc92e80 100644
--- a/compiler/rustc_mir_build/src/build/expr/into.rs
+++ b/compiler/rustc_mir_build/src/build/expr/into.rs
@@ -7,19 +7,20 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir as hir;
+use rustc_index::vec::Idx;
use rustc_middle::mir::*;
-use rustc_middle::ty::CanonicalUserTypeAnnotation;
+use rustc_middle::ty::{self, CanonicalUserTypeAnnotation};
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Compile `expr`, storing the result into `destination`, which
/// is assumed to be uninitialized.
- crate fn into_expr(
+ crate fn expr_into_dest(
&mut self,
destination: Place<'tcx>,
mut block: BasicBlock,
- expr: Expr<'tcx>,
+ expr: &Expr<'_, 'tcx>,
) -> BlockAnd<()> {
- debug!("into_expr(destination={:?}, block={:?}, expr={:?})", destination, block, expr);
+ debug!("expr_into_dest(destination={:?}, block={:?}, expr={:?})", destination, block, expr);
// since we frequently have to reference `self` from within a
// closure, where `self` would be shadowed, it's easier to
@@ -40,11 +41,11 @@
let region_scope = (region_scope, source_info);
ensure_sufficient_stack(|| {
this.in_scope(region_scope, lint_level, |this| {
- this.into(destination, block, value)
+ this.expr_into_dest(destination, block, value)
})
})
}
- ExprKind::Block { body: ast_block } => {
+ ExprKind::Block { body: ref ast_block } => {
this.ast_block(destination, block, ast_block, source_info)
}
ExprKind::Match { scrutinee, arms } => {
@@ -58,17 +59,17 @@
let mut then_block = this.cfg.start_new_block();
let mut else_block = this.cfg.start_new_block();
- let term = TerminatorKind::if_(this.hir.tcx(), operand, then_block, else_block);
+ let term = TerminatorKind::if_(this.tcx, operand, then_block, else_block);
this.cfg.terminate(block, source_info, term);
- unpack!(then_block = this.into(destination, then_block, then));
+ unpack!(then_block = this.expr_into_dest(destination, then_block, then));
else_block = if let Some(else_opt) = else_opt {
- unpack!(this.into(destination, else_block, else_opt))
+ unpack!(this.expr_into_dest(destination, else_block, else_opt))
} else {
// Body of the `if` expression without an `else` clause must return `()`, thus
// we implicitly generate a `else {}` if it is not specified.
let correct_si = this.source_info(expr_span.shrink_to_hi());
- this.cfg.push_assign_unit(else_block, correct_si, destination, this.hir.tcx());
+ this.cfg.push_assign_unit(else_block, correct_si, destination, this.tcx);
else_block
};
@@ -87,7 +88,6 @@
join_block.unit()
}
ExprKind::NeverToAny { source } => {
- let source = this.hir.mirror(source);
let is_call =
matches!(source.kind, ExprKind::Call { .. } | ExprKind::InlineAsm { .. });
@@ -132,25 +132,33 @@
LogicalOp::And => (else_block, false_block),
LogicalOp::Or => (true_block, else_block),
};
- let term = TerminatorKind::if_(this.hir.tcx(), lhs, blocks.0, blocks.1);
+ let term = TerminatorKind::if_(this.tcx, lhs, blocks.0, blocks.1);
this.cfg.terminate(block, source_info, term);
let rhs = unpack!(else_block = this.as_local_operand(else_block, rhs));
- let term = TerminatorKind::if_(this.hir.tcx(), rhs, true_block, false_block);
+ let term = TerminatorKind::if_(this.tcx, rhs, true_block, false_block);
this.cfg.terminate(else_block, source_info, term);
this.cfg.push_assign_constant(
true_block,
source_info,
destination,
- Constant { span: expr_span, user_ty: None, literal: this.hir.true_literal() },
+ Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: ty::Const::from_bool(this.tcx, true).into(),
+ },
);
this.cfg.push_assign_constant(
false_block,
source_info,
destination,
- Constant { span: expr_span, user_ty: None, literal: this.hir.false_literal() },
+ Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: ty::Const::from_bool(this.tcx, false).into(),
+ },
);
// Link up both branches:
@@ -188,7 +196,7 @@
// introduce a unit temporary as the destination for the loop body.
let tmp = this.get_unit_temp();
// Execute the body, branching back to the test.
- let body_block_end = unpack!(this.into(tmp, body_block, body));
+ let body_block_end = unpack!(this.expr_into_dest(tmp, body_block, body));
this.cfg.goto(body_block_end, source_info, loop_block);
// Loops are only exited by `break` expressions.
@@ -206,7 +214,7 @@
this.record_operands_moved(&args);
- debug!("into_expr: fn_span={:?}", fn_span);
+ debug!("expr_into_dest: fn_span={:?}", fn_span);
this.cfg.terminate(
block,
@@ -230,7 +238,7 @@
this.diverge_from(block);
success.unit()
}
- ExprKind::Use { source } => this.into(destination, block, source),
+ ExprKind::Use { source } => this.expr_into_dest(destination, block, source),
ExprKind::Borrow { arg, borrow_kind } => {
// We don't do this in `as_rvalue` because we use `as_place`
// for borrow expressions, so we cannot create an `RValue` that
@@ -241,8 +249,7 @@
BorrowKind::Shared => unpack!(block = this.as_read_only_place(block, arg)),
_ => unpack!(block = this.as_place(block, arg)),
};
- let borrow =
- Rvalue::Ref(this.hir.tcx().lifetimes.re_erased, borrow_kind, arg_place);
+ let borrow = Rvalue::Ref(this.tcx.lifetimes.re_erased, borrow_kind, arg_place);
this.cfg.push_assign(block, source_info, destination, borrow);
block.unit()
}
@@ -255,7 +262,7 @@
this.cfg.push_assign(block, source_info, destination, address_of);
block.unit()
}
- ExprKind::Adt { adt_def, variant_index, substs, user_ty, fields, base } => {
+ ExprKind::Adt { adt_def, variant_index, substs, user_ty, fields, ref base } => {
// See the notes for `ExprKind::Array` in `as_rvalue` and for
// `ExprKind::Borrow` above.
let is_union = adt_def.is_union();
@@ -270,7 +277,8 @@
.map(|f| (f.name, unpack!(block = this.as_operand(block, Some(scope), f.expr))))
.collect();
- let field_names = this.hir.all_fields(adt_def, variant_index);
+ let field_names: Vec<_> =
+ (0..adt_def.variants[variant_index].fields.len()).map(Field::new).collect();
let fields: Vec<_> = if let Some(FruInfo { base, field_types }) = base {
let place_builder = unpack!(block = this.as_place_builder(block, base));
@@ -288,7 +296,7 @@
this.consume_by_copy_or_move(
place_builder
.field(n, ty)
- .into_place(this.hir.tcx(), this.hir.typeck_results()),
+ .into_place(this.tcx, this.typeck_results),
)
}
})
@@ -325,7 +333,7 @@
use rustc_middle::mir;
let operands = operands
.into_iter()
- .map(|op| match op {
+ .map(|op| match *op {
thir::InlineAsmOperand::In { reg, expr } => mir::InlineAsmOperand::In {
reg,
value: unpack!(block = this.as_local_operand(block, expr)),
@@ -334,7 +342,9 @@
mir::InlineAsmOperand::Out {
reg,
late,
- place: expr.map(|expr| unpack!(block = this.as_place(block, expr))),
+ place: expr
+ .as_ref()
+ .map(|expr| unpack!(block = this.as_place(block, expr))),
}
}
thir::InlineAsmOperand::InOut { reg, late, expr } => {
@@ -352,7 +362,7 @@
reg,
late,
in_value: unpack!(block = this.as_local_operand(block, in_expr)),
- out_place: out_expr.map(|out_expr| {
+ out_place: out_expr.as_ref().map(|out_expr| {
unpack!(block = this.as_place(block, out_expr))
}),
}
@@ -394,7 +404,7 @@
| ExprKind::AssignOp { .. }
| ExprKind::LlvmInlineAsm { .. } => {
unpack!(block = this.stmt_expr(block, expr, None));
- this.cfg.push_assign_unit(block, source_info, destination, this.hir.tcx());
+ this.cfg.push_assign_unit(block, source_info, destination, this.tcx);
block.unit()
}
@@ -417,7 +427,7 @@
block.unit()
}
ExprKind::Index { .. } | ExprKind::Deref { .. } | ExprKind::Field { .. } => {
- debug_assert!(Category::of(&expr.kind) == Some(Category::Place));
+ debug_assert_eq!(Category::of(&expr.kind), Some(Category::Place));
// Create a "fake" temporary variable so that we check that the
// value is Sized. Usually, this is caught in type checking, but
@@ -426,8 +436,6 @@
this.local_decls.push(LocalDecl::new(expr.ty, expr.span));
}
- debug_assert!(Category::of(&expr.kind) == Some(Category::Place));
-
let place = unpack!(block = this.as_place(block, expr));
let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place));
this.cfg.push_assign(block, source_info, destination, rvalue);
diff --git a/compiler/rustc_mir_build/src/build/expr/mod.rs b/compiler/rustc_mir_build/src/build/expr/mod.rs
index ac8c7e7..539de80 100644
--- a/compiler/rustc_mir_build/src/build/expr/mod.rs
+++ b/compiler/rustc_mir_build/src/build/expr/mod.rs
@@ -9,7 +9,7 @@
//! a type that is not `Copy`, then using any of these functions will
//! "move" the value out of its current home (if any).
//!
-//! - `into` -- writes the value into a specific location, which
+//! - `expr_into_dest` -- writes the value into a specific location, which
//! should be uninitialized
//! - `as_operand` -- evaluates the value and yields an `Operand`,
//! suitable for use as an argument to an `Rvalue`
@@ -62,7 +62,7 @@
mod as_constant;
mod as_operand;
-mod as_place;
+pub mod as_place;
mod as_rvalue;
mod as_temp;
mod category;
diff --git a/compiler/rustc_mir_build/src/build/expr/stmt.rs b/compiler/rustc_mir_build/src/build/expr/stmt.rs
index f117689..f01315f 100644
--- a/compiler/rustc_mir_build/src/build/expr/stmt.rs
+++ b/compiler/rustc_mir_build/src/build/expr/stmt.rs
@@ -13,7 +13,7 @@
crate fn stmt_expr(
&mut self,
mut block: BasicBlock,
- expr: Expr<'tcx>,
+ expr: &Expr<'_, 'tcx>,
statement_scope: Option<region::Scope>,
) -> BlockAnd<()> {
let this = self;
@@ -21,29 +21,25 @@
let source_info = this.source_info(expr.span);
// Handle a number of expressions that don't need a destination at all. This
// avoids needing a mountain of temporary `()` variables.
- let expr2 = expr.clone();
match expr.kind {
ExprKind::Scope { region_scope, lint_level, value } => {
- let value = this.hir.mirror(value);
this.in_scope((region_scope, source_info), lint_level, |this| {
this.stmt_expr(block, value, statement_scope)
})
}
ExprKind::Assign { lhs, rhs } => {
- let lhs = this.hir.mirror(lhs);
- let rhs = this.hir.mirror(rhs);
let lhs_span = lhs.span;
// Note: we evaluate assignments right-to-left. This
// is better for borrowck interaction with overloaded
// operators like x[j] = x[i].
- debug!("stmt_expr Assign block_context.push(SubExpr) : {:?}", expr2);
+ debug!("stmt_expr Assign block_context.push(SubExpr) : {:?}", expr);
this.block_context.push(BlockFrame::SubExpr);
// Generate better code for things that don't need to be
// dropped.
- if this.hir.needs_drop(lhs.ty) {
+ if lhs.ty.needs_drop(this.tcx, this.param_env) {
let rhs = unpack!(block = this.as_local_operand(block, rhs));
let lhs = unpack!(block = this.as_place(block, lhs));
unpack!(block = this.build_drop_and_replace(block, lhs_span, lhs, rhs));
@@ -65,10 +61,9 @@
// only affects weird things like `x += {x += 1; x}`
// -- is that equal to `x + (x + 1)` or `2*(x+1)`?
- let lhs = this.hir.mirror(lhs);
let lhs_ty = lhs.ty;
- debug!("stmt_expr AssignOp block_context.push(SubExpr) : {:?}", expr2);
+ debug!("stmt_expr AssignOp block_context.push(SubExpr) : {:?}", expr);
this.block_context.push(BlockFrame::SubExpr);
// As above, RTL.
@@ -90,24 +85,27 @@
ExprKind::Continue { label } => {
this.break_scope(block, None, BreakableTarget::Continue(label), source_info)
}
- ExprKind::Break { label, value } => {
- this.break_scope(block, value, BreakableTarget::Break(label), source_info)
- }
+ ExprKind::Break { label, value } => this.break_scope(
+ block,
+ value.as_deref(),
+ BreakableTarget::Break(label),
+ source_info,
+ ),
ExprKind::Return { value } => {
- this.break_scope(block, value, BreakableTarget::Return, source_info)
+ this.break_scope(block, value.as_deref(), BreakableTarget::Return, source_info)
}
ExprKind::LlvmInlineAsm { asm, outputs, inputs } => {
- debug!("stmt_expr LlvmInlineAsm block_context.push(SubExpr) : {:?}", expr2);
+ debug!("stmt_expr LlvmInlineAsm block_context.push(SubExpr) : {:?}", expr);
this.block_context.push(BlockFrame::SubExpr);
let outputs = outputs
.into_iter()
- .map(|output| unpack!(block = this.as_place(block, output)))
+ .map(|output| unpack!(block = this.as_place(block, &output)))
.collect::<Vec<_>>()
.into_boxed_slice();
let inputs = inputs
.into_iter()
.map(|input| {
- (input.span(), unpack!(block = this.as_local_operand(block, input)))
+ (input.span, unpack!(block = this.as_local_operand(block, &input)))
})
.collect::<Vec<_>>()
.into_boxed_slice();
@@ -140,15 +138,15 @@
// it is usually better to focus on `the_value` rather
// than the entirety of block(s) surrounding it.
let adjusted_span = (|| {
- if let ExprKind::Block { body } = expr.kind {
+ if let ExprKind::Block { body } = &expr.kind {
if let Some(tail_expr) = &body.expr {
- let mut expr = tail_expr;
- while let rustc_hir::ExprKind::Block(subblock, _label) = &expr.kind {
- if let Some(subtail_expr) = &subblock.expr {
- expr = subtail_expr
- } else {
- break;
- }
+ let mut expr = &*tail_expr;
+ while let ExprKind::Block {
+ body: Block { expr: Some(nested_expr), .. },
+ }
+ | ExprKind::Scope { value: nested_expr, .. } = &expr.kind
+ {
+ expr = nested_expr;
}
this.block_context.push(BlockFrame::TailExpr {
tail_result_is_ignored: true,
diff --git a/compiler/rustc_mir_build/src/build/into.rs b/compiler/rustc_mir_build/src/build/into.rs
deleted file mode 100644
index 7264e49..0000000
--- a/compiler/rustc_mir_build/src/build/into.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-//! In general, there are a number of things for which it's convenient
-//! to just call `builder.into` and have it emit its result into a
-//! given location. This is basically for expressions or things that can be
-//! wrapped up as expressions (e.g., blocks). To make this ergonomic, we use this
-//! latter `EvalInto` trait.
-
-use crate::build::{BlockAnd, Builder};
-use crate::thir::*;
-use rustc_middle::mir::*;
-
-pub(in crate::build) trait EvalInto<'tcx> {
- fn eval_into(
- self,
- builder: &mut Builder<'_, 'tcx>,
- destination: Place<'tcx>,
- block: BasicBlock,
- ) -> BlockAnd<()>;
-}
-
-impl<'a, 'tcx> Builder<'a, 'tcx> {
- crate fn into<E>(
- &mut self,
- destination: Place<'tcx>,
- block: BasicBlock,
- expr: E,
- ) -> BlockAnd<()>
- where
- E: EvalInto<'tcx>,
- {
- expr.eval_into(self, destination, block)
- }
-}
-
-impl<'tcx> EvalInto<'tcx> for ExprRef<'tcx> {
- fn eval_into(
- self,
- builder: &mut Builder<'_, 'tcx>,
- destination: Place<'tcx>,
- block: BasicBlock,
- ) -> BlockAnd<()> {
- let expr = builder.hir.mirror(self);
- builder.into_expr(destination, block, expr)
- }
-}
-
-impl<'tcx> EvalInto<'tcx> for Expr<'tcx> {
- fn eval_into(
- self,
- builder: &mut Builder<'_, 'tcx>,
- destination: Place<'tcx>,
- block: BasicBlock,
- ) -> BlockAnd<()> {
- builder.into_expr(destination, block, self)
- }
-}
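The deleted `into.rs` existed only so callers could write `builder.into(dest, block, expr)` over two expression representations via the `EvalInto` trait; with a single THIR `Expr` type, that indirection collapses into the direct `expr_into_dest` call used above. A toy sketch of the removed shape next to the direct one (types and names are illustrative, not the rustc ones):

```
// Illustrative stand-ins; the real builder writes MIR into `destination`.
struct Expr(String);

struct Builder {
    emitted: Vec<String>,
}

// The removed indirection: a trait so that several expression-like types
// could all be routed through one `builder.into(...)` entry point.
trait EvalInto {
    fn eval_into(self, builder: &mut Builder, destination: &str);
}

impl EvalInto for Expr {
    fn eval_into(self, builder: &mut Builder, destination: &str) {
        builder.expr_into_dest(destination, &self);
    }
}

impl Builder {
    fn into<E: EvalInto>(&mut self, destination: &str, expr: E) {
        expr.eval_into(self, destination);
    }

    // The direct replacement: with only one expression type, callers can
    // name the destination-writing method explicitly.
    fn expr_into_dest(&mut self, destination: &str, expr: &Expr) {
        self.emitted.push(format!("{} = {}", destination, expr.0));
    }
}

fn main() {
    let mut b = Builder { emitted: Vec::new() };
    b.into("_1", Expr("1 + 2".to_string()));
    b.expr_into_dest("_2", &Expr("3 * 4".to_string()));
    println!("{:?}", b.emitted);
}
```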
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
index fde007e..73fd3f0 100644
--- a/compiler/rustc_mir_build/src/build/matches/mod.rs
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -5,6 +5,7 @@
//! This also includes code for pattern bindings in `let` statements and
//! function parameters.
+use crate::build::expr::as_place::PlaceBuilder;
use crate::build::scope::DropKind;
use crate::build::ForGuard::{self, OutsideGuard, RefWithinGuard};
use crate::build::{BlockAnd, BlockAndExtension, Builder};
@@ -89,14 +90,14 @@
destination: Place<'tcx>,
span: Span,
mut block: BasicBlock,
- scrutinee: ExprRef<'tcx>,
- arms: Vec<Arm<'tcx>>,
+ scrutinee: &Expr<'_, 'tcx>,
+ arms: &[Arm<'_, 'tcx>],
) -> BlockAnd<()> {
- let scrutinee_span = scrutinee.span();
+ let scrutinee_span = scrutinee.span;
let scrutinee_place =
unpack!(block = self.lower_scrutinee(block, scrutinee, scrutinee_span,));
- let mut arm_candidates = self.create_match_candidates(scrutinee_place, &arms);
+ let mut arm_candidates = self.create_match_candidates(scrutinee_place.clone(), &arms);
let match_has_guard = arms.iter().any(|arm| arm.guard.is_some());
let mut candidates =
@@ -119,10 +120,10 @@
fn lower_scrutinee(
&mut self,
mut block: BasicBlock,
- scrutinee: ExprRef<'tcx>,
+ scrutinee: &Expr<'_, 'tcx>,
scrutinee_span: Span,
- ) -> BlockAnd<Place<'tcx>> {
- let scrutinee_place = unpack!(block = self.as_place(block, scrutinee));
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ let scrutinee_place_builder = unpack!(block = self.as_place_builder(block, scrutinee));
// Matching on a `scrutinee_place` with an uninhabited type doesn't
// generate any memory reads by itself, and so if the place "expression"
// contains unsafe operations like raw pointer dereferences or union
@@ -140,23 +141,29 @@
// check safety.
let cause_matched_place = FakeReadCause::ForMatchedPlace;
let source_info = self.source_info(scrutinee_span);
- self.cfg.push_fake_read(block, source_info, cause_matched_place, scrutinee_place);
- block.and(scrutinee_place)
+ if let Ok(scrutinee_builder) =
+ scrutinee_place_builder.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ let scrutinee_place = scrutinee_builder.into_place(self.tcx, self.typeck_results);
+ self.cfg.push_fake_read(block, source_info, cause_matched_place, scrutinee_place);
+ }
+
+ block.and(scrutinee_place_builder)
}
/// Create the initial `Candidate`s for a `match` expression.
fn create_match_candidates<'pat>(
&mut self,
- scrutinee: Place<'tcx>,
- arms: &'pat [Arm<'tcx>],
- ) -> Vec<(&'pat Arm<'tcx>, Candidate<'pat, 'tcx>)> {
+ scrutinee: PlaceBuilder<'tcx>,
+ arms: &'pat [Arm<'pat, 'tcx>],
+ ) -> Vec<(&'pat Arm<'pat, 'tcx>, Candidate<'pat, 'tcx>)> {
// Assemble a list of candidates: there is one candidate per pattern,
// which means there may be more than one candidate *per arm*.
arms.iter()
.map(|arm| {
let arm_has_guard = arm.guard.is_some();
- let arm_candidate = Candidate::new(scrutinee, &arm.pattern, arm_has_guard);
+ let arm_candidate = Candidate::new(scrutinee.clone(), &arm.pattern, arm_has_guard);
(arm, arm_candidate)
})
.collect()
@@ -222,9 +229,9 @@
fn lower_match_arms(
&mut self,
destination: Place<'tcx>,
- scrutinee_place: Place<'tcx>,
+ scrutinee_place_builder: PlaceBuilder<'tcx>,
scrutinee_span: Span,
- arm_candidates: Vec<(&'_ Arm<'tcx>, Candidate<'_, 'tcx>)>,
+ arm_candidates: Vec<(&'_ Arm<'_, 'tcx>, Candidate<'_, 'tcx>)>,
outer_source_info: SourceInfo,
fake_borrow_temps: Vec<(Place<'tcx>, Local)>,
) -> BlockAnd<()> {
@@ -236,13 +243,33 @@
let arm_source_info = self.source_info(arm.span);
let arm_scope = (arm.scope, arm_source_info);
self.in_scope(arm_scope, arm.lint_level, |this| {
- let body = this.hir.mirror(arm.body.clone());
+ // `try_upvars_resolved` may fail if it is unable to resolve the given
+ // `PlaceBuilder` inside a closure. In this case, we don't want to include
+ // a scrutinee place. `scrutinee_place_builder` will fail to be resolved
+ // if the only match arm is a wildcard (`_`).
+ // Example:
+ // ```
+ // let foo = (0, 1);
+ // let c = || {
+ // match foo { _ => () };
+ // };
+ // ```
+ let mut opt_scrutinee_place: Option<(Option<&Place<'tcx>>, Span)> = None;
+ let scrutinee_place: Place<'tcx>;
+ if let Ok(scrutinee_builder) = scrutinee_place_builder
+ .clone()
+ .try_upvars_resolved(this.tcx, this.typeck_results)
+ {
+ scrutinee_place =
+ scrutinee_builder.into_place(this.tcx, this.typeck_results);
+ opt_scrutinee_place = Some((Some(&scrutinee_place), scrutinee_span));
+ }
let scope = this.declare_bindings(
None,
arm.span,
&arm.pattern,
ArmHasGuard(arm.guard.is_some()),
- Some((Some(&scrutinee_place), scrutinee_span)),
+ opt_scrutinee_place,
);
let arm_block = this.bind_pattern(
@@ -259,7 +286,7 @@
this.source_scope = source_scope;
}
- this.into(destination, arm_block, body)
+ this.expr_into_dest(destination, arm_block, &arm.body)
})
})
.collect();
@@ -286,7 +313,7 @@
&mut self,
outer_source_info: SourceInfo,
candidate: Candidate<'_, 'tcx>,
- guard: Option<&Guard<'tcx>>,
+ guard: Option<&Guard<'_, 'tcx>>,
fake_borrow_temps: &Vec<(Place<'tcx>, Local)>,
scrutinee_span: Span,
arm_span: Option<Span>,
@@ -362,14 +389,14 @@
&mut self,
mut block: BasicBlock,
irrefutable_pat: Pat<'tcx>,
- initializer: ExprRef<'tcx>,
+ initializer: &Expr<'_, 'tcx>,
) -> BlockAnd<()> {
match *irrefutable_pat.kind {
// Optimize the case of `let x = ...` to write directly into `x`
PatKind::Binding { mode: BindingMode::ByValue, var, subpattern: None, .. } => {
let place =
self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true);
- unpack!(block = self.into(place, block, initializer));
+ unpack!(block = self.expr_into_dest(place, block, initializer));
// Inject a fake read, see comments on `FakeReadCause::ForLet`.
let source_info = self.source_info(irrefutable_pat.span);
@@ -404,7 +431,7 @@
} => {
let place =
self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true);
- unpack!(block = self.into(place, block, initializer));
+ unpack!(block = self.expr_into_dest(place, block, initializer));
// Inject a fake read, see comments on `FakeReadCause::ForLet`.
let pattern_source_info = self.source_info(irrefutable_pat.span);
@@ -414,7 +441,7 @@
let ty_source_info = self.source_info(user_ty_span);
let user_ty = pat_ascription_ty.user_ty(
&mut self.canonical_user_type_annotations,
- place.ty(&self.local_decls, self.hir.tcx()).ty,
+ place.ty(&self.local_decls, self.tcx).ty,
ty_source_info.span,
);
self.cfg.push(
@@ -447,8 +474,8 @@
}
_ => {
- let place = unpack!(block = self.as_place(block, initializer));
- self.place_into_pattern(block, irrefutable_pat, place, true)
+ let place_builder = unpack!(block = self.as_place_builder(block, initializer));
+ self.place_into_pattern(block, irrefutable_pat, place_builder, true)
}
}
}
@@ -457,14 +484,12 @@
&mut self,
block: BasicBlock,
irrefutable_pat: Pat<'tcx>,
- initializer: Place<'tcx>,
+ initializer: PlaceBuilder<'tcx>,
set_match_place: bool,
) -> BlockAnd<()> {
- let mut candidate = Candidate::new(initializer, &irrefutable_pat, false);
-
+ let mut candidate = Candidate::new(initializer.clone(), &irrefutable_pat, false);
let fake_borrow_temps =
self.lower_match_tree(block, irrefutable_pat.span, false, &mut [&mut candidate]);
-
// For matches and function arguments, the place that is being matched
// can be set when creating the variables. But the place for
// let PATTERN = ... might not even exist until we do the assignment.
@@ -479,7 +504,27 @@
VarBindingForm { opt_match_place: Some((ref mut match_place, _)), .. },
)))) = self.local_decls[local].local_info
{
- *match_place = Some(initializer);
+ // `try_upvars_resolved` may fail if it is unable to resolve the given
+ // `PlaceBuilder` inside a closure. In this case, we don't want to include
+ // a scrutinee place. `scrutinee_place_builder` will fail for destructured
+ // assignments. This is because a closure only captures the precise places
+ // that it will read and as a result a closure may not capture the entire
+ // tuple/struct and rather have individual places that will be read in the
+ // final MIR.
+ // Example:
+ // ```
+ // let foo = (0, 1);
+ // let c = || {
+ // let (v1, v2) = foo;
+ // };
+ // ```
+ if let Ok(match_pair_resolved) =
+ initializer.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ let place =
+ match_pair_resolved.into_place(self.tcx, self.typeck_results);
+ *match_place = Some(place);
+ }
} else {
bug!("Let binding to non-user variable.")
}
@@ -556,7 +601,7 @@
let local_id = self.var_local_id(var, for_guard);
let source_info = self.source_info(span);
self.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(local_id) });
- let region_scope = self.hir.region_scope_tree.var_scope(var.local_id);
+ let region_scope = self.region_scope_tree.var_scope(var.local_id);
if schedule_drop {
self.schedule_drop(span, region_scope, local_id, DropKind::Storage);
}
@@ -565,7 +610,7 @@
crate fn schedule_drop_for_binding(&mut self, var: HirId, span: Span, for_guard: ForGuard) {
let local_id = self.var_local_id(var, for_guard);
- let region_scope = self.hir.region_scope_tree.var_scope(var.local_id);
+ let region_scope = self.region_scope_tree.var_scope(var.local_id);
self.schedule_drop(span, region_scope, local_id, DropKind::Value);
}
@@ -718,7 +763,7 @@
}
impl<'tcx, 'pat> Candidate<'pat, 'tcx> {
- fn new(place: Place<'tcx>, pattern: &'pat Pat<'tcx>, has_guard: bool) -> Self {
+ fn new(place: PlaceBuilder<'tcx>, pattern: &'pat Pat<'tcx>, has_guard: bool) -> Self {
Candidate {
span: pattern.span,
has_guard,
@@ -792,7 +837,7 @@
#[derive(Clone, Debug)]
crate struct MatchPair<'pat, 'tcx> {
// this place...
- place: Place<'tcx>,
+ place: PlaceBuilder<'tcx>,
// ... must match this pattern.
pattern: &'pat Pat<'tcx>,
@@ -1071,7 +1116,7 @@
fake_borrows.insert(Place {
local: source.local,
- projection: self.hir.tcx().intern_place_elems(proj_base),
+ projection: self.tcx.intern_place_elems(proj_base),
});
}
}
@@ -1199,7 +1244,7 @@
&mut otherwise,
pats,
or_span,
- place,
+ place.clone(),
fake_borrows,
);
});
@@ -1225,12 +1270,14 @@
otherwise: &mut Option<BasicBlock>,
pats: &'pat [Pat<'tcx>],
or_span: Span,
- place: Place<'tcx>,
+ place: PlaceBuilder<'tcx>,
fake_borrows: &mut Option<FxHashSet<Place<'tcx>>>,
) {
debug!("test_or_pattern:\ncandidate={:#?}\npats={:#?}", candidate, pats);
- let mut or_candidates: Vec<_> =
- pats.iter().map(|pat| Candidate::new(place, pat, candidate.has_guard)).collect();
+ let mut or_candidates: Vec<_> = pats
+ .iter()
+ .map(|pat| Candidate::new(place.clone(), pat, candidate.has_guard))
+ .collect();
let mut or_candidate_refs: Vec<_> = or_candidates.iter_mut().collect();
let otherwise = if candidate.otherwise_block.is_some() {
&mut candidate.otherwise_block
@@ -1413,7 +1460,7 @@
// extract the match-pair from the highest priority candidate
let match_pair = &candidates.first().unwrap().match_pairs[0];
let mut test = self.test(match_pair);
- let match_place = match_pair.place;
+ let match_place = match_pair.place.clone();
// most of the time, the test to perform is simply a function
// of the main candidate; but for a test like SwitchInt, we
@@ -1439,7 +1486,12 @@
// Insert a Shallow borrow of any places that is switched on.
if let Some(fb) = fake_borrows {
- fb.insert(match_place);
+ if let Ok(match_place_resolved) =
+ match_place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ let resolved_place = match_place_resolved.into_place(self.tcx, self.typeck_results);
+ fb.insert(resolved_place);
+ }
}
// perform the test, branching to one of N blocks. For each of
@@ -1457,7 +1509,7 @@
// encounter a candidate where the test is not relevant; at
// that point, we stop sorting.
while let Some(candidate) = candidates.first_mut() {
- if let Some(idx) = self.sort_candidate(&match_place, &test, candidate) {
+ if let Some(idx) = self.sort_candidate(&match_place.clone(), &test, candidate) {
let (candidate, rest) = candidates.split_first_mut().unwrap();
target_candidates[idx].push(candidate);
candidates = rest;
@@ -1550,7 +1602,7 @@
fake_borrows: &'b FxHashSet<Place<'tcx>>,
temp_span: Span,
) -> Vec<(Place<'tcx>, Local)> {
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
debug!("add_fake_borrows fake_borrows = {:?}", fake_borrows);
@@ -1613,7 +1665,7 @@
&mut self,
candidate: Candidate<'pat, 'tcx>,
parent_bindings: &[(Vec<Binding<'tcx>>, Vec<Ascription<'tcx>>)],
- guard: Option<&Guard<'tcx>>,
+ guard: Option<&Guard<'_, 'tcx>>,
fake_borrows: &Vec<(Place<'tcx>, Local)>,
scrutinee_span: Span,
arm_span: Option<Span>,
@@ -1727,7 +1779,7 @@
// * So we eagerly create the reference for the arm and then take a
// reference to that.
if let Some(guard) = guard {
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
let bindings = parent_bindings
.iter()
.flat_map(|(bindings, _)| bindings)
@@ -1749,37 +1801,46 @@
let (guard_span, (post_guard_block, otherwise_post_guard_block)) = match guard {
Guard::If(e) => {
- let e = self.hir.mirror(e.clone());
let source_info = self.source_info(e.span);
(e.span, self.test_bool(block, e, source_info))
}
Guard::IfLet(pat, scrutinee) => {
- let scrutinee_span = scrutinee.span();
- let scrutinee_place = unpack!(
- block = self.lower_scrutinee(block, scrutinee.clone(), scrutinee_span)
- );
- let mut guard_candidate = Candidate::new(scrutinee_place, &pat, false);
+ let scrutinee_span = scrutinee.span;
+ let scrutinee_place_builder =
+ unpack!(block = self.lower_scrutinee(block, scrutinee, scrutinee_span));
+ let mut guard_candidate =
+ Candidate::new(scrutinee_place_builder.clone(), &pat, false);
let wildcard = Pat::wildcard_from_ty(pat.ty);
- let mut otherwise_candidate = Candidate::new(scrutinee_place, &wildcard, false);
+ let mut otherwise_candidate =
+ Candidate::new(scrutinee_place_builder.clone(), &wildcard, false);
let fake_borrow_temps = self.lower_match_tree(
block,
pat.span,
false,
&mut [&mut guard_candidate, &mut otherwise_candidate],
);
+ let mut opt_scrutinee_place: Option<(Option<&Place<'tcx>>, Span)> = None;
+ let scrutinee_place: Place<'tcx>;
+ if let Ok(scrutinee_builder) =
+ scrutinee_place_builder.try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ scrutinee_place =
+ scrutinee_builder.into_place(self.tcx, self.typeck_results);
+ opt_scrutinee_place = Some((Some(&scrutinee_place), scrutinee_span));
+ }
self.declare_bindings(
None,
pat.span.to(arm_span.unwrap()),
pat,
ArmHasGuard(false),
- Some((Some(&scrutinee_place), scrutinee.span())),
+ opt_scrutinee_place,
);
let post_guard_block = self.bind_pattern(
self.source_info(pat.span),
guard_candidate,
None,
&fake_borrow_temps,
- scrutinee.span(),
+ scrutinee.span,
None,
None,
);
@@ -1888,7 +1949,7 @@
let user_ty = ascription.user_ty.clone().user_ty(
&mut self.canonical_user_type_annotations,
- ascription.source.ty(&self.local_decls, self.hir.tcx()).ty,
+ ascription.source.ty(&self.local_decls, self.tcx).ty,
source_info.span,
);
self.cfg.push(
@@ -1917,7 +1978,7 @@
// Assign each of the bindings. Since we are binding for a
// guard expression, this will never trigger moves out of the
// candidate.
- let re_erased = self.hir.tcx().lifetimes.re_erased;
+ let re_erased = self.tcx.lifetimes.re_erased;
for binding in bindings {
debug!("bind_matched_candidate_for_guard(binding={:?})", binding);
let source_info = self.source_info(binding.span);
@@ -1966,7 +2027,7 @@
{
debug!("bind_matched_candidate_for_arm_body(block={:?})", block);
- let re_erased = self.hir.tcx().lifetimes.re_erased;
+ let re_erased = self.tcx.lifetimes.re_erased;
// Assign each of the bindings. This may trigger moves out of the candidate.
for binding in bindings {
let source_info = self.source_info(binding.span);
@@ -2015,7 +2076,7 @@
var_id, name, mode, var_ty, visibility_scope, source_info
);
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
let debug_source_info = SourceInfo { span: source_info.span, scope: visibility_scope };
let binding_mode = match mode {
BindingMode::ByValue => ty::BindingMode::BindByValue(mutability),
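
The hunks above thread a `PlaceBuilder` through match lowering so the scrutinee place is only materialized when `try_upvars_resolved` can actually resolve it against the closure's captured places. A minimal, self-contained sketch of the two source shapes the comments describe (a wildcard-only match and a destructuring `let` inside a closure), assuming the precise-capture behavior those comments refer to:

```
fn main() {
    let foo = (0, 1);

    // Only a wildcard arm: the closure reads nothing from `foo`, so there is
    // no captured place for the scrutinee builder to resolve to.
    let c = || {
        match foo {
            _ => (),
        }
    };
    c();

    // Destructuring reads the individual fields, so the closure may capture
    // `foo.0` and `foo.1` rather than the whole tuple.
    let d = || {
        let (v1, v2) = foo;
        let _ = (v1, v2);
    };
    d();
}
```
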
diff --git a/compiler/rustc_mir_build/src/build/matches/simplify.rs b/compiler/rustc_mir_build/src/build/matches/simplify.rs
index ddfaeaf..3ad143a 100644
--- a/compiler/rustc_mir_build/src/build/matches/simplify.rs
+++ b/compiler/rustc_mir_build/src/build/matches/simplify.rs
@@ -12,11 +12,11 @@
//! sort of test: for example, testing which variant an enum is, or
//! testing a value against a constant.
+use crate::build::expr::as_place::PlaceBuilder;
use crate::build::matches::{Ascription, Binding, Candidate, MatchPair};
use crate::build::Builder;
use crate::thir::{self, *};
use rustc_hir::RangeEnd;
-use rustc_middle::mir::Place;
use rustc_middle::ty;
use rustc_middle::ty::layout::IntegerExt;
use rustc_target::abi::{Integer, Size};
@@ -68,11 +68,12 @@
let match_pairs = mem::take(&mut candidate.match_pairs);
if let [MatchPair { pattern: Pat { kind: box PatKind::Or { pats }, .. }, place }] =
- *match_pairs
+ &*match_pairs
{
existing_bindings.extend_from_slice(&new_bindings);
mem::swap(&mut candidate.bindings, &mut existing_bindings);
- candidate.subcandidates = self.create_or_subcandidates(candidate, place, pats);
+ candidate.subcandidates =
+ self.create_or_subcandidates(candidate, place.clone(), pats);
return true;
}
@@ -125,12 +126,12 @@
fn create_or_subcandidates<'pat>(
&mut self,
candidate: &Candidate<'pat, 'tcx>,
- place: Place<'tcx>,
+ place: PlaceBuilder<'tcx>,
pats: &'pat [Pat<'tcx>],
) -> Vec<Candidate<'pat, 'tcx>> {
pats.iter()
.map(|pat| {
- let mut candidate = Candidate::new(place, pat, candidate.has_guard);
+ let mut candidate = Candidate::new(place.clone(), pat, candidate.has_guard);
self.simplify_candidate(&mut candidate);
candidate
})
@@ -147,18 +148,17 @@
match_pair: MatchPair<'pat, 'tcx>,
candidate: &mut Candidate<'pat, 'tcx>,
) -> Result<(), MatchPair<'pat, 'tcx>> {
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
match *match_pair.pattern.kind {
PatKind::AscribeUserType {
ref subpattern,
ascription: thir::pattern::Ascription { variance, user_ty, user_ty_span },
} => {
// Apply the type ascription to the value at `match_pair.place`, which is the
- // value being matched, taking the variance field into account.
candidate.ascriptions.push(Ascription {
span: user_ty_span,
user_ty,
- source: match_pair.place,
+ source: match_pair.place.clone().into_place(self.tcx, self.typeck_results),
variance,
});
@@ -177,7 +177,7 @@
name,
mutability,
span: match_pair.pattern.span,
- source: match_pair.place,
+ source: match_pair.place.clone().into_place(self.tcx, self.typeck_results),
var_id: var,
var_ty: ty,
binding_mode: mode,
@@ -251,21 +251,23 @@
PatKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
let irrefutable = adt_def.variants.iter_enumerated().all(|(i, v)| {
i == variant_index || {
- self.hir.tcx().features().exhaustive_patterns
+ self.tcx.features().exhaustive_patterns
&& !v
.uninhabited_from(
- self.hir.tcx(),
+ self.tcx,
substs,
adt_def.adt_kind(),
- self.hir.param_env,
+ self.param_env,
)
.is_empty()
}
}) && (adt_def.did.is_local()
|| !adt_def.is_variant_list_non_exhaustive());
if irrefutable {
- let place = tcx.mk_place_downcast(match_pair.place, adt_def, variant_index);
- candidate.match_pairs.extend(self.field_match_pairs(place, subpatterns));
+ let place_builder = match_pair.place.downcast(adt_def, variant_index);
+ candidate
+ .match_pairs
+ .extend(self.field_match_pairs(place_builder, subpatterns));
Ok(())
} else {
Err(match_pair)
@@ -290,8 +292,8 @@
}
PatKind::Deref { ref subpattern } => {
- let place = tcx.mk_place_deref(match_pair.place);
- candidate.match_pairs.push(MatchPair::new(place, subpattern));
+ let place_builder = match_pair.place.deref();
+ candidate.match_pairs.push(MatchPair::new(place_builder, subpattern));
Ok(())
}
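
simplify.rs now keeps projections on the `PlaceBuilder` (`downcast`, `field`, `deref`) instead of eagerly interning `Place`s. A surface-level sketch, in ordinary Rust, of the pattern shapes those projections correspond to; the MIR projections themselves are only implied:

```
fn main() {
    // `Some(ref x)` lowers to a downcast projection ("as Some") followed by
    // a field projection `.0` on the scrutinee.
    let opt: Option<String> = Some(String::from("hi"));
    match opt {
        Some(ref x) => println!("len = {}", x.len()),
        None => println!("nothing"),
    }

    // A reference pattern adds a deref projection before the subpattern.
    let n = 7u32;
    let r = &n;
    match r {
        &7 => println!("seven"),
        _ => println!("other"),
    }
}
```
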
diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs
index 126fb95..b082169 100644
--- a/compiler/rustc_mir_build/src/build/matches/test.rs
+++ b/compiler/rustc_mir_build/src/build/matches/test.rs
@@ -5,6 +5,7 @@
// identify what tests are needed, perform the tests, and then filter
// the candidates based on the result.
+use crate::build::expr::as_place::PlaceBuilder;
use crate::build::matches::{Candidate, MatchPair, Test, TestKind};
use crate::build::Builder;
use crate::thir::pattern::compare_const_vals;
@@ -13,9 +14,11 @@
use rustc_hir::{LangItem, RangeEnd};
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::*;
+use rustc_middle::ty::subst::{GenericArg, Subst};
use rustc_middle::ty::util::IntTypeExt;
-use rustc_middle::ty::{self, adjustment::PointerCast, Ty};
-use rustc_span::symbol::sym;
+use rustc_middle::ty::{self, adjustment::PointerCast, Ty, TyCtxt};
+use rustc_span::def_id::DefId;
+use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::VariantIdx;
use std::cmp::Ordering;
@@ -51,7 +54,7 @@
PatKind::Constant { value } => Test {
span: match_pair.pattern.span,
- kind: TestKind::Eq { value, ty: match_pair.pattern.ty.clone() },
+ kind: TestKind::Eq { value, ty: match_pair.pattern.ty },
},
PatKind::Range(range) => {
@@ -79,7 +82,7 @@
pub(super) fn add_cases_to_switch<'pat>(
&mut self,
- test_place: &Place<'tcx>,
+ test_place: &PlaceBuilder<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
switch_ty: Ty<'tcx>,
options: &mut FxIndexMap<&'tcx ty::Const<'tcx>, u128>,
@@ -93,9 +96,9 @@
match *match_pair.pattern.kind {
PatKind::Constant { value } => {
- options.entry(value).or_insert_with(|| {
- value.eval_bits(self.hir.tcx(), self.hir.param_env, switch_ty)
- });
+ options
+ .entry(value)
+ .or_insert_with(|| value.eval_bits(self.tcx, self.param_env, switch_ty));
true
}
PatKind::Variant { .. } => {
@@ -121,7 +124,7 @@
pub(super) fn add_variants_to_switch<'pat>(
&mut self,
- test_place: &Place<'tcx>,
+ test_place: &PlaceBuilder<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
variants: &mut BitSet<VariantIdx>,
) -> bool {
@@ -149,15 +152,23 @@
pub(super) fn perform_test(
&mut self,
block: BasicBlock,
- place: Place<'tcx>,
+ place_builder: PlaceBuilder<'tcx>,
test: &Test<'tcx>,
make_target_blocks: impl FnOnce(&mut Self) -> Vec<BasicBlock>,
) {
+ let place: Place<'tcx>;
+ if let Ok(test_place_builder) =
+ place_builder.try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ place = test_place_builder.into_place(self.tcx, self.typeck_results);
+ } else {
+ return;
+ }
debug!(
"perform_test({:?}, {:?}: {:?}, {:?})",
block,
place,
- place.ty(&self.local_decls, self.hir.tcx()),
+ place.ty(&self.local_decls, self.tcx),
test
);
@@ -169,7 +180,7 @@
let num_enum_variants = adt_def.variants.len();
debug_assert_eq!(target_blocks.len(), num_enum_variants + 1);
let otherwise_block = *target_blocks.last().unwrap();
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
let switch_targets = SwitchTargets::new(
adt_def.discriminants(tcx).filter_map(|(idx, discr)| {
if variants.contains(idx) {
@@ -217,7 +228,7 @@
0 => (second_bb, first_bb),
v => span_bug!(test.span, "expected boolean value but got {:?}", v),
};
- TerminatorKind::if_(self.hir.tcx(), Operand::Copy(place), true_bb, false_bb)
+ TerminatorKind::if_(self.tcx, Operand::Copy(place), true_bb, false_bb)
} else {
bug!("`TestKind::SwitchInt` on `bool` should have two targets")
}
@@ -292,7 +303,7 @@
TestKind::Len { len, op } => {
let target_blocks = make_target_blocks(self);
- let usize_ty = self.hir.usize_ty();
+ let usize_ty = self.tcx.types.usize;
let actual = self.temp(usize_ty, test.span);
// actual = len(place)
@@ -331,17 +342,17 @@
left: Operand<'tcx>,
right: Operand<'tcx>,
) {
- let bool_ty = self.hir.bool_ty();
+ let bool_ty = self.tcx.types.bool;
let result = self.temp(bool_ty, source_info.span);
// result = op(left, right)
- self.cfg.push_assign(block, source_info, result, Rvalue::BinaryOp(op, left, right));
+ self.cfg.push_assign(block, source_info, result, Rvalue::BinaryOp(op, box (left, right)));
// branch based on result
self.cfg.terminate(
block,
source_info,
- TerminatorKind::if_(self.hir.tcx(), Operand::Move(result), success_block, fail_block),
+ TerminatorKind::if_(self.tcx, Operand::Move(result), success_block, fail_block),
);
}
@@ -377,7 +388,7 @@
// nothing to do, neither is an array
(None, None) => {}
(Some((region, elem_ty, _)), _) | (None, Some((region, elem_ty, _))) => {
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
// make both a slice
ty = tcx.mk_imm_ref(region, tcx.mk_slice(elem_ty));
if opt_ref_ty.is_some() {
@@ -408,10 +419,10 @@
_ => bug!("non_scalar_compare called on non-reference type: {}", ty),
};
- let eq_def_id = self.hir.tcx().require_lang_item(LangItem::PartialEq, None);
- let method = self.hir.trait_method(eq_def_id, sym::eq, deref_ty, &[deref_ty.into()]);
+ let eq_def_id = self.tcx.require_lang_item(LangItem::PartialEq, None);
+ let method = trait_method(self.tcx, eq_def_id, sym::eq, deref_ty, &[deref_ty.into()]);
- let bool_ty = self.hir.bool_ty();
+ let bool_ty = self.tcx.types.bool;
let eq_result = self.temp(bool_ty, source_info.span);
let eq_block = self.cfg.start_new_block();
self.cfg.terminate(
@@ -427,7 +438,7 @@
// Need to experiment.
user_ty: None,
- literal: method,
+ literal: method.into(),
}),
args: vec![val, expect],
destination: Some((eq_result, eq_block)),
@@ -443,12 +454,7 @@
self.cfg.terminate(
eq_block,
source_info,
- TerminatorKind::if_(
- self.hir.tcx(),
- Operand::Move(eq_result),
- success_block,
- fail_block,
- ),
+ TerminatorKind::if_(self.tcx, Operand::Move(eq_result), success_block, fail_block),
);
} else {
bug!("`TestKind::Eq` should have two target blocks")
@@ -484,7 +490,7 @@
/// tighter match code if we do something a bit different.
pub(super) fn sort_candidate<'pat>(
&mut self,
- test_place: &Place<'tcx>,
+ test_place: &PlaceBuilder<'tcx>,
test: &Test<'tcx>,
candidate: &mut Candidate<'pat, 'tcx>,
) -> Option<usize> {
@@ -632,11 +638,11 @@
use rustc_hir::RangeEnd::*;
use std::cmp::Ordering::*;
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
let test_ty = test.lo.ty;
- let lo = compare_const_vals(tcx, test.lo, pat.hi, self.hir.param_env, test_ty)?;
- let hi = compare_const_vals(tcx, test.hi, pat.lo, self.hir.param_env, test_ty)?;
+ let lo = compare_const_vals(tcx, test.lo, pat.hi, self.param_env, test_ty)?;
+ let hi = compare_const_vals(tcx, test.hi, pat.lo, self.param_env, test_ty)?;
match (test.end, pat.end, lo, hi) {
// pat < test
@@ -731,7 +737,6 @@
candidate: &mut Candidate<'pat, 'tcx>,
) {
let match_pair = candidate.match_pairs.remove(match_pair_index);
- let tcx = self.hir.tcx();
// So, if we have a match-pattern like `x @ Enum::Variant(P1, P2)`,
// we want to create a set of derived match-patterns like
@@ -740,10 +745,10 @@
Some(adt_def.variants[variant_index].ident.name),
variant_index,
);
- let downcast_place = tcx.mk_place_elem(match_pair.place, elem); // `(x as Variant)`
+ let downcast_place = match_pair.place.project(elem); // `(x as Variant)`
let consequent_match_pairs = subpatterns.iter().map(|subpattern| {
// e.g., `(x as Variant).0`
- let place = tcx.mk_place_field(downcast_place, subpattern.field, subpattern.pattern.ty);
+ let place = downcast_place.clone().field(subpattern.field, subpattern.pattern.ty);
// e.g., `(x as Variant).0 @ P1`
MatchPair::new(place, &subpattern.pattern)
});
@@ -762,10 +767,10 @@
) -> Option<bool> {
use std::cmp::Ordering::*;
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
- let a = compare_const_vals(tcx, range.lo, value, self.hir.param_env, range.lo.ty)?;
- let b = compare_const_vals(tcx, value, range.hi, self.hir.param_env, range.lo.ty)?;
+ let a = compare_const_vals(tcx, range.lo, value, self.param_env, range.lo.ty)?;
+ let b = compare_const_vals(tcx, value, range.hi, self.param_env, range.lo.ty)?;
match (b, range.end) {
(Less, _) | (Equal, RangeEnd::Included) if a != Greater => Some(true),
@@ -815,3 +820,25 @@
fn is_switch_ty(ty: Ty<'_>) -> bool {
ty.is_integral() || ty.is_char() || ty.is_bool()
}
+
+fn trait_method<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ method_name: Symbol,
+ self_ty: Ty<'tcx>,
+ params: &[GenericArg<'tcx>],
+) -> &'tcx ty::Const<'tcx> {
+ let substs = tcx.mk_substs_trait(self_ty, params);
+
+ // The unhygienic comparison here is acceptable because this is only
+ // used on known traits.
+ let item = tcx
+ .associated_items(trait_def_id)
+ .filter_by_name_unhygienic(method_name)
+ .find(|item| item.kind == ty::AssocKind::Fn)
+ .expect("trait method not found");
+
+ let method_ty = tcx.type_of(item.def_id);
+ let method_ty = method_ty.subst(tcx, substs);
+ ty::Const::zero_sized(tcx, method_ty)
+}
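
The new `trait_method` helper resolves `PartialEq::eq` so that `TestKind::Eq` on non-scalar values can be emitted as a method call rather than a primitive comparison. A rough surface-level analogue of the test performed for a `&str` pattern (a sketch only; the real lowering builds the call directly in MIR):

```
fn main() {
    let s = "hello";
    let matched = matches!(s, "hello");
    // The generated test is essentially a call to the `eq` method that
    // `trait_method(tcx, PartialEq, sym::eq, ...)` looks up above.
    let by_eq = <str as PartialEq>::eq(s, "hello");
    assert_eq!(matched, by_eq);
}
```
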
diff --git a/compiler/rustc_mir_build/src/build/matches/util.rs b/compiler/rustc_mir_build/src/build/matches/util.rs
index db1f678..d49a00a 100644
--- a/compiler/rustc_mir_build/src/build/matches/util.rs
+++ b/compiler/rustc_mir_build/src/build/matches/util.rs
@@ -1,3 +1,4 @@
+use crate::build::expr::as_place::PlaceBuilder;
use crate::build::matches::MatchPair;
use crate::build::Builder;
use crate::thir::*;
@@ -9,14 +10,13 @@
impl<'a, 'tcx> Builder<'a, 'tcx> {
crate fn field_match_pairs<'pat>(
&mut self,
- place: Place<'tcx>,
+ place: PlaceBuilder<'tcx>,
subpatterns: &'pat [FieldPat<'tcx>],
) -> Vec<MatchPair<'pat, 'tcx>> {
subpatterns
.iter()
.map(|fieldpat| {
- let place =
- self.hir.tcx().mk_place_field(place, fieldpat.field, fieldpat.pattern.ty);
+ let place = place.clone().field(fieldpat.field, fieldpat.pattern.ty);
MatchPair::new(place, &fieldpat.pattern)
})
.collect()
@@ -25,34 +25,37 @@
crate fn prefix_slice_suffix<'pat>(
&mut self,
match_pairs: &mut SmallVec<[MatchPair<'pat, 'tcx>; 1]>,
- place: &Place<'tcx>,
+ place: &PlaceBuilder<'tcx>,
prefix: &'pat [Pat<'tcx>],
opt_slice: Option<&'pat Pat<'tcx>>,
suffix: &'pat [Pat<'tcx>],
) {
- let tcx = self.hir.tcx();
- let (min_length, exact_size) = match place.ty(&self.local_decls, tcx).ty.kind() {
- ty::Array(_, length) => (length.eval_usize(tcx, self.hir.param_env), true),
+ let tcx = self.tcx;
+ let (min_length, exact_size) = match place
+ .clone()
+ .into_place(tcx, self.typeck_results)
+ .ty(&self.local_decls, tcx)
+ .ty
+ .kind()
+ {
+ ty::Array(_, length) => (length.eval_usize(tcx, self.param_env), true),
_ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
};
match_pairs.extend(prefix.iter().enumerate().map(|(idx, subpattern)| {
let elem =
ProjectionElem::ConstantIndex { offset: idx as u64, min_length, from_end: false };
- let place = tcx.mk_place_elem(*place, elem);
+ let place = place.clone().project(elem);
MatchPair::new(place, subpattern)
}));
if let Some(subslice_pat) = opt_slice {
let suffix_len = suffix.len() as u64;
- let subslice = tcx.mk_place_elem(
- *place,
- ProjectionElem::Subslice {
- from: prefix.len() as u64,
- to: if exact_size { min_length - suffix_len } else { suffix_len },
- from_end: !exact_size,
- },
- );
+ let subslice = place.clone().project(ProjectionElem::Subslice {
+ from: prefix.len() as u64,
+ to: if exact_size { min_length - suffix_len } else { suffix_len },
+ from_end: !exact_size,
+ });
match_pairs.push(MatchPair::new(subslice, subslice_pat));
}
@@ -63,7 +66,7 @@
min_length,
from_end: !exact_size,
};
- let place = tcx.mk_place_elem(*place, elem);
+ let place = place.clone().project(elem);
MatchPair::new(place, subpattern)
}));
}
@@ -92,7 +95,7 @@
}
impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
- crate fn new(place: Place<'tcx>, pattern: &'pat Pat<'tcx>) -> MatchPair<'pat, 'tcx> {
+ crate fn new(place: PlaceBuilder<'tcx>, pattern: &'pat Pat<'tcx>) -> MatchPair<'pat, 'tcx> {
MatchPair { place, pattern }
}
}
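
`prefix_slice_suffix` now produces its `ConstantIndex` and `Subslice` projections through the `PlaceBuilder`. The slice-pattern shape it decomposes, written in plain Rust (prefix bindings, an optional `..` subslice, and suffix bindings):

```
fn main() {
    let xs = [1, 2, 3, 4, 5];
    // `first` and `last` become ConstantIndex projections and `middle`
    // becomes the Subslice projection built above.
    let [first, middle @ .., last] = xs;
    assert_eq!(first, 1);
    assert_eq!(middle, [2, 3, 4]);
    assert_eq!(last, 5);
}
```
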
diff --git a/compiler/rustc_mir_build/src/build/misc.rs b/compiler/rustc_mir_build/src/build/misc.rs
index 29651d9..a1126d1 100644
--- a/compiler/rustc_mir_build/src/build/misc.rs
+++ b/compiler/rustc_mir_build/src/build/misc.rs
@@ -3,10 +3,10 @@
use crate::build::Builder;
-use rustc_middle::ty::{self, Ty};
-
use rustc_middle::mir::*;
+use rustc_middle::ty::{self, Ty};
use rustc_span::{Span, DUMMY_SP};
+use rustc_trait_selection::infer::InferCtxtExt;
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Adds a new temporary value of type `ty` storing the result of
@@ -30,6 +30,7 @@
span: Span,
literal: &'tcx ty::Const<'tcx>,
) -> Operand<'tcx> {
+ let literal = literal.into();
let constant = box Constant { span, user_ty: None, literal };
Operand::Constant(constant)
}
@@ -37,7 +38,7 @@
// Returns a zero literal operand for the appropriate type, works for
// bool, char and integers.
crate fn zero_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
- let literal = ty::Const::from_bits(self.hir.tcx(), 0, ty::ParamEnv::empty().and(ty));
+ let literal = ty::Const::from_bits(self.tcx, 0, ty::ParamEnv::empty().and(ty));
self.literal_operand(span, literal)
}
@@ -48,7 +49,7 @@
source_info: SourceInfo,
value: u64,
) -> Place<'tcx> {
- let usize_ty = self.hir.usize_ty();
+ let usize_ty = self.tcx.types.usize;
let temp = self.temp(usize_ty, source_info.span);
self.cfg.push_assign_constant(
block,
@@ -57,16 +58,16 @@
Constant {
span: source_info.span,
user_ty: None,
- literal: self.hir.usize_literal(value),
+ literal: ty::Const::from_usize(self.tcx, value).into(),
},
);
temp
}
crate fn consume_by_copy_or_move(&self, place: Place<'tcx>) -> Operand<'tcx> {
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
let ty = place.ty(&self.local_decls, tcx).ty;
- if !self.hir.type_is_copy_modulo_regions(ty, DUMMY_SP) {
+ if !self.infcx.type_is_copy_modulo_regions(self.param_env, ty, DUMMY_SP) {
Operand::Move(place)
} else {
Operand::Copy(place)
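
`consume_by_copy_or_move` now asks the inference context directly whether the type is `Copy` (modulo regions). A small sketch of the surface-level distinction it encodes:

```
fn main() {
    let n: u32 = 5;              // `u32: Copy`  -> Operand::Copy
    let s = String::from("x");   // not `Copy`   -> Operand::Move
    let _a = n;
    let _b = n;   // still usable: `n` was copied
    let _c = s;
    // let _d = s; // would not compile: `s` was moved above
}
```
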
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
index 5f6c8d2..af4aaef 100644
--- a/compiler/rustc_mir_build/src/build/mod.rs
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -1,7 +1,7 @@
use crate::build;
+use crate::build::expr::as_place::PlaceBuilder;
use crate::build::scope::DropKind;
-use crate::thir::cx::Cx;
-use crate::thir::{BindingMode, LintLevel, PatKind};
+use crate::thir::{build_thir, Arena, BindingMode, Expr, LintLevel, Pat, PatKind};
use rustc_attr::{self as attr, UnwindAttr};
use rustc_errors::ErrorReported;
use rustc_hir as hir;
@@ -9,13 +9,13 @@
use rustc_hir::lang_items::LangItem;
use rustc_hir::{GeneratorKind, HirIdMap, Node};
use rustc_index::vec::{Idx, IndexVec};
-use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
use rustc_middle::middle::region;
use rustc_middle::mir::*;
use rustc_middle::ty::subst::Subst;
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
-use rustc_span::symbol::kw;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeckResults};
+use rustc_span::symbol::{kw, sym};
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
use rustc_target::spec::PanicStrategy;
@@ -42,6 +42,8 @@
/// Construct the MIR for a given `DefId`.
fn mir_build(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> Body<'_> {
let id = tcx.hir().local_def_id_to_hir_id(def.did);
+ let body_owner_kind = tcx.hir().body_owner_kind(id);
+ let typeck_results = tcx.typeck_opt_const_arg(def);
// Figure out what primary body this item has.
let (body_id, return_ty_span, span_with_body) = match tcx.hir().get(id) {
@@ -86,15 +88,15 @@
// If we don't have a specialized span for the body, just use the
// normal def span.
let span_with_body = span_with_body.unwrap_or_else(|| tcx.hir().span(id));
+ let arena = Arena::default();
tcx.infer_ctxt().enter(|infcx| {
- let cx = Cx::new(&infcx, def, id);
- let body = if let Some(ErrorReported) = cx.typeck_results().tainted_by_errors {
- build::construct_error(cx, body_id)
- } else if cx.body_owner_kind.is_fn_or_closure() {
+ let body = if let Some(ErrorReported) = typeck_results.tainted_by_errors {
+ build::construct_error(&infcx, def, id, body_id, body_owner_kind)
+ } else if body_owner_kind.is_fn_or_closure() {
// fetch the fully liberated fn signature (that is, all bound
// types/lifetimes replaced)
- let fn_sig = cx.typeck_results().liberated_fn_sigs()[id];
+ let fn_sig = typeck_results.liberated_fn_sigs()[id];
let fn_def_id = tcx.hir().local_def_id(id);
let safety = match fn_sig.unsafety {
@@ -103,6 +105,7 @@
};
let body = tcx.hir().body(body_id);
+ let thir = build_thir(tcx, def, &arena, &body.value);
let ty = tcx.type_of(fn_def_id);
let mut abi = fn_sig.abi;
let implicit_argument = match ty.kind() {
@@ -178,7 +181,8 @@
};
let mut mir = build::construct_fn(
- cx,
+ &infcx,
+ def,
id,
arguments,
safety,
@@ -186,9 +190,12 @@
return_ty,
return_ty_span,
body,
+ thir,
span_with_body,
);
- mir.yield_ty = yield_ty;
+ if yield_ty.is_some() {
+ mir.generator.as_mut().unwrap().yield_ty = yield_ty;
+ }
mir
} else {
// Get the revealed type of this const. This is *not* the adjusted
@@ -203,9 +210,12 @@
// place to be the type of the constant because NLL typeck will
// equate them.
- let return_ty = cx.typeck_results().node_type(id);
+ let return_ty = typeck_results.node_type(id);
- build::construct_const(cx, body_id, return_ty, return_ty_span)
+ let ast_expr = &tcx.hir().body(body_id).value;
+ let thir = build_thir(tcx, def, &arena, ast_expr);
+
+ build::construct_const(&infcx, thir, def, id, return_ty, return_ty_span)
};
lints::check(tcx, &body);
@@ -218,7 +228,7 @@
!(body.local_decls.has_free_regions()
|| body.basic_blocks().has_free_regions()
|| body.var_debug_info.has_free_regions()
- || body.yield_ty.has_free_regions()),
+ || body.yield_ty().has_free_regions()),
"Unexpected free regions in MIR: {:?}",
body,
);
@@ -302,10 +312,17 @@
struct BlockContext(Vec<BlockFrame>);
struct Builder<'a, 'tcx> {
- hir: Cx<'a, 'tcx>,
+ tcx: TyCtxt<'tcx>,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ typeck_results: &'tcx TypeckResults<'tcx>,
+ region_scope_tree: &'tcx region::ScopeTree,
+ param_env: ty::ParamEnv<'tcx>,
+
cfg: CFG<'tcx>,
def_id: DefId,
+ hir_id: hir::HirId,
+ check_overflow: bool,
fn_span: Span,
arg_count: usize,
generator_kind: Option<GeneratorKind>,
@@ -546,7 +563,7 @@
}};
}
-fn should_abort_on_panic(tcx: TyCtxt<'_>, fn_def_id: LocalDefId, _abi: Abi) -> bool {
+fn should_abort_on_panic(tcx: TyCtxt<'_>, fn_def_id: LocalDefId, abi: Abi) -> bool {
// Validate `#[unwind]` syntax regardless of platform-specific panic strategy.
let attrs = &tcx.get_attrs(fn_def_id.to_def_id());
let unwind_attr = attr::find_unwind_attr(&tcx.sess, attrs);
@@ -556,12 +573,46 @@
return false;
}
- // This is a special case: some functions have a C abi but are meant to
- // unwind anyway. Don't stop them.
match unwind_attr {
- None => false, // FIXME(#58794); should be `!(abi == Abi::Rust || abi == Abi::RustCall)`
+ // If an `#[unwind]` attribute was found, we should adhere to it.
Some(UnwindAttr::Allowed) => false,
Some(UnwindAttr::Aborts) => true,
+ // If no attribute was found and the panic strategy is `unwind`, then we should examine
+ // the function's ABI string to determine whether it should abort upon panic.
+ None if tcx.features().c_unwind => {
+ use Abi::*;
+ match abi {
+ // In the case of ABI's that have an `-unwind` equivalent, check whether the ABI
+ // permits unwinding. If so, we should not abort. Otherwise, we should.
+ C { unwind } | Stdcall { unwind } | System { unwind } | Thiscall { unwind } => {
+ !unwind
+ }
+ // Rust and `rust-call` functions are allowed to unwind, and should not abort.
+ Rust | RustCall => false,
+ // Other ABI's should abort.
+ Cdecl
+ | Fastcall
+ | Vectorcall
+ | Aapcs
+ | Win64
+ | SysV64
+ | PtxKernel
+ | Msp430Interrupt
+ | X86Interrupt
+ | AmdGpuKernel
+ | EfiApi
+ | AvrInterrupt
+ | AvrNonBlockingInterrupt
+ | CCmseNonSecureCall
+ | RustIntrinsic
+ | PlatformIntrinsic
+ | Unadjusted => true,
+ }
+ }
+ // If the `c_unwind` feature gate is not active, follow the behavior that was in place
+ // prior to #76570. This is a special case: some functions have a C ABI but are meant to
+ // unwind anyway. Don't stop them.
+ None => false, // FIXME(#58794); should be `!(abi == Abi::Rust || abi == Abi::RustCall)`
}
}
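
With the `c_unwind` feature gate, the function's ABI string now decides whether a panic escaping it aborts. A hedged sketch of the surface behavior this encodes; it assumes a nightly compiler of this era with `#![feature(c_unwind)]`, since the `-unwind` ABI variants were unstable at the time:

```
#![feature(c_unwind)]

// Plain `extern "C"`: a panic escaping this function is turned into an abort.
extern "C" fn aborts_on_panic() {
    panic!("abort rather than unwind into foreign frames");
}

// The `-unwind` variant is allowed to propagate the panic to a Rust caller.
extern "C-unwind" fn may_unwind() {
    panic!("this panic may unwind");
}

fn main() {
    let caught = std::panic::catch_unwind(|| may_unwind());
    assert!(caught.is_err());
    // Calling `aborts_on_panic()` here would abort the whole process instead.
    let _ = aborts_on_panic;
}
```
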
@@ -575,8 +626,9 @@
Option<ImplicitSelfKind>,
);
-fn construct_fn<'a, 'tcx, A>(
- hir: Cx<'a, 'tcx>,
+fn construct_fn<'tcx, A>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ fn_def: ty::WithOptConstParam<LocalDefId>,
fn_id: hir::HirId,
arguments: A,
safety: Safety,
@@ -584,6 +636,7 @@
return_ty: Ty<'tcx>,
return_ty_span: Span,
body: &'tcx hir::Body<'tcx>,
+ expr: &Expr<'_, 'tcx>,
span_with_body: Span,
) -> Body<'tcx>
where
@@ -591,15 +644,13 @@
{
let arguments: Vec<_> = arguments.collect();
- let tcx = hir.tcx();
- let tcx_hir = tcx.hir();
- let span = tcx_hir.span(fn_id);
-
- let fn_def_id = tcx_hir.local_def_id(fn_id);
+ let tcx = infcx.tcx;
+ let span = tcx.hir().span(fn_id);
let mut builder = Builder::new(
- hir,
- fn_def_id.to_def_id(),
+ infcx,
+ fn_def,
+ fn_id,
span_with_body,
arguments.len(),
safety,
@@ -623,16 +674,16 @@
Some(builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| {
builder.args_and_body(
START_BLOCK,
- fn_def_id.to_def_id(),
+ fn_def.did.to_def_id(),
&arguments,
arg_scope,
- &body.value,
+ expr,
)
}))
}));
let source_info = builder.source_info(fn_end);
builder.cfg.terminate(return_block, source_info, TerminatorKind::Return);
- let should_abort = should_abort_on_panic(tcx, fn_def_id, abi);
+ let should_abort = should_abort_on_panic(tcx, fn_def.did, abi);
builder.build_drop_trees(should_abort);
return_block.unit()
}));
@@ -643,7 +694,7 @@
} else {
None
};
- debug!("fn_id {:?} has attrs {:?}", fn_def_id, tcx.get_attrs(fn_def_id.to_def_id()));
+ debug!("fn_id {:?} has attrs {:?}", fn_def, tcx.get_attrs(fn_def.did.to_def_id()));
let mut body = builder.finish();
body.spread_arg = spread_arg;
@@ -651,22 +702,20 @@
}
fn construct_const<'a, 'tcx>(
- hir: Cx<'a, 'tcx>,
- body_id: hir::BodyId,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ expr: &Expr<'_, 'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ hir_id: hir::HirId,
const_ty: Ty<'tcx>,
const_ty_span: Span,
) -> Body<'tcx> {
- let tcx = hir.tcx();
- let owner_id = tcx.hir().body_owner(body_id);
- let def_id = tcx.hir().local_def_id(owner_id);
- let span = tcx.hir().span(owner_id);
+ let tcx = infcx.tcx;
+ let span = tcx.hir().span(hir_id);
let mut builder =
- Builder::new(hir, def_id.to_def_id(), span, 0, Safety::Safe, const_ty, const_ty_span, None);
+ Builder::new(infcx, def, hir_id, span, 0, Safety::Safe, const_ty, const_ty_span, None);
let mut block = START_BLOCK;
- let ast_expr = &tcx.hir().body(body_id).value;
- let expr = builder.hir.mirror(ast_expr);
- unpack!(block = builder.into_expr(Place::return_place(), block, expr));
+ unpack!(block = builder.expr_into_dest(Place::return_place(), block, &expr));
let source_info = builder.source_info(span);
builder.cfg.terminate(block, source_info, TerminatorKind::Return);
@@ -680,29 +729,34 @@
///
/// This is required because we may still want to run MIR passes on an item
/// with type errors, but normal MIR construction can't handle that in general.
-fn construct_error<'a, 'tcx>(hir: Cx<'a, 'tcx>, body_id: hir::BodyId) -> Body<'tcx> {
- let tcx = hir.tcx();
- let owner_id = tcx.hir().body_owner(body_id);
- let def_id = tcx.hir().local_def_id(owner_id);
- let span = tcx.hir().span(owner_id);
+fn construct_error<'a, 'tcx>(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ hir_id: hir::HirId,
+ body_id: hir::BodyId,
+ body_owner_kind: hir::BodyOwnerKind,
+) -> Body<'tcx> {
+ let tcx = infcx.tcx;
+ let span = tcx.hir().span(hir_id);
let ty = tcx.ty_error();
- let num_params = match hir.body_owner_kind {
- hir::BodyOwnerKind::Fn => tcx.hir().fn_decl_by_hir_id(owner_id).unwrap().inputs.len(),
+ let generator_kind = tcx.hir().body(body_id).generator_kind;
+ let num_params = match body_owner_kind {
+ hir::BodyOwnerKind::Fn => tcx.hir().fn_decl_by_hir_id(hir_id).unwrap().inputs.len(),
hir::BodyOwnerKind::Closure => {
- if tcx.hir().body(body_id).generator_kind().is_some() {
+ if generator_kind.is_some() {
// Generators have an implicit `self` parameter *and* a possibly
// implicit resume parameter.
2
} else {
// The implicit self parameter adds another local in MIR.
- 1 + tcx.hir().fn_decl_by_hir_id(owner_id).unwrap().inputs.len()
+ 1 + tcx.hir().fn_decl_by_hir_id(hir_id).unwrap().inputs.len()
}
}
hir::BodyOwnerKind::Const => 0,
hir::BodyOwnerKind::Static(_) => 0,
};
let mut builder =
- Builder::new(hir, def_id.to_def_id(), span, num_params, Safety::Safe, ty, span, None);
+ Builder::new(infcx, def, hir_id, span, num_params, Safety::Safe, ty, span, generator_kind);
let source_info = builder.source_info(span);
// Some MIR passes will expect the number of parameters to match the
// function declaration.
@@ -711,16 +765,15 @@
}
builder.cfg.terminate(START_BLOCK, source_info, TerminatorKind::Unreachable);
let mut body = builder.finish();
- if tcx.hir().body(body_id).generator_kind.is_some() {
- body.yield_ty = Some(ty);
- }
+ body.generator.as_mut().map(|gen| gen.yield_ty = Some(ty));
body
}
impl<'a, 'tcx> Builder<'a, 'tcx> {
fn new(
- hir: Cx<'a, 'tcx>,
- def_id: DefId,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ hir_id: hir::HirId,
span: Span,
arg_count: usize,
safety: Safety,
@@ -728,10 +781,30 @@
return_span: Span,
generator_kind: Option<GeneratorKind>,
) -> Builder<'a, 'tcx> {
- let lint_level = LintLevel::Explicit(hir.root_lint_level);
+ let tcx = infcx.tcx;
+ let attrs = tcx.hir().attrs(hir_id);
+ // Some functions always have overflow checks enabled,
+ // however, they may not get codegen'd, depending on
+ // the settings for the crate they are codegened in.
+ let mut check_overflow = tcx.sess.contains_name(attrs, sym::rustc_inherit_overflow_checks);
+ // Respect -C overflow-checks.
+ check_overflow |= tcx.sess.overflow_checks();
+ // Constants always need overflow checks.
+ check_overflow |= matches!(
+ tcx.hir().body_owner_kind(hir_id),
+ hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_)
+ );
+
+ let lint_level = LintLevel::Explicit(hir_id);
let mut builder = Builder {
- hir,
- def_id,
+ tcx,
+ infcx,
+ typeck_results: tcx.typeck_opt_const_arg(def),
+ region_scope_tree: tcx.region_scope_tree(def.did),
+ param_env: tcx.param_env(def.did),
+ def_id: def.did.to_def_id(),
+ hir_id,
+ check_overflow,
cfg: CFG { basic_blocks: IndexVec::new() },
fn_span: span,
arg_count,
@@ -787,7 +860,7 @@
fn_def_id: DefId,
arguments: &[ArgInfo<'tcx>],
argument_scope: region::Scope,
- ast_body: &'tcx hir::Expr<'tcx>,
+ expr: &Expr<'_, 'tcx>,
) -> BlockAnd<()> {
// Allocate locals for the function arguments
for &ArgInfo(ty, _, arg_opt, _) in arguments.iter() {
@@ -807,12 +880,12 @@
}
}
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
let tcx_hir = tcx.hir();
- let hir_typeck_results = self.hir.typeck_results();
+ let hir_typeck_results = self.typeck_results;
// In analyze_closure() in upvar.rs we gathered a list of upvars used by a
- // indexed closure and we stored in a map called closure_captures in TypeckResults
+ // indexed closure and we stored in a map called closure_min_captures in TypeckResults
// with the closure's DefId. Here, we run through that vec of UpvarIds for
// the given closure and use the necessary information to create upvar
// debuginfo and to fill `self.upvar_mutbls`.
@@ -885,14 +958,18 @@
// Make sure we drop (parts of) the argument even when not matched on.
self.schedule_drop(
- arg_opt.as_ref().map_or(ast_body.span, |arg| arg.pat.span),
+ arg_opt.as_ref().map_or(expr.span, |arg| arg.pat.span),
argument_scope,
local,
DropKind::Value,
);
if let Some(arg) = arg_opt {
- let pattern = self.hir.pattern_from_hir(&arg.pat);
+ let pat = match tcx.hir().get(arg.pat.hir_id) {
+ Node::Pat(pat) | Node::Binding(pat) => pat,
+ node => bug!("pattern became {:?}", node),
+ };
+ let pattern = Pat::from_hir(tcx, self.param_env, self.typeck_results, pat);
let original_source_scope = self.source_scope;
let span = pattern.span;
self.set_correct_source_scope_for_arg(arg.hir_id, original_source_scope, span);
@@ -927,12 +1004,15 @@
_ => {
scope = self.declare_bindings(
scope,
- ast_body.span,
+ expr.span,
&pattern,
matches::ArmHasGuard(false),
Some((Some(&place), span)),
);
- unpack!(block = self.place_into_pattern(block, pattern, place, false));
+ let place_builder = PlaceBuilder::from(local);
+ unpack!(
+ block = self.place_into_pattern(block, pattern, place_builder, false)
+ );
}
}
self.source_scope = original_source_scope;
@@ -944,8 +1024,7 @@
self.source_scope = source_scope;
}
- let body = self.hir.mirror(ast_body);
- self.into(Place::return_place(), block, body)
+ self.expr_into_dest(Place::return_place(), block, &expr)
}
fn set_correct_source_scope_for_arg(
@@ -954,15 +1033,15 @@
original_source_scope: SourceScope,
pattern_span: Span,
) {
- let tcx = self.hir.tcx();
- let current_root = tcx.maybe_lint_level_root_bounded(arg_hir_id, self.hir.root_lint_level);
+ let tcx = self.tcx;
+ let current_root = tcx.maybe_lint_level_root_bounded(arg_hir_id, self.hir_id);
let parent_root = tcx.maybe_lint_level_root_bounded(
self.source_scopes[original_source_scope]
.local_data
.as_ref()
.assert_crate_local()
.lint_root,
- self.hir.root_lint_level,
+ self.hir_id,
);
if current_root != parent_root {
self.source_scope =
@@ -974,7 +1053,7 @@
match self.unit_temp {
Some(tmp) => tmp,
None => {
- let ty = self.hir.unit_ty();
+ let ty = self.tcx.mk_unit();
let fn_span = self.fn_span;
let tmp = self.temp(ty, fn_span);
self.unit_temp = Some(tmp);
@@ -992,7 +1071,6 @@
mod block;
mod cfg;
mod expr;
-mod into;
mod matches;
mod misc;
mod scope;
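
`Builder::new` now computes `check_overflow` itself: it is forced on by `#[rustc_inherit_overflow_checks]`, by `-C overflow-checks`, and always for const/static bodies. A small sketch of what the flag means at the source level:

```
fn main() {
    let x: u8 = 255;
    // With overflow checks enabled (debug builds or `-C overflow-checks=on`),
    // `x + 1` panics at runtime; with checks disabled it wraps silently.
    // The explicit wrapping form is well-defined either way:
    let y = x.wrapping_add(1);
    assert_eq!(y, 0);

    // In const evaluation overflow is always an error, which is why the
    // builder forces `check_overflow` on for const and static bodies.
    const _OK: u8 = 254 + 1;
    let _ = _OK;
}
```
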
diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs
index 5e9d780..b637b9b 100644
--- a/compiler/rustc_mir_build/src/build/scope.rs
+++ b/compiler/rustc_mir_build/src/build/scope.rs
@@ -82,7 +82,7 @@
*/
use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG};
-use crate::thir::{Expr, ExprRef, LintLevel};
+use crate::thir::{Expr, LintLevel};
use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::IndexVec;
use rustc_middle::middle::region;
@@ -516,7 +516,7 @@
{
debug!("in_scope(region_scope={:?})", region_scope);
let source_scope = self.source_scope;
- let tcx = self.hir.tcx();
+ let tcx = self.tcx;
if let LintLevel::Explicit(current_hir_id) = lint_level {
// Use `maybe_lint_level_root_bounded` with `root_lint_level` as a bound
// to avoid adding Hir dependences on our parents.
@@ -524,10 +524,9 @@
let parent_root = tcx.maybe_lint_level_root_bounded(
self.source_scopes[source_scope].local_data.as_ref().assert_crate_local().lint_root,
- self.hir.root_lint_level,
+ self.hir_id,
);
- let current_root =
- tcx.maybe_lint_level_root_bounded(current_hir_id, self.hir.root_lint_level);
+ let current_root = tcx.maybe_lint_level_root_bounded(current_hir_id, self.hir_id);
if parent_root != current_root {
self.source_scope = self.new_source_scope(
@@ -575,7 +574,7 @@
crate fn break_scope(
&mut self,
mut block: BasicBlock,
- value: Option<ExprRef<'tcx>>,
+ value: Option<&Expr<'_, 'tcx>>,
target: BreakableTarget,
source_info: SourceInfo,
) -> BlockAnd<()> {
@@ -612,10 +611,10 @@
if let Some(value) = value {
debug!("stmt_expr Break val block_context.push(SubExpr)");
self.block_context.push(BlockFrame::SubExpr);
- unpack!(block = self.into(destination, block, value));
+ unpack!(block = self.expr_into_dest(destination, block, value));
self.block_context.pop();
} else {
- self.cfg.push_assign_unit(block, source_info, destination, self.hir.tcx())
+ self.cfg.push_assign_unit(block, source_info, destination, self.tcx)
}
} else {
assert!(value.is_none(), "`return` and `break` should have a destination");
@@ -763,7 +762,7 @@
) {
let needs_drop = match drop_kind {
DropKind::Value => {
- if !self.hir.needs_drop(self.local_decls[local].ty) {
+ if !self.local_decls[local].ty.needs_drop(self.tcx, self.param_env) {
return;
}
true
@@ -834,10 +833,9 @@
}
if scope.region_scope == region_scope {
- let region_scope_span =
- region_scope.span(self.hir.tcx(), &self.hir.region_scope_tree);
+ let region_scope_span = region_scope.span(self.tcx, &self.region_scope_tree);
// Attribute scope exit drops to scope's closing brace.
- let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span);
+ let scope_end = self.tcx.sess.source_map().end_point(region_scope_span);
scope.drops.push(DropData {
source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
@@ -920,13 +918,13 @@
crate fn test_bool(
&mut self,
mut block: BasicBlock,
- condition: Expr<'tcx>,
+ condition: &Expr<'_, 'tcx>,
source_info: SourceInfo,
) -> (BasicBlock, BasicBlock) {
let cond = unpack!(block = self.as_local_operand(block, condition));
let true_block = self.cfg.start_new_block();
let false_block = self.cfg.start_new_block();
- let term = TerminatorKind::if_(self.hir.tcx(), cond.clone(), true_block, false_block);
+ let term = TerminatorKind::if_(self.tcx, cond.clone(), true_block, false_block);
self.cfg.terminate(block, source_info, term);
match cond {
diff --git a/compiler/rustc_mir_build/src/lib.rs b/compiler/rustc_mir_build/src/lib.rs
index 0866892..b1591d8 100644
--- a/compiler/rustc_mir_build/src/lib.rs
+++ b/compiler/rustc_mir_build/src/lib.rs
@@ -20,7 +20,7 @@
mod build;
mod lints;
-mod thir;
+pub mod thir;
use rustc_middle::ty::query::Providers;
diff --git a/compiler/rustc_mir_build/src/lints.rs b/compiler/rustc_mir_build/src/lints.rs
index 576b537..ef8bd20 100644
--- a/compiler/rustc_mir_build/src/lints.rs
+++ b/compiler/rustc_mir_build/src/lints.rs
@@ -15,7 +15,7 @@
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
if let Some(fn_like_node) = FnLikeNode::from_node(tcx.hir().get(hir_id)) {
- if let FnKind::Closure(_) = fn_like_node.kind() {
+ if let FnKind::Closure = fn_like_node.kind() {
// closures can't recur, so they don't matter.
return;
}
diff --git a/compiler/rustc_mir_build/src/thir/arena.rs b/compiler/rustc_mir_build/src/thir/arena.rs
new file mode 100644
index 0000000..aacc7b1
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/arena.rs
@@ -0,0 +1,98 @@
+use crate::thir::*;
+
+macro_rules! declare_arena {
+ ([], [$($a:tt $name:ident: $ty:ty,)*]) => {
+ #[derive(Default)]
+ pub struct Arena<'thir, 'tcx> {
+ pub dropless: rustc_arena::DroplessArena,
+ drop: rustc_arena::DropArena,
+ $($name: rustc_arena::arena_for_type!($a[$ty]),)*
+ }
+
+ pub trait ArenaAllocatable<'thir, 'tcx, T = Self>: Sized {
+ fn allocate_on(self, arena: &'thir Arena<'thir, 'tcx>) -> &'thir mut Self;
+ fn allocate_from_iter(
+ arena: &'thir Arena<'thir, 'tcx>,
+ iter: impl ::std::iter::IntoIterator<Item = Self>,
+ ) -> &'thir mut [Self];
+ }
+
+ impl<'thir, 'tcx, T: Copy> ArenaAllocatable<'thir, 'tcx, ()> for T {
+ #[inline]
+ fn allocate_on(self, arena: &'thir Arena<'thir, 'tcx>) -> &'thir mut Self {
+ arena.dropless.alloc(self)
+ }
+ #[inline]
+ fn allocate_from_iter(
+ arena: &'thir Arena<'thir, 'tcx>,
+ iter: impl ::std::iter::IntoIterator<Item = Self>,
+ ) -> &'thir mut [Self] {
+ arena.dropless.alloc_from_iter(iter)
+ }
+
+ }
+ $(
+ impl<'thir, 'tcx> ArenaAllocatable<'thir, 'tcx, $ty> for $ty {
+ #[inline]
+ fn allocate_on(self, arena: &'thir Arena<'thir, 'tcx>) -> &'thir mut Self {
+ if !::std::mem::needs_drop::<Self>() {
+ return arena.dropless.alloc(self);
+ }
+ match rustc_arena::which_arena_for_type!($a[&arena.$name]) {
+ ::std::option::Option::<&rustc_arena::TypedArena<Self>>::Some(ty_arena) => {
+ ty_arena.alloc(self)
+ }
+ ::std::option::Option::None => unsafe { arena.drop.alloc(self) },
+ }
+ }
+
+ #[inline]
+ fn allocate_from_iter(
+ arena: &'thir Arena<'thir, 'tcx>,
+ iter: impl ::std::iter::IntoIterator<Item = Self>,
+ ) -> &'thir mut [Self] {
+ if !::std::mem::needs_drop::<Self>() {
+ return arena.dropless.alloc_from_iter(iter);
+ }
+ match rustc_arena::which_arena_for_type!($a[&arena.$name]) {
+ ::std::option::Option::<&rustc_arena::TypedArena<Self>>::Some(ty_arena) => {
+ ty_arena.alloc_from_iter(iter)
+ }
+ ::std::option::Option::None => unsafe { arena.drop.alloc_from_iter(iter) },
+ }
+ }
+ }
+ )*
+
+ impl<'thir, 'tcx> Arena<'thir, 'tcx> {
+ #[inline]
+ pub fn alloc<T: ArenaAllocatable<'thir, 'tcx, U>, U>(&'thir self, value: T) -> &'thir mut T {
+ value.allocate_on(self)
+ }
+
+ #[allow(dead_code)] // function is never used
+ #[inline]
+ pub fn alloc_slice<T: ::std::marker::Copy>(&'thir self, value: &[T]) -> &'thir mut [T] {
+ if value.is_empty() {
+ return &mut [];
+ }
+ self.dropless.alloc_slice(value)
+ }
+
+ pub fn alloc_from_iter<T: ArenaAllocatable<'thir, 'tcx, U>, U>(
+ &'thir self,
+ iter: impl ::std::iter::IntoIterator<Item = T>,
+ ) -> &'thir mut [T] {
+ T::allocate_from_iter(self, iter)
+ }
+ }
+ }
+}
+
+declare_arena!([], [
+ [] arm: Arm<'thir, 'tcx>,
+ [] expr: Expr<'thir, 'tcx>,
+ [] field_expr: FieldExpr<'thir, 'tcx>,
+ [few] inline_asm_operand: InlineAsmOperand<'thir, 'tcx>,
+ [] stmt: Stmt<'thir, 'tcx>,
+]);
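
The new THIR `Arena` sends `Copy` values to the shared dropless arena and gives each listed THIR type a typed arena (falling back to the drop arena). A toy, self-contained illustration of the `needs_drop` dispatch the macro relies on; it is not the rustc_arena implementation:

```
use std::mem::needs_drop;

fn main() {
    // Types with no destructor can be bump-allocated in the dropless arena.
    assert!(!needs_drop::<u32>());
    assert!(!needs_drop::<&String>()); // references never need drop
    // Types with destructors must go through an arena that runs `Drop`.
    assert!(needs_drop::<String>());
    assert!(needs_drop::<Vec<u8>>());
}
```
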
diff --git a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs
index 969f7d1..ac93d04 100644
--- a/compiler/rustc_mir_build/src/thir/constant.rs
+++ b/compiler/rustc_mir_build/src/thir/constant.rs
@@ -1,3 +1,4 @@
+use rustc_apfloat::Float;
use rustc_ast as ast;
use rustc_middle::mir::interpret::{
Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
@@ -61,20 +62,40 @@
use rustc_apfloat::ieee::{Double, Single};
let scalar = match fty {
ty::FloatTy::F32 => {
- num.parse::<f32>().map_err(|_| ())?;
+ let rust_f = num.parse::<f32>().map_err(|_| ())?;
let mut f = num.parse::<Single>().unwrap_or_else(|e| {
panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e)
});
+ assert!(
+ u128::from(rust_f.to_bits()) == f.to_bits(),
+ "apfloat::ieee::Single gave different result for `{}`: \
+ {}({:#x}) vs Rust's {}({:#x})",
+ rust_f,
+ f,
+ f.to_bits(),
+ Single::from_bits(rust_f.to_bits().into()),
+ rust_f.to_bits()
+ );
if neg {
f = -f;
}
Scalar::from_f32(f)
}
ty::FloatTy::F64 => {
- num.parse::<f64>().map_err(|_| ())?;
+ let rust_f = num.parse::<f64>().map_err(|_| ())?;
let mut f = num.parse::<Double>().unwrap_or_else(|e| {
panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e)
});
+ assert!(
+ u128::from(rust_f.to_bits()) == f.to_bits(),
+ "apfloat::ieee::Double gave different result for `{}`: \
+ {}({:#x}) vs Rust's {}({:#x})",
+ rust_f,
+ f,
+ f.to_bits(),
+ Double::from_bits(rust_f.to_bits().into()),
+ rust_f.to_bits()
+ );
if neg {
f = -f;
}
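
The float lowering now keeps Rust's own parse result and asserts, bit for bit, that it agrees with the apfloat parse. A hedged sketch of the same cross-check, assuming the standalone `rustc_apfloat` crate exposes the same `ieee::Single`/`Float` API used above:

```
use rustc_apfloat::ieee::Single;
use rustc_apfloat::Float;

fn main() {
    let num = "1.1";
    let rust_f = num.parse::<f32>().unwrap();
    let apfloat_f = num.parse::<Single>().unwrap();
    // Same invariant as the assert! added above: both parsers must agree on
    // the exact IEEE-754 bit pattern of the literal.
    assert_eq!(u128::from(rust_f.to_bits()), apfloat_f.to_bits());
}
```
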
diff --git a/compiler/rustc_mir_build/src/thir/cx/block.rs b/compiler/rustc_mir_build/src/thir/cx/block.rs
index 980888d..d450f8a 100644
--- a/compiler/rustc_mir_build/src/thir/cx/block.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/block.rs
@@ -1,4 +1,3 @@
-use crate::thir::cx::to_ref::ToRef;
use crate::thir::cx::Cx;
use crate::thir::{self, *};
@@ -8,110 +7,95 @@
use rustc_index::vec::Idx;
-impl<'tcx> Mirror<'tcx> for &'tcx hir::Block<'tcx> {
- type Output = Block<'tcx>;
-
- fn make_mirror(self, cx: &mut Cx<'_, 'tcx>) -> Block<'tcx> {
+impl<'thir, 'tcx> Cx<'thir, 'tcx> {
+ crate fn mirror_block(&mut self, block: &'tcx hir::Block<'tcx>) -> Block<'thir, 'tcx> {
// We have to eagerly lower the "spine" of the statements
// in order to get the lexical scoping correctly.
- let stmts = mirror_stmts(cx, self.hir_id.local_id, &*self.stmts);
+ let stmts = self.mirror_stmts(block.hir_id.local_id, block.stmts);
let opt_destruction_scope =
- cx.region_scope_tree.opt_destruction_scope(self.hir_id.local_id);
+ self.region_scope_tree.opt_destruction_scope(block.hir_id.local_id);
Block {
- targeted_by_break: self.targeted_by_break,
- region_scope: region::Scope { id: self.hir_id.local_id, data: region::ScopeData::Node },
+ targeted_by_break: block.targeted_by_break,
+ region_scope: region::Scope {
+ id: block.hir_id.local_id,
+ data: region::ScopeData::Node,
+ },
opt_destruction_scope,
- span: self.span,
+ span: block.span,
stmts,
- expr: self.expr.to_ref(),
- safety_mode: match self.rules {
+ expr: block.expr.map(|expr| self.mirror_expr(expr)),
+ safety_mode: match block.rules {
hir::BlockCheckMode::DefaultBlock => BlockSafety::Safe,
- hir::BlockCheckMode::UnsafeBlock(..) => BlockSafety::ExplicitUnsafe(self.hir_id),
+ hir::BlockCheckMode::UnsafeBlock(..) => BlockSafety::ExplicitUnsafe(block.hir_id),
hir::BlockCheckMode::PushUnsafeBlock(..) => BlockSafety::PushUnsafe,
hir::BlockCheckMode::PopUnsafeBlock(..) => BlockSafety::PopUnsafe,
},
}
}
-}
-fn mirror_stmts<'a, 'tcx>(
- cx: &mut Cx<'a, 'tcx>,
- block_id: hir::ItemLocalId,
- stmts: &'tcx [hir::Stmt<'tcx>],
-) -> Vec<StmtRef<'tcx>> {
- let mut result = vec![];
- for (index, stmt) in stmts.iter().enumerate() {
- let hir_id = stmt.hir_id;
- let opt_dxn_ext = cx.region_scope_tree.opt_destruction_scope(hir_id.local_id);
- match stmt.kind {
- hir::StmtKind::Expr(ref expr) | hir::StmtKind::Semi(ref expr) => {
- result.push(StmtRef::Mirror(Box::new(Stmt {
+ fn mirror_stmts(
+ &mut self,
+ block_id: hir::ItemLocalId,
+ stmts: &'tcx [hir::Stmt<'tcx>],
+ ) -> &'thir [Stmt<'thir, 'tcx>] {
+ self.arena.alloc_from_iter(stmts.iter().enumerate().filter_map(|(index, stmt)| {
+ let hir_id = stmt.hir_id;
+ let opt_dxn_ext = self.region_scope_tree.opt_destruction_scope(hir_id.local_id);
+ match stmt.kind {
+ hir::StmtKind::Expr(ref expr) | hir::StmtKind::Semi(ref expr) => Some(Stmt {
kind: StmtKind::Expr {
scope: region::Scope { id: hir_id.local_id, data: region::ScopeData::Node },
- expr: expr.to_ref(),
+ expr: self.mirror_expr(expr),
},
opt_destruction_scope: opt_dxn_ext,
- })))
- }
- hir::StmtKind::Item(..) => {
- // ignore for purposes of the MIR
- }
- hir::StmtKind::Local(ref local) => {
- let remainder_scope = region::Scope {
- id: block_id,
- data: region::ScopeData::Remainder(region::FirstStatementIndex::new(index)),
- };
-
- let mut pattern = cx.pattern_from_hir(&local.pat);
-
- if let Some(ty) = &local.ty {
- if let Some(&user_ty) = cx.typeck_results.user_provided_types().get(ty.hir_id) {
- debug!("mirror_stmts: user_ty={:?}", user_ty);
- pattern = Pat {
- ty: pattern.ty,
- span: pattern.span,
- kind: Box::new(PatKind::AscribeUserType {
- ascription: thir::pattern::Ascription {
- user_ty: PatTyProj::from_user_type(user_ty),
- user_ty_span: ty.span,
- variance: ty::Variance::Covariant,
- },
- subpattern: pattern,
- }),
- };
- }
+ }),
+ hir::StmtKind::Item(..) => {
+ // ignore for purposes of the MIR
+ None
}
+ hir::StmtKind::Local(ref local) => {
+ let remainder_scope = region::Scope {
+ id: block_id,
+ data: region::ScopeData::Remainder(region::FirstStatementIndex::new(index)),
+ };
- result.push(StmtRef::Mirror(Box::new(Stmt {
- kind: StmtKind::Let {
- remainder_scope,
- init_scope: region::Scope {
- id: hir_id.local_id,
- data: region::ScopeData::Node,
+ let mut pattern = self.pattern_from_hir(local.pat);
+
+ if let Some(ty) = &local.ty {
+ if let Some(&user_ty) =
+ self.typeck_results.user_provided_types().get(ty.hir_id)
+ {
+ debug!("mirror_stmts: user_ty={:?}", user_ty);
+ pattern = Pat {
+ ty: pattern.ty,
+ span: pattern.span,
+ kind: Box::new(PatKind::AscribeUserType {
+ ascription: thir::pattern::Ascription {
+ user_ty: PatTyProj::from_user_type(user_ty),
+ user_ty_span: ty.span,
+ variance: ty::Variance::Covariant,
+ },
+ subpattern: pattern,
+ }),
+ };
+ }
+ }
+
+ Some(Stmt {
+ kind: StmtKind::Let {
+ remainder_scope,
+ init_scope: region::Scope {
+ id: hir_id.local_id,
+ data: region::ScopeData::Node,
+ },
+ pattern,
+ initializer: local.init.map(|init| self.mirror_expr(init)),
+ lint_level: LintLevel::Explicit(local.hir_id),
},
- pattern,
- initializer: local.init.to_ref(),
- lint_level: LintLevel::Explicit(local.hir_id),
- },
- opt_destruction_scope: opt_dxn_ext,
- })));
+ opt_destruction_scope: opt_dxn_ext,
+ })
+ }
}
- }
+ }))
}
- result
-}
-
-crate fn to_expr_ref<'a, 'tcx>(
- cx: &mut Cx<'a, 'tcx>,
- block: &'tcx hir::Block<'tcx>,
-) -> ExprRef<'tcx> {
- let block_ty = cx.typeck_results().node_type(block.hir_id);
- let temp_lifetime = cx.region_scope_tree.temporary_scope(block.hir_id.local_id);
- let expr = Expr {
- ty: block_ty,
- temp_lifetime,
- span: block.span,
- kind: ExprKind::Block { body: block },
- };
- expr.to_ref()
}
diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs
index 2962cbe..200a5fc 100644
--- a/compiler/rustc_mir_build/src/thir/cx/expr.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs
@@ -1,11 +1,11 @@
-use crate::thir::cx::block;
-use crate::thir::cx::to_ref::ToRef;
use crate::thir::cx::Cx;
use crate::thir::util::UserAnnotatedTyHelpers;
use crate::thir::*;
+use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res};
use rustc_index::vec::Idx;
+use rustc_middle::hir::place::Place as HirPlace;
use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
use rustc_middle::hir::place::ProjectionKind as HirProjectionKind;
use rustc_middle::mir::interpret::Scalar;
@@ -17,45 +17,71 @@
use rustc_middle::ty::{self, AdtKind, Ty};
use rustc_span::Span;
-impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr<'tcx> {
- type Output = Expr<'tcx>;
+use std::iter;
- fn make_mirror(self, cx: &mut Cx<'_, 'tcx>) -> Expr<'tcx> {
- let temp_lifetime = cx.region_scope_tree.temporary_scope(self.hir_id.local_id);
- let expr_scope = region::Scope { id: self.hir_id.local_id, data: region::ScopeData::Node };
+impl<'thir, 'tcx> Cx<'thir, 'tcx> {
+ /// Mirrors and allocates a single [`hir::Expr`]. If you need to mirror a whole slice
+ /// of expressions, prefer using [`mirror_exprs`].
+ ///
+ /// [`mirror_exprs`]: Self::mirror_exprs
+ crate fn mirror_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) -> &'thir Expr<'thir, 'tcx> {
+ // `mirror_expr` can recurse very deeply. Make sure the stack doesn't overflow.
+ ensure_sufficient_stack(|| self.arena.alloc(self.mirror_expr_inner(expr)))
+ }
- debug!("Expr::make_mirror(): id={}, span={:?}", self.hir_id, self.span);
+ /// Mirrors and allocates a slice of [`hir::Expr`]s. They will be allocated as a
+ /// contiguous sequence in memory.
+ crate fn mirror_exprs(&mut self, exprs: &'tcx [hir::Expr<'tcx>]) -> &'thir [Expr<'thir, 'tcx>] {
+ self.arena.alloc_from_iter(exprs.iter().map(|expr| self.mirror_expr_inner(expr)))
+ }
- let mut expr = make_mirror_unadjusted(cx, self);
+ /// Mirrors a [`hir::Expr`] without allocating it into the arena.
+ /// This is a separate, private function so that [`mirror_expr`] and [`mirror_exprs`] can
+ /// decide how to allocate this expression (alone or within a slice).
+ ///
+ /// [`mirror_expr`]: Self::mirror_expr
+ /// [`mirror_exprs`]: Self::mirror_exprs
+ pub(super) fn mirror_expr_inner(
+ &mut self,
+ hir_expr: &'tcx hir::Expr<'tcx>,
+ ) -> Expr<'thir, 'tcx> {
+ let temp_lifetime = self.region_scope_tree.temporary_scope(hir_expr.hir_id.local_id);
+ let expr_scope =
+ region::Scope { id: hir_expr.hir_id.local_id, data: region::ScopeData::Node };
+
+ debug!("Expr::make_mirror(): id={}, span={:?}", hir_expr.hir_id, hir_expr.span);
+
+ let mut expr = self.make_mirror_unadjusted(hir_expr);
// Now apply adjustments, if any.
- for adjustment in cx.typeck_results().expr_adjustments(self) {
+ for adjustment in self.typeck_results.expr_adjustments(hir_expr) {
debug!("make_mirror: expr={:?} applying adjustment={:?}", expr, adjustment);
- expr = apply_adjustment(cx, self, expr, adjustment);
+ expr = self.apply_adjustment(hir_expr, expr, adjustment);
}
// Next, wrap this up in the expr's scope.
expr = Expr {
temp_lifetime,
ty: expr.ty,
- span: self.span,
+ span: hir_expr.span,
kind: ExprKind::Scope {
region_scope: expr_scope,
- value: expr.to_ref(),
- lint_level: LintLevel::Explicit(self.hir_id),
+ value: self.arena.alloc(expr),
+ lint_level: LintLevel::Explicit(hir_expr.hir_id),
},
};
// Finally, create a destruction scope, if any.
- if let Some(region_scope) = cx.region_scope_tree.opt_destruction_scope(self.hir_id.local_id)
+ if let Some(region_scope) =
+ self.region_scope_tree.opt_destruction_scope(hir_expr.hir_id.local_id)
{
expr = Expr {
temp_lifetime,
ty: expr.ty,
- span: self.span,
+ span: hir_expr.span,
kind: ExprKind::Scope {
region_scope,
- value: expr.to_ref(),
+ value: self.arena.alloc(expr),
lint_level: LintLevel::Inherited,
},
};
@@ -64,364 +90,407 @@
// OK, all done!
expr
}
-}
-fn apply_adjustment<'a, 'tcx>(
- cx: &mut Cx<'a, 'tcx>,
- hir_expr: &'tcx hir::Expr<'tcx>,
- mut expr: Expr<'tcx>,
- adjustment: &Adjustment<'tcx>,
-) -> Expr<'tcx> {
- let Expr { temp_lifetime, mut span, .. } = expr;
+ fn apply_adjustment(
+ &mut self,
+ hir_expr: &'tcx hir::Expr<'tcx>,
+ mut expr: Expr<'thir, 'tcx>,
+ adjustment: &Adjustment<'tcx>,
+ ) -> Expr<'thir, 'tcx> {
+ let Expr { temp_lifetime, mut span, .. } = expr;
- // Adjust the span from the block, to the last expression of the
- // block. This is a better span when returning a mutable reference
- // with too short a lifetime. The error message will use the span
- // from the assignment to the return place, which should only point
- // at the returned value, not the entire function body.
- //
- // fn return_short_lived<'a>(x: &'a mut i32) -> &'static mut i32 {
- // x
- // // ^ error message points at this expression.
- // }
- let mut adjust_span = |expr: &mut Expr<'tcx>| {
- if let ExprKind::Block { body } = expr.kind {
- if let Some(ref last_expr) = body.expr {
- span = last_expr.span;
- expr.span = span;
+ // Adjust the span from the block, to the last expression of the
+ // block. This is a better span when returning a mutable reference
+ // with too short a lifetime. The error message will use the span
+ // from the assignment to the return place, which should only point
+ // at the returned value, not the entire function body.
+ //
+ // fn return_short_lived<'a>(x: &'a mut i32) -> &'static mut i32 {
+ // x
+ // // ^ error message points at this expression.
+ // }
+ let mut adjust_span = |expr: &mut Expr<'thir, 'tcx>| {
+ if let ExprKind::Block { body } = &expr.kind {
+ if let Some(ref last_expr) = body.expr {
+ span = last_expr.span;
+ expr.span = span;
+ }
}
- }
- };
+ };
- let kind = match adjustment.kind {
- Adjust::Pointer(PointerCast::Unsize) => {
- adjust_span(&mut expr);
- ExprKind::Pointer { cast: PointerCast::Unsize, source: expr.to_ref() }
- }
- Adjust::Pointer(cast) => ExprKind::Pointer { cast, source: expr.to_ref() },
- Adjust::NeverToAny => ExprKind::NeverToAny { source: expr.to_ref() },
- Adjust::Deref(None) => {
- adjust_span(&mut expr);
- ExprKind::Deref { arg: expr.to_ref() }
- }
- Adjust::Deref(Some(deref)) => {
- // We don't need to do call adjust_span here since
- // deref coercions always start with a built-in deref.
- let call = deref.method_call(cx.tcx(), expr.ty);
+ let kind = match adjustment.kind {
+ Adjust::Pointer(PointerCast::Unsize) => {
+ adjust_span(&mut expr);
+ ExprKind::Pointer { cast: PointerCast::Unsize, source: self.arena.alloc(expr) }
+ }
+ Adjust::Pointer(cast) => ExprKind::Pointer { cast, source: self.arena.alloc(expr) },
+ Adjust::NeverToAny => ExprKind::NeverToAny { source: self.arena.alloc(expr) },
+ Adjust::Deref(None) => {
+ adjust_span(&mut expr);
+ ExprKind::Deref { arg: self.arena.alloc(expr) }
+ }
+ Adjust::Deref(Some(deref)) => {
+ // We don't need to call adjust_span here since
+ // deref coercions always start with a built-in deref.
+ let call = deref.method_call(self.tcx(), expr.ty);
- expr = Expr {
- temp_lifetime,
- ty: cx.tcx.mk_ref(deref.region, ty::TypeAndMut { ty: expr.ty, mutbl: deref.mutbl }),
- span,
- kind: ExprKind::Borrow {
- borrow_kind: deref.mutbl.to_borrow_kind(),
- arg: expr.to_ref(),
- },
- };
-
- overloaded_place(
- cx,
- hir_expr,
- adjustment.target,
- Some(call),
- vec![expr.to_ref()],
- deref.span,
- )
- }
- Adjust::Borrow(AutoBorrow::Ref(_, m)) => {
- ExprKind::Borrow { borrow_kind: m.to_borrow_kind(), arg: expr.to_ref() }
- }
- Adjust::Borrow(AutoBorrow::RawPtr(mutability)) => {
- ExprKind::AddressOf { mutability, arg: expr.to_ref() }
- }
- };
-
- Expr { temp_lifetime, ty: adjustment.target, span, kind }
-}
-
-fn make_mirror_unadjusted<'a, 'tcx>(
- cx: &mut Cx<'a, 'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
-) -> Expr<'tcx> {
- let expr_ty = cx.typeck_results().expr_ty(expr);
- let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id);
-
- let kind = match expr.kind {
- // Here comes the interesting stuff:
- hir::ExprKind::MethodCall(_, method_span, ref args, fn_span) => {
- // Rewrite a.b(c) into UFCS form like Trait::b(a, c)
- let expr = method_callee(cx, expr, method_span, None);
- let args = args.iter().map(|e| e.to_ref()).collect();
- ExprKind::Call { ty: expr.ty, fun: expr.to_ref(), args, from_hir_call: true, fn_span }
- }
-
- hir::ExprKind::Call(ref fun, ref args) => {
- if cx.typeck_results().is_method_call(expr) {
- // The callee is something implementing Fn, FnMut, or FnOnce.
- // Find the actual method implementation being called and
- // build the appropriate UFCS call expression with the
- // callee-object as expr parameter.
-
- // rewrite f(u, v) into FnOnce::call_once(f, (u, v))
-
- let method = method_callee(cx, expr, fun.span, None);
-
- let arg_tys = args.iter().map(|e| cx.typeck_results().expr_ty_adjusted(e));
- let tupled_args = Expr {
- ty: cx.tcx.mk_tup(arg_tys),
+ expr = Expr {
temp_lifetime,
- span: expr.span,
- kind: ExprKind::Tuple { fields: args.iter().map(ToRef::to_ref).collect() },
+ ty: self
+ .tcx
+ .mk_ref(deref.region, ty::TypeAndMut { ty: expr.ty, mutbl: deref.mutbl }),
+ span,
+ kind: ExprKind::Borrow {
+ borrow_kind: deref.mutbl.to_borrow_kind(),
+ arg: self.arena.alloc(expr),
+ },
};
- ExprKind::Call {
- ty: method.ty,
- fun: method.to_ref(),
- args: vec![fun.to_ref(), tupled_args.to_ref()],
- from_hir_call: true,
- fn_span: expr.span,
- }
- } else {
- let adt_data =
- if let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = fun.kind {
- // Tuple-like ADTs are represented as ExprKind::Call. We convert them here.
- expr_ty.ty_adt_def().and_then(|adt_def| match path.res {
- Res::Def(DefKind::Ctor(_, CtorKind::Fn), ctor_id) => {
- Some((adt_def, adt_def.variant_index_with_ctor_id(ctor_id)))
- }
- Res::SelfCtor(..) => Some((adt_def, VariantIdx::new(0))),
- _ => None,
- })
- } else {
- None
- };
- if let Some((adt_def, index)) = adt_data {
- let substs = cx.typeck_results().node_substs(fun.hir_id);
- let user_provided_types = cx.typeck_results().user_provided_types();
- let user_ty = user_provided_types.get(fun.hir_id).copied().map(|mut u_ty| {
- if let UserType::TypeOf(ref mut did, _) = &mut u_ty.value {
- *did = adt_def.did;
- }
- u_ty
- });
- debug!("make_mirror_unadjusted: (call) user_ty={:?}", user_ty);
+ self.overloaded_place(
+ hir_expr,
+ adjustment.target,
+ Some(call),
+ self.arena.alloc_from_iter(iter::once(expr)),
+ deref.span,
+ )
+ }
+ Adjust::Borrow(AutoBorrow::Ref(_, m)) => {
+ ExprKind::Borrow { borrow_kind: m.to_borrow_kind(), arg: self.arena.alloc(expr) }
+ }
+ Adjust::Borrow(AutoBorrow::RawPtr(mutability)) => {
+ ExprKind::AddressOf { mutability, arg: self.arena.alloc(expr) }
+ }
+ };
- let field_refs = args
- .iter()
- .enumerate()
- .map(|(idx, e)| FieldExprRef { name: Field::new(idx), expr: e.to_ref() })
- .collect();
- ExprKind::Adt {
- adt_def,
- substs,
- variant_index: index,
- fields: field_refs,
- user_ty,
- base: None,
- }
- } else {
+ Expr { temp_lifetime, ty: adjustment.target, span, kind }
+ }
+
+ fn make_mirror_unadjusted(&mut self, expr: &'tcx hir::Expr<'tcx>) -> Expr<'thir, 'tcx> {
+ let expr_ty = self.typeck_results().expr_ty(expr);
+ let temp_lifetime = self.region_scope_tree.temporary_scope(expr.hir_id.local_id);
+
+ let kind = match expr.kind {
+ // Here comes the interesting stuff:
+ hir::ExprKind::MethodCall(_, method_span, ref args, fn_span) => {
+ // Rewrite a.b(c) into UFCS form like Trait::b(a, c)
+ let expr = self.method_callee(expr, method_span, None);
+ let args = self.mirror_exprs(args);
+ ExprKind::Call {
+ ty: expr.ty,
+ fun: self.arena.alloc(expr),
+ args,
+ from_hir_call: true,
+ fn_span,
+ }
+ }
+
+ hir::ExprKind::Call(ref fun, ref args) => {
+ if self.typeck_results().is_method_call(expr) {
+ // The callee is something implementing Fn, FnMut, or FnOnce.
+ // Find the actual method implementation being called and
+ // build the appropriate UFCS call expression with the
+ // callee-object as expr parameter.
+
+ // rewrite f(u, v) into FnOnce::call_once(f, (u, v))
+
+ let method = self.method_callee(expr, fun.span, None);
+
+ let arg_tys = args.iter().map(|e| self.typeck_results().expr_ty_adjusted(e));
+ let tupled_args = Expr {
+ ty: self.tcx.mk_tup(arg_tys),
+ temp_lifetime,
+ span: expr.span,
+ kind: ExprKind::Tuple { fields: self.mirror_exprs(args) },
+ };
+
ExprKind::Call {
- ty: cx.typeck_results().node_type(fun.hir_id),
- fun: fun.to_ref(),
- args: args.to_ref(),
+ ty: method.ty,
+ fun: self.arena.alloc(method),
+ args: self
+ .arena
+ .alloc_from_iter(vec![self.mirror_expr_inner(fun), tupled_args]),
from_hir_call: true,
fn_span: expr.span,
}
- }
- }
- }
+ } else {
+ let adt_data =
+ if let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = fun.kind {
+ // Tuple-like ADTs are represented as ExprKind::Call. We convert them here.
+ expr_ty.ty_adt_def().and_then(|adt_def| match path.res {
+ Res::Def(DefKind::Ctor(_, CtorKind::Fn), ctor_id) => {
+ Some((adt_def, adt_def.variant_index_with_ctor_id(ctor_id)))
+ }
+ Res::SelfCtor(..) => Some((adt_def, VariantIdx::new(0))),
+ _ => None,
+ })
+ } else {
+ None
+ };
+ if let Some((adt_def, index)) = adt_data {
+ let substs = self.typeck_results().node_substs(fun.hir_id);
+ let user_provided_types = self.typeck_results().user_provided_types();
+ let user_ty =
+ user_provided_types.get(fun.hir_id).copied().map(|mut u_ty| {
+ if let UserType::TypeOf(ref mut did, _) = &mut u_ty.value {
+ *did = adt_def.did;
+ }
+ u_ty
+ });
+ debug!("make_mirror_unadjusted: (call) user_ty={:?}", user_ty);
- hir::ExprKind::AddrOf(hir::BorrowKind::Ref, mutbl, ref arg) => {
- ExprKind::Borrow { borrow_kind: mutbl.to_borrow_kind(), arg: arg.to_ref() }
- }
-
- hir::ExprKind::AddrOf(hir::BorrowKind::Raw, mutability, ref arg) => {
- ExprKind::AddressOf { mutability, arg: arg.to_ref() }
- }
-
- hir::ExprKind::Block(ref blk, _) => ExprKind::Block { body: &blk },
-
- hir::ExprKind::Assign(ref lhs, ref rhs, _) => {
- ExprKind::Assign { lhs: lhs.to_ref(), rhs: rhs.to_ref() }
- }
-
- hir::ExprKind::AssignOp(op, ref lhs, ref rhs) => {
- if cx.typeck_results().is_method_call(expr) {
- overloaded_operator(cx, expr, vec![lhs.to_ref(), rhs.to_ref()])
- } else {
- ExprKind::AssignOp { op: bin_op(op.node), lhs: lhs.to_ref(), rhs: rhs.to_ref() }
- }
- }
-
- hir::ExprKind::Lit(ref lit) => ExprKind::Literal {
- literal: cx.const_eval_literal(&lit.node, expr_ty, lit.span, false),
- user_ty: None,
- const_id: None,
- },
-
- hir::ExprKind::Binary(op, ref lhs, ref rhs) => {
- if cx.typeck_results().is_method_call(expr) {
- overloaded_operator(cx, expr, vec![lhs.to_ref(), rhs.to_ref()])
- } else {
- // FIXME overflow
- match (op.node, cx.constness) {
- (hir::BinOpKind::And, _) => ExprKind::LogicalOp {
- op: LogicalOp::And,
- lhs: lhs.to_ref(),
- rhs: rhs.to_ref(),
- },
- (hir::BinOpKind::Or, _) => ExprKind::LogicalOp {
- op: LogicalOp::Or,
- lhs: lhs.to_ref(),
- rhs: rhs.to_ref(),
- },
-
- _ => {
- let op = bin_op(op.node);
- ExprKind::Binary { op, lhs: lhs.to_ref(), rhs: rhs.to_ref() }
+ let field_refs =
+ self.arena.alloc_from_iter(args.iter().enumerate().map(|(idx, e)| {
+ FieldExpr { name: Field::new(idx), expr: self.mirror_expr(e) }
+ }));
+ ExprKind::Adt {
+ adt_def,
+ substs,
+ variant_index: index,
+ fields: field_refs,
+ user_ty,
+ base: None,
+ }
+ } else {
+ ExprKind::Call {
+ ty: self.typeck_results().node_type(fun.hir_id),
+ fun: self.mirror_expr(fun),
+ args: self.mirror_exprs(args),
+ from_hir_call: true,
+ fn_span: expr.span,
+ }
}
}
}
- }
- hir::ExprKind::Index(ref lhs, ref index) => {
- if cx.typeck_results().is_method_call(expr) {
- overloaded_place(
- cx,
- expr,
- expr_ty,
- None,
- vec![lhs.to_ref(), index.to_ref()],
- expr.span,
- )
- } else {
- ExprKind::Index { lhs: lhs.to_ref(), index: index.to_ref() }
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, mutbl, ref arg) => {
+ ExprKind::Borrow { borrow_kind: mutbl.to_borrow_kind(), arg: self.mirror_expr(arg) }
}
- }
- hir::ExprKind::Unary(hir::UnOp::UnDeref, ref arg) => {
- if cx.typeck_results().is_method_call(expr) {
- overloaded_place(cx, expr, expr_ty, None, vec![arg.to_ref()], expr.span)
- } else {
- ExprKind::Deref { arg: arg.to_ref() }
+ hir::ExprKind::AddrOf(hir::BorrowKind::Raw, mutability, ref arg) => {
+ ExprKind::AddressOf { mutability, arg: self.mirror_expr(arg) }
}
- }
- hir::ExprKind::Unary(hir::UnOp::UnNot, ref arg) => {
- if cx.typeck_results().is_method_call(expr) {
- overloaded_operator(cx, expr, vec![arg.to_ref()])
- } else {
- ExprKind::Unary { op: UnOp::Not, arg: arg.to_ref() }
+ hir::ExprKind::Block(ref blk, _) => ExprKind::Block { body: self.mirror_block(blk) },
+
+ hir::ExprKind::Assign(ref lhs, ref rhs, _) => {
+ ExprKind::Assign { lhs: self.mirror_expr(lhs), rhs: self.mirror_expr(rhs) }
}
- }
- hir::ExprKind::Unary(hir::UnOp::UnNeg, ref arg) => {
- if cx.typeck_results().is_method_call(expr) {
- overloaded_operator(cx, expr, vec![arg.to_ref()])
- } else if let hir::ExprKind::Lit(ref lit) = arg.kind {
- ExprKind::Literal {
- literal: cx.const_eval_literal(&lit.node, expr_ty, lit.span, true),
- user_ty: None,
- const_id: None,
- }
- } else {
- ExprKind::Unary { op: UnOp::Neg, arg: arg.to_ref() }
- }
- }
-
- hir::ExprKind::Struct(ref qpath, ref fields, ref base) => match expr_ty.kind() {
- ty::Adt(adt, substs) => match adt.adt_kind() {
- AdtKind::Struct | AdtKind::Union => {
- let user_provided_types = cx.typeck_results().user_provided_types();
- let user_ty = user_provided_types.get(expr.hir_id).copied();
- debug!("make_mirror_unadjusted: (struct/union) user_ty={:?}", user_ty);
- ExprKind::Adt {
- adt_def: adt,
- variant_index: VariantIdx::new(0),
- substs,
- user_ty,
- fields: field_refs(cx, fields),
- base: base.as_ref().map(|base| FruInfo {
- base: base.to_ref(),
- field_types: cx.typeck_results().fru_field_types()[expr.hir_id].clone(),
- }),
+ hir::ExprKind::AssignOp(op, ref lhs, ref rhs) => {
+ if self.typeck_results().is_method_call(expr) {
+ let lhs = self.mirror_expr_inner(lhs);
+ let rhs = self.mirror_expr_inner(rhs);
+ self.overloaded_operator(expr, self.arena.alloc_from_iter(vec![lhs, rhs]))
+ } else {
+ ExprKind::AssignOp {
+ op: bin_op(op.node),
+ lhs: self.mirror_expr(lhs),
+ rhs: self.mirror_expr(rhs),
}
}
- AdtKind::Enum => {
- let res = cx.typeck_results().qpath_res(qpath, expr.hir_id);
- match res {
- Res::Def(DefKind::Variant, variant_id) => {
- assert!(base.is_none());
+ }
- let index = adt.variant_index_with_id(variant_id);
- let user_provided_types = cx.typeck_results().user_provided_types();
- let user_ty = user_provided_types.get(expr.hir_id).copied();
- debug!("make_mirror_unadjusted: (variant) user_ty={:?}", user_ty);
- ExprKind::Adt {
- adt_def: adt,
- variant_index: index,
- substs,
- user_ty,
- fields: field_refs(cx, fields),
- base: None,
+ hir::ExprKind::Lit(ref lit) => ExprKind::Literal {
+ literal: self.const_eval_literal(&lit.node, expr_ty, lit.span, false),
+ user_ty: None,
+ const_id: None,
+ },
+
+ hir::ExprKind::Binary(op, ref lhs, ref rhs) => {
+ if self.typeck_results().is_method_call(expr) {
+ let lhs = self.mirror_expr_inner(lhs);
+ let rhs = self.mirror_expr_inner(rhs);
+ self.overloaded_operator(expr, self.arena.alloc_from_iter(vec![lhs, rhs]))
+ } else {
+ // FIXME overflow
+ match op.node {
+ hir::BinOpKind::And => ExprKind::LogicalOp {
+ op: LogicalOp::And,
+ lhs: self.mirror_expr(lhs),
+ rhs: self.mirror_expr(rhs),
+ },
+ hir::BinOpKind::Or => ExprKind::LogicalOp {
+ op: LogicalOp::Or,
+ lhs: self.mirror_expr(lhs),
+ rhs: self.mirror_expr(rhs),
+ },
+
+ _ => {
+ let op = bin_op(op.node);
+ ExprKind::Binary {
+ op,
+ lhs: self.mirror_expr(lhs),
+ rhs: self.mirror_expr(rhs),
}
}
- _ => {
- span_bug!(expr.span, "unexpected res: {:?}", res);
- }
}
}
- },
- _ => {
- span_bug!(expr.span, "unexpected type for struct literal: {:?}", expr_ty);
}
- },
- hir::ExprKind::Closure(..) => {
- let closure_ty = cx.typeck_results().expr_ty(expr);
- let (def_id, substs, movability) = match *closure_ty.kind() {
- ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs), None),
- ty::Generator(def_id, substs, movability) => {
- (def_id, UpvarSubsts::Generator(substs), Some(movability))
+ hir::ExprKind::Index(ref lhs, ref index) => {
+ if self.typeck_results().is_method_call(expr) {
+ let lhs = self.mirror_expr_inner(lhs);
+ let index = self.mirror_expr_inner(index);
+ self.overloaded_place(
+ expr,
+ expr_ty,
+ None,
+ self.arena.alloc_from_iter(vec![lhs, index]),
+ expr.span,
+ )
+ } else {
+ ExprKind::Index { lhs: self.mirror_expr(lhs), index: self.mirror_expr(index) }
}
+ }
+
+ hir::ExprKind::Unary(hir::UnOp::Deref, ref arg) => {
+ if self.typeck_results().is_method_call(expr) {
+ let arg = self.mirror_expr_inner(arg);
+ self.overloaded_place(
+ expr,
+ expr_ty,
+ None,
+ self.arena.alloc_from_iter(iter::once(arg)),
+ expr.span,
+ )
+ } else {
+ ExprKind::Deref { arg: self.mirror_expr(arg) }
+ }
+ }
+
+ hir::ExprKind::Unary(hir::UnOp::Not, ref arg) => {
+ if self.typeck_results().is_method_call(expr) {
+ let arg = self.mirror_expr_inner(arg);
+ self.overloaded_operator(expr, self.arena.alloc_from_iter(iter::once(arg)))
+ } else {
+ ExprKind::Unary { op: UnOp::Not, arg: self.mirror_expr(arg) }
+ }
+ }
+
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref arg) => {
+ if self.typeck_results().is_method_call(expr) {
+ let arg = self.mirror_expr_inner(arg);
+ self.overloaded_operator(expr, self.arena.alloc_from_iter(iter::once(arg)))
+ } else if let hir::ExprKind::Lit(ref lit) = arg.kind {
+ ExprKind::Literal {
+ literal: self.const_eval_literal(&lit.node, expr_ty, lit.span, true),
+ user_ty: None,
+ const_id: None,
+ }
+ } else {
+ ExprKind::Unary { op: UnOp::Neg, arg: self.mirror_expr(arg) }
+ }
+ }
+
+ hir::ExprKind::Struct(ref qpath, ref fields, ref base) => match expr_ty.kind() {
+ ty::Adt(adt, substs) => match adt.adt_kind() {
+ AdtKind::Struct | AdtKind::Union => {
+ let user_provided_types = self.typeck_results().user_provided_types();
+ let user_ty = user_provided_types.get(expr.hir_id).copied();
+ debug!("make_mirror_unadjusted: (struct/union) user_ty={:?}", user_ty);
+ ExprKind::Adt {
+ adt_def: adt,
+ variant_index: VariantIdx::new(0),
+ substs,
+ user_ty,
+ fields: self.field_refs(fields),
+ base: base.as_ref().map(|base| FruInfo {
+ base: self.mirror_expr(base),
+ field_types: self.arena.alloc_from_iter(
+ self.typeck_results().fru_field_types()[expr.hir_id]
+ .iter()
+ .cloned(),
+ ),
+ }),
+ }
+ }
+ AdtKind::Enum => {
+ let res = self.typeck_results().qpath_res(qpath, expr.hir_id);
+ match res {
+ Res::Def(DefKind::Variant, variant_id) => {
+ assert!(base.is_none());
+
+ let index = adt.variant_index_with_id(variant_id);
+ let user_provided_types =
+ self.typeck_results().user_provided_types();
+ let user_ty = user_provided_types.get(expr.hir_id).copied();
+ debug!("make_mirror_unadjusted: (variant) user_ty={:?}", user_ty);
+ ExprKind::Adt {
+ adt_def: adt,
+ variant_index: index,
+ substs,
+ user_ty,
+ fields: self.field_refs(fields),
+ base: None,
+ }
+ }
+ _ => {
+ span_bug!(expr.span, "unexpected res: {:?}", res);
+ }
+ }
+ }
+ },
_ => {
- span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty);
+ span_bug!(expr.span, "unexpected type for struct literal: {:?}", expr_ty);
}
- };
+ },
- let upvars = cx
- .typeck_results()
- .closure_min_captures_flattened(def_id)
- .zip(substs.upvar_tys())
- .map(|(captured_place, ty)| capture_upvar(cx, expr, captured_place, ty))
- .collect();
- ExprKind::Closure { closure_id: def_id, substs, upvars, movability }
- }
+ hir::ExprKind::Closure(..) => {
+ let closure_ty = self.typeck_results().expr_ty(expr);
+ let (def_id, substs, movability) = match *closure_ty.kind() {
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs), None),
+ ty::Generator(def_id, substs, movability) => {
+ (def_id, UpvarSubsts::Generator(substs), Some(movability))
+ }
+ _ => {
+ span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty);
+ }
+ };
- hir::ExprKind::Path(ref qpath) => {
- let res = cx.typeck_results().qpath_res(qpath, expr.hir_id);
- convert_path_expr(cx, expr, res)
- }
+ let upvars = self.arena.alloc_from_iter(
+ self.typeck_results
+ .closure_min_captures_flattened(def_id)
+ .zip(substs.upvar_tys())
+ .map(|(captured_place, ty)| self.capture_upvar(expr, captured_place, ty)),
+ );
- hir::ExprKind::InlineAsm(ref asm) => ExprKind::InlineAsm {
- template: asm.template,
- operands: asm
- .operands
- .iter()
- .map(|(op, _op_sp)| {
+ // Convert the closure fake reads, if any, from hir `Place` to ExprRef
+ let fake_reads = match self.typeck_results.closure_fake_reads.get(&def_id) {
+ Some(fake_reads) => fake_reads
+ .iter()
+ .map(|(place, cause, hir_id)| {
+ let expr = self.convert_captured_hir_place(expr, place.clone());
+ let expr_ref: &'thir Expr<'thir, 'tcx> = self.arena.alloc(expr);
+ (expr_ref, *cause, *hir_id)
+ })
+ .collect(),
+ None => Vec::new(),
+ };
+
+ ExprKind::Closure { closure_id: def_id, substs, upvars, movability, fake_reads }
+ }
+
+ hir::ExprKind::Path(ref qpath) => {
+ let res = self.typeck_results().qpath_res(qpath, expr.hir_id);
+ self.convert_path_expr(expr, res)
+ }
+
+ hir::ExprKind::InlineAsm(ref asm) => ExprKind::InlineAsm {
+ template: asm.template,
+ operands: self.arena.alloc_from_iter(asm.operands.iter().map(|(op, _op_sp)| {
match *op {
hir::InlineAsmOperand::In { reg, ref expr } => {
- InlineAsmOperand::In { reg, expr: expr.to_ref() }
+ InlineAsmOperand::In { reg, expr: self.mirror_expr(expr) }
}
hir::InlineAsmOperand::Out { reg, late, ref expr } => {
InlineAsmOperand::Out {
reg,
late,
- expr: expr.as_ref().map(|expr| expr.to_ref()),
+ expr: expr.as_ref().map(|expr| self.mirror_expr(expr)),
}
}
hir::InlineAsmOperand::InOut { reg, late, ref expr } => {
- InlineAsmOperand::InOut { reg, late, expr: expr.to_ref() }
+ InlineAsmOperand::InOut { reg, late, expr: self.mirror_expr(expr) }
}
hir::InlineAsmOperand::SplitInOut {
reg,
@@ -431,11 +500,11 @@
} => InlineAsmOperand::SplitInOut {
reg,
late,
- in_expr: in_expr.to_ref(),
- out_expr: out_expr.as_ref().map(|expr| expr.to_ref()),
+ in_expr: self.mirror_expr(in_expr),
+ out_expr: out_expr.as_ref().map(|expr| self.mirror_expr(expr)),
},
hir::InlineAsmOperand::Const { ref expr } => {
- InlineAsmOperand::Const { expr: expr.to_ref() }
+ InlineAsmOperand::Const { expr: self.mirror_expr(expr) }
}
hir::InlineAsmOperand::Sym { ref expr } => {
let qpath = match expr.kind {
@@ -447,25 +516,24 @@
),
};
let temp_lifetime =
- cx.region_scope_tree.temporary_scope(expr.hir_id.local_id);
- let res = cx.typeck_results().qpath_res(qpath, expr.hir_id);
+ self.region_scope_tree.temporary_scope(expr.hir_id.local_id);
+ let res = self.typeck_results().qpath_res(qpath, expr.hir_id);
let ty;
match res {
Res::Def(DefKind::Fn, _) | Res::Def(DefKind::AssocFn, _) => {
- ty = cx.typeck_results().node_type(expr.hir_id);
- let user_ty = user_substs_applied_to_res(cx, expr.hir_id, res);
+ ty = self.typeck_results().node_type(expr.hir_id);
+ let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
InlineAsmOperand::SymFn {
- expr: Expr {
+ expr: self.arena.alloc(Expr {
ty,
temp_lifetime,
span: expr.span,
kind: ExprKind::Literal {
- literal: ty::Const::zero_sized(cx.tcx, ty),
+ literal: ty::Const::zero_sized(self.tcx, ty),
user_ty,
const_id: None,
},
- }
- .to_ref(),
+ }),
}
}
@@ -474,277 +542,583 @@
}
_ => {
- cx.tcx.sess.span_err(
+ self.tcx.sess.span_err(
expr.span,
"asm `sym` operand must point to a fn or static",
);
// Not a real fn, but we're not reaching codegen anyways...
- ty = cx.tcx.ty_error();
+ ty = self.tcx.ty_error();
InlineAsmOperand::SymFn {
- expr: Expr {
+ expr: self.arena.alloc(Expr {
ty,
temp_lifetime,
span: expr.span,
kind: ExprKind::Literal {
- literal: ty::Const::zero_sized(cx.tcx, ty),
+ literal: ty::Const::zero_sized(self.tcx, ty),
user_ty: None,
const_id: None,
},
- }
- .to_ref(),
+ }),
}
}
}
}
}
- })
- .collect(),
- options: asm.options,
- line_spans: asm.line_spans,
- },
-
- hir::ExprKind::LlvmInlineAsm(ref asm) => ExprKind::LlvmInlineAsm {
- asm: &asm.inner,
- outputs: asm.outputs_exprs.to_ref(),
- inputs: asm.inputs_exprs.to_ref(),
- },
-
- hir::ExprKind::ConstBlock(ref anon_const) => {
- let anon_const_def_id = cx.tcx.hir().local_def_id(anon_const.hir_id);
- let value = ty::Const::from_anon_const(cx.tcx, anon_const_def_id);
-
- ExprKind::ConstBlock { value }
- }
- // Now comes the rote stuff:
- hir::ExprKind::Repeat(ref v, ref count) => {
- let count_def_id = cx.tcx.hir().local_def_id(count.hir_id);
- let count = ty::Const::from_anon_const(cx.tcx, count_def_id);
-
- ExprKind::Repeat { value: v.to_ref(), count }
- }
- hir::ExprKind::Ret(ref v) => ExprKind::Return { value: v.to_ref() },
- hir::ExprKind::Break(dest, ref value) => match dest.target_id {
- Ok(target_id) => ExprKind::Break {
- label: region::Scope { id: target_id.local_id, data: region::ScopeData::Node },
- value: value.to_ref(),
+ })),
+ options: asm.options,
+ line_spans: asm.line_spans,
},
- Err(err) => bug!("invalid loop id for break: {}", err),
- },
- hir::ExprKind::Continue(dest) => match dest.target_id {
- Ok(loop_id) => ExprKind::Continue {
- label: region::Scope { id: loop_id.local_id, data: region::ScopeData::Node },
+
+ hir::ExprKind::LlvmInlineAsm(ref asm) => ExprKind::LlvmInlineAsm {
+ asm: &asm.inner,
+ outputs: self.mirror_exprs(asm.outputs_exprs),
+ inputs: self.mirror_exprs(asm.inputs_exprs),
},
- Err(err) => bug!("invalid loop id for continue: {}", err),
- },
- hir::ExprKind::If(cond, then, else_opt) => ExprKind::If {
- cond: cond.to_ref(),
- then: then.to_ref(),
- else_opt: else_opt.map(|el| el.to_ref()),
- },
- hir::ExprKind::Match(ref discr, ref arms, _) => ExprKind::Match {
- scrutinee: discr.to_ref(),
- arms: arms.iter().map(|a| convert_arm(cx, a)).collect(),
- },
- hir::ExprKind::Loop(ref body, ..) => ExprKind::Loop { body: block::to_expr_ref(cx, body) },
- hir::ExprKind::Field(ref source, ..) => ExprKind::Field {
- lhs: source.to_ref(),
- name: Field::new(cx.tcx.field_index(expr.hir_id, cx.typeck_results)),
- },
- hir::ExprKind::Cast(ref source, ref cast_ty) => {
- // Check for a user-given type annotation on this `cast`
- let user_provided_types = cx.typeck_results.user_provided_types();
- let user_ty = user_provided_types.get(cast_ty.hir_id);
- debug!(
- "cast({:?}) has ty w/ hir_id {:?} and user provided ty {:?}",
- expr, cast_ty.hir_id, user_ty,
- );
+ hir::ExprKind::ConstBlock(ref anon_const) => {
+ let anon_const_def_id = self.tcx.hir().local_def_id(anon_const.hir_id);
+ let value = ty::Const::from_anon_const(self.tcx, anon_const_def_id);
- // Check to see if this cast is a "coercion cast", where the cast is actually done
- // using a coercion (or is a no-op).
- let cast = if cx.typeck_results().is_coercion_cast(source.hir_id) {
- // Convert the lexpr to a vexpr.
- ExprKind::Use { source: source.to_ref() }
- } else if cx.typeck_results().expr_ty(source).is_region_ptr() {
- // Special cased so that we can type check that the element
- // type of the source matches the pointed to type of the
- // destination.
- ExprKind::Pointer { source: source.to_ref(), cast: PointerCast::ArrayToPointer }
- } else {
- // check whether this is casting an enum variant discriminant
- // to prevent cycles, we refer to the discriminant initializer
- // which is always an integer and thus doesn't need to know the
- // enum's layout (or its tag type) to compute it during const eval
- // Example:
- // enum Foo {
- // A,
- // B = A as isize + 4,
- // }
- // The correct solution would be to add symbolic computations to miri,
- // so we wouldn't have to compute and store the actual value
- let var = if let hir::ExprKind::Path(ref qpath) = source.kind {
- let res = cx.typeck_results().qpath_res(qpath, source.hir_id);
- cx.typeck_results().node_type(source.hir_id).ty_adt_def().and_then(|adt_def| {
- match res {
- Res::Def(
- DefKind::Ctor(CtorOf::Variant, CtorKind::Const),
- variant_ctor_id,
- ) => {
- let idx = adt_def.variant_index_with_ctor_id(variant_ctor_id);
- let (d, o) = adt_def.discriminant_def_for_variant(idx);
- use rustc_middle::ty::util::IntTypeExt;
- let ty = adt_def.repr.discr_type();
- let ty = ty.to_ty(cx.tcx());
- Some((d, o, ty))
- }
- _ => None,
- }
- })
- } else {
- None
- };
+ ExprKind::ConstBlock { value }
+ }
+ // Now comes the rote stuff:
+ hir::ExprKind::Repeat(ref v, ref count) => {
+ let count_def_id = self.tcx.hir().local_def_id(count.hir_id);
+ let count = ty::Const::from_anon_const(self.tcx, count_def_id);
- let source = if let Some((did, offset, var_ty)) = var {
- let mk_const = |literal| {
- Expr {
- temp_lifetime,
- ty: var_ty,
- span: expr.span,
- kind: ExprKind::Literal { literal, user_ty: None, const_id: None },
- }
- .to_ref()
- };
- let offset = mk_const(ty::Const::from_bits(
- cx.tcx,
- offset as u128,
- cx.param_env.and(var_ty),
- ));
- match did {
- Some(did) => {
- // in case we are offsetting from a computed discriminant
- // and not the beginning of discriminants (which is always `0`)
- let substs = InternalSubsts::identity_for_item(cx.tcx(), did);
- let lhs = mk_const(cx.tcx().mk_const(ty::Const {
- val: ty::ConstKind::Unevaluated(
- ty::WithOptConstParam::unknown(did),
- substs,
- None,
- ),
- ty: var_ty,
- }));
- let bin = ExprKind::Binary { op: BinOp::Add, lhs, rhs: offset };
- Expr { temp_lifetime, ty: var_ty, span: expr.span, kind: bin }.to_ref()
- }
- None => offset,
+ ExprKind::Repeat { value: self.mirror_expr(v), count }
+ }
+ hir::ExprKind::Ret(ref v) => {
+ ExprKind::Return { value: v.as_ref().map(|v| self.mirror_expr(v)) }
+ }
+ hir::ExprKind::Break(dest, ref value) => match dest.target_id {
+ Ok(target_id) => ExprKind::Break {
+ label: region::Scope { id: target_id.local_id, data: region::ScopeData::Node },
+ value: value.as_ref().map(|value| self.mirror_expr(value)),
+ },
+ Err(err) => bug!("invalid loop id for break: {}", err),
+ },
+ hir::ExprKind::Continue(dest) => match dest.target_id {
+ Ok(loop_id) => ExprKind::Continue {
+ label: region::Scope { id: loop_id.local_id, data: region::ScopeData::Node },
+ },
+ Err(err) => bug!("invalid loop id for continue: {}", err),
+ },
+ hir::ExprKind::If(cond, then, else_opt) => ExprKind::If {
+ cond: self.mirror_expr(cond),
+ then: self.mirror_expr(then),
+ else_opt: else_opt.map(|el| self.mirror_expr(el)),
+ },
+ hir::ExprKind::Match(ref discr, ref arms, _) => ExprKind::Match {
+ scrutinee: self.mirror_expr(discr),
+ arms: self.arena.alloc_from_iter(arms.iter().map(|a| self.convert_arm(a))),
+ },
+ hir::ExprKind::Loop(ref body, ..) => {
+ let block_ty = self.typeck_results().node_type(body.hir_id);
+ let temp_lifetime = self.region_scope_tree.temporary_scope(body.hir_id.local_id);
+ let block = self.mirror_block(body);
+ let body = self.arena.alloc(Expr {
+ ty: block_ty,
+ temp_lifetime,
+ span: block.span,
+ kind: ExprKind::Block { body: block },
+ });
+ ExprKind::Loop { body }
+ }
+ hir::ExprKind::Field(ref source, ..) => ExprKind::Field {
+ lhs: self.mirror_expr(source),
+ name: Field::new(self.tcx.field_index(expr.hir_id, self.typeck_results)),
+ },
+ hir::ExprKind::Cast(ref source, ref cast_ty) => {
+ // Check for a user-given type annotation on this `cast`
+ let user_provided_types = self.typeck_results.user_provided_types();
+ let user_ty = user_provided_types.get(cast_ty.hir_id);
+
+ debug!(
+ "cast({:?}) has ty w/ hir_id {:?} and user provided ty {:?}",
+ expr, cast_ty.hir_id, user_ty,
+ );
+
+ // Check to see if this cast is a "coercion cast", where the cast is actually done
+ // using a coercion (or is a no-op).
+ let cast = if self.typeck_results().is_coercion_cast(source.hir_id) {
+ // Convert the lexpr to a vexpr.
+ ExprKind::Use { source: self.mirror_expr(source) }
+ } else if self.typeck_results().expr_ty(source).is_region_ptr() {
+ // Special cased so that we can type check that the element
+ // type of the source matches the pointed to type of the
+ // destination.
+ ExprKind::Pointer {
+ source: self.mirror_expr(source),
+ cast: PointerCast::ArrayToPointer,
}
} else {
- source.to_ref()
+ // Check whether this is casting an enum variant discriminant.
+ // To prevent cycles, we refer to the discriminant initializer,
+ // which is always an integer and thus doesn't need to know the
+ // enum's layout (or its tag type) to compute it during const eval.
+ // Example:
+ // enum Foo {
+ // A,
+ // B = A as isize + 4,
+ // }
+ // The correct solution would be to add symbolic computations to miri,
+ // so we wouldn't have to compute and store the actual value
+ let var = if let hir::ExprKind::Path(ref qpath) = source.kind {
+ let res = self.typeck_results().qpath_res(qpath, source.hir_id);
+ self.typeck_results().node_type(source.hir_id).ty_adt_def().and_then(
+ |adt_def| match res {
+ Res::Def(
+ DefKind::Ctor(CtorOf::Variant, CtorKind::Const),
+ variant_ctor_id,
+ ) => {
+ let idx = adt_def.variant_index_with_ctor_id(variant_ctor_id);
+ let (d, o) = adt_def.discriminant_def_for_variant(idx);
+ use rustc_middle::ty::util::IntTypeExt;
+ let ty = adt_def.repr.discr_type();
+ let ty = ty.to_ty(self.tcx());
+ Some((d, o, ty))
+ }
+ _ => None,
+ },
+ )
+ } else {
+ None
+ };
+
+ let source = if let Some((did, offset, var_ty)) = var {
+ let mk_const = |literal| {
+ self.arena.alloc(Expr {
+ temp_lifetime,
+ ty: var_ty,
+ span: expr.span,
+ kind: ExprKind::Literal { literal, user_ty: None, const_id: None },
+ })
+ };
+ let offset = mk_const(ty::Const::from_bits(
+ self.tcx,
+ offset as u128,
+ self.param_env.and(var_ty),
+ ));
+ match did {
+ Some(did) => {
+ // in case we are offsetting from a computed discriminant
+ // and not the beginning of discriminants (which is always `0`)
+ let substs = InternalSubsts::identity_for_item(self.tcx(), did);
+ let lhs = mk_const(self.tcx().mk_const(ty::Const {
+ val: ty::ConstKind::Unevaluated(
+ ty::WithOptConstParam::unknown(did),
+ substs,
+ None,
+ ),
+ ty: var_ty,
+ }));
+ let bin =
+ ExprKind::Binary { op: BinOp::Add, lhs: lhs, rhs: offset };
+ self.arena.alloc(Expr {
+ temp_lifetime,
+ ty: var_ty,
+ span: expr.span,
+ kind: bin,
+ })
+ }
+ None => offset,
+ }
+ } else {
+ self.mirror_expr(source)
+ };
+
+ ExprKind::Cast { source: source }
};
- ExprKind::Cast { source }
+ if let Some(user_ty) = user_ty {
+ // NOTE: Creating a new Expr and wrapping a Cast inside of it may be
+ // inefficient, revisit this when performance becomes an issue.
+ let cast_expr = self.arena.alloc(Expr {
+ temp_lifetime,
+ ty: expr_ty,
+ span: expr.span,
+ kind: cast,
+ });
+ debug!("make_mirror_unadjusted: (cast) user_ty={:?}", user_ty);
+
+ ExprKind::ValueTypeAscription { source: cast_expr, user_ty: Some(*user_ty) }
+ } else {
+ cast
+ }
+ }
+ hir::ExprKind::Type(ref source, ref ty) => {
+ let user_provided_types = self.typeck_results.user_provided_types();
+ let user_ty = user_provided_types.get(ty.hir_id).copied();
+ debug!("make_mirror_unadjusted: (type) user_ty={:?}", user_ty);
+ let mirrored = self.mirror_expr(source);
+ if source.is_syntactic_place_expr() {
+ ExprKind::PlaceTypeAscription { source: mirrored, user_ty }
+ } else {
+ ExprKind::ValueTypeAscription { source: mirrored, user_ty }
+ }
+ }
+ hir::ExprKind::DropTemps(ref source) => {
+ ExprKind::Use { source: self.mirror_expr(source) }
+ }
+ hir::ExprKind::Box(ref value) => ExprKind::Box { value: self.mirror_expr(value) },
+ hir::ExprKind::Array(ref fields) => {
+ ExprKind::Array { fields: self.mirror_exprs(fields) }
+ }
+ hir::ExprKind::Tup(ref fields) => ExprKind::Tuple { fields: self.mirror_exprs(fields) },
+
+ hir::ExprKind::Yield(ref v, _) => ExprKind::Yield { value: self.mirror_expr(v) },
+ hir::ExprKind::Err => unreachable!(),
+ };
+
+ Expr { temp_lifetime, ty: expr_ty, span: expr.span, kind }
+ }
+
+ fn user_substs_applied_to_res(
+ &mut self,
+ hir_id: hir::HirId,
+ res: Res,
+ ) -> Option<ty::CanonicalUserType<'tcx>> {
+ debug!("user_substs_applied_to_res: res={:?}", res);
+ let user_provided_type = match res {
+ // A reference to something callable -- e.g., a fn, method, or
+ // a tuple-struct or tuple-variant. This has the type of a
+ // `Fn` but with the user-given substitutions.
+ Res::Def(DefKind::Fn, _)
+ | Res::Def(DefKind::AssocFn, _)
+ | Res::Def(DefKind::Ctor(_, CtorKind::Fn), _)
+ | Res::Def(DefKind::Const, _)
+ | Res::Def(DefKind::AssocConst, _) => {
+ self.typeck_results().user_provided_types().get(hir_id).copied()
+ }
+
+ // A unit struct/variant which is used as a value (e.g.,
+ // `None`). This has the type of the enum/struct that defines
+ // this variant -- but with the substitutions given by the
+ // user.
+ Res::Def(DefKind::Ctor(_, CtorKind::Const), _) => {
+ self.user_substs_applied_to_ty_of_hir_id(hir_id)
+ }
+
+ // `Self` is used in an expression as a tuple struct constructor or a unit struct constructor
+ Res::SelfCtor(_) => self.user_substs_applied_to_ty_of_hir_id(hir_id),
+
+ _ => bug!("user_substs_applied_to_res: unexpected res {:?} at {:?}", res, hir_id),
+ };
+ debug!("user_substs_applied_to_res: user_provided_type={:?}", user_provided_type);
+ user_provided_type
+ }
+
+ fn method_callee(
+ &mut self,
+ expr: &hir::Expr<'_>,
+ span: Span,
+ overloaded_callee: Option<(DefId, SubstsRef<'tcx>)>,
+ ) -> Expr<'thir, 'tcx> {
+ let temp_lifetime = self.region_scope_tree.temporary_scope(expr.hir_id.local_id);
+ let (def_id, substs, user_ty) = match overloaded_callee {
+ Some((def_id, substs)) => (def_id, substs, None),
+ None => {
+ let (kind, def_id) =
+ self.typeck_results().type_dependent_def(expr.hir_id).unwrap_or_else(|| {
+ span_bug!(expr.span, "no type-dependent def for method callee")
+ });
+ let user_ty = self.user_substs_applied_to_res(expr.hir_id, Res::Def(kind, def_id));
+ debug!("method_callee: user_ty={:?}", user_ty);
+ (def_id, self.typeck_results().node_substs(expr.hir_id), user_ty)
+ }
+ };
+ let ty = self.tcx().mk_fn_def(def_id, substs);
+ Expr {
+ temp_lifetime,
+ ty,
+ span,
+ kind: ExprKind::Literal {
+ literal: ty::Const::zero_sized(self.tcx(), ty),
+ user_ty,
+ const_id: None,
+ },
+ }
+ }
+
+ fn convert_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) -> Arm<'thir, 'tcx> {
+ Arm {
+ pattern: self.pattern_from_hir(&arm.pat),
+ guard: arm.guard.as_ref().map(|g| match g {
+ hir::Guard::If(ref e) => Guard::If(self.mirror_expr(e)),
+ hir::Guard::IfLet(ref pat, ref e) => {
+ Guard::IfLet(self.pattern_from_hir(pat), self.mirror_expr(e))
+ }
+ }),
+ body: self.mirror_expr(arm.body),
+ lint_level: LintLevel::Explicit(arm.hir_id),
+ scope: region::Scope { id: arm.hir_id.local_id, data: region::ScopeData::Node },
+ span: arm.span,
+ }
+ }
+
+ fn convert_path_expr(
+ &mut self,
+ expr: &'tcx hir::Expr<'tcx>,
+ res: Res,
+ ) -> ExprKind<'thir, 'tcx> {
+ let substs = self.typeck_results().node_substs(expr.hir_id);
+ match res {
+ // A regular function, constructor function or a constant.
+ Res::Def(DefKind::Fn, _)
+ | Res::Def(DefKind::AssocFn, _)
+ | Res::Def(DefKind::Ctor(_, CtorKind::Fn), _)
+ | Res::SelfCtor(..) => {
+ let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
+ debug!("convert_path_expr: user_ty={:?}", user_ty);
+ ExprKind::Literal {
+ literal: ty::Const::zero_sized(
+ self.tcx,
+ self.typeck_results().node_type(expr.hir_id),
+ ),
+ user_ty,
+ const_id: None,
+ }
+ }
+
+ Res::Def(DefKind::ConstParam, def_id) => {
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let item_id = self.tcx.hir().get_parent_node(hir_id);
+ let item_def_id = self.tcx.hir().local_def_id(item_id);
+ let generics = self.tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id];
+ let name = self.tcx.hir().name(hir_id);
+ let val = ty::ConstKind::Param(ty::ParamConst::new(index, name));
+ ExprKind::Literal {
+ literal: self.tcx.mk_const(ty::Const {
+ val,
+ ty: self.typeck_results().node_type(expr.hir_id),
+ }),
+ user_ty: None,
+ const_id: Some(def_id),
+ }
+ }
+
+ Res::Def(DefKind::Const, def_id) | Res::Def(DefKind::AssocConst, def_id) => {
+ let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
+ debug!("convert_path_expr: (const) user_ty={:?}", user_ty);
+ ExprKind::Literal {
+ literal: self.tcx.mk_const(ty::Const {
+ val: ty::ConstKind::Unevaluated(
+ ty::WithOptConstParam::unknown(def_id),
+ substs,
+ None,
+ ),
+ ty: self.typeck_results().node_type(expr.hir_id),
+ }),
+ user_ty,
+ const_id: Some(def_id),
+ }
+ }
+
+ Res::Def(DefKind::Ctor(_, CtorKind::Const), def_id) => {
+ let user_provided_types = self.typeck_results.user_provided_types();
+ let user_provided_type = user_provided_types.get(expr.hir_id).copied();
+ debug!("convert_path_expr: user_provided_type={:?}", user_provided_type);
+ let ty = self.typeck_results().node_type(expr.hir_id);
+ match ty.kind() {
+ // A unit struct/variant which is used as a value.
+ // We return a completely different ExprKind here to account for this special case.
+ ty::Adt(adt_def, substs) => ExprKind::Adt {
+ adt_def,
+ variant_index: adt_def.variant_index_with_ctor_id(def_id),
+ substs,
+ user_ty: user_provided_type,
+ fields: self.arena.alloc_from_iter(iter::empty()),
+ base: None,
+ },
+ _ => bug!("unexpected ty: {:?}", ty),
+ }
+ }
+
+ // We encode uses of statics as a `*&STATIC` where the `&STATIC` part is
+ // a constant reference (or constant raw pointer for `static mut`) in MIR
+ Res::Def(DefKind::Static, id) => {
+ let ty = self.tcx.static_ptr_ty(id);
+ let temp_lifetime = self.region_scope_tree.temporary_scope(expr.hir_id.local_id);
+ let kind = if self.tcx.is_thread_local_static(id) {
+ ExprKind::ThreadLocalRef(id)
+ } else {
+ let ptr = self.tcx.create_static_alloc(id);
+ ExprKind::StaticRef {
+ literal: ty::Const::from_scalar(self.tcx, Scalar::Ptr(ptr.into()), ty),
+ def_id: id,
+ }
+ };
+ ExprKind::Deref {
+ arg: self.arena.alloc(Expr { ty, temp_lifetime, span: expr.span, kind }),
+ }
+ }
+
+ Res::Local(var_hir_id) => self.convert_var(var_hir_id),
+
+ _ => span_bug!(expr.span, "res `{:?}` not yet implemented", res),
+ }
+ }
+
+ fn convert_var(&mut self, var_hir_id: hir::HirId) -> ExprKind<'thir, 'tcx> {
+ // We want upvars here not captures.
+ // Captures will be handled in MIR.
+ let is_upvar = self
+ .tcx
+ .upvars_mentioned(self.body_owner)
+ .map_or(false, |upvars| upvars.contains_key(&var_hir_id));
+
+ debug!(
+ "convert_var({:?}): is_upvar={}, body_owner={:?}",
+ var_hir_id, is_upvar, self.body_owner
+ );
+
+ if is_upvar {
+ ExprKind::UpvarRef { closure_def_id: self.body_owner, var_hir_id }
+ } else {
+ ExprKind::VarRef { id: var_hir_id }
+ }
+ }
+
+ fn overloaded_operator(
+ &mut self,
+ expr: &'tcx hir::Expr<'tcx>,
+ args: &'thir [Expr<'thir, 'tcx>],
+ ) -> ExprKind<'thir, 'tcx> {
+ let fun = self.arena.alloc(self.method_callee(expr, expr.span, None));
+ ExprKind::Call { ty: fun.ty, fun, args, from_hir_call: false, fn_span: expr.span }
+ }
+
+ fn overloaded_place(
+ &mut self,
+ expr: &'tcx hir::Expr<'tcx>,
+ place_ty: Ty<'tcx>,
+ overloaded_callee: Option<(DefId, SubstsRef<'tcx>)>,
+ args: &'thir [Expr<'thir, 'tcx>],
+ span: Span,
+ ) -> ExprKind<'thir, 'tcx> {
+ // For an overloaded *x or x[y] expression of type T, the method
+ // call returns an &T and we must add the deref so that the types
+ // line up (this is because `*x` and `x[y]` represent places):
+
+ // Reconstruct the output assuming it's a reference with the
+ // same region and mutability as the receiver. This holds for
+ // `Deref(Mut)::deref(_mut)` and `Index(Mut)::index(_mut)`.
+ let (region, mutbl) = match *args[0].ty.kind() {
+ ty::Ref(region, _, mutbl) => (region, mutbl),
+ _ => span_bug!(span, "overloaded_place: receiver is not a reference"),
+ };
+ let ref_ty = self.tcx.mk_ref(region, ty::TypeAndMut { ty: place_ty, mutbl });
+
+ // construct the complete expression `foo()` for the overloaded call,
+ // which will yield the &T type
+ let temp_lifetime = self.region_scope_tree.temporary_scope(expr.hir_id.local_id);
+ let fun = self.arena.alloc(self.method_callee(expr, span, overloaded_callee));
+ let ref_expr = self.arena.alloc(Expr {
+ temp_lifetime,
+ ty: ref_ty,
+ span,
+ kind: ExprKind::Call { ty: fun.ty, fun, args, from_hir_call: false, fn_span: span },
+ });
+
+ // construct and return a deref wrapper `*foo()`
+ ExprKind::Deref { arg: ref_expr }
+ }
+
+ fn convert_captured_hir_place(
+ &mut self,
+ closure_expr: &'tcx hir::Expr<'tcx>,
+ place: HirPlace<'tcx>,
+ ) -> Expr<'thir, 'tcx> {
+ let temp_lifetime = self.region_scope_tree.temporary_scope(closure_expr.hir_id.local_id);
+ let var_ty = place.base_ty;
+
+ // The result of capture analysis in `rustc_typeck/check/upvar.rs` represents a captured path
+ // as it's seen for use within the closure and not at the time of closure creation.
+ //
+ // That is, we expect to see it start from a captured upvar and not something that is local
+ // to the closure's parent.
+ let var_hir_id = match place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected an upvar, found {:?}", base),
+ };
+
+ let mut captured_place_expr = Expr {
+ temp_lifetime,
+ ty: var_ty,
+ span: closure_expr.span,
+ kind: self.convert_var(var_hir_id),
+ };
+
+ for proj in place.projections.iter() {
+ let kind = match proj.kind {
+ HirProjectionKind::Deref => {
+ ExprKind::Deref { arg: self.arena.alloc(captured_place_expr) }
+ }
+ HirProjectionKind::Field(field, ..) => {
+ // Variant index will always be 0, because for multi-variant
+ // enums, we capture the enum entirely.
+ ExprKind::Field {
+ lhs: self.arena.alloc(captured_place_expr),
+ name: Field::new(field as usize),
+ }
+ }
+ HirProjectionKind::Index | HirProjectionKind::Subslice => {
+ // We don't capture these projections, so we can ignore them here
+ continue;
+ }
};
- if let Some(user_ty) = user_ty {
- // NOTE: Creating a new Expr and wrapping a Cast inside of it may be
- // inefficient, revisit this when performance becomes an issue.
- let cast_expr = Expr { temp_lifetime, ty: expr_ty, span: expr.span, kind: cast };
- debug!("make_mirror_unadjusted: (cast) user_ty={:?}", user_ty);
+ captured_place_expr =
+ Expr { temp_lifetime, ty: proj.ty, span: closure_expr.span, kind };
+ }
- ExprKind::ValueTypeAscription {
- source: cast_expr.to_ref(),
- user_ty: Some(*user_ty),
+ captured_place_expr
+ }
+
+ fn capture_upvar(
+ &mut self,
+ closure_expr: &'tcx hir::Expr<'tcx>,
+ captured_place: &'tcx ty::CapturedPlace<'tcx>,
+ upvar_ty: Ty<'tcx>,
+ ) -> Expr<'thir, 'tcx> {
+ let upvar_capture = captured_place.info.capture_kind;
+ let captured_place_expr =
+ self.convert_captured_hir_place(closure_expr, captured_place.place.clone());
+ let temp_lifetime = self.region_scope_tree.temporary_scope(closure_expr.hir_id.local_id);
+
+ match upvar_capture {
+ ty::UpvarCapture::ByValue(_) => captured_place_expr,
+ ty::UpvarCapture::ByRef(upvar_borrow) => {
+ let borrow_kind = match upvar_borrow.kind {
+ ty::BorrowKind::ImmBorrow => BorrowKind::Shared,
+ ty::BorrowKind::UniqueImmBorrow => BorrowKind::Unique,
+ ty::BorrowKind::MutBorrow => BorrowKind::Mut { allow_two_phase_borrow: false },
+ };
+ Expr {
+ temp_lifetime,
+ ty: upvar_ty,
+ span: closure_expr.span,
+ kind: ExprKind::Borrow {
+ borrow_kind,
+ arg: self.arena.alloc(captured_place_expr),
+ },
}
- } else {
- cast
}
}
- hir::ExprKind::Type(ref source, ref ty) => {
- let user_provided_types = cx.typeck_results.user_provided_types();
- let user_ty = user_provided_types.get(ty.hir_id).copied();
- debug!("make_mirror_unadjusted: (type) user_ty={:?}", user_ty);
- if source.is_syntactic_place_expr() {
- ExprKind::PlaceTypeAscription { source: source.to_ref(), user_ty }
- } else {
- ExprKind::ValueTypeAscription { source: source.to_ref(), user_ty }
- }
- }
- hir::ExprKind::DropTemps(ref source) => ExprKind::Use { source: source.to_ref() },
- hir::ExprKind::Box(ref value) => ExprKind::Box { value: value.to_ref() },
- hir::ExprKind::Array(ref fields) => ExprKind::Array { fields: fields.to_ref() },
- hir::ExprKind::Tup(ref fields) => ExprKind::Tuple { fields: fields.to_ref() },
+ }
- hir::ExprKind::Yield(ref v, _) => ExprKind::Yield { value: v.to_ref() },
- hir::ExprKind::Err => unreachable!(),
- };
-
- Expr { temp_lifetime, ty: expr_ty, span: expr.span, kind }
-}
-
-fn user_substs_applied_to_res<'tcx>(
- cx: &mut Cx<'_, 'tcx>,
- hir_id: hir::HirId,
- res: Res,
-) -> Option<ty::CanonicalUserType<'tcx>> {
- debug!("user_substs_applied_to_res: res={:?}", res);
- let user_provided_type = match res {
- // A reference to something callable -- e.g., a fn, method, or
- // a tuple-struct or tuple-variant. This has the type of a
- // `Fn` but with the user-given substitutions.
- Res::Def(DefKind::Fn, _)
- | Res::Def(DefKind::AssocFn, _)
- | Res::Def(DefKind::Ctor(_, CtorKind::Fn), _)
- | Res::Def(DefKind::Const, _)
- | Res::Def(DefKind::AssocConst, _) => {
- cx.typeck_results().user_provided_types().get(hir_id).copied()
- }
-
- // A unit struct/variant which is used as a value (e.g.,
- // `None`). This has the type of the enum/struct that defines
- // this variant -- but with the substitutions given by the
- // user.
- Res::Def(DefKind::Ctor(_, CtorKind::Const), _) => {
- cx.user_substs_applied_to_ty_of_hir_id(hir_id)
- }
-
- // `Self` is used in expression as a tuple struct constructor or an unit struct constructor
- Res::SelfCtor(_) => cx.user_substs_applied_to_ty_of_hir_id(hir_id),
-
- _ => bug!("user_substs_applied_to_res: unexpected res {:?} at {:?}", res, hir_id),
- };
- debug!("user_substs_applied_to_res: user_provided_type={:?}", user_provided_type);
- user_provided_type
-}
-
-fn method_callee<'a, 'tcx>(
- cx: &mut Cx<'a, 'tcx>,
- expr: &hir::Expr<'_>,
- span: Span,
- overloaded_callee: Option<(DefId, SubstsRef<'tcx>)>,
-) -> Expr<'tcx> {
- let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id);
- let (def_id, substs, user_ty) = match overloaded_callee {
- Some((def_id, substs)) => (def_id, substs, None),
- None => {
- let (kind, def_id) = cx
- .typeck_results()
- .type_dependent_def(expr.hir_id)
- .unwrap_or_else(|| span_bug!(expr.span, "no type-dependent def for method callee"));
- let user_ty = user_substs_applied_to_res(cx, expr.hir_id, Res::Def(kind, def_id));
- debug!("method_callee: user_ty={:?}", user_ty);
- (def_id, cx.typeck_results().node_substs(expr.hir_id), user_ty)
- }
- };
- let ty = cx.tcx().mk_fn_def(def_id, substs);
- Expr {
- temp_lifetime,
- ty,
- span,
- kind: ExprKind::Literal {
- literal: ty::Const::zero_sized(cx.tcx(), ty),
- user_ty,
- const_id: None,
- },
+ /// Converts a list of named fields (i.e., for struct-like struct/enum ADTs) into FieldExpr.
+ fn field_refs(
+ &mut self,
+ fields: &'tcx [hir::ExprField<'tcx>],
+ ) -> &'thir [FieldExpr<'thir, 'tcx>] {
+ self.arena.alloc_from_iter(fields.iter().map(|field| FieldExpr {
+ name: Field::new(self.tcx.field_index(field.hir_id, self.typeck_results)),
+ expr: self.mirror_expr(field.expr),
+ }))
}
}
@@ -776,135 +1150,6 @@
}
}
-fn convert_arm<'tcx>(cx: &mut Cx<'_, 'tcx>, arm: &'tcx hir::Arm<'tcx>) -> Arm<'tcx> {
- Arm {
- pattern: cx.pattern_from_hir(&arm.pat),
- guard: arm.guard.as_ref().map(|g| match g {
- hir::Guard::If(ref e) => Guard::If(e.to_ref()),
- hir::Guard::IfLet(ref pat, ref e) => Guard::IfLet(cx.pattern_from_hir(pat), e.to_ref()),
- }),
- body: arm.body.to_ref(),
- lint_level: LintLevel::Explicit(arm.hir_id),
- scope: region::Scope { id: arm.hir_id.local_id, data: region::ScopeData::Node },
- span: arm.span,
- }
-}
-
-fn convert_path_expr<'a, 'tcx>(
- cx: &mut Cx<'a, 'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- res: Res,
-) -> ExprKind<'tcx> {
- let substs = cx.typeck_results().node_substs(expr.hir_id);
- match res {
- // A regular function, constructor function or a constant.
- Res::Def(DefKind::Fn, _)
- | Res::Def(DefKind::AssocFn, _)
- | Res::Def(DefKind::Ctor(_, CtorKind::Fn), _)
- | Res::SelfCtor(..) => {
- let user_ty = user_substs_applied_to_res(cx, expr.hir_id, res);
- debug!("convert_path_expr: user_ty={:?}", user_ty);
- ExprKind::Literal {
- literal: ty::Const::zero_sized(cx.tcx, cx.typeck_results().node_type(expr.hir_id)),
- user_ty,
- const_id: None,
- }
- }
-
- Res::Def(DefKind::ConstParam, def_id) => {
- let hir_id = cx.tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- let item_id = cx.tcx.hir().get_parent_node(hir_id);
- let item_def_id = cx.tcx.hir().local_def_id(item_id);
- let generics = cx.tcx.generics_of(item_def_id);
- let index = generics.param_def_id_to_index[&def_id];
- let name = cx.tcx.hir().name(hir_id);
- let val = ty::ConstKind::Param(ty::ParamConst::new(index, name));
- ExprKind::Literal {
- literal: cx
- .tcx
- .mk_const(ty::Const { val, ty: cx.typeck_results().node_type(expr.hir_id) }),
- user_ty: None,
- const_id: Some(def_id),
- }
- }
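
For orientation, the `DefKind::ConstParam` arm removed above handles path expressions that name a const generic parameter. A minimal surface-level program containing such a use (a hedged illustration; `const_param_use` is a made-up name, not a compiler item):

```
// The `N` in the body is a path expression resolving to `Res::Def(DefKind::ConstParam, ..)`;
// the removed arm lowered it to a `ConstKind::Param` literal.
fn const_param_use<const N: usize>() -> usize {
    N
}

fn main() {
    assert_eq!(const_param_use::<3>(), 3);
}
```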
-
- Res::Def(DefKind::Const, def_id) | Res::Def(DefKind::AssocConst, def_id) => {
- let user_ty = user_substs_applied_to_res(cx, expr.hir_id, res);
- debug!("convert_path_expr: (const) user_ty={:?}", user_ty);
- ExprKind::Literal {
- literal: cx.tcx.mk_const(ty::Const {
- val: ty::ConstKind::Unevaluated(
- ty::WithOptConstParam::unknown(def_id),
- substs,
- None,
- ),
- ty: cx.typeck_results().node_type(expr.hir_id),
- }),
- user_ty,
- const_id: Some(def_id),
- }
- }
-
- Res::Def(DefKind::Ctor(_, CtorKind::Const), def_id) => {
- let user_provided_types = cx.typeck_results.user_provided_types();
- let user_provided_type = user_provided_types.get(expr.hir_id).copied();
- debug!("convert_path_expr: user_provided_type={:?}", user_provided_type);
- let ty = cx.typeck_results().node_type(expr.hir_id);
- match ty.kind() {
- // A unit struct/variant which is used as a value.
- // We return a completely different ExprKind here to account for this special case.
- ty::Adt(adt_def, substs) => ExprKind::Adt {
- adt_def,
- variant_index: adt_def.variant_index_with_ctor_id(def_id),
- substs,
- user_ty: user_provided_type,
- fields: vec![],
- base: None,
- },
- _ => bug!("unexpected ty: {:?}", ty),
- }
- }
-
- // We encode uses of statics as a `*&STATIC` where the `&STATIC` part is
- // a constant reference (or constant raw pointer for `static mut`) in MIR
- Res::Def(DefKind::Static, id) => {
- let ty = cx.tcx.static_ptr_ty(id);
- let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id);
- let kind = if cx.tcx.is_thread_local_static(id) {
- ExprKind::ThreadLocalRef(id)
- } else {
- let ptr = cx.tcx.create_static_alloc(id);
- ExprKind::StaticRef {
- literal: ty::Const::from_scalar(cx.tcx, Scalar::Ptr(ptr.into()), ty),
- def_id: id,
- }
- };
- ExprKind::Deref { arg: Expr { ty, temp_lifetime, span: expr.span, kind }.to_ref() }
- }
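
The removed comment describes encoding a static access as `*&STATIC`. The same equivalence can be observed at the source level; a small sketch (illustrative only):

```
static GREETING: &str = "hello";

fn main() {
    // Reading a static...
    let direct = GREETING;
    // ...is treated like taking a constant reference to it and dereferencing,
    // which is the `Deref { arg: StaticRef { .. } }` shape built by the removed arm.
    let via_ref = *&GREETING;
    assert_eq!(direct, via_ref);
}
```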
-
- Res::Local(var_hir_id) => convert_var(cx, var_hir_id),
-
- _ => span_bug!(expr.span, "res `{:?}` not yet implemented", res),
- }
-}
-
-fn convert_var<'tcx>(cx: &mut Cx<'_, 'tcx>, var_hir_id: hir::HirId) -> ExprKind<'tcx> {
- // We want upvars here not captures.
- // Captures will be handled in MIR.
- let is_upvar = cx
- .tcx
- .upvars_mentioned(cx.body_owner)
- .map_or(false, |upvars| upvars.contains_key(&var_hir_id));
-
- debug!("convert_var({:?}): is_upvar={}, body_owner={:?}", var_hir_id, is_upvar, cx.body_owner);
-
- if is_upvar {
- ExprKind::UpvarRef { closure_def_id: cx.body_owner, var_hir_id }
- } else {
- ExprKind::VarRef { id: var_hir_id }
- }
-}
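
The removed `convert_var` distinguishes upvars (variables captured from an enclosing body) from variables local to the current body. A hedged source-level illustration of which references fall into which category:

```
fn main() {
    let captured = 1; // seen from inside the closure, this is an upvar -> `UpvarRef`
    let closure = |param: i32| {
        let local = 2; // parameters and locals of the closure body -> `VarRef`
        captured + param + local
    };
    assert_eq!(closure(3), 6);
}
```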
-
fn bin_op(op: hir::BinOpKind) -> BinOp {
match op {
hir::BinOpKind::Add => BinOp::Add,
@@ -926,139 +1171,3 @@
_ => bug!("no equivalent for ast binop {:?}", op),
}
}
-
-fn overloaded_operator<'a, 'tcx>(
- cx: &mut Cx<'a, 'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- args: Vec<ExprRef<'tcx>>,
-) -> ExprKind<'tcx> {
- let fun = method_callee(cx, expr, expr.span, None);
- ExprKind::Call { ty: fun.ty, fun: fun.to_ref(), args, from_hir_call: false, fn_span: expr.span }
-}
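
The removed `overloaded_operator` lowers an overloaded operator to a plain call with `from_hir_call: false`. The surface-level equivalence it relies on can be seen with `String` addition (an illustrative sketch, not compiler code):

```
use std::ops::Add;

fn main() {
    let a = String::from("foo");
    let b = "bar";
    // Overloaded `+`...
    let via_operator = a.clone() + b;
    // ...is the same trait-method call that THIR records as an explicit `ExprKind::Call`.
    let via_method = Add::add(a, b);
    assert_eq!(via_operator, via_method);
}
```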
-
-fn overloaded_place<'a, 'tcx>(
- cx: &mut Cx<'a, 'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- place_ty: Ty<'tcx>,
- overloaded_callee: Option<(DefId, SubstsRef<'tcx>)>,
- args: Vec<ExprRef<'tcx>>,
- span: Span,
-) -> ExprKind<'tcx> {
- // For an overloaded *x or x[y] expression of type T, the method
- // call returns an &T and we must add the deref so that the types
- // line up (this is because `*x` and `x[y]` represent places):
-
- let recv_ty = match args[0] {
- ExprRef::Thir(e) => cx.typeck_results().expr_ty_adjusted(e),
- ExprRef::Mirror(ref e) => e.ty,
- };
-
- // Reconstruct the output assuming it's a reference with the
- // same region and mutability as the receiver. This holds for
- // `Deref(Mut)::Deref(_mut)` and `Index(Mut)::index(_mut)`.
- let (region, mutbl) = match *recv_ty.kind() {
- ty::Ref(region, _, mutbl) => (region, mutbl),
- _ => span_bug!(span, "overloaded_place: receiver is not a reference"),
- };
- let ref_ty = cx.tcx.mk_ref(region, ty::TypeAndMut { ty: place_ty, mutbl });
-
- // construct the complete expression `foo()` for the overloaded call,
- // which will yield the &T type
- let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id);
- let fun = method_callee(cx, expr, span, overloaded_callee);
- let ref_expr = Expr {
- temp_lifetime,
- ty: ref_ty,
- span,
- kind: ExprKind::Call {
- ty: fun.ty,
- fun: fun.to_ref(),
- args,
- from_hir_call: false,
- fn_span: span,
- },
- };
-
- // construct and return a deref wrapper `*foo()`
- ExprKind::Deref { arg: ref_expr.to_ref() }
-}
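
The removed `overloaded_place` wraps the overloaded call in a deref so that `*x` and `x[y]` stay place expressions of type `T` even though `Deref::deref`/`Index::index` return `&T`. The equivalence, sketched at the surface level (illustrative only):

```
use std::ops::Index;

fn main() {
    let v = vec![10, 20, 30];
    // Overloaded indexing `v[1]`...
    let a = v[1];
    // ...is built as `*Index::index(&v, 1)`: the method call yields `&T`, and the extra
    // deref (the `ExprKind::Deref` constructed above) restores the place of type `T`.
    let b = *Index::index(&v, 1);
    assert_eq!(a, b);
}
```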
-
-fn capture_upvar<'a, 'tcx>(
- cx: &mut Cx<'_, 'tcx>,
- closure_expr: &'tcx hir::Expr<'tcx>,
- captured_place: &'a ty::CapturedPlace<'tcx>,
- upvar_ty: Ty<'tcx>,
-) -> ExprRef<'tcx> {
- let upvar_capture = captured_place.info.capture_kind;
- let temp_lifetime = cx.region_scope_tree.temporary_scope(closure_expr.hir_id.local_id);
- let var_ty = captured_place.place.base_ty;
-
-    // The result of capture analysis in `rustc_typeck/check/upvar.rs` represents a captured path
- // as it's seen for use within the closure and not at the time of closure creation.
- //
-    // That is, we expect to see it start from a captured upvar and not something that is local
- // to the closure's parent.
- let var_hir_id = match captured_place.place.base {
- HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
- base => bug!("Expected an upvar, found {:?}", base),
- };
-
- let mut captured_place_expr = Expr {
- temp_lifetime,
- ty: var_ty,
- span: closure_expr.span,
- kind: convert_var(cx, var_hir_id),
- };
-
- for proj in captured_place.place.projections.iter() {
- let kind = match proj.kind {
- HirProjectionKind::Deref => ExprKind::Deref { arg: captured_place_expr.to_ref() },
- HirProjectionKind::Field(field, ..) => {
- // Variant index will always be 0, because for multi-variant
- // enums, we capture the enum entirely.
- ExprKind::Field {
- lhs: captured_place_expr.to_ref(),
- name: Field::new(field as usize),
- }
- }
- HirProjectionKind::Index | HirProjectionKind::Subslice => {
- // We don't capture these projections, so we can ignore them here
- continue;
- }
- };
-
- captured_place_expr = Expr { temp_lifetime, ty: proj.ty, span: closure_expr.span, kind };
- }
-
- match upvar_capture {
- ty::UpvarCapture::ByValue(_) => captured_place_expr.to_ref(),
- ty::UpvarCapture::ByRef(upvar_borrow) => {
- let borrow_kind = match upvar_borrow.kind {
- ty::BorrowKind::ImmBorrow => BorrowKind::Shared,
- ty::BorrowKind::UniqueImmBorrow => BorrowKind::Unique,
- ty::BorrowKind::MutBorrow => BorrowKind::Mut { allow_two_phase_borrow: false },
- };
- Expr {
- temp_lifetime,
- ty: upvar_ty,
- span: closure_expr.span,
- kind: ExprKind::Borrow { borrow_kind, arg: captured_place_expr.to_ref() },
- }
- .to_ref()
- }
- }
-}
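
The removed `capture_upvar` rebuilds a captured place by starting from the upvar and replaying the recorded projections (derefs and field accesses). A small program whose capture involves a field projection, for orientation (a hedged sketch; with precise closure captures enabled, analysis records the base `point` plus a `Field(0)` projection):

```
fn main() {
    let mut point = (0i32, 0i32);
    // The closure uses only `point.0`; the projection loop above turns the recorded
    // `Field` step into an `ExprKind::Field` wrapped around the base variable.
    let mut bump_x = || point.0 += 1;
    bump_x();
    assert_eq!(point, (1, 0));
}
```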
-
-/// Converts a list of named fields (i.e., for struct-like struct/enum ADTs) into FieldExprRef.
-fn field_refs<'a, 'tcx>(
- cx: &mut Cx<'a, 'tcx>,
- fields: &'tcx [hir::Field<'tcx>],
-) -> Vec<FieldExprRef<'tcx>> {
- fields
- .iter()
- .map(|field| FieldExprRef {
- name: Field::new(cx.tcx.field_index(field.hir_id, cx.typeck_results)),
- expr: field.expr.to_ref(),
- })
- .collect()
-}
diff --git a/compiler/rustc_mir_build/src/thir/cx/mod.rs b/compiler/rustc_mir_build/src/thir/cx/mod.rs
index 465808c..c043360 100644
--- a/compiler/rustc_mir_build/src/thir/cx/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/mod.rs
@@ -2,6 +2,7 @@
//! structures into the THIR. The `builder` is generally ignorant of the tcx,
//! etc., and instead goes through the `Cx` for most of its work.
+use crate::thir::arena::Arena;
use crate::thir::util::UserAnnotatedTyHelpers;
use crate::thir::*;
@@ -9,118 +10,48 @@
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::Node;
-use rustc_index::vec::Idx;
-use rustc_infer::infer::InferCtxt;
use rustc_middle::middle::region;
use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput};
-use rustc_middle::ty::subst::Subst;
-use rustc_middle::ty::subst::{GenericArg, InternalSubsts};
use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::symbol::{sym, Symbol};
-use rustc_target::abi::VariantIdx;
-use rustc_trait_selection::infer::InferCtxtExt;
-#[derive(Clone)]
-crate struct Cx<'a, 'tcx> {
+pub fn build_thir<'thir, 'tcx>(
tcx: TyCtxt<'tcx>,
- infcx: &'a InferCtxt<'a, 'tcx>,
+ owner_def: ty::WithOptConstParam<LocalDefId>,
+ arena: &'thir Arena<'thir, 'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+) -> &'thir Expr<'thir, 'tcx> {
+ Cx::new(tcx, owner_def, &arena).mirror_expr(expr)
+}
- crate root_lint_level: hir::HirId,
+struct Cx<'thir, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ arena: &'thir Arena<'thir, 'tcx>,
+
crate param_env: ty::ParamEnv<'tcx>,
- /// Identity `InternalSubsts` for use with const-evaluation.
- crate identity_substs: &'tcx InternalSubsts<'tcx>,
-
crate region_scope_tree: &'tcx region::ScopeTree,
- crate typeck_results: &'a ty::TypeckResults<'tcx>,
-
- /// This is `Constness::Const` if we are compiling a `static`,
- /// `const`, or the body of a `const fn`.
- constness: hir::Constness,
+ crate typeck_results: &'tcx ty::TypeckResults<'tcx>,
/// The `DefId` of the owner of this body.
body_owner: DefId,
-
- /// What kind of body is being compiled.
- crate body_owner_kind: hir::BodyOwnerKind,
-
- /// Whether this constant/function needs overflow checks.
- check_overflow: bool,
}
-impl<'a, 'tcx> Cx<'a, 'tcx> {
- crate fn new(
- infcx: &'a InferCtxt<'a, 'tcx>,
+impl<'thir, 'tcx> Cx<'thir, 'tcx> {
+ fn new(
+ tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
- src_id: hir::HirId,
- ) -> Cx<'a, 'tcx> {
- let tcx = infcx.tcx;
+ arena: &'thir Arena<'thir, 'tcx>,
+ ) -> Cx<'thir, 'tcx> {
let typeck_results = tcx.typeck_opt_const_arg(def);
- let body_owner_kind = tcx.hir().body_owner_kind(src_id);
-
- let constness = match body_owner_kind {
- hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => hir::Constness::Const,
- hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => hir::Constness::NotConst,
- };
-
- let attrs = tcx.hir().attrs(src_id);
-
- // Some functions always have overflow checks enabled,
- // however, they may not get codegen'd, depending on
- // the settings for the crate they are codegened in.
- let mut check_overflow = tcx.sess.contains_name(attrs, sym::rustc_inherit_overflow_checks);
-
- // Respect -C overflow-checks.
- check_overflow |= tcx.sess.overflow_checks();
-
- // Constants always need overflow checks.
- check_overflow |= constness == hir::Constness::Const;
-
Cx {
tcx,
- infcx,
- root_lint_level: src_id,
+ arena,
param_env: tcx.param_env(def.did),
- identity_substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
region_scope_tree: tcx.region_scope_tree(def.did),
typeck_results,
- constness,
body_owner: def.did.to_def_id(),
- body_owner_kind,
- check_overflow,
}
}
-}
-
-impl<'a, 'tcx> Cx<'a, 'tcx> {
- /// Normalizes `ast` into the appropriate "mirror" type.
- crate fn mirror<M: Mirror<'tcx>>(&mut self, ast: M) -> M::Output {
- ast.make_mirror(self)
- }
-
- crate fn usize_ty(&mut self) -> Ty<'tcx> {
- self.tcx.types.usize
- }
-
- crate fn usize_literal(&mut self, value: u64) -> &'tcx ty::Const<'tcx> {
- ty::Const::from_usize(self.tcx, value)
- }
-
- crate fn bool_ty(&mut self) -> Ty<'tcx> {
- self.tcx.types.bool
- }
-
- crate fn unit_ty(&mut self) -> Ty<'tcx> {
- self.tcx.mk_unit()
- }
-
- crate fn true_literal(&mut self) -> &'tcx ty::Const<'tcx> {
- ty::Const::from_bool(self.tcx, true)
- }
-
- crate fn false_literal(&mut self) -> &'tcx ty::Const<'tcx> {
- ty::Const::from_bool(self.tcx, false)
- }
crate fn const_eval_literal(
&mut self,
@@ -137,11 +68,11 @@
// FIXME(#31407) this is only necessary because float parsing is buggy
self.tcx.sess.span_err(sp, "could not evaluate float literal (see issue #31407)");
// create a dummy value and continue compiling
- Const::from_bits(self.tcx, 0, self.param_env.and(ty))
+ self.tcx.const_error(ty)
}
Err(LitToConstError::Reported) => {
// create a dummy value and continue compiling
- Const::from_bits(self.tcx, 0, self.param_env.and(ty))
+ self.tcx.const_error(ty)
}
Err(LitToConstError::TypeError) => bug!("const_eval_literal: had type error"),
}
@@ -154,69 +85,17 @@
};
Pat::from_hir(self.tcx, self.param_env, self.typeck_results(), p)
}
-
- crate fn trait_method(
- &mut self,
- trait_def_id: DefId,
- method_name: Symbol,
- self_ty: Ty<'tcx>,
- params: &[GenericArg<'tcx>],
- ) -> &'tcx ty::Const<'tcx> {
- let substs = self.tcx.mk_substs_trait(self_ty, params);
-
- // The unhygienic comparison here is acceptable because this is only
- // used on known traits.
- let item = self
- .tcx
- .associated_items(trait_def_id)
- .filter_by_name_unhygienic(method_name)
- .find(|item| item.kind == ty::AssocKind::Fn)
- .expect("trait method not found");
-
- let method_ty = self.tcx.type_of(item.def_id);
- let method_ty = method_ty.subst(self.tcx, substs);
- ty::Const::zero_sized(self.tcx, method_ty)
- }
-
- crate fn all_fields(&mut self, adt_def: &ty::AdtDef, variant_index: VariantIdx) -> Vec<Field> {
- (0..adt_def.variants[variant_index].fields.len()).map(Field::new).collect()
- }
-
- crate fn needs_drop(&mut self, ty: Ty<'tcx>) -> bool {
- ty.needs_drop(self.tcx, self.param_env)
- }
-
- crate fn infcx(&self) -> &'a InferCtxt<'a, 'tcx> {
- self.infcx
- }
-
- crate fn tcx(&self) -> TyCtxt<'tcx> {
- self.tcx
- }
-
- crate fn typeck_results(&self) -> &'a ty::TypeckResults<'tcx> {
- self.typeck_results
- }
-
- crate fn check_overflow(&self) -> bool {
- self.check_overflow
- }
-
- crate fn type_is_copy_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool {
- self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span)
- }
}
impl<'tcx> UserAnnotatedTyHelpers<'tcx> for Cx<'_, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
- self.tcx()
+ self.tcx
}
fn typeck_results(&self) -> &ty::TypeckResults<'tcx> {
- self.typeck_results()
+ self.typeck_results
}
}
mod block;
mod expr;
-mod to_ref;
diff --git a/compiler/rustc_mir_build/src/thir/cx/to_ref.rs b/compiler/rustc_mir_build/src/thir/cx/to_ref.rs
deleted file mode 100644
index 53a988e..0000000
--- a/compiler/rustc_mir_build/src/thir/cx/to_ref.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-use crate::thir::*;
-
-use rustc_hir as hir;
-
-crate trait ToRef {
- type Output;
- fn to_ref(self) -> Self::Output;
-}
-
-impl<'tcx> ToRef for &'tcx hir::Expr<'tcx> {
- type Output = ExprRef<'tcx>;
-
- fn to_ref(self) -> ExprRef<'tcx> {
- ExprRef::Thir(self)
- }
-}
-
-impl<'tcx> ToRef for &'tcx &'tcx hir::Expr<'tcx> {
- type Output = ExprRef<'tcx>;
-
- fn to_ref(self) -> ExprRef<'tcx> {
- ExprRef::Thir(&**self)
- }
-}
-
-impl<'tcx> ToRef for Expr<'tcx> {
- type Output = ExprRef<'tcx>;
-
- fn to_ref(self) -> ExprRef<'tcx> {
- ExprRef::Mirror(Box::new(self))
- }
-}
-
-impl<'tcx, T, U> ToRef for &'tcx Option<T>
-where
- &'tcx T: ToRef<Output = U>,
-{
- type Output = Option<U>;
-
- fn to_ref(self) -> Option<U> {
- self.as_ref().map(|expr| expr.to_ref())
- }
-}
-
-impl<'tcx, T, U> ToRef for &'tcx Vec<T>
-where
- &'tcx T: ToRef<Output = U>,
-{
- type Output = Vec<U>;
-
- fn to_ref(self) -> Vec<U> {
- self.iter().map(|expr| expr.to_ref()).collect()
- }
-}
-
-impl<'tcx, T, U> ToRef for &'tcx [T]
-where
- &'tcx T: ToRef<Output = U>,
-{
- type Output = Vec<U>;
-
- fn to_ref(self) -> Vec<U> {
- self.iter().map(|expr| expr.to_ref()).collect()
- }
-}
diff --git a/compiler/rustc_mir_build/src/thir/mod.rs b/compiler/rustc_mir_build/src/thir/mod.rs
index ed3d392..6f20195 100644
--- a/compiler/rustc_mir_build/src/thir/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/mod.rs
@@ -4,13 +4,12 @@
//! unit-tested and separated from the Rust source and compiler data
//! structures.
-use self::cx::Cx;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_middle::infer::canonical::Canonical;
use rustc_middle::middle::region;
-use rustc_middle::mir::{BinOp, BorrowKind, Field, UnOp};
+use rustc_middle::mir::{BinOp, BorrowKind, FakeReadCause, Field, UnOp};
use rustc_middle::ty::adjustment::PointerCast;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{AdtDef, Const, Ty, UpvarSubsts, UserType};
@@ -19,58 +18,57 @@
use rustc_target::asm::InlineAsmRegOrRegClass;
crate mod constant;
+
crate mod cx;
+pub use cx::build_thir;
crate mod pattern;
-crate use self::pattern::PatTyProj;
-crate use self::pattern::{BindingMode, FieldPat, Pat, PatKind, PatRange};
+pub use self::pattern::{Ascription, BindingMode, FieldPat, Pat, PatKind, PatRange, PatTyProj};
+
+mod arena;
+pub use arena::Arena;
mod util;
#[derive(Copy, Clone, Debug)]
-crate enum LintLevel {
+pub enum LintLevel {
Inherited,
Explicit(hir::HirId),
}
-#[derive(Clone, Debug)]
-crate struct Block<'tcx> {
- crate targeted_by_break: bool,
- crate region_scope: region::Scope,
- crate opt_destruction_scope: Option<region::Scope>,
- crate span: Span,
- crate stmts: Vec<StmtRef<'tcx>>,
- crate expr: Option<ExprRef<'tcx>>,
- crate safety_mode: BlockSafety,
+#[derive(Debug)]
+pub struct Block<'thir, 'tcx> {
+ pub targeted_by_break: bool,
+ pub region_scope: region::Scope,
+ pub opt_destruction_scope: Option<region::Scope>,
+ pub span: Span,
+ pub stmts: &'thir [Stmt<'thir, 'tcx>],
+ pub expr: Option<&'thir Expr<'thir, 'tcx>>,
+ pub safety_mode: BlockSafety,
}
#[derive(Copy, Clone, Debug)]
-crate enum BlockSafety {
+pub enum BlockSafety {
Safe,
ExplicitUnsafe(hir::HirId),
PushUnsafe,
PopUnsafe,
}
-#[derive(Clone, Debug)]
-crate enum StmtRef<'tcx> {
- Mirror(Box<Stmt<'tcx>>),
+#[derive(Debug)]
+pub struct Stmt<'thir, 'tcx> {
+ pub kind: StmtKind<'thir, 'tcx>,
+ pub opt_destruction_scope: Option<region::Scope>,
}
-#[derive(Clone, Debug)]
-crate struct Stmt<'tcx> {
- crate kind: StmtKind<'tcx>,
- crate opt_destruction_scope: Option<region::Scope>,
-}
-
-#[derive(Clone, Debug)]
-crate enum StmtKind<'tcx> {
+#[derive(Debug)]
+pub enum StmtKind<'thir, 'tcx> {
Expr {
/// scope for this statement; may be used as lifetime of temporaries
scope: region::Scope,
/// expression being evaluated in this statement
- expr: ExprRef<'tcx>,
+ expr: &'thir Expr<'thir, 'tcx>,
},
Let {
@@ -88,7 +86,7 @@
pattern: Pat<'tcx>,
/// let pat: ty = <INIT> ...
- initializer: Option<ExprRef<'tcx>>,
+ initializer: Option<&'thir Expr<'thir, 'tcx>>,
/// the lint level for this let-statement
lint_level: LintLevel,
@@ -96,13 +94,13 @@
}
// `Expr` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
-rustc_data_structures::static_assert_size!(Expr<'_>, 168);
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(Expr<'_, '_>, 144);
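
The assertion above pins the size of `Expr` so it does not grow unnoticed. Roughly how such a compile-time size check can be written (a sketch, assuming the real `rustc_data_structures` macro works along these lines; `Example` is a made-up type):

```
// If the sizes stop matching, the two array types differ and compilation fails.
macro_rules! static_assert_size {
    ($ty:ty, $size:expr) => {
        const _: [(); $size] = [(); std::mem::size_of::<$ty>()];
    };
}

struct Example {
    a: u64,
    b: u64,
}

static_assert_size!(Example, 16);

fn main() {
    println!("Example is {} bytes", std::mem::size_of::<Example>());
}
```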
/// The Thir trait implementor lowers their expressions (`&'tcx H::Expr`)
/// into instances of this `Expr` enum. This lowering can be done
/// basically as lazily or as eagerly as desired: every recursive
-/// reference to an expression in this enum is an `ExprRef<'tcx>`, which
+/// reference to an expression in this enum is an `&'thir Expr<'thir, 'tcx>`, which
/// may in turn be another instance of this enum (boxed), or else an
/// unlowered `&'tcx H::Expr`. Note that instances of `Expr` are very
/// short-lived. They are created by `Thir::to_expr`, analyzed and
@@ -113,105 +111,105 @@
/// MIR simplifications are already done in the impl of `Thir`. For
/// example, method calls and overloaded operators are absent: they are
/// expected to be converted into `Expr::Call` instances.
-#[derive(Clone, Debug)]
-crate struct Expr<'tcx> {
+#[derive(Debug)]
+pub struct Expr<'thir, 'tcx> {
/// type of this expression
- crate ty: Ty<'tcx>,
+ pub ty: Ty<'tcx>,
/// lifetime of this expression if it should be spilled into a
/// temporary; should be None only if in a constant context
- crate temp_lifetime: Option<region::Scope>,
+ pub temp_lifetime: Option<region::Scope>,
/// span of the expression in the source
- crate span: Span,
+ pub span: Span,
/// kind of expression
- crate kind: ExprKind<'tcx>,
+ pub kind: ExprKind<'thir, 'tcx>,
}
-#[derive(Clone, Debug)]
-crate enum ExprKind<'tcx> {
+#[derive(Debug)]
+pub enum ExprKind<'thir, 'tcx> {
Scope {
region_scope: region::Scope,
lint_level: LintLevel,
- value: ExprRef<'tcx>,
+ value: &'thir Expr<'thir, 'tcx>,
},
Box {
- value: ExprRef<'tcx>,
+ value: &'thir Expr<'thir, 'tcx>,
},
If {
- cond: ExprRef<'tcx>,
- then: ExprRef<'tcx>,
- else_opt: Option<ExprRef<'tcx>>,
+ cond: &'thir Expr<'thir, 'tcx>,
+ then: &'thir Expr<'thir, 'tcx>,
+ else_opt: Option<&'thir Expr<'thir, 'tcx>>,
},
Call {
ty: Ty<'tcx>,
- fun: ExprRef<'tcx>,
- args: Vec<ExprRef<'tcx>>,
- // Whether this is from a call in HIR, rather than from an overloaded
- // operator. True for overloaded function call.
+ fun: &'thir Expr<'thir, 'tcx>,
+ args: &'thir [Expr<'thir, 'tcx>],
+ /// Whether this is from a call in HIR, rather than from an overloaded
+ /// operator. `true` for overloaded function call.
from_hir_call: bool,
/// This `Span` is the span of the function, without the dot and receiver
         /// (e.g. `foo(a, b)` in `x.foo(a, b)`)
fn_span: Span,
},
Deref {
- arg: ExprRef<'tcx>,
+ arg: &'thir Expr<'thir, 'tcx>,
}, // NOT overloaded!
Binary {
op: BinOp,
- lhs: ExprRef<'tcx>,
- rhs: ExprRef<'tcx>,
+ lhs: &'thir Expr<'thir, 'tcx>,
+ rhs: &'thir Expr<'thir, 'tcx>,
}, // NOT overloaded!
LogicalOp {
op: LogicalOp,
- lhs: ExprRef<'tcx>,
- rhs: ExprRef<'tcx>,
+ lhs: &'thir Expr<'thir, 'tcx>,
+ rhs: &'thir Expr<'thir, 'tcx>,
}, // NOT overloaded!
// LogicalOp is distinct from BinaryOp because of lazy evaluation of the operands.
Unary {
op: UnOp,
- arg: ExprRef<'tcx>,
+ arg: &'thir Expr<'thir, 'tcx>,
}, // NOT overloaded!
Cast {
- source: ExprRef<'tcx>,
+ source: &'thir Expr<'thir, 'tcx>,
},
Use {
- source: ExprRef<'tcx>,
+ source: &'thir Expr<'thir, 'tcx>,
}, // Use a lexpr to get a vexpr.
NeverToAny {
- source: ExprRef<'tcx>,
+ source: &'thir Expr<'thir, 'tcx>,
},
Pointer {
cast: PointerCast,
- source: ExprRef<'tcx>,
+ source: &'thir Expr<'thir, 'tcx>,
},
Loop {
- body: ExprRef<'tcx>,
+ body: &'thir Expr<'thir, 'tcx>,
},
Match {
- scrutinee: ExprRef<'tcx>,
- arms: Vec<Arm<'tcx>>,
+ scrutinee: &'thir Expr<'thir, 'tcx>,
+ arms: &'thir [Arm<'thir, 'tcx>],
},
Block {
- body: &'tcx hir::Block<'tcx>,
+ body: Block<'thir, 'tcx>,
},
Assign {
- lhs: ExprRef<'tcx>,
- rhs: ExprRef<'tcx>,
+ lhs: &'thir Expr<'thir, 'tcx>,
+ rhs: &'thir Expr<'thir, 'tcx>,
},
AssignOp {
op: BinOp,
- lhs: ExprRef<'tcx>,
- rhs: ExprRef<'tcx>,
+ lhs: &'thir Expr<'thir, 'tcx>,
+ rhs: &'thir Expr<'thir, 'tcx>,
},
Field {
- lhs: ExprRef<'tcx>,
+ lhs: &'thir Expr<'thir, 'tcx>,
name: Field,
},
Index {
- lhs: ExprRef<'tcx>,
- index: ExprRef<'tcx>,
+ lhs: &'thir Expr<'thir, 'tcx>,
+ index: &'thir Expr<'thir, 'tcx>,
},
VarRef {
id: hir::HirId,
@@ -226,35 +224,35 @@
},
Borrow {
borrow_kind: BorrowKind,
- arg: ExprRef<'tcx>,
+ arg: &'thir Expr<'thir, 'tcx>,
},
/// A `&raw [const|mut] $place_expr` raw borrow resulting in type `*[const|mut] T`.
AddressOf {
mutability: hir::Mutability,
- arg: ExprRef<'tcx>,
+ arg: &'thir Expr<'thir, 'tcx>,
},
Break {
label: region::Scope,
- value: Option<ExprRef<'tcx>>,
+ value: Option<&'thir Expr<'thir, 'tcx>>,
},
Continue {
label: region::Scope,
},
Return {
- value: Option<ExprRef<'tcx>>,
+ value: Option<&'thir Expr<'thir, 'tcx>>,
},
ConstBlock {
value: &'tcx Const<'tcx>,
},
Repeat {
- value: ExprRef<'tcx>,
+ value: &'thir Expr<'thir, 'tcx>,
count: &'tcx Const<'tcx>,
},
Array {
- fields: Vec<ExprRef<'tcx>>,
+ fields: &'thir [Expr<'thir, 'tcx>],
},
Tuple {
- fields: Vec<ExprRef<'tcx>>,
+ fields: &'thir [Expr<'thir, 'tcx>],
},
Adt {
adt_def: &'tcx AdtDef,
@@ -265,24 +263,25 @@
/// Bar::<T> { ... }`.
user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
- fields: Vec<FieldExprRef<'tcx>>,
- base: Option<FruInfo<'tcx>>,
+ fields: &'thir [FieldExpr<'thir, 'tcx>],
+ base: Option<FruInfo<'thir, 'tcx>>,
},
PlaceTypeAscription {
- source: ExprRef<'tcx>,
+ source: &'thir Expr<'thir, 'tcx>,
/// Type that the user gave to this expression
user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
},
ValueTypeAscription {
- source: ExprRef<'tcx>,
+ source: &'thir Expr<'thir, 'tcx>,
/// Type that the user gave to this expression
user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
},
Closure {
closure_id: DefId,
substs: UpvarSubsts<'tcx>,
- upvars: Vec<ExprRef<'tcx>>,
+ upvars: &'thir [Expr<'thir, 'tcx>],
movability: Option<hir::Movability>,
+ fake_reads: Vec<(&'thir Expr<'thir, 'tcx>, FakeReadCause, hir::HirId)>,
},
Literal {
literal: &'tcx Const<'tcx>,
@@ -302,7 +301,7 @@
},
InlineAsm {
template: &'tcx [InlineAsmTemplatePiece],
- operands: Vec<InlineAsmOperand<'tcx>>,
+ operands: &'thir [InlineAsmOperand<'thir, 'tcx>],
options: InlineAsmOptions,
line_spans: &'tcx [Span],
},
@@ -310,158 +309,77 @@
ThreadLocalRef(DefId),
LlvmInlineAsm {
asm: &'tcx hir::LlvmInlineAsmInner,
- outputs: Vec<ExprRef<'tcx>>,
- inputs: Vec<ExprRef<'tcx>>,
+ outputs: &'thir [Expr<'thir, 'tcx>],
+ inputs: &'thir [Expr<'thir, 'tcx>],
},
Yield {
- value: ExprRef<'tcx>,
+ value: &'thir Expr<'thir, 'tcx>,
},
}
-#[derive(Clone, Debug)]
-crate enum ExprRef<'tcx> {
- Thir(&'tcx hir::Expr<'tcx>),
- Mirror(Box<Expr<'tcx>>),
+#[derive(Debug)]
+pub struct FieldExpr<'thir, 'tcx> {
+ pub name: Field,
+ pub expr: &'thir Expr<'thir, 'tcx>,
}
-#[derive(Clone, Debug)]
-crate struct FieldExprRef<'tcx> {
- crate name: Field,
- crate expr: ExprRef<'tcx>,
+#[derive(Debug)]
+pub struct FruInfo<'thir, 'tcx> {
+ pub base: &'thir Expr<'thir, 'tcx>,
+ pub field_types: &'thir [Ty<'tcx>],
}
-#[derive(Clone, Debug)]
-crate struct FruInfo<'tcx> {
- crate base: ExprRef<'tcx>,
- crate field_types: Vec<Ty<'tcx>>,
+#[derive(Debug)]
+pub struct Arm<'thir, 'tcx> {
+ pub pattern: Pat<'tcx>,
+ pub guard: Option<Guard<'thir, 'tcx>>,
+ pub body: &'thir Expr<'thir, 'tcx>,
+ pub lint_level: LintLevel,
+ pub scope: region::Scope,
+ pub span: Span,
}
-#[derive(Clone, Debug)]
-crate struct Arm<'tcx> {
- crate pattern: Pat<'tcx>,
- crate guard: Option<Guard<'tcx>>,
- crate body: ExprRef<'tcx>,
- crate lint_level: LintLevel,
- crate scope: region::Scope,
- crate span: Span,
-}
-
-#[derive(Clone, Debug)]
-crate enum Guard<'tcx> {
- If(ExprRef<'tcx>),
- IfLet(Pat<'tcx>, ExprRef<'tcx>),
+#[derive(Debug)]
+pub enum Guard<'thir, 'tcx> {
+ If(&'thir Expr<'thir, 'tcx>),
+ IfLet(Pat<'tcx>, &'thir Expr<'thir, 'tcx>),
}
#[derive(Copy, Clone, Debug)]
-crate enum LogicalOp {
+pub enum LogicalOp {
And,
Or,
}
-impl<'tcx> ExprRef<'tcx> {
- crate fn span(&self) -> Span {
- match self {
- ExprRef::Thir(expr) => expr.span,
- ExprRef::Mirror(expr) => expr.span,
- }
- }
-}
-
-#[derive(Clone, Debug)]
-crate enum InlineAsmOperand<'tcx> {
+#[derive(Debug)]
+pub enum InlineAsmOperand<'thir, 'tcx> {
In {
reg: InlineAsmRegOrRegClass,
- expr: ExprRef<'tcx>,
+ expr: &'thir Expr<'thir, 'tcx>,
},
Out {
reg: InlineAsmRegOrRegClass,
late: bool,
- expr: Option<ExprRef<'tcx>>,
+ expr: Option<&'thir Expr<'thir, 'tcx>>,
},
InOut {
reg: InlineAsmRegOrRegClass,
late: bool,
- expr: ExprRef<'tcx>,
+ expr: &'thir Expr<'thir, 'tcx>,
},
SplitInOut {
reg: InlineAsmRegOrRegClass,
late: bool,
- in_expr: ExprRef<'tcx>,
- out_expr: Option<ExprRef<'tcx>>,
+ in_expr: &'thir Expr<'thir, 'tcx>,
+ out_expr: Option<&'thir Expr<'thir, 'tcx>>,
},
Const {
- expr: ExprRef<'tcx>,
+ expr: &'thir Expr<'thir, 'tcx>,
},
SymFn {
- expr: ExprRef<'tcx>,
+ expr: &'thir Expr<'thir, 'tcx>,
},
SymStatic {
def_id: DefId,
},
}
-
-///////////////////////////////////////////////////////////////////////////
-// The Mirror trait
-
-/// "Mirroring" is the process of converting from a HIR type into one
-/// of the THIR types defined in this file. This is basically a "on
-/// the fly" desugaring step that hides a lot of the messiness in the
-/// tcx. For example, the mirror of a `&'tcx hir::Expr` is an
-/// `Expr<'tcx>`.
-///
-/// Mirroring is gradual: when you mirror an outer expression like `e1
-/// + e2`, the references to the inner expressions `e1` and `e2` are
-/// `ExprRef<'tcx>` instances, and they may or may not be eagerly
-/// mirrored. This allows a single AST node from the compiler to
-/// expand into one or more Thir nodes, which lets the Thir nodes be
-/// simpler.
-crate trait Mirror<'tcx> {
- type Output;
-
- fn make_mirror(self, cx: &mut Cx<'_, 'tcx>) -> Self::Output;
-}
-
-impl<'tcx> Mirror<'tcx> for Expr<'tcx> {
- type Output = Expr<'tcx>;
-
- fn make_mirror(self, _: &mut Cx<'_, 'tcx>) -> Expr<'tcx> {
- self
- }
-}
-
-impl<'tcx> Mirror<'tcx> for ExprRef<'tcx> {
- type Output = Expr<'tcx>;
-
- fn make_mirror(self, hir: &mut Cx<'_, 'tcx>) -> Expr<'tcx> {
- match self {
- ExprRef::Thir(h) => h.make_mirror(hir),
- ExprRef::Mirror(m) => *m,
- }
- }
-}
-
-impl<'tcx> Mirror<'tcx> for Stmt<'tcx> {
- type Output = Stmt<'tcx>;
-
- fn make_mirror(self, _: &mut Cx<'_, 'tcx>) -> Stmt<'tcx> {
- self
- }
-}
-
-impl<'tcx> Mirror<'tcx> for StmtRef<'tcx> {
- type Output = Stmt<'tcx>;
-
- fn make_mirror(self, _: &mut Cx<'_, 'tcx>) -> Stmt<'tcx> {
- match self {
- StmtRef::Mirror(m) => *m,
- }
- }
-}
-
-impl<'tcx> Mirror<'tcx> for Block<'tcx> {
- type Output = Block<'tcx>;
-
- fn make_mirror(self, _: &mut Cx<'_, 'tcx>) -> Block<'tcx> {
- self
- }
-}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
index 3977068..fdecbb9 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
@@ -1,6 +1,6 @@
-use super::usefulness::Usefulness::*;
use super::usefulness::{
- compute_match_usefulness, expand_pattern, MatchArm, MatchCheckCtxt, UsefulnessReport,
+ compute_match_usefulness, expand_pattern, MatchArm, MatchCheckCtxt, Reachability,
+ UsefulnessReport,
};
use super::{PatCtxt, PatKind, PatternError};
@@ -366,14 +366,31 @@
}
fn irrefutable_let_pattern(tcx: TyCtxt<'_>, span: Span, id: HirId, source: hir::MatchSource) {
- tcx.struct_span_lint_hir(IRREFUTABLE_LET_PATTERNS, id, span, |lint| {
- let msg = match source {
- hir::MatchSource::IfLetDesugar { .. } => "irrefutable if-let pattern",
- hir::MatchSource::WhileLetDesugar => "irrefutable while-let pattern",
- hir::MatchSource::IfLetGuardDesugar => "irrefutable if-let guard",
- _ => bug!(),
- };
- lint.build(msg).emit()
+ tcx.struct_span_lint_hir(IRREFUTABLE_LET_PATTERNS, id, span, |lint| match source {
+ hir::MatchSource::IfLetDesugar { .. } => {
+ let mut diag = lint.build("irrefutable `if let` pattern");
+ diag.note("this pattern will always match, so the `if let` is useless");
+ diag.help("consider replacing the `if let` with a `let`");
+ diag.emit()
+ }
+ hir::MatchSource::WhileLetDesugar => {
+ let mut diag = lint.build("irrefutable `while let` pattern");
+ diag.note("this pattern will always match, so the loop will never exit");
+ diag.help("consider instead using a `loop { ... }` with a `let` inside it");
+ diag.emit()
+ }
+ hir::MatchSource::IfLetGuardDesugar => {
+ let mut diag = lint.build("irrefutable `if let` guard pattern");
+ diag.note("this pattern will always match, so the guard is useless");
+ diag.help("consider removing the guard and adding a `let` inside the match arm");
+ diag.emit()
+ }
+ _ => {
+ bug!(
+ "expected `if let`, `while let`, or `if let` guard HIR match source, found {:?}",
+ source,
+ )
+ }
});
}
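
The reworked diagnostics above fire when the pattern of an `if let`/`while let` (or an `if let` guard) always matches. A tiny program that triggers the `if let` case, with the messages added in this hunk (illustrative):

```
fn main() {
    let value = 5;
    // An identifier pattern always matches, so this warns:
    //   warning: irrefutable `if let` pattern
    //   note: this pattern will always match, so the `if let` is useless
    //   help: consider replacing the `if let` with a `let`
    if let bound = value {
        println!("{}", bound);
    }
}
```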
@@ -387,7 +404,7 @@
report_arm_reachability(&cx, &report, hir::MatchSource::IfLetGuardDesugar);
if report.non_exhaustiveness_witnesses.is_empty() {
- // The match is exhaustive, i.e. the if let pattern is irrefutable.
+ // The match is exhaustive, i.e. the `if let` pattern is irrefutable.
irrefutable_let_pattern(cx.tcx, pat.span, pat_id, hir::MatchSource::IfLetGuardDesugar)
}
}
@@ -398,10 +415,11 @@
report: &UsefulnessReport<'p, 'tcx>,
source: hir::MatchSource,
) {
+ use Reachability::*;
let mut catchall = None;
for (arm_index, (arm, is_useful)) in report.arm_usefulness.iter().enumerate() {
match is_useful {
- NotUseful => {
+ Unreachable => {
match source {
hir::MatchSource::WhileDesugar => bug!(),
@@ -430,17 +448,16 @@
hir::MatchSource::AwaitDesugar | hir::MatchSource::TryDesugar => {}
}
}
- Useful(unreachables) if unreachables.is_empty() => {}
+ Reachable(unreachables) if unreachables.is_empty() => {}
// The arm is reachable, but contains unreachable subpatterns (from or-patterns).
- Useful(unreachables) => {
- let mut unreachables: Vec<_> = unreachables.iter().collect();
+ Reachable(unreachables) => {
+ let mut unreachables = unreachables.clone();
// Emit lints in the order in which they occur in the file.
unreachables.sort_unstable();
for span in unreachables {
unreachable_pattern(cx.tcx, span, arm.hir_id, None);
}
}
- UsefulWithWitness(_) => bug!(),
}
if !arm.has_guard && catchall.is_none() && pat_is_catchall(arm.pat) {
catchall = Some(arm.pat.span);
diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
index 32fc0f0..ef1419b 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
@@ -18,7 +18,7 @@
/// Converts an evaluated constant to a pattern (if possible).
/// This means aggregate values (like structs and enums) are converted
/// to a pattern that matches the value (as if you'd compared via structural equality).
- #[instrument(skip(self))]
+ #[instrument(level = "debug", skip(self))]
pub(super) fn const_to_pat(
&self,
cv: &'tcx ty::Const<'tcx>,
diff --git a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
index e67166c..8c740a7 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
@@ -723,8 +723,6 @@
where
'tcx: 'a,
{
- debug!("Constructor::split({:#?})", self);
-
match self {
Wildcard => {
let mut split_wildcard = SplitWildcard::new(pcx);
@@ -1345,7 +1343,9 @@
match &mut fields {
Fields::Vec(pats) => {
for (i, pat) in new_pats {
- pats[i] = pat
+ if let Some(p) = pats.get_mut(i) {
+ *p = pat;
+ }
}
}
Fields::Filtered { fields, .. } => {
diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
index 7186e26..9ac79a3 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
@@ -40,22 +40,22 @@
}
#[derive(Copy, Clone, Debug, PartialEq)]
-crate enum BindingMode {
+pub enum BindingMode {
ByValue,
ByRef(BorrowKind),
}
#[derive(Clone, Debug, PartialEq)]
-crate struct FieldPat<'tcx> {
- crate field: Field,
- crate pattern: Pat<'tcx>,
+pub struct FieldPat<'tcx> {
+ pub field: Field,
+ pub pattern: Pat<'tcx>,
}
#[derive(Clone, Debug, PartialEq)]
-crate struct Pat<'tcx> {
- crate ty: Ty<'tcx>,
- crate span: Span,
- crate kind: Box<PatKind<'tcx>>,
+pub struct Pat<'tcx> {
+ pub ty: Ty<'tcx>,
+ pub span: Span,
+ pub kind: Box<PatKind<'tcx>>,
}
impl<'tcx> Pat<'tcx> {
@@ -65,8 +65,8 @@
}
#[derive(Copy, Clone, Debug, PartialEq)]
-crate struct PatTyProj<'tcx> {
- crate user_ty: CanonicalUserType<'tcx>,
+pub struct PatTyProj<'tcx> {
+ pub user_ty: CanonicalUserType<'tcx>,
}
impl<'tcx> PatTyProj<'tcx> {
@@ -92,8 +92,8 @@
}
#[derive(Copy, Clone, Debug, PartialEq)]
-crate struct Ascription<'tcx> {
- crate user_ty: PatTyProj<'tcx>,
+pub struct Ascription<'tcx> {
+ pub user_ty: PatTyProj<'tcx>,
/// Variance to use when relating the type `user_ty` to the **type of the value being
/// matched**. Typically, this is `Variance::Covariant`, since the value being matched must
/// have a type that is some subtype of the ascribed type.
@@ -112,12 +112,12 @@
/// requires that `&'static str <: T_x`, where `T_x` is the type of `x`. Really, we should
/// probably be checking for a `PartialEq` impl instead, but this preserves the behavior
/// of the old type-check for now. See #57280 for details.
- crate variance: ty::Variance,
- crate user_ty_span: Span,
+ pub variance: ty::Variance,
+ pub user_ty_span: Span,
}
#[derive(Clone, Debug, PartialEq)]
-crate enum PatKind<'tcx> {
+pub enum PatKind<'tcx> {
Wild,
AscribeUserType {
@@ -195,10 +195,10 @@
}
#[derive(Copy, Clone, Debug, PartialEq)]
-crate struct PatRange<'tcx> {
- crate lo: &'tcx ty::Const<'tcx>,
- crate hi: &'tcx ty::Const<'tcx>,
- crate end: RangeEnd,
+pub struct PatRange<'tcx> {
+ pub lo: &'tcx ty::Const<'tcx>,
+ pub hi: &'tcx ty::Const<'tcx>,
+ pub end: RangeEnd,
}
impl<'tcx> fmt::Display for Pat<'tcx> {
@@ -866,7 +866,7 @@
return *self.const_to_pat(value, expr.hir_id, expr.span, false).kind;
}
hir::ExprKind::Lit(ref lit) => (lit, false),
- hir::ExprKind::Unary(hir::UnOp::UnNeg, ref expr) => {
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => {
let lit = match expr.kind {
hir::ExprKind::Lit(ref lit) => lit,
_ => span_bug!(expr.span, "not a literal: {:?}", expr),
diff --git a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
index d7c08a2..dce0df8 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
@@ -288,7 +288,7 @@
use super::{PatternFoldable, PatternFolder};
use rustc_data_structures::captures::Captures;
-use rustc_data_structures::sync::OnceCell;
+use rustc_data_structures::fx::FxHashMap;
use rustc_arena::TypedArena;
use rustc_hir::def_id::DefId;
@@ -299,6 +299,7 @@
use smallvec::{smallvec, SmallVec};
use std::fmt;
use std::iter::{FromIterator, IntoIterator};
+use std::lazy::OnceCell;
crate struct MatchCheckCtxt<'a, 'tcx> {
crate tcx: TyCtxt<'tcx>,
@@ -344,6 +345,12 @@
pub(super) is_top_level: bool,
}
+impl<'a, 'p, 'tcx> fmt::Debug for PatCtxt<'a, 'p, 'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("PatCtxt").field("ty", &self.ty).finish()
+ }
+}
+
crate fn expand_pattern<'tcx>(pat: Pat<'tcx>) -> Pat<'tcx> {
LiteralExpander.fold_pattern(&pat)
}
@@ -379,11 +386,32 @@
pub(super) fn is_wildcard(&self) -> bool {
matches!(*self.kind, PatKind::Binding { subpattern: None, .. } | PatKind::Wild)
}
+
+ fn is_or_pat(&self) -> bool {
+ matches!(*self.kind, PatKind::Or { .. })
+ }
+
+ /// Recursively expand this pattern into its subpatterns. Only useful for or-patterns.
+ fn expand_or_pat(&self) -> Vec<&Self> {
+ fn expand<'p, 'tcx>(pat: &'p Pat<'tcx>, vec: &mut Vec<&'p Pat<'tcx>>) {
+ if let PatKind::Or { pats } = pat.kind.as_ref() {
+ for pat in pats {
+ expand(pat, vec);
+ }
+ } else {
+ vec.push(pat)
+ }
+ }
+
+ let mut pats = Vec::new();
+ expand(self, &mut pats);
+ pats
+ }
}
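
A minimal sketch of the recursive flattening that `expand_or_pat` performs, on a toy pattern type (all names here are illustrative, not compiler types):

```
#[derive(Debug)]
enum ToyPat {
    Lit(i32),
    Or(Vec<ToyPat>),
}

// Collect every non-or leaf of `pat`, flattening nested or-patterns, as `expand` does above.
fn expand<'p>(pat: &'p ToyPat, out: &mut Vec<&'p ToyPat>) {
    if let ToyPat::Or(pats) = pat {
        for p in pats {
            expand(p, out);
        }
    } else {
        out.push(pat);
    }
}

fn main() {
    // `0 | (1 | 2)` flattens into three leaf patterns.
    let pat = ToyPat::Or(vec![
        ToyPat::Lit(0),
        ToyPat::Or(vec![ToyPat::Lit(1), ToyPat::Lit(2)]),
    ]);
    let mut leaves = Vec::new();
    expand(&pat, &mut leaves);
    assert_eq!(leaves.len(), 3);
}
```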
/// A row of a matrix. Rows of len 1 are very common, which is why `SmallVec[_; 2]`
/// works well.
-#[derive(Debug, Clone)]
+#[derive(Clone)]
struct PatStack<'p, 'tcx> {
pats: SmallVec<[&'p Pat<'tcx>; 2]>,
/// Cache for the constructor of the head
@@ -411,6 +439,7 @@
self.pats[0]
}
+ #[inline]
fn head_ctor<'a>(&'a self, cx: &MatchCheckCtxt<'p, 'tcx>) -> &'a Constructor<'tcx> {
self.head_ctor.get_or_init(|| Constructor::from_pat(cx, self.head()))
}
@@ -419,23 +448,14 @@
self.pats.iter().copied()
}
- // If the first pattern is an or-pattern, expand this pattern. Otherwise, return `None`.
- fn expand_or_pat(&self) -> Option<Vec<Self>> {
- if self.is_empty() {
- None
- } else if let PatKind::Or { pats } = &*self.head().kind {
- Some(
- pats.iter()
- .map(|pat| {
- let mut new_patstack = PatStack::from_pattern(pat);
- new_patstack.pats.extend_from_slice(&self.pats[1..]);
- new_patstack
- })
- .collect(),
- )
- } else {
- None
- }
+ // Recursively expand the first pattern into its subpatterns. Only useful if the pattern is an
+ // or-pattern. Panics if `self` is empty.
+ fn expand_or_pat<'a>(&'a self) -> impl Iterator<Item = PatStack<'p, 'tcx>> + Captures<'a> {
+ self.head().expand_or_pat().into_iter().map(move |pat| {
+ let mut new_patstack = PatStack::from_pattern(pat);
+ new_patstack.pats.extend_from_slice(&self.pats[1..]);
+ new_patstack
+ })
}
/// This computes `S(self.head_ctor(), self)`. See top of the file for explanations.
@@ -475,6 +495,17 @@
}
}
+/// Pretty-printing for matrix row.
+impl<'p, 'tcx> fmt::Debug for PatStack<'p, 'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "+")?;
+ for pat in self.iter() {
+ write!(f, " {} +", pat)?;
+ }
+ Ok(())
+ }
+}
+
/// A 2D matrix.
#[derive(Clone, PartialEq)]
pub(super) struct Matrix<'p, 'tcx> {
@@ -491,13 +522,12 @@
self.patterns.get(0).map(|r| r.len())
}
- /// Pushes a new row to the matrix. If the row starts with an or-pattern, this expands it.
+ /// Pushes a new row to the matrix. If the row starts with an or-pattern, this recursively
+ /// expands it.
fn push(&mut self, row: PatStack<'p, 'tcx>) {
- if let Some(rows) = row.expand_or_pat() {
- for row in rows {
- // We recursively expand the or-patterns of the new rows.
- // This is necessary as we might have `0 | (1 | 2)` or e.g., `x @ 0 | x @ (1 | 2)`.
- self.push(row)
+ if !row.is_empty() && row.head().is_or_pat() {
+ for row in row.expand_or_pat() {
+ self.patterns.push(row);
}
} else {
self.patterns.push(row);
@@ -543,17 +573,11 @@
/// Pretty-printer for matrices of patterns, example:
///
/// ```text
-/// +++++++++++++++++++++++++++++
/// + _ + [] +
-/// +++++++++++++++++++++++++++++
/// + true + [First] +
-/// +++++++++++++++++++++++++++++
/// + true + [Second(true)] +
-/// +++++++++++++++++++++++++++++
/// + false + [_] +
-/// +++++++++++++++++++++++++++++
/// + _ + [_, _, tail @ ..] +
-/// +++++++++++++++++++++++++++++
/// ```
impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -561,17 +585,14 @@
let Matrix { patterns: m, .. } = self;
let pretty_printed_matrix: Vec<Vec<String>> =
- m.iter().map(|row| row.iter().map(|pat| format!("{:?}", pat)).collect()).collect();
+ m.iter().map(|row| row.iter().map(|pat| format!("{}", pat)).collect()).collect();
- let column_count = m.iter().map(|row| row.len()).max().unwrap_or(0);
+ let column_count = m.iter().map(|row| row.len()).next().unwrap_or(0);
assert!(m.iter().all(|row| row.len() == column_count));
let column_widths: Vec<usize> = (0..column_count)
.map(|col| pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0))
.collect();
- let total_width = column_widths.iter().cloned().sum::<usize>() + column_count * 3 + 1;
- let br = "+".repeat(total_width);
- write!(f, "{}\n", br)?;
for row in pretty_printed_matrix {
write!(f, "+")?;
for (column, pat_str) in row.into_iter().enumerate() {
@@ -580,7 +601,6 @@
write!(f, " +")?;
}
write!(f, "\n")?;
- write!(f, "{}\n", br)?;
}
Ok(())
}
@@ -600,183 +620,318 @@
}
}
-/// Represents a set of `Span`s closed under the containment relation. That is, if a `Span` is
-/// contained in the set then all `Span`s contained in it are also implicitly contained in the set.
-/// In particular this means that when intersecting two sets, taking the intersection of some span
-/// and one of its subspans returns the subspan, whereas a simple `HashSet` would have returned an
-/// empty intersection.
-/// It is assumed that two spans don't overlap without one being contained in the other; in other
-/// words, that the inclusion structure forms a tree and not a DAG.
-/// Intersection is not very efficient. It compares everything pairwise. If needed it could be made
-/// faster by sorting the `Span`s and merging cleverly.
-#[derive(Debug, Clone, Default)]
-pub(crate) struct SpanSet {
- /// The minimal set of `Span`s required to represent the whole set. If A and B are `Span`s in
- /// the `SpanSet`, and A is a descendant of B, then only B will be in `root_spans`.
- /// Invariant: the spans are disjoint.
- root_spans: Vec<Span>,
+/// Given a pattern or a pattern-stack, this struct captures a set of its subpatterns. We use that
+/// to track reachable sub-patterns arising from or-patterns. In the absence of or-patterns this
+/// will always be either `Empty` (the whole pattern is unreachable) or `Full` (the whole pattern
+/// is reachable). When there are or-patterns, some subpatterns may be reachable while others
+/// aren't. In this case the whole pattern still counts as reachable, but we will lint the
+/// unreachable subpatterns.
+///
+/// This supports a limited set of operations, so not all possible sets of subpatterns can be
+/// represented. That's ok, we only want the ones that make sense for our usage.
+///
+/// What we're doing is illustrated by this:
+/// ```
+/// match (true, 0) {
+/// (true, 0) => {}
+/// (_, 1) => {}
+/// (true | false, 0 | 1) => {}
+/// }
+/// ```
+/// When we try the alternatives of the `true | false` or-pattern, the last `0` is reachable in the
+/// `false` alternative but not the `true`. So overall it is reachable. By contrast, the last `1`
+/// is not reachable in either alternative, so we want to signal this to the user.
+/// Therefore we take the union of sets of reachable patterns coming from different alternatives in
+/// order to figure out which subpatterns are overall reachable.
+///
+/// Invariant: we try to construct the smallest representation we can. In particular if
+/// `self.is_empty()` we ensure that `self` is `Empty`, and same with `Full`. This is not important
+/// for correctness currently.
+#[derive(Debug, Clone)]
+enum SubPatSet<'p, 'tcx> {
+ /// The empty set. This means the pattern is unreachable.
+ Empty,
+ /// The set containing the full pattern.
+ Full,
+ /// If the pattern is a pattern with a constructor or a pattern-stack, we store a set for each
+ /// of its subpatterns. Missing entries in the map are implicitly full, because that's the
+ /// common case.
+ Seq { subpats: FxHashMap<usize, SubPatSet<'p, 'tcx>> },
+ /// If the pattern is an or-pattern, we store a set for each of its alternatives. Missing
+ /// entries in the map are implicitly empty. Note: we always flatten nested or-patterns.
+ Alt {
+ subpats: FxHashMap<usize, SubPatSet<'p, 'tcx>>,
+ /// Counts the total number of alternatives in the pattern
+ alt_count: usize,
+ /// We keep the pattern around to retrieve spans.
+ pat: &'p Pat<'tcx>,
+ },
}
-impl SpanSet {
- /// Creates an empty set.
- fn new() -> Self {
- Self::default()
+impl<'p, 'tcx> SubPatSet<'p, 'tcx> {
+ fn full() -> Self {
+ SubPatSet::Full
+ }
+ fn empty() -> Self {
+ SubPatSet::Empty
}
- /// Tests whether the set is empty.
- pub(crate) fn is_empty(&self) -> bool {
- self.root_spans.is_empty()
+ fn is_empty(&self) -> bool {
+ match self {
+ SubPatSet::Empty => true,
+ SubPatSet::Full => false,
+ // If any subpattern in a sequence is unreachable, the whole pattern is unreachable.
+ SubPatSet::Seq { subpats } => subpats.values().any(|set| set.is_empty()),
+ // An or-pattern is reachable if any of its alternatives is.
+ SubPatSet::Alt { subpats, .. } => subpats.values().all(|set| set.is_empty()),
+ }
}
- /// Iterate over the disjoint list of spans at the roots of this set.
- pub(crate) fn iter<'a>(&'a self) -> impl Iterator<Item = Span> + Captures<'a> {
- self.root_spans.iter().copied()
+ fn is_full(&self) -> bool {
+ match self {
+ SubPatSet::Empty => false,
+ SubPatSet::Full => true,
+ // The whole pattern is reachable only when all its alternatives are.
+ SubPatSet::Seq { subpats } => subpats.values().all(|sub_set| sub_set.is_full()),
+ // The whole or-pattern is reachable only when all its alternatives are.
+ SubPatSet::Alt { subpats, alt_count, .. } => {
+ subpats.len() == *alt_count && subpats.values().all(|set| set.is_full())
+ }
+ }
}
- /// Tests whether the set contains a given Span.
- fn contains(&self, span: Span) -> bool {
- self.iter().any(|root_span| root_span.contains(span))
- }
-
- /// Add a span to the set if we know the span has no intersection in this set.
- fn push_nonintersecting(&mut self, new_span: Span) {
- self.root_spans.push(new_span);
- }
-
- fn intersection_mut(&mut self, other: &Self) {
- if self.is_empty() || other.is_empty() {
- *self = Self::new();
+ /// Union `self` with `other`, mutating `self`.
+ fn union(&mut self, other: Self) {
+ use SubPatSet::*;
+ // Union with full stays full; union with empty changes nothing.
+ if self.is_full() || other.is_empty() {
+ return;
+ } else if self.is_empty() {
+ *self = other;
+ return;
+ } else if other.is_full() {
+ *self = Full;
return;
}
- // Those that were in `self` but not contained in `other`
- let mut leftover = SpanSet::new();
- // We keep the elements in `self` that are also in `other`.
- self.root_spans.retain(|span| {
- let retain = other.contains(*span);
- if !retain {
- leftover.root_spans.push(*span);
+
+ match (&mut *self, other) {
+ (Seq { subpats: s_set }, Seq { subpats: mut o_set }) => {
+ s_set.retain(|i, s_sub_set| {
+ // Missing entries count as full.
+ let o_sub_set = o_set.remove(&i).unwrap_or(Full);
+ s_sub_set.union(o_sub_set);
+ // We drop full entries.
+ !s_sub_set.is_full()
+ });
+ // Everything left in `o_set` is missing from `s_set`, i.e. counts as full. Since
+ // unioning with full returns full, we can drop those entries.
}
- retain
- });
- // We keep the elements in `other` that are also in the original `self`. You might think
- // this is not needed because `self` already contains the intersection. But those aren't
- // just sets of things. If `self = [a]`, `other = [b]` and `a` contains `b`, then `b`
- // belongs in the intersection but we didn't catch it in the filtering above. We look at
- // `leftover` instead of the full original `self` to avoid duplicates.
- for span in other.iter() {
- if leftover.contains(span) {
- self.root_spans.push(span);
+ (Alt { subpats: s_set, .. }, Alt { subpats: mut o_set, .. }) => {
+ s_set.retain(|i, s_sub_set| {
+ // Missing entries count as empty.
+ let o_sub_set = o_set.remove(&i).unwrap_or(Empty);
+ s_sub_set.union(o_sub_set);
+ // We drop empty entries.
+ !s_sub_set.is_empty()
+ });
+ // Everything left in `o_set` is missing from `s_set`, i.e. counts as empty. Since
+ // unioning with empty changes nothing, we can take those entries as is.
+ s_set.extend(o_set);
+ }
+ _ => bug!(),
+ }
+
+ if self.is_full() {
+ *self = Full;
+ }
+ }
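
A simplified, self-contained model of the union rule above, keeping only the empty/full/`Alt` cases with the same convention that missing `Alt` entries count as empty (names are illustrative; the real type also has a `Seq` case and keeps the pattern around for spans):

```
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum Set {
    Empty,
    Full,
    // One sub-set per or-pattern alternative; absent entries count as Empty.
    Alt(HashMap<usize, Set>),
}

fn union(a: Set, b: Set) -> Set {
    match (a, b) {
        // Union with full stays full; union with empty changes nothing.
        (Set::Full, _) | (_, Set::Full) => Set::Full,
        (Set::Empty, x) | (x, Set::Empty) => x,
        (Set::Alt(mut left), Set::Alt(right)) => {
            for (i, sub) in right {
                let merged = match left.remove(&i) {
                    Some(existing) => union(existing, sub),
                    None => sub, // missing on the left counts as Empty
                };
                left.insert(i, merged);
            }
            Set::Alt(left)
        }
    }
}

fn main() {
    // Alternative 0 is reachable from one branch and alternative 1 from the other,
    // so the union reports both alternatives as reachable overall.
    let left = Set::Alt(HashMap::from([(0, Set::Full)]));
    let right = Set::Alt(HashMap::from([(1, Set::Full)]));
    let merged = union(left, right);
    assert_eq!(merged, Set::Alt(HashMap::from([(0, Set::Full), (1, Set::Full)])));
}
```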
+
+ /// Returns a list of the spans of the unreachable subpatterns. If `self` is empty (i.e. the
+ /// whole pattern is unreachable) we return `None`.
+ fn list_unreachable_spans(&self) -> Option<Vec<Span>> {
+ /// Panics if `set.is_empty()`.
+ fn fill_spans(set: &SubPatSet<'_, '_>, spans: &mut Vec<Span>) {
+ match set {
+ SubPatSet::Empty => bug!(),
+ SubPatSet::Full => {}
+ SubPatSet::Seq { subpats } => {
+ for (_, sub_set) in subpats {
+ fill_spans(sub_set, spans);
+ }
+ }
+ SubPatSet::Alt { subpats, pat, alt_count, .. } => {
+ let expanded = pat.expand_or_pat();
+ for i in 0..*alt_count {
+ let sub_set = subpats.get(&i).unwrap_or(&SubPatSet::Empty);
+ if sub_set.is_empty() {
+                            // Found an unreachable subpattern.
+ spans.push(expanded[i].span);
+ } else {
+ fill_spans(sub_set, spans);
+ }
+ }
+ }
}
}
+
+ if self.is_empty() {
+ return None;
+ }
+ if self.is_full() {
+ // No subpatterns are unreachable.
+ return Some(Vec::new());
+ }
+ let mut spans = Vec::new();
+ fill_spans(self, &mut spans);
+ Some(spans)
+ }
+
+ /// When `self` refers to a patstack that was obtained from specialization, after running
+ /// `unspecialize` it will refer to the original patstack before specialization.
+ fn unspecialize(self, arity: usize) -> Self {
+ use SubPatSet::*;
+ match self {
+ Full => Full,
+ Empty => Empty,
+ Seq { subpats } => {
+ // We gather the first `arity` subpatterns together and shift the remaining ones.
+ let mut new_subpats = FxHashMap::default();
+ let mut new_subpats_first_col = FxHashMap::default();
+ for (i, sub_set) in subpats {
+ if i < arity {
+ // The first `arity` indices are now part of the pattern in the first
+ // column.
+ new_subpats_first_col.insert(i, sub_set);
+ } else {
+ // Indices after `arity` are simply shifted
+ new_subpats.insert(i - arity + 1, sub_set);
+ }
+ }
+ // If `new_subpats_first_col` has no entries it counts as full, so we can omit it.
+ if !new_subpats_first_col.is_empty() {
+ new_subpats.insert(0, Seq { subpats: new_subpats_first_col });
+ }
+ Seq { subpats: new_subpats }
+ }
+ Alt { .. } => bug!(), // `self` is a patstack
+ }
+ }
+
+ /// When `self` refers to a patstack that was obtained from splitting an or-pattern, after
+ /// running `unspecialize` it will refer to the original patstack before splitting.
+ ///
+ /// For example:
+ /// ```
+ /// match Some(true) {
+ /// Some(true) => {}
+ /// None | Some(true | false) => {}
+ /// }
+ /// ```
+ /// Here `None` would return the full set and `Some(true | false)` would return the set
+ /// containing `false`. After `unsplit_or_pat`, we want the set to contain `None` and `false`.
+ /// This is what this function does.
+ fn unsplit_or_pat(mut self, alt_id: usize, alt_count: usize, pat: &'p Pat<'tcx>) -> Self {
+ use SubPatSet::*;
+ if self.is_empty() {
+ return Empty;
+ }
+
+ // Subpatterns coming from inside the or-pattern alternative itself, e.g. in `None | Some(0
+ // | 1)`.
+ let set_first_col = match &mut self {
+ Full => Full,
+ Seq { subpats } => subpats.remove(&0).unwrap_or(Full),
+ Empty => unreachable!(),
+ Alt { .. } => bug!(), // `self` is a patstack
+ };
+ let mut subpats_first_col = FxHashMap::default();
+ subpats_first_col.insert(alt_id, set_first_col);
+ let set_first_col = Alt { subpats: subpats_first_col, pat, alt_count };
+
+ let mut subpats = match self {
+ Full => FxHashMap::default(),
+ Seq { subpats } => subpats,
+ Empty => unreachable!(),
+ Alt { .. } => bug!(), // `self` is a patstack
+ };
+ subpats.insert(0, set_first_col);
+ Seq { subpats }
}
}
+/// This carries the results of computing usefulness, as described at the top of the file. When
+/// checking usefulness of a match branch, we use the `NoWitnesses` variant, which also keeps track
+/// of potentially unreachable sub-patterns (in the presence of or-patterns). When checking
+/// exhaustiveness of a whole match, we use the `WithWitnesses` variant, which carries a list of
+/// witnesses of non-exhaustiveness when there are any.
+/// Which variant to use is dictated by `WitnessPreference`.
#[derive(Clone, Debug)]
-crate enum Usefulness<'tcx> {
-    /// Potentially carries a set of sub-branches that have been found to be unreachable. Used
- /// only in the presence of or-patterns, otherwise it stays empty.
- Useful(SpanSet),
- /// Carries a list of witnesses of non-exhaustiveness.
- UsefulWithWitness(Vec<Witness<'tcx>>),
- NotUseful,
+enum Usefulness<'p, 'tcx> {
+ /// Carries a set of subpatterns that have been found to be reachable. If empty, this indicates
+ /// the whole pattern is unreachable. If not, this indicates that the pattern is reachable but
+ /// that some sub-patterns may be unreachable (due to or-patterns). In the absence of
+ /// or-patterns this will always be either `Empty` (the whole pattern is unreachable) or `Full`
+ /// (the whole pattern is reachable).
+ NoWitnesses(SubPatSet<'p, 'tcx>),
+ /// Carries a list of witnesses of non-exhaustiveness. If empty, indicates that the whole
+ /// pattern is unreachable.
+ WithWitnesses(Vec<Witness<'tcx>>),
}
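
Roughly how the two variants above surface to users: `WithWitnesses` backs the exhaustiveness error (its witnesses become the "patterns not covered" in the message), and `NoWitnesses` backs the unreachable-pattern lint. An illustrative program for the lint side:

```
fn main() {
    let x: Option<bool> = Some(true);
    match x {
        Some(_) => {}
        None => {}
        // Checking this arm yields an empty reachable set (`NoWitnesses(SubPatSet::Empty)`),
        // so it is reported: warning: unreachable pattern
        Some(true) => {}
    }
}
```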
-impl<'tcx> Usefulness<'tcx> {
+impl<'p, 'tcx> Usefulness<'p, 'tcx> {
fn new_useful(preference: WitnessPreference) -> Self {
match preference {
- ConstructWitness => UsefulWithWitness(vec![Witness(vec![])]),
- LeaveOutWitness => Useful(Default::default()),
+ ConstructWitness => WithWitnesses(vec![Witness(vec![])]),
+ LeaveOutWitness => NoWitnesses(SubPatSet::full()),
+ }
+ }
+ fn new_not_useful(preference: WitnessPreference) -> Self {
+ match preference {
+ ConstructWitness => WithWitnesses(vec![]),
+ LeaveOutWitness => NoWitnesses(SubPatSet::empty()),
+ }
+ }
+
+ /// Combine usefulnesses from two branches. This is an associative operation.
+ fn extend(&mut self, other: Self) {
+ match (&mut *self, other) {
+ (WithWitnesses(_), WithWitnesses(o)) if o.is_empty() => {}
+ (WithWitnesses(s), WithWitnesses(o)) if s.is_empty() => *self = WithWitnesses(o),
+ (WithWitnesses(s), WithWitnesses(o)) => s.extend(o),
+ (NoWitnesses(s), NoWitnesses(o)) => s.union(o),
+ _ => unreachable!(),
}
}
/// When trying several branches and each returns a `Usefulness`, we need to combine the
/// results together.
- fn merge(usefulnesses: impl Iterator<Item = Self>) -> Self {
- // If we have detected some unreachable sub-branches, we only want to keep them when they
- // were unreachable in _all_ branches. Eg. in the following, the last `true` is unreachable
- // in the second branch of the first or-pattern, but not otherwise. Therefore we don't want
- // to lint that it is unreachable.
- // ```
- // match (true, true) {
- // (true, true) => {}
- // (false | true, false | true) => {}
- // }
- // ```
- // Here however we _do_ want to lint that the last `false` is unreachable. So we don't want
- // to intersect the spans that come directly from the or-pattern, since each branch of the
- // or-pattern brings a new disjoint pattern.
- // ```
- // match None {
- // Some(false) => {}
- // None | Some(true | false) => {}
- // }
- // ```
-
- // Is `None` when no branch was useful. Will often be `Some(Spanset::new())` because the
- // sets are only non-empty in the presence of or-patterns.
- let mut unreachables: Option<SpanSet> = None;
- // Witnesses of usefulness, if any.
- let mut witnesses = Vec::new();
-
+ fn merge(pref: WitnessPreference, usefulnesses: impl Iterator<Item = Self>) -> Self {
+ let mut ret = Self::new_not_useful(pref);
for u in usefulnesses {
- match u {
- Useful(spans) if spans.is_empty() => {
- // Once we reach the empty set, more intersections won't change the result.
- return Useful(SpanSet::new());
- }
- Useful(spans) => {
- if let Some(unreachables) = &mut unreachables {
- if !unreachables.is_empty() {
- unreachables.intersection_mut(&spans);
- }
- if unreachables.is_empty() {
- return Useful(SpanSet::new());
- }
- } else {
- unreachables = Some(spans);
- }
- }
- NotUseful => {}
- UsefulWithWitness(wits) => {
- witnesses.extend(wits);
+ ret.extend(u);
+ if let NoWitnesses(subpats) = &ret {
+ if subpats.is_full() {
+ // Once we reach the full set, more unions won't change the result.
+ return ret;
}
}
}
-
- if !witnesses.is_empty() {
- UsefulWithWitness(witnesses)
- } else if let Some(unreachables) = unreachables {
- Useful(unreachables)
- } else {
- NotUseful
- }
+ ret
}
/// After calculating the usefulness for a branch of an or-pattern, call this to make this
/// usefulness mergeable with those from the other branches.
- fn unsplit_or_pat(self, this_span: Span, or_pat_spans: &[Span]) -> Self {
+ fn unsplit_or_pat(self, alt_id: usize, alt_count: usize, pat: &'p Pat<'tcx>) -> Self {
match self {
- Useful(mut spans) => {
- // We register the spans of the other branches of this or-pattern as being
- // unreachable from this one. This ensures that intersecting together the sets of
- // spans returns what we want.
- // Until we optimize `SpanSet` however, intersecting this entails a number of
- // comparisons quadratic in the number of branches.
- for &span in or_pat_spans {
- if span != this_span {
- spans.push_nonintersecting(span);
- }
- }
- Useful(spans)
- }
- x => x,
+ NoWitnesses(subpats) => NoWitnesses(subpats.unsplit_or_pat(alt_id, alt_count, pat)),
+ WithWitnesses(_) => bug!(),
}
}
    /// After calculating usefulness after a specialization, call this to reconstruct a usefulness
/// that makes sense for the matrix pre-specialization. This new usefulness can then be merged
/// with the results of specializing with the other constructors.
- fn apply_constructor<'p>(
+ fn apply_constructor(
self,
pcx: PatCtxt<'_, 'p, 'tcx>,
matrix: &Matrix<'p, 'tcx>, // used to compute missing ctors
@@ -784,7 +939,8 @@
ctor_wild_subpatterns: &Fields<'p, 'tcx>,
) -> Self {
match self {
- UsefulWithWitness(witnesses) => {
+ WithWitnesses(witnesses) if witnesses.is_empty() => WithWitnesses(witnesses),
+ WithWitnesses(witnesses) => {
let new_witnesses = if matches!(ctor, Constructor::Missing) {
let mut split_wildcard = SplitWildcard::new(pcx);
split_wildcard.split(pcx, matrix.head_ctors(pcx.cx));
@@ -814,9 +970,9 @@
.map(|witness| witness.apply_constructor(pcx, &ctor, ctor_wild_subpatterns))
.collect()
};
- UsefulWithWitness(new_witnesses)
+ WithWitnesses(new_witnesses)
}
- x => x,
+ NoWitnesses(subpats) => NoWitnesses(subpats.unspecialize(ctor_wild_subpatterns.len())),
}
}
}
@@ -924,6 +1080,10 @@
/// `is_under_guard` is used to inform if the pattern has a guard. If it
/// has one it must not be inserted into the matrix. This shouldn't be
/// relied on for soundness.
+#[instrument(
+ level = "debug",
+ skip(cx, matrix, witness_preference, hir_id, is_under_guard, is_top_level)
+)]
fn is_useful<'p, 'tcx>(
cx: &MatchCheckCtxt<'p, 'tcx>,
matrix: &Matrix<'p, 'tcx>,
@@ -932,9 +1092,9 @@
hir_id: HirId,
is_under_guard: bool,
is_top_level: bool,
-) -> Usefulness<'tcx> {
+) -> Usefulness<'p, 'tcx> {
+ debug!("matrix,v={:?}{:?}", matrix, v);
let Matrix { patterns: rows, .. } = matrix;
- debug!("is_useful({:#?}, {:#?})", matrix, v);
// The base case. We are pattern-matching on () and the return value is
// based on whether our matrix has a row or not.
@@ -942,12 +1102,14 @@
// first and then, if v is non-empty, the return value is based on whether
// the type of the tuple we're checking is inhabited or not.
if v.is_empty() {
- return if rows.is_empty() {
+ let ret = if rows.is_empty() {
Usefulness::new_useful(witness_preference)
} else {
- NotUseful
+ Usefulness::new_not_useful(witness_preference)
};
- };
+ debug!(?ret);
+ return ret;
+ }
assert!(rows.iter().all(|r| r.len() == v.len()));
@@ -955,16 +1117,15 @@
let ty = matrix.heads().next().map_or(v.head().ty, |r| r.ty);
let pcx = PatCtxt { cx, ty, span: v.head().span, is_top_level };
- debug!("is_useful_expand_first_col: ty={:#?}, expanding {:#?}", pcx.ty, v.head());
-
// If the first pattern is an or-pattern, expand it.
- let ret = if let Some(vs) = v.expand_or_pat() {
- let subspans: Vec<_> = vs.iter().map(|v| v.head().span).collect();
- // We expand the or pattern, trying each of its branches in turn and keeping careful track
- // of possible unreachable sub-branches.
+ let ret = if v.head().is_or_pat() {
+ debug!("expanding or-pattern");
+ let v_head = v.head();
+ let vs: Vec<_> = v.expand_or_pat().collect();
+ let alt_count = vs.len();
+ // We try each or-pattern branch in turn.
let mut matrix = matrix.clone();
- let usefulnesses = vs.into_iter().map(|v| {
- let v_span = v.head().span;
+ let usefulnesses = vs.into_iter().enumerate().map(|(i, v)| {
let usefulness =
is_useful(cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false);
// If pattern has a guard don't add it to the matrix.
@@ -973,9 +1134,9 @@
// branches like `Some(_) | Some(0)`.
matrix.push(v);
}
- usefulness.unsplit_or_pat(v_span, &subspans)
+ usefulness.unsplit_or_pat(i, alt_count, v_head)
});
- Usefulness::merge(usefulnesses)
+ Usefulness::merge(witness_preference, usefulnesses)
} else {
let v_ctor = v.head_ctor(cx);
if let Constructor::IntRange(ctor_range) = &v_ctor {
@@ -993,6 +1154,7 @@
// witness the usefulness of `v`.
let start_matrix = &matrix;
let usefulnesses = split_ctors.into_iter().map(|ctor| {
+ debug!("specialize({:?})", ctor);
// We cache the result of `Fields::wildcards` because it is used a lot.
let ctor_wild_subpatterns = Fields::wildcards(pcx, &ctor);
let spec_matrix =
@@ -1002,9 +1164,9 @@
is_useful(cx, &spec_matrix, &v, witness_preference, hir_id, is_under_guard, false);
usefulness.apply_constructor(pcx, start_matrix, &ctor, &ctor_wild_subpatterns)
});
- Usefulness::merge(usefulnesses)
+ Usefulness::merge(witness_preference, usefulnesses)
};
- debug!("is_useful::returns({:#?}, {:#?}) = {:?}", matrix, v, ret);
+ debug!(?ret);
ret
}
@@ -1017,10 +1179,21 @@
crate has_guard: bool,
}
+/// Indicates whether or not a given arm is reachable.
+#[derive(Clone, Debug)]
+crate enum Reachability {
+ /// The arm is reachable. This additionally carries a set of or-pattern branches that have been
+ /// found to be unreachable despite the overall arm being reachable. Used only in the presence
+ /// of or-patterns, otherwise it stays empty.
+ Reachable(Vec<Span>),
+ /// The arm is unreachable.
+ Unreachable,
+}
+
/// The output of checking a match for exhaustiveness and arm reachability.
crate struct UsefulnessReport<'p, 'tcx> {
/// For each arm of the input, whether that arm is reachable after the arms above it.
- crate arm_usefulness: Vec<(MatchArm<'p, 'tcx>, Usefulness<'tcx>)>,
+ crate arm_usefulness: Vec<(MatchArm<'p, 'tcx>, Reachability)>,
/// If the match is exhaustive, this is empty. If not, this contains witnesses for the lack of
/// exhaustiveness.
crate non_exhaustiveness_witnesses: Vec<super::Pat<'tcx>>,
@@ -1048,7 +1221,14 @@
if !arm.has_guard {
matrix.push(v);
}
- (arm, usefulness)
+ let reachability = match usefulness {
+ NoWitnesses(subpats) if subpats.is_empty() => Reachability::Unreachable,
+ NoWitnesses(subpats) => {
+ Reachability::Reachable(subpats.list_unreachable_spans().unwrap())
+ }
+ WithWitnesses(..) => bug!(),
+ };
+ (arm, reachability)
})
.collect();
@@ -1056,15 +1236,8 @@
let v = PatStack::from_pattern(wild_pattern);
let usefulness = is_useful(cx, &matrix, &v, ConstructWitness, scrut_hir_id, false, true);
let non_exhaustiveness_witnesses = match usefulness {
- NotUseful => vec![], // Wildcard pattern isn't useful, so the match is exhaustive.
- UsefulWithWitness(pats) => {
- if pats.is_empty() {
- bug!("Exhaustiveness check returned no witnesses")
- } else {
- pats.into_iter().map(|w| w.single_pattern()).collect()
- }
- }
- Useful(_) => bug!(),
+ WithWitnesses(pats) => pats.into_iter().map(|w| w.single_pattern()).collect(),
+ NoWitnesses(_) => bug!(),
};
UsefulnessReport { arm_usefulness, non_exhaustiveness_witnesses }
}
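
For orientation, the merge discipline described in the `Usefulness` comments above can be reduced to a small standalone sketch: under `LeaveOutWitness`, each branch's result carries a set of reachable sub-patterns, branches are combined by union, and the loop can stop early once the set is full. The types below (`Reachable`, `merge`) are illustrative stand-ins only, not the imported `SubPatSet`/`Usefulness` API.

use std::collections::BTreeSet;

/// Illustrative stand-in for `SubPatSet`: which sub-pattern indices (out of `total`)
/// have been found reachable so far.
struct Reachable {
    set: BTreeSet<usize>,
    total: usize,
}

impl Reachable {
    fn empty(total: usize) -> Self {
        Reachable { set: BTreeSet::new(), total }
    }
    fn is_full(&self) -> bool {
        self.set.len() == self.total
    }
    /// Mirrors the `SubPatSet::union` step performed by `Usefulness::extend`.
    fn union(&mut self, other: &Reachable) {
        self.set.extend(other.set.iter().copied());
    }
}

/// Mirrors `Usefulness::merge` under `LeaveOutWitness`: start from "nothing reachable",
/// union in each branch, and stop once the set is full, since further unions cannot
/// change the result.
fn merge<'a>(total: usize, branches: impl IntoIterator<Item = &'a Reachable>) -> Reachable {
    let mut ret = Reachable::empty(total);
    for b in branches {
        ret.union(b);
        if ret.is_full() {
            break;
        }
    }
    ret
}

fn main() {
    let a = Reachable { set: [0usize].iter().copied().collect(), total: 3 };
    let b = Reachable { set: [1usize, 2].iter().copied().collect(), total: 3 };
    assert!(merge(3, vec![&a, &b]).is_full());
}
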
diff --git a/compiler/rustc_parse/Cargo.toml b/compiler/rustc_parse/Cargo.toml
index 52835e5..c887729 100644
--- a/compiler/rustc_parse/Cargo.toml
+++ b/compiler/rustc_parse/Cargo.toml
@@ -19,4 +19,4 @@
rustc_span = { path = "../rustc_span" }
rustc_ast = { path = "../rustc_ast" }
unicode-normalization = "0.1.11"
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs
index 4a638ec..4bf870e 100644
--- a/compiler/rustc_parse/src/lexer/mod.rs
+++ b/compiler/rustc_parse/src/lexer/mod.rs
@@ -268,6 +268,9 @@
// tokens like `<<` from `rustc_lexer`, and then add fancier error recovery to it,
// as there will be less overall work to do this way.
let token = unicode_chars::check_for_substitution(self, start, c, &mut err);
+ if c == '\x00' {
+ err.help("source files must contain UTF-8 encoded text, unexpected null bytes might occur when a different encoding is used");
+ }
err.emit();
token?
}
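
The help message added above targets the common case where a source file was saved with a non-UTF-8 encoding such as UTF-16: every ASCII character then carries a 0x00 companion byte, which the lexer reports as an unexpected NUL character. A quick self-contained illustration (not part of the imported sources):

fn main() {
    // "fn" encoded as UTF-16LE: each ASCII code unit gains a trailing 0x00 byte,
    // which a UTF-8 lexer would see as embedded NUL characters.
    let utf16le: Vec<u8> = "fn".encode_utf16().flat_map(|u| u.to_le_bytes().to_vec()).collect();
    assert_eq!(utf16le, vec![0x66, 0x00, 0x6e, 0x00]);
}
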
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index f155f3a..001b52b 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -6,11 +6,12 @@
#![feature(or_patterns)]
#![feature(box_syntax)]
#![feature(box_patterns)]
+#![recursion_limit = "256"]
use rustc_ast as ast;
-use rustc_ast::attr::HasAttrs;
use rustc_ast::token::{self, Nonterminal};
use rustc_ast::tokenstream::{self, CanSynthesizeMissingTokens, LazyTokenStream, TokenStream};
+use rustc_ast::AstLike;
use rustc_ast_pretty::pprust;
use rustc_data_structures::sync::Lrc;
use rustc_errors::{Diagnostic, FatalError, Level, PResult};
diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs
index 1b26fb3..95d4a48 100644
--- a/compiler/rustc_parse/src/parser/attr.rs
+++ b/compiler/rustc_parse/src/parser/attr.rs
@@ -1,4 +1,4 @@
-use super::{Parser, PathStyle};
+use super::{AttrWrapper, Parser, PathStyle};
use rustc_ast as ast;
use rustc_ast::attr;
use rustc_ast::token::{self, Nonterminal};
@@ -26,7 +26,7 @@
impl<'a> Parser<'a> {
/// Parses attributes that appear before an item.
- pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, Vec<ast::Attribute>> {
+ pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, AttrWrapper> {
let mut attrs: Vec<ast::Attribute> = Vec::new();
let mut just_parsed_doc_comment = false;
loop {
@@ -74,7 +74,7 @@
break;
}
}
- Ok(attrs)
+ Ok(AttrWrapper::new(attrs))
}
/// Matches `attribute = # ! [ meta_item ]`.
@@ -89,7 +89,8 @@
inner_parse_policy, self.token
);
let lo = self.token.span;
- self.collect_tokens(|this| {
+        // Attributes can't have attributes of their own
+ self.collect_tokens_no_attrs(|this| {
if this.eat(&token::Pound) {
let style = if this.eat(&token::Not) {
ast::AttrStyle::Inner
@@ -163,7 +164,8 @@
let args = this.parse_attr_args()?;
Ok(ast::AttrItem { path, args, tokens: None })
};
- if capture_tokens { self.collect_tokens(do_parse) } else { do_parse(self) }?
+ // Attr items don't have attributes
+ if capture_tokens { self.collect_tokens_no_attrs(do_parse) } else { do_parse(self) }?
})
}
@@ -306,13 +308,11 @@
}
pub fn maybe_needs_tokens(attrs: &[ast::Attribute]) -> bool {
- // One of the attributes may either itself be a macro, or apply derive macros (`derive`),
+ // One of the attributes may either itself be a macro,
// or expand to macro attributes (`cfg_attr`).
attrs.iter().any(|attr| {
attr.ident().map_or(true, |ident| {
- ident.name == sym::derive
- || ident.name == sym::cfg_attr
- || !rustc_feature::is_builtin_attr_name(ident.name)
+ ident.name == sym::cfg_attr || !rustc_feature::is_builtin_attr_name(ident.name)
})
})
}
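
The narrowed `maybe_needs_tokens` check above decides whether token capture is required at all: only `cfg_attr` (which can splice in further attributes) or a non-builtin attribute name (which may resolve to an attribute proc-macro) forces capture, and `derive` no longer does here. A toy model of that decision, using an illustrative builtin list rather than `rustc_feature`'s real table:

/// Sketch of the capture decision; the builtin list and names are illustrative only.
fn maybe_needs_tokens(attr_names: &[&str]) -> bool {
    const BUILTIN: &[&str] = &["inline", "allow", "deny", "cfg", "derive"]; // illustrative subset
    attr_names
        .iter()
        .any(|name| *name == "cfg_attr" || !BUILTIN.contains(name))
}

fn main() {
    assert!(!maybe_needs_tokens(&["inline", "allow"])); // builtin-only: no capture needed
    assert!(maybe_needs_tokens(&["cfg_attr"]));         // may introduce new attributes
    assert!(maybe_needs_tokens(&["serde"]));            // unknown: could be a proc-macro attribute
}
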
diff --git a/compiler/rustc_parse/src/parser/attr_wrapper.rs b/compiler/rustc_parse/src/parser/attr_wrapper.rs
new file mode 100644
index 0000000..7512f46
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/attr_wrapper.rs
@@ -0,0 +1,189 @@
+use super::attr;
+use super::{ForceCollect, Parser, TokenCursor, TrailingToken};
+use rustc_ast::token::{self, Token, TokenKind};
+use rustc_ast::tokenstream::{CreateTokenStream, TokenStream, TokenTree, TreeAndSpacing};
+use rustc_ast::tokenstream::{DelimSpan, LazyTokenStream, Spacing};
+use rustc_ast::AstLike;
+use rustc_ast::{self as ast};
+use rustc_errors::PResult;
+use rustc_span::{Span, DUMMY_SP};
+
+/// A wrapper type to ensure that the parser handles outer attributes correctly.
+/// When we parse outer attributes, we need to ensure that we capture tokens
+/// for the attribute target. This allows us to perform cfg-expansion on
+/// a token stream before we invoke a derive proc-macro.
+///
+/// This wrapper prevents direct access to the underlying `Vec<ast::Attribute>`.
+/// Parsing code can only get access to the underlying attributes
+/// by passing an `AttrWrapper` to `collect_tokens_trailing_tokens`.
+/// This makes it difficult to accidentally construct an AST node
+/// (which stores a `Vec<ast::Attribute>`) without first collecting tokens.
+///
+/// This struct has its own module, to ensure that the parser code
+/// cannot directly access the `attrs` field
+#[derive(Debug, Clone)]
+pub struct AttrWrapper {
+ attrs: Vec<ast::Attribute>,
+}
+
+impl AttrWrapper {
+ pub fn empty() -> AttrWrapper {
+ AttrWrapper { attrs: vec![] }
+ }
+ pub fn new(attrs: Vec<ast::Attribute>) -> AttrWrapper {
+ AttrWrapper { attrs }
+ }
+ // FIXME: Delay span bug here?
+ pub(crate) fn take_for_recovery(self) -> Vec<ast::Attribute> {
+ self.attrs
+ }
+ pub fn is_empty(&self) -> bool {
+ self.attrs.is_empty()
+ }
+}
+
+impl<'a> Parser<'a> {
+ /// Records all tokens consumed by the provided callback,
+ /// including the current token. These tokens are collected
+ /// into a `LazyTokenStream`, and returned along with the result
+ /// of the callback.
+ ///
+ /// Note: If your callback consumes an opening delimiter
+ /// (including the case where you call `collect_tokens`
+    /// when the current token is an opening delimiter),
+ /// you must also consume the corresponding closing delimiter.
+ ///
+ /// That is, you can consume
+ /// `something ([{ }])` or `([{}])`, but not `([{}]`
+ ///
+ /// This restriction shouldn't be an issue in practice,
+ /// since this function is used to record the tokens for
+ /// a parsed AST item, which always has matching delimiters.
+ pub fn collect_tokens_trailing_token<R: AstLike>(
+ &mut self,
+ attrs: AttrWrapper,
+ force_collect: ForceCollect,
+ f: impl FnOnce(&mut Self, Vec<ast::Attribute>) -> PResult<'a, (R, TrailingToken)>,
+ ) -> PResult<'a, R> {
+ if matches!(force_collect, ForceCollect::No) && !attr::maybe_needs_tokens(&attrs.attrs) {
+ return Ok(f(self, attrs.attrs)?.0);
+ }
+ let start_token = (self.token.clone(), self.token_spacing);
+ let cursor_snapshot = self.token_cursor.clone();
+
+ let (mut ret, trailing_token) = f(self, attrs.attrs)?;
+ let tokens = match ret.tokens_mut() {
+ Some(tokens) if tokens.is_none() => tokens,
+ _ => return Ok(ret),
+ };
+
+ // Produces a `TokenStream` on-demand. Using `cursor_snapshot`
+ // and `num_calls`, we can reconstruct the `TokenStream` seen
+ // by the callback. This allows us to avoid producing a `TokenStream`
+ // if it is never needed - for example, a captured `macro_rules!`
+ // argument that is never passed to a proc macro.
+ // In practice token stream creation happens rarely compared to
+ // calls to `collect_tokens` (see some statistics in #78736),
+ // so we are doing as little up-front work as possible.
+ //
+ // This also makes `Parser` very cheap to clone, since
+ // there is no intermediate collection buffer to clone.
+ #[derive(Clone)]
+ struct LazyTokenStreamImpl {
+ start_token: (Token, Spacing),
+ cursor_snapshot: TokenCursor,
+ num_calls: usize,
+ desugar_doc_comments: bool,
+ append_unglued_token: Option<TreeAndSpacing>,
+ }
+ impl CreateTokenStream for LazyTokenStreamImpl {
+ fn create_token_stream(&self) -> TokenStream {
+ // The token produced by the final call to `next` or `next_desugared`
+ // was not actually consumed by the callback. The combination
+ // of chaining the initial token and using `take` produces the desired
+ // result - we produce an empty `TokenStream` if no calls were made,
+ // and omit the final token otherwise.
+ let mut cursor_snapshot = self.cursor_snapshot.clone();
+ let tokens = std::iter::once(self.start_token.clone())
+ .chain((0..self.num_calls).map(|_| {
+ if self.desugar_doc_comments {
+ cursor_snapshot.next_desugared()
+ } else {
+ cursor_snapshot.next()
+ }
+ }))
+ .take(self.num_calls);
+
+ make_token_stream(tokens, self.append_unglued_token.clone())
+ }
+ }
+
+ let mut num_calls = self.token_cursor.num_next_calls - cursor_snapshot.num_next_calls;
+ match trailing_token {
+ TrailingToken::None => {}
+ TrailingToken::Semi => {
+ assert_eq!(self.token.kind, token::Semi);
+ num_calls += 1;
+ }
+ TrailingToken::MaybeComma => {
+ if self.token.kind == token::Comma {
+ num_calls += 1;
+ }
+ }
+ }
+
+ *tokens = Some(LazyTokenStream::new(LazyTokenStreamImpl {
+ start_token,
+ num_calls,
+ cursor_snapshot,
+ desugar_doc_comments: self.desugar_doc_comments,
+ append_unglued_token: self.token_cursor.append_unglued_token.clone(),
+ }));
+
+ Ok(ret)
+ }
+}
+
+/// Converts a flattened iterator of tokens (including open and close delimiter tokens)
+/// into a `TokenStream`, creating a `TokenTree::Delimited` for each matching pair
+/// of open and close delims.
+fn make_token_stream(
+ tokens: impl Iterator<Item = (Token, Spacing)>,
+ append_unglued_token: Option<TreeAndSpacing>,
+) -> TokenStream {
+ #[derive(Debug)]
+ struct FrameData {
+ open: Span,
+ inner: Vec<(TokenTree, Spacing)>,
+ }
+ let mut stack = vec![FrameData { open: DUMMY_SP, inner: vec![] }];
+ for (token, spacing) in tokens {
+ match token {
+ Token { kind: TokenKind::OpenDelim(_), span } => {
+ stack.push(FrameData { open: span, inner: vec![] });
+ }
+ Token { kind: TokenKind::CloseDelim(delim), span } => {
+ let frame_data = stack.pop().expect("Token stack was empty!");
+ let dspan = DelimSpan::from_pair(frame_data.open, span);
+ let stream = TokenStream::new(frame_data.inner);
+ let delimited = TokenTree::Delimited(dspan, delim, stream);
+ stack
+ .last_mut()
+ .unwrap_or_else(|| panic!("Bottom token frame is missing for tokens!"))
+ .inner
+ .push((delimited, Spacing::Alone));
+ }
+ token => {
+ stack
+ .last_mut()
+ .expect("Bottom token frame is missing!")
+ .inner
+ .push((TokenTree::Token(token), spacing));
+ }
+ }
+ }
+ let mut final_buf = stack.pop().expect("Missing final buf!");
+ final_buf.inner.extend(append_unglued_token);
+ assert!(stack.is_empty(), "Stack should be empty: final_buf={:?} stack={:?}", final_buf, stack);
+ TokenStream::new(final_buf.inner)
+}
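
`make_token_stream` above rebuilds a tree from the flat token sequence recorded by `LazyTokenStreamImpl` using a simple frame stack: an open delimiter pushes a frame, and a close delimiter pops it and wraps its contents into one delimited node on the parent frame. The same discipline, reduced to a toy token type (illustrative only, not the imported API):

enum Tok {
    Open,
    Close,
    Ident(char),
}

#[derive(Debug, PartialEq)]
enum Tree {
    Leaf(char),
    Delimited(Vec<Tree>),
}

fn build(tokens: impl IntoIterator<Item = Tok>) -> Vec<Tree> {
    let mut stack: Vec<Vec<Tree>> = vec![Vec::new()]; // bottom frame = top-level stream
    for tok in tokens {
        match tok {
            Tok::Open => stack.push(Vec::new()),
            Tok::Close => {
                let inner = stack.pop().expect("token stack was empty");
                stack
                    .last_mut()
                    .expect("bottom token frame is missing")
                    .push(Tree::Delimited(inner));
            }
            Tok::Ident(c) => stack.last_mut().unwrap().push(Tree::Leaf(c)),
        }
    }
    assert_eq!(stack.len(), 1, "unbalanced delimiters");
    stack.pop().unwrap()
}

fn main() {
    // `a ( b c )` becomes [Leaf('a'), Delimited([Leaf('b'), Leaf('c')])].
    let stream = build(vec![
        Tok::Ident('a'),
        Tok::Open,
        Tok::Ident('b'),
        Tok::Ident('c'),
        Tok::Close,
    ]);
    assert_eq!(
        stream,
        vec![Tree::Leaf('a'), Tree::Delimited(vec![Tree::Leaf('b'), Tree::Leaf('c')])]
    );
}
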
diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs
index 5512e84..77e85c0 100644
--- a/compiler/rustc_parse/src/parser/diagnostics.rs
+++ b/compiler/rustc_parse/src/parser/diagnostics.rs
@@ -223,7 +223,7 @@
fn tokens_to_string(tokens: &[TokenType]) -> String {
let mut i = tokens.iter();
// This might be a sign we need a connect method on `Iterator`.
- let b = i.next().map_or(String::new(), |t| t.to_string());
+ let b = i.next().map_or_else(String::new, |t| t.to_string());
i.enumerate().fold(b, |mut b, (i, a)| {
if tokens.len() > 2 && i == tokens.len() - 2 {
b.push_str(", or ");
@@ -640,7 +640,7 @@
}
}
Err(mut err) => {
- // We could't parse generic parameters, unlikely to be a turbofish. Rely on
+ // We couldn't parse generic parameters, unlikely to be a turbofish. Rely on
// generic parse error instead.
err.cancel();
*self = snapshot;
@@ -662,7 +662,7 @@
let x = self.parse_seq_to_before_end(
&token::Gt,
SeqSep::trailing_allowed(token::Comma),
- |p| p.parse_ty(),
+ |p| p.parse_generic_arg(),
);
match x {
Ok((_, _, false)) => {
@@ -1242,7 +1242,7 @@
let is_question = self.eat(&token::Question); // Handle `await? <expr>`.
let expr = if self.token == token::OpenDelim(token::Brace) {
// Handle `await { <expr> }`.
- // This needs to be handled separatedly from the next arm to avoid
+ // This needs to be handled separately from the next arm to avoid
// interpreting `await { <expr> }?` as `<expr>?.await`.
self.parse_block_expr(None, self.token.span, BlockCheckMode::Default, AttrVec::new())
} else {
@@ -1613,48 +1613,88 @@
Applicability::HasPlaceholders,
);
return Some(ident);
- } else if let PatKind::Ident(_, ident, _) = pat.kind {
- if require_name
- && (self.token == token::Comma
- || self.token == token::Lt
- || self.token == token::CloseDelim(token::Paren))
- {
- // `fn foo(a, b) {}`, `fn foo(a<x>, b<y>) {}` or `fn foo(usize, usize) {}`
- if first_param {
- err.span_suggestion(
- pat.span,
- "if this is a `self` type, give it a parameter name",
- format!("self: {}", ident),
- Applicability::MaybeIncorrect,
- );
+ } else if require_name
+ && (self.token == token::Comma
+ || self.token == token::Lt
+ || self.token == token::CloseDelim(token::Paren))
+ {
+ let rfc_note = "anonymous parameters are removed in the 2018 edition (see RFC 1685)";
+
+ let (ident, self_sugg, param_sugg, type_sugg) = match pat.kind {
+ PatKind::Ident(_, ident, _) => (
+ ident,
+ format!("self: {}", ident),
+ format!("{}: TypeName", ident),
+ format!("_: {}", ident),
+ ),
+ // Also catches `fn foo(&a)`.
+ PatKind::Ref(ref pat, mutab)
+ if matches!(pat.clone().into_inner().kind, PatKind::Ident(..)) =>
+ {
+ match pat.clone().into_inner().kind {
+ PatKind::Ident(_, ident, _) => {
+ let mutab = mutab.prefix_str();
+ (
+ ident,
+ format!("self: &{}{}", mutab, ident),
+ format!("{}: &{}TypeName", ident, mutab),
+ format!("_: &{}{}", mutab, ident),
+ )
+ }
+ _ => unreachable!(),
+ }
}
- // Avoid suggesting that `fn foo(HashMap<u32>)` is fixed with a change to
- // `fn foo(HashMap: TypeName<u32>)`.
- if self.token != token::Lt {
- err.span_suggestion(
- pat.span,
- "if this is a parameter name, give it a type",
- format!("{}: TypeName", ident),
- Applicability::HasPlaceholders,
- );
+ _ => {
+ // Otherwise, try to get a type and emit a suggestion.
+ if let Some(ty) = pat.to_ty() {
+ err.span_suggestion_verbose(
+ pat.span,
+ "explicitly ignore the parameter name",
+ format!("_: {}", pprust::ty_to_string(&ty)),
+ Applicability::MachineApplicable,
+ );
+ err.note(rfc_note);
+ }
+
+ return None;
}
+ };
+
+ // `fn foo(a, b) {}`, `fn foo(a<x>, b<y>) {}` or `fn foo(usize, usize) {}`
+ if first_param {
err.span_suggestion(
pat.span,
- "if this is a type, explicitly ignore the parameter name",
- format!("_: {}", ident),
- Applicability::MachineApplicable,
+ "if this is a `self` type, give it a parameter name",
+ self_sugg,
+ Applicability::MaybeIncorrect,
);
- err.note("anonymous parameters are removed in the 2018 edition (see RFC 1685)");
-
- // Don't attempt to recover by using the `X` in `X<Y>` as the parameter name.
- return if self.token == token::Lt { None } else { Some(ident) };
}
+ // Avoid suggesting that `fn foo(HashMap<u32>)` is fixed with a change to
+ // `fn foo(HashMap: TypeName<u32>)`.
+ if self.token != token::Lt {
+ err.span_suggestion(
+ pat.span,
+ "if this is a parameter name, give it a type",
+ param_sugg,
+ Applicability::HasPlaceholders,
+ );
+ }
+ err.span_suggestion(
+ pat.span,
+ "if this is a type, explicitly ignore the parameter name",
+ type_sugg,
+ Applicability::MachineApplicable,
+ );
+ err.note(rfc_note);
+
+ // Don't attempt to recover by using the `X` in `X<Y>` as the parameter name.
+ return if self.token == token::Lt { None } else { Some(ident) };
}
None
}
pub(super) fn recover_arg_parse(&mut self) -> PResult<'a, (P<ast::Pat>, P<ast::Ty>)> {
- let pat = self.parse_pat(Some("argument name"))?;
+ let pat = self.parse_pat_no_top_alt(Some("argument name"))?;
self.expect(&token::Colon)?;
let ty = self.parse_ty()?;
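
The reworked recovery above extends the 2018-edition anonymous-parameter suggestions to by-reference patterns, so `fn foo(&mut buf)` now gets the same three candidate rewrites as `fn foo(buf)`, with the reference and mutability preserved. A standalone sketch of just the suggestion strings (the `PatKind`/`Mutability` plumbing is elided; names here are illustrative):

/// Builds the three suggestion strings offered for an anonymous parameter.
/// `is_ref`/`is_mut` stand in for the pattern information the parser extracts.
fn anon_param_suggestions(ident: &str, is_ref: bool, is_mut: bool) -> (String, String, String) {
    let prefix = match (is_ref, is_mut) {
        (false, _) => "",
        (true, false) => "&",
        (true, true) => "&mut ",
    };
    (
        format!("self: {}{}", prefix, ident),     // "if this is a `self` type, give it a parameter name"
        format!("{}: {}TypeName", ident, prefix), // "if this is a parameter name, give it a type"
        format!("_: {}{}", prefix, ident),        // "if this is a type, explicitly ignore the parameter name"
    )
}

fn main() {
    // Recovery for `fn foo(&mut buf)` in a 2018-edition item:
    let (self_sugg, param_sugg, type_sugg) = anon_param_suggestions("buf", true, true);
    assert_eq!(self_sugg, "self: &mut buf");
    assert_eq!(param_sugg, "buf: &mut TypeName");
    assert_eq!(type_sugg, "_: &mut buf");
}
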
diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs
index cfd7ad4..a3f2a8b 100644
--- a/compiler/rustc_parse/src/parser/expr.rs
+++ b/compiler/rustc_parse/src/parser/expr.rs
@@ -1,7 +1,7 @@
use super::pat::{GateOr, RecoverComma, PARAM_EXPECTED};
use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
-use super::{BlockMode, Parser, PathStyle, Restrictions, TokenType};
-use super::{SemiColonMode, SeqSep, TokenExpectType};
+use super::{AttrWrapper, BlockMode, ForceCollect, Parser, PathStyle, Restrictions, TokenType};
+use super::{SemiColonMode, SeqSep, TokenExpectType, TrailingToken};
use crate::maybe_recover_from_interpolated_ty_qpath;
use rustc_ast::ptr::P;
@@ -10,7 +10,7 @@
use rustc_ast::util::classify;
use rustc_ast::util::literal::LitError;
use rustc_ast::util::parser::{prec_let_scrutinee_needs_par, AssocOp, Fixity};
-use rustc_ast::{self as ast, AttrStyle, AttrVec, CaptureBy, Field, Lit, UnOp, DUMMY_NODE_ID};
+use rustc_ast::{self as ast, AttrStyle, AttrVec, CaptureBy, ExprField, Lit, UnOp, DUMMY_NODE_ID};
use rustc_ast::{AnonConst, BinOp, BinOpKind, FnDecl, FnRetTy, MacCall, Param, Ty, TyKind};
use rustc_ast::{Arm, Async, BlockCheckMode, Expr, ExprKind, Label, Movability, RangeLimits};
use rustc_ast_pretty::pprust;
@@ -62,16 +62,16 @@
#[derive(Debug)]
pub(super) enum LhsExpr {
NotYetParsed,
- AttributesParsed(AttrVec),
+ AttributesParsed(AttrWrapper),
AlreadyParsed(P<Expr>),
}
-impl From<Option<AttrVec>> for LhsExpr {
+impl From<Option<AttrWrapper>> for LhsExpr {
/// Converts `Some(attrs)` into `LhsExpr::AttributesParsed(attrs)`
/// and `None` into `LhsExpr::NotYetParsed`.
///
/// This conversion does not allocate.
- fn from(o: Option<AttrVec>) -> Self {
+ fn from(o: Option<AttrWrapper>) -> Self {
if let Some(attrs) = o { LhsExpr::AttributesParsed(attrs) } else { LhsExpr::NotYetParsed }
}
}
@@ -123,7 +123,7 @@
pub(super) fn parse_expr_res(
&mut self,
r: Restrictions,
- already_parsed_attrs: Option<AttrVec>,
+ already_parsed_attrs: Option<AttrWrapper>,
) -> PResult<'a, P<Expr>> {
self.with_res(r, |this| this.parse_assoc_expr(already_parsed_attrs))
}
@@ -133,7 +133,10 @@
/// This parses an expression accounting for associativity and precedence of the operators in
/// the expression.
#[inline]
- fn parse_assoc_expr(&mut self, already_parsed_attrs: Option<AttrVec>) -> PResult<'a, P<Expr>> {
+ fn parse_assoc_expr(
+ &mut self,
+ already_parsed_attrs: Option<AttrWrapper>,
+ ) -> PResult<'a, P<Expr>> {
self.parse_assoc_expr_with(0, already_parsed_attrs.into())
}
@@ -423,7 +426,7 @@
let span = self.mk_expr_sp(&lhs, lhs.span, rhs_span);
let limits =
if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed };
- Ok(self.mk_expr(span, self.mk_range(Some(lhs), rhs, limits)?, AttrVec::new()))
+ Ok(self.mk_expr(span, self.mk_range(Some(lhs), rhs, limits), AttrVec::new()))
}
fn is_at_start_of_range_notation_rhs(&self) -> bool {
@@ -439,7 +442,7 @@
}
/// Parses prefix-forms of range notation: `..expr`, `..`, `..=expr`.
- fn parse_prefix_range_expr(&mut self, attrs: Option<AttrVec>) -> PResult<'a, P<Expr>> {
+ fn parse_prefix_range_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
// Check for deprecated `...` syntax.
if self.token == token::DotDotDot {
self.err_dotdotdot_syntax(self.token.span);
@@ -456,45 +459,62 @@
_ => RangeLimits::Closed,
};
let op = AssocOp::from_token(&self.token);
+ // FIXME: `parse_prefix_range_expr` is called when the current
+ // token is `DotDot`, `DotDotDot`, or `DotDotEq`. If we haven't already
+ // parsed attributes, then trying to parse them here will always fail.
+ // We should figure out how we want attributes on range expressions to work.
let attrs = self.parse_or_use_outer_attributes(attrs)?;
- let lo = self.token.span;
- self.bump();
- let (span, opt_end) = if self.is_at_start_of_range_notation_rhs() {
- // RHS must be parsed with more associativity than the dots.
- self.parse_assoc_expr_with(op.unwrap().precedence() + 1, LhsExpr::NotYetParsed)
- .map(|x| (lo.to(x.span), Some(x)))?
- } else {
- (lo, None)
- };
- Ok(self.mk_expr(span, self.mk_range(None, opt_end, limits)?, attrs))
+ self.collect_tokens_for_expr(attrs, |this, attrs| {
+ let lo = this.token.span;
+ this.bump();
+ let (span, opt_end) = if this.is_at_start_of_range_notation_rhs() {
+ // RHS must be parsed with more associativity than the dots.
+ this.parse_assoc_expr_with(op.unwrap().precedence() + 1, LhsExpr::NotYetParsed)
+ .map(|x| (lo.to(x.span), Some(x)))?
+ } else {
+ (lo, None)
+ };
+ Ok(this.mk_expr(span, this.mk_range(None, opt_end, limits), attrs.into()))
+ })
}
/// Parses a prefix-unary-operator expr.
- fn parse_prefix_expr(&mut self, attrs: Option<AttrVec>) -> PResult<'a, P<Expr>> {
+ fn parse_prefix_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
let attrs = self.parse_or_use_outer_attributes(attrs)?;
- // FIXME: Use super::attr::maybe_needs_tokens(&attrs) once we come up
- // with a good way of passing `force_tokens` through from `parse_nonterminal`.
- // Checking !attrs.is_empty() is correct, but will cause us to unnecessarily
- // capture tokens in some circumstances.
- let needs_tokens = !attrs.is_empty();
- let do_parse = |this: &mut Parser<'a>| {
- let lo = this.token.span;
- // Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr()
- let (hi, ex) = match this.token.uninterpolate().kind {
- token::Not => this.parse_unary_expr(lo, UnOp::Not), // `!expr`
- token::Tilde => this.recover_tilde_expr(lo), // `~expr`
- token::BinOp(token::Minus) => this.parse_unary_expr(lo, UnOp::Neg), // `-expr`
- token::BinOp(token::Star) => this.parse_unary_expr(lo, UnOp::Deref), // `*expr`
- token::BinOp(token::And) | token::AndAnd => this.parse_borrow_expr(lo),
- token::Ident(..) if this.token.is_keyword(kw::Box) => this.parse_box_expr(lo),
- token::Ident(..) if this.is_mistaken_not_ident_negation() => {
- this.recover_not_expr(lo)
- }
- _ => return this.parse_dot_or_call_expr(Some(attrs)),
- }?;
- Ok(this.mk_expr(lo.to(hi), ex, attrs))
- };
- if needs_tokens { self.collect_tokens(do_parse) } else { do_parse(self) }
+ let lo = self.token.span;
+
+ macro_rules! make_it {
+ ($this:ident, $attrs:expr, |this, _| $body:expr) => {
+ $this.collect_tokens_for_expr($attrs, |$this, attrs| {
+ let (hi, ex) = $body?;
+ Ok($this.mk_expr(lo.to(hi), ex, attrs.into()))
+ })
+ };
+ }
+
+ let this = self;
+
+ // Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr()
+ match this.token.uninterpolate().kind {
+ token::Not => make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Not)), // `!expr`
+ token::Tilde => make_it!(this, attrs, |this, _| this.recover_tilde_expr(lo)), // `~expr`
+ token::BinOp(token::Minus) => {
+ make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Neg))
+ } // `-expr`
+ token::BinOp(token::Star) => {
+ make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Deref))
+ } // `*expr`
+ token::BinOp(token::And) | token::AndAnd => {
+ make_it!(this, attrs, |this, _| this.parse_borrow_expr(lo))
+ }
+ token::Ident(..) if this.token.is_keyword(kw::Box) => {
+ make_it!(this, attrs, |this, _| this.parse_box_expr(lo))
+ }
+ token::Ident(..) if this.is_mistaken_not_ident_negation() => {
+ make_it!(this, attrs, |this, _| this.recover_not_expr(lo))
+ }
+ _ => return this.parse_dot_or_call_expr(Some(attrs)),
+ }
}
fn parse_prefix_expr_common(&mut self, lo: Span) -> PResult<'a, (Span, P<Expr>)> {
@@ -805,18 +825,20 @@
}
/// Parses `a.b` or `a(13)` or `a[4]` or just `a`.
- fn parse_dot_or_call_expr(&mut self, attrs: Option<AttrVec>) -> PResult<'a, P<Expr>> {
+ fn parse_dot_or_call_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
let attrs = self.parse_or_use_outer_attributes(attrs)?;
- let base = self.parse_bottom_expr();
- let (span, base) = self.interpolated_or_expr_span(base)?;
- self.parse_dot_or_call_expr_with(base, span, attrs)
+ self.collect_tokens_for_expr(attrs, |this, attrs| {
+ let base = this.parse_bottom_expr();
+ let (span, base) = this.interpolated_or_expr_span(base)?;
+ this.parse_dot_or_call_expr_with(base, span, attrs)
+ })
}
pub(super) fn parse_dot_or_call_expr_with(
&mut self,
e0: P<Expr>,
lo: Span,
- mut attrs: AttrVec,
+ mut attrs: Vec<ast::Attribute>,
) -> PResult<'a, P<Expr>> {
// Stitch the list of outer attributes onto the return value.
// A little bit ugly, but the best way given the current code
@@ -824,7 +846,7 @@
self.parse_dot_or_call_expr_with_(e0, lo).map(|expr| {
expr.map(|mut expr| {
attrs.extend::<Vec<_>>(expr.attrs.into());
- expr.attrs = attrs;
+ expr.attrs = attrs.into();
expr
})
})
@@ -1019,7 +1041,7 @@
/// Assuming we have just parsed `.`, continue parsing into an expression.
fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
if self.token.uninterpolated_span().rust_2018() && self.eat_keyword(kw::Await) {
- return self.mk_await_expr(self_arg, lo);
+ return Ok(self.mk_await_expr(self_arg, lo));
}
let fn_span_lo = self.token.span;
@@ -1703,19 +1725,25 @@
fn parse_fn_block_param(&mut self) -> PResult<'a, Param> {
let lo = self.token.span;
let attrs = self.parse_outer_attributes()?;
- let pat = self.parse_pat(PARAM_EXPECTED)?;
- let ty = if self.eat(&token::Colon) {
- self.parse_ty()?
- } else {
- self.mk_ty(self.prev_token.span, TyKind::Infer)
- };
- Ok(Param {
- attrs: attrs.into(),
- ty,
- pat,
- span: lo.to(self.token.span),
- id: DUMMY_NODE_ID,
- is_placeholder: false,
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let pat = this.parse_pat_no_top_alt(PARAM_EXPECTED)?;
+ let ty = if this.eat(&token::Colon) {
+ this.parse_ty()?
+ } else {
+ this.mk_ty(this.prev_token.span, TyKind::Infer)
+ };
+
+ Ok((
+ Param {
+ attrs: attrs.into(),
+ ty,
+ pat,
+ span: lo.to(this.token.span),
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
+ },
+ TrailingToken::MaybeComma,
+ ))
})
}
@@ -1731,7 +1759,7 @@
let thn = if self.eat_keyword(kw::Else) || !cond.returns() {
self.error_missing_if_cond(lo, cond.span)
} else {
- let attrs = self.parse_outer_attributes()?; // For recovery.
+ let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery.
let not_block = self.token != token::OpenDelim(token::Brace);
let block = self.parse_block().map_err(|mut err| {
if not_block {
@@ -1775,7 +1803,7 @@
/// The `let` token has already been eaten.
fn parse_let_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
- let pat = self.parse_top_pat(GateOr::No, RecoverComma::Yes)?;
+ let pat = self.parse_pat_allow_top_alt(None, GateOr::No, RecoverComma::Yes)?;
self.expect(&token::Eq)?;
let expr = self.with_res(self.restrictions | Restrictions::NO_STRUCT_LITERAL, |this| {
this.parse_assoc_expr_with(1 + prec_let_scrutinee_needs_par(), None.into())
@@ -1788,7 +1816,7 @@
/// Parses an `else { ... }` expression (`else` token already eaten).
fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> {
let ctx_span = self.prev_token.span; // `else`
- let attrs = self.parse_outer_attributes()?; // For recovery.
+ let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery.
let expr = if self.eat_keyword(kw::If) {
self.parse_if_expr(AttrVec::new())?
} else {
@@ -1838,7 +1866,7 @@
_ => None,
};
- let pat = self.parse_top_pat(GateOr::Yes, RecoverComma::Yes)?;
+ let pat = self.parse_pat_allow_top_alt(None, GateOr::Yes, RecoverComma::Yes)?;
if !self.eat_keyword(kw::In) {
self.error_missing_in_for_loop();
}
@@ -1945,87 +1973,204 @@
Ok(self.mk_expr(lo.to(hi), ExprKind::Match(scrutinee, arms), attrs))
}
+ /// Attempt to recover from match arm body with statements and no surrounding braces.
+ fn parse_arm_body_missing_braces(
+ &mut self,
+ first_expr: &P<Expr>,
+ arrow_span: Span,
+ ) -> Option<P<Expr>> {
+ if self.token.kind != token::Semi {
+ return None;
+ }
+ let start_snapshot = self.clone();
+ let semi_sp = self.token.span;
+ self.bump(); // `;`
+ let mut stmts =
+ vec![self.mk_stmt(first_expr.span, ast::StmtKind::Expr(first_expr.clone()))];
+ let err = |this: &mut Parser<'_>, stmts: Vec<ast::Stmt>| {
+ let span = stmts[0].span.to(stmts[stmts.len() - 1].span);
+ let mut err = this.struct_span_err(span, "`match` arm body without braces");
+ let (these, s, are) =
+ if stmts.len() > 1 { ("these", "s", "are") } else { ("this", "", "is") };
+ err.span_label(
+ span,
+ &format!(
+ "{these} statement{s} {are} not surrounded by a body",
+ these = these,
+ s = s,
+ are = are
+ ),
+ );
+ err.span_label(arrow_span, "while parsing the `match` arm starting here");
+ if stmts.len() > 1 {
+ err.multipart_suggestion(
+ &format!("surround the statement{} with a body", s),
+ vec![
+ (span.shrink_to_lo(), "{ ".to_string()),
+ (span.shrink_to_hi(), " }".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion(
+ semi_sp,
+ "use a comma to end a `match` arm expression",
+ ",".to_string(),
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ this.mk_expr_err(span)
+ };
+ // We might have either a `,` -> `;` typo, or a block without braces. We need
+ // a more subtle parsing strategy.
+ loop {
+ if self.token.kind == token::CloseDelim(token::Brace) {
+ // We have reached the closing brace of the `match` expression.
+ return Some(err(self, stmts));
+ }
+ if self.token.kind == token::Comma {
+ *self = start_snapshot;
+ return None;
+ }
+ let pre_pat_snapshot = self.clone();
+ match self.parse_pat_no_top_alt(None) {
+ Ok(_pat) => {
+ if self.token.kind == token::FatArrow {
+ // Reached arm end.
+ *self = pre_pat_snapshot;
+ return Some(err(self, stmts));
+ }
+ }
+ Err(mut err) => {
+ err.cancel();
+ }
+ }
+
+ *self = pre_pat_snapshot;
+ match self.parse_stmt_without_recovery(true, ForceCollect::No) {
+ // Consume statements for as long as possible.
+ Ok(Some(stmt)) => {
+ stmts.push(stmt);
+ }
+ Ok(None) => {
+ *self = start_snapshot;
+ break;
+ }
+                // We couldn't parse yet another statement missing its enclosing
+                // block, nor the next arm's pattern or closing brace.
+ Err(mut stmt_err) => {
+ stmt_err.cancel();
+ *self = start_snapshot;
+ break;
+ }
+ }
+ }
+ None
+ }
+
pub(super) fn parse_arm(&mut self) -> PResult<'a, Arm> {
let attrs = self.parse_outer_attributes()?;
- let lo = self.token.span;
- let pat = self.parse_top_pat(GateOr::No, RecoverComma::Yes)?;
- let guard = if self.eat_keyword(kw::If) {
- let if_span = self.prev_token.span;
- let cond = self.parse_expr()?;
- if let ExprKind::Let(..) = cond.kind {
- // Remove the last feature gating of a `let` expression since it's stable.
- self.sess.gated_spans.ungate_last(sym::let_chains, cond.span);
- let span = if_span.to(cond.span);
- self.sess.gated_spans.gate(sym::if_let_guard, span);
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let lo = this.token.span;
+ let pat = this.parse_pat_allow_top_alt(None, GateOr::No, RecoverComma::Yes)?;
+ let guard = if this.eat_keyword(kw::If) {
+ let if_span = this.prev_token.span;
+ let cond = this.parse_expr()?;
+ if let ExprKind::Let(..) = cond.kind {
+ // Remove the last feature gating of a `let` expression since it's stable.
+ this.sess.gated_spans.ungate_last(sym::let_chains, cond.span);
+ let span = if_span.to(cond.span);
+ this.sess.gated_spans.gate(sym::if_let_guard, span);
+ }
+ Some(cond)
+ } else {
+ None
+ };
+ let arrow_span = this.token.span;
+ this.expect(&token::FatArrow)?;
+ let arm_start_span = this.token.span;
+
+ let expr = this.parse_expr_res(Restrictions::STMT_EXPR, None).map_err(|mut err| {
+ err.span_label(arrow_span, "while parsing the `match` arm starting here");
+ err
+ })?;
+
+ let require_comma = classify::expr_requires_semi_to_be_stmt(&expr)
+ && this.token != token::CloseDelim(token::Brace);
+
+ let hi = this.prev_token.span;
+
+ if require_comma {
+ let sm = this.sess.source_map();
+ if let Some(body) = this.parse_arm_body_missing_braces(&expr, arrow_span) {
+ let span = body.span;
+ return Ok((
+ ast::Arm {
+ attrs,
+ pat,
+ guard,
+ body,
+ span,
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
+ },
+ TrailingToken::None,
+ ));
+ }
+ this.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)]).map_err(
+ |mut err| {
+ match (sm.span_to_lines(expr.span), sm.span_to_lines(arm_start_span)) {
+ (Ok(ref expr_lines), Ok(ref arm_start_lines))
+ if arm_start_lines.lines[0].end_col
+ == expr_lines.lines[0].end_col
+ && expr_lines.lines.len() == 2
+ && this.token == token::FatArrow =>
+ {
+ // We check whether there's any trailing code in the parse span,
+ // if there isn't, we very likely have the following:
+ //
+ // X | &Y => "y"
+ // | -- - missing comma
+ // | |
+ // | arrow_span
+ // X | &X => "x"
+ // | - ^^ self.token.span
+ // | |
+ // | parsed until here as `"y" & X`
+ err.span_suggestion_short(
+ arm_start_span.shrink_to_hi(),
+ "missing a comma here to end this `match` arm",
+ ",".to_owned(),
+ Applicability::MachineApplicable,
+ );
+ }
+ _ => {
+ err.span_label(
+ arrow_span,
+ "while parsing the `match` arm starting here",
+ );
+ }
+ }
+ err
+ },
+ )?;
+ } else {
+ this.eat(&token::Comma);
}
- Some(cond)
- } else {
- None
- };
- let arrow_span = self.token.span;
- self.expect(&token::FatArrow)?;
- let arm_start_span = self.token.span;
- let expr = self.parse_expr_res(Restrictions::STMT_EXPR, None).map_err(|mut err| {
- err.span_label(arrow_span, "while parsing the `match` arm starting here");
- err
- })?;
-
- let require_comma = classify::expr_requires_semi_to_be_stmt(&expr)
- && self.token != token::CloseDelim(token::Brace);
-
- let hi = self.prev_token.span;
-
- if require_comma {
- let sm = self.sess.source_map();
- self.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)]).map_err(
- |mut err| {
- match (sm.span_to_lines(expr.span), sm.span_to_lines(arm_start_span)) {
- (Ok(ref expr_lines), Ok(ref arm_start_lines))
- if arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col
- && expr_lines.lines.len() == 2
- && self.token == token::FatArrow =>
- {
- // We check whether there's any trailing code in the parse span,
- // if there isn't, we very likely have the following:
- //
- // X | &Y => "y"
- // | -- - missing comma
- // | |
- // | arrow_span
- // X | &X => "x"
- // | - ^^ self.token.span
- // | |
- // | parsed until here as `"y" & X`
- err.span_suggestion_short(
- arm_start_span.shrink_to_hi(),
- "missing a comma here to end this `match` arm",
- ",".to_owned(),
- Applicability::MachineApplicable,
- );
- }
- _ => {
- err.span_label(
- arrow_span,
- "while parsing the `match` arm starting here",
- );
- }
- }
- err
+ Ok((
+ ast::Arm {
+ attrs,
+ pat,
+ guard,
+ body: expr,
+ span: lo.to(hi),
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
},
- )?;
- } else {
- self.eat(&token::Comma);
- }
-
- Ok(ast::Arm {
- attrs,
- pat,
- guard,
- body: expr,
- span: lo.to(hi),
- id: DUMMY_NODE_ID,
- is_placeholder: false,
+ TrailingToken::None,
+ ))
})
}
@@ -2171,7 +2316,7 @@
}
let recovery_field = self.find_struct_error_after_field_looking_code();
- let parsed_field = match self.parse_field() {
+ let parsed_field = match self.parse_expr_field() {
Ok(f) => Some(f),
Err(mut e) => {
if pth == kw::Async {
@@ -2228,18 +2373,22 @@
let span = pth.span.to(self.token.span);
self.expect(&token::CloseDelim(token::Brace))?;
- let expr = if recover_async { ExprKind::Err } else { ExprKind::Struct(pth, fields, base) };
+ let expr = if recover_async {
+ ExprKind::Err
+ } else {
+ ExprKind::Struct(P(ast::StructExpr { path: pth, fields, rest: base }))
+ };
Ok(self.mk_expr(span, expr, attrs))
}
/// Use in case of error after field-looking code: `S { foo: () with a }`.
- fn find_struct_error_after_field_looking_code(&self) -> Option<Field> {
+ fn find_struct_error_after_field_looking_code(&self) -> Option<ExprField> {
match self.token.ident() {
Some((ident, is_raw))
if (is_raw || !ident.is_reserved())
&& self.look_ahead(1, |t| *t == token::Colon) =>
{
- Some(ast::Field {
+ Some(ast::ExprField {
ident,
span: self.token.span,
expr: self.mk_expr_err(self.token.span),
@@ -2273,31 +2422,37 @@
}
/// Parses `ident (COLON expr)?`.
- fn parse_field(&mut self) -> PResult<'a, Field> {
- let attrs = self.parse_outer_attributes()?.into();
- let lo = self.token.span;
+ fn parse_expr_field(&mut self) -> PResult<'a, ExprField> {
+ let attrs = self.parse_outer_attributes()?;
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let lo = this.token.span;
- // Check if a colon exists one ahead. This means we're parsing a fieldname.
- let is_shorthand = !self.look_ahead(1, |t| t == &token::Colon || t == &token::Eq);
- let (ident, expr) = if is_shorthand {
- // Mimic `x: x` for the `x` field shorthand.
- let ident = self.parse_ident_common(false)?;
- let path = ast::Path::from_ident(ident);
- (ident, self.mk_expr(ident.span, ExprKind::Path(None, path), AttrVec::new()))
- } else {
- let ident = self.parse_field_name()?;
- self.error_on_eq_field_init(ident);
- self.bump(); // `:`
- (ident, self.parse_expr()?)
- };
- Ok(ast::Field {
- ident,
- span: lo.to(expr.span),
- expr,
- is_shorthand,
- attrs,
- id: DUMMY_NODE_ID,
- is_placeholder: false,
+ // Check if a colon exists one ahead. This means we're parsing a fieldname.
+ let is_shorthand = !this.look_ahead(1, |t| t == &token::Colon || t == &token::Eq);
+ let (ident, expr) = if is_shorthand {
+ // Mimic `x: x` for the `x` field shorthand.
+ let ident = this.parse_ident_common(false)?;
+ let path = ast::Path::from_ident(ident);
+ (ident, this.mk_expr(ident.span, ExprKind::Path(None, path), AttrVec::new()))
+ } else {
+ let ident = this.parse_field_name()?;
+ this.error_on_eq_field_init(ident);
+ this.bump(); // `:`
+ (ident, this.parse_expr()?)
+ };
+
+ Ok((
+ ast::ExprField {
+ ident,
+ span: lo.to(expr.span),
+ expr,
+ is_shorthand,
+ attrs: attrs.into(),
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
+ },
+ TrailingToken::MaybeComma,
+ ))
})
}
@@ -2356,12 +2511,12 @@
start: Option<P<Expr>>,
end: Option<P<Expr>>,
limits: RangeLimits,
- ) -> PResult<'a, ExprKind> {
+ ) -> ExprKind {
if end.is_none() && limits == RangeLimits::Closed {
self.error_inclusive_range_with_no_end(self.prev_token.span);
- Ok(ExprKind::Err)
+ ExprKind::Err
} else {
- Ok(ExprKind::Range(start, end, limits))
+ ExprKind::Range(start, end, limits)
}
}
@@ -2381,11 +2536,11 @@
ExprKind::Call(f, args)
}
- fn mk_await_expr(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
+ fn mk_await_expr(&mut self, self_arg: P<Expr>, lo: Span) -> P<Expr> {
let span = lo.to(self.prev_token.span);
let await_expr = self.mk_expr(span, ExprKind::Await(self_arg), AttrVec::new());
self.recover_from_await_method_call();
- Ok(await_expr)
+ await_expr
}
crate fn mk_expr(&self, span: Span, kind: ExprKind, attrs: AttrVec) -> P<Expr> {
@@ -2405,4 +2560,27 @@
.map_or(lhs_span, |a| a.span)
.to(rhs_span)
}
+
+ fn collect_tokens_for_expr(
+ &mut self,
+ attrs: AttrWrapper,
+ f: impl FnOnce(&mut Self, Vec<ast::Attribute>) -> PResult<'a, P<Expr>>,
+ ) -> PResult<'a, P<Expr>> {
+        // FIXME - come up with a nice way to properly forward `ForceCollect` from
+        // the nonterminal parsing code. This approach is correct, but will cause
+ // us to unnecessarily capture tokens for exprs that have only builtin
+ // attributes. Revisit this before #![feature(stmt_expr_attributes)] is stabilized
+ let force_collect = if attrs.is_empty() { ForceCollect::No } else { ForceCollect::Yes };
+ self.collect_tokens_trailing_token(attrs, force_collect, |this, attrs| {
+ let res = f(this, attrs)?;
+ let trailing = if this.restrictions.contains(Restrictions::STMT_EXPR)
+ && this.token.kind == token::Semi
+ {
+ TrailingToken::Semi
+ } else {
+ TrailingToken::None
+ };
+ Ok((res, trailing))
+ })
+ }
}
diff --git a/compiler/rustc_parse/src/parser/generics.rs b/compiler/rustc_parse/src/parser/generics.rs
index 42a1337..f175c5b 100644
--- a/compiler/rustc_parse/src/parser/generics.rs
+++ b/compiler/rustc_parse/src/parser/generics.rs
@@ -1,4 +1,4 @@
-use super::Parser;
+use super::{ForceCollect, Parser, TrailingToken};
use rustc_ast::token;
use rustc_ast::{
@@ -84,68 +84,89 @@
/// a trailing comma and erroneous trailing attributes.
pub(super) fn parse_generic_params(&mut self) -> PResult<'a, Vec<ast::GenericParam>> {
let mut params = Vec::new();
- loop {
+ let mut done = false;
+ while !done {
let attrs = self.parse_outer_attributes()?;
- if self.check_lifetime() {
- let lifetime = self.expect_lifetime();
- // Parse lifetime parameter.
- let bounds =
- if self.eat(&token::Colon) { self.parse_lt_param_bounds() } else { Vec::new() };
- params.push(ast::GenericParam {
- ident: lifetime.ident,
- id: lifetime.id,
- attrs: attrs.into(),
- bounds,
- kind: ast::GenericParamKind::Lifetime,
- is_placeholder: false,
- });
- } else if self.check_keyword(kw::Const) {
- // Parse const parameter.
- params.push(self.parse_const_param(attrs)?);
- } else if self.check_ident() {
- // Parse type parameter.
- params.push(self.parse_ty_param(attrs)?);
- } else if self.token.can_begin_type() {
- // Trying to write an associated type bound? (#26271)
- let snapshot = self.clone();
- match self.parse_ty_where_predicate() {
- Ok(where_predicate) => {
- self.struct_span_err(
- where_predicate.span(),
- "bounds on associated types do not belong here",
- )
- .span_label(where_predicate.span(), "belongs in `where` clause")
- .emit();
- }
- Err(mut err) => {
- err.cancel();
- *self = snapshot;
- break;
- }
- }
- } else {
- // Check for trailing attributes and stop parsing.
- if !attrs.is_empty() {
- if !params.is_empty() {
- self.struct_span_err(
- attrs[0].span,
- "trailing attribute after generic parameter",
- )
- .span_label(attrs[0].span, "attributes must go before parameters")
- .emit();
+ let param =
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let param = if this.check_lifetime() {
+ let lifetime = this.expect_lifetime();
+ // Parse lifetime parameter.
+ let bounds = if this.eat(&token::Colon) {
+ this.parse_lt_param_bounds()
+ } else {
+ Vec::new()
+ };
+ Some(ast::GenericParam {
+ ident: lifetime.ident,
+ id: lifetime.id,
+ attrs: attrs.into(),
+ bounds,
+ kind: ast::GenericParamKind::Lifetime,
+ is_placeholder: false,
+ })
+ } else if this.check_keyword(kw::Const) {
+ // Parse const parameter.
+ Some(this.parse_const_param(attrs)?)
+ } else if this.check_ident() {
+ // Parse type parameter.
+ Some(this.parse_ty_param(attrs)?)
+ } else if this.token.can_begin_type() {
+ // Trying to write an associated type bound? (#26271)
+ let snapshot = this.clone();
+ match this.parse_ty_where_predicate() {
+ Ok(where_predicate) => {
+ this.struct_span_err(
+ where_predicate.span(),
+ "bounds on associated types do not belong here",
+ )
+ .span_label(where_predicate.span(), "belongs in `where` clause")
+ .emit();
+ // FIXME - try to continue parsing other generics?
+ return Ok((None, TrailingToken::None));
+ }
+ Err(mut err) => {
+ err.cancel();
+ // FIXME - maybe we should overwrite 'self' outside of `collect_tokens`?
+ *this = snapshot;
+ return Ok((None, TrailingToken::None));
+ }
+ }
} else {
- self.struct_span_err(attrs[0].span, "attribute without generic parameters")
- .span_label(
- attrs[0].span,
- "attributes are only permitted when preceding parameters",
- )
- .emit();
- }
- }
- break;
- }
+ // Check for trailing attributes and stop parsing.
+ if !attrs.is_empty() {
+ if !params.is_empty() {
+ this.struct_span_err(
+ attrs[0].span,
+ "trailing attribute after generic parameter",
+ )
+ .span_label(attrs[0].span, "attributes must go before parameters")
+ .emit();
+ } else {
+ this.struct_span_err(
+ attrs[0].span,
+ "attribute without generic parameters",
+ )
+ .span_label(
+ attrs[0].span,
+ "attributes are only permitted when preceding parameters",
+ )
+ .emit();
+ }
+ }
+ return Ok((None, TrailingToken::None));
+ };
- if !self.eat(&token::Comma) {
+ if !this.eat(&token::Comma) {
+ done = true;
+ }
+ // We just ate the comma, so no need to use `TrailingToken`
+ Ok((param, TrailingToken::None))
+ })?;
+
+ if let Some(param) = param {
+ params.push(param);
+ } else {
break;
}
}
diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs
index ee24286..70dbaa5 100644
--- a/compiler/rustc_parse/src/parser/item.rs
+++ b/compiler/rustc_parse/src/parser/item.rs
@@ -1,8 +1,6 @@
use super::diagnostics::{dummy_arg, ConsumeClosingDelim, Error};
use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
-use super::{FollowedByType, ForceCollect, Parser, PathStyle, TrailingToken};
-
-use crate::{maybe_collect_tokens, maybe_whole};
+use super::{AttrWrapper, FollowedByType, ForceCollect, Parser, PathStyle, TrailingToken};
use rustc_ast::ast::*;
use rustc_ast::ptr::P;
@@ -11,7 +9,7 @@
use rustc_ast::{self as ast, AttrVec, Attribute, DUMMY_NODE_ID};
use rustc_ast::{Async, Const, Defaultness, IsAuto, Mutability, Unsafe, UseTree, UseTreeKind};
use rustc_ast::{BindingMode, Block, FnDecl, FnSig, Param, SelfKind};
-use rustc_ast::{EnumDef, Generics, StructField, TraitRef, Ty, TyKind, Variant, VariantData};
+use rustc_ast::{EnumDef, FieldDef, Generics, TraitRef, Ty, TyKind, Variant, VariantData};
use rustc_ast::{FnHeader, ForeignItem, Path, PathSegment, Visibility, VisibilityKind};
use rustc_ast::{MacArgs, MacCall, MacDelimiter};
use rustc_ast_pretty::pprust;
@@ -27,11 +25,9 @@
impl<'a> Parser<'a> {
/// Parses a source module as a crate. This is the main entry point for the parser.
pub fn parse_crate_mod(&mut self) -> PResult<'a, ast::Crate> {
- let lo = self.token.span;
- let (module, attrs) = self.parse_mod(&token::Eof, Unsafe::No)?;
- let span = lo.to(self.token.span);
+ let (attrs, items, span) = self.parse_mod(&token::Eof)?;
let proc_macros = Vec::new(); // Filled in by `proc_macro_harness::inject()`.
- Ok(ast::Crate { attrs, module, span, proc_macros })
+ Ok(ast::Crate { attrs, items, span, proc_macros })
}
/// Parses a `mod <foo> { ... }` or `mod <foo>;` item.
@@ -39,35 +35,26 @@
let unsafety = self.parse_unsafety();
self.expect_keyword(kw::Mod)?;
let id = self.parse_ident()?;
- let (module, mut inner_attrs) = if self.eat(&token::Semi) {
- (Mod { inner: Span::default(), unsafety, items: Vec::new(), inline: false }, Vec::new())
+ let mod_kind = if self.eat(&token::Semi) {
+ ModKind::Unloaded
} else {
self.expect(&token::OpenDelim(token::Brace))?;
- self.parse_mod(&token::CloseDelim(token::Brace), unsafety)?
+ let (mut inner_attrs, items, inner_span) =
+ self.parse_mod(&token::CloseDelim(token::Brace))?;
+ attrs.append(&mut inner_attrs);
+ ModKind::Loaded(items, Inline::Yes, inner_span)
};
- attrs.append(&mut inner_attrs);
- Ok((id, ItemKind::Mod(module)))
+ Ok((id, ItemKind::Mod(unsafety, mod_kind)))
}
/// Parses the contents of a module (inner attributes followed by module items).
pub fn parse_mod(
&mut self,
term: &TokenKind,
- unsafety: Unsafe,
- ) -> PResult<'a, (Mod, Vec<Attribute>)> {
+ ) -> PResult<'a, (Vec<Attribute>, Vec<P<Item>>, Span)> {
let lo = self.token.span;
let attrs = self.parse_inner_attributes()?;
- let module = self.parse_mod_items(term, lo, unsafety)?;
- Ok((module, attrs))
- }
- /// Given a termination token, parses all of the items in a module.
- fn parse_mod_items(
- &mut self,
- term: &TokenKind,
- inner_lo: Span,
- unsafety: Unsafe,
- ) -> PResult<'a, Mod> {
let mut items = vec![];
while let Some(item) = self.parse_item(ForceCollect::No)? {
items.push(item);
@@ -84,9 +71,7 @@
}
}
- let hi = if self.token.span.is_dummy() { inner_lo } else { self.prev_token.span };
-
- Ok(Mod { inner: inner_lo.to(hi), unsafety, items, inline: true })
+ Ok((attrs, items, lo.to(self.prev_token.span)))
}
}
@@ -108,25 +93,40 @@
pub(super) fn parse_item_common(
&mut self,
- mut attrs: Vec<Attribute>,
+ attrs: AttrWrapper,
mac_allowed: bool,
attrs_allowed: bool,
req_name: ReqName,
force_collect: ForceCollect,
) -> PResult<'a, Option<Item>> {
- maybe_whole!(self, NtItem, |item| {
- let mut item = item;
- mem::swap(&mut item.attrs, &mut attrs);
- item.attrs.extend(attrs);
- Some(item.into_inner())
- });
+ // Don't use `maybe_whole` so that we have precise control
+ // over when we bump the parser
+ if let token::Interpolated(nt) = &self.token.kind {
+ if let token::NtItem(item) = &**nt {
+ let item = item.clone();
+
+ return self.collect_tokens_trailing_token(
+ attrs,
+ force_collect,
+ |this, mut attrs| {
+ let mut item = item;
+ mem::swap(&mut item.attrs, &mut attrs);
+ item.attrs.extend(attrs);
+                    // Bump the parser so that we capture the token::Interpolated
+ this.bump();
+ Ok((Some(item.into_inner()), TrailingToken::None))
+ },
+ );
+ }
+ };
let mut unclosed_delims = vec![];
- let item = maybe_collect_tokens!(self, force_collect, &attrs, |this: &mut Self| {
- let item = this.parse_item_common_(attrs, mac_allowed, attrs_allowed, req_name);
- unclosed_delims.append(&mut this.unclosed_delims);
- Ok((item?, TrailingToken::None))
- })?;
+ let item =
+ self.collect_tokens_trailing_token(attrs, force_collect, |this: &mut Self, attrs| {
+ let item = this.parse_item_common_(attrs, mac_allowed, attrs_allowed, req_name);
+ unclosed_delims.append(&mut this.unclosed_delims);
+ Ok((item?, TrailingToken::None))
+ })?;
self.unclosed_delims.append(&mut unclosed_delims);
Ok(item)
@@ -204,6 +204,7 @@
def: &mut Defaultness,
req_name: ReqName,
) -> PResult<'a, Option<ItemInfo>> {
+ let def_final = def == &Defaultness::Final;
let mut def = || mem::replace(def, Defaultness::Final);
let info = if self.eat_keyword(kw::Use) {
@@ -225,8 +226,8 @@
return Err(e);
}
- (Ident::invalid(), ItemKind::Use(P(tree)))
- } else if self.check_fn_front_matter() {
+ (Ident::invalid(), ItemKind::Use(tree))
+ } else if self.check_fn_front_matter(def_final) {
// FUNCTION ITEM
let (ident, sig, generics, body) = self.parse_fn(attrs, req_name, lo)?;
(ident, ItemKind::Fn(box FnKind(def(), sig, generics, body)))
@@ -1109,39 +1110,45 @@
fn parse_enum_variant(&mut self) -> PResult<'a, Option<Variant>> {
let variant_attrs = self.parse_outer_attributes()?;
- let vlo = self.token.span;
+ self.collect_tokens_trailing_token(
+ variant_attrs,
+ ForceCollect::No,
+ |this, variant_attrs| {
+ let vlo = this.token.span;
- let vis = self.parse_visibility(FollowedByType::No)?;
- if !self.recover_nested_adt_item(kw::Enum)? {
- return Ok(None);
- }
- let ident = self.parse_ident()?;
+ let vis = this.parse_visibility(FollowedByType::No)?;
+ if !this.recover_nested_adt_item(kw::Enum)? {
+ return Ok((None, TrailingToken::None));
+ }
+ let ident = this.parse_ident()?;
- let struct_def = if self.check(&token::OpenDelim(token::Brace)) {
- // Parse a struct variant.
- let (fields, recovered) = self.parse_record_struct_body()?;
- VariantData::Struct(fields, recovered)
- } else if self.check(&token::OpenDelim(token::Paren)) {
- VariantData::Tuple(self.parse_tuple_struct_body()?, DUMMY_NODE_ID)
- } else {
- VariantData::Unit(DUMMY_NODE_ID)
- };
+ let struct_def = if this.check(&token::OpenDelim(token::Brace)) {
+ // Parse a struct variant.
+ let (fields, recovered) = this.parse_record_struct_body()?;
+ VariantData::Struct(fields, recovered)
+ } else if this.check(&token::OpenDelim(token::Paren)) {
+ VariantData::Tuple(this.parse_tuple_struct_body()?, DUMMY_NODE_ID)
+ } else {
+ VariantData::Unit(DUMMY_NODE_ID)
+ };
- let disr_expr =
- if self.eat(&token::Eq) { Some(self.parse_anon_const_expr()?) } else { None };
+ let disr_expr =
+ if this.eat(&token::Eq) { Some(this.parse_anon_const_expr()?) } else { None };
- let vr = ast::Variant {
- ident,
- vis,
- id: DUMMY_NODE_ID,
- attrs: variant_attrs,
- data: struct_def,
- disr_expr,
- span: vlo.to(self.prev_token.span),
- is_placeholder: false,
- };
+ let vr = ast::Variant {
+ ident,
+ vis,
+ id: DUMMY_NODE_ID,
+ attrs: variant_attrs,
+ data: struct_def,
+ disr_expr,
+ span: vlo.to(this.prev_token.span),
+ is_placeholder: false,
+ };
- Ok(Some(vr))
+ Ok((Some(vr), TrailingToken::MaybeComma))
+ },
+ )
}
/// Parses `struct Foo { ... }`.
@@ -1225,14 +1232,12 @@
Ok((class_name, ItemKind::Union(vdata, generics)))
}
- fn parse_record_struct_body(
- &mut self,
- ) -> PResult<'a, (Vec<StructField>, /* recovered */ bool)> {
+ fn parse_record_struct_body(&mut self) -> PResult<'a, (Vec<FieldDef>, /* recovered */ bool)> {
let mut fields = Vec::new();
let mut recovered = false;
if self.eat(&token::OpenDelim(token::Brace)) {
while self.token != token::CloseDelim(token::Brace) {
- let field = self.parse_struct_decl_field().map_err(|e| {
+ let field = self.parse_field_def().map_err(|e| {
self.consume_block(token::Brace, ConsumeClosingDelim::No);
recovered = true;
e
@@ -1257,33 +1262,41 @@
Ok((fields, recovered))
}
- fn parse_tuple_struct_body(&mut self) -> PResult<'a, Vec<StructField>> {
+ fn parse_tuple_struct_body(&mut self) -> PResult<'a, Vec<FieldDef>> {
// This is the case where we find `struct Foo<T>(T) where T: Copy;`
// Unit like structs are handled in parse_item_struct function
self.parse_paren_comma_seq(|p| {
let attrs = p.parse_outer_attributes()?;
- let lo = p.token.span;
- let vis = p.parse_visibility(FollowedByType::Yes)?;
- let ty = p.parse_ty()?;
- Ok(StructField {
- span: lo.to(ty.span),
- vis,
- ident: None,
- id: DUMMY_NODE_ID,
- ty,
- attrs,
- is_placeholder: false,
+ p.collect_tokens_trailing_token(attrs, ForceCollect::No, |p, attrs| {
+ let lo = p.token.span;
+ let vis = p.parse_visibility(FollowedByType::Yes)?;
+ let ty = p.parse_ty()?;
+
+ Ok((
+ FieldDef {
+ span: lo.to(ty.span),
+ vis,
+ ident: None,
+ id: DUMMY_NODE_ID,
+ ty,
+ attrs,
+ is_placeholder: false,
+ },
+ TrailingToken::MaybeComma,
+ ))
})
})
.map(|(r, _)| r)
}
/// Parses an element of a struct declaration.
- fn parse_struct_decl_field(&mut self) -> PResult<'a, StructField> {
+ fn parse_field_def(&mut self) -> PResult<'a, FieldDef> {
let attrs = self.parse_outer_attributes()?;
- let lo = self.token.span;
- let vis = self.parse_visibility(FollowedByType::No)?;
- self.parse_single_struct_field(lo, vis, attrs)
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let lo = this.token.span;
+ let vis = this.parse_visibility(FollowedByType::No)?;
+ Ok((this.parse_single_struct_field(lo, vis, attrs)?, TrailingToken::None))
+ })
}
/// Parses a structure field declaration.
@@ -1292,7 +1305,7 @@
lo: Span,
vis: Visibility,
attrs: Vec<Attribute>,
- ) -> PResult<'a, StructField> {
+ ) -> PResult<'a, FieldDef> {
let mut seen_comma: bool = false;
let a_var = self.parse_name_and_ty(lo, vis, attrs)?;
if self.token == token::Comma {
@@ -1384,11 +1397,11 @@
lo: Span,
vis: Visibility,
attrs: Vec<Attribute>,
- ) -> PResult<'a, StructField> {
+ ) -> PResult<'a, FieldDef> {
let name = self.parse_ident_common(false)?;
self.expect(&token::Colon)?;
let ty = self.parse_ty()?;
- Ok(StructField {
+ Ok(FieldDef {
span: lo.to(self.prev_token.span),
ident: Some(name),
vis,
@@ -1461,15 +1474,7 @@
let vstr = pprust::vis_to_string(vis);
let vstr = vstr.trim_end();
if macro_rules {
- let msg = format!("can't qualify macro_rules invocation with `{}`", vstr);
- self.struct_span_err(vis.span, &msg)
- .span_suggestion(
- vis.span,
- "try exporting the macro",
- "#[macro_export]".to_owned(),
- Applicability::MaybeIncorrect, // speculative
- )
- .emit();
+ self.sess.gated_spans.gate(sym::pub_macro_rules, vis.span);
} else {
self.struct_span_err(vis.span, "can't qualify macro invocation with `pub`")
.span_suggestion(
@@ -1630,18 +1635,27 @@
}
/// Is the current token the start of an `FnHeader` / not a valid parse?
- pub(super) fn check_fn_front_matter(&mut self) -> bool {
+ ///
+ /// `check_pub` adds `pub` to the checked qualifiers in case users place it
+ /// incorrectly; this can be used to ensure `pub` never comes after `default`.
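+ /// (e.g. `async pub fn` is still treated as fn front matter here, so the misplaced `pub` can get a targeted suggestion later).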
+ pub(super) fn check_fn_front_matter(&mut self, check_pub: bool) -> bool {
// We use an over-approximation here.
// `const const`, `fn const` won't parse, but we're not stepping over other syntax either.
- const QUALS: [Symbol; 4] = [kw::Const, kw::Async, kw::Unsafe, kw::Extern];
+ // `pub` is added in case users got confused with the ordering like `async pub fn`,
+ // only if it wasn't preceded by `default`, as `default pub` is invalid.
+ let quals: &[Symbol] = if check_pub {
+ &[kw::Pub, kw::Const, kw::Async, kw::Unsafe, kw::Extern]
+ } else {
+ &[kw::Const, kw::Async, kw::Unsafe, kw::Extern]
+ };
self.check_keyword(kw::Fn) // Definitely an `fn`.
// `$qual fn` or `$qual $qual`:
- || QUALS.iter().any(|&kw| self.check_keyword(kw))
+ || quals.iter().any(|&kw| self.check_keyword(kw))
&& self.look_ahead(1, |t| {
// `$qual fn`, e.g. `const fn` or `async fn`.
t.is_keyword(kw::Fn)
// Two qualifiers `$qual $qual` is enough, e.g. `async unsafe`.
- || t.is_non_raw_ident_where(|i| QUALS.contains(&i.name)
+ || t.is_non_raw_ident_where(|i| quals.contains(&i.name)
// Rule out 2015 `const async: T = val`.
&& i.is_reserved()
// Rule out unsafe extern block.
@@ -1662,10 +1676,11 @@
/// FnFrontMatter = FnQual "fn" ;
/// ```
pub(super) fn parse_fn_front_matter(&mut self) -> PResult<'a, FnHeader> {
+ let sp_start = self.token.span;
let constness = self.parse_constness();
let asyncness = self.parse_asyncness();
let unsafety = self.parse_unsafety();
- let ext = self.parse_extern()?;
+ let ext = self.parse_extern();
if let Async::Yes { span, .. } = asyncness {
self.ban_async_in_2015(span);
@@ -1675,8 +1690,27 @@
// It is possible for `expect_one_of` to recover given the contents of
// `self.expected_tokens`, therefore, do not use `self.unexpected()` which doesn't
// account for this.
- if !self.expect_one_of(&[], &[])? {
- unreachable!()
+ match self.expect_one_of(&[], &[]) {
+ Ok(true) => {}
+ Ok(false) => unreachable!(),
+ Err(mut err) => {
+ // Recover incorrect visibility order such as `async pub`.
+ if self.check_keyword(kw::Pub) {
+ let sp = sp_start.to(self.prev_token.span);
+ if let Ok(snippet) = self.span_to_snippet(sp) {
+ let vis = self.parse_visibility(FollowedByType::No)?;
+ let vs = pprust::vis_to_string(&vis);
+ let vs = vs.trim_end();
+ err.span_suggestion(
+ sp_start.to(self.prev_token.span),
+ &format!("visibility `{}` must come before `{}`", vs, snippet),
+ format!("{} {}", vs, snippet),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ return Err(err);
+ }
}
}
@@ -1736,74 +1770,80 @@
fn parse_param_general(&mut self, req_name: ReqName, first_param: bool) -> PResult<'a, Param> {
let lo = self.token.span;
let attrs = self.parse_outer_attributes()?;
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ // Possibly parse `self`. Recover if we parsed it and it wasn't allowed here.
+ if let Some(mut param) = this.parse_self_param()? {
+ param.attrs = attrs.into();
+ let res = if first_param { Ok(param) } else { this.recover_bad_self_param(param) };
+ return Ok((res?, TrailingToken::None));
+ }
- // Possibly parse `self`. Recover if we parsed it and it wasn't allowed here.
- if let Some(mut param) = self.parse_self_param()? {
- param.attrs = attrs.into();
- return if first_param { Ok(param) } else { self.recover_bad_self_param(param) };
- }
+ let is_name_required = match this.token.kind {
+ token::DotDotDot => false,
+ _ => req_name(this.token.span.edition()),
+ };
+ let (pat, ty) = if is_name_required || this.is_named_param() {
+ debug!("parse_param_general parse_pat (is_name_required:{})", is_name_required);
- let is_name_required = match self.token.kind {
- token::DotDotDot => false,
- _ => req_name(self.token.span.edition()),
- };
- let (pat, ty) = if is_name_required || self.is_named_param() {
- debug!("parse_param_general parse_pat (is_name_required:{})", is_name_required);
+ let (pat, colon) = this.parse_fn_param_pat_colon()?;
+ if !colon {
+ let mut err = this.unexpected::<()>().unwrap_err();
+ return if let Some(ident) =
+ this.parameter_without_type(&mut err, pat, is_name_required, first_param)
+ {
+ err.emit();
+ Ok((dummy_arg(ident), TrailingToken::None))
+ } else {
+ Err(err)
+ };
+ }
- let pat = self.parse_fn_param_pat()?;
- if let Err(mut err) = self.expect(&token::Colon) {
- return if let Some(ident) =
- self.parameter_without_type(&mut err, pat, is_name_required, first_param)
+ this.eat_incorrect_doc_comment_for_param_type();
+ (pat, this.parse_ty_for_param()?)
+ } else {
+ debug!("parse_param_general ident_to_pat");
+ let parser_snapshot_before_ty = this.clone();
+ this.eat_incorrect_doc_comment_for_param_type();
+ let mut ty = this.parse_ty_for_param();
+ if ty.is_ok()
+ && this.token != token::Comma
+ && this.token != token::CloseDelim(token::Paren)
{
- err.emit();
- Ok(dummy_arg(ident))
- } else {
- Err(err)
- };
- }
-
- self.eat_incorrect_doc_comment_for_param_type();
- (pat, self.parse_ty_for_param()?)
- } else {
- debug!("parse_param_general ident_to_pat");
- let parser_snapshot_before_ty = self.clone();
- self.eat_incorrect_doc_comment_for_param_type();
- let mut ty = self.parse_ty_for_param();
- if ty.is_ok()
- && self.token != token::Comma
- && self.token != token::CloseDelim(token::Paren)
- {
- // This wasn't actually a type, but a pattern looking like a type,
- // so we are going to rollback and re-parse for recovery.
- ty = self.unexpected();
- }
- match ty {
- Ok(ty) => {
- let ident = Ident::new(kw::Empty, self.prev_token.span);
- let bm = BindingMode::ByValue(Mutability::Not);
- let pat = self.mk_pat_ident(ty.span, bm, ident);
- (pat, ty)
+ // This wasn't actually a type, but a pattern looking like a type,
+ // so we are going to rollback and re-parse for recovery.
+ ty = this.unexpected();
}
- // If this is a C-variadic argument and we hit an error, return the error.
- Err(err) if self.token == token::DotDotDot => return Err(err),
- // Recover from attempting to parse the argument as a type without pattern.
- Err(mut err) => {
- err.cancel();
- *self = parser_snapshot_before_ty;
- self.recover_arg_parse()?
+ match ty {
+ Ok(ty) => {
+ let ident = Ident::new(kw::Empty, this.prev_token.span);
+ let bm = BindingMode::ByValue(Mutability::Not);
+ let pat = this.mk_pat_ident(ty.span, bm, ident);
+ (pat, ty)
+ }
+ // If this is a C-variadic argument and we hit an error, return the error.
+ Err(err) if this.token == token::DotDotDot => return Err(err),
+ // Recover from attempting to parse the argument as a type without pattern.
+ Err(mut err) => {
+ err.cancel();
+ *this = parser_snapshot_before_ty;
+ this.recover_arg_parse()?
+ }
}
- }
- };
+ };
- let span = lo.until(self.token.span);
+ let span = lo.until(this.token.span);
- Ok(Param {
- attrs: attrs.into(),
- id: ast::DUMMY_NODE_ID,
- is_placeholder: false,
- pat,
- span,
- ty,
+ Ok((
+ Param {
+ attrs: attrs.into(),
+ id: ast::DUMMY_NODE_ID,
+ is_placeholder: false,
+ pat,
+ span,
+ ty,
+ },
+ TrailingToken::None,
+ ))
})
}
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
index e2af63d..4cc2224 100644
--- a/compiler/rustc_parse/src/parser/mod.rs
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -1,4 +1,5 @@
pub mod attr;
+mod attr_wrapper;
mod diagnostics;
mod expr;
mod generics;
@@ -10,16 +11,18 @@
mod ty;
use crate::lexer::UnmatchedBrace;
+pub use attr_wrapper::AttrWrapper;
pub use diagnostics::AttemptLocalParseRecovery;
use diagnostics::Error;
+pub use pat::{GateOr, RecoverComma};
pub use path::PathStyle;
use rustc_ast::ptr::P;
use rustc_ast::token::{self, DelimToken, Token, TokenKind};
-use rustc_ast::tokenstream::{self, DelimSpan, LazyTokenStream, Spacing};
-use rustc_ast::tokenstream::{CreateTokenStream, TokenStream, TokenTree, TreeAndSpacing};
+use rustc_ast::tokenstream::{self, DelimSpan, Spacing};
+use rustc_ast::tokenstream::{TokenStream, TokenTree, TreeAndSpacing};
use rustc_ast::DUMMY_NODE_ID;
-use rustc_ast::{self as ast, AnonConst, AttrStyle, AttrVec, Const, CrateSugar, Extern, HasTokens};
+use rustc_ast::{self as ast, AnonConst, AstLike, AttrStyle, AttrVec, Const, CrateSugar, Extern};
use rustc_ast::{Async, Expr, ExprKind, MacArgs, MacDelimiter, Mutability, StrLit, Unsafe};
use rustc_ast::{Visibility, VisibilityKind};
use rustc_ast_pretty::pprust;
@@ -64,6 +67,9 @@
pub enum TrailingToken {
None,
Semi,
+ /// If the trailing token is a comma, then capture it
+ /// Otherwise, ignore the trailing token
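+ /// (e.g. the `,` after an enum variant or a tuple-struct field).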
+ MaybeComma,
}
/// Like `maybe_whole_expr`, but for things other than expressions.
@@ -944,7 +950,7 @@
self.bump();
Ok(Ident::new(symbol, self.prev_token.span))
} else {
- self.parse_ident_common(false)
+ self.parse_ident_common(true)
}
}
@@ -981,7 +987,7 @@
}
// Collect tokens because they are used during lowering to HIR.
- let expr = self.collect_tokens(|this| this.parse_expr())?;
+ let expr = self.collect_tokens_no_attrs(|this| this.parse_expr())?;
let span = expr.span;
match &expr.kind {
@@ -1004,12 +1010,12 @@
fn parse_or_use_outer_attributes(
&mut self,
- already_parsed_attrs: Option<AttrVec>,
- ) -> PResult<'a, AttrVec> {
+ already_parsed_attrs: Option<AttrWrapper>,
+ ) -> PResult<'a, AttrWrapper> {
if let Some(attrs) = already_parsed_attrs {
Ok(attrs)
} else {
- self.parse_outer_attributes().map(|a| a.into())
+ self.parse_outer_attributes()
}
}
@@ -1196,12 +1202,8 @@
}
/// Parses `extern string_literal?`.
- fn parse_extern(&mut self) -> PResult<'a, Extern> {
- Ok(if self.eat_keyword(kw::Extern) {
- Extern::from_abi(self.parse_abi())
- } else {
- Extern::None
- })
+ fn parse_extern(&mut self) -> Extern {
+ if self.eat_keyword(kw::Extern) { Extern::from_abi(self.parse_abi()) } else { Extern::None }
}
/// Parses a string literal as an ABI spec.
@@ -1226,97 +1228,17 @@
}
}
- pub fn collect_tokens<R: HasTokens>(
+ pub fn collect_tokens_no_attrs<R: AstLike>(
&mut self,
f: impl FnOnce(&mut Self) -> PResult<'a, R>,
) -> PResult<'a, R> {
- self.collect_tokens_trailing_token(|this| Ok((f(this)?, TrailingToken::None)))
- }
-
- /// Records all tokens consumed by the provided callback,
- /// including the current token. These tokens are collected
- /// into a `LazyTokenStream`, and returned along with the result
- /// of the callback.
- ///
- /// Note: If your callback consumes an opening delimiter
- /// (including the case where you call `collect_tokens`
- /// when the current token is an opening delimeter),
- /// you must also consume the corresponding closing delimiter.
- ///
- /// That is, you can consume
- /// `something ([{ }])` or `([{}])`, but not `([{}]`
- ///
- /// This restriction shouldn't be an issue in practice,
- /// since this function is used to record the tokens for
- /// a parsed AST item, which always has matching delimiters.
- pub fn collect_tokens_trailing_token<R: HasTokens>(
- &mut self,
- f: impl FnOnce(&mut Self) -> PResult<'a, (R, TrailingToken)>,
- ) -> PResult<'a, R> {
- let start_token = (self.token.clone(), self.token_spacing);
- let cursor_snapshot = self.token_cursor.clone();
-
- let (mut ret, trailing_token) = f(self)?;
-
- // Produces a `TokenStream` on-demand. Using `cursor_snapshot`
- // and `num_calls`, we can reconstruct the `TokenStream` seen
- // by the callback. This allows us to avoid producing a `TokenStream`
- // if it is never needed - for example, a captured `macro_rules!`
- // argument that is never passed to a proc macro.
- // In practice token stream creation happens rarely compared to
- // calls to `collect_tokens` (see some statistics in #78736),
- // so we are doing as little up-front work as possible.
- //
- // This also makes `Parser` very cheap to clone, since
- // there is no intermediate collection buffer to clone.
- #[derive(Clone)]
- struct LazyTokenStreamImpl {
- start_token: (Token, Spacing),
- cursor_snapshot: TokenCursor,
- num_calls: usize,
- desugar_doc_comments: bool,
- append_unglued_token: Option<TreeAndSpacing>,
- }
- impl CreateTokenStream for LazyTokenStreamImpl {
- fn create_token_stream(&self) -> TokenStream {
- // The token produced by the final call to `next` or `next_desugared`
- // was not actually consumed by the callback. The combination
- // of chaining the initial token and using `take` produces the desired
- // result - we produce an empty `TokenStream` if no calls were made,
- // and omit the final token otherwise.
- let mut cursor_snapshot = self.cursor_snapshot.clone();
- let tokens = std::iter::once(self.start_token.clone())
- .chain((0..self.num_calls).map(|_| {
- if self.desugar_doc_comments {
- cursor_snapshot.next_desugared()
- } else {
- cursor_snapshot.next()
- }
- }))
- .take(self.num_calls);
-
- make_token_stream(tokens, self.append_unglued_token.clone())
- }
- }
-
- let mut num_calls = self.token_cursor.num_next_calls - cursor_snapshot.num_next_calls;
- match trailing_token {
- TrailingToken::None => {}
- TrailingToken::Semi => {
- assert_eq!(self.token.kind, token::Semi);
- num_calls += 1;
- }
- }
-
- let lazy_impl = LazyTokenStreamImpl {
- start_token,
- num_calls,
- cursor_snapshot,
- desugar_doc_comments: self.desugar_doc_comments,
- append_unglued_token: self.token_cursor.append_unglued_token.clone(),
- };
- ret.finalize_tokens(LazyTokenStream::new(lazy_impl));
- Ok(ret)
+ // The only reason to call `collect_tokens_no_attrs` is if you want tokens, so use
+ // `ForceCollect::Yes`
+ self.collect_tokens_trailing_token(
+ AttrWrapper::empty(),
+ ForceCollect::Yes,
+ |this, _attrs| Ok((f(this)?, TrailingToken::None)),
+ )
}
/// `::{` or `::*`
@@ -1365,60 +1287,3 @@
}
}
}
-
-/// Converts a flattened iterator of tokens (including open and close delimiter tokens)
-/// into a `TokenStream`, creating a `TokenTree::Delimited` for each matching pair
-/// of open and close delims.
-fn make_token_stream(
- tokens: impl Iterator<Item = (Token, Spacing)>,
- append_unglued_token: Option<TreeAndSpacing>,
-) -> TokenStream {
- #[derive(Debug)]
- struct FrameData {
- open: Span,
- inner: Vec<(TokenTree, Spacing)>,
- }
- let mut stack = vec![FrameData { open: DUMMY_SP, inner: vec![] }];
- for (token, spacing) in tokens {
- match token {
- Token { kind: TokenKind::OpenDelim(_), span } => {
- stack.push(FrameData { open: span, inner: vec![] });
- }
- Token { kind: TokenKind::CloseDelim(delim), span } => {
- let frame_data = stack.pop().expect("Token stack was empty!");
- let dspan = DelimSpan::from_pair(frame_data.open, span);
- let stream = TokenStream::new(frame_data.inner);
- let delimited = TokenTree::Delimited(dspan, delim, stream);
- stack
- .last_mut()
- .unwrap_or_else(|| panic!("Bottom token frame is missing for tokens!"))
- .inner
- .push((delimited, Spacing::Alone));
- }
- token => {
- stack
- .last_mut()
- .expect("Bottom token frame is missing!")
- .inner
- .push((TokenTree::Token(token), spacing));
- }
- }
- }
- let mut final_buf = stack.pop().expect("Missing final buf!");
- final_buf.inner.extend(append_unglued_token);
- assert!(stack.is_empty(), "Stack should be empty: final_buf={:?} stack={:?}", final_buf, stack);
- TokenStream::new(final_buf.inner)
-}
-
-#[macro_export]
-macro_rules! maybe_collect_tokens {
- ($self:ident, $force_collect:expr, $attrs:expr, $f:expr) => {
- if matches!($force_collect, ForceCollect::Yes)
- || $crate::parser::attr::maybe_needs_tokens($attrs)
- {
- $self.collect_tokens_trailing_token($f)
- } else {
- Ok($f($self)?.0)
- }
- };
-}
diff --git a/compiler/rustc_parse/src/parser/nonterminal.rs b/compiler/rustc_parse/src/parser/nonterminal.rs
index 6e25209..a84ae51 100644
--- a/compiler/rustc_parse/src/parser/nonterminal.rs
+++ b/compiler/rustc_parse/src/parser/nonterminal.rs
@@ -108,7 +108,9 @@
}
},
NonterminalKind::Block => {
- token::NtBlock(self.collect_tokens(|this| this.parse_block())?)
+ // While a block *expression* may have attributes (e.g. `#[my_attr] { ... }`),
+ // the ':block' matcher does not support them
+ token::NtBlock(self.collect_tokens_no_attrs(|this| this.parse_block())?)
}
NonterminalKind::Stmt => match self.parse_stmt(ForceCollect::Yes)? {
Some(s) => token::NtStmt(s),
@@ -117,19 +119,41 @@
}
},
NonterminalKind::Pat2018 { .. } | NonterminalKind::Pat2021 { .. } => {
- token::NtPat(self.collect_tokens(|this| match kind {
- NonterminalKind::Pat2018 { .. } => this.parse_pat(None),
+ token::NtPat(self.collect_tokens_no_attrs(|this| match kind {
+ NonterminalKind::Pat2018 { .. } => this.parse_pat_no_top_alt(None),
NonterminalKind::Pat2021 { .. } => {
- this.parse_top_pat(GateOr::Yes, RecoverComma::No)
+ this.parse_pat_allow_top_alt(None, GateOr::Yes, RecoverComma::No)
}
_ => unreachable!(),
})?)
}
- NonterminalKind::Expr => token::NtExpr(self.collect_tokens(|this| this.parse_expr())?),
- NonterminalKind::Literal => {
- token::NtLiteral(self.collect_tokens(|this| this.parse_literal_maybe_minus())?)
+
+ // If there are attributes present, then `parse_expr` will end up collecting tokens,
+ // turning the outer `collect_tokens_no_attrs` into a no-op due to the already present
+ // tokens. If there are *not* attributes present, then the outer
+ // `collect_tokens_no_attrs` ensures that we end up collecting tokens for the
+ // expression.
+ //
+ // This is less efficient than it could be, since the outer `collect_tokens_no_attrs`
+ // still needs to snapshot the `TokenCursor` before calling `parse_expr`, even when
+ // `parse_expr` will end up collecting tokens. Ideally, this would work more like
+ // `parse_item`, and take in a `ForceCollect` parameter. However, this would require
+ // adding a `ForceCollect` parameter in a bunch of places in expression parsing
+ // for little gain. If the perf impact from this turns out to be noticeable, we should
+ // revisit this approach.
+ NonterminalKind::Expr => {
+ token::NtExpr(self.collect_tokens_no_attrs(|this| this.parse_expr())?)
}
- NonterminalKind::Ty => token::NtTy(self.collect_tokens(|this| this.parse_ty())?),
+ NonterminalKind::Literal => {
+ // The `:literal` matcher does not support attributes
+ token::NtLiteral(
+ self.collect_tokens_no_attrs(|this| this.parse_literal_maybe_minus())?,
+ )
+ }
+
+ NonterminalKind::Ty => {
+ token::NtTy(self.collect_tokens_no_attrs(|this| this.parse_ty())?)
+ }
// this could be handled like a token, since it is one
NonterminalKind::Ident => {
if let Some((ident, is_raw)) = get_macro_ident(&self.token) {
@@ -141,15 +165,15 @@
return Err(self.struct_span_err(self.token.span, msg));
}
}
- NonterminalKind::Path => {
- token::NtPath(self.collect_tokens(|this| this.parse_path(PathStyle::Type))?)
- }
+ NonterminalKind::Path => token::NtPath(
+ self.collect_tokens_no_attrs(|this| this.parse_path(PathStyle::Type))?,
+ ),
NonterminalKind::Meta => {
- token::NtMeta(P(self.collect_tokens(|this| this.parse_attr_item(false))?))
+ token::NtMeta(P(self.collect_tokens_no_attrs(|this| this.parse_attr_item(false))?))
}
NonterminalKind::TT => token::NtTT(self.parse_token_tree()),
NonterminalKind::Vis => token::NtVis(
- self.collect_tokens(|this| this.parse_visibility(FollowedByType::Yes))?,
+ self.collect_tokens_no_attrs(|this| this.parse_visibility(FollowedByType::Yes))?,
),
NonterminalKind::Lifetime => {
if self.check_lifetime() {
diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs
index d888514..51c01f5 100644
--- a/compiler/rustc_parse/src/parser/pat.rs
+++ b/compiler/rustc_parse/src/parser/pat.rs
@@ -1,9 +1,9 @@
-use super::{Parser, PathStyle};
+use super::{ForceCollect, Parser, PathStyle, TrailingToken};
use crate::{maybe_recover_from_interpolated_ty_qpath, maybe_whole};
use rustc_ast::mut_visit::{noop_visit_pat, MutVisitor};
use rustc_ast::ptr::P;
use rustc_ast::token;
-use rustc_ast::{self as ast, AttrVec, Attribute, FieldPat, MacCall, Pat, PatKind, RangeEnd};
+use rustc_ast::{self as ast, AttrVec, Attribute, MacCall, Pat, PatField, PatKind, RangeEnd};
use rustc_ast::{BindingMode, Expr, ExprKind, Mutability, Path, QSelf, RangeSyntax};
use rustc_ast_pretty::pprust;
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, PResult};
@@ -19,98 +19,112 @@
/// Whether or not an or-pattern should be gated when occurring in the current context.
#[derive(PartialEq, Clone, Copy)]
-pub(super) enum GateOr {
+pub enum GateOr {
Yes,
No,
}
/// Whether or not to recover a `,` when parsing or-patterns.
#[derive(PartialEq, Copy, Clone)]
-pub(super) enum RecoverComma {
+pub enum RecoverComma {
Yes,
No,
}
+/// The result of `eat_or_separator`. We want to distinguish which case we are in to avoid
+/// emitting duplicate diagnostics.
+#[derive(Debug, Clone, Copy)]
+enum EatOrResult {
+ /// We recovered from a trailing vert.
+ TrailingVert,
+ /// We ate an `|` (or `||` and recovered).
+ AteOr,
+ /// We did not eat anything (i.e. the current token is not `|` or `||`).
+ None,
+}
+
impl<'a> Parser<'a> {
/// Parses a pattern.
///
/// Corresponds to `pat<no_top_alt>` in RFC 2535 and does not admit or-patterns
/// at the top level. Used when parsing the parameters of lambda expressions,
/// functions, function pointers, and `pat` macro fragments.
- pub fn parse_pat(&mut self, expected: Expected) -> PResult<'a, P<Pat>> {
+ pub fn parse_pat_no_top_alt(&mut self, expected: Expected) -> PResult<'a, P<Pat>> {
self.parse_pat_with_range_pat(true, expected)
}
- /// Entry point to the main pattern parser.
+ /// Parses a pattern.
+ ///
/// Corresponds to `top_pat` in RFC 2535 and allows or-pattern at the top level.
- pub(super) fn parse_top_pat(
- &mut self,
- gate_or: GateOr,
- rc: RecoverComma,
- ) -> PResult<'a, P<Pat>> {
- // Allow a '|' before the pats (RFCs 1925, 2530, and 2535).
- let gated_leading_vert = self.eat_or_separator(None) && gate_or == GateOr::Yes;
- let leading_vert_span = self.prev_token.span;
-
- // Parse the possibly-or-pattern.
- let pat = self.parse_pat_with_or(None, gate_or, rc)?;
-
- // If we parsed a leading `|` which should be gated,
- // and no other gated or-pattern has been parsed thus far,
- // then we should really gate the leading `|`.
- // This complicated procedure is done purely for diagnostics UX.
- if gated_leading_vert && self.sess.gated_spans.is_ungated(sym::or_patterns) {
- self.sess.gated_spans.gate(sym::or_patterns, leading_vert_span);
- }
-
- Ok(pat)
- }
-
- /// Parse the pattern for a function or function pointer parameter.
- /// Special recovery is provided for or-patterns and leading `|`.
- pub(super) fn parse_fn_param_pat(&mut self) -> PResult<'a, P<Pat>> {
- self.recover_leading_vert(None, "not allowed in a parameter pattern");
- let pat = self.parse_pat_with_or(PARAM_EXPECTED, GateOr::No, RecoverComma::No)?;
-
- if let PatKind::Or(..) = &pat.kind {
- self.ban_illegal_fn_param_or_pat(&pat);
- }
-
- Ok(pat)
- }
-
- /// Ban `A | B` immediately in a parameter pattern and suggest wrapping in parens.
- fn ban_illegal_fn_param_or_pat(&self, pat: &Pat) {
- let msg = "wrap the pattern in parenthesis";
- let fix = format!("({})", pprust::pat_to_string(pat));
- self.struct_span_err(pat.span, "an or-pattern parameter must be wrapped in parenthesis")
- .span_suggestion(pat.span, msg, fix, Applicability::MachineApplicable)
- .emit();
- }
-
- /// Parses a pattern, that may be a or-pattern (e.g. `Foo | Bar` in `Some(Foo | Bar)`).
- /// Corresponds to `pat<allow_top_alt>` in RFC 2535.
- fn parse_pat_with_or(
+ /// Used for parsing patterns in all cases when `pat<no_top_alt>` is not used.
+ ///
+ /// Note that after the FCP in <https://github.com/rust-lang/rust/issues/81415>,
+ /// a leading vert is allowed in nested or-patterns, too. This allows us to
+ /// simplify the grammar somewhat.
+ pub fn parse_pat_allow_top_alt(
&mut self,
expected: Expected,
gate_or: GateOr,
rc: RecoverComma,
) -> PResult<'a, P<Pat>> {
+ self.parse_pat_allow_top_alt_inner(expected, gate_or, rc).map(|(pat, _)| pat)
+ }
+
+ /// Returns the pattern and a bool indicating whether we recovered from a trailing vert (true =
+ /// recovered).
+ fn parse_pat_allow_top_alt_inner(
+ &mut self,
+ expected: Expected,
+ gate_or: GateOr,
+ rc: RecoverComma,
+ ) -> PResult<'a, (P<Pat>, bool)> {
+ // Keep track of whether we recovered from a trailing vert so that we can avoid duplicated
+ // suggestions (which bothers rustfix).
+ //
+ // Allow a '|' before the pats (RFCs 1925, 2530, and 2535).
+ let (leading_vert_span, mut trailing_vert) = match self.eat_or_separator(None) {
+ EatOrResult::AteOr => (Some(self.prev_token.span), false),
+ EatOrResult::TrailingVert => (None, true),
+ EatOrResult::None => (None, false),
+ };
+
// Parse the first pattern (`p_0`).
- let first_pat = self.parse_pat(expected)?;
+ let first_pat = self.parse_pat_no_top_alt(expected)?;
self.maybe_recover_unexpected_comma(first_pat.span, rc, gate_or)?;
// If the next token is not a `|`,
// this is not an or-pattern and we should exit here.
if !self.check(&token::BinOp(token::Or)) && self.token != token::OrOr {
- return Ok(first_pat);
+ // If we parsed a leading `|` which should be gated,
+ // then we should really gate the leading `|`.
+ // This complicated procedure is done purely for diagnostics UX.
+ if let Some(leading_vert_span) = leading_vert_span {
+ if gate_or == GateOr::Yes && self.sess.gated_spans.is_ungated(sym::or_patterns) {
+ self.sess.gated_spans.gate(sym::or_patterns, leading_vert_span);
+ }
+
+ // If there was a leading vert, treat this as an or-pattern. This improves
+ // diagnostics.
+ let span = leading_vert_span.to(self.prev_token.span);
+ return Ok((self.mk_pat(span, PatKind::Or(vec![first_pat])), trailing_vert));
+ }
+
+ return Ok((first_pat, trailing_vert));
}
// Parse the patterns `p_1 | ... | p_n` where `n > 0`.
- let lo = first_pat.span;
+ let lo = leading_vert_span.unwrap_or(first_pat.span);
let mut pats = vec![first_pat];
- while self.eat_or_separator(Some(lo)) {
- let pat = self.parse_pat(expected).map_err(|mut err| {
+ loop {
+ match self.eat_or_separator(Some(lo)) {
+ EatOrResult::AteOr => {}
+ EatOrResult::None => break,
+ EatOrResult::TrailingVert => {
+ trailing_vert = true;
+ break;
+ }
+ }
+ let pat = self.parse_pat_no_top_alt(expected).map_err(|mut err| {
err.span_label(lo, WHILE_PARSING_OR_MSG);
err
})?;
@@ -124,24 +138,103 @@
self.sess.gated_spans.gate(sym::or_patterns, or_pattern_span);
}
- Ok(self.mk_pat(or_pattern_span, PatKind::Or(pats)))
+ Ok((self.mk_pat(or_pattern_span, PatKind::Or(pats)), trailing_vert))
+ }
+
+ /// Parse a pattern and (maybe) a `Colon` in positions where a pattern may be followed by a
+ /// type annotation (e.g. for `let` bindings or `fn` params).
+ ///
+ /// Generally, this corresponds to `pat_no_top_alt` followed by an optional `Colon`. It will
+ /// eat the `Colon` token if one is present.
+ ///
+ /// The return value represents the parsed pattern and `true` if a `Colon` was parsed (`false`
+ /// otherwise).
+ pub(super) fn parse_pat_before_ty(
+ &mut self,
+ expected: Expected,
+ gate_or: GateOr,
+ rc: RecoverComma,
+ syntax_loc: &str,
+ ) -> PResult<'a, (P<Pat>, bool)> {
+ // We use `parse_pat_allow_top_alt` regardless of whether we actually want top-level
+ // or-patterns so that we can detect when a user tries to use it. This allows us to print a
+ // better error message.
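+ // E.g. `let A | B = x;` parses here and is then rejected below with a suggestion to wrap it in parentheses.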
+ let (pat, trailing_vert) = self.parse_pat_allow_top_alt_inner(expected, gate_or, rc)?;
+ let colon = self.eat(&token::Colon);
+
+ if let PatKind::Or(pats) = &pat.kind {
+ let msg = format!("top-level or-patterns are not allowed in {}", syntax_loc);
+ let (help, fix) = if pats.len() == 1 {
+ // If all we have is a leading vert, then print a special message. This is the case
+ // if `parse_pat_allow_top_alt` returns an or-pattern with one variant.
+ let msg = "remove the `|`";
+ let fix = pprust::pat_to_string(&pat);
+ (msg, fix)
+ } else {
+ let msg = "wrap the pattern in parentheses";
+ let fix = format!("({})", pprust::pat_to_string(&pat));
+ (msg, fix)
+ };
+
+ if trailing_vert {
+ // We already emitted an error and suggestion to remove the trailing vert. Don't
+ // emit again.
+ self.sess.span_diagnostic.delay_span_bug(pat.span, &msg);
+ } else {
+ self.struct_span_err(pat.span, &msg)
+ .span_suggestion(pat.span, help, fix, Applicability::MachineApplicable)
+ .emit();
+ }
+ }
+
+ Ok((pat, colon))
+ }
+
+ /// Parse the pattern for a function or function pointer parameter, followed by a colon.
+ ///
+ /// The return value represents the parsed pattern and `true` if a `Colon` was parsed (`false`
+ /// otherwise).
+ pub(super) fn parse_fn_param_pat_colon(&mut self) -> PResult<'a, (P<Pat>, bool)> {
+ // In order to get good UX, we first recover in the case of a leading vert for an illegal
+ // top-level or-pat. Normally, this means recovering both `|` and `||`, but in this case,
+ // a leading `||` probably doesn't indicate an or-pattern attempt, so we handle that
+ // separately.
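+ // E.g. `fn f(|| x: i32) {}` hits the dedicated `||` error below.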
+ if let token::OrOr = self.token.kind {
+ let span = self.token.span;
+ let mut err = self.struct_span_err(span, "unexpected `||` before function parameter");
+ err.span_suggestion(
+ span,
+ "remove the `||`",
+ String::new(),
+ Applicability::MachineApplicable,
+ );
+ err.note("alternatives in or-patterns are separated with `|`, not `||`");
+ err.emit();
+ self.bump();
+ }
+
+ self.parse_pat_before_ty(
+ PARAM_EXPECTED,
+ GateOr::No,
+ RecoverComma::No,
+ "function parameters",
+ )
}
/// Eat the or-pattern `|` separator.
/// If instead a `||` token is encountered, recover and pretend we parsed `|`.
- fn eat_or_separator(&mut self, lo: Option<Span>) -> bool {
+ fn eat_or_separator(&mut self, lo: Option<Span>) -> EatOrResult {
if self.recover_trailing_vert(lo) {
- return false;
- }
-
- match self.token.kind {
- token::OrOr => {
- // Found `||`; Recover and pretend we parsed `|`.
- self.ban_unexpected_or_or(lo);
- self.bump();
- true
- }
- _ => self.eat(&token::BinOp(token::Or)),
+ EatOrResult::TrailingVert
+ } else if matches!(self.token.kind, token::OrOr) {
+ // Found `||`; Recover and pretend we parsed `|`.
+ self.ban_unexpected_or_or(lo);
+ self.bump();
+ EatOrResult::AteOr
+ } else if self.eat(&token::BinOp(token::Or)) {
+ EatOrResult::AteOr
+ } else {
+ EatOrResult::None
}
}
@@ -157,14 +250,14 @@
matches!(
&token.uninterpolate().kind,
token::FatArrow // e.g. `a | => 0,`.
- | token::Ident(kw::If, false) // e.g. `a | if expr`.
- | token::Eq // e.g. `let a | = 0`.
- | token::Semi // e.g. `let a |;`.
- | token::Colon // e.g. `let a | :`.
- | token::Comma // e.g. `let (a |,)`.
- | token::CloseDelim(token::Bracket) // e.g. `let [a | ]`.
- | token::CloseDelim(token::Paren) // e.g. `let (a | )`.
- | token::CloseDelim(token::Brace) // e.g. `let A { f: a | }`.
+ | token::Ident(kw::If, false) // e.g. `a | if expr`.
+ | token::Eq // e.g. `let a | = 0`.
+ | token::Semi // e.g. `let a |;`.
+ | token::Colon // e.g. `let a | :`.
+ | token::Comma // e.g. `let (a |,)`.
+ | token::CloseDelim(token::Bracket) // e.g. `let [a | ]`.
+ | token::CloseDelim(token::Paren) // e.g. `let (a | )`.
+ | token::CloseDelim(token::Brace) // e.g. `let A { f: a | }`.
)
});
match (is_end_ahead, &self.token.kind) {
@@ -179,7 +272,7 @@
/// We have parsed `||` instead of `|`. Error and suggest `|` instead.
fn ban_unexpected_or_or(&mut self, lo: Option<Span>) {
- let mut err = self.struct_span_err(self.token.span, "unexpected token `||` after pattern");
+ let mut err = self.struct_span_err(self.token.span, "unexpected token `||` in pattern");
err.span_suggestion(
self.token.span,
"use a single `|` to separate multiple alternative patterns",
@@ -244,7 +337,7 @@
/// sequence of patterns until `)` is reached.
fn skip_pat_list(&mut self) -> PResult<'a, ()> {
while !self.check(&token::CloseDelim(token::Paren)) {
- self.parse_pat(None)?;
+ self.parse_pat_no_top_alt(None)?;
if !self.eat(&token::Comma) {
return Ok(());
}
@@ -252,22 +345,6 @@
Ok(())
}
- /// Recursive possibly-or-pattern parser with recovery for an erroneous leading `|`.
- /// See `parse_pat_with_or` for details on parsing or-patterns.
- fn parse_pat_with_or_inner(&mut self) -> PResult<'a, P<Pat>> {
- self.recover_leading_vert(None, "only allowed in a top-level pattern");
- self.parse_pat_with_or(None, GateOr::Yes, RecoverComma::No)
- }
-
- /// Recover if `|` or `||` is here.
- /// The user is thinking that a leading `|` is allowed in this position.
- fn recover_leading_vert(&mut self, lo: Option<Span>, ctx: &str) {
- if let token::BinOp(token::Or) | token::OrOr = self.token.kind {
- self.ban_illegal_vert(lo, "leading", ctx);
- self.bump();
- }
- }
-
/// A `|` or possibly `||` token shouldn't be here. Ban it.
fn ban_illegal_vert(&mut self, lo: Option<Span>, pos: &str, ctx: &str) {
let span = self.token.span;
@@ -305,8 +382,9 @@
self.parse_pat_tuple_or_parens()?
} else if self.check(&token::OpenDelim(token::Bracket)) {
// Parse `[pat, pat,...]` as a slice pattern.
- let (pats, _) =
- self.parse_delim_comma_seq(token::Bracket, |p| p.parse_pat_with_or_inner())?;
+ let (pats, _) = self.parse_delim_comma_seq(token::Bracket, |p| {
+ p.parse_pat_allow_top_alt(None, GateOr::Yes, RecoverComma::No)
+ })?;
PatKind::Slice(pats)
} else if self.check(&token::DotDot) && !self.is_pat_range_end_start(1) {
// A rest pattern `..`.
@@ -429,7 +507,7 @@
// At this point we attempt to parse `@ $pat_rhs` and emit an error.
self.bump(); // `@`
- let mut rhs = self.parse_pat(None)?;
+ let mut rhs = self.parse_pat_no_top_alt(None)?;
let sp = lhs.span.to(rhs.span);
if let PatKind::Ident(_, _, ref mut sub @ None) = rhs.kind {
@@ -518,8 +596,9 @@
/// Parse a tuple or parenthesis pattern.
fn parse_pat_tuple_or_parens(&mut self) -> PResult<'a, PatKind> {
- let (fields, trailing_comma) =
- self.parse_paren_comma_seq(|p| p.parse_pat_with_or_inner())?;
+ let (fields, trailing_comma) = self.parse_paren_comma_seq(|p| {
+ p.parse_pat_allow_top_alt(None, GateOr::Yes, RecoverComma::No)
+ })?;
// Here, `(pat,)` is a tuple pattern.
// For backward compatibility, `(..)` is a tuple pattern as well.
@@ -548,7 +627,7 @@
}
// Parse the pattern we hope to be an identifier.
- let mut pat = self.parse_pat(Some("identifier"))?;
+ let mut pat = self.parse_pat_no_top_alt(Some("identifier"))?;
// If we don't have `mut $ident (@ pat)?`, error.
if let PatKind::Ident(BindingMode::ByValue(m @ Mutability::Not), ..) = &mut pat.kind {
@@ -793,7 +872,7 @@
fn parse_pat_ident(&mut self, binding_mode: BindingMode) -> PResult<'a, PatKind> {
let ident = self.parse_ident()?;
let sub = if self.eat(&token::At) {
- Some(self.parse_pat(Some("binding pattern"))?)
+ Some(self.parse_pat_no_top_alt(Some("binding pattern"))?)
} else {
None
};
@@ -832,7 +911,9 @@
if qself.is_some() {
return self.error_qpath_before_pat(&path, "(");
}
- let (fields, _) = self.parse_paren_comma_seq(|p| p.parse_pat_with_or_inner())?;
+ let (fields, _) = self.parse_paren_comma_seq(|p| {
+ p.parse_pat_allow_top_alt(None, GateOr::Yes, RecoverComma::No)
+ })?;
Ok(PatKind::TupleStruct(path, fields))
}
@@ -847,7 +928,7 @@
}
/// Parses the fields of a struct-like pattern.
- fn parse_pat_fields(&mut self) -> PResult<'a, (Vec<FieldPat>, bool)> {
+ fn parse_pat_fields(&mut self) -> PResult<'a, (Vec<PatField>, bool)> {
let mut fields = Vec::new();
let mut etc = false;
let mut ate_comma = true;
@@ -938,16 +1019,24 @@
}
}
- fields.push(match self.parse_pat_field(lo, attrs) {
- Ok(field) => field,
- Err(err) => {
- if let Some(mut delayed_err) = delayed_err {
- delayed_err.emit();
- }
- return Err(err);
- }
- });
- ate_comma = self.eat(&token::Comma);
+ let field =
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let field = match this.parse_pat_field(lo, attrs) {
+ Ok(field) => Ok(field),
+ Err(err) => {
+ if let Some(mut delayed_err) = delayed_err.take() {
+ delayed_err.emit();
+ }
+ return Err(err);
+ }
+ }?;
+ ate_comma = this.eat(&token::Comma);
+ // We just ate a comma, so there's no need to use
+ // `TrailingToken::MaybeComma`
+ Ok((field, TrailingToken::None))
+ })?;
+
+ fields.push(field)
}
if let Some(mut err) = delayed_err {
@@ -983,14 +1072,14 @@
.emit();
}
- fn parse_pat_field(&mut self, lo: Span, attrs: Vec<Attribute>) -> PResult<'a, FieldPat> {
+ fn parse_pat_field(&mut self, lo: Span, attrs: Vec<Attribute>) -> PResult<'a, PatField> {
// Check if a colon exists one ahead. This means we're parsing a fieldname.
let hi;
let (subpat, fieldname, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
// Parsing a pattern of the form `fieldname: pat`.
let fieldname = self.parse_field_name()?;
self.bump();
- let pat = self.parse_pat_with_or_inner()?;
+ let pat = self.parse_pat_allow_top_alt(None, GateOr::Yes, RecoverComma::No)?;
hi = pat.span;
(pat, fieldname, false)
} else {
@@ -999,7 +1088,7 @@
let boxed_span = self.token.span;
let is_ref = self.eat_keyword(kw::Ref);
let is_mut = self.eat_keyword(kw::Mut);
- let fieldname = self.parse_ident()?;
+ let fieldname = self.parse_field_name()?;
hi = self.prev_token.span;
let bind_type = match (is_ref, is_mut) {
@@ -1015,7 +1104,7 @@
(subpat, fieldname, true)
};
- Ok(FieldPat {
+ Ok(PatField {
ident: fieldname,
pat: subpat,
is_shorthand,
diff --git a/compiler/rustc_parse/src/parser/path.rs b/compiler/rustc_parse/src/parser/path.rs
index 6b7059e..9cc600d 100644
--- a/compiler/rustc_parse/src/parser/path.rs
+++ b/compiler/rustc_parse/src/parser/path.rs
@@ -545,7 +545,7 @@
/// Parse a generic argument in a path segment.
/// This does not include constraints, e.g., `Item = u8`, which is handled in `parse_angle_arg`.
- fn parse_generic_arg(&mut self) -> PResult<'a, Option<GenericArg>> {
+ pub(super) fn parse_generic_arg(&mut self) -> PResult<'a, Option<GenericArg>> {
let start = self.token.span;
let arg = if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
// Parse lifetime argument.
diff --git a/compiler/rustc_parse/src/parser/stmt.rs b/compiler/rustc_parse/src/parser/stmt.rs
index 8373f6a..92e67e7 100644
--- a/compiler/rustc_parse/src/parser/stmt.rs
+++ b/compiler/rustc_parse/src/parser/stmt.rs
@@ -3,16 +3,18 @@
use super::expr::LhsExpr;
use super::pat::{GateOr, RecoverComma};
use super::path::PathStyle;
-use super::{BlockMode, ForceCollect, Parser, Restrictions, SemiColonMode, TrailingToken};
-use crate::{maybe_collect_tokens, maybe_whole};
+use super::TrailingToken;
+use super::{AttrWrapper, BlockMode, ForceCollect, Parser, Restrictions, SemiColonMode};
+use crate::maybe_whole;
use rustc_ast as ast;
-use rustc_ast::attr::HasAttrs;
use rustc_ast::ptr::P;
use rustc_ast::token::{self, TokenKind};
use rustc_ast::util::classify;
+use rustc_ast::AstLike;
use rustc_ast::{AttrStyle, AttrVec, Attribute, MacCall, MacCallStmt, MacStmtStyle};
-use rustc_ast::{Block, BlockCheckMode, Expr, ExprKind, Local, Stmt, StmtKind, DUMMY_NODE_ID};
+use rustc_ast::{Block, BlockCheckMode, Expr, ExprKind, Local, Stmt};
+use rustc_ast::{StmtKind, DUMMY_NODE_ID};
use rustc_errors::{Applicability, PResult};
use rustc_span::source_map::{BytePos, Span};
use rustc_span::symbol::{kw, sym};
@@ -33,35 +35,52 @@
/// If `force_capture` is true, forces collection of tokens regardless of whether
/// or not we have attributes
- fn parse_stmt_without_recovery(
+ crate fn parse_stmt_without_recovery(
&mut self,
capture_semi: bool,
force_collect: ForceCollect,
) -> PResult<'a, Option<Stmt>> {
- let mut attrs = self.parse_outer_attributes()?;
+ let attrs = self.parse_outer_attributes()?;
let lo = self.token.span;
- maybe_whole!(self, NtStmt, |stmt| {
- let mut stmt = stmt;
- stmt.visit_attrs(|stmt_attrs| {
- mem::swap(stmt_attrs, &mut attrs);
- stmt_attrs.extend(attrs);
- });
- Some(stmt)
- });
+ // Don't use `maybe_whole` so that we have precise control
+ // over when we bump the parser
+ if let token::Interpolated(nt) = &self.token.kind {
+ if let token::NtStmt(stmt) = &**nt {
+ let mut stmt = stmt.clone();
+ return self.collect_tokens_trailing_token(
+ attrs,
+ force_collect,
+ |this, mut attrs| {
+ stmt.visit_attrs(|stmt_attrs| {
+ mem::swap(stmt_attrs, &mut attrs);
+ stmt_attrs.extend(attrs);
+ });
+ // Make sure we capture the token::Interpolated
+ this.bump();
+ Ok((Some(stmt), TrailingToken::None))
+ },
+ );
+ }
+ }
Ok(Some(if self.token.is_keyword(kw::Let) {
- self.parse_local_mk(lo, attrs.into(), capture_semi, force_collect)?
+ self.parse_local_mk(lo, attrs, capture_semi, force_collect)?
} else if self.is_kw_followed_by_ident(kw::Mut) {
- self.recover_stmt_local(lo, attrs.into(), "missing keyword", "let mut")?
+ self.recover_stmt_local(
+ lo,
+ attrs.take_for_recovery().into(),
+ "missing keyword",
+ "let mut",
+ )?
} else if self.is_kw_followed_by_ident(kw::Auto) {
self.bump(); // `auto`
let msg = "write `let` instead of `auto` to introduce a new variable";
- self.recover_stmt_local(lo, attrs.into(), msg, "let")?
+ self.recover_stmt_local(lo, attrs.take_for_recovery().into(), msg, "let")?
} else if self.is_kw_followed_by_ident(sym::var) {
self.bump(); // `var`
let msg = "write `let` instead of `var` to introduce a new variable";
- self.recover_stmt_local(lo, attrs.into(), msg, "let")?
+ self.recover_stmt_local(lo, attrs.take_for_recovery().into(), msg, "let")?
} else if self.check_path() && !self.token.is_qpath_start() && !self.is_path_start_item() {
// We have avoided contextual keywords like `union`, items with `crate` visibility,
// or `auto trait` items. We aim to parse an arbitrary path `a::b` but not something
@@ -75,14 +94,14 @@
self.mk_stmt(lo.to(item.span), StmtKind::Item(P(item)))
} else if self.eat(&token::Semi) {
// Do not attempt to parse an expression if we're done here.
- self.error_outer_attrs(&attrs);
+ self.error_outer_attrs(&attrs.take_for_recovery());
self.mk_stmt(lo, StmtKind::Empty)
} else if self.token != token::CloseDelim(token::Brace) {
// Remainder are line-expr stmts.
- let e = self.parse_expr_res(Restrictions::STMT_EXPR, Some(attrs.into()))?;
+ let e = self.parse_expr_res(Restrictions::STMT_EXPR, Some(attrs))?;
self.mk_stmt(lo.to(e.span), StmtKind::Expr(e))
} else {
- self.error_outer_attrs(&attrs);
+ self.error_outer_attrs(&attrs.take_for_recovery());
return Ok(None);
}))
}
@@ -90,10 +109,10 @@
fn parse_stmt_path_start(
&mut self,
lo: Span,
- attrs: Vec<Attribute>,
+ attrs: AttrWrapper,
force_collect: ForceCollect,
) -> PResult<'a, Stmt> {
- maybe_collect_tokens!(self, force_collect, &attrs, |this: &mut Parser<'a>| {
+ self.collect_tokens_trailing_token(attrs, force_collect, |this, attrs| {
let path = this.parse_path(PathStyle::Expr)?;
if this.eat(&token::Not) {
@@ -113,7 +132,7 @@
};
let expr = this.with_res(Restrictions::STMT_EXPR, |this| {
- let expr = this.parse_dot_or_call_expr_with(expr, lo, attrs.into())?;
+ let expr = this.parse_dot_or_call_expr_with(expr, lo, attrs)?;
this.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(expr))
})?;
Ok((
@@ -142,7 +161,7 @@
// Since none of the above applied, this is an expression statement macro.
let e = self.mk_expr(lo.to(hi), ExprKind::MacCall(mac), AttrVec::new());
let e = self.maybe_recover_from_bad_qpath(e, true)?;
- let e = self.parse_dot_or_call_expr_with(e, lo, attrs)?;
+ let e = self.parse_dot_or_call_expr_with(e, lo, attrs.into())?;
let e = self.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(e))?;
StmtKind::Expr(e)
};
@@ -178,11 +197,11 @@
fn parse_local_mk(
&mut self,
lo: Span,
- attrs: AttrVec,
+ attrs: AttrWrapper,
capture_semi: bool,
force_collect: ForceCollect,
) -> PResult<'a, Stmt> {
- maybe_collect_tokens!(self, force_collect, &attrs, |this: &mut Parser<'a>| {
+ self.collect_tokens_trailing_token(attrs, force_collect, |this, attrs| {
this.expect_keyword(kw::Let)?;
let local = this.parse_local(attrs.into())?;
let trailing = if capture_semi && this.token.kind == token::Semi {
@@ -195,16 +214,17 @@
}
fn recover_local_after_let(&mut self, lo: Span, attrs: AttrVec) -> PResult<'a, Stmt> {
- let local = self.parse_local(attrs.into())?;
+ let local = self.parse_local(attrs)?;
Ok(self.mk_stmt(lo.to(self.prev_token.span), StmtKind::Local(local)))
}
/// Parses a local variable declaration.
fn parse_local(&mut self, attrs: AttrVec) -> PResult<'a, P<Local>> {
let lo = self.prev_token.span;
- let pat = self.parse_top_pat(GateOr::Yes, RecoverComma::Yes)?;
+ let (pat, colon) =
+ self.parse_pat_before_ty(None, GateOr::Yes, RecoverComma::Yes, "`let` bindings")?;
- let (err, ty) = if self.eat(&token::Colon) {
+ let (err, ty) = if colon {
// Save the state of the parser before parsing type normally, in case there is a `:`
// instead of an `=` typo.
let parser_snapshot_before_type = self.clone();
@@ -271,7 +291,7 @@
Ok(P(ast::Local { ty, pat, init, id: DUMMY_NODE_ID, span: lo.to(hi), attrs, tokens: None }))
}
- /// Parses the RHS of a local variable declaration (e.g., '= 14;').
+ /// Parses the RHS of a local variable declaration (e.g., `= 14;`).
fn parse_initializer(&mut self, eq_optional: bool) -> PResult<'a, Option<P<Expr>>> {
let eq_consumed = match self.token.kind {
token::BinOpEq(..) => {
@@ -286,6 +306,7 @@
"=".to_string(),
Applicability::MaybeIncorrect,
)
+ .help("if you meant to overwrite, remove the `let` binding")
.emit();
self.bump();
true
diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs
index 9553f5d..0f7b8eb 100644
--- a/compiler/rustc_parse/src/parser/ty.rs
+++ b/compiler/rustc_parse/src/parser/ty.rs
@@ -209,7 +209,7 @@
} else if self.eat_keyword(kw::Underscore) {
// A type to be inferred `_`
TyKind::Infer
- } else if self.check_fn_front_matter() {
+ } else if self.check_fn_front_matter(false) {
// Function pointer type
self.parse_ty_bare_fn(lo, Vec::new(), recover_return_sign)?
} else if self.check_keyword(kw::For) {
@@ -217,7 +217,7 @@
// `for<'lt> [unsafe] [extern "ABI"] fn (&'lt S) -> T`
// `for<'lt> Trait1<'lt> + Trait2 + 'a`
let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
- if self.check_fn_front_matter() {
+ if self.check_fn_front_matter(false) {
self.parse_ty_bare_fn(lo, lifetime_defs, recover_return_sign)?
} else {
let path = self.parse_path(PathStyle::Type)?;
@@ -360,12 +360,20 @@
}
Err(err) => return Err(err),
};
+
let ty = if self.eat(&token::Semi) {
- TyKind::Array(elt_ty, self.parse_anon_const_expr()?)
+ let mut length = self.parse_anon_const_expr()?;
+ if let Err(e) = self.expect(&token::CloseDelim(token::Bracket)) {
+ // Try to recover from `X<Y, ...>` when `X::<Y, ...>` works
+ self.check_mistyped_turbofish_with_multiple_type_params(e, &mut length.value)?;
+ self.expect(&token::CloseDelim(token::Bracket))?;
+ }
+ TyKind::Array(elt_ty, length)
} else {
+ self.expect(&token::CloseDelim(token::Bracket))?;
TyKind::Slice(elt_ty)
};
- self.expect(&token::CloseDelim(token::Bracket))?;
+
Ok(ty)
}
diff --git a/compiler/rustc_parse_format/src/lib.rs b/compiler/rustc_parse_format/src/lib.rs
index f150f7a..92d9746 100644
--- a/compiler/rustc_parse_format/src/lib.rs
+++ b/compiler/rustc_parse_format/src/lib.rs
@@ -730,7 +730,7 @@
str_style: Option<usize>,
) -> (Vec<usize>, bool) {
let snippet = match snippet {
- Some(ref s) if s.starts_with('"') || s.starts_with("r#") => s,
+ Some(ref s) if s.starts_with('"') || s.starts_with("r\"") || s.starts_with("r#") => s,
_ => return (vec![], false),
};
diff --git a/compiler/rustc_passes/Cargo.toml b/compiler/rustc_passes/Cargo.toml
index c87799f..4069fb2 100644
--- a/compiler/rustc_passes/Cargo.toml
+++ b/compiler/rustc_passes/Cargo.toml
@@ -19,3 +19,4 @@
rustc_span = { path = "../rustc_span" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
rustc_lexer = { path = "../rustc_lexer" }
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs
index 0e3a722..d91d0e1 100644
--- a/compiler/rustc_passes/src/check_attr.rs
+++ b/compiler/rustc_passes/src/check_attr.rs
@@ -8,7 +8,7 @@
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
-use rustc_ast::{Attribute, LitKind, NestedMetaItem};
+use rustc_ast::{Attribute, Lit, LitKind, NestedMetaItem};
use rustc_errors::{pluralize, struct_span_err};
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
@@ -17,7 +17,9 @@
self, FnSig, ForeignItem, ForeignItemKind, HirId, Item, ItemKind, TraitItem, CRATE_HIR_ID,
};
use rustc_hir::{MethodKind, Target};
-use rustc_session::lint::builtin::{CONFLICTING_REPR_HINTS, UNUSED_ATTRIBUTES};
+use rustc_session::lint::builtin::{
+ CONFLICTING_REPR_HINTS, INVALID_DOC_ATTRIBUTES, UNUSED_ATTRIBUTES,
+};
use rustc_session::parse::feature_err;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{Span, DUMMY_SP};
@@ -29,7 +31,7 @@
match impl_item.kind {
hir::ImplItemKind::Const(..) => Target::AssocConst,
hir::ImplItemKind::Fn(..) => {
- let parent_hir_id = tcx.hir().get_parent_item(impl_item.hir_id);
+ let parent_hir_id = tcx.hir().get_parent_item(impl_item.hir_id());
let containing_item = tcx.hir().expect_item(parent_hir_id);
let containing_impl_is_for_trait = match &containing_item.kind {
hir::ItemKind::Impl(impl_) => impl_.of_trait.is_some(),
@@ -60,12 +62,12 @@
fn check_attributes(
&self,
hir_id: HirId,
- attrs: &'hir [Attribute],
span: &Span,
target: Target,
item: Option<ItemLike<'_>>,
) {
let mut is_valid = true;
+ let attrs = self.tcx.hir().attrs(hir_id);
for attr in attrs {
is_valid &= if self.tcx.sess.check_name(attr, sym::inline) {
self.check_inline(hir_id, attr, span, target)
@@ -85,12 +87,18 @@
self.check_export_name(hir_id, &attr, span, target)
} else if self.tcx.sess.check_name(attr, sym::rustc_args_required_const) {
self.check_rustc_args_required_const(&attr, span, target, item)
+ } else if self.tcx.sess.check_name(attr, sym::rustc_layout_scalar_valid_range_start) {
+ self.check_rustc_layout_scalar_valid_range(&attr, span, target)
+ } else if self.tcx.sess.check_name(attr, sym::rustc_layout_scalar_valid_range_end) {
+ self.check_rustc_layout_scalar_valid_range(&attr, span, target)
} else if self.tcx.sess.check_name(attr, sym::allow_internal_unstable) {
self.check_allow_internal_unstable(hir_id, &attr, span, target, &attrs)
} else if self.tcx.sess.check_name(attr, sym::rustc_allow_const_fn_unstable) {
self.check_rustc_allow_const_fn_unstable(hir_id, &attr, span, target)
} else if self.tcx.sess.check_name(attr, sym::naked) {
self.check_naked(hir_id, attr, span, target)
+ } else if self.tcx.sess.check_name(attr, sym::rustc_legacy_const_generics) {
+ self.check_rustc_legacy_const_generics(&attr, span, target, item)
} else {
// lint-only checks
if self.tcx.sess.check_name(attr, sym::cold) {
@@ -386,33 +394,50 @@
.emit();
}
- fn check_doc_alias(&self, meta: &NestedMetaItem, hir_id: HirId, target: Target) -> bool {
- let doc_alias = meta.value_str().map(|s| s.to_string()).unwrap_or_else(String::new);
+ fn check_doc_alias_value(
+ &self,
+ meta: &NestedMetaItem,
+ doc_alias: &str,
+ hir_id: HirId,
+ target: Target,
+ is_list: bool,
+ ) -> bool {
+ let tcx = self.tcx;
+ let err_fn = move |span: Span, msg: &str| {
+ tcx.sess.span_err(
+ span,
+ &format!(
+ "`#[doc(alias{})]` {}",
+ if is_list { "(\"...\")" } else { " = \"...\"" },
+ msg,
+ ),
+ );
+ false
+ };
if doc_alias.is_empty() {
- self.doc_attr_str_error(meta, "alias");
- return false;
+ return err_fn(
+ meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
+ "attribute cannot have empty value",
+ );
}
if let Some(c) =
doc_alias.chars().find(|&c| c == '"' || c == '\'' || (c.is_whitespace() && c != ' '))
{
- self.tcx
- .sess
- .struct_span_err(
- meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
- &format!("{:?} character isn't allowed in `#[doc(alias = \"...\")]`", c),
- )
- .emit();
+ self.tcx.sess.span_err(
+ meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
+ &format!(
+ "{:?} character isn't allowed in `#[doc(alias{})]`",
+ c,
+ if is_list { "(\"...\")" } else { " = \"...\"" },
+ ),
+ );
return false;
}
if doc_alias.starts_with(' ') || doc_alias.ends_with(' ') {
- self.tcx
- .sess
- .struct_span_err(
- meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
- "`#[doc(alias = \"...\")]` cannot start or end with ' '",
- )
- .emit();
- return false;
+ return err_fn(
+ meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
+ "cannot start or end with ' '",
+ );
}
if let Some(err) = match target {
Target::Impl => Some("implementation block"),
@@ -438,27 +463,63 @@
}
_ => None,
} {
- self.tcx
- .sess
- .struct_span_err(
- meta.span(),
- &format!("`#[doc(alias = \"...\")]` isn't allowed on {}", err),
- )
- .emit();
- return false;
+ return err_fn(meta.span(), &format!("isn't allowed on {}", err));
}
let item_name = self.tcx.hir().name(hir_id);
if &*item_name.as_str() == doc_alias {
+ return err_fn(meta.span(), "is the same as the item's name");
+ }
+ true
+ }
+
+ fn check_doc_alias(&self, meta: &NestedMetaItem, hir_id: HirId, target: Target) -> bool {
+ if let Some(values) = meta.meta_item_list() {
+ let mut errors = 0;
+ for v in values {
+ match v.literal() {
+ Some(l) => match l.kind {
+ LitKind::Str(s, _) => {
+ if !self.check_doc_alias_value(v, &s.as_str(), hir_id, target, true) {
+ errors += 1;
+ }
+ }
+ _ => {
+ self.tcx
+ .sess
+ .struct_span_err(
+ v.span(),
+ "`#[doc(alias(\"a\"))]` expects string literals",
+ )
+ .emit();
+ errors += 1;
+ }
+ },
+ None => {
+ self.tcx
+ .sess
+ .struct_span_err(
+ v.span(),
+ "`#[doc(alias(\"a\"))]` expects string literals",
+ )
+ .emit();
+ errors += 1;
+ }
+ }
+ }
+ errors == 0
+ } else if let Some(doc_alias) = meta.value_str().map(|s| s.to_string()) {
+ self.check_doc_alias_value(meta, &doc_alias, hir_id, target, false)
+ } else {
self.tcx
.sess
.struct_span_err(
meta.span(),
- &format!("`#[doc(alias = \"...\")]` is the same as the item's name"),
+ "doc alias attribute expects a string `#[doc(alias = \"a\")]` or a list of \
+ strings `#[doc(alias(\"a\", \"b\"))]`",
)
.emit();
- return false;
+ false
}
- true
}
fn check_doc_keyword(&self, meta: &NestedMetaItem, hir_id: HirId) -> bool {
@@ -516,7 +577,7 @@
.struct_span_err(
meta.span(),
&format!(
- "`#![doc({} = \"...\")]` isn't allowed as a crate level attribute",
+ "`#![doc({} = \"...\")]` isn't allowed as a crate-level attribute",
attr_name,
),
)
@@ -527,26 +588,97 @@
}
fn check_doc_attrs(&self, attr: &Attribute, hir_id: HirId, target: Target) -> bool {
- if let Some(mi) = attr.meta() {
- if let Some(list) = mi.meta_item_list() {
- for meta in list {
- if meta.has_name(sym::alias) {
- if !self.check_attr_crate_level(meta, hir_id, "alias")
- || !self.check_doc_alias(meta, hir_id, target)
+ let mut is_valid = true;
+
+ if let Some(list) = attr.meta().and_then(|mi| mi.meta_item_list().map(|l| l.to_vec())) {
+ for meta in list {
+ if let Some(i_meta) = meta.meta_item() {
+ match i_meta.name_or_empty() {
+ sym::alias
+ if !self.check_attr_crate_level(&meta, hir_id, "alias")
+ || !self.check_doc_alias(&meta, hir_id, target) =>
{
- return false;
+ is_valid = false
}
- } else if meta.has_name(sym::keyword) {
- if !self.check_attr_crate_level(meta, hir_id, "keyword")
- || !self.check_doc_keyword(meta, hir_id)
+
+ sym::keyword
+ if !self.check_attr_crate_level(&meta, hir_id, "keyword")
+ || !self.check_doc_keyword(&meta, hir_id) =>
{
- return false;
+ is_valid = false
+ }
+
+ sym::test if CRATE_HIR_ID != hir_id => {
+ self.tcx.struct_span_lint_hir(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ |lint| {
+ lint.build(
+ "`#![doc(test(...)]` is only allowed \
+ as a crate-level attribute",
+ )
+ .emit();
+ },
+ );
+ is_valid = false;
+ }
+
+ // no_default_passes: deprecated
+ // passes: deprecated
+ // plugins: removed, but rustdoc warns about it itself
+ sym::alias
+ | sym::cfg
+ | sym::hidden
+ | sym::html_favicon_url
+ | sym::html_logo_url
+ | sym::html_no_source
+ | sym::html_playground_url
+ | sym::html_root_url
+ | sym::include
+ | sym::inline
+ | sym::issue_tracker_base_url
+ | sym::keyword
+ | sym::masked
+ | sym::no_default_passes
+ | sym::no_inline
+ | sym::passes
+ | sym::plugins
+ | sym::primitive
+ | sym::spotlight
+ | sym::test => {}
+
+ _ => {
+ self.tcx.struct_span_lint_hir(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ i_meta.span,
+ |lint| {
+ let msg = format!(
+ "unknown `doc` attribute `{}`",
+ rustc_ast_pretty::pprust::path_to_string(&i_meta.path),
+ );
+ lint.build(&msg).emit();
+ },
+ );
+ is_valid = false;
}
}
+ } else {
+ self.tcx.struct_span_lint_hir(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ |lint| {
+ lint.build(&format!("invalid `doc` attribute")).emit();
+ },
+ );
+ is_valid = false;
}
}
}
- true
+
+ is_valid
}
/// Checks if `#[cold]` is applied to a non-function. Returns `true` if valid.
@@ -750,6 +882,136 @@
}
}
+ fn check_rustc_layout_scalar_valid_range(
+ &self,
+ attr: &Attribute,
+ span: &Span,
+ target: Target,
+ ) -> bool {
+ if target != Target::Struct {
+ self.tcx
+ .sess
+ .struct_span_err(attr.span, "attribute should be applied to a struct")
+ .span_label(*span, "not a struct")
+ .emit();
+ return false;
+ }
+
+ let list = match attr.meta_item_list() {
+ None => return false,
+ Some(it) => it,
+ };
+
+ if matches!(&list[..], &[NestedMetaItem::Literal(Lit { kind: LitKind::Int(..), .. })]) {
+ true
+ } else {
+ self.tcx
+ .sess
+ .struct_span_err(attr.span, "expected exactly one integer literal argument")
+ .emit();
+ false
+ }
+ }
+
+ /// Checks if `#[rustc_legacy_const_generics]` is applied to a function and has a valid argument.
+ fn check_rustc_legacy_const_generics(
+ &self,
+ attr: &Attribute,
+ span: &Span,
+ target: Target,
+ item: Option<ItemLike<'_>>,
+ ) -> bool {
+ let is_function = matches!(target, Target::Fn | Target::Method(..));
+ if !is_function {
+ self.tcx
+ .sess
+ .struct_span_err(attr.span, "attribute should be applied to a function")
+ .span_label(*span, "not a function")
+ .emit();
+ return false;
+ }
+
+ let list = match attr.meta_item_list() {
+ // The attribute form is validated on AST.
+ None => return false,
+ Some(it) => it,
+ };
+
+ let (decl, generics) = match item {
+ Some(ItemLike::Item(Item {
+ kind: ItemKind::Fn(FnSig { decl, .. }, generics, _),
+ ..
+ })) => (decl, generics),
+ _ => bug!("should be a function item"),
+ };
+
+ for param in generics.params {
+ match param.kind {
+ hir::GenericParamKind::Const { .. } => {}
+ _ => {
+ self.tcx
+ .sess
+ .struct_span_err(
+ attr.span,
+ "#[rustc_legacy_const_generics] functions must \
+ only have const generics",
+ )
+ .span_label(param.span, "non-const generic parameter")
+ .emit();
+ return false;
+ }
+ }
+ }
+
+ if list.len() != generics.params.len() {
+ self.tcx
+ .sess
+ .struct_span_err(
+ attr.span,
+ "#[rustc_legacy_const_generics] must have one index for each generic parameter",
+ )
+ .span_label(generics.span, "generic parameters")
+ .emit();
+ return false;
+ }
+
+ let arg_count = decl.inputs.len() as u128 + generics.params.len() as u128;
+ let mut invalid_args = vec![];
+ for meta in list {
+ if let Some(LitKind::Int(val, _)) = meta.literal().map(|lit| &lit.kind) {
+ if *val >= arg_count {
+ let span = meta.span();
+ self.tcx
+ .sess
+ .struct_span_err(span, "index exceeds number of arguments")
+ .span_label(
+ span,
+ format!(
+ "there {} only {} argument{}",
+ if arg_count != 1 { "are" } else { "is" },
+ arg_count,
+ pluralize!(arg_count)
+ ),
+ )
+ .emit();
+ return false;
+ }
+ } else {
+ invalid_args.push(meta.span());
+ }
+ }
+
+ if !invalid_args.is_empty() {
+ self.tcx
+ .sess
+ .struct_span_err(invalid_args, "arguments should be non-negative integers")
+ .emit();
+ false
+ } else {
+ true
+ }
+ }
+
/// Checks if `#[link_section]` is applied to a function or static.
fn check_link_section(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) {
match target {
@@ -1057,66 +1319,36 @@
fn visit_item(&mut self, item: &'tcx Item<'tcx>) {
let target = Target::from_item(item);
- self.check_attributes(
- item.hir_id,
- item.attrs,
- &item.span,
- target,
- Some(ItemLike::Item(item)),
- );
+ self.check_attributes(item.hir_id(), &item.span, target, Some(ItemLike::Item(item)));
intravisit::walk_item(self, item)
}
fn visit_generic_param(&mut self, generic_param: &'tcx hir::GenericParam<'tcx>) {
let target = Target::from_generic_param(generic_param);
- self.check_attributes(
- generic_param.hir_id,
- generic_param.attrs,
- &generic_param.span,
- target,
- None,
- );
+ self.check_attributes(generic_param.hir_id, &generic_param.span, target, None);
intravisit::walk_generic_param(self, generic_param)
}
fn visit_trait_item(&mut self, trait_item: &'tcx TraitItem<'tcx>) {
let target = Target::from_trait_item(trait_item);
- self.check_attributes(trait_item.hir_id, &trait_item.attrs, &trait_item.span, target, None);
+ self.check_attributes(trait_item.hir_id(), &trait_item.span, target, None);
intravisit::walk_trait_item(self, trait_item)
}
- fn visit_struct_field(&mut self, struct_field: &'tcx hir::StructField<'tcx>) {
- self.check_attributes(
- struct_field.hir_id,
- &struct_field.attrs,
- &struct_field.span,
- Target::Field,
- None,
- );
- intravisit::walk_struct_field(self, struct_field);
+ fn visit_field_def(&mut self, struct_field: &'tcx hir::FieldDef<'tcx>) {
+ self.check_attributes(struct_field.hir_id, &struct_field.span, Target::Field, None);
+ intravisit::walk_field_def(self, struct_field);
}
fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
- self.check_attributes(arm.hir_id, &arm.attrs, &arm.span, Target::Arm, None);
+ self.check_attributes(arm.hir_id, &arm.span, Target::Arm, None);
intravisit::walk_arm(self, arm);
}
- fn visit_macro_def(&mut self, macro_def: &'tcx hir::MacroDef<'tcx>) {
- self.check_attributes(
- macro_def.hir_id,
- &macro_def.attrs,
- &macro_def.span,
- Target::MacroDef,
- None,
- );
- intravisit::walk_macro_def(self, macro_def);
- }
-
fn visit_foreign_item(&mut self, f_item: &'tcx ForeignItem<'tcx>) {
let target = Target::from_foreign_item(f_item);
self.check_attributes(
- f_item.hir_id,
- &f_item.attrs,
+ f_item.hir_id(),
&f_item.span,
target,
Some(ItemLike::ForeignItem(f_item)),
@@ -1126,14 +1358,14 @@
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
let target = target_from_impl_item(self.tcx, impl_item);
- self.check_attributes(impl_item.hir_id, &impl_item.attrs, &impl_item.span, target, None);
+ self.check_attributes(impl_item.hir_id(), &impl_item.span, target, None);
intravisit::walk_impl_item(self, impl_item)
}
fn visit_stmt(&mut self, stmt: &'tcx hir::Stmt<'tcx>) {
// When checking statements ignore expressions, they will be checked later.
if let hir::StmtKind::Local(ref l) = stmt.kind {
- self.check_attributes(l.hir_id, &l.attrs, &stmt.span, Target::Statement, None);
+ self.check_attributes(l.hir_id, &stmt.span, Target::Statement, None);
}
intravisit::walk_stmt(self, stmt)
}
@@ -1144,7 +1376,7 @@
_ => Target::Expression,
};
- self.check_attributes(expr.hir_id, &expr.attrs, &expr.span, target, None);
+ self.check_attributes(expr.hir_id, &expr.span, target, None);
intravisit::walk_expr(self, expr)
}
@@ -1154,9 +1386,20 @@
generics: &'tcx hir::Generics<'tcx>,
item_id: HirId,
) {
- self.check_attributes(variant.id, variant.attrs, &variant.span, Target::Variant, None);
+ self.check_attributes(variant.id, &variant.span, Target::Variant, None);
intravisit::walk_variant(self, variant, generics, item_id)
}
+
+ fn visit_macro_def(&mut self, macro_def: &'tcx hir::MacroDef<'tcx>) {
+ self.check_attributes(macro_def.hir_id(), &macro_def.span, Target::MacroDef, None);
+ intravisit::walk_macro_def(self, macro_def);
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ self.check_attributes(param.hir_id, &param.span, Target::Param, None);
+
+ intravisit::walk_param(self, param);
+ }
}
fn is_c_like_enum(item: &Item<'_>) -> bool {
@@ -1221,13 +1464,7 @@
tcx.hir().visit_exported_macros_in_krate(check_attr_visitor);
check_invalid_macro_level_attr(tcx, tcx.hir().krate().non_exported_macro_attrs);
if module_def_id.is_top_level_module() {
- check_attr_visitor.check_attributes(
- CRATE_HIR_ID,
- tcx.hir().krate_attrs(),
- &DUMMY_SP,
- Target::Mod,
- None,
- );
+ check_attr_visitor.check_attributes(CRATE_HIR_ID, &DUMMY_SP, Target::Mod, None);
check_invalid_crate_level_attr(tcx, tcx.hir().krate_attrs());
}
}
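
The check_attr.rs changes above add validation for the list form of doc(alias) and introduce the invalid_doc_attributes lint for unknown doc keys. An assumed user-facing input the new checks are meant to accept (recent toolchains only; not taken from the diff):

    // Both alias spellings below are validated by the same helper after this
    // change; a misspelled key such as `doc(alia = "x")` would instead trigger
    // the new `invalid_doc_attributes` lint.
    #[doc(alias = "bar")]            // name/value form
    pub struct Foo;

    impl Foo {
        #[doc(alias("qux", "quux"))] // list form: each entry is checked separately
        pub fn baz(&self) {}
    }

    fn main() {
        Foo.baz();
    }
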
diff --git a/compiler/rustc_passes/src/check_const.rs b/compiler/rustc_passes/src/check_const.rs
index 8950f9b..da71356 100644
--- a/compiler/rustc_passes/src/check_const.rs
+++ b/compiler/rustc_passes/src/check_const.rs
@@ -45,7 +45,7 @@
return None;
}
- Self::Match(IfLetGuardDesugar) => bug!("if-let guard outside a `match` expression"),
+ Self::Match(IfLetGuardDesugar) => bug!("`if let` guard outside a `match` expression"),
// All other expressions are allowed.
Self::Loop(Loop | While | WhileLet)
@@ -106,7 +106,7 @@
// However, we cannot allow stable `const fn`s to use unstable features without an explicit
// opt-in via `rustc_allow_const_fn_unstable`.
attr::rustc_allow_const_fn_unstable(&tcx.sess, &tcx.get_attrs(def_id))
- .map_or(false, |mut features| features.any(|name| name == feature_gate))
+ .any(|name| name == feature_gate)
};
match required_gates {
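
The check_const.rs hunk above assumes `rustc_allow_const_fn_unstable` now hands back an iterator directly rather than an `Option` of one, so the `map_or(false, ...)` wrapper collapses into a plain `.any(...)`. A hedged sketch of that refactoring pattern, with invented helper names:

    // `..._old` mirrors the previous call shape, `..._new` the one used above.
    fn allowed_features_old<'a>(
        attr: Option<&'a str>,
    ) -> Option<impl Iterator<Item = &'a str> + 'a> {
        attr.map(|a| a.split(','))
    }

    fn allowed_features_new<'a>(attr: Option<&'a str>) -> impl Iterator<Item = &'a str> + 'a {
        // An absent attribute simply yields an empty iterator.
        attr.into_iter().flat_map(|a| a.split(','))
    }

    fn main() {
        let attr = Some("const_fn_trait_bound,const_panic");
        let old = allowed_features_old(attr)
            .map_or(false, |mut feats| feats.any(|f| f == "const_panic"));
        let new = allowed_features_new(attr).any(|f| f == "const_panic");
        assert_eq!(old, new);
    }
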
diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs
index 3b1b535..9d70bc4 100644
--- a/compiler/rustc_passes/src/dead.rs
+++ b/compiler/rustc_passes/src/dead.rs
@@ -15,7 +15,6 @@
use rustc_middle::ty::{self, DefIdTree, TyCtxt};
use rustc_session::lint;
-use rustc_ast as ast;
use rustc_span::symbol::{sym, Symbol};
// Any local node that may call something in its body block should be
@@ -138,7 +137,7 @@
&mut self,
lhs: &hir::Pat<'_>,
res: Res,
- pats: &[hir::FieldPat<'_>],
+ pats: &[hir::PatField<'_>],
) {
let variant = match self.typeck_results().node_type(lhs.hir_id).kind() {
ty::Adt(adt, _) => adt.variant_of_res(res),
@@ -179,8 +178,7 @@
match node {
Node::Item(item) => match item.kind {
hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) => {
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
- let def = self.tcx.adt_def(def_id);
+ let def = self.tcx.adt_def(item.def_id);
self.repr_has_repr_c = def.repr.c();
intravisit::walk_item(self, &item);
@@ -210,7 +208,7 @@
self.inherited_pub_visibility = had_inherited_pub_visibility;
}
- fn mark_as_used_if_union(&mut self, adt: &ty::AdtDef, fields: &[hir::Field<'_>]) {
+ fn mark_as_used_if_union(&mut self, adt: &ty::AdtDef, fields: &[hir::ExprField<'_>]) {
if adt.is_union() && adt.non_enum_variant().fields.len() > 1 && adt.did.is_local() {
for field in fields {
let index = self.tcx.field_index(field.hir_id, self.typeck_results());
@@ -314,7 +312,7 @@
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
if let TyKind::OpaqueDef(item_id, _) = ty.kind {
- let item = self.tcx.hir().expect_item(item_id.id);
+ let item = self.tcx.hir().item(item_id);
intravisit::walk_item(self, item);
}
intravisit::walk_ty(self, ty);
@@ -326,11 +324,8 @@
}
}
-fn has_allow_dead_code_or_lang_attr(
- tcx: TyCtxt<'_>,
- id: hir::HirId,
- attrs: &[ast::Attribute],
-) -> bool {
+fn has_allow_dead_code_or_lang_attr(tcx: TyCtxt<'_>, id: hir::HirId) -> bool {
+ let attrs = tcx.hir().attrs(id);
if tcx.sess.contains_name(attrs, sym::lang) {
return true;
}
@@ -380,9 +375,9 @@
impl<'v, 'k, 'tcx> ItemLikeVisitor<'v> for LifeSeeder<'k, 'tcx> {
fn visit_item(&mut self, item: &hir::Item<'_>) {
- let allow_dead_code = has_allow_dead_code_or_lang_attr(self.tcx, item.hir_id, &item.attrs);
+ let allow_dead_code = has_allow_dead_code_or_lang_attr(self.tcx, item.hir_id());
if allow_dead_code {
- self.worklist.push(item.hir_id);
+ self.worklist.push(item.hir_id());
}
match item.kind {
hir::ItemKind::Enum(ref enum_def, _) => {
@@ -398,24 +393,20 @@
}
hir::ItemKind::Impl(hir::Impl { ref of_trait, items, .. }) => {
if of_trait.is_some() {
- self.worklist.push(item.hir_id);
+ self.worklist.push(item.hir_id());
}
for impl_item_ref in items {
let impl_item = self.krate.impl_item(impl_item_ref.id);
if of_trait.is_some()
- || has_allow_dead_code_or_lang_attr(
- self.tcx,
- impl_item.hir_id,
- &impl_item.attrs,
- )
+ || has_allow_dead_code_or_lang_attr(self.tcx, impl_item.hir_id())
{
- self.worklist.push(impl_item_ref.id.hir_id);
+ self.worklist.push(impl_item_ref.id.hir_id());
}
}
}
hir::ItemKind::Struct(ref variant_data, _) => {
if let Some(ctor_hir_id) = variant_data.ctor_hir_id() {
- self.struct_constructors.insert(ctor_hir_id, item.hir_id);
+ self.struct_constructors.insert(ctor_hir_id, item.hir_id());
}
}
_ => (),
@@ -425,9 +416,9 @@
fn visit_trait_item(&mut self, trait_item: &hir::TraitItem<'_>) {
use hir::TraitItemKind::{Const, Fn};
if matches!(trait_item.kind, Const(_, Some(_)) | Fn(_, hir::TraitFn::Provided(_)))
- && has_allow_dead_code_or_lang_attr(self.tcx, trait_item.hir_id, &trait_item.attrs)
+ && has_allow_dead_code_or_lang_attr(self.tcx, trait_item.hir_id())
{
- self.worklist.push(trait_item.hir_id);
+ self.worklist.push(trait_item.hir_id());
}
}
@@ -438,9 +429,9 @@
fn visit_foreign_item(&mut self, foreign_item: &hir::ForeignItem<'_>) {
use hir::ForeignItemKind::{Fn, Static};
if matches!(foreign_item.kind, Static(..) | Fn(..))
- && has_allow_dead_code_or_lang_attr(self.tcx, foreign_item.hir_id, &foreign_item.attrs)
+ && has_allow_dead_code_or_lang_attr(self.tcx, foreign_item.hir_id())
{
- self.worklist.push(foreign_item.hir_id);
+ self.worklist.push(foreign_item.hir_id());
}
}
}
@@ -510,25 +501,24 @@
| hir::ItemKind::Struct(..)
| hir::ItemKind::Union(..)
);
- should_warn && !self.symbol_is_live(item.hir_id)
+ should_warn && !self.symbol_is_live(item.hir_id())
}
- fn should_warn_about_field(&mut self, field: &hir::StructField<'_>) -> bool {
+ fn should_warn_about_field(&mut self, field: &hir::FieldDef<'_>) -> bool {
let field_type = self.tcx.type_of(self.tcx.hir().local_def_id(field.hir_id));
!field.is_positional()
&& !self.symbol_is_live(field.hir_id)
&& !field_type.is_phantom_data()
- && !has_allow_dead_code_or_lang_attr(self.tcx, field.hir_id, &field.attrs)
+ && !has_allow_dead_code_or_lang_attr(self.tcx, field.hir_id)
}
fn should_warn_about_variant(&mut self, variant: &hir::Variant<'_>) -> bool {
- !self.symbol_is_live(variant.id)
- && !has_allow_dead_code_or_lang_attr(self.tcx, variant.id, &variant.attrs)
+ !self.symbol_is_live(variant.id) && !has_allow_dead_code_or_lang_attr(self.tcx, variant.id)
}
fn should_warn_about_foreign_item(&mut self, fi: &hir::ForeignItem<'_>) -> bool {
- !self.symbol_is_live(fi.hir_id)
- && !has_allow_dead_code_or_lang_attr(self.tcx, fi.hir_id, &fi.attrs)
+ !self.symbol_is_live(fi.hir_id())
+ && !has_allow_dead_code_or_lang_attr(self.tcx, fi.hir_id())
}
// id := HIR id of an item's definition.
@@ -612,7 +602,7 @@
hir::ItemKind::Struct(..) => "constructed", // Issue #52325
_ => "used",
};
- self.warn_dead_code(item.hir_id, span, item.ident.name, participle);
+ self.warn_dead_code(item.hir_id(), span, item.ident.name, participle);
} else {
// Only continue if we didn't warn
intravisit::walk_item(self, item);
@@ -634,24 +624,24 @@
fn visit_foreign_item(&mut self, fi: &'tcx hir::ForeignItem<'tcx>) {
if self.should_warn_about_foreign_item(fi) {
- self.warn_dead_code(fi.hir_id, fi.span, fi.ident.name, "used");
+ self.warn_dead_code(fi.hir_id(), fi.span, fi.ident.name, "used");
}
intravisit::walk_foreign_item(self, fi);
}
- fn visit_struct_field(&mut self, field: &'tcx hir::StructField<'tcx>) {
+ fn visit_field_def(&mut self, field: &'tcx hir::FieldDef<'tcx>) {
if self.should_warn_about_field(&field) {
self.warn_dead_code(field.hir_id, field.span, field.ident.name, "read");
}
- intravisit::walk_struct_field(self, field);
+ intravisit::walk_field_def(self, field);
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
match impl_item.kind {
hir::ImplItemKind::Const(_, body_id) => {
- if !self.symbol_is_live(impl_item.hir_id) {
+ if !self.symbol_is_live(impl_item.hir_id()) {
self.warn_dead_code(
- impl_item.hir_id,
+ impl_item.hir_id(),
impl_item.span,
impl_item.ident.name,
"used",
@@ -660,7 +650,7 @@
self.visit_nested_body(body_id)
}
hir::ImplItemKind::Fn(_, body_id) => {
- if !self.symbol_is_live(impl_item.hir_id) {
+ if !self.symbol_is_live(impl_item.hir_id()) {
// FIXME(66095): Because impl_item.span is annotated with things
// like expansion data, and ident.span isn't, we use the
// def_span method if it's part of a macro invocation
@@ -672,7 +662,7 @@
} else {
impl_item.ident.span
};
- self.warn_dead_code(impl_item.hir_id, span, impl_item.ident.name, "used");
+ self.warn_dead_code(impl_item.hir_id(), span, impl_item.ident.name, "used");
}
self.visit_nested_body(body_id)
}
diff --git a/compiler/rustc_passes/src/diagnostic_items.rs b/compiler/rustc_passes/src/diagnostic_items.rs
index 699c96b..8dd3700 100644
--- a/compiler/rustc_passes/src/diagnostic_items.rs
+++ b/compiler/rustc_passes/src/diagnostic_items.rs
@@ -16,7 +16,7 @@
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
-use rustc_span::def_id::{DefId, LOCAL_CRATE};
+use rustc_span::def_id::{DefId, LocalDefId, LOCAL_CRATE};
use rustc_span::symbol::{sym, Symbol};
struct DiagnosticItemCollector<'tcx> {
@@ -27,19 +27,19 @@
impl<'v, 'tcx> ItemLikeVisitor<'v> for DiagnosticItemCollector<'tcx> {
fn visit_item(&mut self, item: &hir::Item<'_>) {
- self.observe_item(&item.attrs, item.hir_id);
+ self.observe_item(item.def_id);
}
fn visit_trait_item(&mut self, trait_item: &hir::TraitItem<'_>) {
- self.observe_item(&trait_item.attrs, trait_item.hir_id);
+ self.observe_item(trait_item.def_id);
}
fn visit_impl_item(&mut self, impl_item: &hir::ImplItem<'_>) {
- self.observe_item(&impl_item.attrs, impl_item.hir_id);
+ self.observe_item(impl_item.def_id);
}
fn visit_foreign_item(&mut self, foreign_item: &hir::ForeignItem<'_>) {
- self.observe_item(foreign_item.attrs, foreign_item.hir_id);
+ self.observe_item(foreign_item.def_id);
}
}
@@ -48,9 +48,10 @@
DiagnosticItemCollector { tcx, items: Default::default() }
}
- fn observe_item(&mut self, attrs: &[ast::Attribute], hir_id: hir::HirId) {
+ fn observe_item(&mut self, def_id: LocalDefId) {
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let attrs = self.tcx.hir().attrs(hir_id);
if let Some(name) = extract(&self.tcx.sess, attrs) {
- let def_id = self.tcx.hir().local_def_id(hir_id);
// insert into our table
collect_item(self.tcx, &mut self.items, name, def_id.to_def_id());
}
@@ -106,7 +107,7 @@
tcx.hir().krate().visit_all_item_likes(&mut collector);
for m in tcx.hir().krate().exported_macros {
- collector.observe_item(m.attrs, m.hir_id);
+ collector.observe_item(m.def_id);
}
collector.items
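
Like most files in this patch, diagnostic_items.rs stops reading an `attrs` field off each HIR node and instead asks the HIR map for the attributes of an owner id. A toy sketch of that ownership shift, with all names invented for illustration:

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct HirId(u32);

    // Items only carry an id; attributes live in one central map.
    struct Item {
        id: HirId,
    }

    struct HirMap {
        attrs: HashMap<HirId, Vec<String>>,
    }

    impl HirMap {
        fn attrs(&self, id: HirId) -> &[String] {
            self.attrs.get(&id).map(Vec::as_slice).unwrap_or(&[])
        }
    }

    fn main() {
        let mut attrs = HashMap::new();
        attrs.insert(HirId(0), vec!["rustc_diagnostic_item".to_string()]);
        let map = HirMap { attrs };
        let item = Item { id: HirId(0) };
        // Callers ask the map rather than reading `item.attrs` directly.
        assert_eq!(map.attrs(item.id)[0], "rustc_diagnostic_item");
    }
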
diff --git a/compiler/rustc_passes/src/entry.rs b/compiler/rustc_passes/src/entry.rs
index 5ff631a..5784820 100644
--- a/compiler/rustc_passes/src/entry.rs
+++ b/compiler/rustc_passes/src/entry.rs
@@ -2,7 +2,7 @@
use rustc_errors::struct_span_err;
use rustc_hir::def_id::{CrateNum, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::itemlikevisit::ItemLikeVisitor;
-use rustc_hir::{ForeignItem, HirId, ImplItem, Item, ItemKind, TraitItem};
+use rustc_hir::{ForeignItem, HirId, ImplItem, Item, ItemKind, TraitItem, CRATE_HIR_ID};
use rustc_middle::hir::map::Map;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
@@ -32,8 +32,7 @@
impl<'a, 'tcx> ItemLikeVisitor<'tcx> for EntryContext<'a, 'tcx> {
fn visit_item(&mut self, item: &'tcx Item<'tcx>) {
- let def_id = self.map.local_def_id(item.hir_id);
- let def_key = self.map.def_key(def_id);
+ let def_key = self.map.def_key(item.def_id);
let at_root = def_key.parent == Some(CRATE_DEF_INDEX);
find_item(item, self, at_root);
}
@@ -61,7 +60,7 @@
}
// If the user wants no main function at all, then stop here.
- if tcx.sess.contains_name(&tcx.hir().krate().item.attrs, sym::no_main) {
+ if tcx.sess.contains_name(&tcx.hir().attrs(CRATE_HIR_ID), sym::no_main) {
return None;
}
@@ -81,10 +80,11 @@
// Beware, this is duplicated in `librustc_builtin_macros/test_harness.rs`
// (with `ast::Item`), so make sure to keep them in sync.
-fn entry_point_type(sess: &Session, item: &Item<'_>, at_root: bool) -> EntryPointType {
- if sess.contains_name(&item.attrs, sym::start) {
+fn entry_point_type(ctxt: &EntryContext<'_, '_>, item: &Item<'_>, at_root: bool) -> EntryPointType {
+ let attrs = ctxt.map.attrs(item.hir_id());
+ if ctxt.session.contains_name(attrs, sym::start) {
EntryPointType::Start
- } else if sess.contains_name(&item.attrs, sym::main) {
+ } else if ctxt.session.contains_name(attrs, sym::main) {
EntryPointType::MainAttr
} else if item.ident.name == sym::main {
if at_root {
@@ -104,30 +104,31 @@
}
fn find_item(item: &Item<'_>, ctxt: &mut EntryContext<'_, '_>, at_root: bool) {
- match entry_point_type(&ctxt.session, item, at_root) {
+ match entry_point_type(ctxt, item, at_root) {
EntryPointType::None => (),
_ if !matches!(item.kind, ItemKind::Fn(..)) => {
- if let Some(attr) = ctxt.session.find_by_name(item.attrs, sym::start) {
+ let attrs = ctxt.map.attrs(item.hir_id());
+ if let Some(attr) = ctxt.session.find_by_name(attrs, sym::start) {
throw_attr_err(&ctxt.session, attr.span, "start");
}
- if let Some(attr) = ctxt.session.find_by_name(item.attrs, sym::main) {
+ if let Some(attr) = ctxt.session.find_by_name(attrs, sym::main) {
throw_attr_err(&ctxt.session, attr.span, "main");
}
}
EntryPointType::MainNamed => {
if ctxt.main_fn.is_none() {
- ctxt.main_fn = Some((item.hir_id, item.span));
+ ctxt.main_fn = Some((item.hir_id(), item.span));
} else {
struct_span_err!(ctxt.session, item.span, E0136, "multiple `main` functions")
.emit();
}
}
EntryPointType::OtherMain => {
- ctxt.non_main_fns.push((item.hir_id, item.span));
+ ctxt.non_main_fns.push((item.hir_id(), item.span));
}
EntryPointType::MainAttr => {
if ctxt.attr_main_fn.is_none() {
- ctxt.attr_main_fn = Some((item.hir_id, item.span));
+ ctxt.attr_main_fn = Some((item.hir_id(), item.span));
} else {
struct_span_err!(
ctxt.session,
@@ -142,7 +143,7 @@
}
EntryPointType::Start => {
if ctxt.start_fn.is_none() {
- ctxt.start_fn = Some((item.hir_id, item.span));
+ ctxt.start_fn = Some((item.hir_id(), item.span));
} else {
struct_span_err!(ctxt.session, item.span, E0138, "multiple `start` functions")
.span_label(ctxt.start_fn.unwrap().1, "previous `#[start]` function here")
diff --git a/compiler/rustc_passes/src/hir_id_validator.rs b/compiler/rustc_passes/src/hir_id_validator.rs
index fdd6c23..79e3b59 100644
--- a/compiler/rustc_passes/src/hir_id_validator.rs
+++ b/compiler/rustc_passes/src/hir_id_validator.rs
@@ -14,12 +14,9 @@
let errors = Lock::new(Vec::new());
let hir_map = tcx.hir();
- par_iter(&hir_map.krate().modules).for_each(|(module_id, _)| {
- let local_def_id = hir_map.local_def_id(*module_id);
- hir_map.visit_item_likes_in_module(
- local_def_id,
- &mut OuterVisitor { hir_map, errors: &errors },
- );
+ par_iter(&hir_map.krate().modules).for_each(|(&module_id, _)| {
+ hir_map
+ .visit_item_likes_in_module(module_id, &mut OuterVisitor { hir_map, errors: &errors });
});
let errors = errors.into_inner();
@@ -56,22 +53,22 @@
impl<'a, 'hir> ItemLikeVisitor<'hir> for OuterVisitor<'a, 'hir> {
fn visit_item(&mut self, i: &'hir hir::Item<'hir>) {
let mut inner_visitor = self.new_inner_visitor(self.hir_map);
- inner_visitor.check(i.hir_id, |this| intravisit::walk_item(this, i));
+ inner_visitor.check(i.hir_id(), |this| intravisit::walk_item(this, i));
}
fn visit_trait_item(&mut self, i: &'hir hir::TraitItem<'hir>) {
let mut inner_visitor = self.new_inner_visitor(self.hir_map);
- inner_visitor.check(i.hir_id, |this| intravisit::walk_trait_item(this, i));
+ inner_visitor.check(i.hir_id(), |this| intravisit::walk_trait_item(this, i));
}
fn visit_impl_item(&mut self, i: &'hir hir::ImplItem<'hir>) {
let mut inner_visitor = self.new_inner_visitor(self.hir_map);
- inner_visitor.check(i.hir_id, |this| intravisit::walk_impl_item(this, i));
+ inner_visitor.check(i.hir_id(), |this| intravisit::walk_impl_item(this, i));
}
fn visit_foreign_item(&mut self, i: &'hir hir::ForeignItem<'hir>) {
let mut inner_visitor = self.new_inner_visitor(self.hir_map);
- inner_visitor.check(i.hir_id, |this| intravisit::walk_foreign_item(this, i));
+ inner_visitor.check(i.hir_id(), |this| intravisit::walk_foreign_item(this, i));
}
}
diff --git a/compiler/rustc_passes/src/hir_stats.rs b/compiler/rustc_passes/src/hir_stats.rs
index 1d02c9a..2bed8ca 100644
--- a/compiler/rustc_passes/src/hir_stats.rs
+++ b/compiler/rustc_passes/src/hir_stats.rs
@@ -66,13 +66,13 @@
let mut total_size = 0;
- println!("\n{}\n", title);
+ eprintln!("\n{}\n", title);
- println!("{:<18}{:>18}{:>14}{:>14}", "Name", "Accumulated Size", "Count", "Item Size");
- println!("----------------------------------------------------------------");
+ eprintln!("{:<18}{:>18}{:>14}{:>14}", "Name", "Accumulated Size", "Count", "Item Size");
+ eprintln!("----------------------------------------------------------------");
for (label, data) in stats {
- println!(
+ eprintln!(
"{:<18}{:>18}{:>14}{:>14}",
label,
to_readable_str(data.count * data.size),
@@ -82,8 +82,8 @@
total_size += data.count * data.size;
}
- println!("----------------------------------------------------------------");
- println!("{:<18}{:>18}\n", "Total", to_readable_str(total_size));
+ eprintln!("----------------------------------------------------------------");
+ eprintln!("{:<18}{:>18}\n", "Total", to_readable_str(total_size));
}
}
@@ -100,7 +100,7 @@
}
fn visit_nested_item(&mut self, id: hir::ItemId) {
- let nested_item = self.krate.unwrap().item(id.id);
+ let nested_item = self.krate.unwrap().item(id);
self.visit_item(nested_item)
}
@@ -114,23 +114,23 @@
self.visit_impl_item(nested_impl_item)
}
+ fn visit_nested_foreign_item(&mut self, id: hir::ForeignItemId) {
+ let nested_foreign_item = self.krate.unwrap().foreign_item(id);
+ self.visit_foreign_item(nested_foreign_item);
+ }
+
fn visit_nested_body(&mut self, body_id: hir::BodyId) {
let nested_body = self.krate.unwrap().body(body_id);
self.visit_body(nested_body)
}
fn visit_item(&mut self, i: &'v hir::Item<'v>) {
- self.record("Item", Id::Node(i.hir_id), i);
+ self.record("Item", Id::Node(i.hir_id()), i);
hir_visit::walk_item(self, i)
}
- fn visit_mod(&mut self, m: &'v hir::Mod<'v>, _s: Span, n: hir::HirId) {
- self.record("Mod", Id::None, m);
- hir_visit::walk_mod(self, m, n)
- }
-
fn visit_foreign_item(&mut self, i: &'v hir::ForeignItem<'v>) {
- self.record("ForeignItem", Id::Node(i.hir_id), i);
+ self.record("ForeignItem", Id::Node(i.hir_id()), i);
hir_visit::walk_foreign_item(self, i)
}
@@ -187,12 +187,12 @@
}
fn visit_trait_item(&mut self, ti: &'v hir::TraitItem<'v>) {
- self.record("TraitItem", Id::Node(ti.hir_id), ti);
+ self.record("TraitItem", Id::Node(ti.hir_id()), ti);
hir_visit::walk_trait_item(self, ti)
}
fn visit_impl_item(&mut self, ii: &'v hir::ImplItem<'v>) {
- self.record("ImplItem", Id::Node(ii.hir_id), ii);
+ self.record("ImplItem", Id::Node(ii.hir_id()), ii);
hir_visit::walk_impl_item(self, ii)
}
@@ -201,9 +201,9 @@
hir_visit::walk_param_bound(self, bounds)
}
- fn visit_struct_field(&mut self, s: &'v hir::StructField<'v>) {
- self.record("StructField", Id::Node(s.hir_id), s);
- hir_visit::walk_struct_field(self, s)
+ fn visit_field_def(&mut self, s: &'v hir::FieldDef<'v>) {
+ self.record("FieldDef", Id::Node(s.hir_id), s);
+ hir_visit::walk_field_def(self, s)
}
fn visit_variant(
@@ -241,22 +241,17 @@
hir_visit::walk_assoc_type_binding(self, type_binding)
}
- fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
+ fn visit_attribute(&mut self, _: hir::HirId, attr: &'v ast::Attribute) {
self.record("Attribute", Id::Attr(attr.id), attr);
}
fn visit_macro_def(&mut self, macro_def: &'v hir::MacroDef<'v>) {
- self.record("MacroDef", Id::Node(macro_def.hir_id), macro_def);
+ self.record("MacroDef", Id::Node(macro_def.hir_id()), macro_def);
hir_visit::walk_macro_def(self, macro_def)
}
}
impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
- fn visit_mod(&mut self, m: &'v ast::Mod, _s: Span, _a: &[ast::Attribute], _n: NodeId) {
- self.record("Mod", Id::None, m);
- ast_visit::walk_mod(self, m)
- }
-
fn visit_foreign_item(&mut self, i: &'v ast::ForeignItem) {
self.record("ForeignItem", Id::None, i);
ast_visit::walk_foreign_item(self, i)
@@ -321,9 +316,9 @@
ast_visit::walk_param_bound(self, bounds)
}
- fn visit_struct_field(&mut self, s: &'v ast::StructField) {
- self.record("StructField", Id::None, s);
- ast_visit::walk_struct_field(self, s)
+ fn visit_field_def(&mut self, s: &'v ast::FieldDef) {
+ self.record("FieldDef", Id::None, s);
+ ast_visit::walk_field_def(self, s)
}
fn visit_variant(&mut self, v: &'v ast::Variant) {
diff --git a/compiler/rustc_passes/src/lang_items.rs b/compiler/rustc_passes/src/lang_items.rs
index 3132661..7e6bb97 100644
--- a/compiler/rustc_passes/src/lang_items.rs
+++ b/compiler/rustc_passes/src/lang_items.rs
@@ -13,7 +13,6 @@
use rustc_middle::middle::cstore::ExternCrate;
use rustc_middle::ty::TyCtxt;
-use rustc_ast::Attribute;
use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
@@ -30,29 +29,21 @@
impl ItemLikeVisitor<'v> for LanguageItemCollector<'tcx> {
fn visit_item(&mut self, item: &hir::Item<'_>) {
- self.check_for_lang(Target::from_item(item), item.hir_id, item.attrs);
+ self.check_for_lang(Target::from_item(item), item.hir_id());
if let hir::ItemKind::Enum(def, ..) = &item.kind {
for variant in def.variants {
- self.check_for_lang(Target::Variant, variant.id, variant.attrs);
+ self.check_for_lang(Target::Variant, variant.id);
}
}
}
fn visit_trait_item(&mut self, trait_item: &hir::TraitItem<'_>) {
- self.check_for_lang(
- Target::from_trait_item(trait_item),
- trait_item.hir_id,
- trait_item.attrs,
- )
+ self.check_for_lang(Target::from_trait_item(trait_item), trait_item.hir_id())
}
fn visit_impl_item(&mut self, impl_item: &hir::ImplItem<'_>) {
- self.check_for_lang(
- target_from_impl_item(self.tcx, impl_item),
- impl_item.hir_id,
- impl_item.attrs,
- )
+ self.check_for_lang(target_from_impl_item(self.tcx, impl_item), impl_item.hir_id())
}
fn visit_foreign_item(&mut self, _: &hir::ForeignItem<'_>) {}
@@ -63,7 +54,8 @@
LanguageItemCollector { tcx, items: LanguageItems::new() }
}
- fn check_for_lang(&mut self, actual_target: Target, hir_id: HirId, attrs: &[Attribute]) {
+ fn check_for_lang(&mut self, actual_target: Target, hir_id: HirId) {
+ let attrs = self.tcx.hir().attrs(hir_id);
let check_name = |attr, sym| self.tcx.sess.check_name(attr, sym);
if let Some((value, span)) = extract(check_name, &attrs) {
match ITEM_REFS.get(&value).cloned() {
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
index 9e83cbd..18c1d64 100644
--- a/compiler/rustc_passes/src/layout_test.rs
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -21,16 +21,14 @@
impl ItemLikeVisitor<'tcx> for LayoutTest<'tcx> {
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- let item_def_id = self.tcx.hir().local_def_id(item.hir_id);
-
match item.kind {
ItemKind::TyAlias(..)
| ItemKind::Enum(..)
| ItemKind::Struct(..)
| ItemKind::Union(..) => {
- for attr in self.tcx.get_attrs(item_def_id.to_def_id()).iter() {
+ for attr in self.tcx.get_attrs(item.def_id.to_def_id()).iter() {
if self.tcx.sess.check_name(attr, sym::rustc_layout) {
- self.dump_layout_of(item_def_id, item, attr);
+ self.dump_layout_of(item.def_id, item, attr);
}
}
}
diff --git a/compiler/rustc_passes/src/lib_features.rs b/compiler/rustc_passes/src/lib_features.rs
index 7c62a23..3dfe317 100644
--- a/compiler/rustc_passes/src/lib_features.rs
+++ b/compiler/rustc_passes/src/lib_features.rs
@@ -109,7 +109,7 @@
}
fn span_feature_error(&self, span: Span, msg: &str) {
- struct_span_err!(self.tcx.sess, span, E0711, "{}", &msg,).emit();
+ struct_span_err!(self.tcx.sess, span, E0711, "{}", &msg).emit();
}
}
@@ -120,7 +120,7 @@
NestedVisitorMap::All(self.tcx.hir())
}
- fn visit_attribute(&mut self, attr: &'tcx Attribute) {
+ fn visit_attribute(&mut self, _: rustc_hir::HirId, attr: &'tcx Attribute) {
if let Some((feature, stable, span)) = self.extract(attr) {
self.collect_feature(feature, stable, span);
}
@@ -131,7 +131,7 @@
let mut collector = LibFeatureCollector::new(tcx);
let krate = tcx.hir().krate();
for attr in krate.non_exported_macro_attrs {
- collector.visit_attribute(attr);
+ collector.visit_attribute(rustc_hir::CRATE_HIR_ID, attr);
}
intravisit::walk_crate(&mut collector, krate);
collector.lib_features
diff --git a/compiler/rustc_passes/src/liveness.rs b/compiler/rustc_passes/src/liveness.rs
index c11dc231..9aef49d 100644
--- a/compiler/rustc_passes/src/liveness.rs
+++ b/compiler/rustc_passes/src/liveness.rs
@@ -95,7 +95,7 @@
use rustc_index::vec::IndexVec;
use rustc_middle::hir::map::Map;
use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::{self, DefIdTree, TyCtxt};
+use rustc_middle::ty::{self, DefIdTree, RootVariableMinCaptureList, TyCtxt};
use rustc_session::lint;
use rustc_span::symbol::{kw, sym, Symbol};
use rustc_span::Span;
@@ -331,7 +331,7 @@
}
}
- if let Some(captures) = maps.tcx.typeck(local_def_id).closure_captures.get(&def_id) {
+ if let Some(captures) = maps.tcx.typeck(local_def_id).closure_min_captures.get(&def_id) {
for &var_hir_id in captures.keys() {
let var_name = maps.tcx.hir().name(var_hir_id);
maps.add_variable(Upvar(var_hir_id, var_name));
@@ -367,12 +367,17 @@
}
fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
- let is_shorthand = matches!(param.pat.kind, rustc_hir::PatKind::Struct(..));
param.pat.each_binding(|_bm, hir_id, _x, ident| {
- let var = if is_shorthand {
- Local(LocalInfo { id: hir_id, name: ident.name, is_shorthand: true })
- } else {
- Param(hir_id, ident.name)
+ let var = match param.pat.kind {
+ rustc_hir::PatKind::Struct(_, fields, _) => Local(LocalInfo {
+ id: hir_id,
+ name: ident.name,
+ is_shorthand: fields
+ .iter()
+ .find(|f| f.ident == ident)
+ .map_or(false, |f| f.is_shorthand),
+ }),
+ _ => Param(hir_id, ident.name),
};
self.add_variable(var);
});
@@ -403,10 +408,10 @@
if let Some(captures) = self
.tcx
.typeck(closure_def_id)
- .closure_captures
+ .closure_min_captures
.get(&closure_def_id.to_def_id())
{
- // If closure captures is Some, upvars_mentioned must also be Some
+ // If closure_min_captures is Some, upvars_mentioned must also be Some
let upvars = self.tcx.upvars_mentioned(closure_def_id).unwrap();
call_caps.extend(captures.keys().map(|var_id| {
let upvar = upvars[var_id];
@@ -476,11 +481,10 @@
struct Liveness<'a, 'tcx> {
ir: &'a mut IrMaps<'tcx>,
- body_owner: LocalDefId,
typeck_results: &'a ty::TypeckResults<'tcx>,
param_env: ty::ParamEnv<'tcx>,
upvars: Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>>,
- closure_captures: Option<&'tcx FxIndexMap<hir::HirId, ty::UpvarId>>,
+ closure_min_captures: Option<&'tcx RootVariableMinCaptureList<'tcx>>,
successors: IndexVec<LiveNode, Option<LiveNode>>,
rwu_table: rwu_table::RWUTable,
@@ -504,8 +508,7 @@
let typeck_results = ir.tcx.typeck(body_owner);
let param_env = ir.tcx.param_env(body_owner);
let upvars = ir.tcx.upvars_mentioned(body_owner);
- let closure_captures = typeck_results.closure_captures.get(&body_owner.to_def_id());
-
+ let closure_min_captures = typeck_results.closure_min_captures.get(&body_owner.to_def_id());
let closure_ln = ir.add_live_node(ClosureNode);
let exit_ln = ir.add_live_node(ExitNode);
@@ -514,11 +517,10 @@
Liveness {
ir,
- body_owner,
typeck_results,
param_env,
upvars,
- closure_captures,
+ closure_min_captures,
successors: IndexVec::from_elem_n(None, num_live_nodes),
rwu_table: rwu_table::RWUTable::new(num_live_nodes, num_vars),
closure_ln,
@@ -702,25 +704,27 @@
// if they are live on the entry to the closure, since only the closure
// itself can access them on subsequent calls.
- if let Some(closure_captures) = self.closure_captures {
+ if let Some(closure_min_captures) = self.closure_min_captures {
// Mark upvars captured by reference as used after closure exits.
- // Since closure_captures is Some, upvars must exists too.
- let upvars = self.upvars.unwrap();
- for (&var_hir_id, upvar_id) in closure_captures {
- let upvar = upvars[&var_hir_id];
- match self.typeck_results.upvar_capture(*upvar_id) {
- ty::UpvarCapture::ByRef(_) => {
- let var = self.variable(var_hir_id, upvar.span);
- self.acc(self.exit_ln, var, ACC_READ | ACC_USE);
+ for (&var_hir_id, min_capture_list) in closure_min_captures {
+ for captured_place in min_capture_list {
+ match captured_place.info.capture_kind {
+ ty::UpvarCapture::ByRef(_) => {
+ let var = self.variable(
+ var_hir_id,
+ captured_place.get_capture_kind_span(self.ir.tcx),
+ );
+ self.acc(self.exit_ln, var, ACC_READ | ACC_USE);
+ }
+ ty::UpvarCapture::ByValue(_) => {}
}
- ty::UpvarCapture::ByValue(_) => {}
}
}
}
let succ = self.propagate_through_expr(&body.value, self.exit_ln);
- if self.closure_captures.is_none() {
+ if self.closure_min_captures.is_none() {
// Either not a closure, or closure without any captured variables.
// No need to determine liveness of captured variables, since there
// are none.
@@ -1216,7 +1220,7 @@
match path.res {
Res::Local(hid) => {
let in_upvars = self.upvars.map_or(false, |u| u.contains_key(&hid));
- let in_captures = self.closure_captures.map_or(false, |c| c.contains_key(&hid));
+ let in_captures = self.closure_min_captures.map_or(false, |c| c.contains_key(&hid));
match (in_upvars, in_captures) {
(false, _) | (true, true) => self.access_var(hir_id, hid, succ, acc, path.span),
@@ -1417,52 +1421,52 @@
}
fn warn_about_unused_upvars(&self, entry_ln: LiveNode) {
- let closure_captures = match self.closure_captures {
+ let closure_min_captures = match self.closure_min_captures {
None => return,
- Some(closure_captures) => closure_captures,
+ Some(closure_min_captures) => closure_min_captures,
};
- // If closure_captures is Some(), upvars must be Some() too.
- let upvars = self.upvars.unwrap();
- for &var_hir_id in closure_captures.keys() {
- let upvar = upvars[&var_hir_id];
- let var = self.variable(var_hir_id, upvar.span);
- let upvar_id = ty::UpvarId {
- var_path: ty::UpvarPath { hir_id: var_hir_id },
- closure_expr_id: self.body_owner,
- };
- match self.typeck_results.upvar_capture(upvar_id) {
- ty::UpvarCapture::ByValue(_) => {}
- ty::UpvarCapture::ByRef(..) => continue,
- };
- if self.used_on_entry(entry_ln, var) {
- if !self.live_on_entry(entry_ln, var) {
+ // If closure_min_captures is Some(), upvars must be Some() too.
+ for (&var_hir_id, min_capture_list) in closure_min_captures {
+ for captured_place in min_capture_list {
+ match captured_place.info.capture_kind {
+ ty::UpvarCapture::ByValue(_) => {}
+ ty::UpvarCapture::ByRef(..) => continue,
+ };
+ let span = captured_place.get_capture_kind_span(self.ir.tcx);
+ let var = self.variable(var_hir_id, span);
+ if self.used_on_entry(entry_ln, var) {
+ if !self.live_on_entry(entry_ln, var) {
+ if let Some(name) = self.should_warn(var) {
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_ASSIGNMENTS,
+ var_hir_id,
+ vec![span],
+ |lint| {
+ lint.build(&format!(
+ "value captured by `{}` is never read",
+ name
+ ))
+ .help("did you mean to capture by reference instead?")
+ .emit();
+ },
+ );
+ }
+ }
+ } else {
if let Some(name) = self.should_warn(var) {
self.ir.tcx.struct_span_lint_hir(
- lint::builtin::UNUSED_ASSIGNMENTS,
+ lint::builtin::UNUSED_VARIABLES,
var_hir_id,
- vec![upvar.span],
+ vec![span],
|lint| {
- lint.build(&format!("value captured by `{}` is never read", name))
+ lint.build(&format!("unused variable: `{}`", name))
.help("did you mean to capture by reference instead?")
.emit();
},
);
}
}
- } else {
- if let Some(name) = self.should_warn(var) {
- self.ir.tcx.struct_span_lint_hir(
- lint::builtin::UNUSED_VARIABLES,
- var_hir_id,
- vec![upvar.span],
- |lint| {
- lint.build(&format!("unused variable: `{}`", name))
- .help("did you mean to capture by reference instead?")
- .emit();
- },
- );
- }
}
}
}
@@ -1489,12 +1493,13 @@
// bindings, and we also consider the first pattern to be the "authoritative" set of ids.
// However, we should take the ids and spans of variables with the same name from the later
// patterns so the suggestions to prefix with underscores will apply to those too.
- let mut vars: FxIndexMap<Symbol, (LiveNode, Variable, Vec<(HirId, Span)>)> = <_>::default();
+ let mut vars: FxIndexMap<Symbol, (LiveNode, Variable, Vec<(HirId, Span, Span)>)> =
+ <_>::default();
pat.each_binding(|_, hir_id, pat_sp, ident| {
let ln = entry_ln.unwrap_or_else(|| self.live_node(hir_id, pat_sp));
let var = self.variable(hir_id, ident.span);
- let id_and_sp = (hir_id, pat_sp);
+ let id_and_sp = (hir_id, pat_sp, ident.span);
vars.entry(self.ir.variable_name(var))
.and_modify(|(.., hir_ids_and_spans)| hir_ids_and_spans.push(id_and_sp))
.or_insert_with(|| (ln, var, vec![id_and_sp]));
@@ -1503,7 +1508,8 @@
for (_, (ln, var, hir_ids_and_spans)) in vars {
if self.used_on_entry(ln, var) {
let id = hir_ids_and_spans[0].0;
- let spans = hir_ids_and_spans.into_iter().map(|(_, sp)| sp).collect();
+ let spans =
+ hir_ids_and_spans.into_iter().map(|(_, _, ident_span)| ident_span).collect();
on_used_on_entry(spans, id, ln, var);
} else {
self.report_unused(hir_ids_and_spans, ln, var);
@@ -1511,7 +1517,12 @@
}
}
- fn report_unused(&self, hir_ids_and_spans: Vec<(HirId, Span)>, ln: LiveNode, var: Variable) {
+ fn report_unused(
+ &self,
+ hir_ids_and_spans: Vec<(HirId, Span, Span)>,
+ ln: LiveNode,
+ var: Variable,
+ ) {
let first_hir_id = hir_ids_and_spans[0].0;
if let Some(name) = self.should_warn(var).filter(|name| name != "self") {
@@ -1525,7 +1536,10 @@
self.ir.tcx.struct_span_lint_hir(
lint::builtin::UNUSED_VARIABLES,
first_hir_id,
- hir_ids_and_spans.into_iter().map(|(_, sp)| sp).collect::<Vec<_>>(),
+ hir_ids_and_spans
+ .into_iter()
+ .map(|(_, _, ident_span)| ident_span)
+ .collect::<Vec<_>>(),
|lint| {
lint.build(&format!("variable `{}` is assigned to, but never used", name))
.note(&format!("consider using `_{}` instead", name))
@@ -1533,54 +1547,67 @@
},
)
} else {
- self.ir.tcx.struct_span_lint_hir(
- lint::builtin::UNUSED_VARIABLES,
- first_hir_id,
- hir_ids_and_spans.iter().map(|(_, sp)| *sp).collect::<Vec<_>>(),
- |lint| {
- let mut err = lint.build(&format!("unused variable: `{}`", name));
+ let (shorthands, non_shorthands): (Vec<_>, Vec<_>) =
+ hir_ids_and_spans.iter().copied().partition(|(hir_id, _, ident_span)| {
+ let var = self.variable(*hir_id, *ident_span);
+ self.ir.variable_is_shorthand(var)
+ });
- let (shorthands, non_shorthands): (Vec<_>, Vec<_>) =
- hir_ids_and_spans.into_iter().partition(|(hir_id, span)| {
- let var = self.variable(*hir_id, *span);
- self.ir.variable_is_shorthand(var)
- });
+ // If we have both shorthand and non-shorthand, prefer the "try ignoring
+ // the field" message, and suggest `_` for the non-shorthands. If we only
+ // have non-shorthand, then prefix with an underscore instead.
+ if !shorthands.is_empty() {
+ let shorthands = shorthands
+ .into_iter()
+ .map(|(_, pat_span, _)| (pat_span, format!("{}: _", name)))
+ .chain(
+ non_shorthands
+ .into_iter()
+ .map(|(_, pat_span, _)| (pat_span, "_".to_string())),
+ )
+ .collect::<Vec<_>>();
- let mut shorthands = shorthands
- .into_iter()
- .map(|(_, span)| (span, format!("{}: _", name)))
- .collect::<Vec<_>>();
-
- // If we have both shorthand and non-shorthand, prefer the "try ignoring
- // the field" message, and suggest `_` for the non-shorthands. If we only
- // have non-shorthand, then prefix with an underscore instead.
- if !shorthands.is_empty() {
- shorthands.extend(
- non_shorthands
- .into_iter()
- .map(|(_, span)| (span, "_".to_string()))
- .collect::<Vec<_>>(),
- );
-
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_VARIABLES,
+ first_hir_id,
+ hir_ids_and_spans
+ .iter()
+ .map(|(_, pat_span, _)| *pat_span)
+ .collect::<Vec<_>>(),
+ |lint| {
+ let mut err = lint.build(&format!("unused variable: `{}`", name));
err.multipart_suggestion(
"try ignoring the field",
shorthands,
Applicability::MachineApplicable,
);
- } else {
+ err.emit()
+ },
+ );
+ } else {
+ let non_shorthands = non_shorthands
+ .into_iter()
+ .map(|(_, _, ident_span)| (ident_span, format!("_{}", name)))
+ .collect::<Vec<_>>();
+
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_VARIABLES,
+ first_hir_id,
+ hir_ids_and_spans
+ .iter()
+ .map(|(_, _, ident_span)| *ident_span)
+ .collect::<Vec<_>>(),
+ |lint| {
+ let mut err = lint.build(&format!("unused variable: `{}`", name));
err.multipart_suggestion(
"if this is intentional, prefix it with an underscore",
- non_shorthands
- .into_iter()
- .map(|(_, span)| (span, format!("_{}", name)))
- .collect::<Vec<_>>(),
+ non_shorthands,
Applicability::MachineApplicable,
);
- }
-
- err.emit()
- },
- );
+ err.emit()
+ },
+ );
+ }
}
}
}
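
One user-visible effect of the liveness.rs changes above: `visit_param` now flags a binding as shorthand only when the matching struct-pattern field is actually written in shorthand form, and the unused-variable lint points at identifier spans instead of whole patterns. An assumed example input (not from the diff) where only `b` is a candidate for the `b: _` "try ignoring the field" suggestion, while `renamed` gets the underscore-prefix suggestion:

    struct Point { a: u32, b: u32 }

    // Neither binding is read, so both are reported as unused, but only the
    // shorthand `b` matches a shorthand field in the pattern.
    fn use_point(Point { a: renamed, b }: Point) -> u32 {
        0
    }

    fn main() {
        let _ = use_point(Point { a: 1, b: 2 });
    }
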
diff --git a/compiler/rustc_passes/src/naked_functions.rs b/compiler/rustc_passes/src/naked_functions.rs
index 93fb23c0..89bc2e1 100644
--- a/compiler/rustc_passes/src/naked_functions.rs
+++ b/compiler/rustc_passes/src/naked_functions.rs
@@ -46,7 +46,7 @@
let fn_header;
match fk {
- FnKind::Closure(..) => {
+ FnKind::Closure => {
// Closures with a naked attribute are rejected during attribute
// check. Don't validate them any further.
return;
@@ -62,7 +62,8 @@
}
}
- let naked = fk.attrs().iter().any(|attr| attr.has_name(sym::naked));
+ let attrs = self.tcx.hir().attrs(hir_id);
+ let naked = attrs.iter().any(|attr| attr.has_name(sym::naked));
if naked {
let body = self.tcx.hir().body(body_id);
check_abi(self.tcx, hir_id, fn_header.abi, ident_span);
diff --git a/compiler/rustc_passes/src/reachable.rs b/compiler/rustc_passes/src/reachable.rs
index eb24c51..20aaaea 100644
--- a/compiler/rustc_passes/src/reachable.rs
+++ b/compiler/rustc_passes/src/reachable.rs
@@ -31,7 +31,7 @@
match item.kind {
hir::ItemKind::Fn(ref sig, ..) if sig.header.is_const() => true,
hir::ItemKind::Impl { .. } | hir::ItemKind::Fn(..) => {
- let generics = tcx.generics_of(tcx.hir().local_def_id(item.hir_id));
+ let generics = tcx.generics_of(item.def_id);
generics.requires_monomorphization(tcx)
}
_ => false,
@@ -43,8 +43,8 @@
impl_item: &hir::ImplItem<'_>,
impl_src: LocalDefId,
) -> bool {
- let codegen_fn_attrs = tcx.codegen_fn_attrs(impl_item.hir_id.owner.to_def_id());
- let generics = tcx.generics_of(tcx.hir().local_def_id(impl_item.hir_id));
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(impl_item.hir_id().owner.to_def_id());
+ let generics = tcx.generics_of(impl_item.def_id);
if codegen_fn_attrs.requests_inline() || generics.requires_monomorphization(tcx) {
return true;
}
@@ -218,8 +218,7 @@
} else {
false
};
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
- let codegen_attrs = self.tcx.codegen_fn_attrs(def_id);
+ let codegen_attrs = self.tcx.codegen_fn_attrs(item.def_id);
let is_extern = codegen_attrs.contains_extern_indicator();
let std_internal =
codegen_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
@@ -239,9 +238,11 @@
Node::Item(item) => {
match item.kind {
hir::ItemKind::Fn(.., body) => {
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
- if item_might_be_inlined(self.tcx, &item, self.tcx.codegen_fn_attrs(def_id))
- {
+ if item_might_be_inlined(
+ self.tcx,
+ &item,
+ self.tcx.codegen_fn_attrs(item.def_id),
+ ) {
self.visit_nested_body(body);
}
}
@@ -341,23 +342,21 @@
// Anything which has custom linkage gets thrown on the worklist no
// matter where it is in the crate, along with "special std symbols"
// which are currently akin to allocator symbols.
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
- let codegen_attrs = self.tcx.codegen_fn_attrs(def_id);
+ let codegen_attrs = self.tcx.codegen_fn_attrs(item.def_id);
if codegen_attrs.contains_extern_indicator()
|| codegen_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL)
{
- self.worklist.push(def_id);
+ self.worklist.push(item.def_id);
}
// We need only trait impls here, not inherent impls, and only non-exported ones
if let hir::ItemKind::Impl(hir::Impl { of_trait: Some(ref trait_ref), ref items, .. }) =
item.kind
{
- if !self.access_levels.is_reachable(item.hir_id) {
+ if !self.access_levels.is_reachable(item.hir_id()) {
// FIXME(#53488) remove `let`
let tcx = self.tcx;
- self.worklist
- .extend(items.iter().map(|ii_ref| tcx.hir().local_def_id(ii_ref.id.hir_id)));
+ self.worklist.extend(items.iter().map(|ii_ref| ii_ref.id.def_id));
let trait_def_id = match trait_ref.path.res {
Res::Def(DefKind::Trait, def_id) => def_id,
diff --git a/compiler/rustc_passes/src/region.rs b/compiler/rustc_passes/src/region.rs
index 64356f7..b532021 100644
--- a/compiler/rustc_passes/src/region.rs
+++ b/compiler/rustc_passes/src/region.rs
@@ -664,7 +664,7 @@
match expr.kind {
hir::ExprKind::AddrOf(_, _, ref subexpr)
- | hir::ExprKind::Unary(hir::UnOp::UnDeref, ref subexpr)
+ | hir::ExprKind::Unary(hir::UnOp::Deref, ref subexpr)
| hir::ExprKind::Field(ref subexpr, _)
| hir::ExprKind::Index(ref subexpr, _) => {
expr = &subexpr;
diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs
index e1d03e3..dd9cb51 100644
--- a/compiler/rustc_passes/src/stability.rs
+++ b/compiler/rustc_passes/src/stability.rs
@@ -9,7 +9,7 @@
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
-use rustc_hir::{Generics, HirId, Item, StructField, TraitRef, Ty, TyKind, Variant};
+use rustc_hir::{FieldDef, Generics, HirId, Item, TraitRef, Ty, TyKind, Variant};
use rustc_middle::hir::map::Map;
use rustc_middle::middle::privacy::AccessLevels;
use rustc_middle::middle::stability::{DeprecationEntry, Index};
@@ -70,6 +70,17 @@
}
}
+enum InheritStability {
+ Yes,
+ No,
+}
+
+impl InheritStability {
+ fn yes(&self) -> bool {
+ matches!(self, InheritStability::Yes)
+ }
+}
+
// A private tree-walker for producing an Index.
struct Annotator<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
@@ -86,15 +97,16 @@
fn annotate<F>(
&mut self,
hir_id: HirId,
- attrs: &[Attribute],
item_sp: Span,
kind: AnnotationKind,
inherit_deprecation: InheritDeprecation,
inherit_const_stability: InheritConstStability,
+ inherit_from_parent: InheritStability,
visit_children: F,
) where
F: FnOnce(&mut Self),
{
+ let attrs = self.tcx.hir().attrs(hir_id);
debug!("annotate(id = {:?}, attrs = {:?})", hir_id, attrs);
let mut did_error = false;
if !self.tcx.features().staged_api {
@@ -131,12 +143,13 @@
}
if self.tcx.features().staged_api {
- if let Some(..) = attrs.iter().find(|a| self.tcx.sess.check_name(a, sym::deprecated)) {
- self.tcx.sess.span_err(
- item_sp,
- "`#[deprecated]` cannot be used in staged API; \
- use `#[rustc_deprecated]` instead",
- );
+ if let Some(a) = attrs.iter().find(|a| self.tcx.sess.check_name(a, sym::deprecated)) {
+ self.tcx
+ .sess
+ .struct_span_err(a.span, "`#[deprecated]` cannot be used in staged API")
+ .span_label(a.span, "use `#[rustc_deprecated]` instead")
+ .span_label(item_sp, "")
+ .emit();
}
} else {
self.recurse_with_stability_attrs(
@@ -150,7 +163,7 @@
let (stab, const_stab) = attr::find_stability(&self.tcx.sess, attrs, item_sp);
- let const_stab = const_stab.map(|const_stab| {
+ let const_stab = const_stab.map(|(const_stab, _)| {
let const_stab = self.tcx.intern_const_stability(const_stab);
self.index.const_stab_map.insert(hir_id, const_stab);
const_stab
@@ -180,12 +193,15 @@
}
}
- let stab = stab.map(|stab| {
+ let stab = stab.map(|(stab, span)| {
// Error if prohibited, or can't inherit anything from a container.
if kind == AnnotationKind::Prohibited
|| (kind == AnnotationKind::Container && stab.level.is_stable() && is_deprecated)
{
- self.tcx.sess.span_err(item_sp, "This stability annotation is useless");
+ self.tcx.sess.struct_span_err(span, "this stability annotation is useless")
+ .span_label(span, "useless stability annotation")
+ .span_label(item_sp, "the stability attribute annotates this item")
+ .emit();
}
debug!("annotate: found {:?}", stab);
@@ -202,16 +218,19 @@
{
match stab_v.parse::<u64>() {
Err(_) => {
- self.tcx.sess.span_err(item_sp, "Invalid stability version found");
+ self.tcx.sess.struct_span_err(span, "invalid stability version found")
+ .span_label(span, "invalid stability version")
+ .span_label(item_sp, "the stability attribute annotates this item")
+ .emit();
break;
}
Ok(stab_vp) => match dep_v.parse::<u64>() {
Ok(dep_vp) => match dep_vp.cmp(&stab_vp) {
Ordering::Less => {
- self.tcx.sess.span_err(
- item_sp,
- "An API can't be stabilized after it is deprecated",
- );
+ self.tcx.sess.struct_span_err(span, "an API can't be stabilized after it is deprecated")
+ .span_label(span, "invalid version")
+ .span_label(item_sp, "the stability attribute annotates this item")
+ .emit();
break;
}
Ordering::Equal => continue,
@@ -219,9 +238,10 @@
},
Err(_) => {
if dep_v != "TBD" {
- self.tcx
- .sess
- .span_err(item_sp, "Invalid deprecation version found");
+ self.tcx.sess.struct_span_err(span, "invalid deprecation version found")
+ .span_label(span, "invalid deprecation version")
+ .span_label(item_sp, "the stability attribute annotates this item")
+ .emit();
}
break;
}
@@ -237,7 +257,9 @@
if stab.is_none() {
debug!("annotate: stab not found, parent = {:?}", self.parent_stab);
if let Some(stab) = self.parent_stab {
- if inherit_deprecation.yes() && stab.level.is_unstable() {
+ if inherit_deprecation.yes() && stab.level.is_unstable()
+ || inherit_from_parent.yes()
+ {
self.index.stab_map.insert(hir_id, stab);
}
}
@@ -363,11 +385,11 @@
if let Some(ctor_hir_id) = sd.ctor_hir_id() {
self.annotate(
ctor_hir_id,
- &i.attrs,
i.span,
AnnotationKind::Required,
InheritDeprecation::Yes,
InheritConstStability::No,
+ InheritStability::Yes,
|_| {},
)
}
@@ -376,12 +398,12 @@
}
self.annotate(
- i.hir_id,
- &i.attrs,
+ i.hir_id(),
i.span,
kind,
InheritDeprecation::Yes,
const_stab_inherit,
+ InheritStability::No,
|v| intravisit::walk_item(v, i),
);
self.in_trait_impl = orig_in_trait_impl;
@@ -389,12 +411,12 @@
fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem<'tcx>) {
self.annotate(
- ti.hir_id,
- &ti.attrs,
+ ti.hir_id(),
ti.span,
AnnotationKind::Required,
InheritDeprecation::Yes,
InheritConstStability::No,
+ InheritStability::No,
|v| {
intravisit::walk_trait_item(v, ti);
},
@@ -405,12 +427,12 @@
let kind =
if self.in_trait_impl { AnnotationKind::Prohibited } else { AnnotationKind::Required };
self.annotate(
- ii.hir_id,
- &ii.attrs,
+ ii.hir_id(),
ii.span,
kind,
InheritDeprecation::Yes,
InheritConstStability::No,
+ InheritStability::No,
|v| {
intravisit::walk_impl_item(v, ii);
},
@@ -420,20 +442,20 @@
fn visit_variant(&mut self, var: &'tcx Variant<'tcx>, g: &'tcx Generics<'tcx>, item_id: HirId) {
self.annotate(
var.id,
- &var.attrs,
var.span,
AnnotationKind::Required,
InheritDeprecation::Yes,
InheritConstStability::No,
+ InheritStability::Yes,
|v| {
if let Some(ctor_hir_id) = var.data.ctor_hir_id() {
v.annotate(
ctor_hir_id,
- &var.attrs,
var.span,
AnnotationKind::Required,
InheritDeprecation::Yes,
InheritConstStability::No,
+ InheritStability::No,
|_| {},
);
}
@@ -443,28 +465,28 @@
)
}
- fn visit_struct_field(&mut self, s: &'tcx StructField<'tcx>) {
+ fn visit_field_def(&mut self, s: &'tcx FieldDef<'tcx>) {
self.annotate(
s.hir_id,
- &s.attrs,
s.span,
AnnotationKind::Required,
InheritDeprecation::Yes,
InheritConstStability::No,
+ InheritStability::Yes,
|v| {
- intravisit::walk_struct_field(v, s);
+ intravisit::walk_field_def(v, s);
},
);
}
fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem<'tcx>) {
self.annotate(
- i.hir_id,
- &i.attrs,
+ i.hir_id(),
i.span,
AnnotationKind::Required,
InheritDeprecation::Yes,
InheritConstStability::No,
+ InheritStability::No,
|v| {
intravisit::walk_foreign_item(v, i);
},
@@ -473,12 +495,12 @@
fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef<'tcx>) {
self.annotate(
- md.hir_id,
- &md.attrs,
+ md.hir_id(),
md.span,
AnnotationKind::Required,
InheritDeprecation::Yes,
InheritConstStability::No,
+ InheritStability::No,
|_| {},
);
}
@@ -494,11 +516,11 @@
self.annotate(
p.hir_id,
- &p.attrs,
p.span,
kind,
InheritDeprecation::No,
InheritConstStability::No,
+ InheritStability::No,
|v| {
intravisit::walk_generic_param(v, p);
},
@@ -556,7 +578,7 @@
hir::ItemKind::Impl(hir::Impl { of_trait: None, .. })
| hir::ItemKind::ForeignMod { .. }
) {
- self.check_missing_stability(i.hir_id, i.span);
+ self.check_missing_stability(i.hir_id(), i.span);
}
// Ensure `const fn` that are `stable` have one of `rustc_const_unstable` or
@@ -564,21 +586,21 @@
if self.tcx.features().staged_api
&& matches!(&i.kind, hir::ItemKind::Fn(sig, ..) if sig.header.is_const())
{
- self.check_missing_const_stability(i.hir_id, i.span);
+ self.check_missing_const_stability(i.hir_id(), i.span);
}
intravisit::walk_item(self, i)
}
fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem<'tcx>) {
- self.check_missing_stability(ti.hir_id, ti.span);
+ self.check_missing_stability(ti.hir_id(), ti.span);
intravisit::walk_trait_item(self, ti);
}
fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem<'tcx>) {
- let impl_def_id = self.tcx.hir().local_def_id(self.tcx.hir().get_parent_item(ii.hir_id));
+ let impl_def_id = self.tcx.hir().local_def_id(self.tcx.hir().get_parent_item(ii.hir_id()));
if self.tcx.impl_trait_ref(impl_def_id).is_none() {
- self.check_missing_stability(ii.hir_id, ii.span);
+ self.check_missing_stability(ii.hir_id(), ii.span);
}
intravisit::walk_impl_item(self, ii);
}
@@ -588,18 +610,18 @@
intravisit::walk_variant(self, var, g, item_id);
}
- fn visit_struct_field(&mut self, s: &'tcx StructField<'tcx>) {
+ fn visit_field_def(&mut self, s: &'tcx FieldDef<'tcx>) {
self.check_missing_stability(s.hir_id, s.span);
- intravisit::walk_struct_field(self, s);
+ intravisit::walk_field_def(self, s);
}
fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem<'tcx>) {
- self.check_missing_stability(i.hir_id, i.span);
+ self.check_missing_stability(i.hir_id(), i.span);
intravisit::walk_foreign_item(self, i);
}
fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef<'tcx>) {
- self.check_missing_stability(md.hir_id, md.span);
+ self.check_missing_stability(md.hir_id(), md.span);
}
// Note that we don't need to `check_missing_stability` for default generic parameters,
@@ -664,11 +686,11 @@
annotator.annotate(
hir::CRATE_HIR_ID,
- &krate.item.attrs,
krate.item.span,
AnnotationKind::Required,
InheritDeprecation::Yes,
InheritConstStability::No,
+ InheritStability::No,
|v| intravisit::walk_crate(v, krate),
);
}
@@ -712,13 +734,12 @@
return;
}
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
- let cnum = match self.tcx.extern_mod_stmt_cnum(def_id) {
+ let cnum = match self.tcx.extern_mod_stmt_cnum(item.def_id) {
Some(cnum) => cnum,
None => return,
};
let def_id = DefId { krate: cnum, index: CRATE_DEF_INDEX };
- self.tcx.check_stability(def_id, Some(item.hir_id), item.span);
+ self.tcx.check_stability(def_id, Some(item.hir_id()), item.span);
}
// For implementations of traits, check the stability of each item
@@ -730,21 +751,17 @@
// error if all involved types and traits are stable, because
// it will have no effect.
// See: https://github.com/rust-lang/rust/issues/55436
- if let (Some(Stability { level: attr::Unstable { .. }, .. }), _) =
- attr::find_stability(&self.tcx.sess, &item.attrs, item.span)
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+ if let (Some((Stability { level: attr::Unstable { .. }, .. }, span)), _) =
+ attr::find_stability(&self.tcx.sess, attrs, item.span)
{
let mut c = CheckTraitImplStable { tcx: self.tcx, fully_stable: true };
c.visit_ty(self_ty);
c.visit_trait_ref(t);
if c.fully_stable {
- let span = item
- .attrs
- .iter()
- .find(|a| a.has_name(sym::unstable))
- .map_or(item.span, |a| a.span);
self.tcx.struct_span_lint_hir(
INEFFECTIVE_UNSTABLE_TRAIT_IMPL,
- item.hir_id,
+ item.hir_id(),
span,
|lint| lint
.build("an `#[unstable]` annotation here has no effect")
@@ -775,15 +792,14 @@
// There's no good place to insert stability check for non-Copy unions,
// so semi-randomly perform it here in stability.rs
hir::ItemKind::Union(..) if !self.tcx.features().untagged_unions => {
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
- let ty = self.tcx.type_of(def_id);
+ let ty = self.tcx.type_of(item.def_id);
let (adt_def, substs) = match ty.kind() {
ty::Adt(adt_def, substs) => (adt_def, substs),
_ => bug!(),
};
// Non-`Copy` fields are unstable, except for `ManuallyDrop`.
- let param_env = self.tcx.param_env(def_id);
+ let param_env = self.tcx.param_env(item.def_id);
for field in &adt_def.non_enum_variant().fields {
let field_ty = field.ty(self.tcx, substs);
if !field_ty.ty_adt_def().map_or(false, |adt_def| adt_def.is_manually_drop())
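The stability hunk above threads a new `InheritStability` flag through `annotate` so that fields, enum variants, and struct/variant constructors pick up their parent's stability even when the parent is stable, while other nodes still only inherit from an unstable parent. The following is a standalone sketch of that decision rule using simplified stand-in types (`Level`, `InheritDeprecation`, and `InheritStability` here are not rustc's real definitions); it mirrors the `inherit_deprecation.yes() && stab.level.is_unstable() || inherit_from_parent.yes()` condition added in the diff.

```rust
// Minimal sketch, assuming simplified stand-in types rather than rustc's own.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Level {
    Stable,
    Unstable,
}

#[derive(Clone, Copy)]
enum InheritDeprecation {
    Yes,
    No,
}

#[derive(Clone, Copy)]
enum InheritStability {
    Yes,
    No,
}

fn inherited_stability(
    own: Option<Level>,
    parent: Option<Level>,
    inherit_deprecation: InheritDeprecation,
    inherit_from_parent: InheritStability,
) -> Option<Level> {
    if own.is_some() {
        // An explicit stability attribute always wins.
        return own;
    }
    match parent {
        Some(level)
            if matches!(inherit_deprecation, InheritDeprecation::Yes)
                && level == Level::Unstable
                || matches!(inherit_from_parent, InheritStability::Yes) =>
        {
            Some(level)
        }
        _ => None,
    }
}

fn main() {
    // A field of a stable container now picks up the container's stability...
    assert_eq!(
        inherited_stability(
            None,
            Some(Level::Stable),
            InheritDeprecation::Yes,
            InheritStability::Yes,
        ),
        Some(Level::Stable)
    );
    // ...while an ordinary item under a stable parent still inherits nothing.
    assert_eq!(
        inherited_stability(
            None,
            Some(Level::Stable),
            InheritDeprecation::Yes,
            InheritStability::No,
        ),
        None
    );
}
```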
diff --git a/compiler/rustc_passes/src/weak_lang_items.rs b/compiler/rustc_passes/src/weak_lang_items.rs
index daff94c..de369ba 100644
--- a/compiler/rustc_passes/src/weak_lang_items.rs
+++ b/compiler/rustc_passes/src/weak_lang_items.rs
@@ -97,7 +97,8 @@
fn visit_foreign_item(&mut self, i: &hir::ForeignItem<'_>) {
let check_name = |attr, sym| self.tcx.sess.check_name(attr, sym);
- if let Some((lang_item, _)) = lang_items::extract(check_name, &i.attrs) {
+ let attrs = self.tcx.hir().attrs(i.hir_id());
+ if let Some((lang_item, _)) = lang_items::extract(check_name, attrs) {
self.register(lang_item, i.span);
}
intravisit::walk_foreign_item(self, i)
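A pattern repeated across these hunks is that attributes are no longer read from a field on the HIR node (`i.attrs`) but fetched from the HIR map with `self.tcx.hir().attrs(i.hir_id())`. As a rough illustration only — the types below are simplified stand-ins, not rustc's `HirId` or `Attribute` — the lookup behaves like a side table keyed by id that hands back an empty slice for nodes without attributes.

```rust
use std::collections::HashMap;

// Stand-in id and attribute types; the real ones live in rustc_hir/rustc_ast.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct HirId(u32);

#[derive(Debug, PartialEq)]
struct Attribute(&'static str);

struct AttributeMap {
    map: HashMap<HirId, Vec<Attribute>>,
}

impl AttributeMap {
    // Nodes without attributes simply get an empty slice back, so callers can
    // iterate unconditionally, much like `tcx.hir().attrs(hir_id)` above.
    fn attrs(&self, id: HirId) -> &[Attribute] {
        self.map.get(&id).map_or(&[], |v| v.as_slice())
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert(HirId(7), vec![Attribute("inline")]);
    let attrs = AttributeMap { map };

    assert_eq!(attrs.attrs(HirId(7)), &[Attribute("inline")][..]);
    assert!(attrs.attrs(HirId(8)).is_empty());
}
```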
diff --git a/compiler/rustc_plugin_impl/src/build.rs b/compiler/rustc_plugin_impl/src/build.rs
index 4796d9a..a49afa3 100644
--- a/compiler/rustc_plugin_impl/src/build.rs
+++ b/compiler/rustc_plugin_impl/src/build.rs
@@ -1,7 +1,7 @@
//! Used by `rustc` when compiling a plugin crate.
use rustc_hir as hir;
-use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
use rustc_hir::itemlikevisit::ItemLikeVisitor;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
@@ -10,14 +10,15 @@
struct RegistrarFinder<'tcx> {
tcx: TyCtxt<'tcx>,
- registrars: Vec<(hir::HirId, Span)>,
+ registrars: Vec<(LocalDefId, Span)>,
}
impl<'v, 'tcx> ItemLikeVisitor<'v> for RegistrarFinder<'tcx> {
fn visit_item(&mut self, item: &hir::Item<'_>) {
if let hir::ItemKind::Fn(..) = item.kind {
- if self.tcx.sess.contains_name(&item.attrs, sym::plugin_registrar) {
- self.registrars.push((item.hir_id, item.span));
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+ if self.tcx.sess.contains_name(attrs, sym::plugin_registrar) {
+ self.registrars.push((item.def_id, item.span));
}
}
}
@@ -43,8 +44,8 @@
match finder.registrars.len() {
0 => None,
1 => {
- let (hir_id, _) = finder.registrars.pop().unwrap();
- Some(tcx.hir().local_def_id(hir_id).to_def_id())
+ let (def_id, _) = finder.registrars.pop().unwrap();
+ Some(def_id.to_def_id())
}
_ => {
let diagnostic = tcx.sess.diagnostic();
diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs
index 631dcb6..84240f8 100644
--- a/compiler/rustc_privacy/src/lib.rs
+++ b/compiler/rustc_privacy/src/lib.rs
@@ -77,6 +77,12 @@
fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow<Self::BreakTy> {
self.skeleton().visit_trait(trait_ref)
}
+ fn visit_projection_ty(
+ &mut self,
+ projection: ty::ProjectionTy<'tcx>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.skeleton().visit_projection_ty(projection)
+ }
fn visit_predicates(
&mut self,
predicates: ty::GenericPredicates<'tcx>,
@@ -101,6 +107,20 @@
if self.def_id_visitor.shallow() { ControlFlow::CONTINUE } else { substs.visit_with(self) }
}
+ fn visit_projection_ty(
+ &mut self,
+ projection: ty::ProjectionTy<'tcx>,
+ ) -> ControlFlow<V::BreakTy> {
+ let (trait_ref, assoc_substs) =
+ projection.trait_ref_and_own_substs(self.def_id_visitor.tcx());
+ self.visit_trait(trait_ref)?;
+ if self.def_id_visitor.shallow() {
+ ControlFlow::CONTINUE
+ } else {
+ assoc_substs.iter().try_for_each(|subst| subst.visit_with(self))
+ }
+ }
+
fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<V::BreakTy> {
match predicate.kind().skip_binder() {
ty::PredicateKind::Trait(ty::TraitPredicate { trait_ref }, _) => {
@@ -108,7 +128,7 @@
}
ty::PredicateKind::Projection(ty::ProjectionPredicate { projection_ty, ty }) => {
ty.visit_with(self)?;
- self.visit_trait(projection_ty.trait_ref(self.def_id_visitor.tcx()))
+ self.visit_projection_ty(projection_ty)
}
ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty, _region)) => {
ty.visit_with(self)
@@ -197,7 +217,7 @@
return ControlFlow::CONTINUE;
}
// This will also visit substs if necessary, so we don't need to recurse.
- return self.visit_trait(proj.trait_ref(tcx));
+ return self.visit_projection_ty(proj);
}
ty::Dynamic(predicates, ..) => {
// All traits in the list are considered the "primary" part of the type
@@ -454,11 +474,9 @@
let module_def_id = self.tcx.hir().local_def_id(reachable_mod);
let module = self.tcx.hir().get_module(module_def_id).0;
for item_id in module.item_ids {
- let hir_id = item_id.id;
- let item_def_id = self.tcx.hir().local_def_id(hir_id);
- let def_kind = self.tcx.def_kind(item_def_id);
- let vis = self.tcx.visibility(item_def_id);
- self.update_macro_reachable_def(hir_id, def_kind, vis, defining_mod);
+ let def_kind = self.tcx.def_kind(item_id.def_id);
+ let vis = self.tcx.visibility(item_id.def_id);
+ self.update_macro_reachable_def(item_id.hir_id(), def_kind, vis, defining_mod);
}
if let Some(exports) = self.tcx.module_exports(module_def_id) {
for export in exports {
@@ -588,14 +606,17 @@
.map(|module_hir_id| self.tcx.hir().expect_item(module_hir_id))
{
if let hir::ItemKind::Mod(m) = &item.kind {
- for item_id in m.item_ids {
- let item = self.tcx.hir().expect_item(item_id.id);
- let def_id = self.tcx.hir().local_def_id(item_id.id);
- if !self.tcx.hygienic_eq(segment.ident, item.ident, def_id.to_def_id()) {
+ for &item_id in m.item_ids {
+ let item = self.tcx.hir().item(item_id);
+ if !self.tcx.hygienic_eq(
+ segment.ident,
+ item.ident,
+ item_id.def_id.to_def_id(),
+ ) {
continue;
}
if let hir::ItemKind::Use(..) = item.kind {
- self.update(item.hir_id, Some(AccessLevel::Exported));
+ self.update(item.hir_id(), Some(AccessLevel::Exported));
}
}
}
@@ -616,7 +637,7 @@
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let inherited_item_level = match item.kind {
hir::ItemKind::Impl { .. } => {
- Option::<AccessLevel>::of_impl(item.hir_id, self.tcx, &self.access_levels)
+ Option::<AccessLevel>::of_impl(item.hir_id(), self.tcx, &self.access_levels)
}
// Foreign modules inherit level from parents.
hir::ItemKind::ForeignMod { .. } => self.prev_level,
@@ -644,7 +665,7 @@
};
// Update level of the item itself.
- let item_level = self.update(item.hir_id, inherited_item_level);
+ let item_level = self.update(item.hir_id(), inherited_item_level);
// Update levels of nested things.
match item.kind {
@@ -662,13 +683,13 @@
hir::ItemKind::Impl(ref impl_) => {
for impl_item_ref in impl_.items {
if impl_.of_trait.is_some() || impl_item_ref.vis.node.is_pub() {
- self.update(impl_item_ref.id.hir_id, item_level);
+ self.update(impl_item_ref.id.hir_id(), item_level);
}
}
}
hir::ItemKind::Trait(.., trait_item_refs) => {
for trait_item_ref in trait_item_refs {
- self.update(trait_item_ref.id.hir_id, item_level);
+ self.update(trait_item_ref.id.hir_id(), item_level);
}
}
hir::ItemKind::Struct(ref def, _) | hir::ItemKind::Union(ref def, _) => {
@@ -684,7 +705,7 @@
hir::ItemKind::ForeignMod { items, .. } => {
for foreign_item in items {
if foreign_item.vis.node.is_pub() {
- self.update(foreign_item.id.hir_id, item_level);
+ self.update(foreign_item.id.hir_id(), item_level);
}
}
}
@@ -727,7 +748,7 @@
// reachable if they are returned via `impl Trait`, even from private functions.
let exist_level =
cmp::max(item_level, Some(AccessLevel::ReachableFromImplTrait));
- self.reach(item.hir_id, exist_level).generics().predicates().ty();
+ self.reach(item.hir_id(), exist_level).generics().predicates().ty();
}
}
// Visit everything.
@@ -736,15 +757,15 @@
| hir::ItemKind::Fn(..)
| hir::ItemKind::TyAlias(..) => {
if item_level.is_some() {
- self.reach(item.hir_id, item_level).generics().predicates().ty();
+ self.reach(item.hir_id(), item_level).generics().predicates().ty();
}
}
hir::ItemKind::Trait(.., trait_item_refs) => {
if item_level.is_some() {
- self.reach(item.hir_id, item_level).generics().predicates();
+ self.reach(item.hir_id(), item_level).generics().predicates();
for trait_item_ref in trait_item_refs {
- let mut reach = self.reach(trait_item_ref.id.hir_id, item_level);
+ let mut reach = self.reach(trait_item_ref.id.hir_id(), item_level);
reach.generics().predicates();
if trait_item_ref.kind == AssocItemKind::Type
@@ -759,18 +780,18 @@
}
hir::ItemKind::TraitAlias(..) => {
if item_level.is_some() {
- self.reach(item.hir_id, item_level).generics().predicates();
+ self.reach(item.hir_id(), item_level).generics().predicates();
}
}
// Visit everything except for private impl items.
hir::ItemKind::Impl(ref impl_) => {
if item_level.is_some() {
- self.reach(item.hir_id, item_level).generics().predicates().ty().trait_ref();
+ self.reach(item.hir_id(), item_level).generics().predicates().ty().trait_ref();
for impl_item_ref in impl_.items {
- let impl_item_level = self.get(impl_item_ref.id.hir_id);
+ let impl_item_level = self.get(impl_item_ref.id.hir_id());
if impl_item_level.is_some() {
- self.reach(impl_item_ref.id.hir_id, impl_item_level)
+ self.reach(impl_item_ref.id.hir_id(), impl_item_level)
.generics()
.predicates()
.ty();
@@ -782,7 +803,7 @@
// Visit everything, but enum variants have their own levels.
hir::ItemKind::Enum(ref def, _) => {
if item_level.is_some() {
- self.reach(item.hir_id, item_level).generics().predicates();
+ self.reach(item.hir_id(), item_level).generics().predicates();
}
for variant in def.variants {
let variant_level = self.get(variant.id);
@@ -792,16 +813,16 @@
}
// Corner case: if the variant is reachable, but its
// enum is not, make the enum reachable as well.
- self.update(item.hir_id, variant_level);
+ self.update(item.hir_id(), variant_level);
}
}
}
// Visit everything, but foreign items have their own levels.
hir::ItemKind::ForeignMod { items, .. } => {
for foreign_item in items {
- let foreign_item_level = self.get(foreign_item.id.hir_id);
+ let foreign_item_level = self.get(foreign_item.id.hir_id());
if foreign_item_level.is_some() {
- self.reach(foreign_item.id.hir_id, foreign_item_level)
+ self.reach(foreign_item.id.hir_id(), foreign_item_level)
.generics()
.predicates()
.ty();
@@ -811,7 +832,7 @@
// Visit everything except for private fields.
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
if item_level.is_some() {
- self.reach(item.hir_id, item_level).generics().predicates();
+ self.reach(item.hir_id(), item_level).generics().predicates();
for field in struct_def.fields() {
let field_level = self.get(field.hir_id);
if field_level.is_some() {
@@ -860,20 +881,19 @@
fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef<'tcx>) {
// Non-opaque macros cannot make other items more accessible than they already are.
- if attr::find_transparency(&self.tcx.sess, &md.attrs, md.ast.macro_rules).0
+ let attrs = self.tcx.hir().attrs(md.hir_id());
+ if attr::find_transparency(&self.tcx.sess, &attrs, md.ast.macro_rules).0
!= Transparency::Opaque
{
// `#[macro_export]`-ed `macro_rules!` are `Public` since they
// ignore their containing path to always appear at the crate root.
if md.ast.macro_rules {
- self.update(md.hir_id, Some(AccessLevel::Public));
+ self.update(md.hir_id(), Some(AccessLevel::Public));
}
return;
}
- let macro_module_def_id =
- ty::DefIdTree::parent(self.tcx, self.tcx.hir().local_def_id(md.hir_id).to_def_id())
- .unwrap();
+ let macro_module_def_id = ty::DefIdTree::parent(self.tcx, md.def_id.to_def_id()).unwrap();
let hir_id = macro_module_def_id
.as_local()
.map(|def_id| self.tcx.hir().local_def_id_to_hir_id(def_id));
@@ -883,7 +903,7 @@
_ => return,
};
let level = if md.vis.node.is_pub() { self.get(module_id) } else { None };
- let new_level = self.update(md.hir_id, level);
+ let new_level = self.update(md.hir_id(), level);
if new_level.is_none() {
return;
}
@@ -1037,7 +1057,7 @@
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- let orig_current_item = self.current_item.replace(item.hir_id);
+ let orig_current_item = self.current_item.replace(item.hir_id());
intravisit::walk_item(self, item);
self.current_item = orig_current_item;
}
@@ -1204,10 +1224,9 @@
}
for (poly_predicate, _) in bounds.projection_bounds {
- let tcx = self.tcx;
if self.visit(poly_predicate.skip_binder().ty).is_break()
|| self
- .visit_trait(poly_predicate.skip_binder().projection_ty.trait_ref(tcx))
+ .visit_projection_ty(poly_predicate.skip_binder().projection_ty)
.is_break()
{
return;
@@ -1322,8 +1341,7 @@
// Check types in item interfaces.
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- let orig_current_item =
- mem::replace(&mut self.current_item, self.tcx.hir().local_def_id(item.hir_id));
+ let orig_current_item = mem::replace(&mut self.current_item, item.def_id);
let old_maybe_typeck_results = self.maybe_typeck_results.take();
intravisit::walk_item(self, item);
self.maybe_typeck_results = old_maybe_typeck_results;
@@ -1463,7 +1481,7 @@
hir::ItemKind::ForeignMod { .. } => {}
hir::ItemKind::Trait(.., ref bounds, _) => {
- if !self.trait_is_public(item.hir_id) {
+ if !self.trait_is_public(item.hir_id()) {
return;
}
@@ -1526,7 +1544,7 @@
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item.kind {
hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..) => {
- self.access_levels.is_reachable(impl_item_ref.id.hir_id)
+ self.access_levels.is_reachable(impl_item_ref.id.hir_id())
}
hir::ImplItemKind::TyAlias(_) => false,
}
@@ -1546,8 +1564,10 @@
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item.kind {
hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..)
- if self
- .item_is_public(&impl_item.hir_id, &impl_item.vis) =>
+ if self.item_is_public(
+ &impl_item.hir_id(),
+ &impl_item.vis,
+ ) =>
{
intravisit::walk_impl_item(self, impl_item)
}
@@ -1588,7 +1608,7 @@
// methods will be visible as `Public::foo`.
let mut found_pub_static = false;
for impl_item_ref in impl_.items {
- if self.item_is_public(&impl_item_ref.id.hir_id, &impl_item_ref.vis) {
+ if self.item_is_public(&impl_item_ref.id.hir_id(), &impl_item_ref.vis) {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item_ref.kind {
AssocItemKind::Const => {
@@ -1615,7 +1635,7 @@
hir::ItemKind::TyAlias(..) => return,
// Not at all public, so we don't care.
- _ if !self.item_is_public(&item.hir_id, &item.vis) => {
+ _ if !self.item_is_public(&item.hir_id(), &item.vis) => {
return;
}
@@ -1651,7 +1671,7 @@
}
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
- if self.access_levels.is_reachable(item.hir_id) {
+ if self.access_levels.is_reachable(item.hir_id()) {
intravisit::walk_foreign_item(self, item)
}
}
@@ -1678,9 +1698,9 @@
}
}
- fn visit_struct_field(&mut self, s: &'tcx hir::StructField<'tcx>) {
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
if s.vis.node.is_pub() || self.in_variant {
- intravisit::walk_struct_field(self, s);
+ intravisit::walk_field_def(self, s);
}
}
@@ -1849,41 +1869,18 @@
}
}
-struct PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
+struct PrivateItemsInPublicInterfacesVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
has_pub_restricted: bool,
- old_error_set: &'a HirIdSet,
+ old_error_set_ancestry: HirIdSet,
}
-impl<'a, 'tcx> PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
+impl<'tcx> PrivateItemsInPublicInterfacesVisitor<'tcx> {
fn check(
&self,
item_id: hir::HirId,
required_visibility: ty::Visibility,
) -> SearchInterfaceForPrivateItemsVisitor<'tcx> {
- let mut has_old_errors = false;
-
- // Slow path taken only if there any errors in the crate.
- for &id in self.old_error_set {
- // Walk up the nodes until we find `item_id` (or we hit a root).
- let mut id = id;
- loop {
- if id == item_id {
- has_old_errors = true;
- break;
- }
- let parent = self.tcx.hir().get_parent_node(id);
- if parent == id {
- break;
- }
- id = parent;
- }
-
- if has_old_errors {
- break;
- }
- }
-
SearchInterfaceForPrivateItemsVisitor {
tcx: self.tcx,
item_id,
@@ -1891,7 +1888,7 @@
span: self.tcx.hir().span(item_id),
required_visibility,
has_pub_restricted: self.has_pub_restricted,
- has_old_errors,
+ has_old_errors: self.old_error_set_ancestry.contains(&item_id),
in_assoc_ty: false,
}
}
@@ -1917,7 +1914,7 @@
}
}
-impl<'a, 'tcx> Visitor<'tcx> for PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
+impl<'tcx> Visitor<'tcx> for PrivateItemsInPublicInterfacesVisitor<'tcx> {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
@@ -1926,7 +1923,7 @@
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let tcx = self.tcx;
- let item_visibility = tcx.visibility(tcx.hir().local_def_id(item.hir_id).to_def_id());
+ let item_visibility = tcx.visibility(item.def_id);
match item.kind {
// Crates are always public.
@@ -1942,34 +1939,34 @@
| hir::ItemKind::Static(..)
| hir::ItemKind::Fn(..)
| hir::ItemKind::TyAlias(..) => {
- self.check(item.hir_id, item_visibility).generics().predicates().ty();
+ self.check(item.hir_id(), item_visibility).generics().predicates().ty();
}
hir::ItemKind::OpaqueTy(..) => {
// `ty()` for opaque types is the underlying type,
// it's not a part of interface, so we skip it.
- self.check(item.hir_id, item_visibility).generics().bounds();
+ self.check(item.hir_id(), item_visibility).generics().bounds();
}
hir::ItemKind::Trait(.., trait_item_refs) => {
- self.check(item.hir_id, item_visibility).generics().predicates();
+ self.check(item.hir_id(), item_visibility).generics().predicates();
for trait_item_ref in trait_item_refs {
self.check_assoc_item(
- trait_item_ref.id.hir_id,
+ trait_item_ref.id.hir_id(),
trait_item_ref.kind,
trait_item_ref.defaultness,
item_visibility,
);
if let AssocItemKind::Type = trait_item_ref.kind {
- self.check(trait_item_ref.id.hir_id, item_visibility).bounds();
+ self.check(trait_item_ref.id.hir_id(), item_visibility).bounds();
}
}
}
hir::ItemKind::TraitAlias(..) => {
- self.check(item.hir_id, item_visibility).generics().predicates();
+ self.check(item.hir_id(), item_visibility).generics().predicates();
}
hir::ItemKind::Enum(ref def, _) => {
- self.check(item.hir_id, item_visibility).generics().predicates();
+ self.check(item.hir_id(), item_visibility).generics().predicates();
for variant in def.variants {
for field in variant.data.fields() {
@@ -1980,13 +1977,13 @@
// Subitems of foreign modules have their own publicity.
hir::ItemKind::ForeignMod { items, .. } => {
for foreign_item in items {
- let vis = tcx.visibility(tcx.hir().local_def_id(foreign_item.id.hir_id));
- self.check(foreign_item.id.hir_id, vis).generics().predicates().ty();
+ let vis = tcx.visibility(foreign_item.id.def_id);
+ self.check(foreign_item.id.hir_id(), vis).generics().predicates().ty();
}
}
// Subitems of structs and unions have their own publicity.
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
- self.check(item.hir_id, item_visibility).generics().predicates();
+ self.check(item.hir_id(), item_visibility).generics().predicates();
for field in struct_def.fields() {
let field_visibility = tcx.visibility(tcx.hir().local_def_id(field.hir_id));
@@ -1998,20 +1995,16 @@
// A trait impl is public when both its type and its trait are public
// Subitems of trait impls have inherited publicity.
hir::ItemKind::Impl(ref impl_) => {
- let impl_vis = ty::Visibility::of_impl(item.hir_id, tcx, &Default::default());
- self.check(item.hir_id, impl_vis).generics().predicates();
+ let impl_vis = ty::Visibility::of_impl(item.hir_id(), tcx, &Default::default());
+ self.check(item.hir_id(), impl_vis).generics().predicates();
for impl_item_ref in impl_.items {
let impl_item_vis = if impl_.of_trait.is_none() {
- min(
- tcx.visibility(tcx.hir().local_def_id(impl_item_ref.id.hir_id)),
- impl_vis,
- tcx,
- )
+ min(tcx.visibility(impl_item_ref.id.def_id), impl_vis, tcx)
} else {
impl_vis
};
self.check_assoc_item(
- impl_item_ref.id.hir_id,
+ impl_item_ref.id.hir_id(),
impl_item_ref.kind,
impl_item_ref.defaultness,
impl_item_vis,
@@ -2141,11 +2134,22 @@
pub_restricted_visitor.has_pub_restricted
};
+ let mut old_error_set_ancestry = HirIdSet::default();
+ for mut id in visitor.old_error_set.iter().copied() {
+ loop {
+ if !old_error_set_ancestry.insert(id) {
+ break;
+ }
+ let parent = tcx.hir().get_parent_node(id);
+ if parent == id {
+ break;
+ }
+ id = parent;
+ }
+ }
+
// Check for private types and traits in public interfaces.
- let mut visitor = PrivateItemsInPublicInterfacesVisitor {
- tcx,
- has_pub_restricted,
- old_error_set: &visitor.old_error_set,
- };
+ let mut visitor =
+ PrivateItemsInPublicInterfacesVisitor { tcx, has_pub_restricted, old_error_set_ancestry };
krate.visit_all_item_likes(&mut DeepVisitor::new(&mut visitor));
}
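The privacy change above replaces the old per-item walk over `old_error_set` with a precomputed `old_error_set_ancestry`: the error set is expanded once into the set of all of its HIR ancestors, and `check` afterwards only does a set lookup. A standalone sketch of that precomputation, with plain integers standing in for `HirId` and a slice standing in for the parent lookup (`tcx.hir().get_parent_node`), might look like this:

```rust
use std::collections::HashSet;

// Sketch only: `usize` ids and a parent slice replace HirId and the HIR map.
fn ancestry_closure(error_set: &[usize], parent: &[usize]) -> HashSet<usize> {
    let mut closure = HashSet::new();
    for &start in error_set {
        let mut id = start;
        loop {
            // Stop once we reach a node we have already expanded,
            // or a root (a node that is its own parent).
            if !closure.insert(id) {
                break;
            }
            let p = parent[id];
            if p == id {
                break;
            }
            id = p;
        }
    }
    closure
}

fn main() {
    // parent[i] is the parent of node i; node 0 is the root.
    let parent = [0, 0, 1, 1, 2];
    let closure = ancestry_closure(&[4], &parent);
    // Node 4's ancestry is {4, 2, 1, 0}, so later checks against any of these
    // ids become a single hash lookup instead of a walk up the tree.
    assert!(closure.contains(&1));
    assert!(!closure.contains(&3));
}
```

The early `break` on a repeated insertion also means shared ancestor chains are walked at most once, which is the point of precomputing the closure up front.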
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml
new file mode 100644
index 0000000..383e30c
--- /dev/null
+++ b/compiler/rustc_query_impl/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "rustc_query_impl"
+version = "0.0.0"
+edition = "2018"
+
+[lib]
+doctest = false
+
+[dependencies]
+measureme = "9.0.0"
+rustc-rayon-core = "0.3.1"
+tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_span = { path = "../rustc_span" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
+rustc_target = { path = "../rustc_target" }
diff --git a/compiler/rustc_middle/src/ty/query/README.md b/compiler/rustc_query_impl/src/README.md
similarity index 100%
rename from compiler/rustc_middle/src/ty/query/README.md
rename to compiler/rustc_query_impl/src/README.md
diff --git a/compiler/rustc_middle/src/ty/query/keys.rs b/compiler/rustc_query_impl/src/keys.rs
similarity index 79%
rename from compiler/rustc_middle/src/ty/query/keys.rs
rename to compiler/rustc_query_impl/src/keys.rs
index bfa1581..e467f41 100644
--- a/compiler/rustc_middle/src/ty/query/keys.rs
+++ b/compiler/rustc_query_impl/src/keys.rs
@@ -1,20 +1,17 @@
//! Defines the set of legal keys that can be used in queries.
-use crate::infer::canonical::Canonical;
-use crate::mir;
-use crate::ty::fast_reject::SimplifiedType;
-use crate::ty::subst::{GenericArg, SubstsRef};
-use crate::ty::{self, Ty, TyCtxt};
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
-use rustc_query_system::query::DefaultCacheSelector;
-use rustc_span::symbol::Symbol;
+use rustc_middle::infer::canonical::Canonical;
+use rustc_middle::mir;
+use rustc_middle::ty::fast_reject::SimplifiedType;
+use rustc_middle::ty::subst::{GenericArg, SubstsRef};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::symbol::{Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
/// The `Key` trait controls what types can legally be used as the key
/// for a query.
pub trait Key {
- type CacheSelector;
-
/// Given an instance of this key, what crate is it referring to?
/// This is used to find the provider.
fn query_crate(&self) -> CrateNum;
@@ -25,8 +22,6 @@
}
impl<'tcx> Key for ty::InstanceDef<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -37,8 +32,6 @@
}
impl<'tcx> Key for ty::Instance<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -49,8 +42,6 @@
}
impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.instance.query_crate()
}
@@ -61,8 +52,6 @@
}
impl<'tcx> Key for mir::interpret::LitToConstInput<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -73,8 +62,6 @@
}
impl Key for CrateNum {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
*self
}
@@ -84,8 +71,6 @@
}
impl Key for LocalDefId {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.to_def_id().query_crate()
}
@@ -95,8 +80,6 @@
}
impl Key for DefId {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.krate
}
@@ -106,8 +89,6 @@
}
impl Key for ty::WithOptConstParam<LocalDefId> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.did.query_crate()
}
@@ -117,8 +98,6 @@
}
impl Key for (DefId, DefId) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.0.krate
}
@@ -128,8 +107,6 @@
}
impl Key for (ty::Instance<'tcx>, LocalDefId) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.0.query_crate()
}
@@ -139,8 +116,6 @@
}
impl Key for (DefId, LocalDefId) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.0.krate
}
@@ -150,8 +125,6 @@
}
impl Key for (LocalDefId, DefId) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -160,9 +133,25 @@
}
}
-impl Key for (CrateNum, DefId) {
- type CacheSelector = DefaultCacheSelector;
+impl Key for (DefId, Option<Ident>) {
+ fn query_crate(&self) -> CrateNum {
+ self.0.krate
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(self.0)
+ }
+}
+impl Key for (DefId, LocalDefId, Ident) {
+ fn query_crate(&self) -> CrateNum {
+ self.0.krate
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.1.default_span(tcx)
+ }
+}
+
+impl Key for (CrateNum, DefId) {
fn query_crate(&self) -> CrateNum {
self.0
}
@@ -172,8 +161,6 @@
}
impl Key for (DefId, SimplifiedType) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.0.krate
}
@@ -183,8 +170,6 @@
}
impl<'tcx> Key for SubstsRef<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -194,8 +179,6 @@
}
impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.0.krate
}
@@ -210,8 +193,6 @@
(ty::WithOptConstParam<DefId>, SubstsRef<'tcx>),
)
{
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
(self.0).0.did.krate
}
@@ -221,8 +202,6 @@
}
impl<'tcx> Key for (LocalDefId, DefId, SubstsRef<'tcx>) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -232,8 +211,6 @@
}
impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.1.def_id().krate
}
@@ -243,8 +220,15 @@
}
impl<'tcx> Key for (&'tcx ty::Const<'tcx>, mir::Field) {
- type CacheSelector = DefaultCacheSelector;
+ fn query_crate(&self) -> CrateNum {
+ LOCAL_CRATE
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+impl<'tcx> Key for mir::interpret::ConstAlloc<'tcx> {
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -254,8 +238,6 @@
}
impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.def_id().krate
}
@@ -265,8 +247,6 @@
}
impl<'tcx> Key for GenericArg<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -276,8 +256,6 @@
}
impl<'tcx> Key for &'tcx ty::Const<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -287,8 +265,6 @@
}
impl<'tcx> Key for Ty<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -298,8 +274,6 @@
}
impl<'tcx> Key for &'tcx ty::List<ty::Predicate<'tcx>> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -309,8 +283,6 @@
}
impl<'tcx> Key for ty::ParamEnv<'tcx> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -320,8 +292,6 @@
}
impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
self.value.query_crate()
}
@@ -331,8 +301,6 @@
}
impl Key for Symbol {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -344,8 +312,6 @@
/// Canonical query goals correspond to abstract trait operations that
/// are not tied to any crate in particular.
impl<'tcx, T> Key for Canonical<'tcx, T> {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -356,8 +322,6 @@
}
impl Key for (Symbol, u32, u32) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
@@ -368,8 +332,6 @@
}
impl<'tcx> Key for (DefId, Ty<'tcx>, SubstsRef<'tcx>, ty::ParamEnv<'tcx>) {
- type CacheSelector = DefaultCacheSelector;
-
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
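With the move of `keys.rs` into the new `rustc_query_impl` crate, every `impl Key` drops its `type CacheSelector = DefaultCacheSelector;` line, leaving only `query_crate` and `default_span`. The sketch below shows the reduced shape of the trait with stand-in types (`CrateNum`, `Span`, and `MyKey` are illustrative; the real `default_span` also takes a `TyCtxt`):

```rust
// Stand-in types; not the definitions from rustc_hir / rustc_span.
#[derive(Clone, Copy, Debug, PartialEq)]
struct CrateNum(u32);
const LOCAL_CRATE: CrateNum = CrateNum(0);

#[derive(Clone, Copy, Debug, PartialEq)]
struct Span(u32);

trait Key {
    /// Which crate the key refers to; used to find the provider.
    fn query_crate(&self) -> CrateNum;
    /// Fallback span used for diagnostics about this query invocation.
    fn default_span(&self) -> Span;
}

struct MyKey {
    krate: CrateNum,
    span: Span,
}

impl Key for MyKey {
    fn query_crate(&self) -> CrateNum {
        self.krate
    }
    fn default_span(&self) -> Span {
        self.span
    }
}

fn main() {
    let key = MyKey { krate: LOCAL_CRATE, span: Span(42) };
    assert_eq!(key.query_crate(), LOCAL_CRATE);
    assert_eq!(key.default_span(), Span(42));
}
```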
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
new file mode 100644
index 0000000..e931479
--- /dev/null
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -0,0 +1,63 @@
+//! Support for serializing the dep-graph and reloading it.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(in_band_lifetimes)]
+#![feature(exhaustive_patterns)]
+#![feature(nll)]
+#![feature(min_specialization)]
+#![feature(crate_visibility_modifier)]
+#![feature(once_cell)]
+#![feature(rustc_attrs)]
+#![feature(never_type)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate rustc_middle;
+#[macro_use]
+extern crate tracing;
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::{DiagnosticBuilder, Handler};
+use rustc_hir::def_id::CrateNum;
+use rustc_index::vec::IndexVec;
+use rustc_middle::dep_graph;
+use rustc_middle::ich::StableHashingContext;
+use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};
+use rustc_middle::ty::query::{Providers, QueryEngine};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_serialize::opaque;
+use rustc_span::{Span, DUMMY_SP};
+
+#[macro_use]
+mod plumbing;
+pub use plumbing::QueryCtxt;
+use plumbing::QueryStruct;
+use rustc_query_system::query::*;
+
+mod stats;
+pub use self::stats::print_stats;
+
+mod keys;
+use keys::Key;
+
+mod values;
+use self::values::Value;
+
+use rustc_query_system::query::QueryAccessors;
+pub use rustc_query_system::query::QueryConfig;
+pub(crate) use rustc_query_system::query::QueryDescription;
+
+use rustc_middle::ty::query::on_disk_cache;
+
+mod profiling_support;
+pub use self::profiling_support::alloc_self_profile_query_strings;
+
+rustc_query_append! { [define_queries!][<'tcx>] }
+
+impl<'tcx> Queries<'tcx> {
+ // Force codegen in the dyn-trait transformation in this crate.
+ pub fn as_dyn(&'tcx self) -> &'tcx dyn QueryEngine<'tcx> {
+ self
+ }
+}
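`Queries::as_dyn` above exists to hand out the query engine as a `&dyn QueryEngine<'tcx>`, so the concrete-to-trait-object coercion is code-generated in this crate rather than in its users. A minimal standalone sketch of the same pattern, with illustrative names (`Engine`, `ConcreteQueries`) rather than rustc's:

```rust
// Sketch of the "expose yourself as a trait object" pattern; names are made up.
trait Engine {
    fn run(&self, key: u32) -> u32;
}

struct ConcreteQueries {
    offset: u32,
}

impl Engine for ConcreteQueries {
    fn run(&self, key: u32) -> u32 {
        key + self.offset
    }
}

impl ConcreteQueries {
    // Force the unsizing coercion here, mirroring `Queries::as_dyn` above:
    // callers only ever see the `dyn Engine` interface.
    fn as_dyn(&self) -> &dyn Engine {
        self
    }
}

fn main() {
    let queries = ConcreteQueries { offset: 10 };
    let engine: &dyn Engine = queries.as_dyn();
    assert_eq!(engine.run(32), 42);
}
```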
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
new file mode 100644
index 0000000..37a176d
--- /dev/null
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -0,0 +1,616 @@
+//! The implementation of the query system itself. This defines the macros that
+//! generate the actual methods on tcx which find and execute the provider,
+//! manage the caches, and so forth.
+
+use super::queries;
+use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeExt, DepNodeIndex, SerializedDepNodeIndex};
+use rustc_middle::ty::query::on_disk_cache;
+use rustc_middle::ty::tls::{self, ImplicitCtxt};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_query_system::dep_graph::HasDepContext;
+use rustc_query_system::query::{QueryContext, QueryDescription, QueryJobId, QueryMap};
+
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::Diagnostic;
+use rustc_serialize::opaque;
+use rustc_span::def_id::{DefId, LocalDefId};
+
+#[derive(Copy, Clone)]
+pub struct QueryCtxt<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub queries: &'tcx super::Queries<'tcx>,
+}
+
+impl<'tcx> std::ops::Deref for QueryCtxt<'tcx> {
+ type Target = TyCtxt<'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.tcx
+ }
+}
+
+impl HasDepContext for QueryCtxt<'tcx> {
+ type DepKind = rustc_middle::dep_graph::DepKind;
+ type StableHashingContext = rustc_middle::ich::StableHashingContext<'tcx>;
+ type DepContext = TyCtxt<'tcx>;
+
+ #[inline]
+ fn dep_context(&self) -> &Self::DepContext {
+ &self.tcx
+ }
+}
+
+impl QueryContext for QueryCtxt<'tcx> {
+ fn def_path_str(&self, def_id: DefId) -> String {
+ self.tcx.def_path_str(def_id)
+ }
+
+ fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
+ tls::with_related_context(**self, |icx| icx.query)
+ }
+
+ fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>> {
+ self.queries.try_collect_active_jobs(**self)
+ }
+
+ fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
+ let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
+ (cb.try_load_from_on_disk_cache)(*self, dep_node)
+ }
+
+ fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
+ // FIXME: This match is just a workaround for incremental bugs and should
+ // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
+ // bug that must be fixed before removing this.
+ match dep_node.kind {
+ DepKind::hir_owner | DepKind::hir_owner_nodes => {
+ if let Some(def_id) = dep_node.extract_def_id(**self) {
+ let def_id = def_id.expect_local();
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ if def_id != hir_id.owner {
+ // This `DefPath` does not have a
+ // corresponding `DepNode` (e.g. a
+ // struct field), and the `DefPath`
+ // collided with the `DefPath` of a
+ // proper item that existed in the
+ // previous compilation session.
+ //
+ // Since the given `DefPath` does not
+ // denote the item that previously
+ // existed, we just fail to mark green.
+ return false;
+ }
+ } else {
+ // If the node does not exist anymore, we
+ // just fail to mark green.
+ return false;
+ }
+ }
+ _ => {
+ // For other kinds of nodes it's OK to be
+ // forced.
+ }
+ }
+
+ debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+ // We must avoid ever having to call `force_from_dep_node()` for a
+ // `DepNode::codegen_unit`:
+ // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+ // would always end up having to evaluate the first caller of the
+ // `codegen_unit` query that *is* reconstructible. This might very well be
+ // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+ // to re-trigger calling the `codegen_unit` query with the right key. At
+ // that point we would already have re-done all the work we are trying to
+ // avoid doing in the first place.
+ // The solution is simple: Just explicitly call the `codegen_unit` query for
+ // each CGU, right after partitioning. This way `try_mark_green` will always
+ // hit the cache instead of having to go through `force_from_dep_node`.
+ // This assertion makes sure we actually keep applying the solution above.
+ debug_assert!(
+ dep_node.kind != DepKind::codegen_unit,
+ "calling force_from_dep_node() on DepKind::codegen_unit"
+ );
+
+ let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
+ (cb.force_from_dep_node)(*self, dep_node)
+ }
+
+ // Interactions with on_disk_cache
+ fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
+ self.on_disk_cache
+ .as_ref()
+ .map(|c| c.load_diagnostics(**self, prev_dep_node_index))
+ .unwrap_or_default()
+ }
+
+ fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
+ if let Some(c) = self.on_disk_cache.as_ref() {
+ c.store_diagnostics(dep_node_index, diagnostics)
+ }
+ }
+
+ fn store_diagnostics_for_anon_node(
+ &self,
+ dep_node_index: DepNodeIndex,
+ diagnostics: ThinVec<Diagnostic>,
+ ) {
+ if let Some(c) = self.on_disk_cache.as_ref() {
+ c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
+ }
+ }
+
+ /// Executes a job by changing the `ImplicitCtxt` to point to the
+ /// new query job while it executes. It returns the diagnostics
+ /// captured during execution and the actual result.
+ #[inline(always)]
+ fn start_query<R>(
+ &self,
+ token: QueryJobId<Self::DepKind>,
+ diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+ compute: impl FnOnce() -> R,
+ ) -> R {
+ // The `TyCtxt` stored in TLS has the same global interner lifetime
+ // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
+ // when accessing the `ImplicitCtxt`.
+ tls::with_related_context(**self, move |current_icx| {
+ // Update the `ImplicitCtxt` to point to our new query job.
+ let new_icx = ImplicitCtxt {
+ tcx: **self,
+ query: Some(token),
+ diagnostics,
+ layout_depth: current_icx.layout_depth,
+ task_deps: current_icx.task_deps,
+ };
+
+ // Use the `ImplicitCtxt` while we execute the query.
+ tls::enter_context(&new_icx, |_| {
+ rustc_data_structures::stack::ensure_sufficient_stack(compute)
+ })
+ })
+ }
+}
+
+impl<'tcx> QueryCtxt<'tcx> {
+ pub(super) fn encode_query_results(
+ self,
+ encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>,
+ query_result_index: &mut on_disk_cache::EncodedQueryResultIndex,
+ ) -> opaque::FileEncodeResult {
+ macro_rules! encode_queries {
+ ($($query:ident,)*) => {
+ $(
+ on_disk_cache::encode_query_results::<_, super::queries::$query<'_>>(
+ self,
+ encoder,
+ query_result_index
+ )?;
+ )*
+ }
+ }
+
+ rustc_cached_queries!(encode_queries!);
+
+ Ok(())
+ }
+}
+
+/// This struct stores metadata about each Query.
+///
+/// Information is retrieved by indexing the `QUERIES` array using the integer value
+/// of the `DepKind`. Overall, this allows implementing `QueryContext` using this manual
+/// jump table instead of large matches.
+pub struct QueryStruct {
+ /// The red/green evaluation system will try to mark a specific DepNode in the
+ /// dependency graph as green by recursively trying to mark the dependencies of
+ /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
+ /// where we don't know if it is red or green and we therefore actually have
+ /// to recompute its value in order to find out. Since the only piece of
+ /// information that we have at that point is the `DepNode` we are trying to
+ /// re-evaluate, we need some way to re-run a query from just that. This is what
+ /// `force_from_dep_node()` implements.
+ ///
+ /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
+ /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
+ /// is usually constructed by computing a stable hash of the query-key that the
+ /// `DepNode` corresponds to. Consequently, it is not in general possible to go
+ /// back from hash to query-key (since hash functions are not reversible). For
+ /// this reason `force_from_dep_node()` is expected to fail from time to time
+ /// because we just cannot find out, from the `DepNode` alone, what the
+ /// corresponding query-key is and therefore cannot re-run the query.
+ ///
+ /// The system deals with this case by letting `try_mark_green` fail, which forces
+ /// the root query to be re-evaluated.
+ ///
+ /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
+ /// Fortunately, we can use some contextual information that will allow us to
+ /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
+ /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
+ /// valid `DefPathHash`. Since we also always build a huge table that maps every
+ /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
+ /// everything we need to re-run the query.
+ ///
+ /// Take the `mir_promoted` query as an example. Like many other queries, it
+ /// just has a single parameter: the `DefId` of the item it will compute the
+ /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
+ /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
+ /// is actually a `DefPathHash`, and can therefore just look up the corresponding
+ /// `DefId` in `tcx.def_path_hash_to_def_id`.
+ ///
+ /// When you implement a new query, it will likely have a corresponding new
+ /// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As
+ /// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter,
+ /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
+ /// add it to the "We don't have enough information to reconstruct..." group in
+ /// the match below.
+ pub(crate) force_from_dep_node: fn(tcx: QueryCtxt<'_>, dep_node: &DepNode) -> bool,
+
+ /// Invoke a query to put the on-disk cached value in memory.
+ pub(crate) try_load_from_on_disk_cache: fn(QueryCtxt<'_>, &DepNode),
+}
+
+macro_rules! handle_cycle_error {
+ ([][$tcx: expr, $error:expr]) => {{
+ $error.emit();
+ Value::from_cycle_error($tcx)
+ }};
+ ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
+ $error.emit();
+ $tcx.sess.abort_if_errors();
+ unreachable!()
+ }};
+ ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
+ $error.delay_as_bug();
+ Value::from_cycle_error($tcx)
+ }};
+ ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
+ handle_cycle_error!([$($($modifiers)*)*][$($args)*])
+ };
+}
+
+macro_rules! is_anon {
+ ([]) => {{
+ false
+ }};
+ ([anon $($rest:tt)*]) => {{
+ true
+ }};
+ ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
+ is_anon!([$($($modifiers)*)*])
+ };
+}
+
+macro_rules! is_eval_always {
+ ([]) => {{
+ false
+ }};
+ ([eval_always $($rest:tt)*]) => {{
+ true
+ }};
+ ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
+ is_eval_always!([$($($modifiers)*)*])
+ };
+}
+
+macro_rules! hash_result {
+ ([][$hcx:expr, $result:expr]) => {{
+ dep_graph::hash_result($hcx, &$result)
+ }};
+ ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
+ None
+ }};
+ ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
+ hash_result!([$($($modifiers)*)*][$($args)*])
+ };
+}
+
+macro_rules! define_queries {
+ (<$tcx:tt>
+ $($(#[$attr:meta])*
+ [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
+
+ define_queries_struct! {
+ tcx: $tcx,
+ input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
+ }
+
+ mod make_query {
+ use super::*;
+
+ // Create an eponymous constructor for each query.
+ $(#[allow(nonstandard_style)] $(#[$attr])*
+ pub fn $name<$tcx>(tcx: QueryCtxt<$tcx>, key: query_keys::$name<$tcx>) -> QueryStackFrame {
+ let kind = dep_graph::DepKind::$name;
+ let name = stringify!($name);
+ let description = ty::print::with_forced_impl_filename_line(
+ // Force filename-line mode to avoid invoking `type_of` query.
+ || queries::$name::describe(tcx, key)
+ );
+ let description = if tcx.sess.verbose() {
+ format!("{} [{}]", description, name)
+ } else {
+ description
+ };
+ let span = if kind == dep_graph::DepKind::def_span {
+ // The `def_span` query is used to calculate `default_span`,
+ // so exit to avoid infinite recursion.
+ None
+ } else {
+ Some(key.default_span(*tcx))
+ };
+ let hash = || {
+ let mut hcx = tcx.create_stable_hashing_context();
+ let mut hasher = StableHasher::new();
+ std::mem::discriminant(&kind).hash_stable(&mut hcx, &mut hasher);
+ key.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish::<u64>()
+ };
+
+ QueryStackFrame::new(name, description, span, hash)
+ })*
+ }
+
+ #[allow(nonstandard_style)]
+ pub mod queries {
+ use std::marker::PhantomData;
+
+ $(pub struct $name<$tcx> {
+ data: PhantomData<&$tcx ()>
+ })*
+ }
+
+ $(impl<$tcx> QueryConfig for queries::$name<$tcx> {
+ type Key = query_keys::$name<$tcx>;
+ type Value = query_values::$name<$tcx>;
+ type Stored = query_stored::$name<$tcx>;
+ const NAME: &'static str = stringify!($name);
+ }
+
+ impl<$tcx> QueryAccessors<QueryCtxt<$tcx>> for queries::$name<$tcx> {
+ const ANON: bool = is_anon!([$($modifiers)*]);
+ const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
+ const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$name;
+
+ type Cache = query_storage::$name<$tcx>;
+
+ #[inline(always)]
+ fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Self::Key>
+ where QueryCtxt<$tcx>: 'a
+ {
+ &tcx.queries.$name
+ }
+
+ #[inline(always)]
+ fn query_cache<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryCacheStore<Self::Cache>
+ where 'tcx:'a
+ {
+ &tcx.query_caches.$name
+ }
+
+ #[inline]
+ fn compute(tcx: QueryCtxt<'tcx>, key: Self::Key) -> Self::Value {
+ let provider = tcx.queries.providers.get(key.query_crate())
+ // HACK(eddyb) it's possible crates may be loaded after
+ // the query engine is created, and because crate loading
+ // is not yet integrated with the query engine, such crates
+ // would be missing appropriate entries in `providers`.
+ .unwrap_or(&tcx.queries.fallback_extern_providers)
+ .$name;
+ provider(*tcx, key)
+ }
+
+ fn hash_result(
+ _hcx: &mut StableHashingContext<'_>,
+ _result: &Self::Value
+ ) -> Option<Fingerprint> {
+ hash_result!([$($modifiers)*][_hcx, _result])
+ }
+
+ fn handle_cycle_error(
+ tcx: QueryCtxt<'tcx>,
+ mut error: DiagnosticBuilder<'_>,
+ ) -> Self::Value {
+ handle_cycle_error!([$($modifiers)*][tcx, error])
+ }
+ })*
+
+ #[allow(non_upper_case_globals)]
+ pub mod query_callbacks {
+ use super::*;
+ use rustc_middle::dep_graph::DepNode;
+ use rustc_middle::ty::query::query_keys;
+ use rustc_query_system::dep_graph::DepNodeParams;
+ use rustc_query_system::query::{force_query, QueryDescription};
+
+ // We use this for most things when incr. comp. is turned off.
+ pub const Null: QueryStruct = QueryStruct {
+ force_from_dep_node: |_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node),
+ try_load_from_on_disk_cache: |_, _| {},
+ };
+
+ pub const TraitSelect: QueryStruct = QueryStruct {
+ force_from_dep_node: |_, _| false,
+ try_load_from_on_disk_cache: |_, _| {},
+ };
+
+ pub const CompileCodegenUnit: QueryStruct = QueryStruct {
+ force_from_dep_node: |_, _| false,
+ try_load_from_on_disk_cache: |_, _| {},
+ };
+
+ $(pub const $name: QueryStruct = {
+ const is_anon: bool = is_anon!([$($modifiers)*]);
+
+ #[inline(always)]
+ fn can_reconstruct_query_key() -> bool {
+ <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>
+ ::can_reconstruct_query_key()
+ }
+
+ fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<query_keys::$name<'tcx>> {
+ <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, dep_node)
+ }
+
+ fn force_from_dep_node(tcx: QueryCtxt<'_>, dep_node: &DepNode) -> bool {
+ if is_anon {
+ return false;
+ }
+
+ if !can_reconstruct_query_key() {
+ return false;
+ }
+
+ if let Some(key) = recover(*tcx, dep_node) {
+ force_query::<queries::$name<'_>, _>(tcx, key, DUMMY_SP, *dep_node);
+ return true;
+ }
+
+ false
+ }
+
+ fn try_load_from_on_disk_cache(tcx: QueryCtxt<'_>, dep_node: &DepNode) {
+ if is_anon {
+ return
+ }
+
+ if !can_reconstruct_query_key() {
+ return
+ }
+
+ debug_assert!(tcx.dep_graph
+ .node_color(dep_node)
+ .map(|c| c.is_green())
+ .unwrap_or(false));
+
+ let key = recover(*tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
+ if queries::$name::cache_on_disk(tcx, &key, None) {
+ let _ = tcx.$name(key);
+ }
+ }
+
+ QueryStruct {
+ force_from_dep_node,
+ try_load_from_on_disk_cache,
+ }
+ };)*
+ }
+
+ static QUERY_CALLBACKS: &[QueryStruct] = &make_dep_kind_array!(query_callbacks);
+ }
+}
+
+// FIXME(eddyb) this macro (and others?) use `$tcx` and `'tcx` interchangeably.
+// We should either not take `$tcx` at all and use `'tcx` everywhere, or use
+// `$tcx` everywhere (even if that isn't necessary due to lack of hygiene).
+macro_rules! define_queries_struct {
+ (tcx: $tcx:tt,
+ input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
+ pub struct Queries<$tcx> {
+ providers: IndexVec<CrateNum, Providers>,
+ fallback_extern_providers: Box<Providers>,
+
+ $($(#[$attr])* $name: QueryState<
+ crate::dep_graph::DepKind,
+ query_keys::$name<$tcx>,
+ >,)*
+ }
+
+ impl<$tcx> Queries<$tcx> {
+ pub fn new(
+ providers: IndexVec<CrateNum, Providers>,
+ fallback_extern_providers: Providers,
+ ) -> Self {
+ Queries {
+ providers,
+ fallback_extern_providers: Box::new(fallback_extern_providers),
+ $($name: Default::default()),*
+ }
+ }
+
+ pub(crate) fn try_collect_active_jobs(
+ &$tcx self,
+ tcx: TyCtxt<$tcx>,
+ ) -> Option<QueryMap<crate::dep_graph::DepKind>> {
+ let tcx = QueryCtxt { tcx, queries: self };
+ let mut jobs = QueryMap::default();
+
+ $(
+ self.$name.try_collect_active_jobs(
+ tcx,
+ dep_graph::DepKind::$name,
+ make_query::$name,
+ &mut jobs,
+ )?;
+ )*
+
+ Some(jobs)
+ }
+ }
+
+ impl QueryEngine<'tcx> for Queries<'tcx> {
+ unsafe fn deadlock(&'tcx self, _tcx: TyCtxt<'tcx>, _registry: &rustc_rayon_core::Registry) {
+ #[cfg(parallel_compiler)]
+ {
+ let tcx = QueryCtxt { tcx: _tcx, queries: self };
+ rustc_query_system::query::deadlock(tcx, _registry)
+ }
+ }
+
+ fn encode_query_results(
+ &'tcx self,
+ tcx: TyCtxt<'tcx>,
+ encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>,
+ query_result_index: &mut on_disk_cache::EncodedQueryResultIndex,
+ ) -> opaque::FileEncodeResult {
+ let tcx = QueryCtxt { tcx, queries: self };
+ tcx.encode_query_results(encoder, query_result_index)
+ }
+
+ fn exec_cache_promotions(&'tcx self, tcx: TyCtxt<'tcx>) {
+ let tcx = QueryCtxt { tcx, queries: self };
+ tcx.dep_graph.exec_cache_promotions(tcx)
+ }
+
+ fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool {
+ let qcx = QueryCtxt { tcx, queries: self };
+ tcx.dep_graph.try_mark_green(qcx, dep_node).is_some()
+ }
+
+ fn try_print_query_stack(
+ &'tcx self,
+ tcx: TyCtxt<'tcx>,
+ query: Option<QueryJobId<dep_graph::DepKind>>,
+ handler: &Handler,
+ num_frames: Option<usize>,
+ ) -> usize {
+ let qcx = QueryCtxt { tcx, queries: self };
+ rustc_query_system::query::print_query_stack(qcx, query, handler, num_frames)
+ }
+
+ $($(#[$attr])*
+ #[inline(always)]
+ fn $name(
+ &'tcx self,
+ tcx: TyCtxt<$tcx>,
+ span: Span,
+ key: query_keys::$name<$tcx>,
+ lookup: QueryLookup,
+ mode: QueryMode,
+ ) -> Option<query_stored::$name<$tcx>> {
+ let qcx = QueryCtxt { tcx, queries: self };
+ get_query::<queries::$name<$tcx>, _>(qcx, span, key, lookup, mode)
+ })*
+ }
+ };
+}
+
+fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
+ if def_id.is_top_level_module() {
+ "top-level module".to_string()
+ } else {
+ format!("module `{}`", tcx.def_path_str(def_id.to_def_id()))
+ }
+}
+
+rustc_query_description! {}
diff --git a/compiler/rustc_middle/src/ty/query/profiling_support.rs b/compiler/rustc_query_impl/src/profiling_support.rs
similarity index 86%
rename from compiler/rustc_middle/src/ty/query/profiling_support.rs
rename to compiler/rustc_query_impl/src/profiling_support.rs
index cbcecb8..2448588 100644
--- a/compiler/rustc_middle/src/ty/query/profiling_support.rs
+++ b/compiler/rustc_query_impl/src/profiling_support.rs
@@ -1,32 +1,31 @@
-use crate::ty::context::TyCtxt;
-use crate::ty::WithOptConstParam;
use measureme::{StringComponent, StringId};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfiler;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::definitions::DefPathData;
-use rustc_query_system::query::{QueryCache, QueryContext, QueryState};
+use rustc_middle::ty::{TyCtxt, WithOptConstParam};
+use rustc_query_system::query::{QueryCache, QueryCacheStore};
use std::fmt::Debug;
use std::io::Write;
-pub struct QueryKeyStringCache {
+struct QueryKeyStringCache {
def_id_cache: FxHashMap<DefId, StringId>,
}
impl QueryKeyStringCache {
- pub fn new() -> QueryKeyStringCache {
+ fn new() -> QueryKeyStringCache {
QueryKeyStringCache { def_id_cache: Default::default() }
}
}
-pub struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
+struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
profiler: &'p SelfProfiler,
tcx: TyCtxt<'tcx>,
string_cache: &'c mut QueryKeyStringCache,
}
impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
- pub fn new(
+ fn new(
profiler: &'p SelfProfiler,
tcx: TyCtxt<'tcx>,
string_cache: &'c mut QueryKeyStringCache,
@@ -98,7 +97,7 @@
}
}
-pub trait IntoSelfProfilingString {
+trait IntoSelfProfilingString {
fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId;
}
@@ -123,7 +122,7 @@
}
#[rustc_specialization_trait]
-pub trait SpecIntoSelfProfilingString: Debug {
+trait SpecIntoSelfProfilingString: Debug {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
@@ -227,10 +226,10 @@
/// Allocate the self-profiling query strings for a single query cache. This
/// method is called from `alloc_self_profile_query_strings` which knows all
/// the queries via macro magic.
-pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
+fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
tcx: TyCtxt<'tcx>,
query_name: &'static str,
- query_state: &QueryState<crate::dep_graph::DepKind, <TyCtxt<'tcx> as QueryContext>::Query, C>,
+ query_cache: &QueryCacheStore<C>,
string_cache: &mut QueryKeyStringCache,
) where
C: QueryCache,
@@ -251,7 +250,7 @@
// need to invoke queries itself, we cannot keep the query caches
// locked while doing so. Instead we copy out the
// `(query_key, dep_node_index)` pairs and release the lock again.
- let query_keys_and_indices: Vec<_> = query_state
+ let query_keys_and_indices: Vec<_> = query_cache
.iter_results(|results| results.map(|(k, _, i)| (k.clone(), i)).collect());
// Now actually allocate the strings. If allocating the strings
@@ -276,7 +275,7 @@
let query_name = profiler.get_or_alloc_cached_string(query_name);
let event_id = event_id_builder.from_label(query_name).to_string_id();
- query_state.iter_results(|results| {
+ query_cache.iter_results(|results| {
let query_invocation_ids: Vec<_> = results.map(|v| v.2.into()).collect();
profiler.bulk_map_query_invocation_id_to_single_string(
@@ -287,3 +286,35 @@
}
});
}
+
+/// All self-profiling events generated by the query engine use
+/// virtual `StringId`s for their `event_id`. This method makes all
+/// those virtual `StringId`s point to actual strings.
+///
+/// If we are recording only summary data, the ids will point to
+/// just the query names. If we are recording query keys too, we
+/// allocate the corresponding strings here.
+pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'tcx>) {
+ if !tcx.prof.enabled() {
+ return;
+ }
+
+ let mut string_cache = QueryKeyStringCache::new();
+
+ macro_rules! alloc_once {
+ (<$tcx:tt>
+ $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($K:ty) -> $V:ty,)*
+ ) => {
+ $({
+ alloc_self_profile_query_strings_for_query_cache(
+ tcx,
+ stringify!($name),
+ &tcx.query_caches.$name,
+ &mut string_cache,
+ );
+ })*
+ }
+ }
+
+ rustc_query_append! { [alloc_once!][<'tcx>] }
+}
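Editorial note (not part of the imported sources): `rustc_query_append!` feeds every query into the `alloc_once!` macro above, so for a single query (say `type_of`, used here purely as an illustration) the expansion is roughly:

    alloc_self_profile_query_strings_for_query_cache(
        tcx,
        "type_of",
        &tcx.query_caches.type_of,
        &mut string_cache,
    );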
diff --git a/compiler/rustc_middle/src/ty/query/stats.rs b/compiler/rustc_query_impl/src/stats.rs
similarity index 74%
rename from compiler/rustc_middle/src/ty/query/stats.rs
rename to compiler/rustc_query_impl/src/stats.rs
index e0b44ce..4d52483 100644
--- a/compiler/rustc_middle/src/ty/query/stats.rs
+++ b/compiler/rustc_query_impl/src/stats.rs
@@ -1,10 +1,9 @@
-use crate::ty::query::queries;
-use crate::ty::TyCtxt;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
-use rustc_query_system::query::{QueryAccessors, QueryCache, QueryContext, QueryState};
+use rustc_middle::ty::query::query_storage;
+use rustc_middle::ty::TyCtxt;
+use rustc_query_system::query::{QueryCache, QueryCacheStore};
use std::any::type_name;
-use std::hash::Hash;
use std::mem;
#[cfg(debug_assertions)]
use std::sync::atomic::Ordering;
@@ -37,10 +36,8 @@
local_def_id_keys: Option<usize>,
}
-fn stats<D, Q, C>(name: &'static str, map: &QueryState<D, Q, C>) -> QueryStats
+fn stats<C>(name: &'static str, map: &QueryCacheStore<C>) -> QueryStats
where
- D: Copy + Clone + Eq + Hash,
- Q: Clone,
C: QueryCache,
{
let mut stats = QueryStats {
@@ -70,29 +67,29 @@
if cfg!(debug_assertions) {
let hits: usize = queries.iter().map(|s| s.cache_hits).sum();
let results: usize = queries.iter().map(|s| s.entry_count).sum();
- println!("\nQuery cache hit rate: {}", hits as f64 / (hits + results) as f64);
+ eprintln!("\nQuery cache hit rate: {}", hits as f64 / (hits + results) as f64);
}
let mut query_key_sizes = queries.clone();
query_key_sizes.sort_by_key(|q| q.key_size);
- println!("\nLarge query keys:");
+ eprintln!("\nLarge query keys:");
for q in query_key_sizes.iter().rev().filter(|q| q.key_size > 8) {
- println!(" {} - {} x {} - {}", q.name, q.key_size, q.entry_count, q.key_type);
+ eprintln!(" {} - {} x {} - {}", q.name, q.key_size, q.entry_count, q.key_type);
}
let mut query_value_sizes = queries.clone();
query_value_sizes.sort_by_key(|q| q.value_size);
- println!("\nLarge query values:");
+ eprintln!("\nLarge query values:");
for q in query_value_sizes.iter().rev().filter(|q| q.value_size > 8) {
- println!(" {} - {} x {} - {}", q.name, q.value_size, q.entry_count, q.value_type);
+ eprintln!(" {} - {} x {} - {}", q.name, q.value_size, q.entry_count, q.value_type);
}
if cfg!(debug_assertions) {
let mut query_cache_hits = queries.clone();
query_cache_hits.sort_by_key(|q| q.cache_hits);
- println!("\nQuery cache hits:");
+ eprintln!("\nQuery cache hits:");
for q in query_cache_hits.iter().rev() {
- println!(
+ eprintln!(
" {} - {} ({}%)",
q.name,
q.cache_hits,
@@ -103,19 +100,19 @@
let mut query_value_count = queries.clone();
query_value_count.sort_by_key(|q| q.entry_count);
- println!("\nQuery value count:");
+ eprintln!("\nQuery value count:");
for q in query_value_count.iter().rev() {
- println!(" {} - {}", q.name, q.entry_count);
+ eprintln!(" {} - {}", q.name, q.entry_count);
}
let mut def_id_density: Vec<_> =
queries.iter().filter(|q| q.local_def_id_keys.is_some()).collect();
def_id_density.sort_by_key(|q| q.local_def_id_keys.unwrap());
- println!("\nLocal DefId density:");
+ eprintln!("\nLocal DefId density:");
let total = tcx.hir().definitions().def_index_count() as f64;
for q in def_id_density.iter().rev() {
let local = q.local_def_id_keys.unwrap();
- println!(" {} - {} = ({}%)", q.name, local, (local as f64 * 100.0) / total);
+ eprintln!(" {} - {} = ({}%)", q.name, local, (local as f64 * 100.0) / total);
}
}
@@ -128,12 +125,10 @@
$(
queries.push(stats::<
- crate::dep_graph::DepKind,
- <TyCtxt<'_> as QueryContext>::Query,
- <queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache,
+ query_storage::$name<'_>,
>(
stringify!($name),
- &tcx.queries.$name,
+ &tcx.query_caches.$name,
));
)*
diff --git a/compiler/rustc_middle/src/ty/query/values.rs b/compiler/rustc_query_impl/src/values.rs
similarity index 75%
rename from compiler/rustc_middle/src/ty/query/values.rs
rename to compiler/rustc_query_impl/src/values.rs
index f28b0f4..003867b 100644
--- a/compiler/rustc_middle/src/ty/query/values.rs
+++ b/compiler/rustc_query_impl/src/values.rs
@@ -1,18 +1,19 @@
-use crate::ty::{self, AdtSizedConstraint, Ty, TyCtxt, TyS};
+use super::QueryCtxt;
+use rustc_middle::ty::{self, AdtSizedConstraint, Ty, TyS};
pub(super) trait Value<'tcx>: Sized {
- fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self;
+ fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self;
}
impl<'tcx, T> Value<'tcx> for T {
- default fn from_cycle_error(tcx: TyCtxt<'tcx>) -> T {
+ default fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> T {
tcx.sess.abort_if_errors();
bug!("Value::from_cycle_error called without errors");
}
}
impl<'tcx> Value<'tcx> for &'_ TyS<'_> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
+ fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
// SAFETY: This is never called when `Self` is not `Ty<'tcx>`.
// FIXME: Represent the above fact in the trait system somehow.
unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(tcx.ty_error()) }
@@ -20,19 +21,19 @@
}
impl<'tcx> Value<'tcx> for ty::SymbolName<'_> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
+ fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
// SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`.
// FIXME: Represent the above fact in the trait system somehow.
unsafe {
std::mem::transmute::<ty::SymbolName<'tcx>, ty::SymbolName<'_>>(ty::SymbolName::new(
- tcx, "<error>",
+ *tcx, "<error>",
))
}
}
}
impl<'tcx> Value<'tcx> for AdtSizedConstraint<'_> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
+ fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
// SAFETY: This is never called when `Self` is not `AdtSizedConstraint<'tcx>`.
// FIXME: Represent the above fact in the trait system somehow.
unsafe {
diff --git a/compiler/rustc_query_system/Cargo.toml b/compiler/rustc_query_system/Cargo.toml
index f38d62d..19512dc 100644
--- a/compiler/rustc_query_system/Cargo.toml
+++ b/compiler/rustc_query_system/Cargo.toml
@@ -10,12 +10,13 @@
[dependencies]
rustc_arena = { path = "../rustc_arena" }
tracing = "0.1"
-rustc-rayon-core = "0.3.0"
+rustc-rayon-core = "0.3.1"
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_macros = { path = "../rustc_macros" }
rustc_index = { path = "../rustc_index" }
rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
parking_lot = "0.11"
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_query_system/src/cache.rs b/compiler/rustc_query_system/src/cache.rs
index be3d360..c6dc7b4 100644
--- a/compiler/rustc_query_system/src/cache.rs
+++ b/compiler/rustc_query_system/src/cache.rs
@@ -1,7 +1,6 @@
//! Cache for candidate selection.
-use crate::dep_graph::DepNodeIndex;
-use crate::query::QueryContext;
+use crate::dep_graph::{DepContext, DepNodeIndex};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::HashMapExt;
@@ -28,7 +27,7 @@
}
impl<Key: Eq + Hash, Value: Clone> Cache<Key, Value> {
- pub fn get<CTX: QueryContext>(&self, key: &Key, tcx: CTX) -> Option<Value> {
+ pub fn get<CTX: DepContext>(&self, key: &Key, tcx: CTX) -> Option<Value> {
Some(self.hashmap.borrow().get(key)?.get(tcx))
}
@@ -55,7 +54,7 @@
WithDepNode { dep_node, cached_value }
}
- pub fn get<CTX: QueryContext>(&self, tcx: CTX) -> T {
+ pub fn get<CTX: DepContext>(&self, tcx: CTX) -> T {
tcx.dep_graph().read_index(self.dep_node);
self.cached_value.clone()
}
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
index 64aba87..f55e2f7 100644
--- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -79,7 +79,7 @@
pub fn construct<Ctxt, Key>(tcx: Ctxt, kind: K, arg: &Key) -> DepNode<K>
where
- Ctxt: crate::query::QueryContext<DepKind = K>,
+ Ctxt: super::DepContext<DepKind = K>,
Key: DepNodeParams<Ctxt>,
{
let hash = arg.to_fingerprint(tcx);
@@ -87,7 +87,10 @@
#[cfg(debug_assertions)]
{
- if !kind.can_reconstruct_query_key() && tcx.debug_dep_node() {
+ if !kind.can_reconstruct_query_key()
+ && (tcx.sess().opts.debugging_opts.incremental_info
+ || tcx.sess().opts.debugging_opts.query_dep_graph)
+ {
tcx.dep_graph().register_dep_node_debug_str(dep_node, || arg.to_debug_str(tcx));
}
}
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 4fb3a68..0f25572 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -23,7 +23,8 @@
use super::prev::PreviousDepGraph;
use super::query::DepGraphQuery;
use super::serialized::SerializedDepNodeIndex;
-use super::{DepContext, DepKind, DepNode, WorkProductId};
+use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
+use crate::query::QueryContext;
#[derive(Clone)]
pub struct DepGraph<K: DepKind> {
@@ -235,7 +236,7 @@
/// `arg` parameter.
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
- pub fn with_task<Ctxt: DepContext<DepKind = K>, A, R>(
+ pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
&self,
key: DepNode<K>,
cx: Ctxt,
@@ -261,7 +262,7 @@
)
}
- fn with_task_impl<Ctxt: DepContext<DepKind = K>, A, R>(
+ fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A, R>(
&self,
key: DepNode<K>,
cx: Ctxt,
@@ -271,14 +272,15 @@
hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex) {
if let Some(ref data) = self.data {
+ let dcx = cx.dep_context();
let task_deps = create_task(key).map(Lock::new);
let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
- let mut hcx = cx.create_stable_hashing_context();
+ let mut hcx = dcx.create_stable_hashing_context();
let current_fingerprint = hash_result(&mut hcx, &result);
- let print_status = cfg!(debug_assertions) && cx.debug_dep_tasks();
+ let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;
// Intern the new `DepNode`.
let dep_node_index = if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
@@ -408,7 +410,7 @@
/// Executes something within an "eval-always" task which is a task
/// that runs whenever anything changes.
- pub fn with_eval_always_task<Ctxt: DepContext<DepKind = K>, A, R>(
+ pub fn with_eval_always_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
&self,
key: DepNode<K>,
cx: Ctxt,
@@ -585,7 +587,7 @@
/// A node will have an index, when it's already been marked green, or when we can mark it
/// green. This function will mark the current task as a reader of the specified node, when
/// a node index can be found for that node.
- pub fn try_mark_green_and_read<Ctxt: DepContext<DepKind = K>>(
+ pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,
dep_node: &DepNode<K>,
@@ -597,7 +599,7 @@
})
}
- pub fn try_mark_green<Ctxt: DepContext<DepKind = K>>(
+ pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,
dep_node: &DepNode<K>,
@@ -625,7 +627,7 @@
}
/// Try to mark a dep-node which existed in the previous compilation session as green.
- fn try_mark_previous_green<Ctxt: DepContext<DepKind = K>>(
+ fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,
data: &DepGraphData<K>,
@@ -729,7 +731,7 @@
return None;
}
None => {
- if !tcx.has_errors_or_delayed_span_bugs() {
+ if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
panic!(
"try_mark_previous_green() - Forcing the DepNode \
should have set its color"
@@ -809,7 +811,7 @@
/// This may be called concurrently on multiple threads for the same dep node.
#[cold]
#[inline(never)]
- fn emit_diagnostics<Ctxt: DepContext<DepKind = K>>(
+ fn emit_diagnostics<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,
data: &DepGraphData<K>,
@@ -833,7 +835,7 @@
// Promote the previous diagnostics to the current session.
tcx.store_diagnostics(dep_node_index, diagnostics.clone().into());
- let handle = tcx.diagnostic();
+ let handle = tcx.dep_context().sess().diagnostic();
for diagnostic in diagnostics {
handle.emit_diagnostic(&diagnostic);
@@ -874,7 +876,8 @@
//
// This method will only load queries that will end up in the disk cache.
// Other queries will not be executed.
- pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
+ pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
+ let tcx = qcx.dep_context();
let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
let data = self.data.as_ref().unwrap();
@@ -882,7 +885,7 @@
match data.colors.get(prev_index) {
Some(DepNodeColor::Green(_)) => {
let dep_node = data.previous.index_to_node(prev_index);
- tcx.try_load_from_on_disk_cache(&dep_node);
+ qcx.try_load_from_on_disk_cache(&dep_node);
}
None | Some(DepNodeColor::Red) => {
// We can skip red nodes because a node can only be marked
@@ -964,29 +967,29 @@
----------------------------------------------\
------------";
- println!("[incremental]");
- println!("[incremental] DepGraph Statistics");
- println!("{}", SEPARATOR);
- println!("[incremental]");
- println!("[incremental] Total Node Count: {}", total_node_count);
- println!("[incremental] Total Edge Count: {}", total_edge_count);
+ eprintln!("[incremental]");
+ eprintln!("[incremental] DepGraph Statistics");
+ eprintln!("{}", SEPARATOR);
+ eprintln!("[incremental]");
+ eprintln!("[incremental] Total Node Count: {}", total_node_count);
+ eprintln!("[incremental] Total Edge Count: {}", total_edge_count);
if cfg!(debug_assertions) {
let total_edge_reads = current.total_read_count.load(Relaxed);
let total_duplicate_edge_reads = current.total_duplicate_read_count.load(Relaxed);
- println!("[incremental] Total Edge Reads: {}", total_edge_reads);
- println!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads);
+ eprintln!("[incremental] Total Edge Reads: {}", total_edge_reads);
+ eprintln!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads);
}
- println!("[incremental]");
+ eprintln!("[incremental]");
- println!(
+ eprintln!(
"[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
"Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
);
- println!(
+ eprintln!(
"[incremental] -------------------------------------\
|------------------\
|-------------\
@@ -997,7 +1000,7 @@
let node_kind_ratio = (100.0 * (stat.node_counter as f64)) / (total_node_count as f64);
let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);
- println!(
+ eprintln!(
"[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
format!("{:?}", stat.kind),
node_kind_ratio,
@@ -1006,8 +1009,8 @@
);
}
- println!("{}", SEPARATOR);
- println!("[incremental]");
+ eprintln!("{}", SEPARATOR);
+ eprintln!("[incremental]");
}
fn next_virtual_depnode_index(&self) -> DepNodeIndex {
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index b1c9016..e8fb71b 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -13,8 +13,7 @@
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::Diagnostic;
+use rustc_session::Session;
use std::fmt;
use std::hash::Hash;
@@ -26,38 +25,37 @@
/// Create a hashing context for hashing new results.
fn create_stable_hashing_context(&self) -> Self::StableHashingContext;
- fn debug_dep_tasks(&self) -> bool;
- fn debug_dep_node(&self) -> bool;
-
- /// Try to force a dep node to execute and see if it's green.
- fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
+ /// Access the DepGraph.
+ fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);
- /// Return whether the current session is tainted by errors.
- fn has_errors_or_delayed_span_bugs(&self) -> bool;
-
- /// Return the diagnostic handler.
- fn diagnostic(&self) -> &rustc_errors::Handler;
-
- /// Load data from the on-disk cache.
- fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
-
- /// Load diagnostics associated to the node in the previous session.
- fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
-
- /// Register diagnostics for the given node, for use in next session.
- fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
-
- /// Register diagnostics for the given node, for use in next session.
- fn store_diagnostics_for_anon_node(
- &self,
- dep_node_index: DepNodeIndex,
- diagnostics: ThinVec<Diagnostic>,
- );
-
/// Access the profiler.
fn profiler(&self) -> &SelfProfilerRef;
+
+ /// Access the compiler session.
+ fn sess(&self) -> &Session;
+}
+
+pub trait HasDepContext: Copy {
+ type DepKind: self::DepKind;
+ type StableHashingContext;
+ type DepContext: self::DepContext<
+ DepKind = Self::DepKind,
+ StableHashingContext = Self::StableHashingContext,
+ >;
+
+ fn dep_context(&self) -> &Self::DepContext;
+}
+
+impl<T: DepContext> HasDepContext for T {
+ type DepKind = T::DepKind;
+ type StableHashingContext = T::StableHashingContext;
+ type DepContext = Self;
+
+ fn dep_context(&self) -> &Self::DepContext {
+ self
+ }
}
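Editorial sketch (not part of the imported sources): `HasDepContext` exists so that a richer query context can wrap a plain `DepContext` and hand it back through `dep_context()`, while the blanket impl above keeps existing `DepContext` types working unchanged. The wrapper type below is hypothetical; only the trait wiring is the point, mirroring how `QueryCtxt` wraps `TyCtxt` elsewhere in this import.

    // A made-up wrapper context carrying extra query-engine state alongside an
    // inner dep-graph context.
    #[derive(Copy, Clone)]
    struct WrapperCtxt<D> {
        inner: D,
        // ... extra query-engine state would live here ...
    }

    impl<D: DepContext + Copy> HasDepContext for WrapperCtxt<D> {
        type DepKind = D::DepKind;
        type StableHashingContext = D::StableHashingContext;
        type DepContext = D;

        fn dep_context(&self) -> &Self::DepContext {
            // Hand back the inner context; dep-graph and session access go through it.
            &self.inner
        }
    }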
/// Describe the different families of dependency nodes.
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 1d2bc1a..001bf3b 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -1,5 +1,5 @@
use crate::dep_graph::DepNodeIndex;
-use crate::query::plumbing::{QueryLookup, QueryState};
+use crate::query::plumbing::{QueryCacheStore, QueryLookup};
use rustc_arena::TypedArena;
use rustc_data_structures::fx::FxHashMap;
@@ -31,17 +31,15 @@
/// On a hit, this returns the result of `on_hit`. On a miss, it returns the
/// `QueryLookup` (precomputed key hash and shard index), which is reused
/// to compute the query and fill the cache.
- fn lookup<D, Q, R, OnHit, OnMiss>(
+ fn lookup<'s, R, OnHit>(
&self,
- state: &QueryState<D, Q, Self>,
- key: Self::Key,
+ state: &'s QueryCacheStore<Self>,
+ key: &Self::Key,
// `on_hit` can be called while holding a lock to the query cache shard.
on_hit: OnHit,
- on_miss: OnMiss,
- ) -> R
+ ) -> Result<R, QueryLookup>
where
- OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R,
- OnMiss: FnOnce(Self::Key, QueryLookup<'_, D, Q, Self::Key, Self::Sharded>) -> R;
+ OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R;
fn complete(
&self,
@@ -51,12 +49,11 @@
index: DepNodeIndex,
) -> Self::Stored;
- fn iter<R, L>(
+ fn iter<R>(
&self,
- shards: &Sharded<L>,
- get_shard: impl Fn(&mut L) -> &mut Self::Sharded,
+ shards: &Sharded<Self::Sharded>,
f: impl for<'a> FnOnce(
- Box<dyn Iterator<Item = (&'a Self::Key, &'a Self::Value, DepNodeIndex)> + 'a>,
+ &'a mut dyn Iterator<Item = (&'a Self::Key, &'a Self::Value, DepNodeIndex)>,
) -> R,
) -> R;
}
@@ -95,23 +92,24 @@
type Sharded = FxHashMap<K, (V, DepNodeIndex)>;
#[inline(always)]
- fn lookup<D, Q, R, OnHit, OnMiss>(
+ fn lookup<'s, R, OnHit>(
&self,
- state: &QueryState<D, Q, Self>,
- key: K,
+ state: &'s QueryCacheStore<Self>,
+ key: &K,
on_hit: OnHit,
- on_miss: OnMiss,
- ) -> R
+ ) -> Result<R, QueryLookup>
where
OnHit: FnOnce(&V, DepNodeIndex) -> R,
- OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
{
- let mut lookup = state.get_lookup(&key);
- let lock = &mut *lookup.lock;
+ let (lookup, lock) = state.get_lookup(key);
+ let result = lock.raw_entry().from_key_hashed_nocheck(lookup.key_hash, key);
- let result = lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key);
-
- if let Some((_, value)) = result { on_hit(&value.0, value.1) } else { on_miss(key, lookup) }
+ if let Some((_, value)) = result {
+ let hit_result = on_hit(&value.0, value.1);
+ Ok(hit_result)
+ } else {
+ Err(lookup)
+ }
}
#[inline]
@@ -126,16 +124,14 @@
value
}
- fn iter<R, L>(
+ fn iter<R>(
&self,
- shards: &Sharded<L>,
- get_shard: impl Fn(&mut L) -> &mut Self::Sharded,
- f: impl for<'a> FnOnce(Box<dyn Iterator<Item = (&'a K, &'a V, DepNodeIndex)> + 'a>) -> R,
+ shards: &Sharded<Self::Sharded>,
+ f: impl for<'a> FnOnce(&'a mut dyn Iterator<Item = (&'a K, &'a V, DepNodeIndex)>) -> R,
) -> R {
- let mut shards = shards.lock_shards();
- let mut shards: Vec<_> = shards.iter_mut().map(|shard| get_shard(shard)).collect();
- let results = shards.iter_mut().flat_map(|shard| shard.iter()).map(|(k, v)| (k, &v.0, v.1));
- f(Box::new(results))
+ let shards = shards.lock_shards();
+ let mut results = shards.iter().flat_map(|shard| shard.iter()).map(|(k, v)| (k, &v.0, v.1));
+ f(&mut results)
}
}
@@ -177,26 +173,23 @@
type Sharded = FxHashMap<K, &'tcx (V, DepNodeIndex)>;
#[inline(always)]
- fn lookup<D, Q, R, OnHit, OnMiss>(
+ fn lookup<'s, R, OnHit>(
&self,
- state: &QueryState<D, Q, Self>,
- key: K,
+ state: &'s QueryCacheStore<Self>,
+ key: &K,
on_hit: OnHit,
- on_miss: OnMiss,
- ) -> R
+ ) -> Result<R, QueryLookup>
where
OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
- OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
{
- let mut lookup = state.get_lookup(&key);
- let lock = &mut *lookup.lock;
-
- let result = lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key);
+ let (lookup, lock) = state.get_lookup(key);
+ let result = lock.raw_entry().from_key_hashed_nocheck(lookup.key_hash, key);
if let Some((_, value)) = result {
- on_hit(&&value.0, value.1)
+ let hit_result = on_hit(&&value.0, value.1);
+ Ok(hit_result)
} else {
- on_miss(key, lookup)
+ Err(lookup)
}
}
@@ -214,15 +207,13 @@
&value.0
}
- fn iter<R, L>(
+ fn iter<R>(
&self,
- shards: &Sharded<L>,
- get_shard: impl Fn(&mut L) -> &mut Self::Sharded,
- f: impl for<'a> FnOnce(Box<dyn Iterator<Item = (&'a K, &'a V, DepNodeIndex)> + 'a>) -> R,
+ shards: &Sharded<Self::Sharded>,
+ f: impl for<'a> FnOnce(&'a mut dyn Iterator<Item = (&'a K, &'a V, DepNodeIndex)>) -> R,
) -> R {
- let mut shards = shards.lock_shards();
- let mut shards: Vec<_> = shards.iter_mut().map(|shard| get_shard(shard)).collect();
- let results = shards.iter_mut().flat_map(|shard| shard.iter()).map(|(k, v)| (k, &v.0, v.1));
- f(Box::new(results))
+ let shards = shards.lock_shards();
+ let mut results = shards.iter().flat_map(|shard| shard.iter()).map(|(k, v)| (k, &v.0, v.1));
+ f(&mut results)
}
}
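Editorial sketch (not part of the imported sources): with `lookup` now returning a `Result`, a caller inside the query system probes the cache and, on a miss, keeps the returned `QueryLookup` (precomputed key hash and shard index) so `try_start` does not have to hash the key again. `compute_and_complete` below is a made-up stand-in for that plumbing; `cache` is a `&QueryCacheStore<C>` as in plumbing.rs.

    match cache.cache.lookup(cache, &key, |value, dep_node_index| {
        // Cache hit: this closure runs while the shard lock is held, so keep it cheap.
        (value.clone(), dep_node_index)
    }) {
        Ok((value, _index)) => value,
        // Cache miss: `lookup` carries the key hash and shard index for `try_start`.
        Err(lookup) => compute_and_complete(&key, lookup),
    }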
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 0f0684b..4e2515c 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -3,11 +3,10 @@
use crate::dep_graph::DepNode;
use crate::dep_graph::SerializedDepNodeIndex;
use crate::query::caches::QueryCache;
-use crate::query::plumbing::CycleError;
-use crate::query::{QueryContext, QueryState};
+use crate::query::{QueryCacheStore, QueryContext, QueryState};
use rustc_data_structures::fingerprint::Fingerprint;
-use std::borrow::Cow;
+use rustc_errors::DiagnosticBuilder;
use std::fmt::Debug;
use std::hash::Hash;
@@ -28,15 +27,15 @@
pub compute: fn(CTX, K) -> V,
pub hash_result: fn(&mut CTX::StableHashingContext, &V) -> Option<Fingerprint>,
- pub handle_cycle_error: fn(CTX, CycleError<CTX::Query>) -> V,
+ pub handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
pub cache_on_disk: fn(CTX, &K, Option<&V>) -> bool,
pub try_load_from_disk: fn(CTX, SerializedDepNodeIndex) -> Option<V>,
}
impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
- pub(crate) fn to_dep_node(&self, tcx: CTX, key: &K) -> DepNode<CTX::DepKind>
+ pub(crate) fn to_dep_node(&self, tcx: CTX::DepContext, key: &K) -> DepNode<CTX::DepKind>
where
- K: crate::dep_graph::DepNodeParams<CTX>,
+ K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
{
DepNode::construct(tcx, self.dep_kind, key)
}
@@ -53,8 +52,8 @@
(self.hash_result)(hcx, value)
}
- pub(crate) fn handle_cycle_error(&self, tcx: CTX, error: CycleError<CTX::Query>) -> V {
- (self.handle_cycle_error)(tcx, error)
+ pub(crate) fn handle_cycle_error(&self, tcx: CTX, diag: DiagnosticBuilder<'_>) -> V {
+ (self.handle_cycle_error)(tcx, diag)
}
pub(crate) fn cache_on_disk(&self, tcx: CTX, key: &K, value: Option<&V>) -> bool {
@@ -74,14 +73,14 @@
type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
// Don't use this method to access query results; instead use the methods on TyCtxt
- fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, CTX::Query, Self::Cache>;
-
- fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode<CTX::DepKind>
+ fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, Self::Key>
where
- Self::Key: crate::dep_graph::DepNodeParams<CTX>,
- {
- DepNode::construct(tcx, Self::DEP_KIND, key)
- }
+ CTX: 'a;
+
+ // Don't use this method to access query results; instead use the methods on TyCtxt
+ fn query_cache<'a>(tcx: CTX) -> &'a QueryCacheStore<Self::Cache>
+ where
+ CTX: 'a;
// Don't use this method to compute query results; instead use the methods on TyCtxt
fn compute(tcx: CTX, key: Self::Key) -> Self::Value;
@@ -91,11 +90,11 @@
result: &Self::Value,
) -> Option<Fingerprint>;
- fn handle_cycle_error(tcx: CTX, error: CycleError<CTX::Query>) -> Self::Value;
+ fn handle_cycle_error(tcx: CTX, diag: DiagnosticBuilder<'_>) -> Self::Value;
}
pub trait QueryDescription<CTX: QueryContext>: QueryAccessors<CTX> {
- fn describe(tcx: CTX, key: Self::Key) -> Cow<'static, str>;
+ fn describe(tcx: CTX, key: Self::Key) -> String;
#[inline]
fn cache_on_disk(_: CTX, _: &Self::Key, _: Option<&Self::Value>) -> bool {
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 5fed500..35a2ac8 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,6 +1,10 @@
+use crate::dep_graph::DepContext;
use crate::query::plumbing::CycleError;
+use crate::query::{QueryContext, QueryStackFrame};
use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
+use rustc_session::Session;
use rustc_span::Span;
use std::convert::TryFrom;
@@ -10,10 +14,9 @@
#[cfg(parallel_compiler)]
use {
- super::QueryContext,
+ crate::dep_graph::DepKind,
parking_lot::{Condvar, Mutex},
rustc_data_structures::fx::FxHashSet,
- rustc_data_structures::stable_hasher::{HashStable, StableHasher},
rustc_data_structures::sync::Lock,
rustc_data_structures::sync::Lrc,
rustc_data_structures::{jobserver, OnDrop},
@@ -25,13 +28,13 @@
/// Represents a span and a query key.
#[derive(Clone, Debug)]
-pub struct QueryInfo<Q> {
+pub struct QueryInfo {
/// The span corresponding to the reason for which this query was required.
pub span: Span,
- pub query: Q,
+ pub query: QueryStackFrame,
}
-pub(crate) type QueryMap<D, Q> = FxHashMap<QueryJobId<D>, QueryJobInfo<D, Q>>;
+pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;
/// A value uniquely identifying an active query job within a shard in the query cache.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
@@ -58,34 +61,34 @@
QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
}
- fn query<Q: Clone>(self, map: &QueryMap<D, Q>) -> Q {
+ fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
map.get(&self).unwrap().info.query.clone()
}
#[cfg(parallel_compiler)]
- fn span<Q: Clone>(self, map: &QueryMap<D, Q>) -> Span {
+ fn span(self, map: &QueryMap<D>) -> Span {
map.get(&self).unwrap().job.span
}
#[cfg(parallel_compiler)]
- fn parent<Q: Clone>(self, map: &QueryMap<D, Q>) -> Option<QueryJobId<D>> {
+ fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
map.get(&self).unwrap().job.parent
}
#[cfg(parallel_compiler)]
- fn latch<'a, Q: Clone>(self, map: &'a QueryMap<D, Q>) -> Option<&'a QueryLatch<D, Q>> {
+ fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
map.get(&self).unwrap().job.latch.as_ref()
}
}
-pub struct QueryJobInfo<D, Q> {
- pub info: QueryInfo<Q>,
- pub job: QueryJob<D, Q>,
+pub struct QueryJobInfo<D> {
+ pub info: QueryInfo,
+ pub job: QueryJob<D>,
}
/// Represents an active query job.
#[derive(Clone)]
-pub struct QueryJob<D, Q> {
+pub struct QueryJob<D> {
pub id: QueryShardJobId,
/// The span corresponding to the reason for which this query was required.
@@ -96,15 +99,14 @@
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
- latch: Option<QueryLatch<D, Q>>,
+ latch: Option<QueryLatch<D>>,
- dummy: PhantomData<QueryLatch<D, Q>>,
+ dummy: PhantomData<QueryLatch<D>>,
}
-impl<D, Q> QueryJob<D, Q>
+impl<D> QueryJob<D>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
{
/// Creates a new query job.
pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
@@ -119,7 +121,7 @@
}
#[cfg(parallel_compiler)]
- pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D, Q> {
+ pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D> {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
@@ -127,8 +129,8 @@
}
#[cfg(not(parallel_compiler))]
- pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D, Q> {
- QueryLatch { id, dummy: PhantomData }
+ pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D> {
+ QueryLatch { id }
}
/// Signals to waiters that the query is complete.
@@ -147,23 +149,21 @@
#[cfg(not(parallel_compiler))]
#[derive(Clone)]
-pub(super) struct QueryLatch<D, Q> {
+pub(super) struct QueryLatch<D> {
id: QueryJobId<D>,
- dummy: PhantomData<Q>,
}
#[cfg(not(parallel_compiler))]
-impl<D, Q> QueryLatch<D, Q>
+impl<D> QueryLatch<D>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
{
pub(super) fn find_cycle_in_stack(
&self,
- query_map: QueryMap<D, Q>,
+ query_map: QueryMap<D>,
current_job: &Option<QueryJobId<D>>,
span: Span,
- ) -> CycleError<Q> {
+ ) -> CycleError {
// Find the waitee amongst `current_job` parents
let mut cycle = Vec::new();
let mut current_job = Option::clone(current_job);
@@ -197,15 +197,15 @@
}
#[cfg(parallel_compiler)]
-struct QueryWaiter<D, Q> {
+struct QueryWaiter<D> {
query: Option<QueryJobId<D>>,
condvar: Condvar,
span: Span,
- cycle: Lock<Option<CycleError<Q>>>,
+ cycle: Lock<Option<CycleError>>,
}
#[cfg(parallel_compiler)]
-impl<D, Q> QueryWaiter<D, Q> {
+impl<D> QueryWaiter<D> {
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
self.condvar.notify_one();
@@ -213,19 +213,19 @@
}
#[cfg(parallel_compiler)]
-struct QueryLatchInfo<D, Q> {
+struct QueryLatchInfo<D> {
complete: bool,
- waiters: Vec<Lrc<QueryWaiter<D, Q>>>,
+ waiters: Vec<Lrc<QueryWaiter<D>>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
-pub(super) struct QueryLatch<D, Q> {
- info: Lrc<Mutex<QueryLatchInfo<D, Q>>>,
+pub(super) struct QueryLatch<D> {
+ info: Lrc<Mutex<QueryLatchInfo<D>>>,
}
#[cfg(parallel_compiler)]
-impl<D: Eq + Hash, Q: Clone> QueryLatch<D, Q> {
+impl<D: Eq + Hash> QueryLatch<D> {
fn new() -> Self {
QueryLatch {
info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -234,13 +234,13 @@
}
#[cfg(parallel_compiler)]
-impl<D, Q> QueryLatch<D, Q> {
+impl<D> QueryLatch<D> {
/// Waits for the query job to complete.
pub(super) fn wait_on(
&self,
query: Option<QueryJobId<D>>,
span: Span,
- ) -> Result<(), CycleError<Q>> {
+ ) -> Result<(), CycleError> {
let waiter =
Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter);
@@ -255,7 +255,7 @@
}
/// Waits on this latch by blocking the current thread.
- fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D, Q>>) {
+ fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
@@ -289,7 +289,7 @@
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
- fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D, Q>> {
+ fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
@@ -311,14 +311,13 @@
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
-fn visit_waiters<D, Q, F>(
- query_map: &QueryMap<D, Q>,
+fn visit_waiters<D, F>(
+ query_map: &QueryMap<D>,
query: QueryJobId<D>,
mut visit: F,
) -> Option<Option<Waiter<D>>>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
{
// Visit the parent query which is a non-resumable waiter since it's on the same stack
@@ -348,8 +347,8 @@
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
-fn cycle_check<D, Q>(
- query_map: &QueryMap<D, Q>,
+fn cycle_check<D>(
+ query_map: &QueryMap<D>,
query: QueryJobId<D>,
span: Span,
stack: &mut Vec<(Span, QueryJobId<D>)>,
@@ -357,7 +356,6 @@
) -> Option<Option<Waiter<D>>>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
{
if !visited.insert(query) {
return if let Some(p) = stack.iter().position(|q| q.1 == query) {
@@ -393,14 +391,13 @@
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
-fn connected_to_root<D, Q>(
- query_map: &QueryMap<D, Q>,
+fn connected_to_root<D>(
+ query_map: &QueryMap<D>,
query: QueryJobId<D>,
visited: &mut FxHashSet<QueryJobId<D>>,
) -> bool
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
{
// We already visited this or we're deliberately ignoring it
if !visited.insert(query) {
@@ -420,30 +417,23 @@
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
-fn pick_query<'a, CTX, T, F>(
- query_map: &QueryMap<CTX::DepKind, CTX::Query>,
- tcx: CTX,
- queries: &'a [T],
- f: F,
-) -> &'a T
+fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
where
- CTX: QueryContext,
- F: Fn(&T) -> (Span, QueryJobId<CTX::DepKind>),
+ D: Copy + Clone + Eq + Hash,
+ F: Fn(&T) -> (Span, QueryJobId<D>),
{
// Deterministically pick an entry point
// FIXME: Sort this instead
- let mut hcx = tcx.create_stable_hashing_context();
queries
.iter()
.min_by_key(|v| {
let (span, query) = f(v);
- let mut stable_hasher = StableHasher::new();
- query.query(query_map).hash_stable(&mut hcx, &mut stable_hasher);
+ let hash = query.query(query_map).hash;
// Prefer entry points which have valid spans for nicer error messages
// We add an integer to the tuple ensuring that entry points
// with valid spans are picked first
let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
- (span_cmp, stable_hasher.finish::<u64>())
+ (span_cmp, hash)
})
.unwrap()
}
@@ -454,11 +444,10 @@
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
-fn remove_cycle<CTX: QueryContext>(
- query_map: &QueryMap<CTX::DepKind, CTX::Query>,
- jobs: &mut Vec<QueryJobId<CTX::DepKind>>,
- wakelist: &mut Vec<Lrc<QueryWaiter<CTX::DepKind, CTX::Query>>>,
- tcx: CTX,
+fn remove_cycle<D: DepKind>(
+ query_map: &QueryMap<D>,
+ jobs: &mut Vec<QueryJobId<D>>,
+ wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
@@ -508,15 +497,15 @@
None
} else {
// Deterministically pick one of the waiters to show to the user
- let waiter = *pick_query(query_map, tcx, &waiters, |s| *s);
+ let waiter = *pick_query(query_map, &waiters, |s| *s);
Some((span, query, Some(waiter)))
}
}
})
- .collect::<Vec<(Span, QueryJobId<CTX::DepKind>, Option<(Span, QueryJobId<CTX::DepKind>)>)>>();
+ .collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();
// Deterministically pick an entry point
- let (_, entry_point, usage) = pick_query(query_map, tcx, &entry_points, |e| (e.0, e.1));
+ let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
// Shift the stack so that our entry point is first
let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
@@ -573,7 +562,7 @@
let mut found_cycle = false;
while jobs.len() > 0 {
- if remove_cycle(&query_map, &mut jobs, &mut wakelist, tcx) {
+ if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
found_cycle = true;
}
}
@@ -594,3 +583,76 @@
on_panic.disable();
}
+
+#[inline(never)]
+#[cold]
+pub(crate) fn report_cycle<'a>(
+ sess: &'a Session,
+ CycleError { usage, cycle: stack }: CycleError,
+) -> DiagnosticBuilder<'a> {
+ assert!(!stack.is_empty());
+
+ let fix_span = |span: Span, query: &QueryStackFrame| {
+ sess.source_map().guess_head_span(query.default_span(span))
+ };
+
+ let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
+ let mut err =
+ struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);
+
+ for i in 1..stack.len() {
+ let query = &stack[i].query;
+ let span = fix_span(stack[(i + 1) % stack.len()].span, query);
+ err.span_note(span, &format!("...which requires {}...", query.description));
+ }
+
+ err.note(&format!(
+ "...which again requires {}, completing the cycle",
+ stack[0].query.description
+ ));
+
+ if let Some((span, query)) = usage {
+ err.span_note(fix_span(span, &query), &format!("cycle used when {}", query.description));
+ }
+
+ err
+}
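Editorial note (not part of the imported sources): for a cycle A -> B -> A, `report_cycle` assembles a diagnostic of roughly the following shape, with each description taken from the corresponding `QueryStackFrame`; the final note is only attached when `usage` is `Some`.

    error[E0391]: cycle detected when <description of A>
      note: ...which requires <description of B>...
      note: ...which again requires <description of A>, completing the cycle
      note: cycle used when <description of the usage site>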
+
+pub fn print_query_stack<CTX: QueryContext>(
+ tcx: CTX,
+ mut current_query: Option<QueryJobId<CTX::DepKind>>,
+ handler: &Handler,
+ num_frames: Option<usize>,
+) -> usize {
+ // Be careful relying on global state here: this code is called from
+ // a panic hook, which means that the global `Handler` may be in a weird
+ // state if it was responsible for triggering the panic.
+ let mut i = 0;
+ let query_map = tcx.try_collect_active_jobs();
+
+ while let Some(query) = current_query {
+ if Some(i) == num_frames {
+ break;
+ }
+ let query_info = if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
+ info
+ } else {
+ break;
+ };
+ let mut diag = Diagnostic::new(
+ Level::FailureNote,
+ &format!(
+ "#{} [{}] {}",
+ i, query_info.info.query.name, query_info.info.query.description
+ ),
+ );
+ diag.span =
+ tcx.dep_context().sess().source_map().guess_head_span(query_info.info.span).into();
+ handler.force_print_diagnostic(diag);
+
+ current_query = query_info.job.parent;
+ i += 1;
+ }
+
+ i
+}
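Editorial note (not part of the imported sources): this is the routine the panic hook reaches through `QueryEngine::try_print_query_stack` in the `Queries` impl earlier in this import. Each frame prints as `#<i> [<query name>] <description>` with the frame's span attached, and the walk stops early once `num_frames` frames have been printed when a limit is given.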
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index da45565..aef8a13 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -4,7 +4,7 @@
mod job;
#[cfg(parallel_compiler)]
pub use self::job::deadlock;
-pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
+pub use self::job::{print_query_stack, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap};
mod caches;
pub use self::caches::{
@@ -14,31 +14,82 @@
mod config;
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
-use crate::dep_graph::{DepContext, DepGraph};
-use crate::query::job::QueryMap;
+use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
-use rustc_data_structures::stable_hasher::HashStable;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use rustc_span::def_id::DefId;
+use rustc_span::Span;
-pub trait QueryContext: DepContext {
- type Query: Clone + HashStable<Self::StableHashingContext>;
+/// Description of a frame in the query stack.
+///
+/// This is mostly used in case of cycles for error reporting.
+#[derive(Clone, Debug)]
+pub struct QueryStackFrame {
+ pub name: &'static str,
+ pub description: String,
+ span: Option<Span>,
+ /// This hash is used to deterministically pick
+ /// a query to remove cycles in the parallel compiler.
+ #[cfg(parallel_compiler)]
+ hash: u64,
+}
- fn incremental_verify_ich(&self) -> bool;
- fn verbose(&self) -> bool;
+impl QueryStackFrame {
+ #[inline]
+ pub fn new(
+ name: &'static str,
+ description: String,
+ span: Option<Span>,
+ _hash: impl FnOnce() -> u64,
+ ) -> Self {
+ Self {
+ name,
+ description,
+ span,
+ #[cfg(parallel_compiler)]
+ hash: _hash(),
+ }
+ }
+ // FIXME(eddyb) Get more valid `Span`s on queries.
+ #[inline]
+ pub fn default_span(&self, span: Span) -> Span {
+ if !span.is_dummy() {
+ return span;
+ }
+ self.span.unwrap_or(span)
+ }
+}
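Editorial sketch (not part of the imported sources): building a frame for the query-stack and cycle reports. The name, description and hash value are made up; the hash closure is only evaluated under the parallel compiler, per the `cfg` above.

    let frame = QueryStackFrame::new(
        "type_of",
        "computing type of `foo::bar`".to_string(),
        None,       // no more precise span is known for this key
        || 0x1234,  // deterministic hash of the key, used to break cycles reproducibly
    );
    // With no stored span, a dummy span falls through `default_span` unchanged.
    assert_eq!(frame.default_span(rustc_span::DUMMY_SP), rustc_span::DUMMY_SP);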
+
+pub trait QueryContext: HasDepContext {
/// Get string representation from DefPath.
fn def_path_str(&self, def_id: DefId) -> String;
- /// Access the DepGraph.
- fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
-
/// Get the query information from the TLS context.
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
- fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;
+ fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
+
+ /// Load data from the on-disk cache.
+ fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
+
+ /// Try to force a dep node to execute and see if it's green.
+ fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
+
+ /// Load diagnostics associated to the node in the previous session.
+ fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
+
+ /// Register diagnostics for the given node, for use in next session.
+ fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
+
+ /// Register diagnostics for the given node, for use in next session.
+ fn store_diagnostics_for_anon_node(
+ &self,
+ dep_node_index: DepNodeIndex,
+ diagnostics: ThinVec<Diagnostic>,
+ );
/// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes. It returns the diagnostics
@@ -47,6 +98,6 @@
&self,
token: QueryJobId<Self::DepKind>,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
- compute: impl FnOnce(Self) -> R,
+ compute: impl FnOnce() -> R,
) -> R;
}
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 3653213..da37209 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,22 +2,23 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
-use crate::dep_graph::{DepKind, DepNode};
+use crate::dep_graph::{DepContext, DepKind, DepNode};
use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
-use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
-use crate::query::{QueryContext, QueryMap};
+use crate::query::job::{
+ report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId,
+};
+use crate::query::{QueryContext, QueryMap, QueryStackFrame};
#[cfg(not(parallel_compiler))]
use rustc_data_structures::cold_path;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHasher};
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{Diagnostic, FatalError};
-use rustc_span::source_map::DUMMY_SP;
use rustc_span::Span;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
@@ -28,83 +29,103 @@
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
-pub(super) struct QueryStateShard<D, Q, K, C> {
- pub(super) cache: C,
- active: FxHashMap<K, QueryResult<D, Q>>,
+pub struct QueryCacheStore<C: QueryCache> {
+ cache: C,
+ shards: Sharded<C::Sharded>,
+ #[cfg(debug_assertions)]
+ pub cache_hits: AtomicUsize,
+}
+
+impl<C: QueryCache> Default for QueryCacheStore<C> {
+ fn default() -> Self {
+ Self {
+ cache: C::default(),
+ shards: Default::default(),
+ #[cfg(debug_assertions)]
+ cache_hits: AtomicUsize::new(0),
+ }
+ }
+}
+
+/// Values computed while checking a query cache; they can be reused on a cache miss to execute the query.
+pub struct QueryLookup {
+ pub(super) key_hash: u64,
+ shard: usize,
+}
+
+// We compute the key's hash once and then use it for both the
+// shard lookup and the hashmap lookup. This relies on the fact
+// that both of them use `FxHasher`.
+fn hash_for_shard<K: Hash>(key: &K) -> u64 {
+ let mut hasher = FxHasher::default();
+ key.hash(&mut hasher);
+ hasher.finish()
+}
+
+impl<C: QueryCache> QueryCacheStore<C> {
+ pub(super) fn get_lookup<'tcx>(
+ &'tcx self,
+ key: &C::Key,
+ ) -> (QueryLookup, LockGuard<'tcx, C::Sharded>) {
+ let key_hash = hash_for_shard(key);
+ let shard = get_shard_index_by_hash(key_hash);
+ let lock = self.shards.get_shard_by_index(shard).lock();
+ (QueryLookup { key_hash, shard }, lock)
+ }
+
+ pub fn iter_results<R>(
+ &self,
+ f: impl for<'a> FnOnce(
+ &'a mut dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)>,
+ ) -> R,
+ ) -> R {
+ self.cache.iter(&self.shards, f)
+ }
+}
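Editorial sketch (not part of the imported sources): `iter_results` locks all shards and hands the caller a single borrowed iterator over every (key, value, dep-node index) entry; the stats and profiling code earlier in this import uses the same pattern. A trivial use, counting cached entries, where `store` is a stand-in name for a `&QueryCacheStore<C>`:

    let entry_count = store.iter_results(|results| results.count());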
+
+struct QueryStateShard<D, K> {
+ active: FxHashMap<K, QueryResult<D>>,
/// Used to generate unique ids for active jobs.
jobs: u32,
}
-impl<D, Q, K, C: Default> Default for QueryStateShard<D, Q, K, C> {
- fn default() -> QueryStateShard<D, Q, K, C> {
- QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
+impl<D, K> Default for QueryStateShard<D, K> {
+ fn default() -> QueryStateShard<D, K> {
+ QueryStateShard { active: Default::default(), jobs: 0 }
}
}
-pub struct QueryState<D, Q, C: QueryCache> {
- cache: C,
- shards: Sharded<QueryStateShard<D, Q, C::Key, C::Sharded>>,
- #[cfg(debug_assertions)]
- pub cache_hits: AtomicUsize,
-}
-
-impl<D, Q, C: QueryCache> QueryState<D, Q, C> {
- #[inline]
- pub(super) fn get_lookup<'tcx>(
- &'tcx self,
- key: &C::Key,
- ) -> QueryLookup<'tcx, D, Q, C::Key, C::Sharded> {
- // We compute the key's hash once and then use it for both the
- // shard lookup and the hashmap lookup. This relies on the fact
- // that both of them use `FxHasher`.
- let mut hasher = FxHasher::default();
- key.hash(&mut hasher);
- let key_hash = hasher.finish();
-
- let shard = self.shards.get_shard_index_by_hash(key_hash);
- let lock = self.shards.get_shard_by_index(shard).lock();
- QueryLookup { key_hash, shard, lock }
- }
+pub struct QueryState<D, K> {
+ shards: Sharded<QueryStateShard<D, K>>,
}
/// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D, Q> {
+enum QueryResult<D> {
/// An already executing query. The query job can be used to await its completion.
- Started(QueryJob<D, Q>),
+ Started(QueryJob<D>),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic.
Poisoned,
}
-impl<D, Q, C> QueryState<D, Q, C>
+impl<D, K> QueryState<D, K>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
- C: QueryCache,
+ K: Eq + Hash + Clone + Debug,
{
- #[inline(always)]
- pub fn iter_results<R>(
- &self,
- f: impl for<'a> FnOnce(
- Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
- ) -> R,
- ) -> R {
- self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
- }
-
- #[inline(always)]
pub fn all_inactive(&self) -> bool {
let shards = self.shards.lock_shards();
shards.iter().all(|shard| shard.active.is_empty())
}
- pub fn try_collect_active_jobs(
+ pub fn try_collect_active_jobs<CTX: Copy>(
&self,
+ tcx: CTX,
kind: D,
- make_query: fn(C::Key) -> Q,
- jobs: &mut QueryMap<D, Q>,
+ make_query: fn(CTX, K) -> QueryStackFrame,
+ jobs: &mut QueryMap<D>,
) -> Option<()> {
// We use try_lock_shards here since we are called from the
// deadlock handler, and this shouldn't be locked.
@@ -114,7 +135,7 @@
shard.active.iter().filter_map(move |(k, v)| {
if let QueryResult::Started(ref job) = *v {
let id = QueryJobId::new(job.id, shard_id, kind);
- let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
+ let info = QueryInfo { span: job.span, query: make_query(tcx, k.clone()) };
Some((id, QueryJobInfo { info, job: job.clone() }))
} else {
None
@@ -126,41 +147,28 @@
}
}
-impl<D, Q, C: QueryCache> Default for QueryState<D, Q, C> {
- fn default() -> QueryState<D, Q, C> {
- QueryState {
- cache: C::default(),
- shards: Default::default(),
- #[cfg(debug_assertions)]
- cache_hits: AtomicUsize::new(0),
- }
+impl<D, K> Default for QueryState<D, K> {
+ fn default() -> QueryState<D, K> {
+ QueryState { shards: Default::default() }
}
}
-/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
-pub struct QueryLookup<'tcx, D, Q, K, C> {
- pub(super) key_hash: u64,
- shard: usize,
- pub(super) lock: LockGuard<'tcx, QueryStateShard<D, Q, K, C>>,
-}
-
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, D, Q, C>
+struct JobOwner<'tcx, D, C>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
C: QueryCache,
{
- state: &'tcx QueryState<D, Q, C>,
+ state: &'tcx QueryState<D, C::Key>,
+ cache: &'tcx QueryCacheStore<C>,
key: C::Key,
id: QueryJobId<D>,
}
-impl<'tcx, D, Q, C> JobOwner<'tcx, D, Q, C>
+impl<'tcx, D, C> JobOwner<'tcx, D, C>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
C: QueryCache,
{
/// Either gets a `JobOwner` corresponding to the query, allowing us to
@@ -172,18 +180,21 @@
/// This function is inlined because that results in a noticeable speed-up
/// for some compile-time benchmarks.
#[inline(always)]
- fn try_start<'a, 'b, CTX>(
+ fn try_start<'b, CTX>(
tcx: CTX,
- state: &'b QueryState<CTX::DepKind, CTX::Query, C>,
+ state: &'b QueryState<CTX::DepKind, C::Key>,
+ cache: &'b QueryCacheStore<C>,
span: Span,
key: &C::Key,
- mut lookup: QueryLookup<'a, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
+ lookup: QueryLookup,
query: &QueryVtable<CTX, C::Key, C::Value>,
- ) -> TryGetJob<'b, CTX::DepKind, CTX::Query, C>
+ ) -> TryGetJob<'b, CTX::DepKind, C>
where
CTX: QueryContext,
{
- let lock = &mut *lookup.lock;
+ let shard = lookup.shard;
+ let mut state_lock = state.shards.get_shard_by_index(shard).lock();
+ let lock = &mut *state_lock;
let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
Entry::Occupied(mut entry) => {
@@ -193,13 +204,13 @@
// in another thread has completed. Record how long we wait in the
// self-profiler.
let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
- Some(tcx.profiler().query_blocked())
+ Some(tcx.dep_context().profiler().query_blocked())
} else {
None
};
// Create the id of the job we're waiting for
- let id = QueryJobId::new(job.id, lookup.shard, query.dep_kind);
+ let id = QueryJobId::new(job.id, shard, query.dep_kind);
(job.latch(id), _query_blocked_prof_timer)
}
@@ -214,30 +225,31 @@
lock.jobs = id;
let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
- let global_id = QueryJobId::new(id, lookup.shard, query.dep_kind);
+ let global_id = QueryJobId::new(id, shard, query.dep_kind);
let job = tcx.current_query_job();
let job = QueryJob::new(id, span, job);
entry.insert(QueryResult::Started(job));
- let owner = JobOwner { state, id: global_id, key: (*key).clone() };
+ let owner = JobOwner { state, cache, id: global_id, key: (*key).clone() };
return TryGetJob::NotYetStarted(owner);
}
};
- mem::drop(lookup.lock);
+ mem::drop(state_lock);
// If we are single-threaded we know that we have cycle error,
// so we just return the error.
#[cfg(not(parallel_compiler))]
return TryGetJob::Cycle(cold_path(|| {
- let error: CycleError<CTX::Query> = latch.find_cycle_in_stack(
+ let error: CycleError = latch.find_cycle_in_stack(
tcx.try_collect_active_jobs().unwrap(),
&tcx.current_query_job(),
span,
);
+ let error = report_cycle(tcx.dep_context().sess(), error);
let value = query.handle_cycle_error(tcx, error);
- state.cache.store_nocache(value)
+ cache.cache.store_nocache(value)
}));
// With parallel queries we might just have to wait on some other
@@ -247,18 +259,25 @@
let result = latch.wait_on(tcx.current_query_job(), span);
if let Err(cycle) = result {
+ let cycle = report_cycle(tcx.dep_context().sess(), cycle);
let value = query.handle_cycle_error(tcx, cycle);
- let value = state.cache.store_nocache(value);
+ let value = cache.cache.store_nocache(value);
return TryGetJob::Cycle(value);
}
- let cached = try_get_cached(
- tcx,
- state,
- (*key).clone(),
- |value, index| (value.clone(), index),
- |_, _| panic!("value must be in cache after waiting"),
- );
+ let cached = cache
+ .cache
+ .lookup(cache, &key, |value, index| {
+ if unlikely!(tcx.dep_context().profiler().enabled()) {
+ tcx.dep_context().profiler().query_cache_hit(index.into());
+ }
+ #[cfg(debug_assertions)]
+ {
+ cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+ }
+ (value.clone(), index)
+ })
+ .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
if let Some(prof_timer) = _query_blocked_prof_timer.take() {
prof_timer.finish_with_query_invocation_id(cached.1.into());
@@ -270,22 +289,29 @@
/// Completes the query by updating the query cache with the `result`,
/// signals the waiter and forgets the JobOwner, so it won't poison the query
- #[inline(always)]
fn complete(self, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored {
// We can move out of `self` here because we `mem::forget` it below
let key = unsafe { ptr::read(&self.key) };
let state = self.state;
+ let cache = self.cache;
// Forget ourself so our destructor won't poison the query
mem::forget(self);
let (job, result) = {
- let mut lock = state.shards.get_shard_by_value(&key).lock();
- let job = match lock.active.remove(&key).unwrap() {
- QueryResult::Started(job) => job,
- QueryResult::Poisoned => panic!(),
+ let key_hash = hash_for_shard(&key);
+ let shard = get_shard_index_by_hash(key_hash);
+ let job = {
+ let mut lock = state.shards.get_shard_by_index(shard).lock();
+ match lock.active.remove(&key).unwrap() {
+ QueryResult::Started(job) => job,
+ QueryResult::Poisoned => panic!(),
+ }
};
- let result = state.cache.complete(&mut lock.cache, key, result, dep_node_index);
+ let result = {
+ let mut lock = cache.shards.get_shard_by_index(shard).lock();
+ cache.cache.complete(&mut lock, key, result, dep_node_index)
+ };
(job, result)
};
@@ -294,7 +320,6 @@
}
}
-#[inline(always)]
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
where
F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
@@ -304,10 +329,9 @@
(result, diagnostics.into_inner())
}
-impl<'tcx, D, Q, C> Drop for JobOwner<'tcx, D, Q, C>
+impl<'tcx, D, C> Drop for JobOwner<'tcx, D, C>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
C: QueryCache,
{
#[inline(never)]
@@ -332,21 +356,20 @@
}
#[derive(Clone)]
-pub struct CycleError<Q> {
+pub(crate) struct CycleError {
/// The query and related span that uses the cycle.
- pub usage: Option<(Span, Q)>,
- pub cycle: Vec<QueryInfo<Q>>,
+ pub usage: Option<(Span, QueryStackFrame)>,
+ pub cycle: Vec<QueryInfo>,
}
/// The result of `try_start`.
-enum TryGetJob<'tcx, D, Q, C>
+enum TryGetJob<'tcx, D, C>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
C: QueryCache,
{
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
- NotYetStarted(JobOwner<'tcx, D, Q, C>),
+ NotYetStarted(JobOwner<'tcx, D, C>),
/// The query was already completed.
/// Returns the result of the query and its dep-node index
@@ -362,83 +385,79 @@
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
-#[inline(always)]
-fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
+#[inline]
+pub fn try_get_cached<'a, CTX, C, R, OnHit>(
tcx: CTX,
- state: &QueryState<CTX::DepKind, CTX::Query, C>,
- key: C::Key,
+ cache: &'a QueryCacheStore<C>,
+ key: &C::Key,
// `on_hit` can be called while holding a lock to the query cache
on_hit: OnHit,
- on_miss: OnMiss,
-) -> R
+) -> Result<R, QueryLookup>
where
C: QueryCache,
- CTX: QueryContext,
- OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
- OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>) -> R,
+ CTX: DepContext,
+ OnHit: FnOnce(&C::Stored) -> R,
{
- state.cache.lookup(
- state,
- key,
- |value, index| {
- if unlikely!(tcx.profiler().enabled()) {
- tcx.profiler().query_cache_hit(index.into());
- }
- #[cfg(debug_assertions)]
- {
- state.cache_hits.fetch_add(1, Ordering::Relaxed);
- }
- on_hit(value, index)
- },
- on_miss,
- )
+ cache.cache.lookup(cache, &key, |value, index| {
+ if unlikely!(tcx.profiler().enabled()) {
+ tcx.profiler().query_cache_hit(index.into());
+ }
+ #[cfg(debug_assertions)]
+ {
+ cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+ }
+ tcx.dep_graph().read_index(index);
+ on_hit(value)
+ })
}
-#[inline(always)]
fn try_execute_query<CTX, C>(
tcx: CTX,
- state: &QueryState<CTX::DepKind, CTX::Query, C>,
+ state: &QueryState<CTX::DepKind, C::Key>,
+ cache: &QueryCacheStore<C>,
span: Span,
key: C::Key,
- lookup: QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
+ lookup: QueryLookup,
query: &QueryVtable<CTX, C::Key, C::Value>,
) -> C::Stored
where
C: QueryCache,
- C::Key: crate::dep_graph::DepNodeParams<CTX>,
+ C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
- let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
- tcx, state, span, &key, lookup, query,
+ let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
+ tcx, state, cache, span, &key, lookup, query,
) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(result) => return result,
#[cfg(parallel_compiler)]
TryGetJob::JobCompleted((v, index)) => {
- tcx.dep_graph().read_index(index);
+ tcx.dep_context().dep_graph().read_index(index);
return v;
}
};
// Fast path for when incr. comp. is off. `to_dep_node` is
// expensive for some `DepKind`s.
- if !tcx.dep_graph().is_fully_enabled() {
+ if !tcx.dep_context().dep_graph().is_fully_enabled() {
let null_dep_node = DepNode::new_no_params(DepKind::NULL);
return force_query_with_job(tcx, key, job, null_dep_node, query).0;
}
if query.anon {
- let prof_timer = tcx.profiler().query_provider();
+ let prof_timer = tcx.dep_context().profiler().query_provider();
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
- tcx.start_query(job.id, diagnostics, |tcx| {
- tcx.dep_graph().with_anon_task(query.dep_kind, || query.compute(tcx, key))
+ tcx.start_query(job.id, diagnostics, || {
+ tcx.dep_context()
+ .dep_graph()
+ .with_anon_task(query.dep_kind, || query.compute(tcx, key))
})
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
- tcx.dep_graph().read_index(dep_node_index);
+ tcx.dep_context().dep_graph().read_index(dep_node_index);
if unlikely!(!diagnostics.is_empty()) {
tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
@@ -447,14 +466,14 @@
return job.complete(result, dep_node_index);
}
- let dep_node = query.to_dep_node(tcx, &key);
+ let dep_node = query.to_dep_node(*tcx.dep_context(), &key);
if !query.eval_always {
// The diagnostics for this query will be
// promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
- let loaded = tcx.start_query(job.id, None, |tcx| {
- let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node);
+ let loaded = tcx.start_query(job.id, None, || {
+ let marked = tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node);
marked.map(|(prev_dep_node_index, dep_node_index)| {
(
load_from_disk_and_cache_in_memory(
@@ -475,7 +494,7 @@
}
let (result, dep_node_index) = force_query_with_job(tcx, key, job, dep_node, query);
- tcx.dep_graph().read_index(dep_node_index);
+ tcx.dep_context().dep_graph().read_index(dep_node_index);
result
}
@@ -493,11 +512,11 @@
// Note this function can be called concurrently from the same query
// We must ensure that this is handled correctly.
- debug_assert!(tcx.dep_graph().is_green(dep_node));
+ debug_assert!(tcx.dep_context().dep_graph().is_green(dep_node));
// First we try to load the result from the on-disk cache.
let result = if query.cache_on_disk(tcx, &key, None) {
- let prof_timer = tcx.profiler().incr_cache_loading();
+ let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
let result = query.try_load_from_disk(tcx, prev_dep_node_index);
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -514,34 +533,41 @@
None
};
- let result = if let Some(result) = result {
+ if let Some(result) = result {
+ // If `-Zincremental-verify-ich` is specified, re-hash results from
+ // the cache and make sure that they have the expected fingerprint.
+ if unlikely!(tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich) {
+ incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
+ }
+
result
} else {
// We could not load a result from the on-disk cache, so
// recompute.
- let prof_timer = tcx.profiler().query_provider();
+ let prof_timer = tcx.dep_context().profiler().query_provider();
// The dep-graph for this computation is already in-place.
- let result = tcx.dep_graph().with_ignore(|| query.compute(tcx, key));
+ let result = tcx.dep_context().dep_graph().with_ignore(|| query.compute(tcx, key));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+ // Verify that re-running the query produced a result with the expected hash
+ // This catches bugs in query implementations, turning them into ICEs.
+ // For example, a query might sort its result by `DefId` - since `DefId`s are
+        // not stable across compilation sessions, the result could end up getting sorted
+ // in a different order when the query is re-run, even though all of the inputs
+ // (e.g. `DefPathHash` values) were green.
+ //
+ // See issue #82920 for an example of a miscompilation that would get turned into
+ // an ICE by this check
+ incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
+
result
- };
-
- // If `-Zincremental-verify-ich` is specified, re-hash results from
- // the cache and make sure that they have the expected fingerprint.
- if unlikely!(tcx.incremental_verify_ich()) {
- incremental_verify_ich(tcx, &result, dep_node, dep_node_index, query);
}
-
- result
}
-#[inline(never)]
-#[cold]
fn incremental_verify_ich<CTX, K, V: Debug>(
- tcx: CTX,
+ tcx: CTX::DepContext,
result: &V,
dep_node: &DepNode<CTX::DepKind>,
dep_node_index: DepNodeIndex,
@@ -564,13 +590,25 @@
let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index);
- assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
+ if new_hash != old_hash {
+ let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
+ format!("`cargo clean -p {}` or `cargo clean`", crate_name)
+ } else {
+ "`cargo clean`".to_string()
+ };
+ tcx.sess().struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
+ .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
+ .note(&format!("Please follow the instructions below to create a bug report with the provided information"))
+ .note(&format!("See <https://github.com/rust-lang/rust/issues/84970> for more information."))
+ .emit();
+ panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
+ }
}
fn force_query_with_job<C, CTX>(
tcx: CTX,
key: C::Key,
- job: JobOwner<'_, CTX::DepKind, CTX::Query, C>,
+ job: JobOwner<'_, CTX::DepKind, C>,
dep_node: DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, C::Key, C::Value>,
) -> (C::Stored, DepNodeIndex)
@@ -584,7 +622,7 @@
// 2. Two distinct query keys get mapped to the same `DepNode`
// (see for example #48923).
assert!(
- !tcx.dep_graph().dep_node_exists(&dep_node),
+ !tcx.dep_context().dep_graph().dep_node_exists(&dep_node),
"forcing query with already existing `DepNode`\n\
- query-key: {:?}\n\
- dep-node: {:?}",
@@ -592,12 +630,12 @@
dep_node
);
- let prof_timer = tcx.profiler().query_provider();
+ let prof_timer = tcx.dep_context().profiler().query_provider();
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
- tcx.start_query(job.id, diagnostics, |tcx| {
+ tcx.start_query(job.id, diagnostics, || {
if query.eval_always {
- tcx.dep_graph().with_eval_always_task(
+ tcx.dep_context().dep_graph().with_eval_always_task(
dep_node,
tcx,
key,
@@ -605,7 +643,13 @@
query.hash_result,
)
} else {
- tcx.dep_graph().with_task(dep_node, tcx, key, query.compute, query.hash_result)
+ tcx.dep_context().dep_graph().with_task(
+ dep_node,
+ tcx,
+ key,
+ query.compute,
+ query.hash_result,
+ )
}
})
});
@@ -624,57 +668,45 @@
#[inline(never)]
fn get_query_impl<CTX, C>(
tcx: CTX,
- state: &QueryState<CTX::DepKind, CTX::Query, C>,
+ state: &QueryState<CTX::DepKind, C::Key>,
+ cache: &QueryCacheStore<C>,
span: Span,
key: C::Key,
+ lookup: QueryLookup,
query: &QueryVtable<CTX, C::Key, C::Value>,
) -> C::Stored
where
CTX: QueryContext,
C: QueryCache,
- C::Key: crate::dep_graph::DepNodeParams<CTX>,
+ C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
{
- try_get_cached(
- tcx,
- state,
- key,
- |value, index| {
- tcx.dep_graph().read_index(index);
- value.clone()
- },
- |key, lookup| try_execute_query(tcx, state, span, key, lookup, query),
- )
+ try_execute_query(tcx, state, cache, span, key, lookup, query)
}
/// Ensure that either this query has all green inputs or been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
+/// Returns true if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
-fn ensure_query_impl<CTX, C>(
- tcx: CTX,
- state: &QueryState<CTX::DepKind, CTX::Query, C>,
- key: C::Key,
- query: &QueryVtable<CTX, C::Key, C::Value>,
-) where
- C: QueryCache,
- C::Key: crate::dep_graph::DepNodeParams<CTX>,
+fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
+where
+ K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
if query.eval_always {
- let _ = get_query_impl(tcx, state, DUMMY_SP, key, query);
- return;
+ return true;
}
// Ensuring an anonymous query makes no sense
assert!(!query.anon);
- let dep_node = query.to_dep_node(tcx, &key);
+ let dep_node = query.to_dep_node(*tcx.dep_context(), key);
- match tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node) {
+ match tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node) {
None => {
// A None return from `try_mark_green_and_read` means that this is either
// a new dep node or that the dep node has already been marked red.
@@ -682,10 +714,11 @@
// DepNodeIndex. We must invoke the query itself. The performance cost
// this introduces should be negligible as we'll immediately hit the
// in-memory cache, or another query down the line will.
- let _ = get_query_impl(tcx, state, DUMMY_SP, key, query);
+ true
}
Some((_, dep_node_index)) => {
- tcx.profiler().query_cache_hit(dep_node_index.into());
+ tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
+ false
}
}
}
@@ -693,68 +726,80 @@
#[inline(never)]
fn force_query_impl<CTX, C>(
tcx: CTX,
- state: &QueryState<CTX::DepKind, CTX::Query, C>,
+ state: &QueryState<CTX::DepKind, C::Key>,
+ cache: &QueryCacheStore<C>,
key: C::Key,
span: Span,
dep_node: DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, C::Key, C::Value>,
) where
C: QueryCache,
- C::Key: crate::dep_graph::DepNodeParams<CTX>,
+ C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
// We may be concurrently trying both execute and force a query.
// Ensure that only one of them runs the query.
+ let cached = cache.cache.lookup(cache, &key, |_, index| {
+ if unlikely!(tcx.dep_context().profiler().enabled()) {
+ tcx.dep_context().profiler().query_cache_hit(index.into());
+ }
+ #[cfg(debug_assertions)]
+ {
+ cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+ }
+ });
- try_get_cached(
- tcx,
- state,
- key,
- |_, _| {
- // Cache hit, do nothing
- },
- |key, lookup| {
- let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
- tcx, state, span, &key, lookup, query,
- ) {
- TryGetJob::NotYetStarted(job) => job,
- TryGetJob::Cycle(_) => return,
- #[cfg(parallel_compiler)]
- TryGetJob::JobCompleted(_) => return,
- };
- force_query_with_job(tcx, key, job, dep_node, query);
- },
- );
+ let lookup = match cached {
+ Ok(()) => return,
+ Err(lookup) => lookup,
+ };
+
+ let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
+ tcx, state, cache, span, &key, lookup, query,
+ ) {
+ TryGetJob::NotYetStarted(job) => job,
+ TryGetJob::Cycle(_) => return,
+ #[cfg(parallel_compiler)]
+ TryGetJob::JobCompleted(_) => return,
+ };
+ force_query_with_job(tcx, key, job, dep_node, query);
}
-#[inline(always)]
-pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key) -> Q::Stored
+pub enum QueryMode {
+ Get,
+ Ensure,
+}
+
+pub fn get_query<Q, CTX>(
+ tcx: CTX,
+ span: Span,
+ key: Q::Key,
+ lookup: QueryLookup,
+ mode: QueryMode,
+) -> Option<Q::Stored>
where
Q: QueryDescription<CTX>,
- Q::Key: crate::dep_graph::DepNodeParams<CTX>,
+ Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
+ let query = &Q::VTABLE;
+ if let QueryMode::Ensure = mode {
+ if !ensure_must_run(tcx, &key, query) {
+ return None;
+ }
+ }
+
debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
-
- get_query_impl(tcx, Q::query_state(tcx), span, key, &Q::VTABLE)
+ let value =
+ get_query_impl(tcx, Q::query_state(tcx), Q::query_cache(tcx), span, key, lookup, query);
+ Some(value)
}
-#[inline(always)]
-pub fn ensure_query<Q, CTX>(tcx: CTX, key: Q::Key)
-where
- Q: QueryDescription<CTX>,
- Q::Key: crate::dep_graph::DepNodeParams<CTX>,
- CTX: QueryContext,
-{
- ensure_query_impl(tcx, Q::query_state(tcx), key, &Q::VTABLE)
-}
-
-#[inline(always)]
pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, span: Span, dep_node: DepNode<CTX::DepKind>)
where
Q: QueryDescription<CTX>,
- Q::Key: crate::dep_graph::DepNodeParams<CTX>,
+ Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
- force_query_impl(tcx, Q::query_state(tcx), key, span, dep_node, &Q::VTABLE)
+ force_query_impl(tcx, Q::query_state(tcx), Q::query_cache(tcx), key, span, dep_node, &Q::VTABLE)
}
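
Note on the hunk above: the state/cache split also changes how shards are addressed. Instead of carrying a lock guard around inside `QueryLookup`, the code now recomputes the shard index from the key hash and re-locks that shard only when it actually needs it (see `hash_for_shard`/`get_shard_index_by_hash` in `JobOwner::complete` and the `Result<R, QueryLookup>` return of `try_get_cached`). The following is a minimal standalone sketch of that sharding pattern using only std types; `Sharded`, `SHARDS`, and the helper names here are illustrative, not rustc's actual API.

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARDS: usize = 32;

struct Sharded<K, V> {
    shards: Vec<Mutex<HashMap<K, V>>>,
}

impl<K: Eq + Hash, V: Clone> Sharded<K, V> {
    fn new() -> Self {
        Sharded { shards: (0..SHARDS).map(|_| Mutex::new(HashMap::new())).collect() }
    }

    // Hash the key once and derive the shard index from that hash
    // (rustc uses the high bits of an FxHasher hash; a modulo is enough here).
    fn shard_index(key: &K) -> usize {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        (hasher.finish() as usize) % SHARDS
    }

    // On a hit return the cached value; on a miss return the shard index so
    // the caller can re-lock the same shard later, rather than holding the
    // lock guard across the whole computation.
    fn lookup(&self, key: &K) -> Result<V, usize> {
        let shard = Self::shard_index(key);
        let guard = self.shards[shard].lock().unwrap();
        guard.get(key).cloned().ok_or(shard)
    }

    fn insert(&self, shard: usize, key: K, value: V) {
        self.shards[shard].lock().unwrap().insert(key, value);
    }
}

fn main() {
    let cache: Sharded<&str, u32> = Sharded::new();
    let shard = cache.lookup(&"typeck").unwrap_err(); // miss: no value cached yet
    cache.insert(shard, "typeck", 42);
    assert_eq!(cache.lookup(&"typeck"), Ok(42));
}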
diff --git a/compiler/rustc_resolve/Cargo.toml b/compiler/rustc_resolve/Cargo.toml
index 821f9ea..7441f4a 100644
--- a/compiler/rustc_resolve/Cargo.toml
+++ b/compiler/rustc_resolve/Cargo.toml
@@ -26,4 +26,4 @@
rustc_metadata = { path = "../rustc_metadata" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs
index c4ee4df..d77022a 100644
--- a/compiler/rustc_resolve/src/build_reduced_graph.rs
+++ b/compiler/rustc_resolve/src/build_reduced_graph.rs
@@ -258,16 +258,16 @@
Ok(ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)))
}
ast::VisibilityKind::Inherited => {
- if matches!(self.parent_scope.module.kind, ModuleKind::Def(DefKind::Enum, _, _)) {
- // Any inherited visibility resolved directly inside an enum
- // (e.g. variants or fields) inherits from the visibility of the enum.
- let parent_enum = self.parent_scope.module.def_id().unwrap().expect_local();
- Ok(self.r.visibilities[&parent_enum])
- } else {
- // If it's not in an enum, its visibility is restricted to the `mod` item
- // that it's defined in.
- Ok(ty::Visibility::Restricted(self.parent_scope.module.nearest_parent_mod))
- }
+ Ok(match self.parent_scope.module.kind {
+ // Any inherited visibility resolved directly inside an enum or trait
+ // (i.e. variants, fields, and trait items) inherits from the visibility
+ // of the enum or trait.
+ ModuleKind::Def(DefKind::Enum | DefKind::Trait, def_id, _) => {
+ self.r.visibilities[&def_id.expect_local()]
+ }
+ // Otherwise, the visibility is restricted to the nearest parent `mod` item.
+ _ => ty::Visibility::Restricted(self.parent_scope.module.nearest_parent_mod),
+ })
}
ast::VisibilityKind::Restricted { ref path, id, .. } => {
// For visibilities we are not ready to provide correct implementation of "uniform
@@ -1230,13 +1230,13 @@
};
let res = Res::Def(DefKind::Macro(ext.macro_kind()), def_id.to_def_id());
+ let is_macro_export = self.r.session.contains_name(&item.attrs, sym::macro_export);
self.r.macro_map.insert(def_id.to_def_id(), ext);
self.r.local_macro_def_scopes.insert(def_id, parent_scope.module);
- if macro_rules {
+ if macro_rules && matches!(item.vis.kind, ast::VisibilityKind::Inherited) {
let ident = ident.normalize_to_macros_2_0();
self.r.macro_names.insert(ident);
- let is_macro_export = self.r.session.contains_name(&item.attrs, sym::macro_export);
let vis = if is_macro_export {
ty::Visibility::Public
} else {
@@ -1261,6 +1261,11 @@
}),
))
} else {
+ if is_macro_export {
+ let what = if macro_rules { "`macro_rules` with `pub`" } else { "`macro` items" };
+ let msg = format!("`#[macro_export]` cannot be used on {what}");
+ self.r.session.span_err(item.span, &msg);
+ }
let module = parent_scope.module;
let vis = match item.kind {
// Visibilities must not be resolved non-speculatively twice
@@ -1365,60 +1370,42 @@
return;
}
+ let vis = self.resolve_visibility(&item.vis);
let local_def_id = self.r.local_def_id(item.id);
let def_id = local_def_id.to_def_id();
- let vis = match ctxt {
- AssocCtxt::Trait => {
- let (def_kind, ns) = match item.kind {
- AssocItemKind::Const(..) => (DefKind::AssocConst, ValueNS),
- AssocItemKind::Fn(box FnKind(_, ref sig, _, _)) => {
- if sig.decl.has_self() {
- self.r.has_self.insert(def_id);
- }
- (DefKind::AssocFn, ValueNS)
- }
- AssocItemKind::TyAlias(..) => (DefKind::AssocTy, TypeNS),
- AssocItemKind::MacCall(_) => bug!(), // handled above
- };
- let parent = self.parent_scope.module;
- let expansion = self.parent_scope.expansion;
- let res = Res::Def(def_kind, def_id);
- // Trait item visibility is inherited from its trait when not specified explicitly.
- let vis = match &item.vis.kind {
- ast::VisibilityKind::Inherited => {
- self.r.visibilities[&parent.def_id().unwrap().expect_local()]
- }
- _ => self.resolve_visibility(&item.vis),
- };
- // FIXME: For historical reasons the binding visibility is set to public,
- // use actual visibility here instead, using enum variants as an example.
- let vis_hack = ty::Visibility::Public;
- self.r.define(parent, item.ident, ns, (res, vis_hack, item.span, expansion));
- Some(vis)
- }
- AssocCtxt::Impl => {
- // Trait impl item visibility is inherited from its trait when not specified
- // explicitly. In that case we cannot determine it here in early resolve,
- // so we leave a hole in the visibility table to be filled later.
- // Inherent impl item visibility is never inherited from other items.
- if matches!(item.vis.kind, ast::VisibilityKind::Inherited)
- && self
- .r
- .trait_impl_items
- .contains(&ty::DefIdTree::parent(&*self.r, def_id).unwrap().expect_local())
- {
- None
- } else {
- Some(self.resolve_visibility(&item.vis))
- }
- }
- };
-
- if let Some(vis) = vis {
+ if !(ctxt == AssocCtxt::Impl
+ && matches!(item.vis.kind, ast::VisibilityKind::Inherited)
+ && self
+ .r
+ .trait_impl_items
+ .contains(&ty::DefIdTree::parent(&*self.r, def_id).unwrap().expect_local()))
+ {
+ // Trait impl item visibility is inherited from its trait when not specified
+ // explicitly. In that case we cannot determine it here in early resolve,
+ // so we leave a hole in the visibility table to be filled later.
self.r.visibilities.insert(local_def_id, vis);
}
+ if ctxt == AssocCtxt::Trait {
+ let (def_kind, ns) = match item.kind {
+ AssocItemKind::Const(..) => (DefKind::AssocConst, ValueNS),
+ AssocItemKind::Fn(box FnKind(_, ref sig, _, _)) => {
+ if sig.decl.has_self() {
+ self.r.has_self.insert(def_id);
+ }
+ (DefKind::AssocFn, ValueNS)
+ }
+ AssocItemKind::TyAlias(..) => (DefKind::AssocTy, TypeNS),
+ AssocItemKind::MacCall(_) => bug!(), // handled above
+ };
+
+ let parent = self.parent_scope.module;
+ let expansion = self.parent_scope.expansion;
+ let res = Res::Def(def_kind, def_id);
+ self.r.define(parent, item.ident, ns, (res, vis, item.span, expansion));
+ }
+
visit::walk_assoc_item(self, item, ctxt);
}
@@ -1439,19 +1426,19 @@
}
}
- fn visit_field(&mut self, f: &'b ast::Field) {
+ fn visit_expr_field(&mut self, f: &'b ast::ExprField) {
if f.is_placeholder {
self.visit_invoc(f.id);
} else {
- visit::walk_field(self, f);
+ visit::walk_expr_field(self, f);
}
}
- fn visit_field_pattern(&mut self, fp: &'b ast::FieldPat) {
+ fn visit_pat_field(&mut self, fp: &'b ast::PatField) {
if fp.is_placeholder {
self.visit_invoc(fp.id);
} else {
- visit::walk_field_pattern(self, fp);
+ visit::walk_pat_field(self, fp);
}
}
@@ -1471,13 +1458,13 @@
}
}
- fn visit_struct_field(&mut self, sf: &'b ast::StructField) {
+ fn visit_field_def(&mut self, sf: &'b ast::FieldDef) {
if sf.is_placeholder {
self.visit_invoc(sf.id);
} else {
let vis = self.resolve_visibility(&sf.vis);
self.r.visibilities.insert(self.r.local_def_id(sf.id), vis);
- visit::walk_struct_field(self, sf);
+ visit::walk_field_def(self, sf);
}
}
@@ -1490,19 +1477,13 @@
}
let parent = self.parent_scope.module;
- let vis = match variant.vis.kind {
- // Variant visibility is inherited from its enum when not specified explicitly.
- ast::VisibilityKind::Inherited => {
- self.r.visibilities[&parent.def_id().unwrap().expect_local()]
- }
- _ => self.resolve_visibility(&variant.vis),
- };
let expn_id = self.parent_scope.expansion;
let ident = variant.ident;
// Define a name in the type namespace.
let def_id = self.r.local_def_id(variant.id);
let res = Res::Def(DefKind::Variant, def_id.to_def_id());
+ let vis = self.resolve_visibility(&variant.vis);
self.r.define(parent, ident, TypeNS, (res, vis, variant.span, expn_id));
self.r.visibilities.insert(def_id, vis);
diff --git a/compiler/rustc_resolve/src/def_collector.rs b/compiler/rustc_resolve/src/def_collector.rs
index 727d6ab..17f0c39 100644
--- a/compiler/rustc_resolve/src/def_collector.rs
+++ b/compiler/rustc_resolve/src/def_collector.rs
@@ -1,4 +1,4 @@
-use crate::Resolver;
+use crate::{ImplTraitContext, Resolver};
use rustc_ast::visit::{self, FnKind};
use rustc_ast::walk_list;
use rustc_ast::*;
@@ -16,14 +16,15 @@
fragment: &AstFragment,
expansion: ExpnId,
) {
- let parent_def = resolver.invocation_parents[&expansion];
- fragment.visit_with(&mut DefCollector { resolver, parent_def, expansion });
+ let (parent_def, impl_trait_context) = resolver.invocation_parents[&expansion];
+ fragment.visit_with(&mut DefCollector { resolver, parent_def, expansion, impl_trait_context });
}
/// Creates `DefId`s for nodes in the AST.
struct DefCollector<'a, 'b> {
resolver: &'a mut Resolver<'b>,
parent_def: LocalDefId,
+ impl_trait_context: ImplTraitContext,
expansion: ExpnId,
}
@@ -40,7 +41,17 @@
self.parent_def = orig_parent_def;
}
- fn collect_field(&mut self, field: &'a StructField, index: Option<usize>) {
+ fn with_impl_trait<F: FnOnce(&mut Self)>(
+ &mut self,
+ impl_trait_context: ImplTraitContext,
+ f: F,
+ ) {
+ let orig_itc = std::mem::replace(&mut self.impl_trait_context, impl_trait_context);
+ f(self);
+ self.impl_trait_context = orig_itc;
+ }
+
+ fn collect_field(&mut self, field: &'a FieldDef, index: Option<usize>) {
let index = |this: &Self| {
index.unwrap_or_else(|| {
let node_id = NodeId::placeholder_from_expn_id(this.expansion);
@@ -55,13 +66,14 @@
} else {
let name = field.ident.map_or_else(|| sym::integer(index(self)), |ident| ident.name);
let def = self.create_def(field.id, DefPathData::ValueNs(name), field.span);
- self.with_parent(def, |this| visit::walk_struct_field(this, field));
+ self.with_parent(def, |this| visit::walk_field_def(this, field));
}
}
fn visit_macro_invoc(&mut self, id: NodeId) {
+ let id = id.placeholder_to_expn_id();
let old_parent =
- self.resolver.invocation_parents.insert(id.placeholder_to_expn_id(), self.parent_def);
+ self.resolver.invocation_parents.insert(id, (self.parent_def, self.impl_trait_context));
assert!(old_parent.is_none(), "parent `LocalDefId` is reset for an invocation");
}
}
@@ -103,29 +115,37 @@
let def = self.create_def(i.id, def_data, i.span);
self.with_parent(def, |this| {
- match i.kind {
- ItemKind::Struct(ref struct_def, _) | ItemKind::Union(ref struct_def, _) => {
- // If this is a unit or tuple-like struct, register the constructor.
- if let Some(ctor_hir_id) = struct_def.ctor_id() {
- this.create_def(ctor_hir_id, DefPathData::Ctor, i.span);
+ this.with_impl_trait(ImplTraitContext::Existential, |this| {
+ match i.kind {
+ ItemKind::Struct(ref struct_def, _) | ItemKind::Union(ref struct_def, _) => {
+ // If this is a unit or tuple-like struct, register the constructor.
+ if let Some(ctor_hir_id) = struct_def.ctor_id() {
+ this.create_def(ctor_hir_id, DefPathData::Ctor, i.span);
+ }
}
+ _ => {}
}
- _ => {}
- }
- visit::walk_item(this, i);
+ visit::walk_item(this, i);
+ })
});
}
fn visit_fn(&mut self, fn_kind: FnKind<'a>, span: Span, _: NodeId) {
if let FnKind::Fn(_, _, sig, _, body) = fn_kind {
if let Async::Yes { closure_id, return_impl_trait_id, .. } = sig.header.asyncness {
- self.create_def(return_impl_trait_id, DefPathData::ImplTrait, span);
+ let return_impl_trait_id =
+ self.create_def(return_impl_trait_id, DefPathData::ImplTrait, span);
// For async functions, we need to create their inner defs inside of a
// closure to match their desugared representation. Besides that,
// we must mirror everything that `visit::walk_fn` below does.
self.visit_fn_header(&sig.header);
- visit::walk_fn_decl(self, &sig.decl);
+ for param in &sig.decl.inputs {
+ self.visit_param(param);
+ }
+ self.with_parent(return_impl_trait_id, |this| {
+ this.visit_fn_ret_ty(&sig.decl.output)
+ });
let closure_def = self.create_def(closure_id, DefPathData::ClosureExpr, span);
self.with_parent(closure_def, |this| walk_list!(this, visit_block, body));
return;
@@ -137,6 +157,14 @@
fn visit_use_tree(&mut self, use_tree: &'a UseTree, id: NodeId, _nested: bool) {
self.create_def(id, DefPathData::Misc, use_tree.span);
+ match use_tree.kind {
+ UseTreeKind::Simple(_, id1, id2) => {
+ self.create_def(id1, DefPathData::Misc, use_tree.prefix.span);
+ self.create_def(id2, DefPathData::Misc, use_tree.prefix.span);
+ }
+ UseTreeKind::Glob => (),
+ UseTreeKind::Nested(..) => {}
+ }
visit::walk_use_tree(self, use_tree, id);
}
@@ -191,7 +219,15 @@
};
self.create_def(param.id, def_path_data, param.ident.span);
- visit::walk_generic_param(self, param);
+ // impl-Trait can happen inside generic parameters, like
+ // ```
+ // fn foo<U: Iterator<Item = impl Clone>>() {}
+ // ```
+ //
+ // In that case, the impl-trait is lowered as an additional generic parameter.
+ self.with_impl_trait(ImplTraitContext::Universal(self.parent_def), |this| {
+ visit::walk_generic_param(this, param)
+ });
}
fn visit_assoc_item(&mut self, i: &'a AssocItem, ctxt: visit::AssocCtxt) {
@@ -244,8 +280,19 @@
match ty.kind {
TyKind::MacCall(..) => self.visit_macro_invoc(ty.id),
TyKind::ImplTrait(node_id, _) => {
- let parent_def = self.create_def(node_id, DefPathData::ImplTrait, ty.span);
- self.with_parent(parent_def, |this| visit::walk_ty(this, ty));
+ let parent_def = match self.impl_trait_context {
+ ImplTraitContext::Universal(item_def) => self.resolver.create_def(
+ item_def,
+ node_id,
+ DefPathData::ImplTrait,
+ self.expansion,
+ ty.span,
+ ),
+ ImplTraitContext::Existential => {
+ self.create_def(node_id, DefPathData::ImplTrait, ty.span)
+ }
+ };
+ self.with_parent(parent_def, |this| visit::walk_ty(this, ty))
}
_ => visit::walk_ty(self, ty),
}
@@ -262,25 +309,35 @@
if arm.is_placeholder { self.visit_macro_invoc(arm.id) } else { visit::walk_arm(self, arm) }
}
- fn visit_field(&mut self, f: &'a Field) {
- if f.is_placeholder { self.visit_macro_invoc(f.id) } else { visit::walk_field(self, f) }
+ fn visit_expr_field(&mut self, f: &'a ExprField) {
+ if f.is_placeholder {
+ self.visit_macro_invoc(f.id)
+ } else {
+ visit::walk_expr_field(self, f)
+ }
}
- fn visit_field_pattern(&mut self, fp: &'a FieldPat) {
+ fn visit_pat_field(&mut self, fp: &'a PatField) {
if fp.is_placeholder {
self.visit_macro_invoc(fp.id)
} else {
- visit::walk_field_pattern(self, fp)
+ visit::walk_pat_field(self, fp)
}
}
fn visit_param(&mut self, p: &'a Param) {
- if p.is_placeholder { self.visit_macro_invoc(p.id) } else { visit::walk_param(self, p) }
+ if p.is_placeholder {
+ self.visit_macro_invoc(p.id)
+ } else {
+ self.with_impl_trait(ImplTraitContext::Universal(self.parent_def), |this| {
+ visit::walk_param(this, p)
+ })
+ }
}
// This method is called only when we are visiting an individual field
// after expanding an attribute on it.
- fn visit_struct_field(&mut self, field: &'a StructField) {
+ fn visit_field_def(&mut self, field: &'a FieldDef) {
self.collect_field(field, None);
}
}
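
The new `with_impl_trait` helper above follows the same save/replace/restore idiom as `with_parent`: temporarily swap a visitor field with `std::mem::replace`, run the closure, then put the old value back. A small self-contained sketch of that idiom (the `Collector`/`Context` names are illustrative only, not the rustc types):

use std::mem;

#[derive(Clone, Copy, Debug, PartialEq)]
enum Context {
    Existential,
    Universal,
}

struct Collector {
    context: Context,
}

impl Collector {
    // Override `context` for the duration of `f`, then restore it.
    fn with_context<F: FnOnce(&mut Self)>(&mut self, context: Context, f: F) {
        let orig = mem::replace(&mut self.context, context);
        f(self);
        self.context = orig;
    }
}

fn main() {
    let mut c = Collector { context: Context::Existential };
    c.with_context(Context::Universal, |this| {
        assert_eq!(this.context, Context::Universal);
    });
    // The original context is restored once the closure returns.
    assert_eq!(c.context, Context::Existential);
}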
diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs
index 3b02f74..7493fd6 100644
--- a/compiler/rustc_resolve/src/diagnostics.rs
+++ b/compiler/rustc_resolve/src/diagnostics.rs
@@ -9,6 +9,7 @@
use rustc_hir::def::Namespace::{self, *};
use rustc_hir::def::{self, CtorKind, CtorOf, DefKind, NonMacroAttrKind};
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
+use rustc_hir::PrimTy;
use rustc_middle::bug;
use rustc_middle::ty::{self, DefIdTree};
use rustc_session::Session;
@@ -718,10 +719,9 @@
}
}
Scope::BuiltinTypes => {
- let primitive_types = &this.primitive_type_table.primitive_types;
- suggestions.extend(primitive_types.iter().flat_map(|(name, prim_ty)| {
+ suggestions.extend(PrimTy::ALL.iter().filter_map(|prim_ty| {
let res = Res::PrimTy(*prim_ty);
- filter_fn(res).then_some(TypoSuggestion::from_res(*name, res))
+ filter_fn(res).then_some(TypoSuggestion::from_res(prim_ty.name(), res))
}))
}
}
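
The suggestion change above leans on `Iterator::filter_map` combined with `bool::then_some` to keep only the primitive types accepted by `filter_fn`, mapping each kept item in the same step. A tiny standalone example of that combination (toy data only, not rustc's `TypoSuggestion`):

fn main() {
    let primitives = ["bool", "char", "i32", "u8", "f64"];
    // Pretend the caller only wants integer-like suggestions.
    let is_integer = |name: &str| name.starts_with('i') || name.starts_with('u');

    let suggestions: Vec<String> = primitives
        .iter()
        // `then_some` turns `true` into `Some(..)` and `false` into `None`,
        // so `filter_map` filters and maps in one pass.
        .filter_map(|&name| is_integer(name).then_some(format!("prim:{name}")))
        .collect();

    assert_eq!(suggestions, ["prim:i32", "prim:u8"]);
}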
diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs
index cb1f083..61f4c00 100644
--- a/compiler/rustc_resolve/src/imports.rs
+++ b/compiler/rustc_resolve/src/imports.rs
@@ -18,11 +18,10 @@
use rustc_hir::def::{self, PartialRes};
use rustc_hir::def_id::DefId;
use rustc_middle::hir::exports::Export;
+use rustc_middle::span_bug;
use rustc_middle::ty;
-use rustc_middle::{bug, span_bug};
use rustc_session::lint::builtin::{PUB_USE_OF_PRIVATE_EXTERN_CRATE, UNUSED_IMPORTS};
use rustc_session::lint::BuiltinLintDiagnostics;
-use rustc_session::DiagnosticMessageId;
use rustc_span::hygiene::ExpnId;
use rustc_span::lev_distance::find_best_match_for_name;
use rustc_span::symbol::{kw, Ident, Symbol};
@@ -157,6 +156,21 @@
}
}
+// Reexports of the form `pub use foo as bar;` where `foo` is `extern crate foo;`
+// are permitted for backward-compatibility under a deprecation lint.
+fn pub_use_of_private_extern_crate_hack(import: &Import<'_>, binding: &NameBinding<'_>) -> bool {
+ match (&import.kind, &binding.kind) {
+ (
+ ImportKind::Single { .. },
+ NameBindingKind::Import {
+ import: Import { kind: ImportKind::ExternCrate { .. }, .. },
+ ..
+ },
+ ) => import.vis.get() == ty::Visibility::Public,
+ _ => false,
+ }
+}
+
impl<'a> Resolver<'a> {
crate fn resolve_ident_in_module_unadjusted(
&mut self,
@@ -264,10 +278,7 @@
return Err((Determined, Weak::No));
}
}
- // `extern crate` are always usable for backwards compatibility, see issue #37020,
- // remove this together with `PUB_USE_OF_PRIVATE_EXTERN_CRATE`.
- let usable = this.is_accessible_from(binding.vis, parent_scope.module)
- || binding.is_extern_crate();
+ let usable = this.is_accessible_from(binding.vis, parent_scope.module);
if usable { Ok(binding) } else { Err((Determined, Weak::No)) }
};
@@ -310,10 +321,7 @@
}
}
- if !(self.is_accessible_from(binding.vis, parent_scope.module) ||
- // Remove this together with `PUB_USE_OF_PRIVATE_EXTERN_CRATE`
- (self.last_import_segment && binding.is_extern_crate()))
- {
+ if !self.is_accessible_from(binding.vis, parent_scope.module) {
self.privacy_errors.push(PrivacyError {
ident,
binding,
@@ -456,13 +464,12 @@
binding: &'a NameBinding<'a>,
import: &'a Import<'a>,
) -> &'a NameBinding<'a> {
- let vis = if binding.pseudo_vis().is_at_least(import.vis.get(), self) ||
- // cf. `PUB_USE_OF_PRIVATE_EXTERN_CRATE`
- !import.is_glob() && binding.is_extern_crate()
+ let vis = if binding.vis.is_at_least(import.vis.get(), self)
+ || pub_use_of_private_extern_crate_hack(import, binding)
{
import.vis.get()
} else {
- binding.pseudo_vis()
+ binding.vis
};
if let ImportKind::Glob { ref max_vis, .. } = import.kind {
@@ -1178,7 +1185,7 @@
self.r.per_ns(|this, ns| {
if let Ok(binding) = source_bindings[ns].get() {
let vis = import.vis.get();
- if !binding.pseudo_vis().is_at_least(vis, &*this) {
+ if !binding.vis.is_at_least(vis, &*this) {
reexport_error = Some((ns, binding));
} else {
any_successful_reexport = true;
@@ -1189,7 +1196,7 @@
// All namespaces must be re-exported with extra visibility for an error to occur.
if !any_successful_reexport {
let (ns, binding) = reexport_error.unwrap();
- if ns == TypeNS && binding.is_extern_crate() {
+ if pub_use_of_private_extern_crate_hack(import, binding) {
let msg = format!(
"extern crate `{}` is private, and cannot be \
re-exported (error E0365), consider declaring with \
@@ -1362,7 +1369,7 @@
Some(None) => import.parent_scope.module,
None => continue,
};
- if self.r.is_accessible_from(binding.pseudo_vis(), scope) {
+ if self.r.is_accessible_from(binding.vis, scope) {
let imported_binding = self.r.import(binding, import);
let _ = self.r.try_define(import.parent_scope.module, key, imported_binding);
}
@@ -1380,9 +1387,8 @@
let mut reexports = Vec::new();
- module.for_each_child(self.r, |this, ident, ns, binding| {
- // Filter away ambiguous imports and anything that has def-site
- // hygiene.
+ module.for_each_child(self.r, |this, ident, _, binding| {
+ // Filter away ambiguous imports and anything that has def-site hygiene.
// FIXME: Implement actual cross-crate hygiene.
let is_good_import =
binding.is_import() && !binding.is_ambiguity() && !ident.span.from_expansion();
@@ -1392,71 +1398,6 @@
reexports.push(Export { ident, res, span: binding.span, vis: binding.vis });
}
}
-
- if let NameBindingKind::Import { binding: orig_binding, import, .. } = binding.kind {
- if ns == TypeNS
- && orig_binding.is_variant()
- && !orig_binding.vis.is_at_least(binding.vis, &*this)
- {
- let msg = match import.kind {
- ImportKind::Single { .. } => {
- format!("variant `{}` is private and cannot be re-exported", ident)
- }
- ImportKind::Glob { .. } => {
- let msg = "enum is private and its variants \
- cannot be re-exported"
- .to_owned();
- let error_id = (
- DiagnosticMessageId::ErrorId(0), // no code?!
- Some(binding.span),
- msg.clone(),
- );
- let fresh =
- this.session.one_time_diagnostics.borrow_mut().insert(error_id);
- if !fresh {
- return;
- }
- msg
- }
- ref s => bug!("unexpected import kind {:?}", s),
- };
- let mut err = this.session.struct_span_err(binding.span, &msg);
-
- let imported_module = match import.imported_module.get() {
- Some(ModuleOrUniformRoot::Module(module)) => module,
- _ => bug!("module should exist"),
- };
- let parent_module = imported_module.parent.expect("parent should exist");
- let resolutions = this.resolutions(parent_module).borrow();
- let enum_path_segment_index = import.module_path.len() - 1;
- let enum_ident = import.module_path[enum_path_segment_index].ident;
-
- let key = this.new_key(enum_ident, TypeNS);
- let enum_resolution = resolutions.get(&key).expect("resolution should exist");
- let enum_span =
- enum_resolution.borrow().binding.expect("binding should exist").span;
- let enum_def_span = this.session.source_map().guess_head_span(enum_span);
- let enum_def_snippet = this
- .session
- .source_map()
- .span_to_snippet(enum_def_span)
- .expect("snippet should exist");
- // potentially need to strip extant `crate`/`pub(path)` for suggestion
- let after_vis_index = enum_def_snippet
- .find("enum")
- .expect("`enum` keyword should exist in snippet");
- let suggestion = format!("pub {}", &enum_def_snippet[after_vis_index..]);
-
- this.session.diag_span_suggestion_once(
- &mut err,
- DiagnosticMessageId::ErrorId(0),
- enum_def_span,
- "consider making the enum public",
- suggestion,
- );
- err.emit();
- }
- }
});
if !reexports.is_empty() {
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index e5d6aeb..af241ef 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -20,7 +20,7 @@
use rustc_hir::def::Namespace::{self, *};
use rustc_hir::def::{self, CtorKind, DefKind, PartialRes, PerNS};
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX};
-use rustc_hir::TraitCandidate;
+use rustc_hir::{PrimTy, TraitCandidate};
use rustc_middle::{bug, span_bug};
use rustc_session::lint;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
@@ -1023,7 +1023,7 @@
});
}
- ItemKind::Mod(_) | ItemKind::ForeignMod(_) => {
+ ItemKind::Mod(..) | ItemKind::ForeignMod(_) => {
self.with_scope(item.id, |this| {
visit::walk_item(this, item);
});
@@ -1926,7 +1926,7 @@
self.r.trait_map.insert(id, traits);
}
- if self.r.primitive_type_table.primitive_types.contains_key(&path[0].ident.name) {
+ if PrimTy::from_name(path[0].ident.name).is_some() {
let mut std_path = Vec::with_capacity(1 + path.len());
std_path.push(Segment::from_ident(Ident::with_dummy_span(sym::std)));
@@ -2120,13 +2120,9 @@
// The same fallback is used when `a` resolves to nothing.
PathResult::Module(ModuleOrUniformRoot::Module(_)) | PathResult::Failed { .. }
if (ns == TypeNS || path.len() > 1)
- && self
- .r
- .primitive_type_table
- .primitive_types
- .contains_key(&path[0].ident.name) =>
+ && PrimTy::from_name(path[0].ident.name).is_some() =>
{
- let prim = self.r.primitive_type_table.primitive_types[&path[0].ident.name];
+ let prim = PrimTy::from_name(path[0].ident.name).unwrap();
PartialRes::with_unresolved_segments(Res::PrimTy(prim), path.len() - 1)
}
PathResult::Module(ModuleOrUniformRoot::Module(module)) => {
@@ -2255,8 +2251,8 @@
visit::walk_expr(self, expr);
}
- ExprKind::Struct(ref path, ..) => {
- self.smart_resolve_path(expr.id, None, path, PathSource::Struct);
+ ExprKind::Struct(ref se) => {
+ self.smart_resolve_path(expr.id, None, &se.path, PathSource::Struct);
visit::walk_expr(self, expr);
}
@@ -2330,8 +2326,22 @@
ExprKind::Call(ref callee, ref arguments) => {
self.resolve_expr(callee, Some(expr));
- for argument in arguments {
- self.resolve_expr(argument, None);
+ let const_args = self.r.legacy_const_generic_args(callee).unwrap_or(Vec::new());
+ for (idx, argument) in arguments.iter().enumerate() {
+ // Constant arguments need to be treated as AnonConst since
+ // that is how they will be later lowered to HIR.
+ if const_args.contains(&idx) {
+ self.with_constant_rib(
+ IsRepeatExpr::No,
+ argument.is_potential_trivial_const_param(),
+ None,
+ |this| {
+ this.resolve_expr(argument, None);
+ },
+ );
+ } else {
+ self.resolve_expr(argument, None);
+ }
}
}
ExprKind::Type(ref type_expr, ref ty) => {
diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs
index 927535b..e85d78d 100644
--- a/compiler/rustc_resolve/src/late/diagnostics.rs
+++ b/compiler/rustc_resolve/src/late/diagnostics.rs
@@ -16,6 +16,7 @@
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::PrimTy;
use rustc_session::parse::feature_err;
+use rustc_span::edition::Edition;
use rustc_span::hygiene::MacroKind;
use rustc_span::lev_distance::find_best_match_for_name;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
@@ -133,7 +134,7 @@
let is_enum_variant = &|res| matches!(res, Res::Def(DefKind::Variant, _));
// Make the base error.
- let expected = source.descr_expected();
+ let mut expected = source.descr_expected();
let path_str = Segment::names_to_string(path);
let item_str = path.last().unwrap().ident;
let (base_msg, fallback_label, base_span, could_be_expr) = if let Some(res) = res {
@@ -166,6 +167,15 @@
let (mod_prefix, mod_str) = if path.len() == 1 {
(String::new(), "this scope".to_string())
} else if path.len() == 2 && path[0].ident.name == kw::PathRoot {
+ if self.r.session.edition() > Edition::Edition2015 {
+ // In edition 2018 onwards, the `::foo` syntax may only pull from the extern prelude
+ // which overrides all other expectations of item type
+ expected = "crate";
+ (String::new(), "the list of imported crates".to_string())
+ } else {
+ (String::new(), "the crate root".to_string())
+ }
+ } else if path.len() == 2 && path[0].ident.name == kw::Crate {
(String::new(), "the crate root".to_string())
} else {
let mod_path = &path[..path.len() - 1];
@@ -319,9 +329,13 @@
.collect::<Vec<_>>();
let crate_def_id = DefId::local(CRATE_DEF_INDEX);
if candidates.is_empty() && is_expected(Res::Def(DefKind::Enum, crate_def_id)) {
- let enum_candidates =
- self.r.lookup_import_candidates(ident, ns, &self.parent_scope, is_enum_variant);
-
+ let mut enum_candidates: Vec<_> = self
+ .r
+ .lookup_import_candidates(ident, ns, &self.parent_scope, is_enum_variant)
+ .into_iter()
+ .map(|suggestion| import_candidate_to_enum_paths(&suggestion))
+ .filter(|(_, enum_ty_path)| !enum_ty_path.starts_with("std::prelude::"))
+ .collect();
if !enum_candidates.is_empty() {
if let (PathSource::Type, Some(span)) =
(source, self.diagnostic_metadata.current_type_ascription.last())
@@ -340,10 +354,6 @@
}
}
- let mut enum_candidates = enum_candidates
- .iter()
- .map(|suggestion| import_candidate_to_enum_paths(&suggestion))
- .collect::<Vec<_>>();
enum_candidates.sort();
// Contextualize for E0412 "cannot find type", but don't belabor the point
@@ -363,19 +373,7 @@
err.span_suggestions(
span,
&msg,
- enum_candidates
- .into_iter()
- .map(|(_variant_path, enum_ty_path)| enum_ty_path)
- // Variants re-exported in prelude doesn't mean `prelude::v1` is the
- // type name!
- // FIXME: is there a more principled way to do this that
- // would work for other re-exports?
- .filter(|enum_ty_path| enum_ty_path != "std::prelude::v1")
- // Also write `Option` rather than `std::prelude::v1::Option`.
- .map(|enum_ty_path| {
- // FIXME #56861: DRY-er prelude filtering.
- enum_ty_path.trim_start_matches("std::prelude::v1::").to_owned()
- }),
+ enum_candidates.into_iter().map(|(_variant_path, enum_ty_path)| enum_ty_path),
Applicability::MachineApplicable,
);
}
@@ -565,6 +563,15 @@
}
}
}
+ } else if err_code == &rustc_errors::error_code!(E0412) {
+ if let Some(correct) = Self::likely_rust_type(path) {
+ err.span_suggestion(
+ span,
+ "perhaps you intended to use this type",
+ correct.to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
}
}
@@ -1105,7 +1112,7 @@
}
if let Some(items) = self.diagnostic_metadata.current_trait_assoc_items {
- for assoc_item in &items[..] {
+ for assoc_item in items {
if assoc_item.ident == ident {
return Some(match &assoc_item.kind {
ast::AssocItemKind::Const(..) => AssocSuggestion::AssocConst,
@@ -1212,8 +1219,8 @@
// Add primitive types to the mix
if filter_fn(Res::PrimTy(PrimTy::Bool)) {
names.extend(
- self.r.primitive_type_table.primitive_types.iter().map(|(name, prim_ty)| {
- TypoSuggestion::from_res(*name, Res::PrimTy(*prim_ty))
+ PrimTy::ALL.iter().map(|prim_ty| {
+ TypoSuggestion::from_res(prim_ty.name(), Res::PrimTy(*prim_ty))
}),
)
}
@@ -1245,6 +1252,23 @@
}
}
+ // Returns the name of the Rust type approximately corresponding to
+ // a type name in another programming language.
+ fn likely_rust_type(path: &[Segment]) -> Option<Symbol> {
+ let name = path[path.len() - 1].ident.as_str();
+ // Common Java types
+ Some(match &*name {
+ "byte" => sym::u8, // In Java, bytes are signed, but in practice one almost always wants unsigned bytes.
+ "short" => sym::i16,
+ "boolean" => sym::bool,
+ "int" => sym::i32,
+ "long" => sym::i64,
+ "float" => sym::f32,
+ "double" => sym::f64,
+ _ => return None,
+ })
+ }
+
/// Only used in a specific case of type ascription suggestions
fn get_colon_suggestion_span(&self, start: Span) -> Span {
let sm = self.r.session.source_map();
@@ -1657,6 +1681,7 @@
);
err.span_label(lifetime_ref.span, "undeclared lifetime");
let mut suggests_in_band = false;
+ let mut suggest_note = true;
for missing in &self.missing_named_lifetime_spots {
match missing {
MissingLifetimeSpot::Generics(generics) => {
@@ -1676,12 +1701,24 @@
suggests_in_band = true;
(generics.span, format!("<{}>", lifetime_ref))
};
- err.span_suggestion(
- span,
- &format!("consider introducing lifetime `{}` here", lifetime_ref),
- sugg,
- Applicability::MaybeIncorrect,
- );
+ if !span.from_expansion() {
+ err.span_suggestion(
+ span,
+ &format!("consider introducing lifetime `{}` here", lifetime_ref),
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ } else if suggest_note {
+ suggest_note = false; // Avoid displaying the same help multiple times.
+ err.span_label(
+ span,
+ &format!(
+ "lifetime `{}` is missing in item created through this procedural \
+ macro",
+ lifetime_ref,
+ ),
+ );
+ }
}
MissingLifetimeSpot::HigherRanked { span, span_type } => {
err.span_suggestion(
@@ -1696,7 +1733,7 @@
);
err.note(
"for more information on higher-ranked polymorphism, visit \
- https://doc.rust-lang.org/nomicon/hrtb.html",
+ https://doc.rust-lang.org/nomicon/hrtb.html",
);
}
_ => {}
@@ -1708,7 +1745,7 @@
{
err.help(
"if you want to experiment with in-band lifetime bindings, \
- add `#![feature(in_band_lifetimes)]` to the crate attributes",
+ add `#![feature(in_band_lifetimes)]` to the crate attributes",
);
}
err.emit();
diff --git a/compiler/rustc_resolve/src/late/lifetimes.rs b/compiler/rustc_resolve/src/late/lifetimes.rs
index 0bab339..2c61c09 100644
--- a/compiler/rustc_resolve/src/late/lifetimes.rs
+++ b/compiler/rustc_resolve/src/late/lifetimes.rs
@@ -540,7 +540,7 @@
self.missing_named_lifetime_spots.pop();
self.is_in_fn_syntax = was_in_fn_syntax;
}
- hir::TyKind::TraitObject(bounds, ref lifetime) => {
+ hir::TyKind::TraitObject(bounds, ref lifetime, _) => {
debug!("visit_ty: TraitObject(bounds={:?}, lifetime={:?})", bounds, lifetime);
for bound in bounds {
self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
@@ -587,7 +587,7 @@
// `type MyAnonTy<'b> = impl MyTrait<'b>;`
// ^ ^ this gets resolved in the scope of
// the opaque_ty generics
- let opaque_ty = self.tcx.hir().expect_item(item_id.id);
+ let opaque_ty = self.tcx.hir().item(item_id);
let (generics, bounds) = match opaque_ty.kind {
// Named opaque `impl Trait` types are reached via `TyKind::Path`.
// This arm is for `impl Trait` in the types of statics, constants and locals.
@@ -632,20 +632,32 @@
let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
// Ensure that the parent of the def is an item, not HRTB
let parent_id = self.tcx.hir().get_parent_node(hir_id);
- let parent_impl_id = hir::ImplItemId { hir_id: parent_id };
- let parent_trait_id = hir::TraitItemId { hir_id: parent_id };
- let krate = self.tcx.hir().krate();
-
- if !(krate.items.contains_key(&parent_id)
- || krate.impl_items.contains_key(&parent_impl_id)
- || krate.trait_items.contains_key(&parent_trait_id))
+ let parent_is_item = if let Some(parent_def_id) =
+ parent_id.as_owner()
{
+ let parent_item_id = hir::ItemId { def_id: parent_def_id };
+ let parent_impl_id = hir::ImplItemId { def_id: parent_def_id };
+ let parent_trait_id =
+ hir::TraitItemId { def_id: parent_def_id };
+ let parent_foreign_id =
+ hir::ForeignItemId { def_id: parent_def_id };
+ let krate = self.tcx.hir().krate();
+
+ krate.items.contains_key(&parent_item_id)
+ || krate.impl_items.contains_key(&parent_impl_id)
+ || krate.trait_items.contains_key(&parent_trait_id)
+ || krate.foreign_items.contains_key(&parent_foreign_id)
+ } else {
+ false
+ };
+
+ if !parent_is_item {
struct_span_err!(
self.tcx.sess,
lifetime.span,
E0657,
"`impl Trait` can only capture lifetimes \
- bound at the fn or impl level"
+ bound at the fn or impl level"
)
.emit();
self.uninsert_lifetime_on_error(lifetime, def.unwrap());
@@ -738,7 +750,7 @@
self.missing_named_lifetime_spots.push((&trait_item.generics).into());
let tcx = self.tcx;
self.visit_early_late(
- Some(tcx.hir().get_parent_item(trait_item.hir_id)),
+ Some(tcx.hir().get_parent_item(trait_item.hir_id())),
&sig.decl,
&trait_item.generics,
|this| intravisit::walk_trait_item(this, trait_item),
@@ -800,7 +812,7 @@
self.missing_named_lifetime_spots.push((&impl_item.generics).into());
let tcx = self.tcx;
self.visit_early_late(
- Some(tcx.hir().get_parent_item(impl_item.hir_id)),
+ Some(tcx.hir().get_parent_item(impl_item.hir_id())),
&sig.decl,
&impl_item.generics,
|this| intravisit::walk_impl_item(this, impl_item),
@@ -1227,7 +1239,8 @@
let result = object_lifetime_defaults_for_item(tcx, generics);
// Debugging aid.
- if tcx.sess.contains_name(&item.attrs, sym::rustc_object_lifetime_default) {
+ let attrs = tcx.hir().attrs(item.hir_id());
+ if tcx.sess.contains_name(attrs, sym::rustc_object_lifetime_default) {
let object_lifetime_default_reprs: String = result
.iter()
.map(|set| match *set {
@@ -1255,7 +1268,7 @@
tcx.sess.span_err(item.span, &object_lifetime_default_reprs);
}
- map.insert(item.hir_id, result);
+ map.insert(item.hir_id(), result);
}
_ => {}
}
@@ -1959,7 +1972,7 @@
// Therefore, we would compute `object_lifetime_defaults` to a
// vector like `['x, 'static]`. Note that the vector only
// includes type parameters.
- let object_lifetime_defaults = type_def_id.map_or(vec![], |def_id| {
+ let object_lifetime_defaults = type_def_id.map_or_else(Vec::new, |def_id| {
let in_body = {
let mut scope = self.scope;
loop {
@@ -2111,7 +2124,7 @@
self.tcx.hir().expect_item(self.tcx.hir().get_parent_item(parent)).kind
{
assoc_item_kind =
- trait_items.iter().find(|ti| ti.id.hir_id == parent).map(|ti| ti.kind);
+ trait_items.iter().find(|ti| ti.id.hir_id() == parent).map(|ti| ti.kind);
}
match *m {
hir::TraitFn::Required(_) => None,
@@ -2125,7 +2138,7 @@
{
impl_self = Some(self_ty);
assoc_item_kind =
- items.iter().find(|ii| ii.id.hir_id == parent).map(|ii| ii.kind);
+ items.iter().find(|ii| ii.id.hir_id() == parent).map(|ii| ii.kind);
}
Some(body)
}
@@ -2286,7 +2299,7 @@
self.outer_index.shift_in(1);
}
match ty.kind {
- hir::TyKind::TraitObject(bounds, ref lifetime) => {
+ hir::TyKind::TraitObject(bounds, ref lifetime, _) => {
for bound in bounds {
self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
}
diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs
index b19990e..ccfb5ff 100644
--- a/compiler/rustc_resolve/src/lib.rs
+++ b/compiler/rustc_resolve/src/lib.rs
@@ -11,6 +11,7 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(box_patterns)]
#![feature(bool_to_option)]
+#![feature(control_flow_enum)]
#![feature(crate_visibility_modifier)]
#![feature(format_args_capture)]
#![feature(nll)]
@@ -23,11 +24,13 @@
use rustc_arena::{DroplessArena, TypedArena};
use rustc_ast::node_id::NodeMap;
+use rustc_ast::ptr::P;
use rustc_ast::unwrap_or;
use rustc_ast::visit::{self, Visitor};
-use rustc_ast::{self as ast, FloatTy, IntTy, NodeId, UintTy};
+use rustc_ast::{self as ast, NodeId};
use rustc_ast::{Crate, CRATE_NODE_ID};
-use rustc_ast::{ItemKind, Path};
+use rustc_ast::{Expr, ExprKind, LitKind};
+use rustc_ast::{ItemKind, ModKind, Path};
use rustc_ast_lowering::ResolverAstLowering;
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
@@ -39,8 +42,7 @@
use rustc_hir::def::{self, CtorOf, DefKind, NonMacroAttrKind, PartialRes};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, CRATE_DEF_INDEX};
use rustc_hir::definitions::{DefKey, DefPathData, Definitions};
-use rustc_hir::PrimTy::{self, Bool, Char, Float, Int, Str, Uint};
-use rustc_hir::TraitCandidate;
+use rustc_hir::{PrimTy, TraitCandidate};
use rustc_index::vec::IndexVec;
use rustc_metadata::creader::{CStore, CrateLoader};
use rustc_middle::hir::exports::ExportMap;
@@ -60,6 +62,7 @@
use smallvec::{smallvec, SmallVec};
use std::cell::{Cell, RefCell};
use std::collections::BTreeSet;
+use std::ops::ControlFlow;
use std::{cmp, fmt, iter, ptr};
use tracing::debug;
@@ -153,6 +156,12 @@
}
}
+#[derive(Copy, Debug, Clone)]
+enum ImplTraitContext {
+ Existential,
+ Universal(LocalDefId),
+}
+
#[derive(Eq)]
struct BindingError {
name: Symbol,
@@ -284,28 +293,21 @@
impl UsePlacementFinder {
fn check(krate: &Crate, target_module: NodeId) -> (Option<Span>, bool) {
let mut finder = UsePlacementFinder { target_module, span: None, found_use: false };
- visit::walk_crate(&mut finder, krate);
+ if let ControlFlow::Continue(..) = finder.check_mod(&krate.items, CRATE_NODE_ID) {
+ visit::walk_crate(&mut finder, krate);
+ }
(finder.span, finder.found_use)
}
-}
-impl<'tcx> Visitor<'tcx> for UsePlacementFinder {
- fn visit_mod(
- &mut self,
- module: &'tcx ast::Mod,
- _: Span,
- _: &[ast::Attribute],
- node_id: NodeId,
- ) {
+ fn check_mod(&mut self, items: &[P<ast::Item>], node_id: NodeId) -> ControlFlow<()> {
if self.span.is_some() {
- return;
+ return ControlFlow::Break(());
}
if node_id != self.target_module {
- visit::walk_mod(self, module);
- return;
+ return ControlFlow::Continue(());
}
// find a use statement
- for item in &module.items {
+ for item in items {
match item.kind {
ItemKind::Use(..) => {
// don't suggest placing a use before the prelude
@@ -313,7 +315,7 @@
if !item.span.from_expansion() {
self.span = Some(item.span.shrink_to_lo());
self.found_use = true;
- return;
+ return ControlFlow::Break(());
}
}
// don't place use before extern crate
@@ -338,6 +340,18 @@
}
}
}
+ ControlFlow::Continue(())
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for UsePlacementFinder {
+ fn visit_item(&mut self, item: &'tcx ast::Item) {
+ if let ItemKind::Mod(_, ModKind::Loaded(items, ..)) = &item.kind {
+ if let ControlFlow::Break(..) = self.check_mod(items, item.id) {
+ return;
+ }
+ }
+ visit::walk_item(self, item);
}
}
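
The `UsePlacementFinder` rework above replaces the `visit_mod` override with a `check_mod` helper that returns `ControlFlow`, so the walk can stop as soon as a suitable `use` span has been found. A minimal, self-contained sketch of that early-exit pattern follows (with a made-up item type rather than the real AST; `ControlFlow` is stable on current toolchains, while the compiler at this point still needs the `control_flow_enum` feature gate added above):

    use std::ops::ControlFlow;

    // Stand-in for an AST item; only the one field this sketch needs.
    struct Item {
        is_use: bool,
    }

    struct Finder {
        found_use: bool,
    }

    impl Finder {
        // Break as soon as a `use` item is seen so the caller can stop
        // walking; Continue means "nothing found here, keep descending".
        fn check_mod(&mut self, items: &[Item]) -> ControlFlow<()> {
            for item in items {
                if item.is_use {
                    self.found_use = true;
                    return ControlFlow::Break(());
                }
            }
            ControlFlow::Continue(())
        }
    }

    fn main() {
        let items = [Item { is_use: false }, Item { is_use: true }];
        let mut finder = Finder { found_use: false };
        if let ControlFlow::Continue(()) = finder.check_mod(&items) {
            // Only descend into nested modules if nothing was found yet.
        }
        assert!(finder.found_use);
    }
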
@@ -751,27 +765,12 @@
fn is_possibly_imported_variant(&self) -> bool {
match self.kind {
NameBindingKind::Import { binding, .. } => binding.is_possibly_imported_variant(),
- _ => self.is_variant(),
- }
- }
-
- // We sometimes need to treat variants as `pub` for backwards compatibility.
- fn pseudo_vis(&self) -> ty::Visibility {
- if self.is_variant() && self.res().def_id().is_local() {
- ty::Visibility::Public
- } else {
- self.vis
- }
- }
-
- fn is_variant(&self) -> bool {
- matches!(
- self.kind,
NameBindingKind::Res(
Res::Def(DefKind::Variant | DefKind::Ctor(CtorOf::Variant, ..), _),
_,
- )
- )
+ ) => true,
+ NameBindingKind::Res(..) | NameBindingKind::Module(..) => false,
+ }
}
fn is_extern_crate(&self) -> bool {
@@ -834,39 +833,6 @@
}
}
-/// Interns the names of the primitive types.
-///
-/// All other types are defined somewhere and possibly imported, but the primitive ones need
-/// special handling, since they have no place of origin.
-struct PrimitiveTypeTable {
- primitive_types: FxHashMap<Symbol, PrimTy>,
-}
-
-impl PrimitiveTypeTable {
- fn new() -> PrimitiveTypeTable {
- let mut table = FxHashMap::default();
-
- table.insert(sym::bool, Bool);
- table.insert(sym::char, Char);
- table.insert(sym::f32, Float(FloatTy::F32));
- table.insert(sym::f64, Float(FloatTy::F64));
- table.insert(sym::isize, Int(IntTy::Isize));
- table.insert(sym::i8, Int(IntTy::I8));
- table.insert(sym::i16, Int(IntTy::I16));
- table.insert(sym::i32, Int(IntTy::I32));
- table.insert(sym::i64, Int(IntTy::I64));
- table.insert(sym::i128, Int(IntTy::I128));
- table.insert(sym::str, Str);
- table.insert(sym::usize, Uint(UintTy::Usize));
- table.insert(sym::u8, Uint(UintTy::U8));
- table.insert(sym::u16, Uint(UintTy::U16));
- table.insert(sym::u32, Uint(UintTy::U32));
- table.insert(sym::u64, Uint(UintTy::U64));
- table.insert(sym::u128, Uint(UintTy::U128));
- Self { primitive_types: table }
- }
-}
-
#[derive(Debug, Default, Clone)]
pub struct ExternPreludeEntry<'a> {
extern_crate_item: Option<&'a NameBinding<'a>>,
@@ -912,9 +878,6 @@
/// "self-confirming" import resolutions during import validation.
unusable_binding: Option<&'a NameBinding<'a>>,
- /// The idents for the primitive types.
- primitive_type_table: PrimitiveTypeTable,
-
/// Resolutions for nodes that have a single resolution.
partial_res_map: NodeMap<PartialRes>,
/// Resolutions for import nodes, which have multiple resolutions in different namespaces.
@@ -1004,6 +967,8 @@
output_macro_rules_scopes: FxHashMap<ExpnId, MacroRulesScopeRef<'a>>,
/// Helper attributes that are in scope for the given expansion.
helper_attrs: FxHashMap<ExpnId, Vec<Ident>>,
+ /// Resolutions for paths inside the `#[derive(...)]` attribute with the given `ExpnId`.
+ derive_resolutions: FxHashMap<ExpnId, Vec<(Lrc<SyntaxExtension>, ast::Path)>>,
/// Avoid duplicated errors for "name already defined".
name_already_seen: FxHashMap<Symbol, Span>,
@@ -1030,13 +995,16 @@
/// Indices of unnamed struct or variant fields with unresolved attributes.
placeholder_field_indices: FxHashMap<NodeId, usize>,
/// When collecting definitions from an AST fragment produced by a macro invocation `ExpnId`
- /// we know what parent node that fragment should be attached to thanks to this table.
- invocation_parents: FxHashMap<ExpnId, LocalDefId>,
+ /// we know what parent node that fragment should be attached to thanks to this table,
+ /// and how the `impl Trait` fragments were introduced.
+ invocation_parents: FxHashMap<ExpnId, (LocalDefId, ImplTraitContext)>,
next_disambiguator: FxHashMap<(LocalDefId, DefPathData), u32>,
/// Some way to know that we are in a *trait* impl in `visit_assoc_item`.
/// FIXME: Replace with a more general AST map (together with some other fields).
trait_impl_items: FxHashSet<LocalDefId>,
+
+ legacy_const_generic_args: FxHashMap<DefId, Option<Vec<usize>>>,
}
/// Nothing really interesting here; it just provides memory for the rest of the crate.
@@ -1118,6 +1086,10 @@
self.cstore().item_generics_num_lifetimes(def_id, sess)
}
+ fn legacy_const_generic_args(&mut self, expr: &Expr) -> Option<Vec<usize>> {
+ self.legacy_const_generic_args(expr)
+ }
+
fn get_partial_res(&mut self, id: NodeId) -> Option<PartialRes> {
self.partial_res_map.get(&id).cloned()
}
@@ -1240,7 +1212,7 @@
node_id_to_def_id.insert(CRATE_NODE_ID, root);
let mut invocation_parents = FxHashMap::default();
- invocation_parents.insert(ExpnId::root(), root);
+ invocation_parents.insert(ExpnId::root(), (root, ImplTraitContext::Existential));
let mut extern_prelude: FxHashMap<Ident, ExternPreludeEntry<'_>> = session
.opts
@@ -1284,8 +1256,6 @@
last_import_segment: false,
unusable_binding: None,
- primitive_type_table: PrimitiveTypeTable::new(),
-
partial_res_map: Default::default(),
import_res_map: Default::default(),
label_res_map: Default::default(),
@@ -1334,6 +1304,7 @@
invocation_parent_scopes: Default::default(),
output_macro_rules_scopes: Default::default(),
helper_attrs: Default::default(),
+ derive_resolutions: Default::default(),
local_macro_def_scopes: FxHashMap::default(),
name_already_seen: FxHashMap::default(),
potentially_unused_imports: Vec::new(),
@@ -1359,6 +1330,7 @@
invocation_parents,
next_disambiguator: Default::default(),
trait_impl_items: Default::default(),
+ legacy_const_generic_args: Default::default(),
};
let root_parent_scope = ParentScope::module(graph_root, &resolver);
@@ -1457,7 +1429,7 @@
fn macro_def(&self, mut ctxt: SyntaxContext) -> DefId {
loop {
- match ctxt.outer_expn().expn_data().macro_def_id {
+ match ctxt.outer_expn_data().macro_def_id {
Some(def_id) => return def_id,
None => ctxt.remove_mark(),
};
@@ -1994,9 +1966,9 @@
}
if ns == TypeNS {
- if let Some(prim_ty) = self.primitive_type_table.primitive_types.get(&ident.name) {
+ if let Some(prim_ty) = PrimTy::from_name(ident.name) {
let binding =
- (Res::PrimTy(*prim_ty), ty::Visibility::Public, DUMMY_SP, ExpnId::root())
+ (Res::PrimTy(prim_ty), ty::Visibility::Public, DUMMY_SP, ExpnId::root())
.to_name_binding(self.arenas);
return Some(LexicalScopeBinding::Item(binding));
}
@@ -2468,8 +2440,10 @@
Applicability::MaybeIncorrect,
)),
)
- } else {
+ } else if self.session.edition() == Edition::Edition2015 {
(format!("maybe a missing crate `{}`?", ident), None)
+ } else {
+ (format!("could not find `{}` in the crate root", ident), None)
}
} else if i == 0 {
if ident
@@ -2485,10 +2459,16 @@
}
} else {
let parent = path[i - 1].ident.name;
- let parent = if parent == kw::PathRoot {
- "crate root".to_owned()
- } else {
- format!("`{}`", parent)
+ let parent = match parent {
+ // ::foo is mounted at the crate root for 2015, and is the extern
+ // prelude for 2018+
+ kw::PathRoot if self.session.edition() > Edition::Edition2015 => {
+ "the list of imported crates".to_owned()
+ }
+ kw::PathRoot | kw::Crate => "the crate root".to_owned(),
+ _ => {
+ format!("`{}`", parent)
+ }
};
let mut msg = format!("could not find `{}` in {}", ident, parent);
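
The reworked message reflects where a leading `::` is rooted, as the comment above notes: at the crate root in the 2015 edition, and in the extern prelude from 2018 onward. A small illustration (not part of the diff) of the same path meaning different things by edition:

    mod foo {
        pub struct Bar;
    }

    // Edition 2015: `::foo` starts at the crate root, so this resolves to
    // the local module above.
    // Edition 2018+: `::foo` starts at the list of imported crates (the
    // extern prelude), so the same import fails unless an external crate
    // named `foo` exists, and the diagnostic above points there instead.
    use ::foo::Bar;

    fn main() {
        let _bar = Bar;
    }
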
@@ -3351,6 +3331,61 @@
pub fn opt_span(&self, def_id: DefId) -> Option<Span> {
if let Some(def_id) = def_id.as_local() { Some(self.def_id_to_span[def_id]) } else { None }
}
+
+ /// Checks if an expression refers to a function marked with
+ /// `#[rustc_legacy_const_generics]` and returns the argument index list
+ /// from the attribute.
+ pub fn legacy_const_generic_args(&mut self, expr: &Expr) -> Option<Vec<usize>> {
+ if let ExprKind::Path(None, path) = &expr.kind {
+ // Don't perform legacy const generics rewriting if the path already
+ // has generic arguments.
+ if path.segments.last().unwrap().args.is_some() {
+ return None;
+ }
+
+ let partial_res = self.partial_res_map.get(&expr.id)?;
+ if partial_res.unresolved_segments() != 0 {
+ return None;
+ }
+
+ if let Res::Def(def::DefKind::Fn, def_id) = partial_res.base_res() {
+ // We only support cross-crate argument rewriting. Uses
+ // within the same crate should be updated to use the new
+ // const generics style.
+ if def_id.is_local() {
+ return None;
+ }
+
+ if let Some(v) = self.legacy_const_generic_args.get(&def_id) {
+ return v.clone();
+ }
+
+ let parse_attrs = || {
+ let attrs = self.cstore().item_attrs(def_id, self.session);
+ let attr = attrs
+ .iter()
+ .find(|a| self.session.check_name(a, sym::rustc_legacy_const_generics))?;
+ let mut ret = vec![];
+ for meta in attr.meta_item_list()? {
+ match meta.literal()?.kind {
+ LitKind::Int(a, _) => {
+ ret.push(a as usize);
+ }
+ _ => panic!("invalid arg index"),
+ }
+ }
+ Some(ret)
+ };
+
+ // Cache the lookup to avoid parsing attributes for an item
+ // multiple times.
+ let ret = parse_attrs();
+ self.legacy_const_generic_args.insert(def_id, ret.clone());
+ return ret;
+ }
+ }
+ None
+ }
}
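
For context, `#[rustc_legacy_const_generics(...)]` is an internal, perma-unstable attribute whose arguments give the positions of call arguments that were converted into const generics; the table built here lets old-style call sites in other crates keep passing those values positionally. A hypothetical declaration is sketched below (the function and index are made up, and the attribute itself requires a nightly compiler with `rustc_attrs` enabled):

    #![feature(rustc_attrs)]

    // Old signature: `shift(x, n)`. Argument position 1 is now the const
    // generic `N`, which the attribute records.
    #[rustc_legacy_const_generics(1)]
    pub fn shift<const N: u32>(x: u32) -> u32 {
        x << N
    }

    fn main() {
        // Within the defining crate the new const-generic form is used;
        // old-style `shift(1, 3)` call sites are only rewritten when the
        // call crosses a crate boundary, per the `def_id.is_local()`
        // check above.
        assert_eq!(shift::<3>(1), 8);
    }
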
fn names_to_string(names: &[Symbol]) -> String {
diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs
index d0adee2..2e47d4c 100644
--- a/compiler/rustc_resolve/src/macros.rs
+++ b/compiler/rustc_resolve/src/macros.rs
@@ -6,7 +6,7 @@
use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BuiltinMacroState, Determinacy};
use crate::{CrateLint, ParentScope, ResolutionError, Resolver, Scope, ScopeSet, Weak};
use crate::{ModuleKind, ModuleOrUniformRoot, NameBinding, PathResult, Segment, ToNameBinding};
-use rustc_ast::{self as ast, NodeId};
+use rustc_ast::{self as ast, Inline, ItemKind, ModKind, NodeId};
use rustc_ast_lowering::ResolverAstLowering;
use rustc_ast_pretty::pprust;
use rustc_attr::StabilityLevel;
@@ -14,16 +14,18 @@
use rustc_data_structures::ptr_key::PtrKey;
use rustc_data_structures::sync::Lrc;
use rustc_errors::struct_span_err;
-use rustc_expand::base::{Indeterminate, InvocationRes, ResolverExpand};
-use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind};
+use rustc_expand::base::Annotatable;
+use rustc_expand::base::{Indeterminate, ResolverExpand, SyntaxExtension, SyntaxExtensionKind};
use rustc_expand::compile_declarative_macro;
use rustc_expand::expand::{AstFragment, Invocation, InvocationKind};
use rustc_feature::is_builtin_attr_name;
use rustc_hir::def::{self, DefKind, NonMacroAttrKind};
use rustc_hir::def_id;
+use rustc_hir::PrimTy;
use rustc_middle::middle::stability;
use rustc_middle::ty;
-use rustc_session::lint::builtin::{SOFT_UNSTABLE, UNUSED_MACROS};
+use rustc_session::lint::builtin::{LEGACY_DERIVE_HELPERS, SOFT_UNSTABLE, UNUSED_MACROS};
+use rustc_session::lint::BuiltinLintDiagnostics;
use rustc_session::parse::feature_err;
use rustc_session::Session;
use rustc_span::edition::Edition;
@@ -152,6 +154,26 @@
(registered_attrs, registered_tools)
}
+// Some feature gates for inner attributes are reported as lints for backward compatibility.
+fn soft_custom_inner_attributes_gate(path: &ast::Path, invoc: &Invocation) -> bool {
+ match &path.segments[..] {
+ // `#![test]`
+ [seg] if seg.ident.name == sym::test => return true,
+ // `#![rustfmt::skip]` on out-of-line modules
+ [seg1, seg2] if seg1.ident.name == sym::rustfmt && seg2.ident.name == sym::skip => {
+ if let InvocationKind::Attr { item, .. } = &invoc.kind {
+ if let Annotatable::Item(item) = item {
+ if let ItemKind::Mod(_, ModKind::Loaded(_, Inline::No, _)) = item.kind {
+ return true;
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ false
+}
+
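
The helper above keeps two historical spellings on the soft (lint-based) path rather than the hard `custom_inner_attributes` feature error: `#![test]`, and `#![rustfmt::skip]` at the top of an out-of-line module. An illustration of the second case, assuming the file is declared with `mod formatting_exempt;` in its parent module:

    // src/formatting_exempt.rs
    //
    // With the change above this inner attribute is reported through the
    // SOFT_UNSTABLE lint machinery instead of rejecting the module
    // outright, preserving code that already relied on it.
    #![rustfmt::skip]

    pub fn oddly_formatted(  x : i32 ) -> i32 {
        x  +  1
    }
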
impl<'a> ResolverExpand for Resolver<'a> {
fn next_node_id(&mut self) -> NodeId {
self.next_node_id()
@@ -226,7 +248,7 @@
invoc: &Invocation,
eager_expansion_root: ExpnId,
force: bool,
- ) -> Result<InvocationRes, Indeterminate> {
+ ) -> Result<Lrc<SyntaxExtension>, Indeterminate> {
let invoc_id = invoc.expansion_data.id;
let parent_scope = match self.invocation_parent_scopes.get(&invoc_id) {
Some(parent_scope) => *parent_scope,
@@ -243,65 +265,15 @@
}
};
- let (path, kind, inner_attr, derives, after_derive) = match invoc.kind {
- InvocationKind::Attr { ref attr, ref derives, after_derive, .. } => (
+ let (path, kind, inner_attr, derives) = match invoc.kind {
+ InvocationKind::Attr { ref attr, ref derives, .. } => (
&attr.get_normal_item().path,
MacroKind::Attr,
attr.style == ast::AttrStyle::Inner,
self.arenas.alloc_ast_paths(derives),
- after_derive,
),
- InvocationKind::Bang { ref mac, .. } => {
- (&mac.path, MacroKind::Bang, false, &[][..], false)
- }
- InvocationKind::Derive { ref path, .. } => {
- (path, MacroKind::Derive, false, &[][..], false)
- }
- InvocationKind::DeriveContainer { ref derives, .. } => {
- // Block expansion of the container until we resolve all derives in it.
- // This is required for two reasons:
- // - Derive helper attributes are in scope for the item to which the `#[derive]`
- // is applied, so they have to be produced by the container's expansion rather
- // than by individual derives.
- // - Derives in the container need to know whether one of them is a built-in `Copy`.
- // FIXME: Try to avoid repeated resolutions for derives here and in expansion.
- let mut exts = Vec::new();
- let mut helper_attrs = Vec::new();
- for path in derives {
- exts.push(
- match self.resolve_macro_path(
- path,
- Some(MacroKind::Derive),
- &parent_scope,
- true,
- force,
- ) {
- Ok((Some(ext), _)) => {
- let span = path
- .segments
- .last()
- .unwrap()
- .ident
- .span
- .normalize_to_macros_2_0();
- helper_attrs.extend(
- ext.helper_attrs.iter().map(|name| Ident::new(*name, span)),
- );
- if ext.builtin_name == Some(sym::Copy) {
- self.containers_deriving_copy.insert(invoc_id);
- }
- ext
- }
- Ok(_) | Err(Determinacy::Determined) => {
- self.dummy_ext(MacroKind::Derive)
- }
- Err(Determinacy::Undetermined) => return Err(Indeterminate),
- },
- )
- }
- self.helper_attrs.insert(invoc_id, helper_attrs);
- return Ok(InvocationRes::DeriveContainer(exts));
- }
+ InvocationKind::Bang { ref mac, .. } => (&mac.path, MacroKind::Bang, false, &[][..]),
+ InvocationKind::Derive { ref path, .. } => (path, MacroKind::Derive, false, &[][..]),
};
// Derives are not included when `invocations` are collected, so we have to add them here.
@@ -316,6 +288,7 @@
parent_scope,
node_id,
force,
+ soft_custom_inner_attributes_gate(path, invoc),
)?;
let span = invoc.span();
@@ -327,14 +300,41 @@
));
if let Res::Def(_, _) = res {
- if after_derive {
- self.session.span_err(span, "macro attributes must be placed before `#[derive]`");
- }
let normal_module_def_id = self.macro_def_scope(invoc_id).nearest_parent_mod;
self.definitions.add_parent_module_of_macro_def(invoc_id, normal_module_def_id);
+
+ // Gate macro attributes in `#[derive]` output.
+ if !self.session.features_untracked().macro_attributes_in_derive_output
+ && kind == MacroKind::Attr
+ && ext.builtin_name != Some(sym::derive)
+ {
+ let mut expn_id = parent_scope.expansion;
+ loop {
+ // Helper attr table is a quick way to determine whether the attr is `derive`.
+ if self.helper_attrs.contains_key(&expn_id) {
+ feature_err(
+ &self.session.parse_sess,
+ sym::macro_attributes_in_derive_output,
+ path.span,
+ "macro attributes in `#[derive]` output are unstable",
+ )
+ .emit();
+ break;
+ } else {
+ let expn_data = expn_id.expn_data();
+ match expn_data.kind {
+ ExpnKind::Root
+ | ExpnKind::Macro(MacroKind::Bang | MacroKind::Derive, _) => {
+ break;
+ }
+ _ => expn_id = expn_data.parent,
+ }
+ }
+ }
+ }
}
- Ok(InvocationRes::Single(ext))
+ Ok(ext)
}
fn check_unused_macros(&mut self) {
@@ -343,18 +343,75 @@
}
}
- fn lint_node_id(&mut self, expn_id: ExpnId) -> NodeId {
+ fn lint_node_id(&self, expn_id: ExpnId) -> NodeId {
// FIXME - make this more precise. This currently returns the NodeId of the
// nearest closing item - we should try to return the closest parent of the ExpnId
self.invocation_parents
.get(&expn_id)
- .map_or(ast::CRATE_NODE_ID, |id| self.def_id_to_node_id[*id])
+ .map_or(ast::CRATE_NODE_ID, |id| self.def_id_to_node_id[id.0])
}
fn has_derive_copy(&self, expn_id: ExpnId) -> bool {
self.containers_deriving_copy.contains(&expn_id)
}
+ fn resolve_derives(
+ &mut self,
+ expn_id: ExpnId,
+ derives: Vec<ast::Path>,
+ force: bool,
+ ) -> Result<(), Indeterminate> {
+ // Block expansion of the container until we resolve all derives in it.
+ // This is required for two reasons:
+ // - Derive helper attributes are in scope for the item to which the `#[derive]`
+ // is applied, so they have to be produced by the container's expansion rather
+ // than by individual derives.
+ // - Derives in the container need to know whether one of them is a built-in `Copy`.
+ // FIXME: Try to cache intermediate results to avoid resolving same derives multiple times.
+ let parent_scope = self.invocation_parent_scopes[&expn_id];
+ let mut exts = Vec::new();
+ let mut helper_attrs = Vec::new();
+ let mut has_derive_copy = false;
+ for path in derives {
+ exts.push((
+ match self.resolve_macro_path(
+ &path,
+ Some(MacroKind::Derive),
+ &parent_scope,
+ true,
+ force,
+ ) {
+ Ok((Some(ext), _)) => {
+ let span =
+ path.segments.last().unwrap().ident.span.normalize_to_macros_2_0();
+ helper_attrs
+ .extend(ext.helper_attrs.iter().map(|name| Ident::new(*name, span)));
+ has_derive_copy |= ext.builtin_name == Some(sym::Copy);
+ ext
+ }
+ Ok(_) | Err(Determinacy::Determined) => self.dummy_ext(MacroKind::Derive),
+ Err(Determinacy::Undetermined) => return Err(Indeterminate),
+ },
+ path,
+ ))
+ }
+ self.derive_resolutions.insert(expn_id, exts);
+ self.helper_attrs.insert(expn_id, helper_attrs);
+ // Mark this derive as having `Copy` either if it has `Copy` itself or if its parent derive
+ // has `Copy`, to support cases like `#[derive(Clone, Copy)] #[derive(Debug)]`.
+ if has_derive_copy || self.has_derive_copy(parent_scope.expansion) {
+ self.containers_deriving_copy.insert(expn_id);
+ }
+ Ok(())
+ }
+
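
As the comments spell out, every derive in the container must be resolved before the annotated item expands, both so the derives' helper attributes are in scope on that item and so the container knows whether `Copy` is among the derives, including across stacked `#[derive]` attributes. A compilable illustration of the stacked-derive case (the helper-attribute point is only sketched in the comment, using a hypothetical `Deserialize`/`serde` pair):

    // Helper attributes such as a hypothetical `#[serde(...)]` are only
    // known once `Deserialize` has been resolved to its SyntaxExtension,
    // which is why the container blocks until `resolve_derives` finishes.
    //
    // `Copy` knowledge is propagated across stacked derives, so the second
    // `#[derive]` below still "sees" that the type is `Copy`:
    #[derive(Clone, Copy)]
    #[derive(Debug)]
    struct Point {
        x: i32,
        y: i32,
    }

    fn main() {
        let p = Point { x: 1, y: 2 };
        let q = p; // `Copy`: `p` remains usable afterwards
        println!("{:?} {:?}", p, q);
    }
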
+ fn take_derive_resolutions(
+ &mut self,
+ expn_id: ExpnId,
+ ) -> Option<Vec<(Lrc<SyntaxExtension>, ast::Path)>> {
+ self.derive_resolutions.remove(&expn_id)
+ }
+
// The function that implements the resolution logic of `#[cfg_accessible(path)]`.
// Returns true if the path can certainly be resolved in one of three namespaces,
// returns false if the path certainly cannot be resolved in any of the three namespaces.
@@ -405,6 +462,7 @@
parent_scope: &ParentScope<'a>,
node_id: NodeId,
force: bool,
+ soft_custom_inner_attributes_gate: bool,
) -> Result<(Lrc<SyntaxExtension>, Res), Indeterminate> {
let (ext, res) = match self.resolve_macro_path(path, Some(kind), parent_scope, true, force)
{
@@ -472,7 +530,7 @@
Res::NonMacroAttr(..) => "custom inner attributes are unstable",
_ => unreachable!(),
};
- if path == &sym::test {
+ if soft_custom_inner_attributes_gate {
self.session.parse_sess.buffer_lint(SOFT_UNSTABLE, path.span, node_id, msg);
} else {
feature_err(&self.session.parse_sess, sym::custom_inner_attributes, path.span, msg)
@@ -796,12 +854,10 @@
}
result
}
- Scope::BuiltinTypes => {
- match this.primitive_type_table.primitive_types.get(&ident.name).cloned() {
- Some(prim_ty) => ok(Res::PrimTy(prim_ty), DUMMY_SP, this.arenas),
- None => Err(Determinacy::Determined),
- }
- }
+ Scope::BuiltinTypes => match PrimTy::from_name(ident.name) {
+ Some(prim_ty) => ok(Res::PrimTy(prim_ty), DUMMY_SP, this.arenas),
+ None => Err(Determinacy::Determined),
+ },
};
match result {
@@ -819,6 +875,8 @@
let is_builtin = |res| {
matches!(res, Res::NonMacroAttr(NonMacroAttrKind::Builtin(..)))
};
+ let derive_helper =
+ Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper);
let derive_helper_compat =
Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat);
@@ -827,7 +885,7 @@
} else if is_builtin(innermost_res) || is_builtin(res) {
Some(AmbiguityKind::BuiltinAttr)
} else if innermost_res == derive_helper_compat
- || res == derive_helper_compat
+ || res == derive_helper_compat && innermost_res != derive_helper
{
Some(AmbiguityKind::DeriveHelper)
} else if innermost_flags.contains(Flags::MACRO_RULES)
@@ -993,6 +1051,15 @@
let res = binding.res();
let seg = Segment::from_ident(ident);
check_consistency(self, &[seg], ident.span, kind, initial_res, res);
+ if res == Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat) {
+ self.lint_buffer.buffer_lint_with_diagnostic(
+ LEGACY_DERIVE_HELPERS,
+ self.lint_node_id(parent_scope.expansion),
+ ident.span,
+ "derive helper attribute is used before it is introduced",
+ BuiltinLintDiagnostics::LegacyDeriveHelpers(binding.span),
+ );
+ }
}
Err(..) => {
let expected = kind.descr_expected();
@@ -1079,7 +1146,7 @@
crate fn check_reserved_macro_name(&mut self, ident: Ident, res: Res) {
// Reserve some names that are not quite covered by the general check
// performed on `Resolver::builtin_attrs`.
- if ident.name == sym::cfg || ident.name == sym::cfg_attr || ident.name == sym::derive {
+ if ident.name == sym::cfg || ident.name == sym::cfg_attr {
let macro_kind = self.get_macro(res).map(|ext| ext.macro_kind());
if macro_kind.is_some() && sub_namespace_match(macro_kind, Some(MacroKind::Attr)) {
self.session.span_err(
diff --git a/compiler/rustc_save_analysis/src/dump_visitor.rs b/compiler/rustc_save_analysis/src/dump_visitor.rs
index 2834e7b..15435df 100644
--- a/compiler/rustc_save_analysis/src/dump_visitor.rs
+++ b/compiler/rustc_save_analysis/src/dump_visitor.rs
@@ -301,7 +301,7 @@
fn process_struct_field_def(
&mut self,
- field: &'tcx hir::StructField<'tcx>,
+ field: &'tcx hir::FieldDef<'tcx>,
parent_id: hir::HirId,
) {
let field_data = self.save_ctxt.get_field_data(field, parent_id);
@@ -373,14 +373,14 @@
body: hir::BodyId,
) {
let map = &self.tcx.hir();
- self.nest_typeck_results(map.local_def_id(item.hir_id), |v| {
+ self.nest_typeck_results(item.def_id, |v| {
let body = map.body(body);
if let Some(fn_data) = v.save_ctxt.get_item_data(item) {
down_cast_data!(fn_data, DefData, item.span);
v.process_formals(body.params, &fn_data.qualname);
- v.process_generic_params(ty_params, &fn_data.qualname, item.hir_id);
+ v.process_generic_params(ty_params, &fn_data.qualname, item.hir_id());
- v.dumper.dump_def(&access_from!(v.save_ctxt, item, item.hir_id), fn_data);
+ v.dumper.dump_def(&access_from!(v.save_ctxt, item, item.hir_id()), fn_data);
}
for arg in decl.inputs {
@@ -401,10 +401,10 @@
typ: &'tcx hir::Ty<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
) {
- self.nest_typeck_results(self.tcx.hir().local_def_id(item.hir_id), |v| {
+ self.nest_typeck_results(item.def_id, |v| {
if let Some(var_data) = v.save_ctxt.get_item_data(item) {
down_cast_data!(var_data, DefData, item.span);
- v.dumper.dump_def(&access_from!(v.save_ctxt, item, item.hir_id), var_data);
+ v.dumper.dump_def(&access_from!(v.save_ctxt, item, item.hir_id()), var_data);
}
v.visit_ty(&typ);
v.visit_expr(expr);
@@ -465,10 +465,7 @@
) {
debug!("process_struct {:?} {:?}", item, item.span);
let name = item.ident.to_string();
- let qualname = format!(
- "::{}",
- self.tcx.def_path_str(self.tcx.hir().local_def_id(item.hir_id).to_def_id())
- );
+ let qualname = format!("::{}", self.tcx.def_path_str(item.def_id.to_def_id()));
let kind = match item.kind {
hir::ItemKind::Struct(_, _) => DefKind::Struct,
@@ -499,11 +496,12 @@
if !self.span.filter_generated(item.ident.span) {
let span = self.span_from_span(item.ident.span);
+ let attrs = self.tcx.hir().attrs(item.hir_id());
self.dumper.dump_def(
- &access_from!(self.save_ctxt, item, item.hir_id),
+ &access_from!(self.save_ctxt, item, item.hir_id()),
Def {
kind,
- id: id_from_hir_id(item.hir_id, &self.save_ctxt),
+ id: id_from_def_id(item.def_id.to_def_id()),
span,
name,
qualname: qualname.clone(),
@@ -511,20 +509,20 @@
parent: None,
children: fields,
decl_id: None,
- docs: self.save_ctxt.docs_for_attrs(&item.attrs),
+ docs: self.save_ctxt.docs_for_attrs(attrs),
sig: sig::item_signature(item, &self.save_ctxt),
- attributes: lower_attributes(item.attrs.to_vec(), &self.save_ctxt),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
},
);
}
- self.nest_typeck_results(self.tcx.hir().local_def_id(item.hir_id), |v| {
+ self.nest_typeck_results(item.def_id, |v| {
for field in def.fields() {
- v.process_struct_field_def(field, item.hir_id);
+ v.process_struct_field_def(field, item.hir_id());
v.visit_ty(&field.ty);
}
- v.process_generic_params(ty_params, &qualname, item.hir_id);
+ v.process_generic_params(ty_params, &qualname, item.hir_id());
});
}
@@ -541,7 +539,7 @@
};
down_cast_data!(enum_data, DefData, item.span);
- let access = access_from!(self.save_ctxt, item, item.hir_id);
+ let access = access_from!(self.save_ctxt, item, item.hir_id());
for variant in enum_definition.variants {
let name = variant.ident.name.to_string();
@@ -556,7 +554,8 @@
if !self.span.filter_generated(name_span) {
let span = self.span_from_span(name_span);
let id = id_from_hir_id(variant.id, &self.save_ctxt);
- let parent = Some(id_from_hir_id(item.hir_id, &self.save_ctxt));
+ let parent = Some(id_from_def_id(item.def_id.to_def_id()));
+ let attrs = self.tcx.hir().attrs(variant.id);
self.dumper.dump_def(
&access,
@@ -570,12 +569,9 @@
parent,
children: vec![],
decl_id: None,
- docs: self.save_ctxt.docs_for_attrs(&variant.attrs),
+ docs: self.save_ctxt.docs_for_attrs(attrs),
sig: sig::variant_signature(variant, &self.save_ctxt),
- attributes: lower_attributes(
- variant.attrs.to_vec(),
- &self.save_ctxt,
- ),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
},
);
}
@@ -596,7 +592,8 @@
if !self.span.filter_generated(name_span) {
let span = self.span_from_span(name_span);
let id = id_from_hir_id(variant.id, &self.save_ctxt);
- let parent = Some(id_from_hir_id(item.hir_id, &self.save_ctxt));
+ let parent = Some(id_from_def_id(item.def_id.to_def_id()));
+ let attrs = self.tcx.hir().attrs(variant.id);
self.dumper.dump_def(
&access,
@@ -610,12 +607,9 @@
parent,
children: vec![],
decl_id: None,
- docs: self.save_ctxt.docs_for_attrs(&variant.attrs),
+ docs: self.save_ctxt.docs_for_attrs(attrs),
sig: sig::variant_signature(variant, &self.save_ctxt),
- attributes: lower_attributes(
- variant.attrs.to_vec(),
- &self.save_ctxt,
- ),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
},
);
}
@@ -627,7 +621,7 @@
self.visit_ty(field.ty);
}
}
- self.process_generic_params(ty_params, &enum_data.qualname, item.hir_id);
+ self.process_generic_params(ty_params, &enum_data.qualname, item.hir_id());
self.dumper.dump_def(&access, enum_data);
}
@@ -644,17 +638,14 @@
}
let map = &self.tcx.hir();
- self.nest_typeck_results(map.local_def_id(item.hir_id), |v| {
+ self.nest_typeck_results(item.def_id, |v| {
v.visit_ty(&impl_.self_ty);
if let Some(trait_ref) = &impl_.of_trait {
v.process_path(trait_ref.hir_ref_id, &hir::QPath::Resolved(None, &trait_ref.path));
}
- v.process_generic_params(&impl_.generics, "", item.hir_id);
+ v.process_generic_params(&impl_.generics, "", item.hir_id());
for impl_item in impl_.items {
- v.process_impl_item(
- map.impl_item(impl_item.id),
- map.local_def_id(item.hir_id).to_def_id(),
- );
+ v.process_impl_item(map.impl_item(impl_item.id), item.def_id.to_def_id());
}
});
}
@@ -667,10 +658,7 @@
methods: &'tcx [hir::TraitItemRef],
) {
let name = item.ident.to_string();
- let qualname = format!(
- "::{}",
- self.tcx.def_path_str(self.tcx.hir().local_def_id(item.hir_id).to_def_id())
- );
+ let qualname = format!("::{}", self.tcx.def_path_str(item.def_id.to_def_id()));
let mut val = name.clone();
if !generics.params.is_empty() {
val.push_str(&generic_params_to_string(generics.params));
@@ -680,12 +668,13 @@
val.push_str(&bounds_to_string(trait_refs));
}
if !self.span.filter_generated(item.ident.span) {
- let id = id_from_hir_id(item.hir_id, &self.save_ctxt);
+ let id = id_from_def_id(item.def_id.to_def_id());
let span = self.span_from_span(item.ident.span);
let children =
- methods.iter().map(|i| id_from_hir_id(i.id.hir_id, &self.save_ctxt)).collect();
+ methods.iter().map(|i| id_from_def_id(i.id.def_id.to_def_id())).collect();
+ let attrs = self.tcx.hir().attrs(item.hir_id());
self.dumper.dump_def(
- &access_from!(self.save_ctxt, item, item.hir_id),
+ &access_from!(self.save_ctxt, item, item.hir_id()),
Def {
kind: DefKind::Trait,
id,
@@ -696,9 +685,9 @@
parent: None,
children,
decl_id: None,
- docs: self.save_ctxt.docs_for_attrs(&item.attrs),
+ docs: self.save_ctxt.docs_for_attrs(attrs),
sig: sig::item_signature(item, &self.save_ctxt),
- attributes: lower_attributes(item.attrs.to_vec(), &self.save_ctxt),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
},
);
}
@@ -729,20 +718,17 @@
kind: RelationKind::SuperTrait,
span,
from: id_from_def_id(id),
- to: id_from_hir_id(item.hir_id, &self.save_ctxt),
+ to: id_from_def_id(item.def_id.to_def_id()),
});
}
}
}
// walk generics and methods
- self.process_generic_params(generics, &qualname, item.hir_id);
+ self.process_generic_params(generics, &qualname, item.hir_id());
for method in methods {
let map = &self.tcx.hir();
- self.process_trait_item(
- map.trait_item(method.id),
- map.local_def_id(item.hir_id).to_def_id(),
- )
+ self.process_trait_item(map.trait_item(method.id), item.def_id.to_def_id())
}
}
@@ -750,7 +736,7 @@
fn process_mod(&mut self, item: &'tcx hir::Item<'tcx>) {
if let Some(mod_data) = self.save_ctxt.get_item_data(item) {
down_cast_data!(mod_data, DefData, item.span);
- self.dumper.dump_def(&access_from!(self.save_ctxt, item, item.hir_id), mod_data);
+ self.dumper.dump_def(&access_from!(self.save_ctxt, item, item.hir_id()), mod_data);
}
}
@@ -807,7 +793,7 @@
&mut self,
ex: &'tcx hir::Expr<'tcx>,
path: &'tcx hir::QPath<'tcx>,
- fields: &'tcx [hir::Field<'tcx>],
+ fields: &'tcx [hir::ExprField<'tcx>],
variant: &'tcx ty::VariantDef,
rest: Option<&'tcx hir::Expr<'tcx>>,
) {
@@ -1010,14 +996,15 @@
hir::TraitItemKind::Const(ref ty, body) => {
let body = body.map(|b| &self.tcx.hir().body(b).value);
let respan = respan(vis_span, hir::VisibilityKind::Public);
+ let attrs = self.tcx.hir().attrs(trait_item.hir_id());
self.process_assoc_const(
- trait_item.hir_id,
+ trait_item.hir_id(),
trait_item.ident,
&ty,
body,
trait_id,
&respan,
- &trait_item.attrs,
+ attrs,
);
}
hir::TraitItemKind::Fn(ref sig, ref trait_fn) => {
@@ -1027,7 +1014,7 @@
self.process_method(
sig,
body,
- trait_item.hir_id,
+ trait_item.hir_id(),
trait_item.ident,
&trait_item.generics,
&respan,
@@ -1037,15 +1024,13 @@
hir::TraitItemKind::Type(ref bounds, ref default_ty) => {
// FIXME do something with _bounds (for type refs)
let name = trait_item.ident.name.to_string();
- let qualname = format!(
- "::{}",
- self.tcx
- .def_path_str(self.tcx.hir().local_def_id(trait_item.hir_id).to_def_id())
- );
+ let qualname =
+ format!("::{}", self.tcx.def_path_str(trait_item.def_id.to_def_id()));
if !self.span.filter_generated(trait_item.ident.span) {
let span = self.span_from_span(trait_item.ident.span);
- let id = id_from_hir_id(trait_item.hir_id, &self.save_ctxt);
+ let id = id_from_def_id(trait_item.def_id.to_def_id());
+ let attrs = self.tcx.hir().attrs(trait_item.hir_id());
self.dumper.dump_def(
&Access { public: true, reachable: true },
@@ -1059,18 +1044,15 @@
parent: Some(id_from_def_id(trait_id)),
children: vec![],
decl_id: None,
- docs: self.save_ctxt.docs_for_attrs(&trait_item.attrs),
+ docs: self.save_ctxt.docs_for_attrs(attrs),
sig: sig::assoc_type_signature(
- trait_item.hir_id,
+ trait_item.hir_id(),
trait_item.ident,
Some(bounds),
default_ty.as_ref().map(|ty| &**ty),
&self.save_ctxt,
),
- attributes: lower_attributes(
- trait_item.attrs.to_vec(),
- &self.save_ctxt,
- ),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
},
);
}
@@ -1087,21 +1069,22 @@
match impl_item.kind {
hir::ImplItemKind::Const(ref ty, body) => {
let body = self.tcx.hir().body(body);
+ let attrs = self.tcx.hir().attrs(impl_item.hir_id());
self.process_assoc_const(
- impl_item.hir_id,
+ impl_item.hir_id(),
impl_item.ident,
&ty,
Some(&body.value),
impl_id,
&impl_item.vis,
- &impl_item.attrs,
+ attrs,
);
}
hir::ImplItemKind::Fn(ref sig, body) => {
self.process_method(
sig,
Some(body),
- impl_item.hir_id,
+ impl_item.hir_id(),
impl_item.ident,
&impl_item.generics,
&impl_item.vis,
@@ -1130,9 +1113,10 @@
.module
.item_ids
.iter()
- .map(|i| id_from_hir_id(i.id, &self.save_ctxt))
+ .map(|i| id_from_def_id(i.def_id.to_def_id()))
.collect();
let span = self.span_from_span(krate.item.span);
+ let attrs = self.tcx.hir().attrs(id);
self.dumper.dump_def(
&Access { public: true, reachable: true },
@@ -1146,9 +1130,9 @@
children,
parent: None,
decl_id: None,
- docs: self.save_ctxt.docs_for_attrs(krate.item.attrs),
+ docs: self.save_ctxt.docs_for_attrs(attrs),
sig: None,
- attributes: lower_attributes(krate.item.attrs.to_owned(), &self.save_ctxt),
+ attributes: lower_attributes(attrs.to_owned(), &self.save_ctxt),
},
);
intravisit::walk_crate(self, krate);
@@ -1179,16 +1163,11 @@
hir::ItemKind::Use(path, hir::UseKind::Single) => {
let sub_span = path.segments.last().unwrap().ident.span;
if !self.span.filter_generated(sub_span) {
- let access = access_from!(self.save_ctxt, item, item.hir_id);
- let ref_id = self.lookup_def_id(item.hir_id).map(id_from_def_id);
+ let access = access_from!(self.save_ctxt, item, item.hir_id());
+ let ref_id = self.lookup_def_id(item.hir_id()).map(id_from_def_id);
let span = self.span_from_span(sub_span);
- let parent = self
- .save_ctxt
- .tcx
- .hir()
- .opt_local_def_id(item.hir_id)
- .and_then(|id| self.save_ctxt.tcx.parent(id.to_def_id()))
- .map(id_from_def_id);
+ let parent =
+ self.save_ctxt.tcx.parent(item.def_id.to_def_id()).map(id_from_def_id);
self.dumper.import(
&access,
Import {
@@ -1206,23 +1185,17 @@
}
hir::ItemKind::Use(path, hir::UseKind::Glob) => {
// Make a comma-separated list of names of imported modules.
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
- let names = self.tcx.names_imported_by_glob_use(def_id);
+ let names = self.tcx.names_imported_by_glob_use(item.def_id);
let names: Vec<_> = names.iter().map(|n| n.to_string()).collect();
// Otherwise it's a span with wrong macro expansion info, which
// we don't want to track anyway, since it's probably macro-internal `use`
if let Some(sub_span) = self.span.sub_span_of_star(item.span) {
if !self.span.filter_generated(item.span) {
- let access = access_from!(self.save_ctxt, item, item.hir_id);
+ let access = access_from!(self.save_ctxt, item, item.hir_id());
let span = self.span_from_span(sub_span);
- let parent = self
- .save_ctxt
- .tcx
- .hir()
- .opt_local_def_id(item.hir_id)
- .and_then(|id| self.save_ctxt.tcx.parent(id.to_def_id()))
- .map(id_from_def_id);
+ let parent =
+ self.save_ctxt.tcx.parent(item.def_id.to_def_id()).map(id_from_def_id);
self.dumper.import(
&access,
Import {
@@ -1243,13 +1216,8 @@
let name_span = item.ident.span;
if !self.span.filter_generated(name_span) {
let span = self.span_from_span(name_span);
- let parent = self
- .save_ctxt
- .tcx
- .hir()
- .opt_local_def_id(item.hir_id)
- .and_then(|id| self.save_ctxt.tcx.parent(id.to_def_id()))
- .map(id_from_def_id);
+ let parent =
+ self.save_ctxt.tcx.parent(item.def_id.to_def_id()).map(id_from_def_id);
self.dumper.import(
&Access { public: false, reachable: false },
Import {
@@ -1286,20 +1254,18 @@
}
hir::ItemKind::Mod(ref m) => {
self.process_mod(item);
- intravisit::walk_mod(self, m, item.hir_id);
+ intravisit::walk_mod(self, m, item.hir_id());
}
hir::ItemKind::TyAlias(ty, ref generics) => {
- let qualname = format!(
- "::{}",
- self.tcx.def_path_str(self.tcx.hir().local_def_id(item.hir_id).to_def_id())
- );
+ let qualname = format!("::{}", self.tcx.def_path_str(item.def_id.to_def_id()));
let value = ty_to_string(&ty);
if !self.span.filter_generated(item.ident.span) {
let span = self.span_from_span(item.ident.span);
- let id = id_from_hir_id(item.hir_id, &self.save_ctxt);
+ let id = id_from_def_id(item.def_id.to_def_id());
+ let attrs = self.tcx.hir().attrs(item.hir_id());
self.dumper.dump_def(
- &access_from!(self.save_ctxt, item, item.hir_id),
+ &access_from!(self.save_ctxt, item, item.hir_id()),
Def {
kind: DefKind::Type,
id,
@@ -1310,15 +1276,15 @@
parent: None,
children: vec![],
decl_id: None,
- docs: self.save_ctxt.docs_for_attrs(&item.attrs),
+ docs: self.save_ctxt.docs_for_attrs(attrs),
sig: sig::item_signature(item, &self.save_ctxt),
- attributes: lower_attributes(item.attrs.to_vec(), &self.save_ctxt),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
},
);
}
self.visit_ty(ty);
- self.process_generic_params(generics, &qualname, item.hir_id);
+ self.process_generic_params(generics, &qualname, item.hir_id());
}
_ => intravisit::walk_item(self, item),
}
@@ -1382,10 +1348,8 @@
});
}
hir::TyKind::OpaqueDef(item_id, _) => {
- let item = self.tcx.hir().item(item_id.id);
- self.nest_typeck_results(self.tcx.hir().local_def_id(item_id.id), |v| {
- v.visit_item(item)
- });
+ let item = self.tcx.hir().item(item_id);
+ self.nest_typeck_results(item_id.def_id, |v| v.visit_item(item));
}
_ => intravisit::walk_ty(self, t),
}
@@ -1485,14 +1449,14 @@
}
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
- let access = access_from!(self.save_ctxt, item, item.hir_id);
+ let access = access_from!(self.save_ctxt, item, item.hir_id());
match item.kind {
hir::ForeignItemKind::Fn(decl, _, ref generics) => {
if let Some(fn_data) = self.save_ctxt.get_extern_item_data(item) {
down_cast_data!(fn_data, DefData, item.span);
- self.process_generic_params(generics, &fn_data.qualname, item.hir_id);
+ self.process_generic_params(generics, &fn_data.qualname, item.hir_id());
self.dumper.dump_def(&access, fn_data);
}
diff --git a/compiler/rustc_save_analysis/src/lib.rs b/compiler/rustc_save_analysis/src/lib.rs
index 1291233..2acae29 100644
--- a/compiler/rustc_save_analysis/src/lib.rs
+++ b/compiler/rustc_save_analysis/src/lib.rs
@@ -137,8 +137,9 @@
}
pub fn get_extern_item_data(&self, item: &hir::ForeignItem<'_>) -> Option<Data> {
- let def_id = self.tcx.hir().local_def_id(item.hir_id).to_def_id();
+ let def_id = item.def_id.to_def_id();
let qualname = format!("::{}", self.tcx.def_path_str(def_id));
+ let attrs = self.tcx.hir().attrs(item.hir_id());
match item.kind {
hir::ForeignItemKind::Fn(ref decl, arg_names, ref generics) => {
filter!(self.span_utils, item.ident.span);
@@ -156,7 +157,7 @@
unsafety: hir::Unsafety::Unsafe,
// functions in extern block cannot be const
constness: hir::Constness::NotConst,
- abi: self.tcx.hir().get_foreign_abi(item.hir_id),
+ abi: self.tcx.hir().get_foreign_abi(item.hir_id()),
// functions in extern block cannot be async
asyncness: hir::IsAsync::NotAsync,
},
@@ -169,9 +170,9 @@
parent: None,
children: vec![],
decl_id: None,
- docs: self.docs_for_attrs(&item.attrs),
+ docs: self.docs_for_attrs(attrs),
sig: sig::foreign_item_signature(item, self),
- attributes: lower_attributes(item.attrs.to_vec(), self),
+ attributes: lower_attributes(attrs.to_vec(), self),
}))
}
hir::ForeignItemKind::Static(ref ty, _) => {
@@ -190,9 +191,9 @@
parent: None,
children: vec![],
decl_id: None,
- docs: self.docs_for_attrs(&item.attrs),
+ docs: self.docs_for_attrs(attrs),
sig: sig::foreign_item_signature(item, self),
- attributes: lower_attributes(item.attrs.to_vec(), self),
+ attributes: lower_attributes(attrs.to_vec(), self),
}))
}
// FIXME(plietar): needs a new DefKind in rls-data
@@ -201,7 +202,8 @@
}
pub fn get_item_data(&self, item: &hir::Item<'_>) -> Option<Data> {
- let def_id = self.tcx.hir().local_def_id(item.hir_id).to_def_id();
+ let def_id = item.def_id.to_def_id();
+ let attrs = self.tcx.hir().attrs(item.hir_id());
match item.kind {
hir::ItemKind::Fn(ref sig, ref generics, _) => {
let qualname = format!("::{}", self.tcx.def_path_str(def_id));
@@ -224,9 +226,9 @@
parent: None,
children: vec![],
decl_id: None,
- docs: self.docs_for_attrs(&item.attrs),
+ docs: self.docs_for_attrs(attrs),
sig: sig::item_signature(item, self),
- attributes: lower_attributes(item.attrs.to_vec(), self),
+ attributes: lower_attributes(attrs.to_vec(), self),
}))
}
hir::ItemKind::Static(ref typ, ..) => {
@@ -247,9 +249,9 @@
parent: None,
children: vec![],
decl_id: None,
- docs: self.docs_for_attrs(&item.attrs),
+ docs: self.docs_for_attrs(attrs),
sig: sig::item_signature(item, self),
- attributes: lower_attributes(item.attrs.to_vec(), self),
+ attributes: lower_attributes(attrs.to_vec(), self),
}))
}
hir::ItemKind::Const(ref typ, _) => {
@@ -269,9 +271,9 @@
parent: None,
children: vec![],
decl_id: None,
- docs: self.docs_for_attrs(&item.attrs),
+ docs: self.docs_for_attrs(attrs),
sig: sig::item_signature(item, self),
- attributes: lower_attributes(item.attrs.to_vec(), self),
+ attributes: lower_attributes(attrs.to_vec(), self),
}))
}
hir::ItemKind::Mod(ref m) => {
@@ -290,11 +292,15 @@
span: self.span_from_span(item.ident.span),
value: filename.to_string(),
parent: None,
- children: m.item_ids.iter().map(|i| id_from_hir_id(i.id, self)).collect(),
+ children: m
+ .item_ids
+ .iter()
+ .map(|i| id_from_def_id(i.def_id.to_def_id()))
+ .collect(),
decl_id: None,
- docs: self.docs_for_attrs(&item.attrs),
+ docs: self.docs_for_attrs(attrs),
sig: sig::item_signature(item, self),
- attributes: lower_attributes(item.attrs.to_vec(), self),
+ attributes: lower_attributes(attrs.to_vec(), self),
}))
}
hir::ItemKind::Enum(ref def, ref generics) => {
@@ -313,9 +319,9 @@
parent: None,
children: def.variants.iter().map(|v| id_from_hir_id(v.id, self)).collect(),
decl_id: None,
- docs: self.docs_for_attrs(&item.attrs),
+ docs: self.docs_for_attrs(attrs),
sig: sig::item_signature(item, self),
- attributes: lower_attributes(item.attrs.to_vec(), self),
+ attributes: lower_attributes(attrs.to_vec(), self),
}))
}
hir::ItemKind::Impl(hir::Impl { ref of_trait, ref self_ty, ref items, .. }) => {
@@ -354,7 +360,7 @@
parent: None,
children: items
.iter()
- .map(|i| id_from_hir_id(i.id.hir_id, self))
+ .map(|i| id_from_def_id(i.id.def_id.to_def_id()))
.collect(),
docs: String::new(),
sig: None,
@@ -373,7 +379,7 @@
}
}
- pub fn get_field_data(&self, field: &hir::StructField<'_>, scope: hir::HirId) -> Option<Def> {
+ pub fn get_field_data(&self, field: &hir::FieldDef<'_>, scope: hir::HirId) -> Option<Def> {
let name = field.ident.to_string();
let scope_def_id = self.tcx.hir().local_def_id(scope).to_def_id();
let qualname = format!("::{}::{}", self.tcx.def_path_str(scope_def_id), field.ident);
@@ -383,6 +389,7 @@
let id = id_from_def_id(field_def_id);
let span = self.span_from_span(field.ident.span);
+ let attrs = self.tcx.hir().attrs(field.hir_id);
Some(Def {
kind: DefKind::Field,
@@ -394,9 +401,9 @@
parent: Some(id_from_def_id(scope_def_id)),
children: vec![],
decl_id: None,
- docs: self.docs_for_attrs(&field.attrs),
+ docs: self.docs_for_attrs(attrs),
sig: sig::field_signature(field, self),
- attributes: lower_attributes(field.attrs.to_vec(), self),
+ attributes: lower_attributes(attrs.to_vec(), self),
})
}
@@ -420,9 +427,9 @@
let trait_id = self.tcx.trait_id_of_impl(impl_id);
let mut docs = String::new();
let mut attrs = vec![];
- if let Some(Node::ImplItem(item)) = hir.find(hir_id) {
- docs = self.docs_for_attrs(&item.attrs);
- attrs = item.attrs.to_vec();
+ if let Some(Node::ImplItem(_)) = hir.find(hir_id) {
+ attrs = self.tcx.hir().attrs(hir_id).to_vec();
+ docs = self.docs_for_attrs(&attrs);
}
let mut decl_id = None;
@@ -466,9 +473,9 @@
let mut docs = String::new();
let mut attrs = vec![];
- if let Some(Node::TraitItem(item)) = self.tcx.hir().find(hir_id) {
- docs = self.docs_for_attrs(&item.attrs);
- attrs = item.attrs.to_vec();
+ if let Some(Node::TraitItem(_)) = self.tcx.hir().find(hir_id) {
+ attrs = self.tcx.hir().attrs(hir_id).to_vec();
+ docs = self.docs_for_attrs(&attrs);
}
(
@@ -762,7 +769,7 @@
pub fn get_field_ref_data(
&self,
- field_ref: &hir::Field<'_>,
+ field_ref: &hir::ExprField<'_>,
variant: &ty::VariantDef,
) -> Option<Ref> {
filter!(self.span_utils, field_ref.ident.span);
diff --git a/compiler/rustc_save_analysis/src/sig.rs b/compiler/rustc_save_analysis/src/sig.rs
index 8ada7e3..53150a9 100644
--- a/compiler/rustc_save_analysis/src/sig.rs
+++ b/compiler/rustc_save_analysis/src/sig.rs
@@ -55,7 +55,7 @@
/// Signature for a struct or tuple field declaration.
/// Does not include a trailing comma.
-pub fn field_signature(field: &hir::StructField<'_>, scx: &SaveContext<'_>) -> Option<Signature> {
+pub fn field_signature(field: &hir::FieldDef<'_>, scx: &SaveContext<'_>) -> Option<Signature> {
if !scx.config.signatures {
return None;
}
@@ -317,8 +317,8 @@
Ok(replace_text(nested_ty, text))
}
hir::TyKind::OpaqueDef(item_id, _) => {
- let item = scx.tcx.hir().item(item_id.id);
- item.make(offset, Some(item_id.id), scx)
+ let item = scx.tcx.hir().item(item_id);
+ item.make(offset, Some(item_id.hir_id()), scx)
}
hir::TyKind::Typeof(_) | hir::TyKind::Infer | hir::TyKind::Err => Err("Ty"),
}
@@ -327,7 +327,7 @@
impl<'hir> Sig for hir::Item<'hir> {
fn make(&self, offset: usize, _parent_id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
- let id = Some(self.hir_id);
+ let id = Some(self.hir_id());
match self.kind {
hir::ItemKind::Static(ref ty, m, ref body) => {
@@ -337,7 +337,7 @@
}
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_hir_id(self.hir_id, scx),
+ id: id_from_def_id(self.def_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
@@ -359,7 +359,7 @@
let mut text = "const ".to_owned();
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_hir_id(self.hir_id, scx),
+ id: id_from_def_id(self.def_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
@@ -391,7 +391,7 @@
text.push_str("fn ");
let mut sig =
- name_and_generics(text, offset, generics, self.hir_id, self.ident, scx)?;
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
sig.text.push('(');
for i in decl.inputs {
@@ -420,7 +420,7 @@
let mut text = "mod ".to_owned();
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_hir_id(self.hir_id, scx),
+ id: id_from_def_id(self.def_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
@@ -433,7 +433,7 @@
hir::ItemKind::TyAlias(ref ty, ref generics) => {
let text = "type ".to_owned();
let mut sig =
- name_and_generics(text, offset, generics, self.hir_id, self.ident, scx)?;
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
sig.text.push_str(" = ");
let ty = ty.make(offset + sig.text.len(), id, scx)?;
@@ -445,21 +445,21 @@
hir::ItemKind::Enum(_, ref generics) => {
let text = "enum ".to_owned();
let mut sig =
- name_and_generics(text, offset, generics, self.hir_id, self.ident, scx)?;
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
sig.text.push_str(" {}");
Ok(sig)
}
hir::ItemKind::Struct(_, ref generics) => {
let text = "struct ".to_owned();
let mut sig =
- name_and_generics(text, offset, generics, self.hir_id, self.ident, scx)?;
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
sig.text.push_str(" {}");
Ok(sig)
}
hir::ItemKind::Union(_, ref generics) => {
let text = "union ".to_owned();
let mut sig =
- name_and_generics(text, offset, generics, self.hir_id, self.ident, scx)?;
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
sig.text.push_str(" {}");
Ok(sig)
}
@@ -475,7 +475,7 @@
}
text.push_str("trait ");
let mut sig =
- name_and_generics(text, offset, generics, self.hir_id, self.ident, scx)?;
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
if !bounds.is_empty() {
sig.text.push_str(": ");
@@ -490,7 +490,7 @@
let mut text = String::new();
text.push_str("trait ");
let mut sig =
- name_and_generics(text, offset, generics, self.hir_id, self.ident, scx)?;
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
if !bounds.is_empty() {
sig.text.push_str(" = ");
@@ -655,7 +655,7 @@
}
}
-impl<'hir> Sig for hir::StructField<'hir> {
+impl<'hir> Sig for hir::FieldDef<'hir> {
fn make(&self, offset: usize, _parent_id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
let mut text = String::new();
@@ -736,14 +736,14 @@
impl<'hir> Sig for hir::ForeignItem<'hir> {
fn make(&self, offset: usize, _parent_id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
- let id = Some(self.hir_id);
+ let id = Some(self.hir_id());
match self.kind {
hir::ForeignItemKind::Fn(decl, _, ref generics) => {
let mut text = String::new();
text.push_str("fn ");
let mut sig =
- name_and_generics(text, offset, generics, self.hir_id, self.ident, scx)?;
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
sig.text.push('(');
for i in decl.inputs {
@@ -774,7 +774,7 @@
}
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_hir_id(self.hir_id, scx),
+ id: id_from_def_id(self.def_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
@@ -790,7 +790,7 @@
let mut text = "type ".to_owned();
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_hir_id(self.hir_id, scx),
+ id: id_from_def_id(self.def_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
diff --git a/compiler/rustc_serialize/Cargo.toml b/compiler/rustc_serialize/Cargo.toml
index 16c5dff..05fc6a4 100644
--- a/compiler/rustc_serialize/Cargo.toml
+++ b/compiler/rustc_serialize/Cargo.toml
@@ -6,7 +6,7 @@
[dependencies]
indexmap = "1"
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
[dev-dependencies]
rustc_macros = { path = "../rustc_macros" }
diff --git a/compiler/rustc_serialize/src/lib.rs b/compiler/rustc_serialize/src/lib.rs
index 53c3adc..e439ddc 100644
--- a/compiler/rustc_serialize/src/lib.rs
+++ b/compiler/rustc_serialize/src/lib.rs
@@ -13,7 +13,6 @@
#![feature(never_type)]
#![feature(nll)]
#![feature(associated_type_bounds)]
-#![cfg_attr(bootstrap, feature(min_const_generics))]
#![feature(min_specialization)]
#![feature(vec_spare_capacity)]
#![feature(core_intrinsics)]
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index a6d4dcb..85448b7 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -15,6 +15,8 @@
use rustc_target::abi::{Align, TargetDataLayout};
use rustc_target::spec::{SplitDebuginfo, Target, TargetTriple};
+use rustc_serialize::json;
+
use crate::parse::CrateConfig;
use rustc_feature::UnstableFeatures;
use rustc_span::edition::{Edition, DEFAULT_EDITION, EDITION_NAME_LIST};
@@ -41,6 +43,7 @@
const LEAK = 1 << 1;
const MEMORY = 1 << 2;
const THREAD = 1 << 3;
+ const HWADDRESS = 1 << 4;
}
}
@@ -54,6 +57,7 @@
SanitizerSet::LEAK => "leak",
SanitizerSet::MEMORY => "memory",
SanitizerSet::THREAD => "thread",
+ SanitizerSet::HWADDRESS => "hwaddress",
_ => panic!("unrecognized sanitizer {:?}", s),
};
if !first {
@@ -71,12 +75,18 @@
type IntoIter = std::vec::IntoIter<SanitizerSet>;
fn into_iter(self) -> Self::IntoIter {
- [SanitizerSet::ADDRESS, SanitizerSet::LEAK, SanitizerSet::MEMORY, SanitizerSet::THREAD]
- .iter()
- .copied()
- .filter(|&s| self.contains(s))
- .collect::<Vec<_>>()
- .into_iter()
+ [
+ SanitizerSet::ADDRESS,
+ SanitizerSet::LEAK,
+ SanitizerSet::MEMORY,
+ SanitizerSet::THREAD,
+ SanitizerSet::HWADDRESS,
+ ]
+ .iter()
+ .copied()
+ .filter(|&s| self.contains(s))
+ .collect::<Vec<_>>()
+ .into_iter()
}
}
@@ -408,6 +418,9 @@
#[derive(Clone)]
pub struct Externs(BTreeMap<String, ExternEntry>);
+#[derive(Clone)]
+pub struct ExternDepSpecs(BTreeMap<String, ExternDepSpec>);
+
#[derive(Clone, Debug)]
pub struct ExternEntry {
pub location: ExternLocation,
@@ -439,6 +452,27 @@
ExactPaths(BTreeSet<CanonicalizedPath>),
}
+/// Supplied source location of a dependency - for example in a build specification
+/// file like Cargo.toml. We support several syntaxes: if it makes sense to reference
+/// a file and line, then the build system can specify that. On the other hand, it may
+/// make more sense to have an arbitrary raw string.
+#[derive(Clone, PartialEq)]
+pub enum ExternDepSpec {
+ /// Raw string
+ Raw(String),
+ /// Raw data in json format
+ Json(json::Json),
+}
+
+impl<'a> From<&'a ExternDepSpec> for rustc_lint_defs::ExternDepSpec {
+ fn from(from: &'a ExternDepSpec) -> Self {
+ match from {
+ ExternDepSpec::Raw(s) => rustc_lint_defs::ExternDepSpec::Raw(s.clone()),
+ ExternDepSpec::Json(json) => rustc_lint_defs::ExternDepSpec::Json(json.clone()),
+ }
+ }
+}
+
impl Externs {
pub fn new(data: BTreeMap<String, ExternEntry>) -> Externs {
Externs(data)
@@ -466,6 +500,25 @@
}
}
+impl ExternDepSpecs {
+ pub fn new(data: BTreeMap<String, ExternDepSpec>) -> ExternDepSpecs {
+ ExternDepSpecs(data)
+ }
+
+ pub fn get(&self, key: &str) -> Option<&ExternDepSpec> {
+ self.0.get(key)
+ }
+}
+
+impl fmt::Display for ExternDepSpec {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ ExternDepSpec::Raw(raw) => fmt.write_str(raw),
+ ExternDepSpec::Json(json) => json::as_json(json).fmt(fmt),
+ }
+ }
+}
+
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PrintRequest {
FileNames,
@@ -614,17 +667,6 @@
path
}
- /// Returns the name of the Split DWARF file - this can differ depending on which Split DWARF
- /// mode is being used, which is the logic that this function is intended to encapsulate.
- pub fn split_dwarf_filename(
- &self,
- split_debuginfo_kind: SplitDebuginfo,
- cgu_name: Option<&str>,
- ) -> Option<PathBuf> {
- self.split_dwarf_path(split_debuginfo_kind, cgu_name)
- .map(|path| path.strip_prefix(&self.out_directory).unwrap_or(&path).to_path_buf())
- }
-
/// Returns the path for the Split DWARF file - this can differ depending on which Split DWARF
/// mode is being used, which is the logic that this function is intended to encapsulate.
pub fn split_dwarf_path(
@@ -679,6 +721,7 @@
cg: basic_codegen_options(),
error_format: ErrorOutputType::default(),
externs: Externs(BTreeMap::new()),
+ extern_dep_specs: ExternDepSpecs(BTreeMap::new()),
crate_name: None,
alt_std_name: None,
libs: Vec::new(),
@@ -1105,6 +1148,12 @@
"Specify where an external rust library is located",
"NAME[=PATH]",
),
+ opt::multi_s(
+ "",
+ "extern-location",
+ "Location where an external crate dependency is specified",
+ "NAME=LOCATION",
+ ),
opt::opt_s("", "sysroot", "Override the system root", "PATH"),
opt::multi("Z", "", "Set internal debugging options", "FLAG"),
opt::opt_s(
@@ -1487,7 +1536,7 @@
early_error(error_format, &format!("target file {:?} does not exist", path))
})
}
- Some(target) => TargetTriple::from_alias(target),
+ Some(target) => TargetTriple::TargetTriple(target),
_ => TargetTriple::from_triple(host_triple()),
}
}
@@ -1727,6 +1776,68 @@
Externs(externs)
}
+fn parse_extern_dep_specs(
+ matches: &getopts::Matches,
+ debugging_opts: &DebuggingOptions,
+ error_format: ErrorOutputType,
+) -> ExternDepSpecs {
+ let is_unstable_enabled = debugging_opts.unstable_options;
+ let mut map = BTreeMap::new();
+
+ for arg in matches.opt_strs("extern-location") {
+ if !is_unstable_enabled {
+ early_error(
+ error_format,
+ "`--extern-location` option is unstable: set `-Z unstable-options`",
+ );
+ }
+
+ let mut parts = arg.splitn(2, '=');
+ let name = parts.next().unwrap_or_else(|| {
+ early_error(error_format, "`--extern-location` value must not be empty")
+ });
+ let loc = parts.next().unwrap_or_else(|| {
+ early_error(
+ error_format,
+ &format!("`--extern-location`: specify location for extern crate `{}`", name),
+ )
+ });
+
+ let locparts: Vec<_> = loc.split(":").collect();
+ let spec = match &locparts[..] {
+ ["raw", ..] => {
+ // Re-split on the first `:` only, since the payload may itself contain `:`
+ let raw = loc.splitn(2, ':').nth(1).unwrap_or_else(|| {
+ early_error(error_format, "`--extern-location`: missing `raw` location")
+ });
+ ExternDepSpec::Raw(raw.to_string())
+ }
+ ["json", ..] => {
+ // Re-split on the first `:` only, since the payload may itself contain `:`
+ let raw = loc.splitn(2, ':').nth(1).unwrap_or_else(|| {
+ early_error(error_format, "`--extern-location`: missing `json` location")
+ });
+ let json = json::from_str(raw).unwrap_or_else(|_| {
+ early_error(
+ error_format,
+ &format!("`--extern-location`: malformed json location `{}`", raw),
+ )
+ });
+ ExternDepSpec::Json(json)
+ }
+ [bad, ..] => early_error(
+ error_format,
+ &format!("unknown location type `{}`: use `raw` or `json`", bad),
+ ),
+ [] => early_error(error_format, "missing location specification"),
+ };
+
+ map.insert(name.to_string(), spec);
+ }
+
+ ExternDepSpecs::new(map)
+}
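For illustration, a standalone sketch of the `NAME=KIND:VALUE` shape that `parse_extern_dep_specs` accepts (gated behind `-Z unstable-options`). The crate name `serde` and the JSON payload below are made-up examples, and the sketch uses `split_once` where the parser above uses `splitn(2, ..)`, which is equivalent for this purpose.

    // Mirrors the splitting done by parse_extern_dep_specs, without getopts or error handling.
    fn split_extern_location(arg: &str) -> Option<(&str, &str, &str)> {
        let (name, loc) = arg.split_once('=')?;   // crate name vs. location spec
        let (kind, value) = loc.split_once(':')?; // "raw" or "json" vs. payload
        Some((name, kind, value))
    }

    fn main() {
        let arg = r#"serde=json:{"file":"Cargo.toml","line":12}"#;
        assert_eq!(
            split_extern_location(arg),
            Some(("serde", "json", r#"{"file":"Cargo.toml","line":12}"#))
        );
    }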
+
fn parse_remap_path_prefix(
matches: &getopts::Matches,
error_format: ErrorOutputType,
@@ -1774,7 +1885,12 @@
check_thread_count(&debugging_opts, error_format);
- let incremental = cg.incremental.as_ref().map(PathBuf::from);
+ let incremental =
+ if std::env::var_os("RUSTC_FORCE_INCREMENTAL").map(|v| v == "1").unwrap_or(false) {
+ cg.incremental.as_ref().map(PathBuf::from)
+ } else {
+ None
+ };
if debugging_opts.profile && incremental.is_some() {
early_error(
@@ -1826,23 +1942,6 @@
}
Some(SymbolManglingVersion::V0) => {}
}
-
- if debugging_opts.mir_opt_level > 1 {
- // Functions inlined during MIR transform can, at best, make it impossible to
- // effectively cover inlined functions, and, at worst, break coverage map generation
- // during LLVM codegen. For example, function counter IDs are only unique within a
- // function. Inlining after these counters are injected can produce duplicate counters,
- // resulting in an invalid coverage map (and ICE); so this option combination is not
- // allowed.
- early_warn(
- error_format,
- &format!(
- "`-Z mir-opt-level={}` (or any level > 1) enables function inlining, which \
- is incompatible with `-Z instrument-coverage`. Inlining will be disabled.",
- debugging_opts.mir_opt_level,
- ),
- );
- }
}
if let Ok(graphviz_font) = std::env::var("RUSTC_GRAPHVIZ_FONT") {
@@ -1888,6 +1987,7 @@
}
let externs = parse_externs(matches, &debugging_opts, error_format);
+ let extern_dep_specs = parse_extern_dep_specs(matches, &debugging_opts, error_format);
let crate_name = matches.opt_str("crate-name");
@@ -1924,6 +2024,7 @@
error_format,
externs,
unstable_features: UnstableFeatures::from_environment(crate_name.as_deref()),
+ extern_dep_specs,
crate_name,
alt_std_name: None,
libs,
@@ -1944,40 +2045,24 @@
debugging_opts: &DebuggingOptions,
efmt: ErrorOutputType,
) -> Option<PpMode> {
- let pretty = if debugging_opts.unstable_options {
- matches.opt_default("pretty", "normal").map(|a| {
- // stable pretty-print variants only
- parse_pretty_inner(efmt, &a, false)
- })
- } else {
- None
- };
-
- return if pretty.is_none() {
- debugging_opts.unpretty.as_ref().map(|a| {
- // extended with unstable pretty-print variants
- parse_pretty_inner(efmt, &a, true)
- })
- } else {
- pretty
- };
-
fn parse_pretty_inner(efmt: ErrorOutputType, name: &str, extended: bool) -> PpMode {
use PpMode::*;
- use PpSourceMode::*;
let first = match (name, extended) {
- ("normal", _) => PpmSource(PpmNormal),
- ("identified", _) => PpmSource(PpmIdentified),
- ("everybody_loops", true) => PpmSource(PpmEveryBodyLoops),
- ("expanded", _) => PpmSource(PpmExpanded),
- ("expanded,identified", _) => PpmSource(PpmExpandedIdentified),
- ("expanded,hygiene", _) => PpmSource(PpmExpandedHygiene),
- ("hir", true) => PpmHir(PpmNormal),
- ("hir,identified", true) => PpmHir(PpmIdentified),
- ("hir,typed", true) => PpmHir(PpmTyped),
- ("hir-tree", true) => PpmHirTree(PpmNormal),
- ("mir", true) => PpmMir,
- ("mir-cfg", true) => PpmMirCFG,
+ ("normal", _) => Source(PpSourceMode::Normal),
+ ("identified", _) => Source(PpSourceMode::Identified),
+ ("everybody_loops", true) => Source(PpSourceMode::EveryBodyLoops),
+ ("expanded", _) => Source(PpSourceMode::Expanded),
+ ("expanded,identified", _) => Source(PpSourceMode::ExpandedIdentified),
+ ("expanded,hygiene", _) => Source(PpSourceMode::ExpandedHygiene),
+ ("ast-tree", true) => AstTree(PpAstTreeMode::Normal),
+ ("ast-tree,expanded", true) => AstTree(PpAstTreeMode::Expanded),
+ ("hir", true) => Hir(PpHirMode::Normal),
+ ("hir,identified", true) => Hir(PpHirMode::Identified),
+ ("hir,typed", true) => Hir(PpHirMode::Typed),
+ ("hir-tree", true) => HirTree,
+ ("thir-tree", true) => ThirTree,
+ ("mir", true) => Mir,
+ ("mir-cfg", true) => MirCFG,
_ => {
if extended {
early_error(
@@ -1986,8 +2071,8 @@
"argument to `unpretty` must be one of `normal`, \
`expanded`, `identified`, `expanded,identified`, \
`expanded,hygiene`, `everybody_loops`, \
- `hir`, `hir,identified`, `hir,typed`, `hir-tree`, \
- `mir` or `mir-cfg`; got {}",
+ `ast-tree`, `ast-tree,expanded`, `hir`, `hir,identified`, \
+ `hir,typed`, `hir-tree`, `mir` or `mir-cfg`; got {}",
name
),
);
@@ -2006,6 +2091,18 @@
tracing::debug!("got unpretty option: {:?}", first);
first
}
+
+ if debugging_opts.unstable_options {
+ if let Some(a) = matches.opt_default("pretty", "normal") {
+ // stable pretty-print variants only
+ return Some(parse_pretty_inner(efmt, &a, false));
+ }
+ }
+
+ debugging_opts.unpretty.as_ref().map(|a| {
+ // extended with unstable pretty-print variants
+ parse_pretty_inner(efmt, &a, true)
+ })
}
pub fn make_crate_type_option() -> RustcOptGroup {
@@ -2113,22 +2210,54 @@
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum PpSourceMode {
- PpmNormal,
- PpmEveryBodyLoops,
- PpmExpanded,
- PpmIdentified,
- PpmExpandedIdentified,
- PpmExpandedHygiene,
- PpmTyped,
+ /// `--pretty=normal`
+ Normal,
+ /// `-Zunpretty=everybody_loops`
+ EveryBodyLoops,
+ /// `--pretty=expanded`
+ Expanded,
+ /// `--pretty=identified`
+ Identified,
+ /// `--pretty=expanded,identified`
+ ExpandedIdentified,
+ /// `--pretty=expanded,hygiene`
+ ExpandedHygiene,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum PpAstTreeMode {
+ /// `-Zunpretty=ast`
+ Normal,
+ /// `-Zunpretty=ast,expanded`
+ Expanded,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum PpHirMode {
+ /// `-Zunpretty=hir`
+ Normal,
+ /// `-Zunpretty=hir,identified`
+ Identified,
+ /// `-Zunpretty=hir,typed`
+ Typed,
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum PpMode {
- PpmSource(PpSourceMode),
- PpmHir(PpSourceMode),
- PpmHirTree(PpSourceMode),
- PpmMir,
- PpmMirCFG,
+ /// Options that print the source code, i.e.
+ /// `--pretty` and `-Zunpretty=everybody_loops`
+ Source(PpSourceMode),
+ AstTree(PpAstTreeMode),
+ /// Options that print the HIR, i.e. `-Zunpretty=hir`
+ Hir(PpHirMode),
+ /// `-Zunpretty=hir-tree`
+ HirTree,
+ /// `-Zunpretty=thir-tree`
+ ThirTree,
+ /// `-Zunpretty=mir`
+ Mir,
+ /// `-Zunpretty=mir-cfg`
+ MirCFG,
}
impl PpMode {
@@ -2136,22 +2265,21 @@
use PpMode::*;
use PpSourceMode::*;
match *self {
- PpmSource(PpmNormal | PpmIdentified) => false,
+ Source(Normal | Identified) | AstTree(PpAstTreeMode::Normal) => false,
- PpmSource(
- PpmExpanded | PpmEveryBodyLoops | PpmExpandedIdentified | PpmExpandedHygiene,
- )
- | PpmHir(_)
- | PpmHirTree(_)
- | PpmMir
- | PpmMirCFG => true,
- PpmSource(PpmTyped) => panic!("invalid state"),
+ Source(Expanded | EveryBodyLoops | ExpandedIdentified | ExpandedHygiene)
+ | AstTree(PpAstTreeMode::Expanded)
+ | Hir(_)
+ | HirTree
+ | ThirTree
+ | Mir
+ | MirCFG => true,
}
}
pub fn needs_analysis(&self) -> bool {
use PpMode::*;
- matches!(*self, PpmMir | PpmMirCFG)
+ matches!(*self, Mir | MirCFG)
}
}
@@ -2189,6 +2317,7 @@
use std::collections::hash_map::DefaultHasher;
use std::collections::BTreeMap;
use std::hash::Hash;
+ use std::num::NonZeroUsize;
use std::path::PathBuf;
pub trait DepTrackingHash {
@@ -2229,6 +2358,7 @@
impl_dep_tracking_hash_via_hash!(lint::Level);
impl_dep_tracking_hash_via_hash!(Option<bool>);
impl_dep_tracking_hash_via_hash!(Option<usize>);
+ impl_dep_tracking_hash_via_hash!(Option<NonZeroUsize>);
impl_dep_tracking_hash_via_hash!(Option<String>);
impl_dep_tracking_hash_via_hash!(Option<(String, u64)>);
impl_dep_tracking_hash_via_hash!(Option<Vec<String>>);
diff --git a/compiler/rustc_session/src/filesearch.rs b/compiler/rustc_session/src/filesearch.rs
index 47f14fa..2df3266 100644
--- a/compiler/rustc_session/src/filesearch.rs
+++ b/compiler/rustc_session/src/filesearch.rs
@@ -169,7 +169,7 @@
// Check if sysroot is found using env::args().next(), and if is not found,
// use env::current_exe() to imply sysroot.
- from_env_args_next().unwrap_or(from_current_exe())
+ from_env_args_next().unwrap_or_else(from_current_exe)
}
// The name of the directory rustc expects libraries to be located in.
diff --git a/compiler/rustc_session/src/lib.rs b/compiler/rustc_session/src/lib.rs
index 36bf863..7eaeae5 100644
--- a/compiler/rustc_session/src/lib.rs
+++ b/compiler/rustc_session/src/lib.rs
@@ -1,7 +1,7 @@
#![feature(crate_visibility_modifier)]
#![feature(once_cell)]
#![feature(or_patterns)]
-#![feature(str_split_once)]
+#![recursion_limit = "256"]
#[macro_use]
extern crate bitflags;
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
index 779e042..d9e5a18 100644
--- a/compiler/rustc_session/src/options.rs
+++ b/compiler/rustc_session/src/options.rs
@@ -16,6 +16,7 @@
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
+use std::num::NonZeroUsize;
use std::path::PathBuf;
use std::str;
@@ -112,6 +113,7 @@
borrowck_mode: BorrowckMode [UNTRACKED],
cg: CodegenOptions [TRACKED],
externs: Externs [UNTRACKED],
+ extern_dep_specs: ExternDepSpecs [UNTRACKED],
crate_name: Option<String> [TRACKED],
// An optional name to use as the crate for std during std injection,
// written `extern crate name as std`. Defaults to `std`. Used by
@@ -252,7 +254,7 @@
pub const parse_passes: &str = "a space-separated list of passes, or `all`";
pub const parse_panic_strategy: &str = "either `unwind` or `abort`";
pub const parse_relro_level: &str = "one of: `full`, `partial`, or `off`";
- pub const parse_sanitizers: &str = "comma separated list of sanitizers: `address`, `leak`, `memory` or `thread`";
+ pub const parse_sanitizers: &str = "comma separated list of sanitizers: `address`, `hwaddress`, `leak`, `memory` or `thread`";
pub const parse_sanitizer_memory_track_origins: &str = "0, 1, or 2";
pub const parse_cfguard: &str =
"either a boolean (`yes`, `no`, `on`, `off`, etc), `checks`, or `nochecks`";
@@ -475,6 +477,7 @@
"leak" => SanitizerSet::LEAK,
"memory" => SanitizerSet::MEMORY,
"thread" => SanitizerSet::THREAD,
+ "hwaddress" => SanitizerSet::HWADDRESS,
_ => return false,
}
}
@@ -589,10 +592,10 @@
true
}
- fn parse_treat_err_as_bug(slot: &mut Option<usize>, v: Option<&str>) -> bool {
+ fn parse_treat_err_as_bug(slot: &mut Option<NonZeroUsize>, v: Option<&str>) -> bool {
match v {
- Some(s) => { *slot = s.parse().ok().filter(|&x| x != 0); slot.unwrap_or(0) != 0 }
- None => { *slot = Some(1); true }
+ Some(s) => { *slot = s.parse().ok(); slot.is_some() }
+ None => { *slot = NonZeroUsize::new(1); true }
}
}
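The switch to `NonZeroUsize` lets the standard `FromStr` impl do the zero check that the removed `filter(|&x| x != 0)` used to perform: parsing `"0"` simply fails. A small standalone check of that assumption:

    use std::num::NonZeroUsize;

    fn main() {
        // "0" is rejected by NonZeroUsize's FromStr, so parse_treat_err_as_bug reports failure for it.
        assert!("0".parse::<NonZeroUsize>().is_err());
        assert_eq!("3".parse::<NonZeroUsize>().ok(), NonZeroUsize::new(3));
    }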
@@ -954,18 +957,16 @@
(default: no)"),
incremental_verify_ich: bool = (false, parse_bool, [UNTRACKED],
"verify incr. comp. hashes of green query instances (default: no)"),
- inline_mir_threshold: usize = (50, parse_uint, [TRACKED],
+ inline_mir: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "enable MIR inlining (default: no)"),
+ inline_mir_threshold: Option<usize> = (None, parse_opt_uint, [TRACKED],
"a default MIR inlining threshold (default: 50)"),
- inline_mir_hint_threshold: usize = (100, parse_uint, [TRACKED],
+ inline_mir_hint_threshold: Option<usize> = (None, parse_opt_uint, [TRACKED],
"inlining threshold for functions with inline hint (default: 100)"),
inline_in_all_cgus: Option<bool> = (None, parse_opt_bool, [TRACKED],
"control whether `#[inline]` functions are in all CGUs"),
input_stats: bool = (false, parse_bool, [UNTRACKED],
"gather statistics about the input (default: no)"),
- insert_sideeffect: bool = (false, parse_bool, [TRACKED],
- "fix undefined behavior when a thread doesn't eventually make progress \
- (such as entering an empty infinite loop) by inserting llvm.sideeffect \
- (default: no)"),
instrument_coverage: bool = (false, parse_bool, [TRACKED],
"instrument the generated code to support LLVM source-based code coverage \
reports (note, the compiler build config must include `profiler = true`, \
@@ -994,8 +995,8 @@
mir_emit_retag: bool = (false, parse_bool, [TRACKED],
"emit Retagging MIR statements, interpreted e.g., by miri; implies -Zmir-opt-level=0 \
(default: no)"),
- mir_opt_level: usize = (1, parse_uint, [TRACKED],
- "MIR optimization level (0-3; default: 1)"),
+ mir_opt_level: Option<usize> = (None, parse_opt_uint, [TRACKED],
+ "MIR optimization level (0-4; default: 1 in non optimized builds and 2 in optimized builds)"),
mutable_noalias: bool = (false, parse_bool, [TRACKED],
"emit noalias metadata for mutable references (default: no)"),
new_llvm_pass_manager: bool = (false, parse_bool, [TRACKED],
@@ -1139,7 +1140,7 @@
"for every macro invocation, print its name and arguments (default: no)"),
trap_unreachable: Option<bool> = (None, parse_opt_bool, [TRACKED],
"generate trap instructions for unreachable intrinsics (default: use target setting, usually yes)"),
- treat_err_as_bug: Option<usize> = (None, parse_treat_err_as_bug, [TRACKED],
+ treat_err_as_bug: Option<NonZeroUsize> = (None, parse_treat_err_as_bug, [TRACKED],
"treat error number `val` that occurs as bug"),
trim_diagnostic_paths: bool = (true, parse_bool, [UNTRACKED],
"in diagnostics, use heuristics to shorten paths referring to items"),
@@ -1153,6 +1154,8 @@
`expanded`, `expanded,identified`,
`expanded,hygiene` (with internal representations),
`everybody_loops` (all function bodies replaced with `loop {}`),
+ `ast-tree` (raw AST before expansion),
+ `ast-tree,expanded` (raw AST after expansion),
`hir` (the HIR), `hir,identified`,
`hir,typed` (HIR with types for each node),
`hir-tree` (dump the raw HIR),
diff --git a/compiler/rustc_session/src/parse.rs b/compiler/rustc_session/src/parse.rs
index 81b3834..592773b 100644
--- a/compiler/rustc_session/src/parse.rs
+++ b/compiler/rustc_session/src/parse.rs
@@ -13,7 +13,6 @@
use rustc_span::source_map::{FilePathMapping, SourceMap};
use rustc_span::{MultiSpan, Span, Symbol};
-use std::path::PathBuf;
use std::str;
/// The set of keys (and, optionally, values) that define the compilation
@@ -122,8 +121,6 @@
pub missing_fragment_specifiers: Lock<FxHashMap<Span, NodeId>>,
/// Places where raw identifiers were used. This is used for feature-gating raw identifiers.
pub raw_identifier_spans: Lock<Vec<Span>>,
- /// Used to determine and report recursive module inclusions.
- pub included_mod_stack: Lock<Vec<PathBuf>>,
source_map: Lrc<SourceMap>,
pub buffered_lints: Lock<Vec<BufferedEarlyLint>>,
/// Contains the spans of block expressions that could have been incomplete based on the
@@ -157,7 +154,6 @@
edition: ExpnId::root().expn_data().edition,
missing_fragment_specifiers: Default::default(),
raw_identifier_spans: Lock::new(Vec::new()),
- included_mod_stack: Lock::new(vec![]),
source_map,
buffered_lints: Lock::new(vec![]),
ambiguous_block_expr_parse: Lock::new(FxHashMap::default()),
diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs
index 69aa72d..fc57b6b 100644
--- a/compiler/rustc_session/src/session.rs
+++ b/compiler/rustc_session/src/session.rs
@@ -8,7 +8,6 @@
use crate::search_paths::{PathKind, SearchPath};
pub use rustc_ast::attr::MarkedAttrs;
-pub use rustc_ast::crate_disambiguator::CrateDisambiguator;
pub use rustc_ast::Attribute;
use rustc_data_structures::flock;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
@@ -23,6 +22,7 @@
use rustc_errors::registry::Registry;
use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, DiagnosticId, ErrorReported};
use rustc_lint_defs::FutureBreakage;
+pub use rustc_span::crate_disambiguator::CrateDisambiguator;
use rustc_span::edition::Edition;
use rustc_span::source_map::{FileLoader, MultiSpan, RealFileLoader, SourceMap, Span};
use rustc_span::{sym, SourceFileHashAlgorithm, Symbol};
@@ -77,6 +77,7 @@
/// Check that `value` is within the limit. Ensures that the same comparisons are used
/// throughout the compiler, as mismatches can cause ICEs, see #72540.
+ #[inline]
pub fn value_within_limit(&self, value: usize) -> bool {
value <= self.0
}
@@ -347,10 +348,12 @@
self.crate_types.set(crate_types).expect("`crate_types` was initialized twice")
}
+ #[inline]
pub fn recursion_limit(&self) -> Limit {
self.recursion_limit.get().copied().unwrap()
}
+ #[inline]
pub fn type_length_limit(&self) -> Limit {
self.type_length_limit.get().copied().unwrap()
}
@@ -637,6 +640,12 @@
pub fn binary_dep_depinfo(&self) -> bool {
self.opts.debugging_opts.binary_dep_depinfo
}
+ pub fn mir_opt_level(&self) -> usize {
+ self.opts
+ .debugging_opts
+ .mir_opt_level
+ .unwrap_or_else(|| if self.opts.optimize != config::OptLevel::No { 2 } else { 1 })
+ }
/// Gets the features enabled for the current compilation session.
/// DO NOT USE THIS METHOD if there is a TyCtxt available, as it circumvents
@@ -784,6 +793,13 @@
}
}
+ pub fn inline_asm_dialect(&self) -> rustc_ast::LlvmAsmDialect {
+ match self.asm_arch {
+ Some(InlineAsmArch::X86 | InlineAsmArch::X86_64) => rustc_ast::LlvmAsmDialect::Intel,
+ _ => rustc_ast::LlvmAsmDialect::Att,
+ }
+ }
+
pub fn relocation_model(&self) -> RelocModel {
self.opts.cg.relocation_model.unwrap_or(self.target.relocation_model)
}
@@ -959,19 +975,19 @@
}
pub fn print_perf_stats(&self) {
- println!(
+ eprintln!(
"Total time spent computing symbol hashes: {}",
duration_to_secs_str(*self.perf_stats.symbol_hash_time.lock())
);
- println!(
+ eprintln!(
"Total queries canonicalized: {}",
self.perf_stats.queries_canonicalized.load(Ordering::Relaxed)
);
- println!(
+ eprintln!(
"normalize_generic_arg_after_erasing_regions: {}",
self.perf_stats.normalize_generic_arg_after_erasing_regions.load(Ordering::Relaxed)
);
- println!(
+ eprintln!(
"normalize_projection_ty: {}",
self.perf_stats.normalize_projection_ty.load(Ordering::Relaxed)
);
@@ -1126,7 +1142,8 @@
self.opts.optimize != config::OptLevel::No
// AddressSanitizer uses lifetimes to detect use after scope bugs.
// MemorySanitizer uses lifetimes to detect use of uninitialized stack variables.
- || self.opts.debugging_opts.sanitizer.intersects(SanitizerSet::ADDRESS | SanitizerSet::MEMORY)
+ // HWAddressSanitizer will use lifetimes to detect use after scope bugs in the future.
+ || self.opts.debugging_opts.sanitizer.intersects(SanitizerSet::ADDRESS | SanitizerSet::MEMORY | SanitizerSet::HWADDRESS)
}
pub fn link_dead_code(&self) -> bool {
@@ -1562,6 +1579,8 @@
"x86_64-unknown-freebsd",
"x86_64-unknown-linux-gnu",
];
+ const HWASAN_SUPPORTED_TARGETS: &[&str] =
+ &["aarch64-linux-android", "aarch64-unknown-linux-gnu"];
// Sanitizers can only be used on some tested platforms.
for s in sess.opts.debugging_opts.sanitizer {
@@ -1570,6 +1589,7 @@
SanitizerSet::LEAK => LSAN_SUPPORTED_TARGETS,
SanitizerSet::MEMORY => MSAN_SUPPORTED_TARGETS,
SanitizerSet::THREAD => TSAN_SUPPORTED_TARGETS,
+ SanitizerSet::HWADDRESS => HWASAN_SUPPORTED_TARGETS,
_ => panic!("unrecognized sanitizer {}", s),
};
if !supported_targets.contains(&&*sess.opts.target_triple.triple()) {
diff --git a/compiler/rustc_ast/src/crate_disambiguator.rs b/compiler/rustc_span/src/crate_disambiguator.rs
similarity index 100%
rename from compiler/rustc_ast/src/crate_disambiguator.rs
rename to compiler/rustc_span/src/crate_disambiguator.rs
diff --git a/compiler/rustc_span/src/def_id.rs b/compiler/rustc_span/src/def_id.rs
index b24ede9..885f30e 100644
--- a/compiler/rustc_span/src/def_id.rs
+++ b/compiler/rustc_span/src/def_id.rs
@@ -1,3 +1,4 @@
+use crate::crate_disambiguator::CrateDisambiguator;
use crate::HashStableContext;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@@ -105,10 +106,72 @@
}
}
+/// A `DefPathHash` is a fixed-size representation of a `DefPath` that is
+/// stable across crate and compilation session boundaries. It consists of two
+/// separate 64-bit hashes. The first uniquely identifies the crate this
+/// `DefPathHash` originates from (see [StableCrateId]), and the second
+/// uniquely identifies the corresponding `DefPath` within that crate. Together
+/// they form a unique identifier within an entire crate graph.
+///
+/// There is a very small chance of hash collisions, which would mean that two
+/// different `DefPath`s map to the same `DefPathHash`. Continuing compilation
+/// with such a hash collision would very probably lead to an ICE, and in the
+/// worst case lead to a silent mis-compilation. The compiler therefore actively
+/// and exhaustively checks for such hash collisions and aborts compilation if
+/// it finds one.
+///
+/// `DefPathHash` uses 64-bit hashes for both the crate-id part and the
+/// crate-internal part, even though it is likely that there are many more
+/// `LocalDefId`s in a single crate than there are individual crates in a crate
+/// graph. Since we use the same number of bits in both cases, the collision
+/// probability for the crate-local part will be quite a bit higher (though
+/// still very small).
+///
+/// This imbalance is not by accident: A hash collision in the
+/// crate-local part of a `DefPathHash` will be detected and reported while
+/// compiling the crate in question. Such a collision does not depend on
+/// outside factors and can be easily fixed by the crate maintainer (e.g. by
+/// renaming the item in question or by bumping the crate version in a harmless
+/// way).
+///
+/// A collision between crate-id hashes on the other hand is harder to fix
+/// because it depends on the set of crates in the entire crate graph of a
+/// compilation session. Again, using the same crate with a different version
+/// number would fix the issue with a high probability -- but that might be
+/// easier said than done if the crates in question are dependencies of
+/// third-party crates.
+///
+/// That being said, given a high quality hash function, the collision
+/// probabilities in question are very small. For example, for a big crate like
+/// `rustc_middle` (with ~50000 `LocalDefId`s as of the time of writing) there
+/// is a probability of roughly 1 in 14,750,000,000 of a crate-internal
+/// collision occurring. For a big crate graph with 1000 crates in it, there is
+/// a probability of 1 in 36,890,000,000,000 of a `StableCrateId` collision.
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Debug)]
#[derive(HashStable_Generic, Encodable, Decodable)]
pub struct DefPathHash(pub Fingerprint);
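The probabilities quoted above follow from the standard birthday approximation P ≈ n²/(2·2^64); a small standalone check (not part of this diff) reproduces both figures:

    fn main() {
        // Birthday approximation: odds of a collision are about 1 in 2^64 / (n^2 / 2).
        let odds = |n: f64| (2f64).powi(64) / (n * n / 2.0);
        println!("crate-local part, n = 50_000: 1 in {:.3e}", odds(50_000.0)); // ~1.476e10
        println!("crate graph,      n =  1_000: 1 in {:.3e}", odds(1_000.0));  // ~3.689e13
    }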
+impl DefPathHash {
+ /// Returns the [StableCrateId] identifying the crate this [DefPathHash]
+ /// originates from.
+ #[inline]
+ pub fn stable_crate_id(&self) -> StableCrateId {
+ StableCrateId(self.0.as_value().0)
+ }
+
+ /// Returns the crate-local part of the [DefPathHash].
+ #[inline]
+ pub fn local_hash(&self) -> u64 {
+ self.0.as_value().1
+ }
+
+ /// Builds a new [DefPathHash] with the given [StableCrateId] and
+ /// `local_hash`, where `local_hash` must be unique within its crate.
+ pub fn new(stable_crate_id: StableCrateId, local_hash: u64) -> DefPathHash {
+ DefPathHash(Fingerprint::new(stable_crate_id.0, local_hash))
+ }
+}
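As a hypothetical usage sketch (not in this diff), the two accessors are the inverse of `new`: a `DefPathHash` round-trips through its two 64-bit halves, assuming `stable_crate_id` and `local_hash` values are in scope.

    let hash = DefPathHash::new(stable_crate_id, local_hash);
    assert_eq!(hash.stable_crate_id(), stable_crate_id);
    assert_eq!(hash.local_hash(), local_hash);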
+
impl Borrow<Fingerprint> for DefPathHash {
#[inline]
fn borrow(&self) -> &Fingerprint {
@@ -116,6 +179,30 @@
}
}
+/// A [StableCrateId] is a 64-bit hash of `(crate-name, crate-disambiguator)`. It
+/// is to [CrateNum] what [DefPathHash] is to [DefId]. It is stable across
+/// compilation sessions.
+///
+/// Since the ID is a hash value there is a (very small) chance that two crates
+/// end up with the same [StableCrateId]. The compiler will check for such
+/// collisions when loading crates and abort compilation in order to avoid
+/// further trouble.
+#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Debug, Encodable, Decodable)]
+pub struct StableCrateId(u64);
+
+impl StableCrateId {
+ /// Computes the stable ID for a crate with the given name and
+ /// disambiguator.
+ pub fn new(crate_name: &str, crate_disambiguator: CrateDisambiguator) -> StableCrateId {
+ use std::hash::Hash;
+
+ let mut hasher = StableHasher::new();
+ crate_name.hash(&mut hasher);
+ crate_disambiguator.hash(&mut hasher);
+ StableCrateId(hasher.finish())
+ }
+}
+
rustc_index::newtype_index! {
/// A DefIndex is an index into the hir-map for a crate, identifying a
/// particular definition. It should really be considered an interned
@@ -227,6 +314,8 @@
pub local_def_index: DefIndex,
}
+pub const CRATE_DEF_ID: LocalDefId = LocalDefId { local_def_index: CRATE_DEF_INDEX };
+
impl Idx for LocalDefId {
#[inline]
fn new(idx: usize) -> Self {
@@ -268,6 +357,8 @@
}
}
+rustc_data_structures::define_id_collections!(LocalDefIdMap, LocalDefIdSet, LocalDefId);
+
impl<CTX: HashStableContext> HashStable<CTX> for DefId {
fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
hcx.hash_def_id(*self, hasher)
diff --git a/compiler/rustc_span/src/edition.rs b/compiler/rustc_span/src/edition.rs
index a9200dd..8544acd 100644
--- a/compiler/rustc_span/src/edition.rs
+++ b/compiler/rustc_span/src/edition.rs
@@ -20,7 +20,7 @@
Edition2015,
/// The 2018 edition
Edition2018,
- /// The 2021 ediiton
+ /// The 2021 edition
Edition2021,
}
diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs
index 9f265f3..eb5b7c4 100644
--- a/compiler/rustc_span/src/hygiene.rs
+++ b/compiler/rustc_span/src/hygiene.rs
@@ -118,7 +118,8 @@
HygieneData::with(|data| {
let old_expn_data = &mut data.expn_data[self.0 as usize];
assert!(old_expn_data.is_none(), "expansion data is reset for an expansion ID");
- expn_data.orig_id.replace(self.as_u32()).expect_none("orig_id should be None");
+ assert_eq!(expn_data.orig_id, None);
+ expn_data.orig_id = Some(self.as_u32());
*old_expn_data = Some(expn_data);
});
update_disambiguator(self)
@@ -202,7 +203,8 @@
fn fresh_expn(&mut self, mut expn_data: Option<ExpnData>) -> ExpnId {
let raw_id = self.expn_data.len() as u32;
if let Some(data) = expn_data.as_mut() {
- data.orig_id.replace(raw_id).expect_none("orig_id should be None");
+ assert_eq!(data.orig_id, None);
+ data.orig_id = Some(raw_id);
}
self.expn_data.push(expn_data);
ExpnId(raw_id)
@@ -1362,12 +1364,6 @@
fn hash_spans(&self) -> bool {
true
}
- fn byte_pos_to_line_and_col(
- &mut self,
- byte: BytePos,
- ) -> Option<(Lrc<SourceFile>, usize, BytePos)> {
- self.caching_source_map.byte_pos_to_line_and_col(byte)
- }
fn span_data_to_lines_and_cols(
&mut self,
span: &crate::SpanData,
@@ -1405,8 +1401,8 @@
});
if modified {
- info!("Set disambiguator for {:?} (hash {:?})", expn_id, first_hash);
- info!("expn_data = {:?}", expn_id.expn_data());
+ debug!("Set disambiguator for {:?} (hash {:?})", expn_id, first_hash);
+ debug!("expn_data = {:?}", expn_id.expn_data());
// Verify that the new disambiguator makes the hash unique
#[cfg(debug_assertions)]
@@ -1416,9 +1412,11 @@
let new_hash: Fingerprint = hasher.finish();
HygieneData::with(|data| {
- data.expn_data_disambiguators
- .get(&new_hash)
- .expect_none("Hash collision after disambiguator update!");
+ assert_eq!(
+ data.expn_data_disambiguators.get(&new_hash),
+ None,
+ "Hash collision after disambiguator update!",
+ );
});
};
}
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
index f3d876a..d279033 100644
--- a/compiler/rustc_span/src/lib.rs
+++ b/compiler/rustc_span/src/lib.rs
@@ -21,7 +21,6 @@
#![feature(negative_impls)]
#![feature(nll)]
#![feature(min_specialization)]
-#![feature(option_expect_none)]
#[macro_use]
extern crate rustc_macros;
@@ -47,6 +46,8 @@
mod span_encoding;
pub use span_encoding::{Span, DUMMY_SP};
+pub mod crate_disambiguator;
+
pub mod symbol;
pub use symbol::{sym, Symbol};
@@ -509,11 +510,10 @@
/// items can be used (that is, a macro marked with
/// `#[allow_internal_unstable]`).
pub fn allows_unstable(&self, feature: Symbol) -> bool {
- self.ctxt().outer_expn_data().allow_internal_unstable.map_or(false, |features| {
- features
- .iter()
- .any(|&f| f == feature || f == sym::allow_internal_unstable_backcompat_hack)
- })
+ self.ctxt()
+ .outer_expn_data()
+ .allow_internal_unstable
+ .map_or(false, |features| features.iter().any(|&f| f == feature))
}
/// Checks if this span arises from a compiler desugaring of kind `kind`.
@@ -1873,10 +1873,6 @@
fn expn_id_cache() -> &'static LocalKey<ExpnIdCache>;
fn hash_crate_num(&mut self, _: CrateNum, hasher: &mut StableHasher);
fn hash_spans(&self) -> bool;
- fn byte_pos_to_line_and_col(
- &mut self,
- byte: BytePos,
- ) -> Option<(Lrc<SourceFile>, usize, BytePos)>;
fn span_data_to_lines_and_cols(
&mut self,
span: &SpanData,
@@ -1905,9 +1901,10 @@
return;
}
+ self.ctxt().hash_stable(ctx, hasher);
+
if self.is_dummy() {
Hash::hash(&TAG_INVALID_SPAN, hasher);
- self.ctxt().hash_stable(ctx, hasher);
return;
}
@@ -1920,7 +1917,6 @@
Some(pos) => pos,
None => {
Hash::hash(&TAG_INVALID_SPAN, hasher);
- span.ctxt.hash_stable(ctx, hasher);
return;
}
};
@@ -1947,7 +1943,6 @@
let len = (span.hi - span.lo).0;
Hash::hash(&col_line, hasher);
Hash::hash(&len, hasher);
- span.ctxt.hash_stable(ctx, hasher);
}
}
@@ -2000,7 +1995,8 @@
if cache.len() < new_len {
cache.resize(new_len, None);
}
- cache[index].replace(sub_hash).expect_none("Cache slot was filled");
+ let prev = cache[index].replace(sub_hash);
+ assert_eq!(prev, None, "Cache slot was filled");
});
sub_hash.hash_stable(ctx, hasher);
}
diff --git a/compiler/rustc_span/src/source_map.rs b/compiler/rustc_span/src/source_map.rs
index 2b42937..f612d14 100644
--- a/compiler/rustc_span/src/source_map.rs
+++ b/compiler/rustc_span/src/source_map.rs
@@ -453,41 +453,6 @@
}
}
- /// Returns `Some(span)`, a union of the LHS and RHS span. The LHS must precede the RHS. If
- /// there are gaps between LHS and RHS, the resulting union will cross these gaps.
- /// For this to work,
- ///
- /// * the syntax contexts of both spans much match,
- /// * the LHS span needs to end on the same line the RHS span begins,
- /// * the LHS span must start at or before the RHS span.
- pub fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option<Span> {
- // Ensure we're at the same expansion ID.
- if sp_lhs.ctxt() != sp_rhs.ctxt() {
- return None;
- }
-
- let lhs_end = match self.lookup_line(sp_lhs.hi()) {
- Ok(x) => x,
- Err(_) => return None,
- };
- let rhs_begin = match self.lookup_line(sp_rhs.lo()) {
- Ok(x) => x,
- Err(_) => return None,
- };
-
- // If we must cross lines to merge, don't merge.
- if lhs_end.line != rhs_begin.line {
- return None;
- }
-
- // Ensure these follow the expected order and that we don't overlap.
- if (sp_lhs.lo() <= sp_rhs.lo()) && (sp_lhs.hi() <= sp_rhs.lo()) {
- Some(sp_lhs.to(sp_rhs))
- } else {
- None
- }
- }
-
pub fn span_to_string(&self, sp: Span) -> String {
if self.files.borrow().source_files.is_empty() && sp.is_dummy() {
return "no-location".to_string();
@@ -539,7 +504,7 @@
pub fn is_line_before_span_empty(&self, sp: Span) -> bool {
match self.span_to_prev_source(sp) {
- Ok(s) => s.split('\n').last().map_or(false, |l| l.trim_start().is_empty()),
+ Ok(s) => s.rsplit_once('\n').unwrap_or(("", &s)).1.trim_start().is_empty(),
Err(_) => false,
}
}
@@ -632,10 +597,11 @@
pub fn span_to_margin(&self, sp: Span) -> Option<usize> {
match self.span_to_prev_source(sp) {
Err(_) => None,
- Ok(source) => source
- .split('\n')
- .last()
- .map(|last_line| last_line.len() - last_line.trim_start().len()),
+ Ok(source) => {
+ let last_line = source.rsplit_once('\n').unwrap_or(("", &source)).1;
+
+ Some(last_line.len() - last_line.trim_start().len())
+ }
}
}
@@ -651,7 +617,7 @@
pub fn span_extend_to_prev_char(&self, sp: Span, c: char, accept_newlines: bool) -> Span {
if let Ok(prev_source) = self.span_to_prev_source(sp) {
let prev_source = prev_source.rsplit(c).next().unwrap_or("");
- if !prev_source.is_empty() && (!prev_source.contains('\n') || accept_newlines) {
+ if !prev_source.is_empty() && (accept_newlines || !prev_source.contains('\n')) {
return sp.with_lo(BytePos(sp.lo().0 - prev_source.len() as u32));
}
}
@@ -673,7 +639,7 @@
let prev_source = prev_source.rsplit(&pat).next().unwrap_or("").trim_start();
if prev_source.is_empty() && sp.lo().0 != 0 {
return sp.with_lo(BytePos(sp.lo().0 - 1));
- } else if !prev_source.contains('\n') || accept_newlines {
+ } else if accept_newlines || !prev_source.contains('\n') {
return sp.with_lo(BytePos(sp.lo().0 - prev_source.len() as u32));
}
}
@@ -693,7 +659,7 @@
pub fn span_extend_to_next_char(&self, sp: Span, c: char, accept_newlines: bool) -> Span {
if let Ok(next_source) = self.span_to_next_source(sp) {
let next_source = next_source.split(c).next().unwrap_or("");
- if !next_source.is_empty() && (!next_source.contains('\n') || accept_newlines) {
+ if !next_source.is_empty() && (accept_newlines || !next_source.contains('\n')) {
return sp.with_hi(BytePos(sp.hi().0 + next_source.len() as u32));
}
}
@@ -777,16 +743,35 @@
self.span_until_char(sp, '{')
}
- /// Returns a new span representing just the start point of this span.
+ /// Returns a new span representing just the first character of the given span.
pub fn start_point(&self, sp: Span) -> Span {
- let pos = sp.lo().0;
- let width = self.find_width_of_character_at_span(sp, false);
- let corrected_start_position = pos.checked_add(width).unwrap_or(pos);
- let end_point = BytePos(cmp::max(corrected_start_position, sp.lo().0));
- sp.with_hi(end_point)
+ let width = {
+ let sp = sp.data();
+ let local_begin = self.lookup_byte_offset(sp.lo);
+ let start_index = local_begin.pos.to_usize();
+ let src = local_begin.sf.external_src.borrow();
+
+ let snippet = if let Some(ref src) = local_begin.sf.src {
+ Some(&src[start_index..])
+ } else if let Some(src) = src.get_source() {
+ Some(&src[start_index..])
+ } else {
+ None
+ };
+
+ match snippet {
+ None => 1,
+ Some(snippet) => match snippet.chars().next() {
+ None => 1,
+ Some(c) => c.len_utf8(),
+ },
+ }
+ };
+
+ sp.with_hi(BytePos(sp.lo().0 + width as u32))
}
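The width used by the rewritten `start_point` is just the UTF-8 length of the span's first character (falling back to 1 when no source is available); a standalone check of that building block:

    fn main() {
        // len_utf8 gives the byte width of the first character, which is what start_point now uses.
        assert_eq!('a'.len_utf8(), 1);
        assert_eq!('é'.len_utf8(), 2);
        assert_eq!('∂'.len_utf8(), 3);
    }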
- /// Returns a new span representing just the end point of this span.
+ /// Returns a new span representing just the last character of this span.
pub fn end_point(&self, sp: Span) -> Span {
let pos = sp.hi().0;
@@ -815,7 +800,8 @@
Span::new(BytePos(start_of_next_point), end_of_next_point, sp.ctxt())
}
- /// Finds the width of a character, either before or after the provided span.
+ /// Finds the width of the character, either before or after the end of the provided span,
+ /// depending on the `forwards` parameter.
fn find_width_of_character_at_span(&self, sp: Span, forwards: bool) -> u32 {
let sp = sp.data();
if sp.lo == sp.hi {
@@ -862,11 +848,9 @@
// We need to extend the snippet to the end of the src rather than to end_index so when
// searching forwards for boundaries we've got somewhere to search.
let snippet = if let Some(ref src) = local_begin.sf.src {
- let len = src.len();
- &src[start_index..len]
+ &src[start_index..]
} else if let Some(src) = src.get_source() {
- let len = src.len();
- &src[start_index..len]
+ &src[start_index..]
} else {
return 1;
};
@@ -912,13 +896,6 @@
SourceFileAndBytePos { sf, pos: offset }
}
- /// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
- pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
- let idx = self.lookup_source_file_idx(bpos);
- let sf = &(*self.files.borrow().source_files)[idx];
- sf.bytepos_to_file_charpos(bpos)
- }
-
// Returns the index of the `SourceFile` (in `self.files`) that contains `pos`.
// This index is guaranteed to be valid for the lifetime of this `SourceMap`,
// since `source_files` is a `MonotonicVec`
diff --git a/compiler/rustc_span/src/source_map/tests.rs b/compiler/rustc_span/src/source_map/tests.rs
index 3f22829..7d814f1 100644
--- a/compiler/rustc_span/src/source_map/tests.rs
+++ b/compiler/rustc_span/src/source_map/tests.rs
@@ -10,6 +10,50 @@
sm
}
+impl SourceMap {
+ /// Returns `Some(span)`, a union of the LHS and RHS span. The LHS must precede the RHS. If
+ /// there are gaps between LHS and RHS, the resulting union will cross these gaps.
+ /// For this to work,
+ ///
+ /// * the syntax contexts of both spans must match,
+ /// * the LHS span needs to end on the same line the RHS span begins,
+ /// * the LHS span must start at or before the RHS span.
+ fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option<Span> {
+ // Ensure we're at the same expansion ID.
+ if sp_lhs.ctxt() != sp_rhs.ctxt() {
+ return None;
+ }
+
+ let lhs_end = match self.lookup_line(sp_lhs.hi()) {
+ Ok(x) => x,
+ Err(_) => return None,
+ };
+ let rhs_begin = match self.lookup_line(sp_rhs.lo()) {
+ Ok(x) => x,
+ Err(_) => return None,
+ };
+
+ // If we must cross lines to merge, don't merge.
+ if lhs_end.line != rhs_begin.line {
+ return None;
+ }
+
+ // Ensure these follow the expected order and that we don't overlap.
+ if (sp_lhs.lo() <= sp_rhs.lo()) && (sp_lhs.hi() <= sp_rhs.lo()) {
+ Some(sp_lhs.to(sp_rhs))
+ } else {
+ None
+ }
+ }
+
+ /// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
+ fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
+ let idx = self.lookup_source_file_idx(bpos);
+ let sf = &(*self.files.borrow().source_files)[idx];
+ sf.bytepos_to_file_charpos(bpos)
+ }
+}
+
/// Tests `lookup_byte_offset`.
#[test]
fn t3() {
@@ -243,7 +287,7 @@
substring: &str,
n: usize,
) -> Span {
- println!(
+ eprintln!(
"span_substr(file={:?}/{:?}, substring={:?}, n={})",
file.name, file.start_pos, substring, n
);
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 86f8061..cd3dabb 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -18,7 +18,7 @@
#[cfg(test)]
mod tests;
-// The proc macro code for this is in `src/librustc_macros/src/symbols.rs`.
+// The proc macro code for this is in `compiler/rustc_macros/src/symbols.rs`.
symbols! {
// After modifying this list adjust `is_special`, `is_used_keyword`/`is_unused_keyword`,
// this should be rarely necessary though if the keywords are kept in alphabetic order.
@@ -126,6 +126,10 @@
Argument,
ArgumentV1,
Arguments,
+ BTreeMap,
+ BTreeSet,
+ BinaryHeap,
+ Borrow,
C,
CString,
Center,
@@ -138,6 +142,7 @@
Decodable,
Decoder,
Default,
+ Deref,
Encodable,
Encoder,
Eq,
@@ -163,16 +168,21 @@
Iterator,
Layout,
Left,
+ LinkedList,
LintPass,
None,
Ok,
Option,
Ord,
Ordering,
+ OsStr,
+ OsString,
Output,
Param,
PartialEq,
PartialOrd,
+ Path,
+ PathBuf,
Pending,
Pin,
Poll,
@@ -187,6 +197,7 @@
RangeToInclusive,
Rc,
Ready,
+ Receiver,
Result,
Return,
Right,
@@ -198,6 +209,8 @@
StructuralPartialEq,
Sync,
Target,
+ ToOwned,
+ ToString,
Try,
Ty,
TyCtxt,
@@ -252,7 +265,6 @@
allow_fail,
allow_internal_unsafe,
allow_internal_unstable,
- allow_internal_unstable_backcompat_hack,
allowed,
always,
and,
@@ -318,6 +330,7 @@
bridge,
bswap,
c_str,
+ c_unwind,
c_variadic,
call,
call_mut,
@@ -332,6 +345,7 @@
cfg_attr,
cfg_attr_multi,
cfg_doctest,
+ cfg_eval,
cfg_panic,
cfg_sanitize,
cfg_target_feature,
@@ -476,6 +490,7 @@
dropck_eyepatch,
dropck_parametricity,
dylib,
+ dyn_metadata,
dyn_trait,
edition_macro_pats,
eh_catch_typeinfo,
@@ -554,6 +569,7 @@
format_args,
format_args_capture,
format_args_nl,
+ format_macro,
freeze,
freg,
frem_fast,
@@ -585,6 +601,8 @@
gt,
half_open_range_patterns,
hash,
+ hashmap_type,
+ hashset_type,
hexagon_target_feature,
hidden,
homogeneous_aggregate,
@@ -593,6 +611,7 @@
html_no_source,
html_playground_url,
html_root_url,
+ hwaddress,
i,
i128,
i128_type,
@@ -619,6 +638,7 @@
index_mut,
infer_outlives_requirements,
infer_static_outlives_requirements,
+ inherent_associated_types,
inlateout,
inline,
inline_const,
@@ -679,6 +699,7 @@
loop_break_value,
lt,
macro_at_most_once_rep,
+ macro_attributes_in_derive_output,
macro_escape,
macro_export,
macro_lifetime_matcher,
@@ -708,12 +729,14 @@
memory,
message,
meta,
+ metadata_type,
min_align_of,
min_align_of_val,
min_const_fn,
min_const_generics,
min_const_unsafe_fn,
min_specialization,
+ min_type_alias_impl_trait,
minnumf32,
minnumf64,
mips_target_feature,
@@ -771,6 +794,9 @@
none_error,
nontemporal_store,
nontrapping_dash_fptoint: "nontrapping-fptoint",
+ noop_method_borrow,
+ noop_method_clone,
+ noop_method_deref,
noreturn,
nostack,
not,
@@ -830,6 +856,7 @@
plugin,
plugin_registrar,
plugins,
+ pointee_trait,
pointer,
pointer_trait,
pointer_trait_fmt,
@@ -870,6 +897,7 @@
ptr_guaranteed_eq,
ptr_guaranteed_ne,
ptr_offset_from,
+ pub_macro_rules,
pub_restricted,
pure,
pushpop_unsafe,
@@ -932,8 +960,11 @@
rt,
rtm_target_feature,
rust,
+ rust_2015,
rust_2015_preview,
+ rust_2018,
rust_2018_preview,
+ rust_2021,
rust_2021_preview,
rust_begin_unwind,
rust_eh_catch_typeinfo,
@@ -969,6 +1000,7 @@
rustc_layout,
rustc_layout_scalar_valid_range_end,
rustc_layout_scalar_valid_range_start,
+ rustc_legacy_const_generics,
rustc_macro_transparency,
rustc_mir,
rustc_nonnull_optimization_guaranteed,
@@ -1000,6 +1032,7 @@
rustc_then_this_would_need,
rustc_unsafe_specialization_marker,
rustc_variance,
+ rustdoc,
rustfmt,
rvalue_static_promotion,
sanitize,
@@ -1048,6 +1081,7 @@
simd_lt,
simd_mul,
simd_ne,
+ simd_neg,
simd_or,
simd_reduce_add_ordered,
simd_reduce_add_unordered,
@@ -1079,6 +1113,7 @@
size_of,
size_of_val,
sized,
+ skip,
slice,
slice_alloc,
slice_patterns,
@@ -1243,6 +1278,7 @@
variant_count,
vec,
vec_type,
+ vecdeque_type,
version,
vis,
visible_private_types,
@@ -1602,6 +1638,7 @@
use super::Symbol;
use std::convert::TryInto;
+ #[doc(inline)]
pub use super::sym_generated::*;
// Used from a macro in `librustc_feature/accepted.rs`
diff --git a/compiler/rustc_symbol_mangling/src/test.rs b/compiler/rustc_symbol_mangling/src/test.rs
index 8c5e438..bfe9c4d 100644
--- a/compiler/rustc_symbol_mangling/src/test.rs
+++ b/compiler/rustc_symbol_mangling/src/test.rs
@@ -5,6 +5,7 @@
//! paths etc in all kinds of annoying scenarios.
use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{subst::InternalSubsts, Instance, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
@@ -31,9 +32,8 @@
}
impl SymbolNamesTest<'tcx> {
- fn process_attrs(&mut self, hir_id: hir::HirId) {
+ fn process_attrs(&mut self, def_id: LocalDefId) {
let tcx = self.tcx;
- let def_id = tcx.hir().local_def_id(hir_id);
for attr in tcx.get_attrs(def_id.to_def_id()).iter() {
if tcx.sess.check_name(attr, SYMBOL_NAME) {
let def_id = def_id.to_def_id();
@@ -61,18 +61,18 @@
impl hir::itemlikevisit::ItemLikeVisitor<'tcx> for SymbolNamesTest<'tcx> {
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- self.process_attrs(item.hir_id);
+ self.process_attrs(item.def_id);
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
- self.process_attrs(trait_item.hir_id);
+ self.process_attrs(trait_item.def_id);
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
- self.process_attrs(impl_item.hir_id);
+ self.process_attrs(impl_item.def_id);
}
fn visit_foreign_item(&mut self, foreign_item: &'tcx hir::ForeignItem<'tcx>) {
- self.process_attrs(foreign_item.hir_id);
+ self.process_attrs(foreign_item.def_id);
}
}
diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs
index bbf7ecc..12c0a14 100644
--- a/compiler/rustc_symbol_mangling/src/v0.rs
+++ b/compiler/rustc_symbol_mangling/src/v0.rs
@@ -440,7 +440,7 @@
}
match sig.abi {
Abi::Rust => {}
- Abi::C => cx.push("KC"),
+ Abi::C { unwind: false } => cx.push("KC"),
abi => {
cx.push("K");
let name = abi.name();
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 9c49922..0deb118 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -603,6 +603,13 @@
Ty: TyAndLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>> + HasDataLayout + HasTargetSpec,
{
+ if abi == spec::abi::Abi::X86Interrupt {
+ if let Some(arg) = self.args.first_mut() {
+ arg.make_indirect_byval();
+ }
+ return Ok(());
+ }
+
match &cx.target_spec().arch[..] {
"x86" => {
let flavor = if abi == spec::abi::Abi::Fastcall {
diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs
index 3c65c84..a09c87b 100644
--- a/compiler/rustc_target/src/asm/mod.rs
+++ b/compiler/rustc_target/src/asm/mod.rs
@@ -13,7 +13,7 @@
$class:ident,
)*
}) => {
- #[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, Hash, HashStable_Generic)]
+ #[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, PartialOrd, Hash, HashStable_Generic)]
#[allow(non_camel_case_types)]
pub enum $arch_regclass {
$($class,)*
@@ -62,7 +62,7 @@
)*
}) => {
#[allow(unreachable_code)]
- #[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, Hash, HashStable_Generic)]
+ #[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, PartialOrd, Hash, HashStable_Generic)]
#[allow(non_camel_case_types)]
pub enum $arch_reg {
$($reg,)*
@@ -207,7 +207,18 @@
}
}
-#[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, Hash, HashStable_Generic)]
+#[derive(
+ Copy,
+ Clone,
+ Encodable,
+ Decodable,
+ Debug,
+ Eq,
+ PartialEq,
+ PartialOrd,
+ Hash,
+ HashStable_Generic
+)]
pub enum InlineAsmReg {
X86(X86InlineAsmReg),
Arm(ArmInlineAsmReg),
@@ -218,6 +229,8 @@
Mips(MipsInlineAsmReg),
SpirV(SpirVInlineAsmReg),
Wasm(WasmInlineAsmReg),
+ // Placeholder for invalid register constraints for the current target
+ Err,
}
impl InlineAsmReg {
@@ -229,6 +242,7 @@
Self::RiscV(r) => r.name(),
Self::Hexagon(r) => r.name(),
Self::Mips(r) => r.name(),
+ Self::Err => "<reg>",
}
}
@@ -240,6 +254,7 @@
Self::RiscV(r) => InlineAsmRegClass::RiscV(r.reg_class()),
Self::Hexagon(r) => InlineAsmRegClass::Hexagon(r.reg_class()),
Self::Mips(r) => InlineAsmRegClass::Mips(r.reg_class()),
+ Self::Err => InlineAsmRegClass::Err,
}
}
@@ -298,6 +313,7 @@
Self::RiscV(r) => r.emit(out, arch, modifier),
Self::Hexagon(r) => r.emit(out, arch, modifier),
Self::Mips(r) => r.emit(out, arch, modifier),
+ Self::Err => unreachable!("Use of InlineAsmReg::Err"),
}
}
@@ -309,11 +325,23 @@
Self::RiscV(_) => cb(self),
Self::Hexagon(r) => r.overlapping_regs(|r| cb(Self::Hexagon(r))),
Self::Mips(_) => cb(self),
+ Self::Err => unreachable!("Use of InlineAsmReg::Err"),
}
}
}
-#[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, Hash, HashStable_Generic)]
+#[derive(
+ Copy,
+ Clone,
+ Encodable,
+ Decodable,
+ Debug,
+ Eq,
+ PartialEq,
+ PartialOrd,
+ Hash,
+ HashStable_Generic
+)]
pub enum InlineAsmRegClass {
X86(X86InlineAsmRegClass),
Arm(ArmInlineAsmRegClass),
@@ -324,6 +352,8 @@
Mips(MipsInlineAsmRegClass),
SpirV(SpirVInlineAsmRegClass),
Wasm(WasmInlineAsmRegClass),
+ // Placeholder for invalid register constraints for the current target
+ Err,
}
impl InlineAsmRegClass {
@@ -338,6 +368,7 @@
Self::Mips(r) => r.name(),
Self::SpirV(r) => r.name(),
Self::Wasm(r) => r.name(),
+ Self::Err => rustc_span::symbol::sym::reg,
}
}
@@ -355,6 +386,7 @@
Self::Mips(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Mips),
Self::SpirV(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::SpirV),
Self::Wasm(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Wasm),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
@@ -379,6 +411,7 @@
Self::Mips(r) => r.suggest_modifier(arch, ty),
Self::SpirV(r) => r.suggest_modifier(arch, ty),
Self::Wasm(r) => r.suggest_modifier(arch, ty),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
@@ -399,6 +432,7 @@
Self::Mips(r) => r.default_modifier(arch),
Self::SpirV(r) => r.default_modifier(arch),
Self::Wasm(r) => r.default_modifier(arch),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
@@ -418,6 +452,7 @@
Self::Mips(r) => r.supported_types(arch),
Self::SpirV(r) => r.supported_types(arch),
Self::Wasm(r) => r.supported_types(arch),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
@@ -454,11 +489,23 @@
Self::Mips(r) => r.valid_modifiers(arch),
Self::SpirV(r) => r.valid_modifiers(arch),
Self::Wasm(r) => r.valid_modifiers(arch),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
}
-#[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, Hash, HashStable_Generic)]
+#[derive(
+ Copy,
+ Clone,
+ Encodable,
+ Decodable,
+ Debug,
+ Eq,
+ PartialEq,
+ PartialOrd,
+ Hash,
+ HashStable_Generic
+)]
pub enum InlineAsmRegOrRegClass {
Reg(InlineAsmReg),
RegClass(InlineAsmRegClass),
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index 1ad5758..fb747df 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -15,7 +15,6 @@
#![feature(never_type)]
#![feature(associated_type_bounds)]
#![feature(exhaustive_patterns)]
-#![feature(str_split_once)]
#[macro_use]
extern crate rustc_macros;
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
index 3a88197..758950b 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
@@ -4,12 +4,12 @@
pub fn target() -> Target {
let base = opts("ios", Arch::Arm64_macabi);
Target {
- llvm_target: "arm64-apple-ios-macabi".to_string(),
+ llvm_target: "arm64-apple-ios14.0-macabi".to_string(),
pointer_width: 64,
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
options: TargetOptions {
- features: "+neon,+fp-armv8,+apple-a7".to_string(),
+ features: "+neon,+fp-armv8,+apple-a12".to_string(),
eliminate_frame_pointer: false,
max_atomic_width: Some(128),
unsupported_abis: super::arm_base::unsupported_abis(),
@@ -18,11 +18,9 @@
// These arguments are not actually invoked - they just have
// to look right to pass App Store validation.
bitcode_llvm_cmdline: "-triple\0\
- arm64-apple-ios-macabi\0\
+ arm64-apple-ios14.0-macabi\0\
-emit-obj\0\
-disable-llvm-passes\0\
- -target-abi\0\
- darwinpcs\0\
-Os\0"
.to_string(),
..base
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs
new file mode 100644
index 0000000..e594cee
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs
@@ -0,0 +1,39 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = opts("ios", Arch::Arm64_sim);
+
+ // Clang automatically chooses a more specific target based on
+ // IPHONEOS_DEPLOYMENT_TARGET.
+ // This is required for the simulator target to pick the right
+ // MACH-O commands, so we do too.
+ let arch = "arm64";
+ let llvm_target = super::apple_base::ios_sim_llvm_target(arch);
+
+ Target {
+ llvm_target: llvm_target,
+ pointer_width: 64,
+ data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
+ arch: "aarch64".to_string(),
+ options: TargetOptions {
+ features: "+neon,+fp-armv8,+apple-a7".to_string(),
+ eliminate_frame_pointer: false,
+ max_atomic_width: Some(128),
+ unsupported_abis: super::arm_base::unsupported_abis(),
+ forces_embed_bitcode: true,
+ // Taken from a clang build on Xcode 11.4.1.
+ // These arguments are not actually invoked - they just have
+ // to look right to pass App Store validation.
+ bitcode_llvm_cmdline: "-triple\0\
+ arm64-apple-ios14.0-simulator\0\
+ -emit-obj\0\
+ -disable-llvm-passes\0\
+ -target-abi\0\
+ darwinpcs\0\
+ -Os\0"
+ .to_string(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/abi.rs b/compiler/rustc_target/src/spec/abi.rs
index 65e8a4e..17eb33b 100644
--- a/compiler/rustc_target/src/spec/abi.rs
+++ b/compiler/rustc_target/src/spec/abi.rs
@@ -8,24 +8,21 @@
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)]
#[derive(HashStable_Generic, Encodable, Decodable)]
pub enum Abi {
- // N.B., this ordering MUST match the AbiDatas array below.
- // (This is ensured by the test indices_are_correct().)
-
// Multiplatform / generic ABIs
//
// These ABIs come first because every time we add a new ABI, we
// have to re-bless all the hashing tests. These are used in many
// places, so giving them stable values reduces test churn. The
// specific values are meaningless.
- Rust = 0,
- C = 1,
+ Rust,
+ C { unwind: bool },
// Single platform ABIs
Cdecl,
- Stdcall,
+ Stdcall { unwind: bool },
Fastcall,
Vectorcall,
- Thiscall,
+ Thiscall { unwind: bool },
Aapcs,
Win64,
SysV64,
@@ -39,7 +36,7 @@
CCmseNonSecureCall,
// Multiplatform / generic ABIs
- System,
+ System { unwind: bool },
RustIntrinsic,
RustCall,
PlatformIntrinsic,
@@ -61,13 +58,16 @@
const AbiDatas: &[AbiData] = &[
// Cross-platform ABIs
AbiData { abi: Abi::Rust, name: "Rust", generic: true },
- AbiData { abi: Abi::C, name: "C", generic: true },
+ AbiData { abi: Abi::C { unwind: false }, name: "C", generic: true },
+ AbiData { abi: Abi::C { unwind: true }, name: "C-unwind", generic: true },
// Platform-specific ABIs
AbiData { abi: Abi::Cdecl, name: "cdecl", generic: false },
- AbiData { abi: Abi::Stdcall, name: "stdcall", generic: false },
+ AbiData { abi: Abi::Stdcall { unwind: false }, name: "stdcall", generic: false },
+ AbiData { abi: Abi::Stdcall { unwind: true }, name: "stdcall-unwind", generic: false },
AbiData { abi: Abi::Fastcall, name: "fastcall", generic: false },
AbiData { abi: Abi::Vectorcall, name: "vectorcall", generic: false },
- AbiData { abi: Abi::Thiscall, name: "thiscall", generic: false },
+ AbiData { abi: Abi::Thiscall { unwind: false }, name: "thiscall", generic: false },
+ AbiData { abi: Abi::Thiscall { unwind: true }, name: "thiscall-unwind", generic: false },
AbiData { abi: Abi::Aapcs, name: "aapcs", generic: false },
AbiData { abi: Abi::Win64, name: "win64", generic: false },
AbiData { abi: Abi::SysV64, name: "sysv64", generic: false },
@@ -84,7 +84,8 @@
},
AbiData { abi: Abi::CCmseNonSecureCall, name: "C-cmse-nonsecure-call", generic: false },
// Cross-platform ABIs
- AbiData { abi: Abi::System, name: "system", generic: true },
+ AbiData { abi: Abi::System { unwind: false }, name: "system", generic: true },
+ AbiData { abi: Abi::System { unwind: true }, name: "system-unwind", generic: true },
AbiData { abi: Abi::RustIntrinsic, name: "rust-intrinsic", generic: true },
AbiData { abi: Abi::RustCall, name: "rust-call", generic: true },
AbiData { abi: Abi::PlatformIntrinsic, name: "platform-intrinsic", generic: true },
@@ -103,7 +104,52 @@
impl Abi {
#[inline]
pub fn index(self) -> usize {
- self as usize
+ // N.B., this ordering MUST match the AbiDatas array above.
+ // (This is ensured by the test indices_are_correct().)
+ use Abi::*;
+ let i = match self {
+ // Cross-platform ABIs
+ Rust => 0,
+ C { unwind: false } => 1,
+ C { unwind: true } => 2,
+ // Platform-specific ABIs
+ Cdecl => 3,
+ Stdcall { unwind: false } => 4,
+ Stdcall { unwind: true } => 5,
+ Fastcall => 6,
+ Vectorcall => 7,
+ Thiscall { unwind: false } => 8,
+ Thiscall { unwind: true } => 9,
+ Aapcs => 10,
+ Win64 => 11,
+ SysV64 => 12,
+ PtxKernel => 13,
+ Msp430Interrupt => 14,
+ X86Interrupt => 15,
+ AmdGpuKernel => 16,
+ EfiApi => 17,
+ AvrInterrupt => 18,
+ AvrNonBlockingInterrupt => 19,
+ CCmseNonSecureCall => 20,
+ // Cross-platform ABIs
+ System { unwind: false } => 21,
+ System { unwind: true } => 22,
+ RustIntrinsic => 23,
+ RustCall => 24,
+ PlatformIntrinsic => 25,
+ Unadjusted => 26,
+ };
+ debug_assert!(
+ AbiDatas
+ .iter()
+ .enumerate()
+ .find(|(_, AbiData { abi, .. })| *abi == self)
+ .map(|(index, _)| index)
+ .expect("abi variant has associated data")
+ == i,
+ "Abi index did not match `AbiDatas` ordering"
+ );
+ i
}
#[inline]
@@ -122,6 +168,8 @@
impl fmt::Display for Abi {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "\"{}\"", self.name())
+ match self {
+ abi => write!(f, "\"{}\"", abi.name()),
+ }
}
}
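// ---------------------------------------------------------------------------
// Editorial note, not part of the imported patch: with the new `unwind: bool`
// payloads, `Abi` is no longer a field-less enum, so `self as usize` can no
// longer index `AbiDatas`; the patch replaces it with an explicit match that a
// debug assertion cross-checks against the table ordering. A minimal
// standalone sketch of that pattern (`MiniAbi` / `MINI_ABI_DATA` are
// hypothetical names):
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum MiniAbi {
    Rust,
    C { unwind: bool },
}

const MINI_ABI_DATA: &[(MiniAbi, &str)] = &[
    (MiniAbi::Rust, "Rust"),
    (MiniAbi::C { unwind: false }, "C"),
    (MiniAbi::C { unwind: true }, "C-unwind"),
];

impl MiniAbi {
    fn index(self) -> usize {
        let i = match self {
            MiniAbi::Rust => 0,
            MiniAbi::C { unwind: false } => 1,
            MiniAbi::C { unwind: true } => 2,
        };
        // Keep the hand-written indices honest against the table ordering.
        debug_assert_eq!(MINI_ABI_DATA[i].0, self);
        i
    }

    fn name(self) -> &'static str {
        MINI_ABI_DATA[self.index()].1
    }
}

fn main() {
    assert_eq!(MiniAbi::C { unwind: true }.name(), "C-unwind");
}
// ---------------------------------------------------------------------------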
diff --git a/compiler/rustc_target/src/spec/apple_base.rs b/compiler/rustc_target/src/spec/apple_base.rs
index 3b45896..23f1357a 100644
--- a/compiler/rustc_target/src/spec/apple_base.rs
+++ b/compiler/rustc_target/src/spec/apple_base.rs
@@ -54,14 +54,16 @@
}
}
-fn macos_deployment_target() -> (u32, u32) {
- let deployment_target = env::var("MACOSX_DEPLOYMENT_TARGET").ok();
- let version = deployment_target
+fn deployment_target(var_name: &str) -> Option<(u32, u32)> {
+ let deployment_target = env::var(var_name).ok();
+ deployment_target
.as_ref()
.and_then(|s| s.split_once('.'))
- .and_then(|(a, b)| a.parse::<u32>().and_then(|a| b.parse::<u32>().map(|b| (a, b))).ok());
+ .and_then(|(a, b)| a.parse::<u32>().and_then(|a| b.parse::<u32>().map(|b| (a, b))).ok())
+}
- version.unwrap_or((10, 7))
+fn macos_deployment_target() -> (u32, u32) {
+ deployment_target("MACOSX_DEPLOYMENT_TARGET").unwrap_or((10, 7))
}
pub fn macos_llvm_target(arch: &str) -> String {
@@ -84,3 +86,12 @@
env_remove.push("IPHONEOS_DEPLOYMENT_TARGET".to_string());
env_remove
}
+
+fn ios_deployment_target() -> (u32, u32) {
+ deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((7, 0))
+}
+
+pub fn ios_sim_llvm_target(arch: &str) -> String {
+ let (major, minor) = ios_deployment_target();
+ format!("{}-apple-ios{}.{}.0-simulator", arch, major, minor)
+}
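// ---------------------------------------------------------------------------
// Editorial note, not part of the imported patch: the apple_base change
// factors the "MAJOR.MINOR" deployment-target parsing out of the macOS path so
// the new iOS-simulator triple can reuse it. A standalone sketch of the same
// parsing and triple construction (reads from a &str rather than the
// environment; function names are illustrative):
fn parse_deployment_target(s: &str) -> Option<(u32, u32)> {
    let (major, minor) = s.split_once('.')?;
    Some((major.parse().ok()?, minor.parse().ok()?))
}

fn ios_sim_llvm_target(arch: &str, raw: Option<&str>) -> String {
    // Fall back to the 7.0 default when the value is unset or malformed.
    let (major, minor) = raw.and_then(parse_deployment_target).unwrap_or((7, 0));
    format!("{}-apple-ios{}.{}.0-simulator", arch, major, minor)
}

fn main() {
    assert_eq!(parse_deployment_target("14.3"), Some((14, 3)));
    assert_eq!(ios_sim_llvm_target("arm64", Some("14.3")), "arm64-apple-ios14.3.0-simulator");
    assert_eq!(ios_sim_llvm_target("arm64", None), "arm64-apple-ios7.0.0-simulator");
}
// ---------------------------------------------------------------------------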
diff --git a/compiler/rustc_target/src/spec/apple_sdk_base.rs b/compiler/rustc_target/src/spec/apple_sdk_base.rs
index d894f75..538c4ca 100644
--- a/compiler/rustc_target/src/spec/apple_sdk_base.rs
+++ b/compiler/rustc_target/src/spec/apple_sdk_base.rs
@@ -11,6 +11,7 @@
X86_64,
X86_64_macabi,
Arm64_macabi,
+ Arm64_sim,
}
fn target_cpu(arch: Arch) -> String {
@@ -22,13 +23,16 @@
X86_64 => "core2",
X86_64_macabi => "core2",
Arm64_macabi => "apple-a12",
+ Arm64_sim => "apple-a12",
}
.to_string()
}
fn link_env_remove(arch: Arch) -> Vec<String> {
match arch {
- Armv7 | Armv7s | Arm64 | I386 | X86_64 => vec!["MACOSX_DEPLOYMENT_TARGET".to_string()],
+ Armv7 | Armv7s | Arm64 | I386 | X86_64 | Arm64_sim => {
+ vec!["MACOSX_DEPLOYMENT_TARGET".to_string()]
+ }
X86_64_macabi | Arm64_macabi => vec!["IPHONEOS_DEPLOYMENT_TARGET".to_string()],
}
}
diff --git a/compiler/rustc_target/src/spec/arm_base.rs b/compiler/rustc_target/src/spec/arm_base.rs
index b74d80d..01f5733 100644
--- a/compiler/rustc_target/src/spec/arm_base.rs
+++ b/compiler/rustc_target/src/spec/arm_base.rs
@@ -2,5 +2,14 @@
// All the calling conventions trigger an assertion(Unsupported calling convention) in llvm on arm
pub fn unsupported_abis() -> Vec<Abi> {
- vec![Abi::Stdcall, Abi::Fastcall, Abi::Vectorcall, Abi::Thiscall, Abi::Win64, Abi::SysV64]
+ vec![
+ Abi::Stdcall { unwind: false },
+ Abi::Stdcall { unwind: true },
+ Abi::Fastcall,
+ Abi::Vectorcall,
+ Abi::Thiscall { unwind: false },
+ Abi::Thiscall { unwind: true },
+ Abi::Win64,
+ Abi::SysV64,
+ ]
}
diff --git a/compiler/rustc_target/src/spec/crt_objects.rs b/compiler/rustc_target/src/spec/crt_objects.rs
index 32da16a..2fc9ab2 100644
--- a/compiler/rustc_target/src/spec/crt_objects.rs
+++ b/compiler/rustc_target/src/spec/crt_objects.rs
@@ -64,17 +64,24 @@
pub(super) fn pre_musl_fallback() -> CrtObjects {
new(&[
- (LinkOutputKind::DynamicNoPicExe, &["crt1.o", "crti.o"]),
- (LinkOutputKind::DynamicPicExe, &["Scrt1.o", "crti.o"]),
- (LinkOutputKind::StaticNoPicExe, &["crt1.o", "crti.o"]),
- (LinkOutputKind::StaticPicExe, &["rcrt1.o", "crti.o"]),
- (LinkOutputKind::DynamicDylib, &["crti.o"]),
- (LinkOutputKind::StaticDylib, &["crti.o"]),
+ (LinkOutputKind::DynamicNoPicExe, &["crt1.o", "crti.o", "crtbegin.o"]),
+ (LinkOutputKind::DynamicPicExe, &["Scrt1.o", "crti.o", "crtbeginS.o"]),
+ (LinkOutputKind::StaticNoPicExe, &["crt1.o", "crti.o", "crtbegin.o"]),
+ (LinkOutputKind::StaticPicExe, &["rcrt1.o", "crti.o", "crtbeginS.o"]),
+ (LinkOutputKind::DynamicDylib, &["crti.o", "crtbeginS.o"]),
+ (LinkOutputKind::StaticDylib, &["crti.o", "crtbeginS.o"]),
])
}
pub(super) fn post_musl_fallback() -> CrtObjects {
- all("crtn.o")
+ new(&[
+ (LinkOutputKind::DynamicNoPicExe, &["crtend.o", "crtn.o"]),
+ (LinkOutputKind::DynamicPicExe, &["crtendS.o", "crtn.o"]),
+ (LinkOutputKind::StaticNoPicExe, &["crtend.o", "crtn.o"]),
+ (LinkOutputKind::StaticPicExe, &["crtendS.o", "crtn.o"]),
+ (LinkOutputKind::DynamicDylib, &["crtendS.o", "crtn.o"]),
+ (LinkOutputKind::StaticDylib, &["crtendS.o", "crtn.o"]),
+ ])
}
pub(super) fn pre_mingw_fallback() -> CrtObjects {
@@ -101,11 +108,13 @@
}
pub(super) fn pre_wasi_fallback() -> CrtObjects {
+ // Use crt1-command.o instead of crt1.o to enable support for new-style
+ // commands. See https://reviews.llvm.org/D81689 for more info.
new(&[
- (LinkOutputKind::DynamicNoPicExe, &["crt1.o"]),
- (LinkOutputKind::DynamicPicExe, &["crt1.o"]),
- (LinkOutputKind::StaticNoPicExe, &["crt1.o"]),
- (LinkOutputKind::StaticPicExe, &["crt1.o"]),
+ (LinkOutputKind::DynamicNoPicExe, &["crt1-command.o"]),
+ (LinkOutputKind::DynamicPicExe, &["crt1-command.o"]),
+ (LinkOutputKind::StaticNoPicExe, &["crt1-command.o"]),
+ (LinkOutputKind::StaticPicExe, &["crt1-command.o"]),
(LinkOutputKind::WasiReactorExe, &["crt1-reactor.o"]),
])
}
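// ---------------------------------------------------------------------------
// Editorial note, not part of the imported patch: the musl fallback now
// brackets every link-output kind with matching begin/end objects instead of a
// single shared `crtn.o`. A toy sketch of the resulting link-line order
// (`LinkKind` and the helper functions are simplified stand-ins):
enum LinkKind {
    DynamicPicExe,
    StaticPicExe,
}

fn pre_objects(kind: LinkKind) -> &'static [&'static str] {
    match kind {
        LinkKind::DynamicPicExe => &["Scrt1.o", "crti.o", "crtbeginS.o"],
        LinkKind::StaticPicExe => &["rcrt1.o", "crti.o", "crtbeginS.o"],
    }
}

fn post_objects(_kind: LinkKind) -> &'static [&'static str] {
    &["crtendS.o", "crtn.o"]
}

fn main() {
    // Final object order on the link line: pre objects, user code, post objects.
    let mut line: Vec<&str> = pre_objects(LinkKind::DynamicPicExe).to_vec();
    line.push("main.o");
    line.extend_from_slice(post_objects(LinkKind::DynamicPicExe));
    assert_eq!(line, ["Scrt1.o", "crti.o", "crtbeginS.o", "main.o", "crtendS.o", "crtn.o"]);
    assert_eq!(pre_objects(LinkKind::StaticPicExe)[0], "rcrt1.o");
}
// ---------------------------------------------------------------------------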
diff --git a/compiler/rustc_target/src/spec/i386_apple_ios.rs b/compiler/rustc_target/src/spec/i386_apple_ios.rs
index 2f94fbc..f5d7be4 100644
--- a/compiler/rustc_target/src/spec/i386_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/i386_apple_ios.rs
@@ -12,7 +12,7 @@
arch: "x86".to_string(),
options: TargetOptions {
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
stack_probes: StackProbeType::Call,
..base
},
diff --git a/compiler/rustc_target/src/spec/i686_apple_darwin.rs b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
index 3181308..06d71db 100644
--- a/compiler/rustc_target/src/spec/i686_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
@@ -6,7 +6,7 @@
base.max_atomic_width = Some(64);
base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m32".to_string()]);
base.link_env_remove.extend(super::apple_base::macos_link_env_remove());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
base.eliminate_frame_pointer = false;
diff --git a/compiler/rustc_target/src/spec/i686_linux_android.rs b/compiler/rustc_target/src/spec/i686_linux_android.rs
index 89eb41e..19d7b3c 100644
--- a/compiler/rustc_target/src/spec/i686_linux_android.rs
+++ b/compiler/rustc_target/src/spec/i686_linux_android.rs
@@ -11,7 +11,7 @@
// http://developer.android.com/ndk/guides/abis.html#x86
base.cpu = "pentiumpro".to_string();
base.features = "+mmx,+sse,+sse2,+sse3,+ssse3".to_string();
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs b/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
index a8cc099..75c3ea6 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
@@ -7,8 +7,7 @@
let pre_link_args = base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap();
pre_link_args.push("-m32".to_string());
pre_link_args.push("-Wl,-znotext".to_string());
-
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/i686_unknown_haiku.rs b/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
index 0807913..e4c01db 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
@@ -5,7 +5,7 @@
base.cpu = "pentium4".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m32".to_string()]);
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
index 656136c..9daf1d3 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
@@ -5,7 +5,7 @@
base.cpu = "pentium4".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
index cb154b7..5fd5400 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
@@ -6,7 +6,7 @@
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-Wl,-melf_i386".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
// The unwinder used by i686-unknown-linux-musl, the LLVM libunwind
diff --git a/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
index 26bdc04..716bb9e 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
@@ -5,7 +5,7 @@
base.cpu = "pentium4".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs b/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
index e6a244c..e53462c 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
@@ -6,7 +6,7 @@
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-fuse-ld=lld".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs b/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
index 8732b47..2f8702d 100644
--- a/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
@@ -5,7 +5,7 @@
base.cpu = "pentium4".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/linux_kernel_base.rs b/compiler/rustc_target/src/spec/linux_kernel_base.rs
index ddb9a7b..738ff38 100644
--- a/compiler/rustc_target/src/spec/linux_kernel_base.rs
+++ b/compiler/rustc_target/src/spec/linux_kernel_base.rs
@@ -13,7 +13,7 @@
env: "gnu".to_string(),
disable_redzone: true,
panic_strategy: PanicStrategy::Abort,
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
stack_probes: StackProbeType::Call,
eliminate_frame_pointer: false,
linker_is_gnu: true,
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_none.rs b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
index 0f9d3c3..110c8dd 100644
--- a/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
@@ -23,10 +23,12 @@
panic_strategy: PanicStrategy::Abort,
relocation_model: RelocModel::Static,
unsupported_abis: vec![
- Abi::Stdcall,
+ Abi::Stdcall { unwind: false },
+ Abi::Stdcall { unwind: true },
Abi::Fastcall,
Abi::Vectorcall,
- Abi::Thiscall,
+ Abi::Thiscall { unwind: false },
+ Abi::Thiscall { unwind: true },
Abi::Win64,
Abi::SysV64,
],
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index 7a93bac..c9fffd2 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -589,7 +589,7 @@
Ok(StackProbeType::InlineOrCall { min_llvm_version_for_inline })
}
_ => Err(String::from(
- "`kind` expected to be one of `inline-or-none`, `call` or `inline-or-call`",
+ "`kind` expected to be one of `none`, `inline`, `call` or `inline-or-call`",
)),
}
}
@@ -641,6 +641,7 @@
("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu),
("powerpc64le-unknown-linux-musl", powerpc64le_unknown_linux_musl),
("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu),
+ ("s390x-unknown-linux-musl", s390x_unknown_linux_musl),
("sparc-unknown-linux-gnu", sparc_unknown_linux_gnu),
("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu),
("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi),
@@ -678,7 +679,7 @@
("thumbv7neon-linux-androideabi", thumbv7neon_linux_androideabi),
("aarch64-linux-android", aarch64_linux_android),
- ("x86_64-linux-kernel", x86_64_linux_kernel),
+ ("x86_64-unknown-none-linuxkernel", x86_64_unknown_none_linuxkernel),
("aarch64-unknown-freebsd", aarch64_unknown_freebsd),
("armv6-unknown-freebsd", armv6_unknown_freebsd),
@@ -693,6 +694,7 @@
("i686-unknown-openbsd", i686_unknown_openbsd),
("sparc64-unknown-openbsd", sparc64_unknown_openbsd),
("x86_64-unknown-openbsd", x86_64_unknown_openbsd),
+ ("powerpc-unknown-openbsd", powerpc_unknown_openbsd),
("aarch64-unknown-netbsd", aarch64_unknown_netbsd),
("armv6-unknown-netbsd-eabihf", armv6_unknown_netbsd_eabihf),
@@ -701,7 +703,6 @@
("powerpc-unknown-netbsd", powerpc_unknown_netbsd),
("sparc64-unknown-netbsd", sparc64_unknown_netbsd),
("x86_64-unknown-netbsd", x86_64_unknown_netbsd),
- ("x86_64-rumprun-netbsd", x86_64_rumprun_netbsd),
("i686-unknown-haiku", i686_unknown_haiku),
("x86_64-unknown-haiku", x86_64_unknown_haiku),
@@ -727,6 +728,7 @@
("armv7s-apple-ios", armv7s_apple_ios),
("x86_64-apple-ios-macabi", x86_64_apple_ios_macabi),
("aarch64-apple-ios-macabi", aarch64_apple_ios_macabi),
+ ("aarch64-apple-ios-sim", aarch64_apple_ios_sim),
("aarch64-apple-tvos", aarch64_apple_tvos),
("x86_64-apple-tvos", x86_64_apple_tvos),
@@ -735,9 +737,8 @@
("armv7r-none-eabi", armv7r_none_eabi),
("armv7r-none-eabihf", armv7r_none_eabihf),
- // `x86_64-pc-solaris` is an alias for `x86_64_sun_solaris` for backwards compatibility reasons.
- // (See <https://github.com/rust-lang/rust/issues/40531>.)
- ("x86_64-sun-solaris", "x86_64-pc-solaris", x86_64_sun_solaris),
+ ("x86_64-pc-solaris", x86_64_pc_solaris),
+ ("x86_64-sun-solaris", x86_64_sun_solaris),
("sparcv9-sun-solaris", sparcv9_sun_solaris),
("x86_64-unknown-illumos", x86_64_unknown_illumos),
@@ -777,15 +778,18 @@
("aarch64-unknown-hermit", aarch64_unknown_hermit),
("x86_64-unknown-hermit", x86_64_unknown_hermit),
- ("x86_64-unknown-hermit-kernel", x86_64_unknown_hermit_kernel),
+
+ ("x86_64-unknown-none-hermitkernel", x86_64_unknown_none_hermitkernel),
("riscv32i-unknown-none-elf", riscv32i_unknown_none_elf),
("riscv32imc-unknown-none-elf", riscv32imc_unknown_none_elf),
("riscv32imac-unknown-none-elf", riscv32imac_unknown_none_elf),
("riscv32gc-unknown-linux-gnu", riscv32gc_unknown_linux_gnu),
+ ("riscv32gc-unknown-linux-musl", riscv32gc_unknown_linux_musl),
("riscv64imac-unknown-none-elf", riscv64imac_unknown_none_elf),
("riscv64gc-unknown-none-elf", riscv64gc_unknown_none_elf),
("riscv64gc-unknown-linux-gnu", riscv64gc_unknown_linux_gnu),
+ ("riscv64gc-unknown-linux-musl", riscv64gc_unknown_linux_musl),
("aarch64-unknown-none", aarch64_unknown_none),
("aarch64-unknown-none-softfloat", aarch64_unknown_none_softfloat),
@@ -1280,24 +1284,31 @@
/// Given a function ABI, turn it into the correct ABI for this target.
pub fn adjust_abi(&self, abi: Abi) -> Abi {
match abi {
- Abi::System => {
+ Abi::System { unwind } => {
if self.is_like_windows && self.arch == "x86" {
- Abi::Stdcall
+ Abi::Stdcall { unwind }
} else {
- Abi::C
+ Abi::C { unwind }
}
}
// These ABI kinds are ignored on non-x86 Windows targets.
// See https://docs.microsoft.com/en-us/cpp/cpp/argument-passing-and-naming-conventions
// and the individual pages for __stdcall et al.
- Abi::Stdcall | Abi::Fastcall | Abi::Vectorcall | Abi::Thiscall => {
- if self.is_like_windows && self.arch != "x86" { Abi::C } else { abi }
+ Abi::Stdcall { unwind } | Abi::Thiscall { unwind } => {
+ if self.is_like_windows && self.arch != "x86" { Abi::C { unwind } } else { abi }
+ }
+ Abi::Fastcall | Abi::Vectorcall => {
+ if self.is_like_windows && self.arch != "x86" {
+ Abi::C { unwind: false }
+ } else {
+ abi
+ }
}
Abi::EfiApi => {
if self.arch == "x86_64" {
Abi::Win64
} else {
- Abi::C
+ Abi::C { unwind: false }
}
}
abi => abi,
@@ -1492,7 +1503,7 @@
} );
($key_name:ident = $json_name:expr, optional) => ( {
let name = $json_name;
- if let Some(o) = obj.find(&name[..]) {
+ if let Some(o) = obj.find(name) {
base.$key_name = o
.as_string()
.map(|s| s.to_string() );
@@ -1983,24 +1994,6 @@
Ok(TargetTriple::TargetPath(canonicalized_path))
}
- /// Creates a target triple from its alias
- pub fn from_alias(triple: String) -> Self {
- macro_rules! target_aliases {
- ( $(($alias:literal, $target:literal ),)+ ) => {
- match triple.as_str() {
- $( $alias => TargetTriple::from_triple($target), )+
- _ => TargetTriple::TargetTriple(triple),
- }
- }
- }
-
- target_aliases! {
- // `x86_64-pc-solaris` is an alias for `x86_64_sun_solaris` for backwards compatibility reasons.
- // (See <https://github.com/rust-lang/rust/issues/40531>.)
- ("x86_64-pc-solaris", "x86_64-sun-solaris"),
- }
- }
-
/// Returns a string triple for this target.
///
/// If this target is a path, the file name (without extension) is returned.
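// ---------------------------------------------------------------------------
// Editorial note, not part of the imported patch: `adjust_abi` now threads the
// `unwind` flag through the generic "system" ABI, so e.g. a "system-unwind"
// function lowers to stdcall-unwind on 32-bit Windows and to C-unwind
// elsewhere. A simplified standalone sketch (the `Abi` and `TargetInfo` types
// here are cut-down stand-ins):
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Abi {
    C { unwind: bool },
    Stdcall { unwind: bool },
    System { unwind: bool },
}

struct TargetInfo {
    is_like_windows: bool,
    arch: &'static str,
}

fn adjust_abi(target: &TargetInfo, abi: Abi) -> Abi {
    match abi {
        Abi::System { unwind } => {
            if target.is_like_windows && target.arch == "x86" {
                Abi::Stdcall { unwind }
            } else {
                Abi::C { unwind }
            }
        }
        // stdcall is meaningless on non-x86 Windows targets; fall back to C.
        Abi::Stdcall { unwind } if target.is_like_windows && target.arch != "x86" => {
            Abi::C { unwind }
        }
        other => other,
    }
}

fn main() {
    let win32 = TargetInfo { is_like_windows: true, arch: "x86" };
    let linux64 = TargetInfo { is_like_windows: false, arch: "x86_64" };
    assert_eq!(adjust_abi(&win32, Abi::System { unwind: true }), Abi::Stdcall { unwind: true });
    assert_eq!(adjust_abi(&linux64, Abi::System { unwind: true }), Abi::C { unwind: true });
}
// ---------------------------------------------------------------------------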
diff --git a/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
index 3c9c7d5..15d8e48 100644
--- a/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
+++ b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
@@ -49,10 +49,12 @@
// create the tests for this.
unsupported_abis: vec![
Abi::Cdecl,
- Abi::Stdcall,
+ Abi::Stdcall { unwind: false },
+ Abi::Stdcall { unwind: true },
Abi::Fastcall,
Abi::Vectorcall,
- Abi::Thiscall,
+ Abi::Thiscall { unwind: false },
+ Abi::Thiscall { unwind: true },
Abi::Aapcs,
Abi::Win64,
Abi::SysV64,
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
index 751022c..9db880b 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
@@ -14,7 +14,7 @@
Target {
llvm_target: "powerpc64-unknown-linux-gnu".to_string(),
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64".to_string(),
+ data_layout: "E-m:e-i64:64-n32:64-v256:256:256-v512:512:512".to_string(),
arch: "powerpc64".to_string(),
options: TargetOptions { endian: Endian::Big, mcount: "_mcount".to_string(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
index 546dfba..8767f86 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
@@ -10,7 +10,7 @@
Target {
llvm_target: "powerpc64-unknown-linux-musl".to_string(),
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64".to_string(),
+ data_layout: "E-m:e-i64:64-n32:64-v256:256:256-v512:512:512".to_string(),
arch: "powerpc64".to_string(),
options: TargetOptions { endian: Endian::Big, mcount: "_mcount".to_string(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
index bb55872..2f28a85 100644
--- a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
@@ -10,7 +10,7 @@
Target {
llvm_target: "powerpc64-unknown-linux-gnu".to_string(),
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64".to_string(),
+ data_layout: "E-m:e-i64:64-n32:64-v256:256:256-v512:512:512".to_string(),
arch: "powerpc64".to_string(),
options: TargetOptions { endian: Endian::Big, ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
index 07e0bf8..4cbd997 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
@@ -9,7 +9,7 @@
Target {
llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
pointer_width: 64,
- data_layout: "e-m:e-i64:64-n32:64".to_string(),
+ data_layout: "e-m:e-i64:64-n32:64-v256:256:256-v512:512:512".to_string(),
arch: "powerpc64".to_string(),
options: TargetOptions { mcount: "_mcount".to_string(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
index 41c78a5..efdc9ad 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
@@ -9,7 +9,7 @@
Target {
llvm_target: "powerpc64le-unknown-linux-musl".to_string(),
pointer_width: 64,
- data_layout: "e-m:e-i64:64-n32:64".to_string(),
+ data_layout: "e-m:e-i64:64-n32:64-v256:256:256-v512:512:512".to_string(),
arch: "powerpc64".to_string(),
options: TargetOptions { mcount: "_mcount".to_string(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
new file mode 100644
index 0000000..c17183f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
@@ -0,0 +1,16 @@
+use crate::abi::Endian;
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::openbsd_base::opts();
+ base.endian = Endian::Big;
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "powerpc-unknown-openbsd".to_string(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
+ arch: "powerpc".to_string(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_musl.rs
new file mode 100644
index 0000000..e5fbd09
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_musl.rs
@@ -0,0 +1,19 @@
+use crate::spec::{CodeModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv32-unknown-linux-musl".to_string(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".to_string(),
+ arch: "riscv32".to_string(),
+ options: TargetOptions {
+ unsupported_abis: super::riscv_base::unsupported_abis(),
+ code_model: Some(CodeModel::Medium),
+ cpu: "generic-rv32".to_string(),
+ features: "+m,+a,+f,+d,+c".to_string(),
+ llvm_abiname: "ilp32d".to_string(),
+ max_atomic_width: Some(32),
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs
new file mode 100644
index 0000000..0232b15
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs
@@ -0,0 +1,19 @@
+use crate::spec::{CodeModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv64-unknown-linux-musl".to_string(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".to_string(),
+ arch: "riscv64".to_string(),
+ options: TargetOptions {
+ unsupported_abis: super::riscv_base::unsupported_abis(),
+ code_model: Some(CodeModel::Medium),
+ cpu: "generic-rv64".to_string(),
+ features: "+m,+a,+f,+d,+c".to_string(),
+ llvm_abiname: "lp64d".to_string(),
+ max_atomic_width: Some(64),
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
index 33a785f..aa823b1 100644
--- a/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
@@ -11,6 +11,7 @@
options: TargetOptions {
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
linker: Some("rust-lld".to_string()),
+ llvm_abiname: "lp64d".to_string(),
cpu: "generic-rv64".to_string(),
max_atomic_width: Some(64),
atomic_cas: true,
diff --git a/compiler/rustc_target/src/spec/riscv_base.rs b/compiler/rustc_target/src/spec/riscv_base.rs
index 64cf890..5bcbb2e 100644
--- a/compiler/rustc_target/src/spec/riscv_base.rs
+++ b/compiler/rustc_target/src/spec/riscv_base.rs
@@ -5,10 +5,12 @@
pub fn unsupported_abis() -> Vec<Abi> {
vec![
Abi::Cdecl,
- Abi::Stdcall,
+ Abi::Stdcall { unwind: false },
+ Abi::Stdcall { unwind: true },
Abi::Fastcall,
Abi::Vectorcall,
- Abi::Thiscall,
+ Abi::Thiscall { unwind: false },
+ Abi::Thiscall { unwind: true },
Abi::Aapcs,
Abi::Win64,
Abi::SysV64,
diff --git a/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs
new file mode 100644
index 0000000..4f811ce
--- /dev/null
+++ b/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs
@@ -0,0 +1,24 @@
+use crate::abi::Endian;
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.endian = Endian::Big;
+ // z10 is the oldest CPU supported by LLVM
+ base.cpu = "z10".to_string();
+ // FIXME: The data_layout string below and the ABI implementation in
+ // cabi_s390x.rs are for now hard-coded to assume the no-vector ABI.
+ // Pass the -vector feature string to LLVM to respect this assumption.
+ base.features = "-vector".to_string();
+ base.max_atomic_width = Some(64);
+ base.min_global_align = Some(16);
+ base.static_position_independent_executables = true;
+
+ Target {
+ llvm_target: "s390x-unknown-linux-musl".to_string(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".to_string(),
+ arch: "s390x".to_string(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/solaris_base.rs b/compiler/rustc_target/src/spec/solaris_base.rs
index 33e0cf8..59731f2 100644
--- a/compiler/rustc_target/src/spec/solaris_base.rs
+++ b/compiler/rustc_target/src/spec/solaris_base.rs
@@ -3,7 +3,6 @@
pub fn opts() -> TargetOptions {
TargetOptions {
os: "solaris".to_string(),
- vendor: "sun".to_string(),
dynamic_linking: true,
executables: true,
has_rpath: true,
diff --git a/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
index 9ac56ca..abc46a8 100644
--- a/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
+++ b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
@@ -7,6 +7,7 @@
base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m64".to_string()]);
// llvm calls this "v9"
base.cpu = "v9".to_string();
+ base.vendor = "sun".to_string();
base.max_atomic_width = Some(64);
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
index 6affd7f..570cf79 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
@@ -10,7 +10,7 @@
vec!["-m64".to_string(), "-arch".to_string(), "x86_64".to_string()],
);
base.link_env_remove.extend(super::apple_base::macos_link_env_remove());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
// Clang automatically chooses a more specific target based on
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
index ddf6870..adb8771 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
@@ -11,7 +11,7 @@
arch: "x86_64".to_string(),
options: TargetOptions {
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
stack_probes: StackProbeType::Call,
..base
},
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
index e7c3d66..c228e42 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
@@ -11,7 +11,7 @@
arch: "x86_64".to_string(),
options: TargetOptions {
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
stack_probes: StackProbeType::Call,
..base
},
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
index 8727e48..e3a5de4 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
@@ -10,7 +10,7 @@
arch: "x86_64".to_string(),
options: TargetOptions {
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
stack_probes: StackProbeType::Call,
..base
},
diff --git a/compiler/rustc_target/src/spec/x86_64_fuchsia.rs b/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
index b838b04..7774308 100644
--- a/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
+++ b/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
@@ -4,7 +4,7 @@
let mut base = super::fuchsia_base::opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_linux_android.rs b/compiler/rustc_target/src/spec/x86_64_linux_android.rs
index f328188..19bf27c 100644
--- a/compiler/rustc_target/src/spec/x86_64_linux_android.rs
+++ b/compiler/rustc_target/src/spec/x86_64_linux_android.rs
@@ -7,7 +7,7 @@
base.features = "+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs b/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs
new file mode 100644
index 0000000..b78e43d
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::solaris_base::opts();
+ base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m64".to_string()]);
+ base.cpu = "x86-64".to_string();
+ base.vendor = "pc".to_string();
+ base.max_atomic_width = Some(64);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "x86_64-pc-solaris".to_string(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .to_string(),
+ arch: "x86_64".to_string(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs b/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs
deleted file mode 100644
index b1dce60..0000000
--- a/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs
+++ /dev/null
@@ -1,26 +0,0 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target, TargetOptions};
-
-pub fn target() -> Target {
- let mut base = super::netbsd_base::opts();
- base.vendor = "rumprun".to_string();
- base.cpu = "x86-64".to_string();
- base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- base.linker = Some("x86_64-rumprun-netbsd-gcc".to_string());
- base.max_atomic_width = Some(64);
-
- base.dynamic_linking = false;
- base.has_rpath = false;
- base.position_independent_executables = false;
- base.disable_redzone = true;
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
- base.stack_probes = StackProbeType::Call;
-
- Target {
- llvm_target: "x86_64-rumprun-netbsd".to_string(),
- pointer_width: 64,
- data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
- .to_string(),
- arch: "x86_64".to_string(),
- options: TargetOptions { mcount: "__mcount".to_string(), ..base },
- }
-}
diff --git a/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs b/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
index 0f7422d..2fa5347 100644
--- a/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
+++ b/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
@@ -4,8 +4,9 @@
let mut base = super::solaris_base::opts();
base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m64".to_string()]);
base.cpu = "x86-64".to_string();
+ base.vendor = "sun".to_string();
base.max_atomic_width = Some(64);
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs b/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
index 754f473..d551ce6 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
index 055602c..e51f6c1 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs b/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
index 8b3c9f5..fcd96dd 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m64".to_string()]);
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
// This option is required to build executables on Haiku x86_64
base.position_independent_executables = true;
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
index 69fcb46..1ef24b6 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.features = "+rdrnd,+rdseed".to_string();
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
index 2ba6d36..04499bc 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
index 268f231..5bf9a19 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mx32".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
base.has_elf_tls = false;
// BUG(GabrielMajeri): disabling the PLT on x86_64 Linux with x32 ABI
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
index b4d704f..658dc97 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
@@ -5,9 +5,8 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
-
base.static_position_independent_executables = true;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
index a5d8800..4718ad2 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_hermit_kernel.rs b/compiler/rustc_target/src/spec/x86_64_unknown_none_hermitkernel.rs
similarity index 80%
rename from compiler/rustc_target/src/spec/x86_64_unknown_hermit_kernel.rs
rename to compiler/rustc_target/src/spec/x86_64_unknown_none_hermitkernel.rs
index e2c18d3..28d9801 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_hermit_kernel.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_none_hermitkernel.rs
@@ -7,11 +7,11 @@
base.features =
"-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float"
.to_string();
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
- llvm_target: "x86_64-unknown-hermit".to_string(),
+ llvm_target: "x86_64-unknown-none-elf".to_string(),
pointer_width: 64,
data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.to_string(),
diff --git a/compiler/rustc_target/src/spec/x86_64_linux_kernel.rs b/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
similarity index 76%
rename from compiler/rustc_target/src/spec/x86_64_linux_kernel.rs
rename to compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
index 43e683d..68d8020 100644
--- a/compiler/rustc_target/src/spec/x86_64_linux_kernel.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
@@ -14,8 +14,11 @@
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
Target {
- // FIXME: Some dispute, the linux-on-clang folks think this should use "Linux"
- llvm_target: "x86_64-elf".to_string(),
+ // FIXME: Some dispute, the linux-on-clang folks think this should use
+ // "Linux". We disagree because running *on* Linux is nothing like
+ // running *as* Linux, and historically the "os" component has always
+ // been used to mean the "on" part.
+ llvm_target: "x86_64-unknown-none-elf".to_string(),
pointer_width: 64,
data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.to_string(),
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
index fa0b667..2c5ac43 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs b/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
index 0a8d7b2..b5aa37d 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
index a066f11..6c59817 100644
--- a/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
@@ -5,7 +5,7 @@
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
- // don't use probe-stack=inline-asm until rust-lang/rust#83139 is resolved.
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
base.disable_redzone = true;
diff --git a/compiler/rustc_trait_selection/Cargo.toml b/compiler/rustc_trait_selection/Cargo.toml
index a72c172..c5d4c24 100644
--- a/compiler/rustc_trait_selection/Cargo.toml
+++ b/compiler/rustc_trait_selection/Cargo.toml
@@ -22,4 +22,4 @@
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_trait_selection/src/autoderef.rs b/compiler/rustc_trait_selection/src/autoderef.rs
index 05b6c4a..3f24a33 100644
--- a/compiler/rustc_trait_selection/src/autoderef.rs
+++ b/compiler/rustc_trait_selection/src/autoderef.rs
@@ -6,7 +6,6 @@
use rustc_middle::ty::{self, TraitRef, Ty, TyCtxt, WithConstness};
use rustc_middle::ty::{ToPredicate, TypeFoldable};
use rustc_session::DiagnosticMessageId;
-use rustc_span::symbol::{sym, Ident};
use rustc_span::Span;
#[derive(Copy, Clone, Debug)]
@@ -146,11 +145,10 @@
let normalized_ty = fulfillcx.normalize_projection_type(
&self.infcx,
self.param_env,
- ty::ProjectionTy::from_ref_and_name(
- tcx,
- trait_ref,
- Ident::with_dummy_span(sym::Target),
- ),
+ ty::ProjectionTy {
+ item_def_id: tcx.lang_items().deref_target()?,
+ substs: trait_ref.substs,
+ },
cause,
);
if let Err(e) = fulfillcx.select_where_possible(&self.infcx) {
diff --git a/compiler/rustc_trait_selection/src/infer.rs b/compiler/rustc_trait_selection/src/infer.rs
index da66fbc..a9ffb55 100644
--- a/compiler/rustc_trait_selection/src/infer.rs
+++ b/compiler/rustc_trait_selection/src/infer.rs
@@ -124,7 +124,7 @@
DUMMY_SP,
canonical_key,
|ref infcx, key, canonical_inference_vars| {
- let mut fulfill_cx = TraitEngine::new(infcx.tcx);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
let value = operation(infcx, &mut *fulfill_cx, key)?;
infcx.make_canonicalized_query_response(
canonical_inference_vars,
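// ---------------------------------------------------------------------------
// Editorial note, not part of the imported patch: `TraitEngine::new` is an
// inherent function defined on the trait-object type, so spelling the call as
// `<dyn TraitEngine<'_>>::new(..)` makes that explicit (presumably to avoid
// the deprecated bare-trait-object path form). A minimal standalone analogue
// with a hypothetical `Engine` trait:
trait Engine {
    fn run(&self);
}

struct Noop;
impl Engine for Noop {
    fn run(&self) {}
}

// An inherent associated function defined directly on the trait-object type.
impl dyn Engine {
    fn new() -> Box<dyn Engine> {
        Box::new(Noop)
    }
}

fn main() {
    // Fully qualified form, avoiding the older bare-trait-path spelling.
    let e = <dyn Engine>::new();
    e.run();
}
// ---------------------------------------------------------------------------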
diff --git a/compiler/rustc_trait_selection/src/lib.rs b/compiler/rustc_trait_selection/src/lib.rs
index e1f8d59..aea3d8e 100644
--- a/compiler/rustc_trait_selection/src/lib.rs
+++ b/compiler/rustc_trait_selection/src/lib.rs
@@ -23,7 +23,7 @@
#[macro_use]
extern crate rustc_macros;
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
#[macro_use]
extern crate rustc_data_structures;
#[macro_use]
diff --git a/compiler/rustc_trait_selection/src/opaque_types.rs b/compiler/rustc_trait_selection/src/opaque_types.rs
index 25ba489..d6a585e 100644
--- a/compiler/rustc_trait_selection/src/opaque_types.rs
+++ b/compiler/rustc_trait_selection/src/opaque_types.rs
@@ -422,7 +422,9 @@
}
// These opaque type inherit all lifetime parameters from their
// parent, so we have to check them all.
- hir::OpaqueTyOrigin::Binding | hir::OpaqueTyOrigin::Misc => 0,
+ hir::OpaqueTyOrigin::Binding
+ | hir::OpaqueTyOrigin::TyAlias
+ | hir::OpaqueTyOrigin::Misc => 0,
};
let span = tcx.def_span(def_id);
@@ -581,6 +583,7 @@
// Otherwise, generate the label we'll use in the error message.
hir::OpaqueTyOrigin::Binding
| hir::OpaqueTyOrigin::FnReturn
+ | hir::OpaqueTyOrigin::TyAlias
| hir::OpaqueTyOrigin::Misc => "impl Trait",
};
let msg = format!("ambiguous lifetime bound in `{}`", context_name);
diff --git a/compiler/rustc_trait_selection/src/traits/auto_trait.rs b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
index 6593c10..6510c94 100644
--- a/compiler/rustc_trait_selection/src/traits/auto_trait.rs
+++ b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
@@ -77,7 +77,7 @@
ty: Ty<'tcx>,
orig_env: ty::ParamEnv<'tcx>,
trait_did: DefId,
- auto_trait_callback: impl Fn(&InferCtxt<'_, 'tcx>, AutoTraitInfo<'tcx>) -> A,
+ mut auto_trait_callback: impl FnMut(AutoTraitInfo<'tcx>) -> A,
) -> AutoTraitResult<A> {
let tcx = self.tcx;
@@ -211,7 +211,7 @@
let info = AutoTraitInfo { full_user_env, region_data, vid_to_region };
- AutoTraitResult::PositiveImpl(auto_trait_callback(&infcx, info))
+ AutoTraitResult::PositiveImpl(auto_trait_callback(info))
})
}
}
diff --git a/compiler/rustc_trait_selection/src/traits/codegen.rs b/compiler/rustc_trait_selection/src/traits/codegen.rs
index 657d5c1..45853a6 100644
--- a/compiler/rustc_trait_selection/src/traits/codegen.rs
+++ b/compiler/rustc_trait_selection/src/traits/codegen.rs
@@ -91,7 +91,7 @@
});
let impl_source = drain_fulfillment_cx_or_panic(&infcx, &mut fulfill_cx, impl_source);
- info!("Cache miss: {:?} => {:?}", trait_ref, impl_source);
+ debug!("Cache miss: {:?} => {:?}", trait_ref, impl_source);
Ok(impl_source)
})
}
diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs
index 99b96f6..e8ae1f4 100644
--- a/compiler/rustc_trait_selection/src/traits/coherence.rs
+++ b/compiler/rustc_trait_selection/src/traits/coherence.rs
@@ -11,7 +11,7 @@
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::subst::Subst;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, fast_reject, Ty, TyCtxt};
use rustc_span::symbol::sym;
use rustc_span::DUMMY_SP;
use std::iter;
@@ -67,6 +67,34 @@
impl2_def_id={:?})",
impl1_def_id, impl2_def_id,
);
+ // Before doing expensive operations like entering an inference context, do
+ // a quick check via fast_reject to tell if the impl headers could possibly
+ // unify.
+ let impl1_ref = tcx.impl_trait_ref(impl1_def_id);
+ let impl2_ref = tcx.impl_trait_ref(impl2_def_id);
+
+ // Check if any of the input types definitely do not unify.
+ if impl1_ref
+ .iter()
+ .flat_map(|tref| tref.substs.types())
+ .zip(impl2_ref.iter().flat_map(|tref| tref.substs.types()))
+ .any(|(ty1, ty2)| {
+ let t1 = fast_reject::simplify_type(tcx, ty1, false);
+ let t2 = fast_reject::simplify_type(tcx, ty2, false);
+ if let (Some(t1), Some(t2)) = (t1, t2) {
+ // Simplified successfully
+ // Types cannot unify if they differ in their reference mutability or simplify to different types
+ t1 != t2 || ty1.ref_mutability() != ty2.ref_mutability()
+ } else {
+ // Types might unify
+ false
+ }
+ })
+ {
+ // Some types involved are definitely different, so the impls couldn't possibly overlap.
+ debug!("overlapping_impls: fast_reject early-exit");
+ return no_overlap();
+ }
let overlaps = tcx.infer_ctxt().enter(|infcx| {
let selcx = &mut SelectionContext::intercrate(&infcx);
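// ---------------------------------------------------------------------------
// Editorial note, not part of the imported patch: the new fast path walks the
// two impl headers' input types in lockstep and skips building an inference
// context entirely if any pair simplifies to visibly different shapes. A toy
// version of that "definitely disjoint" test (`SimplifiedType` here is a
// hypothetical stand-in for rustc's fast_reject machinery):
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum SimplifiedType {
    Int,
    Slice,
    Unknown, // could not be simplified; might still unify
}

fn definitely_disjoint(a: &[SimplifiedType], b: &[SimplifiedType]) -> bool {
    a.iter().zip(b).any(|(t1, t2)| match (t1, t2) {
        // Only a successful simplification on both sides is conclusive.
        (SimplifiedType::Unknown, _) | (_, SimplifiedType::Unknown) => false,
        _ => t1 != t2,
    })
}

fn main() {
    use SimplifiedType::*;
    // e.g. `impl Trait for [T]` vs `impl Trait for i32`: cannot overlap.
    assert!(definitely_disjoint(&[Slice], &[Int]));
    // A bare type parameter simplifies to nothing, so overlap is still possible.
    assert!(!definitely_disjoint(&[Unknown], &[Int]));
}
// ---------------------------------------------------------------------------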
diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
index 89820bb..670527f 100644
--- a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
+++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
@@ -72,17 +72,16 @@
// We were unable to unify the abstract constant with
// a constant found in the caller bounds, there are
// now three possible cases here.
- //
- // - The substs are concrete enough that we can simply
- // try and evaluate the given constant.
- // - The abstract const still references an inference
- // variable, in this case we return `TooGeneric`.
- // - The abstract const references a generic parameter,
- // this means that we emit an error here.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
enum FailureKind {
+ /// The abstract const still references an inference
+ /// variable, in this case we return `TooGeneric`.
MentionsInfer,
+ /// The abstract const references a generic parameter,
+ /// this means that we emit an error here.
MentionsParam,
+ /// The substs are concrete enough that we can simply
+ /// try and evaluate the given constant.
Concrete,
}
let mut failure_kind = FailureKind::Concrete;
@@ -117,7 +116,7 @@
{
err.span_help(
tcx.def_span(def.did),
- &format!("try adding a `where` bound using this expression: where [u8; {}]: Sized", snippet),
+ &format!("try adding a `where` bound using this expression: `where [u8; {}]: Sized`", snippet),
);
} else {
err.span_help(
@@ -378,7 +377,10 @@
let local = self.place_to_local(span, p)?;
Ok(self.locals[local])
}
- mir::Operand::Constant(ct) => Ok(self.add_node(Node::Leaf(ct.literal), span)),
+ mir::Operand::Constant(ct) => match ct.literal {
+ mir::ConstantKind::Ty(ct) => Ok(self.add_node(Node::Leaf(ct), span)),
+ mir::ConstantKind::Val(..) => self.error(Some(span), "unsupported constant")?,
+ },
}
}
@@ -412,7 +414,7 @@
self.locals[local] = self.operand_to_node(span, operand)?;
Ok(())
}
- Rvalue::BinaryOp(op, ref lhs, ref rhs) if Self::check_binop(op) => {
+ Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) if Self::check_binop(op) => {
let lhs = self.operand_to_node(span, lhs)?;
let rhs = self.operand_to_node(span, rhs)?;
self.locals[local] = self.add_node(Node::Binop(op, lhs, rhs), span);
@@ -422,7 +424,9 @@
Ok(())
}
}
- Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) if Self::check_binop(op) => {
+ Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs))
+ if Self::check_binop(op) =>
+ {
let lhs = self.operand_to_node(span, lhs)?;
let rhs = self.operand_to_node(span, rhs)?;
self.locals[local] = self.add_node(Node::Binop(op, lhs, rhs), span);
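// ---------------------------------------------------------------------------
// Editorial note, not part of the imported patch: the comment block is moved
// onto the `FailureKind` variants as doc comments, and the variant order
// matters because the caller starts from `Concrete` and keeps the worst
// classification it sees while walking the abstract const (presumably via
// something like `cmp::min`). A toy sketch of that ordering trick:
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
enum FailureKind {
    MentionsInfer, // most blocking: return `TooGeneric`
    MentionsParam, // emit an error
    Concrete,      // just evaluate the constant
}

fn classify(leaves: &[FailureKind]) -> FailureKind {
    // Derived `Ord` follows declaration order, so `min` picks the worst case.
    leaves.iter().copied().fold(FailureKind::Concrete, std::cmp::min)
}

fn main() {
    use FailureKind::*;
    assert_eq!(classify(&[Concrete, MentionsParam, Concrete]), MentionsParam);
    assert_eq!(classify(&[MentionsParam, MentionsInfer]), MentionsInfer);
    assert_eq!(classify(&[]), Concrete);
}
// ---------------------------------------------------------------------------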
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
index d3b3403..a3faf4c 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
@@ -468,22 +468,21 @@
trait_ref,
obligation.cause.body_id,
);
- } else {
- if !have_alt_message {
- // Can't show anything else useful, try to find similar impls.
- let impl_candidates = self.find_similar_impl_candidates(trait_ref);
- self.report_similar_impl_candidates(impl_candidates, &mut err);
- }
- // Changing mutability doesn't make a difference to whether we have
- // an `Unsize` impl (Fixes ICE in #71036)
- if !is_unsize {
- self.suggest_change_mut(
- &obligation,
- &mut err,
- trait_ref,
- points_at_arg,
- );
- }
+ } else if !have_alt_message {
+ // Can't show anything else useful, try to find similar impls.
+ let impl_candidates = self.find_similar_impl_candidates(trait_ref);
+ self.report_similar_impl_candidates(impl_candidates, &mut err);
+ }
+
+ // Changing mutability doesn't make a difference to whether we have
+ // an `Unsize` impl (Fixes ICE in #71036)
+ if !is_unsize {
+ self.suggest_change_mut(
+ &obligation,
+ &mut err,
+ trait_ref,
+ points_at_arg,
+ );
}
// If this error is due to `!: Trait` not implemented but `(): Trait` is
@@ -820,7 +819,7 @@
sig.decl
.inputs
.iter()
- .map(|arg| match arg.clone().kind {
+ .map(|arg| match arg.kind {
hir::TyKind::Tup(ref tys) => ArgKind::Tuple(
Some(arg.span),
vec![("_".to_owned(), "_".to_owned()); tys.len()],
@@ -1369,8 +1368,8 @@
Some(t) => Some(t),
None => {
let ty = parent_trait_ref.skip_binder().self_ty();
- let span =
- TyCategory::from_ty(ty).map(|(_, def_id)| self.tcx.def_span(def_id));
+ let span = TyCategory::from_ty(self.tcx, ty)
+ .map(|(_, def_id)| self.tcx.def_span(def_id));
Some((ty.to_string(), span))
}
}
@@ -1590,8 +1589,7 @@
self.emit_inference_failure_err(body_id, span, a.into(), vec![], ErrorCode::E0282)
}
ty::PredicateKind::Projection(data) => {
- let trait_ref = bound_predicate.rebind(data).to_poly_trait_ref(self.tcx);
- let self_ty = trait_ref.skip_binder().self_ty();
+ let self_ty = data.projection_ty.self_ty();
let ty = data.ty;
if predicate.references_error() {
return;
@@ -1780,7 +1778,7 @@
multispan.push_span_label(
sp,
format!(
- "...if indirection was used here: `Box<{}>`",
+ "...if indirection were used here: `Box<{}>`",
param.name.ident(),
),
);
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
index 69f66f6..e6a1cf5 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
@@ -200,22 +200,15 @@
if let Some(def) = aty.ty_adt_def() {
// We also want to be able to select the array's type's original
// signature with no type arguments resolved
- flags.push((
- sym::_Self,
- Some(format!("[{}]", self.tcx.type_of(def.did).to_string())),
- ));
- let tcx = self.tcx;
- if let Some(len) = len.try_eval_usize(tcx, ty::ParamEnv::empty()) {
- flags.push((
- sym::_Self,
- Some(format!("[{}; {}]", self.tcx.type_of(def.did).to_string(), len)),
- ));
- } else {
- flags.push((
- sym::_Self,
- Some(format!("[{}; _]", self.tcx.type_of(def.did).to_string())),
- ));
- }
+ let type_string = self.tcx.type_of(def.did).to_string();
+ flags.push((sym::_Self, Some(format!("[{}]", type_string))));
+
+ let len = len.val.try_to_value().and_then(|v| v.try_to_machine_usize(self.tcx));
+ let string = match len {
+ Some(n) => format!("[{}; {}]", type_string, n),
+ None => format!("[{}; _]", type_string),
+ };
+ flags.push((sym::_Self, Some(string)));
}
}
if let ty::Dynamic(traits, _) = self_ty.kind() {
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
index 2182800..c1b105f 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
@@ -17,8 +17,8 @@
use rustc_hir::lang_items::LangItem;
use rustc_hir::{AsyncGeneratorKind, GeneratorKind, Node};
use rustc_middle::ty::{
- self, suggest_constraining_type_param, AdtKind, DefIdTree, Infer, InferTy, ToPredicate, Ty,
- TyCtxt, TypeFoldable, WithConstness,
+ self, suggest_arbitrary_trait_bound, suggest_constraining_type_param, AdtKind, DefIdTree,
+ Infer, InferTy, ToPredicate, Ty, TyCtxt, TypeFoldable, WithConstness,
};
use rustc_middle::ty::{TypeAndMut, TypeckResults};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
@@ -334,7 +334,7 @@
let (param_ty, projection) = match self_ty.kind() {
ty::Param(_) => (true, None),
ty::Projection(projection) => (false, Some(projection)),
- _ => return,
+ _ => (false, None),
};
// FIXME: Add check for trait bound that is already present, particularly `?Sized` so we
@@ -453,6 +453,26 @@
}
}
+ hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Struct(_, generics)
+ | hir::ItemKind::Enum(_, generics)
+ | hir::ItemKind::Union(_, generics)
+ | hir::ItemKind::Trait(_, _, generics, ..)
+ | hir::ItemKind::Impl(hir::Impl { generics, .. })
+ | hir::ItemKind::Fn(_, generics, _)
+ | hir::ItemKind::TyAlias(_, generics)
+ | hir::ItemKind::TraitAlias(generics, _)
+ | hir::ItemKind::OpaqueTy(hir::OpaqueTy { generics, .. }),
+ ..
+ }) if !param_ty => {
+ // Missing generic type parameter bound.
+ let param_name = self_ty.to_string();
+ let constraint = trait_ref.print_only_trait_path().to_string();
+                if suggest_arbitrary_trait_bound(generics, &mut err, &param_name, &constraint) {
+ return;
+ }
+ }
hir::Node::Crate(..) => return,
_ => {}
@@ -1103,7 +1123,7 @@
// This is currently not possible to trigger because E0038 takes precedence, but
// leave it in for completeness in case anything changes in an earlier stage.
err.note(&format!(
- "if trait `{}` was object safe, you could return a trait object",
+ "if trait `{}` were object-safe, you could return a trait object",
trait_obj,
));
}
@@ -1881,10 +1901,26 @@
ObligationCauseCode::Coercion { source: _, target } => {
err.note(&format!("required by cast to type `{}`", self.ty_to_string(target)));
}
- ObligationCauseCode::RepeatVec => {
+ ObligationCauseCode::RepeatVec(is_const_fn) => {
err.note(
"the `Copy` trait is required because the repeated element will be copied",
);
+
+ if is_const_fn {
+ err.help(
+ "consider creating a new `const` item and initializing it with the result \
+ of the function call to be used in the repeat position, like \
+ `const VAL: Type = const_fn();` and `let x = [VAL; 42];`",
+ );
+ }
+
+ if self.tcx.sess.is_nightly_build() && is_const_fn {
+ err.help(
+ "create an inline `const` block, see RFC \
+ #2920 <https://github.com/rust-lang/rfcs/pull/2920> \
+ for more information",
+ );
+ }
}
ObligationCauseCode::VariableType(hir_id) => {
let parent_node = self.tcx.hir().get_parent_node(hir_id);
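The two help notes above suggest moving the const fn call into a compile-time constant so the repeat expression no longer needs `Copy`. A minimal sketch of the inline `const` block shape (nightly-only behind RFC 2920 at the time of this import, since stabilized); the function name is invented for illustration:

const fn default_cell() -> Option<String> {
    None
}

fn main() {
    // The repeated element comes from const evaluation, so no
    // `Option<String>: Copy` bound is required for the repeat expression.
    let cells = [const { default_cell() }; 4];
    assert!(cells.iter().all(|c| c.is_none()));
}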
diff --git a/compiler/rustc_trait_selection/src/traits/fulfill.rs b/compiler/rustc_trait_selection/src/traits/fulfill.rs
index d4ced20..7d451fc 100644
--- a/compiler/rustc_trait_selection/src/traits/fulfill.rs
+++ b/compiler/rustc_trait_selection/src/traits/fulfill.rs
@@ -6,6 +6,7 @@
use rustc_infer::traits::{TraitEngine, TraitEngineExt as _, TraitObligation};
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::error::ExpectedFound;
+use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::ToPredicate;
use rustc_middle::ty::{self, Binder, Const, Ty, TypeFoldable};
use std::marker::PhantomData;
@@ -86,7 +87,7 @@
}
// `PendingPredicateObligation` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(PendingPredicateObligation<'_>, 56);
impl<'a, 'tcx> FulfillmentContext<'tcx> {
@@ -498,10 +499,10 @@
) {
Ok(()) => ProcessResult::Changed(vec![]),
Err(ErrorHandled::TooGeneric) => {
- pending_obligation.stalled_on = substs
- .iter()
- .filter_map(TyOrConstInferVar::maybe_from_generic_arg)
- .collect();
+ pending_obligation.stalled_on.clear();
+ pending_obligation.stalled_on.extend(
+ substs.iter().filter_map(TyOrConstInferVar::maybe_from_generic_arg),
+ );
ProcessResult::Unchanged
}
Err(e) => ProcessResult::Error(CodeSelectionError(ConstEvalFailure(e))),
@@ -543,13 +544,10 @@
) {
Ok(val) => Ok(Const::from_value(self.selcx.tcx(), val, c.ty)),
Err(ErrorHandled::TooGeneric) => {
- stalled_on.append(
- &mut substs
+ stalled_on.extend(
+ substs
.iter()
- .filter_map(|arg| {
- TyOrConstInferVar::maybe_from_generic_arg(arg)
- })
- .collect(),
+ .filter_map(TyOrConstInferVar::maybe_from_generic_arg),
);
Err(ErrorHandled::TooGeneric)
}
@@ -633,10 +631,11 @@
// only reason we can fail to make progress on
// trait selection is because we don't have enough
// information about the types in the trait.
- *stalled_on = trait_ref_infer_vars(
+ stalled_on.clear();
+ stalled_on.extend(substs_infer_vars(
self.selcx,
- trait_obligation.predicate.map_bound(|pred| pred.trait_ref),
- );
+ trait_obligation.predicate.map_bound(|pred| pred.trait_ref.substs),
+ ));
debug!(
"process_predicate: pending obligation {:?} now stalled on {:?}",
@@ -647,7 +646,7 @@
ProcessResult::Unchanged
}
Err(selection_err) => {
- info!("selecting trait at depth {} yielded Err", obligation.recursion_depth);
+ debug!("selecting trait at depth {} yielded Err", obligation.recursion_depth);
ProcessResult::Error(CodeSelectionError(selection_err))
}
@@ -663,10 +662,11 @@
match project::poly_project_and_unify_type(self.selcx, &project_obligation) {
Ok(Ok(Some(os))) => ProcessResult::Changed(mk_pending(os)),
Ok(Ok(None)) => {
- *stalled_on = trait_ref_infer_vars(
+ stalled_on.clear();
+ stalled_on.extend(substs_infer_vars(
self.selcx,
- project_obligation.predicate.to_poly_trait_ref(tcx),
- );
+ project_obligation.predicate.map_bound(|pred| pred.projection_ty.substs),
+ ));
ProcessResult::Unchanged
}
// Let the caller handle the recursion
@@ -678,23 +678,28 @@
}
}
-/// Returns the set of inference variables contained in a trait ref.
-fn trait_ref_infer_vars<'a, 'tcx>(
+/// Returns the set of inference variables contained in `substs`.
+fn substs_infer_vars<'a, 'tcx>(
selcx: &mut SelectionContext<'a, 'tcx>,
- trait_ref: ty::PolyTraitRef<'tcx>,
-) -> Vec<TyOrConstInferVar<'tcx>> {
+ substs: ty::Binder<SubstsRef<'tcx>>,
+) -> impl Iterator<Item = TyOrConstInferVar<'tcx>> {
selcx
.infcx()
- .resolve_vars_if_possible(trait_ref)
- .skip_binder()
- .substs
+ .resolve_vars_if_possible(substs)
+ .skip_binder() // ok because this check doesn't care about regions
.iter()
- // FIXME(eddyb) try using `skip_current_subtree` to skip everything that
- // doesn't contain inference variables, not just the outermost level.
.filter(|arg| arg.has_infer_types_or_consts())
- .flat_map(|arg| arg.walk())
+ .flat_map(|arg| {
+ let mut walker = arg.walk();
+ while let Some(c) = walker.next() {
+ if !c.has_infer_types_or_consts() {
+ walker.visited.remove(&c);
+ walker.skip_current_subtree();
+ }
+ }
+ walker.visited.into_iter()
+ })
.filter_map(TyOrConstInferVar::maybe_from_generic_arg)
- .collect()
}
fn to_fulfillment_error<'tcx>(
diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs
index f7c0baf..5a8c53a 100644
--- a/compiler/rustc_trait_selection/src/traits/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/mod.rs
@@ -65,7 +65,8 @@
get_vtable_index_of_object_method, impl_item_is_final, predicate_for_trait_def, upcast_choices,
};
pub use self::util::{
- supertrait_def_ids, supertraits, transitive_bounds, SupertraitDefIds, Supertraits,
+ supertrait_def_ids, supertraits, transitive_bounds, transitive_bounds_that_define_assoc_type,
+ SupertraitDefIds, Supertraits,
};
pub use self::chalk_fulfill::FulfillmentContext as ChalkFulfillmentContext;
@@ -454,7 +455,6 @@
/// Given a trait `trait_ref`, iterates the vtable entries
/// that come from `trait_ref`, including its supertraits.
-#[inline] // FIXME(#35870): avoid closures being unexported due to `impl Trait`.
fn vtable_methods<'tcx>(
tcx: TyCtxt<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
diff --git a/compiler/rustc_trait_selection/src/traits/object_safety.rs b/compiler/rustc_trait_selection/src/traits/object_safety.rs
index e155f03..7de20e4 100644
--- a/compiler/rustc_trait_selection/src/traits/object_safety.rs
+++ b/compiler/rustc_trait_selection/src/traits/object_safety.rs
@@ -292,11 +292,7 @@
//
// This is ALT2 in issue #56288, see that for discussion of the
// possible alternatives.
- if data.projection_ty.trait_ref(tcx).substs[1..].iter().any(has_self_ty) {
- Some(sp)
- } else {
- None
- }
+ if data.projection_ty.substs[1..].iter().any(has_self_ty) { Some(sp) } else { None }
}
ty::PredicateKind::WellFormed(..)
| ty::PredicateKind::ObjectSafe(..)
diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs
index 6908480..0af6d64 100644
--- a/compiler/rustc_trait_selection/src/traits/project.rs
+++ b/compiler/rustc_trait_selection/src/traits/project.rs
@@ -12,7 +12,7 @@
use super::SelectionError;
use super::{
ImplSourceClosureData, ImplSourceDiscriminantKindData, ImplSourceFnPointerData,
- ImplSourceGeneratorData, ImplSourceUserDefinedData,
+ ImplSourceGeneratorData, ImplSourcePointeeData, ImplSourceUserDefinedData,
};
use super::{Normalized, NormalizedTy, ProjectionCacheEntry, ProjectionCacheKey};
@@ -741,11 +741,7 @@
return Err(ProjectionTyError::TraitSelectionError(SelectionError::Overflow));
}
- let obligation_trait_ref = &obligation.predicate.trait_ref(selcx.tcx());
-
- debug!(?obligation_trait_ref);
-
- if obligation_trait_ref.references_error() {
+ if obligation.predicate.references_error() {
return Ok(ProjectedTy::Progress(Progress::error(selcx.tcx())));
}
@@ -754,19 +750,19 @@
// Make sure that the following procedures are kept in order. ParamEnv
// needs to be first because it has highest priority, and Select checks
// the return value of push_candidate which assumes it's ran at last.
- assemble_candidates_from_param_env(selcx, obligation, &obligation_trait_ref, &mut candidates);
+ assemble_candidates_from_param_env(selcx, obligation, &mut candidates);
- assemble_candidates_from_trait_def(selcx, obligation, &obligation_trait_ref, &mut candidates);
+ assemble_candidates_from_trait_def(selcx, obligation, &mut candidates);
- assemble_candidates_from_object_ty(selcx, obligation, &obligation_trait_ref, &mut candidates);
+ assemble_candidates_from_object_ty(selcx, obligation, &mut candidates);
if let ProjectionTyCandidateSet::Single(ProjectionTyCandidate::Object(_)) = candidates {
// Avoid normalization cycle from selection (see
// `assemble_candidates_from_object_ty`).
// FIXME(lazy_normalization): Lazy normalization should save us from
- // having to do special case this.
+ // having to special case this.
} else {
- assemble_candidates_from_impls(selcx, obligation, &obligation_trait_ref, &mut candidates);
+ assemble_candidates_from_impls(selcx, obligation, &mut candidates);
};
match candidates {
@@ -792,14 +788,12 @@
fn assemble_candidates_from_param_env<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
- obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
) {
debug!("assemble_candidates_from_param_env(..)");
assemble_candidates_from_predicates(
selcx,
obligation,
- obligation_trait_ref,
candidate_set,
ProjectionTyCandidate::ParamEnv,
obligation.param_env.caller_bounds().iter(),
@@ -820,7 +814,6 @@
fn assemble_candidates_from_trait_def<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
- obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
) {
debug!("assemble_candidates_from_trait_def(..)");
@@ -828,7 +821,7 @@
let tcx = selcx.tcx();
// Check whether the self-type is itself a projection.
// If so, extract what we know from the trait and try to come up with a good answer.
- let bounds = match *obligation_trait_ref.self_ty().kind() {
+ let bounds = match *obligation.predicate.self_ty().kind() {
ty::Projection(ref data) => tcx.item_bounds(data.item_def_id).subst(tcx, data.substs),
ty::Opaque(def_id, substs) => tcx.item_bounds(def_id).subst(tcx, substs),
ty::Infer(ty::TyVar(_)) => {
@@ -843,7 +836,6 @@
assemble_candidates_from_predicates(
selcx,
obligation,
- obligation_trait_ref,
candidate_set,
ProjectionTyCandidate::TraitDef,
bounds.iter(),
@@ -863,14 +855,13 @@
fn assemble_candidates_from_object_ty<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
- obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
) {
debug!("assemble_candidates_from_object_ty(..)");
let tcx = selcx.tcx();
- let self_ty = obligation_trait_ref.self_ty();
+ let self_ty = obligation.predicate.self_ty();
let object_ty = selcx.infcx().shallow_resolve(self_ty);
let data = match object_ty.kind() {
ty::Dynamic(data, ..) => data,
@@ -890,7 +881,6 @@
assemble_candidates_from_predicates(
selcx,
obligation,
- obligation_trait_ref,
candidate_set,
ProjectionTyCandidate::Object,
env_predicates,
@@ -901,7 +891,6 @@
fn assemble_candidates_from_predicates<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
- obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
ctor: fn(ty::PolyProjectionPredicate<'tcx>) -> ProjectionTyCandidate<'tcx>,
env_predicates: impl Iterator<Item = ty::Predicate<'tcx>>,
@@ -921,8 +910,7 @@
&& infcx.probe(|_| {
selcx.match_projection_projections(
obligation,
- obligation_trait_ref,
- &data,
+ data,
potentially_unnormalized_candidates,
)
});
@@ -948,14 +936,13 @@
fn assemble_candidates_from_impls<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
- obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
) {
debug!("assemble_candidates_from_impls");
// If we are resolving `<T as TraitRef<...>>::Item == Type`,
// start out by selecting the predicate `T as TraitRef<...>`:
- let poly_trait_ref = ty::Binder::dummy(*obligation_trait_ref);
+ let poly_trait_ref = obligation.predicate.trait_ref(selcx.tcx()).to_poly_trait_ref();
let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate());
let _ = selcx.infcx().commit_if_ok(|_| {
let impl_source = match selcx.select(&trait_obligation) {
@@ -1069,6 +1056,51 @@
| ty::Error(_) => false,
}
}
+ super::ImplSource::Pointee(..) => {
+ // While `Pointee` is automatically implemented for every type,
+ // the concrete metadata type may not be known yet.
+ //
+ // Any type with multiple potential metadata types is therefore not eligible.
+ let self_ty = selcx.infcx().shallow_resolve(obligation.predicate.self_ty());
+
+ // FIXME: should this normalize?
+ let tail = selcx.tcx().struct_tail_without_normalization(self_ty);
+ match tail.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(..)
+ | ty::Dynamic(..)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ // If returned by `struct_tail_without_normalization` this is a unit struct
+ // without any fields, or not a struct, and therefore is Sized.
+ | ty::Adt(..)
+ // If returned by `struct_tail_without_normalization` this is the empty tuple.
+ | ty::Tuple(..)
+ // Integers and floats are always Sized, and so have unit type metadata.
+ | ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(..)) => true,
+
+ ty::Projection(..)
+ | ty::Opaque(..)
+ | ty::Param(..)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Infer(..)
+ | ty::Error(_) => false,
+ }
+ }
super::ImplSource::Param(..) => {
// This case tell us nothing about the value of an
// associated type. Consider:
@@ -1169,6 +1201,7 @@
super::ImplSource::DiscriminantKind(data) => {
confirm_discriminant_kind_candidate(selcx, obligation, data)
}
+ super::ImplSource::Pointee(data) => confirm_pointee_candidate(selcx, obligation, data),
super::ImplSource::Object(_)
| super::ImplSource::AutoImpl(..)
| super::ImplSource::Param(..)
@@ -1256,6 +1289,26 @@
confirm_param_env_candidate(selcx, obligation, ty::Binder::dummy(predicate), false)
}
+fn confirm_pointee_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ _: ImplSourcePointeeData,
+) -> Progress<'tcx> {
+ let tcx = selcx.tcx();
+
+ let self_ty = selcx.infcx().shallow_resolve(obligation.predicate.self_ty());
+ let substs = tcx.mk_substs([self_ty.into()].iter());
+
+ let metadata_def_id = tcx.require_lang_item(LangItem::Metadata, None);
+
+ let predicate = ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy { substs, item_def_id: metadata_def_id },
+ ty: self_ty.ptr_metadata_ty(tcx),
+ };
+
+ confirm_param_env_candidate(selcx, obligation, ty::Binder::bind(predicate), false)
+}
+
fn confirm_fn_pointer_candidate<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
@@ -1344,25 +1397,25 @@
poly_cache_entry,
);
- let cache_trait_ref = cache_entry.projection_ty.trait_ref(infcx.tcx);
- let obligation_trait_ref = obligation.predicate.trait_ref(infcx.tcx);
+ let cache_projection = cache_entry.projection_ty;
+ let obligation_projection = obligation.predicate;
let mut nested_obligations = Vec::new();
- let cache_trait_ref = if potentially_unnormalized_candidate {
+ let cache_projection = if potentially_unnormalized_candidate {
ensure_sufficient_stack(|| {
normalize_with_depth_to(
selcx,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
- cache_trait_ref,
+ cache_projection,
&mut nested_obligations,
)
})
} else {
- cache_trait_ref
+ cache_projection
};
- match infcx.at(cause, param_env).eq(cache_trait_ref, obligation_trait_ref) {
+ match infcx.at(cause, param_env).eq(cache_projection, obligation_projection) {
Ok(InferOk { value: _, obligations }) => {
nested_obligations.extend(obligations);
assoc_ty_own_obligations(selcx, obligation, &mut nested_obligations);
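As background for the `Pointee` projection handled above, a rough sketch of what the metadata type resolves to for a few self types. `Pointee`, `ptr::metadata`, and `DynMetadata` are nightly-only (feature `ptr_metadata`), so this is illustrative rather than guaranteed API:

#![feature(ptr_metadata)]

use std::ptr;

fn main() {
    // Sized types project to `()` as their metadata.
    let x = 42u32;
    let _unit: () = ptr::metadata(&x);

    // Slices and `str` project to `usize` (the length).
    let s: &str = "hello";
    let len: usize = ptr::metadata(s);
    assert_eq!(len, 5);

    // Trait objects project to `DynMetadata<dyn Trait>` (the vtable pointer).
    let d: &dyn std::fmt::Debug = &x;
    let _vtable: ptr::DynMetadata<dyn std::fmt::Debug> = ptr::metadata(d);
}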
diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
index 33cd509..c908e14 100644
--- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
@@ -97,7 +97,7 @@
self.infcx.tcx
}
- #[instrument(skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
if !ty.has_projections() {
return ty;
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
index 1688539..4f5476c 100644
--- a/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
@@ -43,7 +43,7 @@
info!("fully_perform({:?})", self);
}
- scrape_region_constraints(infcx, || Ok((self.closure)(infcx)?))
+ scrape_region_constraints(infcx, || (self.closure)(infcx))
}
}
@@ -62,7 +62,7 @@
infcx: &InferCtxt<'_, 'tcx>,
op: impl FnOnce() -> Fallible<InferOk<'tcx, R>>,
) -> Fallible<(R, Option<Rc<QueryRegionConstraints<'tcx>>>)> {
- let mut fulfill_cx = TraitEngine::new(infcx.tcx);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
let dummy_body_id = ObligationCause::dummy().body_id;
// During NLL, we expect that nobody will register region
diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
index f09ce8d..752f6a8 100644
--- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
@@ -267,6 +267,9 @@
} else if lang_items.discriminant_kind_trait() == Some(def_id) {
// `DiscriminantKind` is automatically implemented for every type.
candidates.vec.push(DiscriminantKindCandidate);
+ } else if lang_items.pointee_trait() == Some(def_id) {
+ // `Pointee` is automatically implemented for every type.
+ candidates.vec.push(PointeeCandidate);
} else if lang_items.sized_trait() == Some(def_id) {
// Sized is never implementable by end-users, it is
// always automatically computed.
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
index ed3e117..272930f 100644
--- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -30,7 +30,8 @@
use crate::traits::{
ImplSourceAutoImplData, ImplSourceBuiltinData, ImplSourceClosureData,
ImplSourceDiscriminantKindData, ImplSourceFnPointerData, ImplSourceGeneratorData,
- ImplSourceObjectData, ImplSourceTraitAliasData, ImplSourceUserDefinedData,
+ ImplSourceObjectData, ImplSourcePointeeData, ImplSourceTraitAliasData,
+ ImplSourceUserDefinedData,
};
use crate::traits::{ObjectCastObligation, PredicateObligation, TraitObligation};
use crate::traits::{Obligation, ObligationCause};
@@ -99,6 +100,8 @@
Ok(ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData))
}
+ PointeeCandidate => Ok(ImplSource::Pointee(ImplSourcePointeeData)),
+
TraitAliasCandidate(alias_def_id) => {
let data = self.confirm_trait_alias_candidate(obligation, alias_def_id);
Ok(ImplSource::TraitAlias(data))
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
index 87c8099..45b5aff 100644
--- a/compiler/rustc_trait_selection/src/traits/select/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -32,6 +32,7 @@
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::Constness;
+use rustc_infer::infer::LateBoundRegionConversionTime;
use rustc_middle::dep_graph::{DepKind, DepNodeIndex};
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::fast_reject;
@@ -1254,32 +1255,33 @@
pub(super) fn match_projection_projections(
&mut self,
obligation: &ProjectionTyObligation<'tcx>,
- obligation_trait_ref: &ty::TraitRef<'tcx>,
- data: &PolyProjectionPredicate<'tcx>,
+ env_predicate: PolyProjectionPredicate<'tcx>,
potentially_unnormalized_candidates: bool,
) -> bool {
let mut nested_obligations = Vec::new();
- let projection_ty = if potentially_unnormalized_candidates {
+ let (infer_predicate, _) = self.infcx.replace_bound_vars_with_fresh_vars(
+ obligation.cause.span,
+ LateBoundRegionConversionTime::HigherRankedType,
+ env_predicate,
+ );
+ let infer_projection = if potentially_unnormalized_candidates {
ensure_sufficient_stack(|| {
project::normalize_with_depth_to(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
- data.map_bound(|data| data.projection_ty),
+ infer_predicate.projection_ty,
&mut nested_obligations,
)
})
} else {
- data.map_bound(|data| data.projection_ty)
+ infer_predicate.projection_ty
};
- // FIXME(generic_associated_types): Compare the whole projections
- let data_poly_trait_ref = projection_ty.map_bound(|proj| proj.trait_ref(self.tcx()));
- let obligation_poly_trait_ref = ty::Binder::dummy(*obligation_trait_ref);
self.infcx
.at(&obligation.cause, obligation.param_env)
- .sup(obligation_poly_trait_ref, data_poly_trait_ref)
+ .sup(obligation.predicate, infer_projection)
.map_or(false, |InferOk { obligations, value: () }| {
self.evaluate_predicates_recursively(
TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
@@ -1318,8 +1320,8 @@
let is_global =
|cand: &ty::PolyTraitRef<'_>| cand.is_global() && !cand.has_late_bound_regions();
- // (*) Prefer `BuiltinCandidate { has_nested: false }` and `DiscriminantKindCandidate`
- // to anything else.
+ // (*) Prefer `BuiltinCandidate { has_nested: false }`, `PointeeCandidate`,
+ // and `DiscriminantKindCandidate` to anything else.
//
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
@@ -1332,8 +1334,18 @@
}
// (*)
- (BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate, _) => true,
- (_, BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate) => false,
+ (
+ BuiltinCandidate { has_nested: false }
+ | DiscriminantKindCandidate
+ | PointeeCandidate,
+ _,
+ ) => true,
+ (
+ _,
+ BuiltinCandidate { has_nested: false }
+ | DiscriminantKindCandidate
+ | PointeeCandidate,
+ ) => false,
(ParamCandidate(other), ParamCandidate(victim)) => {
if other.value == victim.value && victim.constness == Constness::NotConst {
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
index 0133a96..264cc4f 100644
--- a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
@@ -349,7 +349,7 @@
E0751,
"found both positive and negative implementation of trait `{}`{}:",
overlap.trait_desc,
- overlap.self_desc.clone().map_or(String::new(), |ty| format!(" for type `{}`", ty))
+ overlap.self_desc.clone().map_or_else(String::new, |ty| format!(" for type `{}`", ty))
);
match tcx.span_of_impl(negative_impl_def_id) {
@@ -397,7 +397,10 @@
let msg = format!(
"conflicting implementations of trait `{}`{}:{}",
overlap.trait_desc,
- overlap.self_desc.clone().map_or(String::new(), |ty| { format!(" for type `{}`", ty) }),
+ overlap
+ .self_desc
+ .clone()
+ .map_or_else(String::new, |ty| { format!(" for type `{}`", ty) }),
match used_to_be_allowed {
Some(FutureCompatOverlapErrorKind::Issue33140) => " (E0119)",
_ => "",
@@ -415,7 +418,7 @@
impl_span,
format!(
"conflicting implementation{}",
- overlap.self_desc.map_or(String::new(), |ty| format!(" for `{}`", ty))
+ overlap.self_desc.map_or_else(String::new, |ty| format!(" for `{}`", ty))
),
);
}
diff --git a/compiler/rustc_traits/Cargo.toml b/compiler/rustc_traits/Cargo.toml
index 8fdbc3b..a7ce14a 100644
--- a/compiler/rustc_traits/Cargo.toml
+++ b/compiler/rustc_traits/Cargo.toml
@@ -16,6 +16,6 @@
chalk-ir = "0.55.0"
chalk-solve = "0.55.0"
chalk-engine = "0.55.0"
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_infer = { path = "../rustc_infer" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
diff --git a/compiler/rustc_traits/src/chalk/lowering.rs b/compiler/rustc_traits/src/chalk/lowering.rs
index 7d3589c..fdf5f69 100644
--- a/compiler/rustc_traits/src/chalk/lowering.rs
+++ b/compiler/rustc_traits/src/chalk/lowering.rs
@@ -779,14 +779,11 @@
self,
interner: &RustInterner<'tcx>,
) -> chalk_solve::rust_ir::AliasEqBound<RustInterner<'tcx>> {
- let trait_ref = self.projection_ty.trait_ref(interner.tcx);
+ let (trait_ref, own_substs) = self.projection_ty.trait_ref_and_own_substs(interner.tcx);
chalk_solve::rust_ir::AliasEqBound {
trait_bound: trait_ref.lower_into(interner),
associated_ty_id: chalk_ir::AssocTypeId(self.projection_ty.item_def_id),
- parameters: self.projection_ty.substs[trait_ref.substs.len()..]
- .iter()
- .map(|arg| arg.lower_into(interner))
- .collect(),
+ parameters: own_substs.iter().map(|arg| arg.lower_into(interner)).collect(),
value: self.ty.lower_into(interner),
}
}
diff --git a/compiler/rustc_traits/src/chalk/mod.rs b/compiler/rustc_traits/src/chalk/mod.rs
index d98f181..b7275ba 100644
--- a/compiler/rustc_traits/src/chalk/mod.rs
+++ b/compiler/rustc_traits/src/chalk/mod.rs
@@ -165,7 +165,7 @@
// let's just ignore that
let sol = Canonical {
max_universe: ty::UniverseIndex::from_usize(0),
- variables: obligation.variables.clone(),
+ variables: obligation.variables,
value: QueryResponse {
var_values: CanonicalVarValues { var_values: IndexVec::new() }
.make_identity(tcx),
diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs
index 2827163..9d5b9d7 100644
--- a/compiler/rustc_traits/src/dropck_outlives.rs
+++ b/compiler/rustc_traits/src/dropck_outlives.rs
@@ -75,12 +75,12 @@
// Set used to detect infinite recursion.
let mut ty_set = FxHashSet::default();
- let mut fulfill_cx = TraitEngine::new(infcx.tcx);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
let cause = ObligationCause::dummy();
let mut constraints = DtorckConstraint::empty();
while let Some((ty, depth)) = ty_stack.pop() {
- info!(
+ debug!(
"{} kinds, {} overflows, {} ty_stack",
result.kinds.len(),
result.overflows.len(),
diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs
index cf2c6ef..6b9d46e 100644
--- a/compiler/rustc_ty_utils/src/instance.rs
+++ b/compiler/rustc_ty_utils/src/instance.rs
@@ -10,6 +10,7 @@
use tracing::debug;
+#[instrument(level = "debug", skip(tcx))]
fn resolve_instance<'tcx>(
tcx: TyCtxt<'tcx>,
key: ty::ParamEnvAnd<'tcx, (DefId, SubstsRef<'tcx>)>,
@@ -38,13 +39,13 @@
)
}
+#[instrument(level = "debug", skip(tcx))]
fn inner_resolve_instance<'tcx>(
tcx: TyCtxt<'tcx>,
key: ty::ParamEnvAnd<'tcx, (ty::WithOptConstParam<DefId>, SubstsRef<'tcx>)>,
) -> Result<Option<Instance<'tcx>>, ErrorReported> {
let (param_env, (def, substs)) = key.into_parts();
- debug!("resolve(def={:?}, substs={:?})", def.did, substs);
let result = if let Some(trait_def_id) = tcx.trait_of_item(def.did) {
debug!(" => associated item, attempting to find impl in param_env {:#?}", param_env);
let item = tcx.associated_item(def.did);
@@ -93,7 +94,7 @@
};
Ok(Some(Instance { def, substs }))
};
- debug!("resolve(def.did={:?}, substs={:?}) = {:?}", def.did, substs, result);
+ debug!("inner_resolve_instance: result={:?}", result);
result
}
@@ -274,7 +275,8 @@
traits::ImplSource::AutoImpl(..)
| traits::ImplSource::Param(..)
| traits::ImplSource::TraitAlias(..)
- | traits::ImplSource::DiscriminantKind(..) => None,
+ | traits::ImplSource::DiscriminantKind(..)
+ | traits::ImplSource::Pointee(..) => None,
})
}
diff --git a/compiler/rustc_ty_utils/src/lib.rs b/compiler/rustc_ty_utils/src/lib.rs
index 904c006..aa5d338 100644
--- a/compiler/rustc_ty_utils/src/lib.rs
+++ b/compiler/rustc_ty_utils/src/lib.rs
@@ -5,6 +5,8 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(half_open_range_patterns)]
+#![feature(exclusive_range_pattern)]
#![feature(nll)]
#![recursion_limit = "256"]
diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs
index 77aa441..29f1761 100644
--- a/compiler/rustc_ty_utils/src/ty.rs
+++ b/compiler/rustc_ty_utils/src/ty.rs
@@ -82,7 +82,7 @@
parent_def_id: LocalDefId,
trait_item_ref: &hir::TraitItemRef,
) -> ty::AssocItem {
- let def_id = tcx.hir().local_def_id(trait_item_ref.id.hir_id);
+ let def_id = trait_item_ref.id.def_id;
let (kind, has_self) = match trait_item_ref.kind {
hir::AssocItemKind::Const => (ty::AssocKind::Const, false),
hir::AssocItemKind::Fn { has_self } => (ty::AssocKind::Fn, has_self),
@@ -105,7 +105,7 @@
parent_def_id: LocalDefId,
impl_item_ref: &hir::ImplItemRef<'_>,
) -> ty::AssocItem {
- let def_id = tcx.hir().local_def_id(impl_item_ref.id.hir_id);
+ let def_id = impl_item_ref.id.def_id;
let (kind, has_self) = match impl_item_ref.kind {
hir::AssocItemKind::Const => (ty::AssocKind::Const, false),
hir::AssocItemKind::Fn { has_self } => (ty::AssocKind::Fn, has_self),
@@ -130,7 +130,9 @@
let parent_item = tcx.hir().expect_item(parent_id);
match parent_item.kind {
hir::ItemKind::Impl(ref impl_) => {
- if let Some(impl_item_ref) = impl_.items.iter().find(|i| i.id.hir_id == id) {
+ if let Some(impl_item_ref) =
+ impl_.items.iter().find(|i| i.id.def_id.to_def_id() == def_id)
+ {
let assoc_item =
associated_item_from_impl_item_ref(tcx, parent_def_id, impl_item_ref);
debug_assert_eq!(assoc_item.def_id, def_id);
@@ -139,7 +141,9 @@
}
hir::ItemKind::Trait(.., ref trait_item_refs) => {
- if let Some(trait_item_ref) = trait_item_refs.iter().find(|i| i.id.hir_id == id) {
+ if let Some(trait_item_ref) =
+ trait_item_refs.iter().find(|i| i.id.def_id.to_def_id() == def_id)
+ {
let assoc_item =
associated_item_from_trait_item_ref(tcx, parent_def_id, trait_item_ref);
debug_assert_eq!(assoc_item.def_id, def_id);
@@ -196,17 +200,10 @@
let item = tcx.hir().expect_item(id);
match item.kind {
hir::ItemKind::Trait(.., ref trait_item_refs) => tcx.arena.alloc_from_iter(
- trait_item_refs
- .iter()
- .map(|trait_item_ref| trait_item_ref.id)
- .map(|id| tcx.hir().local_def_id(id.hir_id).to_def_id()),
+ trait_item_refs.iter().map(|trait_item_ref| trait_item_ref.id.def_id.to_def_id()),
),
hir::ItemKind::Impl(ref impl_) => tcx.arena.alloc_from_iter(
- impl_
- .items
- .iter()
- .map(|impl_item_ref| impl_item_ref.id)
- .map(|id| tcx.hir().local_def_id(id.hir_id).to_def_id()),
+ impl_.items.iter().map(|impl_item_ref| impl_item_ref.id.def_id.to_def_id()),
),
hir::ItemKind::TraitAlias(..) => &[],
_ => span_bug!(item.span, "associated_item_def_ids: not impl or trait"),
@@ -484,6 +481,63 @@
fn_like.asyncness()
}
+/// Don't call this directly: use ``tcx.conservative_is_privately_uninhabited`` instead.
+#[instrument(level = "debug", skip(tcx))]
+pub fn conservative_is_privately_uninhabited_raw<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env_and: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> bool {
+ let (param_env, ty) = param_env_and.into_parts();
+ match ty.kind() {
+ ty::Never => {
+ debug!("ty::Never =>");
+ true
+ }
+ ty::Adt(def, _) if def.is_union() => {
+ debug!("ty::Adt(def, _) if def.is_union() =>");
+ // For now, `union`s are never considered uninhabited.
+ false
+ }
+ ty::Adt(def, substs) => {
+ debug!("ty::Adt(def, _) if def.is_not_union() =>");
+ // Any ADT is uninhabited if either:
+ // (a) It has no variants (i.e. an empty `enum`);
+ // (b) Each of its variants (a single one in the case of a `struct`) has at least
+ // one uninhabited field.
+ def.variants.iter().all(|var| {
+ var.fields.iter().any(|field| {
+ let ty = tcx.type_of(field.did).subst(tcx, substs);
+ tcx.conservative_is_privately_uninhabited(param_env.and(ty))
+ })
+ })
+ }
+ ty::Tuple(..) => {
+ debug!("ty::Tuple(..) =>");
+ ty.tuple_fields().any(|ty| tcx.conservative_is_privately_uninhabited(param_env.and(ty)))
+ }
+ ty::Array(ty, len) => {
+ debug!("ty::Array(ty, len) =>");
+ match len.try_eval_usize(tcx, param_env) {
+ Some(0) | None => false,
+ // If the array is definitely non-empty, it's uninhabited if
+ // the type of its elements is uninhabited.
+ Some(1..) => tcx.conservative_is_privately_uninhabited(param_env.and(ty)),
+ }
+ }
+ ty::Ref(..) => {
+ debug!("ty::Ref(..) =>");
+            // References to uninitialised memory are valid for any type, including
+ // uninhabited types, in unsafe code, so we treat all references as
+ // inhabited.
+ false
+ }
+ _ => {
+ debug!("_ =>");
+ false
+ }
+ }
+}
+
pub fn provide(providers: &mut ty::query::Providers) {
*providers = ty::query::Providers {
asyncness,
@@ -501,6 +555,7 @@
instance_def_size_estimate,
issue33140_self_ty,
impl_defaultness,
+ conservative_is_privately_uninhabited: conservative_is_privately_uninhabited_raw,
..*providers
};
}
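A small sketch of the rules `conservative_is_privately_uninhabited_raw` encodes, using made-up types: an empty enum is uninhabited, a struct is uninhabited if every variant has at least one uninhabited field, and unions and references are always treated as inhabited:

enum Never {}

struct Wrapper {
    _inner: Never, // the uninhabited field makes `Wrapper` uninhabited too
}

fn diverge(w: Wrapper) -> ! {
    // An empty match is exhaustive because `Never` has no variants.
    match w._inner {}
}

fn main() {
    // `Wrapper` can never be constructed, so `diverge` is uncallable.
    let _ = diverge as fn(Wrapper) -> !;
}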
diff --git a/compiler/rustc_type_ir/src/lib.rs b/compiler/rustc_type_ir/src/lib.rs
index 7e70af2..fccd8b7 100644
--- a/compiler/rustc_type_ir/src/lib.rs
+++ b/compiler/rustc_type_ir/src/lib.rs
@@ -427,7 +427,7 @@
}
}
-#[derive(Copy, Clone, PartialEq, Decodable, Encodable)]
+#[derive(Copy, Clone, PartialEq, Decodable, Encodable, Hash)]
pub enum Variance {
Covariant, // T<A> <: T<B> iff A <: B -- e.g., function return type
Invariant, // T<A> <: T<B> iff B == A -- e.g., type of mutable cell
diff --git a/compiler/rustc_typeck/Cargo.toml b/compiler/rustc_typeck/Cargo.toml
index e3ba0be..d92d317 100644
--- a/compiler/rustc_typeck/Cargo.toml
+++ b/compiler/rustc_typeck/Cargo.toml
@@ -20,7 +20,7 @@
rustc_hir_pretty = { path = "../rustc_hir_pretty" }
rustc_target = { path = "../rustc_target" }
rustc_session = { path = "../rustc_session" }
-smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
rustc_index = { path = "../rustc_index" }
diff --git a/compiler/rustc_typeck/src/astconv/errors.rs b/compiler/rustc_typeck/src/astconv/errors.rs
index 545c301..b5404c3 100644
--- a/compiler/rustc_typeck/src/astconv/errors.rs
+++ b/compiler/rustc_typeck/src/astconv/errors.rs
@@ -237,7 +237,7 @@
}
}
if let ([], [bound]) = (&potential_assoc_types[..], &trait_bounds) {
- match &bound.trait_ref.path.segments[..] {
+ match bound.trait_ref.path.segments {
// FIXME: `trait_ref.path.span` can point to a full path with multiple
// segments, even though `trait_ref.path.segments` is of length `1`. Work
// around that bug here, even though it should be fixed elsewhere.
diff --git a/compiler/rustc_typeck/src/astconv/generics.rs b/compiler/rustc_typeck/src/astconv/generics.rs
index 67e37ca..0ea0cca 100644
--- a/compiler/rustc_typeck/src/astconv/generics.rs
+++ b/compiler/rustc_typeck/src/astconv/generics.rs
@@ -6,8 +6,9 @@
use crate::errors::AssocTypeBindingNotAllowed;
use crate::structured_errors::{StructuredDiagnostic, WrongNumberOfGenericArgs};
use rustc_ast::ast::ParamKindOrd;
-use rustc_errors::{struct_span_err, Applicability, ErrorReported};
+use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorReported};
use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::GenericArg;
use rustc_middle::ty::{
@@ -43,23 +44,57 @@
}
}
+ let add_braces_suggestion = |arg: &GenericArg<'_>, err: &mut DiagnosticBuilder<'_>| {
+ let suggestions = vec![
+ (arg.span().shrink_to_lo(), String::from("{ ")),
+ (arg.span().shrink_to_hi(), String::from(" }")),
+ ];
+ err.multipart_suggestion(
+ "if this generic argument was intended as a const parameter, \
+ surround it with braces",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ };
+
// Specific suggestion set for diagnostics
         match (arg, &param.kind) {
(
- GenericArg::Type(hir::Ty { kind: hir::TyKind::Path { .. }, .. }),
- GenericParamDefKind::Const { .. },
- ) => {
- let suggestions = vec![
- (arg.span().shrink_to_lo(), String::from("{ ")),
- (arg.span().shrink_to_hi(), String::from(" }")),
- ];
- err.multipart_suggestion(
- "if this generic argument was intended as a const parameter, \
- try surrounding it with braces:",
- suggestions,
- Applicability::MaybeIncorrect,
- );
- }
+ GenericArg::Type(hir::Ty {
+ kind: hir::TyKind::Path(rustc_hir::QPath::Resolved(_, path)),
+ ..
+ }),
+ GenericParamDefKind::Const,
+ ) => match path.res {
+ Res::Err => {
+ add_braces_suggestion(arg, &mut err);
+ err.set_primary_message(
+ "unresolved item provided when a constant was expected",
+ )
+ .emit();
+ return;
+ }
+ Res::Def(DefKind::TyParam, src_def_id) => {
+ if let Some(param_local_id) = param.def_id.as_local() {
+ let param_hir_id = tcx.hir().local_def_id_to_hir_id(param_local_id);
+ let param_name = tcx.hir().ty_param_name(param_hir_id);
+ let param_type = tcx.type_of(param.def_id);
+ if param_type.is_suggestable() {
+ err.span_suggestion(
+ tcx.def_span(src_def_id),
+                            "consider changing this type parameter to a `const`-generic",
+ format!("const {}: {}", param_name, param_type),
+ Applicability::MaybeIncorrect,
+ );
+ };
+ }
+ }
+ _ => add_braces_suggestion(arg, &mut err),
+ },
+ (
+ GenericArg::Type(hir::Ty { kind: hir::TyKind::Path(_), .. }),
+ GenericParamDefKind::Const,
+ ) => add_braces_suggestion(arg, &mut err),
(
GenericArg::Type(hir::Ty { kind: hir::TyKind::Array(_, len), .. }),
GenericParamDefKind::Const { .. },
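The reworked suggestion above asks users to brace-wrap const arguments. A hedged example of the call shape it targets (names invented for illustration):

fn first_n<const N: usize>(xs: &[u8; 4]) -> &[u8] {
    &xs[..N]
}

fn main() {
    let data = [1u8, 2, 3, 4];
    // `first_n::<2 + 1>(&data)` is rejected as a const argument;
    // wrapping the expression in braces makes it parse.
    let prefix = first_n::<{ 2 + 1 }>(&data);
    assert_eq!(prefix, &[1, 2, 3]);
}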
diff --git a/compiler/rustc_typeck/src/astconv/mod.rs b/compiler/rustc_typeck/src/astconv/mod.rs
index 5659345..7c53980 100644
--- a/compiler/rustc_typeck/src/astconv/mod.rs
+++ b/compiler/rustc_typeck/src/astconv/mod.rs
@@ -49,9 +49,10 @@
fn default_constness_for_trait_bounds(&self) -> Constness;
- /// Returns predicates in scope of the form `X: Foo`, where `X` is
- /// a type parameter `X` with the given id `def_id`. This is a
- /// subset of the full set of predicates.
+ /// Returns predicates in scope of the form `X: Foo<T>`, where `X`
+ /// is a type parameter `X` with the given id `def_id` and T
+ /// matches `assoc_name`. This is a subset of the full set of
+ /// predicates.
///
/// This is used for one specific purpose: resolving "short-hand"
/// associated type references like `T::Item`. In principle, we
@@ -60,7 +61,12 @@
/// but this can lead to cycle errors. The problem is that we have
/// to do this resolution *in order to create the predicates in
/// the first place*. Hence, we have this "special pass".
- fn get_type_parameter_bounds(&self, span: Span, def_id: DefId) -> ty::GenericPredicates<'tcx>;
+ fn get_type_parameter_bounds(
+ &self,
+ span: Span,
+ def_id: DefId,
+ assoc_name: Ident,
+ ) -> ty::GenericPredicates<'tcx>;
/// Returns the lifetime to use when a lifetime is omitted (and not elided).
fn re_infer(&self, param: Option<&ty::GenericParamDef>, span: Span)
@@ -792,7 +798,7 @@
}
// Returns `true` if a bounds list includes `?Sized`.
- pub fn is_unsized(&self, ast_bounds: &[hir::GenericBound<'_>], span: Span) -> bool {
+ pub fn is_unsized(&self, ast_bounds: &[&hir::GenericBound<'_>], span: Span) -> bool {
let tcx = self.tcx();
// Try to find an unbound in bounds.
@@ -850,7 +856,7 @@
fn add_bounds(
&self,
param_ty: Ty<'tcx>,
- ast_bounds: &[hir::GenericBound<'_>],
+ ast_bounds: &[&hir::GenericBound<'_>],
bounds: &mut Bounds<'tcx>,
) {
let constness = self.default_constness_for_trait_bounds();
@@ -865,7 +871,7 @@
hir::GenericBound::Trait(_, hir::TraitBoundModifier::Maybe) => {}
hir::GenericBound::LangItemTrait(lang_item, span, hir_id, args) => self
.instantiate_lang_item_trait_ref(
- lang_item, span, hir_id, args, param_ty, bounds,
+ *lang_item, *span, *hir_id, args, param_ty, bounds,
),
hir::GenericBound::Outlives(ref l) => bounds
.region_bounds
@@ -897,10 +903,45 @@
sized_by_default: SizedByDefault,
span: Span,
) -> Bounds<'tcx> {
+ let ast_bounds: Vec<_> = ast_bounds.iter().collect();
+ self.compute_bounds_inner(param_ty, &ast_bounds, sized_by_default, span)
+ }
+
+ /// Convert the bounds in `ast_bounds` that refer to traits which define an associated type
+ /// named `assoc_name` into ty::Bounds. Ignore the rest.
+ pub fn compute_bounds_that_match_assoc_type(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: &[hir::GenericBound<'_>],
+ sized_by_default: SizedByDefault,
+ span: Span,
+ assoc_name: Ident,
+ ) -> Bounds<'tcx> {
+ let mut result = Vec::new();
+
+ for ast_bound in ast_bounds {
+ if let Some(trait_ref) = ast_bound.trait_ref() {
+ if let Some(trait_did) = trait_ref.trait_def_id() {
+ if self.tcx().trait_may_define_assoc_type(trait_did, assoc_name) {
+ result.push(ast_bound);
+ }
+ }
+ }
+ }
+
+ self.compute_bounds_inner(param_ty, &result, sized_by_default, span)
+ }
+
+ fn compute_bounds_inner(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: &[&hir::GenericBound<'_>],
+ sized_by_default: SizedByDefault,
+ span: Span,
+ ) -> Bounds<'tcx> {
let mut bounds = Bounds::default();
self.add_bounds(param_ty, ast_bounds, &mut bounds);
- bounds.trait_bounds.sort_by_key(|(t, _, _)| t.def_id());
bounds.implicitly_sized = if let SizedByDefault::Yes = sized_by_default {
if !self.is_unsized(ast_bounds, span) { Some(span) } else { None }
@@ -943,10 +984,7 @@
//
// We want to produce `<B as SuperTrait<i32>>::T == foo`.
- debug!(
- "add_predicates_for_ast_type_binding(hir_ref_id {:?}, trait_ref {:?}, binding {:?}, bounds {:?}",
- hir_ref_id, trait_ref, binding, bounds
- );
+ debug!(?hir_ref_id, ?trait_ref, ?binding, ?bounds, "add_predicates_for_ast_type_binding",);
let tcx = self.tcx();
let candidate =
@@ -1098,7 +1136,8 @@
// parameter to have a skipped binder.
let param_ty =
tcx.mk_projection(assoc_ty.def_id, projection_ty.skip_binder().substs);
- self.add_bounds(param_ty, ast_bounds, bounds);
+ let ast_bounds: Vec<_> = ast_bounds.iter().collect();
+ self.add_bounds(param_ty, &ast_bounds, bounds);
}
}
Ok(())
@@ -1278,42 +1317,42 @@
// De-duplicate auto traits so that, e.g., `dyn Trait + Send + Send` is the same as
// `dyn Trait + Send`.
- auto_traits.sort_by_key(|i| i.trait_ref().def_id());
- auto_traits.dedup_by_key(|i| i.trait_ref().def_id());
+ // We remove duplicates by inserting into a `FxHashSet` to avoid re-ordering
+ // the bounds
+ let mut duplicates = FxHashSet::default();
+ auto_traits.retain(|i| duplicates.insert(i.trait_ref().def_id()));
debug!("regular_traits: {:?}", regular_traits);
debug!("auto_traits: {:?}", auto_traits);
- // Transform a `PolyTraitRef` into a `PolyExistentialTraitRef` by
- // removing the dummy `Self` type (`trait_object_dummy_self`).
- let trait_ref_to_existential = |trait_ref: ty::TraitRef<'tcx>| {
- if trait_ref.self_ty() != dummy_self {
- // FIXME: There appears to be a missing filter on top of `expand_trait_aliases`,
- // which picks up non-supertraits where clauses - but also, the object safety
- // completely ignores trait aliases, which could be object safety hazards. We
- // `delay_span_bug` here to avoid an ICE in stable even when the feature is
- // disabled. (#66420)
- tcx.sess.delay_span_bug(
- DUMMY_SP,
- &format!(
- "trait_ref_to_existential called on {:?} with non-dummy Self",
- trait_ref,
- ),
- );
- }
- ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
- };
-
// Erase the `dummy_self` (`trait_object_dummy_self`) used above.
- let existential_trait_refs =
- regular_traits.iter().map(|i| i.trait_ref().map_bound(trait_ref_to_existential));
+ let existential_trait_refs = regular_traits.iter().map(|i| {
+ i.trait_ref().map_bound(|trait_ref: ty::TraitRef<'tcx>| {
+ if trait_ref.self_ty() != dummy_self {
+ // FIXME: There appears to be a missing filter on top of `expand_trait_aliases`,
+ // which picks up non-supertraits where clauses - but also, the object safety
+ // completely ignores trait aliases, which could be object safety hazards. We
+ // `delay_span_bug` here to avoid an ICE in stable even when the feature is
+ // disabled. (#66420)
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ &format!(
+ "trait_ref_to_existential called on {:?} with non-dummy Self",
+ trait_ref,
+ ),
+ );
+ }
+ ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
+ })
+ });
let existential_projections = bounds.projection_bounds.iter().map(|(bound, _)| {
bound.map_bound(|b| {
- let trait_ref = trait_ref_to_existential(b.projection_ty.trait_ref(tcx));
- ty::ExistentialProjection {
- ty: b.ty,
- item_def_id: b.projection_ty.item_def_id,
- substs: trait_ref.substs,
+ if b.projection_ty.self_ty() != dummy_self {
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ &format!("trait_ref_to_existential called on {:?} with non-dummy Self", b),
+ );
}
+ ty::ExistentialProjection::erase_self_ty(tcx, b)
})
});
@@ -1375,8 +1414,13 @@
name: Symbol,
) {
let mut err = struct_span_err!(self.tcx().sess, span, E0223, "ambiguous associated type");
- if let (Some(_), Ok(snippet)) = (
- self.tcx().sess.confused_type_with_std_module.borrow().get(&span),
+ if let (true, Ok(snippet)) = (
+ self.tcx()
+ .sess
+ .confused_type_with_std_module
+ .borrow()
+ .keys()
+ .any(|full_span| full_span.contains(span)),
self.tcx().sess.source_map().span_to_snippet(span),
) {
err.span_suggestion(
@@ -1413,8 +1457,9 @@
ty_param_def_id, assoc_name, span,
);
- let predicates =
- &self.get_type_parameter_bounds(span, ty_param_def_id.to_def_id()).predicates;
+ let predicates = &self
+ .get_type_parameter_bounds(span, ty_param_def_id.to_def_id(), assoc_name)
+ .predicates;
debug!("find_bound_for_assoc_item: predicates={:#?}", predicates);
@@ -1422,11 +1467,12 @@
let param_name = tcx.hir().ty_param_name(param_hir_id);
self.one_bound_for_assoc_type(
|| {
- traits::transitive_bounds(
+ traits::transitive_bounds_that_define_assoc_type(
tcx,
predicates.iter().filter_map(|(p, _)| {
p.to_opt_poly_trait_ref().map(|trait_ref| trait_ref.value)
}),
+ assoc_name,
)
},
|| param_name.to_string(),
@@ -1562,6 +1608,8 @@
// the whole path.
// Will fail except for `T::A` and `Self::A`; i.e., if `qself_ty`/`qself_def` are not a type
// parameter or `Self`.
+ // NOTE: When this function starts resolving `Trait::AssocTy` successfully
+    // it should also start reporting the `BARE_TRAIT_OBJECTS` lint.
pub fn associated_path_to_ty(
&self,
hir_ref_id: hir::HirId,
@@ -2145,15 +2193,17 @@
}
hir::TyKind::BareFn(ref bf) => {
require_c_abi_if_c_variadic(tcx, &bf.decl, bf.abi, ast_ty.span);
+
tcx.mk_fn_ptr(self.ty_of_fn(
bf.unsafety,
bf.abi,
&bf.decl,
&hir::Generics::empty(),
None,
+ Some(ast_ty),
))
}
- hir::TyKind::TraitObject(ref bounds, ref lifetime) => {
+ hir::TyKind::TraitObject(ref bounds, ref lifetime, _) => {
self.conv_object_ty_poly_trait_ref(ast_ty.span, bounds, lifetime, borrowed)
}
hir::TyKind::Path(hir::QPath::Resolved(ref maybe_qself, ref path)) => {
@@ -2162,8 +2212,8 @@
self.res_to_ty(opt_self_ty, path, false)
}
hir::TyKind::OpaqueDef(item_id, ref lifetimes) => {
- let opaque_ty = tcx.hir().expect_item(item_id.id);
- let def_id = tcx.hir().local_def_id(item_id.id).to_def_id();
+ let opaque_ty = tcx.hir().item(item_id);
+ let def_id = item_id.def_id.to_def_id();
match opaque_ty.kind {
hir::ItemKind::OpaqueTy(hir::OpaqueTy { impl_trait_fn, .. }) => {
@@ -2290,6 +2340,7 @@
decl: &hir::FnDecl<'_>,
generics: &hir::Generics<'_>,
ident_span: Option<Span>,
+ hir_ty: Option<&hir::Ty<'_>>,
) -> ty::PolyFnSig<'tcx> {
debug!("ty_of_fn");
@@ -2321,12 +2372,14 @@
// only want to emit an error complaining about them if infer types (`_`) are not
// allowed. `allow_ty_infer` gates this behavior. We check for the presence of
// `ident_span` to not emit an error twice when we have `fn foo(_: fn() -> _)`.
+
crate::collect::placeholder_type_error(
tcx,
ident_span.map(|sp| sp.shrink_to_hi()),
- &generics.params[..],
+ generics.params,
visitor.0,
true,
+ hir_ty,
);
}
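The `assoc_name` filtering above exists to resolve shorthand associated type paths without collecting every bound on the parameter. A plain-Rust illustration of the shorthand in question:

fn first<T: Iterator + Clone>(mut it: T) -> Option<T::Item> {
    // `T::Item` resolves through the `Iterator` bound; the unrelated
    // `Clone` bound plays no part in the lookup.
    it.next()
}

fn main() {
    assert_eq!(first(1..4), Some(1));
}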
diff --git a/compiler/rustc_typeck/src/check/_match.rs b/compiler/rustc_typeck/src/check/_match.rs
index 30e0e3e..d056f2c 100644
--- a/compiler/rustc_typeck/src/check/_match.rs
+++ b/compiler/rustc_typeck/src/check/_match.rs
@@ -1,10 +1,11 @@
use crate::check::coercion::{AsCoercionSite, CoerceMany};
use crate::check::{Diverges, Expectation, FnCtxt, Needs};
+use rustc_errors::{Applicability, DiagnosticBuilder};
use rustc_hir::{self as hir, ExprKind};
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::traits::Obligation;
use rustc_middle::ty::{self, ToPredicate, Ty, TyS};
-use rustc_span::Span;
+use rustc_span::{MultiSpan, Span};
use rustc_trait_selection::opaque_types::InferCtxtExt as _;
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
use rustc_trait_selection::traits::{
@@ -206,7 +207,64 @@
),
};
let cause = self.cause(span, code);
- coercion.coerce(self, &cause, &arm.body, arm_ty);
+
+ // This is the moral equivalent of `coercion.coerce(self, cause, arm.body, arm_ty)`.
+ // We use it this way to be able to expand on the potential error and detect when a
+ // `match` tail statement could be a tail expression instead. If so, we suggest
+ // removing the stray semicolon.
+ coercion.coerce_inner(
+ self,
+ &cause,
+ Some(&arm.body),
+ arm_ty,
+ Some(&mut |err: &mut DiagnosticBuilder<'_>| {
+ let can_coerce_to_return_ty = match self.ret_coercion.as_ref() {
+ Some(ret_coercion) if self.in_tail_expr => {
+ let ret_ty = ret_coercion.borrow().expected_ty();
+ let ret_ty = self.inh.infcx.shallow_resolve(ret_ty);
+ self.can_coerce(arm_ty, ret_ty)
+ && prior_arm_ty.map_or(true, |t| self.can_coerce(t, ret_ty))
+ // The match arms need to unify for the case of `impl Trait`.
+ && !matches!(ret_ty.kind(), ty::Opaque(..))
+ }
+ _ => false,
+ };
+ if let (Expectation::IsLast(stmt), Some(ret), true) =
+ (orig_expected, self.ret_type_span, can_coerce_to_return_ty)
+ {
+ let semi_span = expr.span.shrink_to_hi().with_hi(stmt.hi());
+ let mut ret_span: MultiSpan = semi_span.into();
+ ret_span.push_span_label(
+ expr.span,
+ "this could be implicitly returned but it is a statement, not a \
+ tail expression"
+ .to_owned(),
+ );
+ ret_span.push_span_label(
+ ret,
+ "the `match` arms can conform to this return type".to_owned(),
+ );
+ ret_span.push_span_label(
+ semi_span,
+ "the `match` is a statement because of this semicolon, consider \
+ removing it"
+ .to_owned(),
+ );
+ err.span_note(
+ ret_span,
+ "you might have meant to return the `match` expression",
+ );
+ err.tool_only_span_suggestion(
+ semi_span,
+ "remove this semicolon",
+ String::new(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }),
+ false,
+ );
+
other_arms.push(arm_span);
if other_arms.len() > 5 {
other_arms.remove(0);
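The new note ("you might have meant to return the `match` expression") targets code like the sketch below, where a stray semicolon would turn the `match` into a statement instead of the tail expression:

fn classify(n: i32) -> &'static str {
    // With a `;` after the closing brace, the `match` becomes a statement,
    // the body evaluates to `()`, and each arm fails to coerce to the
    // declared return type.
    match n {
        0 => "zero",
        _ => "nonzero",
    }
}

fn main() {
    assert_eq!(classify(0), "zero");
}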
diff --git a/compiler/rustc_typeck/src/check/callee.rs b/compiler/rustc_typeck/src/check/callee.rs
index 4836418..a29f551 100644
--- a/compiler/rustc_typeck/src/check/callee.rs
+++ b/compiler/rustc_typeck/src/check/callee.rs
@@ -4,7 +4,7 @@
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder};
use rustc_hir as hir;
-use rustc_hir::def::Res;
+use rustc_hir::def::{Namespace, Res};
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::{infer, traits};
@@ -77,11 +77,17 @@
let output = match result {
None => {
// this will report an error since original_callee_ty is not a fn
- self.confirm_builtin_call(call_expr, original_callee_ty, arg_exprs, expected)
+ self.confirm_builtin_call(
+ call_expr,
+ callee_expr,
+ original_callee_ty,
+ arg_exprs,
+ expected,
+ )
}
Some(CallStep::Builtin(callee_ty)) => {
- self.confirm_builtin_call(call_expr, callee_ty, arg_exprs, expected)
+ self.confirm_builtin_call(call_expr, callee_expr, callee_ty, arg_exprs, expected)
}
Some(CallStep::DeferredClosure(fn_sig)) => {
@@ -281,6 +287,7 @@
fn confirm_builtin_call(
&self,
call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
callee_ty: Ty<'tcx>,
arg_exprs: &'tcx [hir::Expr<'tcx>],
expected: Expectation<'tcx>,
@@ -299,92 +306,105 @@
}
}
- if let hir::ExprKind::Call(callee, _) = call_expr.kind {
- let mut err = type_error_struct!(
- self.tcx.sess,
- callee.span,
- callee_ty,
- E0618,
- "expected function, found {}",
- match unit_variant {
- Some(ref path) => format!("enum variant `{}`", path),
- None => format!("`{}`", callee_ty),
- }
- );
+ let mut err = type_error_struct!(
+ self.tcx.sess,
+ callee_expr.span,
+ callee_ty,
+ E0618,
+ "expected function, found {}",
+ match unit_variant {
+ Some(ref path) => format!("enum variant `{}`", path),
+ None => format!("`{}`", callee_ty),
+ }
+ );
- self.identify_bad_closure_def_and_call(
- &mut err,
- call_expr.hir_id,
- &callee.kind,
- callee.span,
- );
+ self.identify_bad_closure_def_and_call(
+ &mut err,
+ call_expr.hir_id,
+ &callee_expr.kind,
+ callee_expr.span,
+ );
- if let Some(ref path) = unit_variant {
- err.span_suggestion(
- call_expr.span,
- &format!(
- "`{}` is a unit variant, you need to write it \
+ if let Some(ref path) = unit_variant {
+ err.span_suggestion(
+ call_expr.span,
+ &format!(
+ "`{}` is a unit variant, you need to write it \
without the parenthesis",
- path
- ),
- path.to_string(),
- Applicability::MachineApplicable,
- );
- }
-
- let mut inner_callee_path = None;
- let def = match callee.kind {
- hir::ExprKind::Path(ref qpath) => {
- self.typeck_results.borrow().qpath_res(qpath, callee.hir_id)
- }
- hir::ExprKind::Call(ref inner_callee, _) => {
- // If the call spans more than one line and the callee kind is
- // itself another `ExprCall`, that's a clue that we might just be
- // missing a semicolon (Issue #51055)
- let call_is_multiline =
- self.tcx.sess.source_map().is_multiline(call_expr.span);
- if call_is_multiline {
- err.span_suggestion(
- callee.span.shrink_to_hi(),
- "try adding a semicolon",
- ";".to_owned(),
- Applicability::MaybeIncorrect,
- );
- }
- if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.kind {
- inner_callee_path = Some(inner_qpath);
- self.typeck_results
- .borrow()
- .qpath_res(inner_qpath, inner_callee.hir_id)
- } else {
- Res::Err
- }
- }
- _ => Res::Err,
- };
-
- err.span_label(call_expr.span, "call expression requires function");
-
- if let Some(span) = self.tcx.hir().res_span(def) {
- let callee_ty = callee_ty.to_string();
- let label = match (unit_variant, inner_callee_path) {
- (Some(path), _) => Some(format!("`{}` defined here", path)),
- (_, Some(hir::QPath::Resolved(_, path))) => {
- self.tcx.sess.source_map().span_to_snippet(path.span).ok().map(
- |p| format!("`{}` defined here returns `{}`", p, callee_ty),
- )
- }
- _ => Some(format!("`{}` defined here", callee_ty)),
- };
- if let Some(label) = label {
- err.span_label(span, label);
- }
- }
- err.emit();
- } else {
- bug!("call_expr.kind should be an ExprKind::Call, got {:?}", call_expr.kind);
+ path
+ ),
+ path.to_string(),
+ Applicability::MachineApplicable,
+ );
}
+ let mut inner_callee_path = None;
+ let def = match callee_expr.kind {
+ hir::ExprKind::Path(ref qpath) => {
+ self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
+ }
+ hir::ExprKind::Call(ref inner_callee, _) => {
+ // If the call spans more than one line and the callee kind is
+ // itself another `ExprCall`, that's a clue that we might just be
+ // missing a semicolon (Issue #51055)
+ let call_is_multiline =
+ self.tcx.sess.source_map().is_multiline(call_expr.span);
+ if call_is_multiline {
+ err.span_suggestion(
+ callee_expr.span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";".to_owned(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.kind {
+ inner_callee_path = Some(inner_qpath);
+ self.typeck_results.borrow().qpath_res(inner_qpath, inner_callee.hir_id)
+ } else {
+ Res::Err
+ }
+ }
+ _ => Res::Err,
+ };
+
+ err.span_label(call_expr.span, "call expression requires function");
+
+ if let Some(span) = self.tcx.hir().res_span(def) {
+ let callee_ty = callee_ty.to_string();
+ let label = match (unit_variant, inner_callee_path) {
+ (Some(path), _) => Some(format!("`{}` defined here", path)),
+ (_, Some(hir::QPath::Resolved(_, path))) => self
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(path.span)
+ .ok()
+ .map(|p| format!("`{}` defined here returns `{}`", p, callee_ty)),
+ _ => {
+ match def {
+ // Emit a different diagnostic for local variables, as they are not
+ // type definitions themselves, but rather variables *of* that type.
+ Res::Local(hir_id) => Some(format!(
+ "`{}` has type `{}`",
+ self.tcx.hir().name(hir_id),
+ callee_ty
+ )),
+ Res::Def(kind, def_id) if kind.ns() == Some(Namespace::ValueNS) => {
+ Some(format!(
+ "`{}` defined here",
+ self.tcx.def_path_str(def_id),
+ ))
+ }
+ _ => Some(format!("`{}` defined here", callee_ty)),
+ }
+ }
+ };
+ if let Some(label) = label {
+ err.span_label(span, label);
+ }
+ }
+ err.emit();
+
// This is the "default" function signature, used in case of error.
// In that case, we check each argument against "error" in order to
// set up all the node type bindings.
@@ -446,7 +466,7 @@
let expected_arg_tys = self.expected_inputs_for_expected_output(
call_expr.span,
expected,
- fn_sig.output().clone(),
+ fn_sig.output(),
fn_sig.inputs(),
);
diff --git a/compiler/rustc_typeck/src/check/cast.rs b/compiler/rustc_typeck/src/check/cast.rs
index 7924ffe..16c344e 100644
--- a/compiler/rustc_typeck/src/check/cast.rs
+++ b/compiler/rustc_typeck/src/check/cast.rs
@@ -765,9 +765,8 @@
m_expr: ty::TypeAndMut<'tcx>,
m_cast: ty::TypeAndMut<'tcx>,
) -> Result<CastKind, CastError> {
- // array-ptr-cast.
-
- if m_expr.mutbl == hir::Mutability::Not && m_cast.mutbl == hir::Mutability::Not {
+ // array-ptr-cast: allow mut-to-mut, mut-to-const, const-to-const
+ if m_expr.mutbl == hir::Mutability::Mut || m_cast.mutbl == hir::Mutability::Not {
if let ty::Array(ety, _) = m_expr.ty.kind() {
// Due to the limitations of LLVM global constants,
// region pointers end up pointing at copies of
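The relaxed condition in `cast.rs` widens the array-to-raw-pointer cast from const-only to every combination that does not add mutability: mut-to-mut and mut-to-const are now accepted alongside const-to-const. A rough illustration, assuming the usual `&[T; N] as *const T` special case is the path being changed:

```rust
fn main() {
    let mut buf = [0u8; 4];
    let _p1 = &buf as *const u8;     // const-to-const: accepted before and after
    let _p2 = &mut buf as *mut u8;   // mut-to-mut: accepted by the new check
    let _p3 = &mut buf as *const u8; // mut-to-const: accepted by the new check
}
```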
diff --git a/compiler/rustc_typeck/src/check/check.rs b/compiler/rustc_typeck/src/check/check.rs
index 8e2b0bf..e2fc1da 100644
--- a/compiler/rustc_typeck/src/check/check.rs
+++ b/compiler/rustc_typeck/src/check/check.rs
@@ -7,11 +7,13 @@
use rustc_errors::{Applicability, ErrorReported};
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId, LOCAL_CRATE};
+use rustc_hir::intravisit::Visitor;
use rustc_hir::lang_items::LangItem;
-use rustc_hir::{ItemKind, Node};
+use rustc_hir::{def::Res, ItemKind, Node, PathSegment};
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::infer::{RegionVariableOrigin, TyCtxtInferExt};
use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::layout::MAX_SIMD_LANES;
use rustc_middle::ty::subst::GenericArgKind;
use rustc_middle::ty::util::{Discr, IntTypeExt, Representability};
use rustc_middle::ty::{self, ParamEnv, RegionKind, ToPredicate, Ty, TyCtxt};
@@ -85,8 +87,69 @@
let declared_ret_ty = fn_sig.output();
- let revealed_ret_ty =
- fcx.instantiate_opaque_types_from_value(fn_id, declared_ret_ty, decl.output.span());
+ let feature = match tcx.hir().get(fn_id) {
+ // TAIT usage in function return position.
+ // Example:
+ //
+ // ```rust
+ // type Foo = impl Debug;
+ // fn bar() -> Foo { 42 }
+ // ```
+ Node::Item(hir::Item { kind: ItemKind::Fn(..), .. }) |
+ // TAIT usage in associated function return position.
+ //
+ // Example with a free type alias:
+ //
+ // ```rust
+ // type Foo = impl Debug;
+ // impl SomeTrait for SomeType {
+ // fn bar() -> Foo { 42 }
+ // }
+ // ```
+ //
+ // Example with an associated TAIT:
+ //
+ // ```rust
+ // impl SomeTrait for SomeType {
+ // type Foo = impl Debug;
+ // fn bar() -> Self::Foo { 42 }
+ // }
+ // ```
+ Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(..), ..
+ }) => None,
+ // Forbid TAIT in trait declarations for now.
+ // Examples:
+ //
+ // ```rust
+ // type Foo = impl Debug;
+ // trait Bar {
+ // fn bar() -> Foo;
+ // }
+ // trait Bop {
+ // type Bop: PartialEq<Foo>;
+ // }
+ // ```
+ Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(..),
+ ..
+ }) |
+ // Forbid TAIT in closure return position for now.
+ // Example:
+ //
+ // ```rust
+ // type Foo = impl Debug;
+ // let x = |y| -> Foo { 42 + y };
+ // ```
+ Node::Expr(hir::Expr { kind: hir::ExprKind::Closure(..), .. }) => Some(sym::type_alias_impl_trait),
+ node => bug!("Item being checked wasn't a function/closure: {:?}", node),
+ };
+ let revealed_ret_ty = fcx.instantiate_opaque_types_from_value(
+ fn_id,
+ declared_ret_ty,
+ decl.output.span(),
+ feature,
+ );
debug!("check_fn: declared_ret_ty: {}, revealed_ret_ty: {}", declared_ret_ty, revealed_ret_ty);
fcx.ret_coercion = Some(RefCell::new(CoerceMany::new(revealed_ret_ty)));
fcx.ret_type_span = Some(decl.output.span());
@@ -372,8 +435,7 @@
(fcx, gen_ty)
}
-pub(super) fn check_struct(tcx: TyCtxt<'_>, id: hir::HirId, span: Span) {
- let def_id = tcx.hir().local_def_id(id);
+fn check_struct(tcx: TyCtxt<'_>, def_id: LocalDefId, span: Span) {
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
@@ -386,8 +448,7 @@
check_packed(tcx, span, def);
}
-fn check_union(tcx: TyCtxt<'_>, id: hir::HirId, span: Span) {
- let def_id = tcx.hir().local_def_id(id);
+fn check_union(tcx: TyCtxt<'_>, def_id: LocalDefId, span: Span) {
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
@@ -476,7 +537,7 @@
/// Checks that an opaque type does not use `Self` or `T::Foo` projections that would result
/// in "inheriting lifetimes".
-#[instrument(skip(tcx, span))]
+#[instrument(level = "debug", skip(tcx, span))]
pub(super) fn check_opaque_for_inheriting_lifetimes(
tcx: TyCtxt<'tcx>,
def_id: LocalDefId,
@@ -514,10 +575,11 @@
}
}
- #[derive(Debug)]
struct ProhibitOpaqueVisitor<'tcx> {
opaque_identity_ty: Ty<'tcx>,
generics: &'tcx ty::Generics,
+ tcx: TyCtxt<'tcx>,
+ selftys: Vec<(Span, Option<String>)>,
}
impl<'tcx> ty::fold::TypeVisitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
@@ -534,6 +596,29 @@
}
}
+ impl Visitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
+ type Map = rustc_middle::hir::map::Map<'tcx>;
+
+ fn nested_visit_map(&mut self) -> hir::intravisit::NestedVisitorMap<Self::Map> {
+ hir::intravisit::NestedVisitorMap::OnlyBodies(self.tcx.hir())
+ }
+
+ fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) {
+ match arg.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments {
+ [PathSegment { res: Some(Res::SelfTy(_, impl_ref)), .. }] => {
+ let impl_ty_name =
+ impl_ref.map(|(def_id, _)| self.tcx.def_path_str(def_id));
+ self.selftys.push((path.span, impl_ty_name));
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ hir::intravisit::walk_ty(self, arg);
+ }
+ }
+
if let ItemKind::OpaqueTy(hir::OpaqueTy {
origin: hir::OpaqueTyOrigin::AsyncFn | hir::OpaqueTyOrigin::FnReturn,
..
@@ -545,17 +630,20 @@
InternalSubsts::identity_for_item(tcx, def_id.to_def_id()),
),
generics: tcx.generics_of(def_id),
+ tcx,
+ selftys: vec![],
};
let prohibit_opaque = tcx
.explicit_item_bounds(def_id)
.iter()
.try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor));
debug!(
- "check_opaque_for_inheriting_lifetimes: prohibit_opaque={:?}, visitor={:?}",
- prohibit_opaque, visitor
+ "check_opaque_for_inheriting_lifetimes: prohibit_opaque={:?}, visitor.opaque_identity_ty={:?}, visitor.generics={:?}",
+ prohibit_opaque, visitor.opaque_identity_ty, visitor.generics
);
if let Some(ty) = prohibit_opaque.break_value() {
+ visitor.visit_item(&item);
let is_async = match item.kind {
ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
matches!(origin, hir::OpaqueTyOrigin::AsyncFn)
@@ -572,15 +660,13 @@
if is_async { "async fn" } else { "impl Trait" },
);
- if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(span) {
- if snippet == "Self" {
- err.span_suggestion(
- span,
- "consider spelling out the type instead",
- format!("{:?}", ty),
- Applicability::MaybeIncorrect,
- );
- }
+ for (span, name) in visitor.selftys {
+ err.span_suggestion(
+ span,
+ "consider spelling out the type instead",
+ name.unwrap_or_else(|| format!("{:?}", ty)),
+ Applicability::MaybeIncorrect,
+ );
}
err.emit();
}
@@ -634,7 +720,8 @@
// Checked when type checking the function containing them.
hir::OpaqueTyOrigin::FnReturn | hir::OpaqueTyOrigin::AsyncFn => return,
// Can have different predicates to their defining use
- hir::OpaqueTyOrigin::Binding | hir::OpaqueTyOrigin::Misc => {}
+ hir::OpaqueTyOrigin::Binding | hir::OpaqueTyOrigin::Misc | hir::OpaqueTyOrigin::TyAlias => {
+ }
}
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
@@ -682,34 +769,32 @@
pub fn check_item_type<'tcx>(tcx: TyCtxt<'tcx>, it: &'tcx hir::Item<'tcx>) {
debug!(
- "check_item_type(it.hir_id={}, it.name={})",
- it.hir_id,
- tcx.def_path_str(tcx.hir().local_def_id(it.hir_id).to_def_id())
+ "check_item_type(it.def_id={:?}, it.name={})",
+ it.def_id,
+ tcx.def_path_str(it.def_id.to_def_id())
);
let _indenter = indenter();
match it.kind {
// Consts can play a role in type-checking, so they are included here.
hir::ItemKind::Static(..) => {
- let def_id = tcx.hir().local_def_id(it.hir_id);
- tcx.ensure().typeck(def_id);
- maybe_check_static_with_link_section(tcx, def_id, it.span);
- check_static_inhabited(tcx, def_id, it.span);
+ tcx.ensure().typeck(it.def_id);
+ maybe_check_static_with_link_section(tcx, it.def_id, it.span);
+ check_static_inhabited(tcx, it.def_id, it.span);
}
hir::ItemKind::Const(..) => {
- tcx.ensure().typeck(tcx.hir().local_def_id(it.hir_id));
+ tcx.ensure().typeck(it.def_id);
}
hir::ItemKind::Enum(ref enum_definition, _) => {
- check_enum(tcx, it.span, &enum_definition.variants, it.hir_id);
+ check_enum(tcx, it.span, &enum_definition.variants, it.def_id);
}
hir::ItemKind::Fn(..) => {} // entirely within check_item_body
hir::ItemKind::Impl(ref impl_) => {
- debug!("ItemKind::Impl {} with id {}", it.ident, it.hir_id);
- let impl_def_id = tcx.hir().local_def_id(it.hir_id);
- if let Some(impl_trait_ref) = tcx.impl_trait_ref(impl_def_id) {
+ debug!("ItemKind::Impl {} with id {:?}", it.ident, it.def_id);
+ if let Some(impl_trait_ref) = tcx.impl_trait_ref(it.def_id) {
check_impl_items_against_trait(
tcx,
it.span,
- impl_def_id,
+ it.def_id,
impl_trait_ref,
&impl_.items,
);
@@ -718,8 +803,7 @@
}
}
hir::ItemKind::Trait(_, _, _, _, ref items) => {
- let def_id = tcx.hir().local_def_id(it.hir_id);
- check_on_unimplemented(tcx, def_id.to_def_id(), it);
+ check_on_unimplemented(tcx, it.def_id.to_def_id(), it);
for item in items.iter() {
let item = tcx.hir().trait_item(item.id);
@@ -729,16 +813,15 @@
fn_maybe_err(tcx, item.ident.span, abi);
}
hir::TraitItemKind::Type(.., Some(_default)) => {
- let item_def_id = tcx.hir().local_def_id(item.hir_id).to_def_id();
- let assoc_item = tcx.associated_item(item_def_id);
+ let assoc_item = tcx.associated_item(item.def_id);
let trait_substs =
- InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ InternalSubsts::identity_for_item(tcx, it.def_id.to_def_id());
let _: Result<_, rustc_errors::ErrorReported> = check_type_bounds(
tcx,
assoc_item,
assoc_item,
item.span,
- ty::TraitRef { def_id: def_id.to_def_id(), substs: trait_substs },
+ ty::TraitRef { def_id: it.def_id.to_def_id(), substs: trait_substs },
);
}
_ => {}
@@ -746,10 +829,10 @@
}
}
hir::ItemKind::Struct(..) => {
- check_struct(tcx, it.hir_id, it.span);
+ check_struct(tcx, it.def_id, it.span);
}
hir::ItemKind::Union(..) => {
- check_union(tcx, it.hir_id, it.span);
+ check_union(tcx, it.def_id, it.span);
}
hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
// HACK(jynelson): trying to infer the type of `impl trait` breaks documenting
@@ -757,16 +840,13 @@
// Since rustdoc doesn't care about the concrete type behind `impl Trait`, just don't look at it!
// See https://github.com/rust-lang/rust/issues/75100
if !tcx.sess.opts.actually_rustdoc {
- let def_id = tcx.hir().local_def_id(it.hir_id);
-
- let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
- check_opaque(tcx, def_id, substs, it.span, &origin);
+ let substs = InternalSubsts::identity_for_item(tcx, it.def_id.to_def_id());
+ check_opaque(tcx, it.def_id, substs, it.span, &origin);
}
}
hir::ItemKind::TyAlias(..) => {
- let def_id = tcx.hir().local_def_id(it.hir_id);
- let pty_ty = tcx.type_of(def_id);
- let generics = tcx.generics_of(def_id);
+ let pty_ty = tcx.type_of(it.def_id);
+ let generics = tcx.generics_of(it.def_id);
check_type_params_are_used(tcx, &generics, pty_ty);
}
hir::ItemKind::ForeignMod { abi, items } => {
@@ -784,7 +864,7 @@
}
} else {
for item in items {
- let def_id = tcx.hir().local_def_id(item.id.hir_id);
+ let def_id = item.id.def_id;
let generics = tcx.generics_of(def_id);
let own_counts = generics.own_counts();
if generics.params.len() - own_counts.lifetimes != 0 {
@@ -834,9 +914,8 @@
}
pub(super) fn check_on_unimplemented(tcx: TyCtxt<'_>, trait_def_id: DefId, item: &hir::Item<'_>) {
- let item_def_id = tcx.hir().local_def_id(item.hir_id);
// an error would be reported if this fails.
- let _ = traits::OnUnimplementedDirective::of_item(tcx, trait_def_id, item_def_id.to_def_id());
+ let _ = traits::OnUnimplementedDirective::of_item(tcx, trait_def_id, item.def_id.to_def_id());
}
pub(super) fn check_specialization_validity<'tcx>(
@@ -937,7 +1016,7 @@
// Check existing impl methods to see if they are both present in trait
// and compatible with trait signature
for impl_item in impl_items {
- let ty_impl_item = tcx.associated_item(tcx.hir().local_def_id(impl_item.hir_id));
+ let ty_impl_item = tcx.associated_item(impl_item.def_id);
let mut items =
associated_items.filter_by_name(tcx, ty_impl_item.ident, impl_trait_ref.def_id);
@@ -1134,6 +1213,29 @@
.emit();
return;
}
+
+ let len = if let ty::Array(_ty, c) = e.kind() {
+ c.try_eval_usize(tcx, tcx.param_env(def.did))
+ } else {
+ Some(fields.len() as u64)
+ };
+ if let Some(len) = len {
+ if len == 0 {
+ struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
+ return;
+ } else if len > MAX_SIMD_LANES {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0075,
+ "SIMD vector cannot have more than {} elements",
+ MAX_SIMD_LANES,
+ )
+ .emit();
+ return;
+ }
+ }
+
match e.kind() {
ty::Param(_) => { /* struct<T>(T, T, T, T) is ok */ }
_ if e.is_machine() => { /* struct(u8, u8, u8, u8) is ok */ }
@@ -1312,13 +1414,12 @@
}
#[allow(trivial_numeric_casts)]
-pub fn check_enum<'tcx>(
+fn check_enum<'tcx>(
tcx: TyCtxt<'tcx>,
sp: Span,
vs: &'tcx [hir::Variant<'tcx>],
- id: hir::HirId,
+ def_id: LocalDefId,
) {
- let def_id = tcx.hir().local_def_id(id);
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
@@ -1472,6 +1573,9 @@
struct_span_err!(tcx.sess, span, E0733, "recursion in an `async fn` requires boxing")
.span_label(span, "recursive `async fn`")
.note("a recursive `async fn` must be rewritten to return a boxed `dyn Future`")
+ .note(
+ "consider using the `async_recursion` crate: https://crates.io/crates/async_recursion",
+ )
.emit();
}
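Among the `check.rs` changes, `check_simd` now evaluates the element count of array-based `#[repr(simd)]` types and rejects both empty vectors and vectors wider than `MAX_SIMD_LANES` (the constant comes from `rustc_middle::ty::layout`; its exact value is not shown in this patch). A hypothetical program hitting the new E0075 paths, assuming the unstable `repr_simd` feature:

```rust
#![feature(repr_simd)]

#[repr(simd)]
struct Empty([f32; 0]); // error[E0075]: SIMD vector cannot be empty

#[repr(simd)]
struct TooWide([f32; 1 << 20]); // error[E0075]: SIMD vector cannot have more than MAX_SIMD_LANES elements

fn main() {}
```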
diff --git a/compiler/rustc_typeck/src/check/closure.rs b/compiler/rustc_typeck/src/check/closure.rs
index f34aaec..431e6d7 100644
--- a/compiler/rustc_typeck/src/check/closure.rs
+++ b/compiler/rustc_typeck/src/check/closure.rs
@@ -208,7 +208,7 @@
});
// Even if we can't infer the full signature, we may be able to
- // infer the kind. This can occur if there is a trait-reference
+ // infer the kind. This can occur when we elaborate a predicate
// like `F : Fn<A>`. Note that due to subtyping we could encounter
// many viable options, so pick the most restrictive.
let expected_kind = self
@@ -234,11 +234,11 @@
debug!("deduce_sig_from_projection({:?})", projection);
- let trait_ref = projection.to_poly_trait_ref(tcx);
+ let trait_def_id = projection.trait_def_id(tcx);
- let is_fn = tcx.fn_trait_kind_from_lang_item(trait_ref.def_id()).is_some();
+ let is_fn = tcx.fn_trait_kind_from_lang_item(trait_def_id).is_some();
let gen_trait = tcx.require_lang_item(LangItem::Generator, cause_span);
- let is_gen = gen_trait == trait_ref.def_id();
+ let is_gen = gen_trait == trait_def_id;
if !is_fn && !is_gen {
debug!("deduce_sig_from_projection: not fn or generator");
return None;
@@ -256,7 +256,7 @@
}
let input_tys = if is_fn {
- let arg_param_ty = trait_ref.skip_binder().substs.type_at(1);
+ let arg_param_ty = projection.skip_binder().projection_ty.substs.type_at(1);
let arg_param_ty = self.resolve_vars_if_possible(arg_param_ty);
debug!("deduce_sig_from_projection: arg_param_ty={:?}", arg_param_ty);
@@ -662,9 +662,9 @@
};
// Check that this is a projection from the `Future` trait.
- let trait_ref = predicate.projection_ty.trait_ref(self.tcx);
+ let trait_def_id = predicate.projection_ty.trait_def_id(self.tcx);
let future_trait = self.tcx.require_lang_item(LangItem::Future, Some(cause_span));
- if trait_ref.def_id != future_trait {
+ if trait_def_id != future_trait {
debug!("deduce_future_output_from_projection: not a future");
return None;
}
diff --git a/compiler/rustc_typeck/src/check/coercion.rs b/compiler/rustc_typeck/src/check/coercion.rs
index b2395b7..94aee87 100644
--- a/compiler/rustc_typeck/src/check/coercion.rs
+++ b/compiler/rustc_typeck/src/check/coercion.rs
@@ -42,6 +42,7 @@
use rustc_hir::def_id::DefId;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::infer::{Coercion, InferOk, InferResult};
+use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::adjustment::{
Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCast,
};
@@ -1236,7 +1237,7 @@
/// The inner coercion "engine". If `expression` is `None`, this
/// is a forced-unit case, and hence `expression_ty` must be
/// `Nil`.
- fn coerce_inner<'a>(
+ crate fn coerce_inner<'a>(
&mut self,
fcx: &FnCtxt<'a, 'tcx>,
cause: &ObligationCause<'tcx>,
@@ -1448,9 +1449,16 @@
expected.is_unit(),
pointing_at_return_type,
) {
- if cond_expr.span.desugaring_kind().is_none() {
+ // If the block is from an external macro, then do not suggest
+ // adding a semicolon, because there's nowhere to put it.
+ // See issue #81943.
+ if cond_expr.span.desugaring_kind().is_none()
+ && !in_external_macro(fcx.tcx.sess, cond_expr.span)
+ {
err.span_label(cond_expr.span, "expected this to be `()`");
- fcx.suggest_semicolon_at_end(cond_expr.span, &mut err);
+ if expr.can_have_side_effects() {
+ fcx.suggest_semicolon_at_end(cond_expr.span, &mut err);
+ }
}
}
fcx.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
@@ -1458,7 +1466,7 @@
fcx.get_fn_decl(parent_id)
};
- if let (Some((fn_decl, can_suggest)), _) = (fn_decl, pointing_at_return_type) {
+ if let Some((fn_decl, can_suggest)) = fn_decl {
if expression.is_none() {
pointing_at_return_type |= fcx.suggest_missing_return_type(
&mut err,
@@ -1472,6 +1480,16 @@
fn_output = Some(&fn_decl.output); // `impl Trait` return type
}
}
+
+ let parent_id = fcx.tcx.hir().get_parent_item(id);
+ let parent_item = fcx.tcx.hir().get(parent_id);
+
+ if let (Some((expr, _)), Some((fn_decl, _, _))) =
+ (expression, fcx.get_node_fn_decl(parent_item))
+ {
+ fcx.suggest_missing_return_expr(&mut err, expr, fn_decl, expected, found);
+ }
+
if let (Some(sp), Some(fn_output)) = (fcx.ret_coercion_span.get(), fn_output) {
self.add_impl_trait_explanation(&mut err, cause, fcx, expected, sp, fn_output);
}
@@ -1536,7 +1554,7 @@
if let hir::FnRetTy::Return(ty) = fn_output {
// Get the return type.
if let hir::TyKind::OpaqueDef(..) = ty.kind {
- let ty = AstConv::ast_ty_to_ty(fcx, ty);
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(fcx, ty);
// Get the `impl Trait`'s `DefId`.
if let ty::Opaque(def_id, _) = ty.kind() {
let hir_id = fcx.tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
@@ -1598,7 +1616,7 @@
fn is_return_ty_unsized(&self, fcx: &FnCtxt<'a, 'tcx>, blk_id: hir::HirId) -> bool {
if let Some((fn_decl, _)) = fcx.get_fn_decl(blk_id) {
if let hir::FnRetTy::Return(ty) = fn_decl.output {
- let ty = AstConv::ast_ty_to_ty(fcx, ty);
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(fcx, ty);
if let ty::Dynamic(..) = ty.kind() {
return true;
}
diff --git a/compiler/rustc_typeck/src/check/compare_method.rs b/compiler/rustc_typeck/src/check/compare_method.rs
index d37d6bc..a30a810 100644
--- a/compiler/rustc_typeck/src/check/compare_method.rs
+++ b/compiler/rustc_typeck/src/check/compare_method.rs
@@ -823,11 +823,11 @@
// FIXME: this is obviously suboptimal since the name can already be used
// as another generic argument
let new_name = tcx.sess.source_map().span_to_snippet(trait_span).ok()?;
- let trait_m = tcx.hir().local_def_id_to_hir_id(trait_m.def_id.as_local()?);
- let trait_m = tcx.hir().trait_item(hir::TraitItemId { hir_id: trait_m });
+ let trait_m = trait_m.def_id.as_local()?;
+ let trait_m = tcx.hir().trait_item(hir::TraitItemId { def_id: trait_m });
- let impl_m = tcx.hir().local_def_id_to_hir_id(impl_m.def_id.as_local()?);
- let impl_m = tcx.hir().impl_item(hir::ImplItemId { hir_id: impl_m });
+ let impl_m = impl_m.def_id.as_local()?;
+ let impl_m = tcx.hir().impl_item(hir::ImplItemId { def_id: impl_m });
// in case there are no generics, take the spot between the function name
// and the opening paren of the argument list
@@ -860,8 +860,8 @@
(None, Some(hir::SyntheticTyParamKind::ImplTrait)) => {
err.span_label(impl_span, "expected `impl Trait`, found generic parameter");
(|| {
- let impl_m = tcx.hir().local_def_id_to_hir_id(impl_m.def_id.as_local()?);
- let impl_m = tcx.hir().impl_item(hir::ImplItemId { hir_id: impl_m });
+ let impl_m = impl_m.def_id.as_local()?;
+ let impl_m = tcx.hir().impl_item(hir::ImplItemId { def_id: impl_m });
let input_tys = match impl_m.kind {
hir::ImplItemKind::Fn(ref sig, _) => sig.decl.inputs,
_ => unreachable!(),
diff --git a/compiler/rustc_typeck/src/check/demand.rs b/compiler/rustc_typeck/src/check/demand.rs
index 3c9c683..f9f6776 100644
--- a/compiler/rustc_typeck/src/check/demand.rs
+++ b/compiler/rustc_typeck/src/check/demand.rs
@@ -200,7 +200,12 @@
if self.can_coerce(expr_ty, sole_field_ty) {
let variant_path = self.tcx.def_path_str(variant.def_id);
// FIXME #56861: DRYer prelude filtering
- Some(variant_path.trim_start_matches("std::prelude::v1::").to_string())
+ if let Some(path) = variant_path.strip_prefix("std::prelude::") {
+ if let Some((_, path)) = path.split_once("::") {
+ return Some(path.to_string());
+ }
+ }
+ Some(variant_path)
} else {
None
}
@@ -616,10 +621,30 @@
}
_ if sp == expr.span && !is_macro => {
if let Some(steps) = self.deref_steps(checked_ty, expected) {
+ let expr = expr.peel_blocks();
+
if steps == 1 {
- // For a suggestion to make sense, the type would need to be `Copy`.
- if self.infcx.type_is_copy_modulo_regions(self.param_env, expected, sp) {
- if let Ok(code) = sm.span_to_snippet(sp) {
+ if let hir::ExprKind::AddrOf(_, mutbl, inner) = expr.kind {
+ // If the expression has `&`, removing it would fix the error
+ let prefix_span = expr.span.with_hi(inner.span.lo());
+ let message = match mutbl {
+ hir::Mutability::Not => "consider removing the `&`",
+ hir::Mutability::Mut => "consider removing the `&mut`",
+ };
+ let suggestion = String::new();
+ return Some((
+ prefix_span,
+ message,
+ suggestion,
+ Applicability::MachineApplicable,
+ ));
+ } else if self.infcx.type_is_copy_modulo_regions(
+ self.param_env,
+ expected,
+ sp,
+ ) {
+ // For this suggestion to make sense, the type would need to be `Copy`.
+ if let Ok(code) = sm.span_to_snippet(expr.span) {
let message = if checked_ty.is_region_ptr() {
"consider dereferencing the borrow"
} else {
@@ -631,7 +656,7 @@
format!("*{}", code)
};
return Some((
- sp,
+ expr.span,
message,
suggestion,
Applicability::MachineApplicable,
@@ -773,7 +798,7 @@
if let hir::ExprKind::Lit(lit) = &expr.kind { lit.node.is_suffixed() } else { false }
};
let is_negative_int =
- |expr: &hir::Expr<'_>| matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::UnNeg, ..));
+ |expr: &hir::Expr<'_>| matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::Neg, ..));
let is_uint = |ty: Ty<'_>| matches!(ty.kind(), ty::Uint(..));
let in_const_context = self.tcx.hir().is_inside_const_context(expr.hir_id);
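The `demand.rs` hunk refines the one-deref-step suggestion: when the mismatched expression is itself a borrow, the error now proposes deleting the `&` or `&mut` as a machine-applicable removal instead of asking the user to dereference. A small example of the shape it targets (hypothetical):

```rust
fn takes_i32(_: i32) {}

fn main() {
    let x = 5;
    takes_i32(&x); // error[E0308]: expected `i32`, found `&{integer}`
                   // help: consider removing the `&`
}
```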
diff --git a/compiler/rustc_typeck/src/check/dropck.rs b/compiler/rustc_typeck/src/check/dropck.rs
index 4c3c4fd..4d74962 100644
--- a/compiler/rustc_typeck/src/check/dropck.rs
+++ b/compiler/rustc_typeck/src/check/dropck.rs
@@ -77,7 +77,7 @@
tcx.infer_ctxt().enter(|ref infcx| {
let impl_param_env = tcx.param_env(self_type_did);
let tcx = infcx.tcx;
- let mut fulfillment_cx = TraitEngine::new(tcx);
+ let mut fulfillment_cx = <dyn TraitEngine<'_>>::new(tcx);
let named_type = tcx.type_of(self_type_did);
diff --git a/compiler/rustc_typeck/src/check/expectation.rs b/compiler/rustc_typeck/src/check/expectation.rs
index 5a5fc89..22be10a 100644
--- a/compiler/rustc_typeck/src/check/expectation.rs
+++ b/compiler/rustc_typeck/src/check/expectation.rs
@@ -21,6 +21,8 @@
/// This rvalue expression will be wrapped in `&` or `Box` and coerced
/// to `&Ty` or `Box<Ty>`, respectively. `Ty` is `[A]` or `Trait`.
ExpectRvalueLikeUnsized(Ty<'tcx>),
+
+ IsLast(Span),
}
impl<'a, 'tcx> Expectation<'tcx> {
@@ -79,19 +81,20 @@
// Resolves `expected` by a single level if it is a variable. If
// there is no expected type or resolution is not possible (e.g.,
- // no constraints yet present), just returns `None`.
+ // no constraints yet present), just returns `self`.
fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> {
match self {
NoExpectation => NoExpectation,
ExpectCastableToType(t) => ExpectCastableToType(fcx.resolve_vars_if_possible(t)),
ExpectHasType(t) => ExpectHasType(fcx.resolve_vars_if_possible(t)),
ExpectRvalueLikeUnsized(t) => ExpectRvalueLikeUnsized(fcx.resolve_vars_if_possible(t)),
+ IsLast(sp) => IsLast(sp),
}
}
pub(super) fn to_option(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
match self.resolve(fcx) {
- NoExpectation => None,
+ NoExpectation | IsLast(_) => None,
ExpectCastableToType(ty) | ExpectHasType(ty) | ExpectRvalueLikeUnsized(ty) => Some(ty),
}
}
@@ -103,7 +106,9 @@
pub(super) fn only_has_type(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
match self.resolve(fcx) {
ExpectHasType(ty) => Some(ty),
- NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) => None,
+ NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) | IsLast(_) => {
+ None
+ }
}
}
diff --git a/compiler/rustc_typeck/src/check/expr.rs b/compiler/rustc_typeck/src/check/expr.rs
index 33b1c0b..8951c08 100644
--- a/compiler/rustc_typeck/src/check/expr.rs
+++ b/compiler/rustc_typeck/src/check/expr.rs
@@ -168,7 +168,7 @@
// without the final expr (e.g. `try { return; }`). We don't want to generate an
// unreachable_code lint for it since warnings for autogenerated code are confusing.
let is_try_block_generated_unit_expr = match expr.kind {
- ExprKind::Call(_, ref args) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {
+ ExprKind::Call(_, args) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {
args.len() == 1 && args[0].span.is_desugaring(DesugaringKind::TryBlock)
}
@@ -193,9 +193,7 @@
// diverging expression (e.g. it arose from desugaring of `try { return }`),
// we skip issuing a warning because it is autogenerated code.
ExprKind::Call(..) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {}
- ExprKind::Call(ref callee, _) => {
- self.warn_if_unreachable(expr.hir_id, callee.span, "call")
- }
+ ExprKind::Call(callee, _) => self.warn_if_unreachable(expr.hir_id, callee.span, "call"),
ExprKind::MethodCall(_, ref span, _, _) => {
self.warn_if_unreachable(expr.hir_id, *span, "call")
}
@@ -231,15 +229,15 @@
let tcx = self.tcx;
match expr.kind {
- ExprKind::Box(ref subexpr) => self.check_expr_box(subexpr, expected),
+ ExprKind::Box(subexpr) => self.check_expr_box(subexpr, expected),
ExprKind::Lit(ref lit) => self.check_lit(&lit, expected),
- ExprKind::Binary(op, ref lhs, ref rhs) => self.check_binop(expr, op, lhs, rhs),
- ExprKind::Assign(ref lhs, ref rhs, ref span) => {
+ ExprKind::Binary(op, lhs, rhs) => self.check_binop(expr, op, lhs, rhs),
+ ExprKind::Assign(lhs, rhs, ref span) => {
self.check_expr_assign(expr, expected, lhs, rhs, span)
}
- ExprKind::AssignOp(op, ref lhs, ref rhs) => self.check_binop_assign(expr, op, lhs, rhs),
- ExprKind::Unary(unop, ref oprnd) => self.check_expr_unary(unop, oprnd, expected, expr),
- ExprKind::AddrOf(kind, mutbl, ref oprnd) => {
+ ExprKind::AssignOp(op, lhs, rhs) => self.check_binop_assign(expr, op, lhs, rhs),
+ ExprKind::Unary(unop, oprnd) => self.check_expr_unary(unop, oprnd, expected, expr),
+ ExprKind::AddrOf(kind, mutbl, oprnd) => {
self.check_expr_addr_of(kind, mutbl, oprnd, expected, expr)
}
ExprKind::Path(QPath::LangItem(lang_item, _)) => {
@@ -247,7 +245,7 @@
}
ExprKind::Path(ref qpath) => self.check_expr_path(qpath, expr),
ExprKind::InlineAsm(asm) => self.check_expr_asm(asm),
- ExprKind::LlvmInlineAsm(ref asm) => {
+ ExprKind::LlvmInlineAsm(asm) => {
for expr in asm.outputs_exprs.iter().chain(asm.inputs_exprs.iter()) {
self.check_expr(expr);
}
@@ -265,46 +263,42 @@
}
}
ExprKind::Ret(ref expr_opt) => self.check_expr_return(expr_opt.as_deref(), expr),
- ExprKind::Loop(ref body, _, source, _) => {
+ ExprKind::Loop(body, _, source, _) => {
self.check_expr_loop(body, source, expected, expr)
}
- ExprKind::Match(ref discrim, ref arms, match_src) => {
+ ExprKind::Match(discrim, arms, match_src) => {
self.check_match(expr, &discrim, arms, expected, match_src)
}
- ExprKind::Closure(capture, ref decl, body_id, _, gen) => {
+ ExprKind::Closure(capture, decl, body_id, _, gen) => {
self.check_expr_closure(expr, capture, &decl, body_id, gen, expected)
}
- ExprKind::Block(ref body, _) => self.check_block_with_expected(&body, expected),
- ExprKind::Call(ref callee, ref args) => self.check_call(expr, &callee, args, expected),
- ExprKind::MethodCall(ref segment, span, ref args, _) => {
+ ExprKind::Block(body, _) => self.check_block_with_expected(&body, expected),
+ ExprKind::Call(callee, args) => self.check_call(expr, &callee, args, expected),
+ ExprKind::MethodCall(segment, span, args, _) => {
self.check_method_call(expr, segment, span, args, expected)
}
- ExprKind::Cast(ref e, ref t) => self.check_expr_cast(e, t, expr),
- ExprKind::Type(ref e, ref t) => {
+ ExprKind::Cast(e, t) => self.check_expr_cast(e, t, expr),
+ ExprKind::Type(e, t) => {
let ty = self.to_ty_saving_user_provided_ty(&t);
self.check_expr_eq_type(&e, ty);
ty
}
- ExprKind::If(ref cond, ref then_expr, ref opt_else_expr) => self.check_then_else(
- &cond,
- then_expr,
- opt_else_expr.as_ref().map(|e| &**e),
- expr.span,
- expected,
- ),
- ExprKind::DropTemps(ref e) => self.check_expr_with_expectation(e, expected),
- ExprKind::Array(ref args) => self.check_expr_array(args, expected, expr),
+ ExprKind::If(cond, then_expr, opt_else_expr) => {
+ self.check_then_else(cond, then_expr, opt_else_expr, expr.span, expected)
+ }
+ ExprKind::DropTemps(e) => self.check_expr_with_expectation(e, expected),
+ ExprKind::Array(args) => self.check_expr_array(args, expected, expr),
ExprKind::ConstBlock(ref anon_const) => self.to_const(anon_const).ty,
- ExprKind::Repeat(ref element, ref count) => {
+ ExprKind::Repeat(element, ref count) => {
self.check_expr_repeat(element, count, expected, expr)
}
- ExprKind::Tup(ref elts) => self.check_expr_tuple(elts, expected, expr),
- ExprKind::Struct(ref qpath, fields, ref base_expr) => {
+ ExprKind::Tup(elts) => self.check_expr_tuple(elts, expected, expr),
+ ExprKind::Struct(qpath, fields, ref base_expr) => {
self.check_expr_struct(expr, expected, qpath, fields, base_expr)
}
- ExprKind::Field(ref base, field) => self.check_field(expr, &base, field),
- ExprKind::Index(ref base, ref idx) => self.check_expr_index(base, idx, expr),
- ExprKind::Yield(ref value, ref src) => self.check_expr_yield(value, expr, src),
+ ExprKind::Field(base, field) => self.check_field(expr, &base, field),
+ ExprKind::Index(base, idx) => self.check_expr_index(base, idx, expr),
+ ExprKind::Yield(value, ref src) => self.check_expr_yield(value, expr, src),
hir::ExprKind::Err => tcx.ty_error(),
}
}
@@ -327,15 +321,15 @@
) -> Ty<'tcx> {
let tcx = self.tcx;
let expected_inner = match unop {
- hir::UnOp::UnNot | hir::UnOp::UnNeg => expected,
- hir::UnOp::UnDeref => NoExpectation,
+ hir::UnOp::Not | hir::UnOp::Neg => expected,
+ hir::UnOp::Deref => NoExpectation,
};
let mut oprnd_t = self.check_expr_with_expectation(&oprnd, expected_inner);
if !oprnd_t.references_error() {
oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t);
match unop {
- hir::UnOp::UnDeref => {
+ hir::UnOp::Deref => {
if let Some(ty) = self.lookup_derefing(expr, oprnd, oprnd_t) {
oprnd_t = ty;
} else {
@@ -357,14 +351,14 @@
oprnd_t = tcx.ty_error();
}
}
- hir::UnOp::UnNot => {
+ hir::UnOp::Not => {
let result = self.check_user_unop(expr, oprnd_t, unop);
// If it's builtin, we can reuse the type, this helps inference.
if !(oprnd_t.is_integral() || *oprnd_t.kind() == ty::Bool) {
oprnd_t = result;
}
}
- hir::UnOp::UnNeg => {
+ hir::UnOp::Neg => {
let result = self.check_user_unop(expr, oprnd_t, unop);
// If it's builtin, we can reuse the type, this helps inference.
if !oprnd_t.is_numeric() {
@@ -545,7 +539,7 @@
let tcx = self.tcx;
if let Ok(target_id) = destination.target_id {
let (e_ty, cause);
- if let Some(ref e) = expr_opt {
+ if let Some(e) = expr_opt {
// If this is a break with a value, we need to type-check
// the expression. Get an expected type from the loop context.
let opt_coerce_to = {
@@ -654,12 +648,12 @@
// We still need to assign a type to the inner expression to
// prevent the ICE in #43162.
- if let Some(ref e) = expr_opt {
+ if let Some(e) = expr_opt {
self.check_expr_with_hint(e, err);
// ... except when we try to 'break rust;'.
// ICE this expression in particular (see #43162).
- if let ExprKind::Path(QPath::Resolved(_, ref path)) = e.kind {
+ if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind {
if path.segments.len() == 1 && path.segments[0].ident.name == sym::rust {
fatally_break_rust(self.tcx.sess);
}
@@ -678,7 +672,7 @@
) -> Ty<'tcx> {
if self.ret_coercion.is_none() {
self.tcx.sess.emit_err(ReturnStmtOutsideOfFnBody { span: expr.span });
- } else if let Some(ref e) = expr_opt {
+ } else if let Some(e) = expr_opt {
if self.ret_coercion_span.get().is_none() {
self.ret_coercion_span.set(Some(e.span));
}
@@ -717,7 +711,7 @@
});
let ret_ty = ret_coercion.borrow().expected_ty();
- let return_expr_ty = self.check_expr_with_hint(return_expr, ret_ty.clone());
+ let return_expr_ty = self.check_expr_with_hint(return_expr, ret_ty);
ret_coercion.borrow_mut().coerce(
self,
&self.cause(return_expr.span, ObligationCauseCode::ReturnValue(return_expr.hir_id)),
@@ -1137,13 +1131,13 @@
let flds = expected.only_has_type(self).and_then(|ty| {
let ty = self.resolve_vars_with_obligations(ty);
match ty.kind() {
- ty::Tuple(ref flds) => Some(&flds[..]),
+ ty::Tuple(flds) => Some(&flds[..]),
_ => None,
}
});
let elt_ts_iter = elts.iter().enumerate().map(|(i, e)| match flds {
- Some(ref fs) if i < fs.len() => {
+ Some(fs) if i < fs.len() => {
let ety = fs[i].expect_ty();
self.check_expr_coercable_to_type(&e, ety, None);
ety
@@ -1164,7 +1158,7 @@
expr: &hir::Expr<'_>,
expected: Expectation<'tcx>,
qpath: &QPath<'_>,
- fields: &'tcx [hir::Field<'tcx>],
+ fields: &'tcx [hir::ExprField<'tcx>],
base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
) -> Ty<'tcx> {
// Find the relevant variant
@@ -1237,7 +1231,7 @@
expr_id: hir::HirId,
span: Span,
variant: &'tcx ty::VariantDef,
- ast_fields: &'tcx [hir::Field<'tcx>],
+ ast_fields: &'tcx [hir::ExprField<'tcx>],
check_completeness: bool,
) -> bool {
let tcx = self.tcx;
@@ -1326,13 +1320,13 @@
fn check_struct_fields_on_error(
&self,
- fields: &'tcx [hir::Field<'tcx>],
+ fields: &'tcx [hir::ExprField<'tcx>],
base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
) {
for field in fields {
self.check_expr(&field.expr);
}
- if let Some(ref base) = *base_expr {
+ if let Some(base) = *base_expr {
self.check_expr(&base);
}
}
@@ -1354,7 +1348,6 @@
span: Span,
remaining_fields: FxHashMap<Ident, (usize, &ty::FieldDef)>,
) {
- let tcx = self.tcx;
let len = remaining_fields.len();
let mut displayable_field_names =
@@ -1362,25 +1355,29 @@
displayable_field_names.sort();
- let truncated_fields_error = if len <= 3 {
- String::new()
- } else {
- format!(" and {} other field{}", (len - 3), if len - 3 == 1 { "" } else { "s" })
+ let mut truncated_fields_error = String::new();
+ let remaining_fields_names = match &displayable_field_names[..] {
+ [field1] => format!("`{}`", field1),
+ [field1, field2] => format!("`{}` and `{}`", field1, field2),
+ [field1, field2, field3] => format!("`{}`, `{}` and `{}`", field1, field2, field3),
+ _ => {
+ truncated_fields_error =
+ format!(" and {} other field{}", len - 3, pluralize!(len - 3));
+ displayable_field_names
+ .iter()
+ .take(3)
+ .map(|n| format!("`{}`", n))
+ .collect::<Vec<_>>()
+ .join(", ")
+ }
};
- let remaining_fields_names = displayable_field_names
- .iter()
- .take(3)
- .map(|n| format!("`{}`", n))
- .collect::<Vec<_>>()
- .join(", ");
-
struct_span_err!(
- tcx.sess,
+ self.tcx.sess,
span,
E0063,
"missing field{} {}{} in initializer of `{}`",
- pluralize!(remaining_fields.len()),
+ pluralize!(len),
remaining_fields_names,
truncated_fields_error,
adt_ty
@@ -1414,8 +1411,8 @@
&self,
ty: Ty<'tcx>,
variant: &'tcx ty::VariantDef,
- field: &hir::Field<'_>,
- skip_fields: &[hir::Field<'_>],
+ field: &hir::ExprField<'_>,
+ skip_fields: &[hir::ExprField<'_>],
kind_name: &str,
ty_span: Span,
) {
@@ -1460,34 +1457,39 @@
),
);
err.span_label(field.ident.span, "field does not exist");
- err.span_label(
+ err.span_suggestion(
ty_span,
- format!(
- "`{adt}::{variant}` is a tuple {kind_name}, \
- use the appropriate syntax: `{adt}::{variant}(/* fields */)`",
+ &format!(
+ "`{adt}::{variant}` is a tuple {kind_name}, use the appropriate syntax",
adt = ty,
variant = variant.ident,
- kind_name = kind_name
),
+ format!(
+ "{adt}::{variant}(/* fields */)",
+ adt = ty,
+ variant = variant.ident,
+ ),
+ Applicability::HasPlaceholders,
);
}
_ => {
err.span_label(variant.ident.span, format!("`{adt}` defined here", adt = ty));
err.span_label(field.ident.span, "field does not exist");
- err.span_label(
+ err.span_suggestion(
ty_span,
- format!(
- "`{adt}` is a tuple {kind_name}, \
- use the appropriate syntax: `{adt}(/* fields */)`",
+ &format!(
+ "`{adt}` is a tuple {kind_name}, use the appropriate syntax",
adt = ty,
- kind_name = kind_name
+ kind_name = kind_name,
),
+ format!("{adt}(/* fields */)", adt = ty),
+ Applicability::HasPlaceholders,
);
}
},
_ => {
// prevent all specified fields from being suggested
- let skip_fields = skip_fields.iter().map(|ref x| x.ident.name);
+ let skip_fields = skip_fields.iter().map(|x| x.ident.name);
if let Some(field_name) =
Self::suggest_field_name(variant, field.ident.name, skip_fields.collect())
{
@@ -1616,7 +1618,7 @@
private_candidate = Some((base_def.did, field_ty));
}
}
- ty::Tuple(ref tys) => {
+ ty::Tuple(tys) => {
let fstr = field.as_str();
if let Ok(index) = fstr.parse::<usize>() {
if fstr == index.to_string() {
@@ -2082,6 +2084,8 @@
}
_ => {
self.tcx.sess.emit_err(YieldExprOutsideOfGenerator { span: expr.span });
+ // Avoid expressions without types during writeback (#78653).
+ self.check_expr(value);
self.tcx.mk_unit()
}
}
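The `expr.rs` changes also rewrite the E0063 "missing fields" message so that up to three missing fields are named in full, with an "and N other fields" tail only past that, and turn the tuple-struct hint into a real suggestion with placeholder fields. For example (hypothetical code, not from the patch):

```rust
struct Point { x: i32, y: i32, z: i32 }

fn main() {
    // error[E0063]: missing fields `y` and `z` in initializer of `Point`
    let _p = Point { x: 1 };
}
```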
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs
index bc1a078..dc8a804 100644
--- a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs
@@ -6,6 +6,7 @@
use crate::check::method::{self, MethodCallee, SelfSource};
use crate::check::{BreakableCtxt, Diverges, Expectation, FallbackMode, FnCtxt, LocalTy};
+use rustc_ast::TraitObjectSyntax;
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{Applicability, DiagnosticBuilder, ErrorReported};
@@ -13,7 +14,7 @@
use rustc_hir::def::{CtorOf, DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
-use rustc_hir::{ExprKind, GenericArg, Node, QPath};
+use rustc_hir::{ExprKind, GenericArg, Node, QPath, TyKind};
use rustc_infer::infer::canonical::{Canonical, OriginalQueryValues, QueryResponse};
use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
use rustc_infer::infer::{InferOk, InferResult};
@@ -27,10 +28,12 @@
Ty, UserType,
};
use rustc_session::lint;
-use rustc_span::hygiene::DesugaringKind;
+use rustc_session::lint::builtin::BARE_TRAIT_OBJECTS;
+use rustc_session::parse::feature_err;
use rustc_span::source_map::{original_sp, DUMMY_SP};
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::{self, BytePos, MultiSpan, Span};
+use rustc_span::{hygiene::DesugaringKind, Symbol};
use rustc_trait_selection::infer::InferCtxtExt as _;
use rustc_trait_selection::opaque_types::InferCtxtExt as _;
use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
@@ -362,6 +365,7 @@
parent_id: hir::HirId,
value: T,
value_span: Span,
+ feature: Option<Symbol>,
) -> T {
let parent_def_id = self.tcx.hir().local_def_id(parent_id);
debug!(
@@ -380,7 +384,21 @@
let mut opaque_types = self.opaque_types.borrow_mut();
let mut opaque_types_vars = self.opaque_types_vars.borrow_mut();
+
for (ty, decl) in opaque_type_map {
+ if let Some(feature) = feature {
+ if let hir::OpaqueTyOrigin::TyAlias = decl.origin {
+ if !self.tcx.features().enabled(feature) {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ feature,
+ value_span,
+ "type alias impl trait is not permitted here",
+ )
+ .emit();
+ }
+ }
+ }
let _ = opaque_types.insert(ty, decl);
let _ = opaque_types_vars.insert(decl.concrete_ty, decl.opaque_type);
}
@@ -457,7 +475,7 @@
}
pub fn to_ty(&self, ast_t: &hir::Ty<'_>) -> Ty<'tcx> {
- let t = AstConv::ast_ty_to_ty(self, ast_t);
+ let t = <dyn AstConv<'_>>::ast_ty_to_ty(self, ast_t);
self.register_wf_obligation(t.into(), ast_t.span, traits::MiscObligation);
t
}
@@ -769,9 +787,10 @@
.filter_map(move |obligation| {
let bound_predicate = obligation.predicate.kind();
match bound_predicate.skip_binder() {
- ty::PredicateKind::Projection(data) => {
- Some((bound_predicate.rebind(data).to_poly_trait_ref(self.tcx), obligation))
- }
+ ty::PredicateKind::Projection(data) => Some((
+ bound_predicate.rebind(data).required_poly_trait_ref(self.tcx),
+ obligation,
+ )),
ty::PredicateKind::Trait(data, _) => {
Some((bound_predicate.rebind(data).to_poly_trait_ref(), obligation))
}
@@ -838,7 +857,7 @@
// out unconstrained or ambiguous, as we're
// just trying to get hints here.
self.save_and_restore_in_snapshot_flag(|_| {
- let mut fulfill = TraitEngine::new(self.tcx);
+ let mut fulfill = <dyn TraitEngine<'_>>::new(self.tcx);
for obligation in ok.obligations {
fulfill.register_predicate_obligation(self, obligation);
}
@@ -897,7 +916,7 @@
return (
path.res,
opt_qself.as_ref().map(|qself| self.to_ty(qself)),
- &path.segments[..],
+ path.segments,
);
}
QPath::TypeRelative(ref qself, ref segment) => (self.to_ty(qself), qself, segment),
@@ -931,6 +950,10 @@
result
});
+ if result.is_ok() {
+ self.maybe_lint_bare_trait(qpath, hir_id);
+ }
+
// Write back the new resolution.
self.write_resolution(hir_id, result);
(
@@ -940,6 +963,29 @@
)
}
+ fn maybe_lint_bare_trait(&self, qpath: &QPath<'_>, hir_id: hir::HirId) {
+ if let QPath::TypeRelative(self_ty, _) = qpath {
+ if let TyKind::TraitObject([poly_trait_ref, ..], _, TraitObjectSyntax::None) =
+ self_ty.kind
+ {
+ self.tcx.struct_span_lint_hir(BARE_TRAIT_OBJECTS, hir_id, self_ty.span, |lint| {
+ let mut db = lint
+ .build(&format!("trait objects without an explicit `dyn` are deprecated"));
+ let (sugg, app) = match self.tcx.sess.source_map().span_to_snippet(self_ty.span)
+ {
+ Ok(s) if poly_trait_ref.trait_ref.path.is_global() => {
+ (format!("<dyn ({})>", s), Applicability::MachineApplicable)
+ }
+ Ok(s) => (format!("<dyn {}>", s), Applicability::MachineApplicable),
+ Err(_) => ("<dyn <type>>".to_string(), Applicability::HasPlaceholders),
+ };
+ db.span_suggestion(self_ty.span, "use `dyn`", sugg, app);
+ db.emit()
+ });
+ }
+ }
+ }
+
/// Given a function `Node`, return its `FnDecl` if it exists, or `None` otherwise.
pub(in super::super) fn get_node_fn_decl(
&self,
@@ -1073,13 +1119,26 @@
};
let last_expr_ty = self.node_ty(last_expr.hir_id);
let needs_box = match (last_expr_ty.kind(), expected_ty.kind()) {
+ (ty::Opaque(last_def_id, _), ty::Opaque(exp_def_id, _))
+ if last_def_id == exp_def_id =>
+ {
+ StatementAsExpression::CorrectType
+ }
(ty::Opaque(last_def_id, last_bounds), ty::Opaque(exp_def_id, exp_bounds)) => {
debug!(
"both opaque, likely future {:?} {:?} {:?} {:?}",
last_def_id, last_bounds, exp_def_id, exp_bounds
);
- let last_hir_id = self.tcx.hir().local_def_id_to_hir_id(last_def_id.expect_local());
- let exp_hir_id = self.tcx.hir().local_def_id_to_hir_id(exp_def_id.expect_local());
+
+ let (last_local_id, exp_local_id) =
+ match (last_def_id.as_local(), exp_def_id.as_local()) {
+ (Some(last_hir_id), Some(exp_hir_id)) => (last_hir_id, exp_hir_id),
+ (_, _) => return None,
+ };
+
+ let last_hir_id = self.tcx.hir().local_def_id_to_hir_id(last_local_id);
+ let exp_hir_id = self.tcx.hir().local_def_id_to_hir_id(exp_local_id);
+
match (
&self.tcx.hir().expect_item(last_hir_id).kind,
&self.tcx.hir().expect_item(exp_hir_id).kind,
@@ -1145,9 +1204,9 @@
let path_segs = match res {
Res::Local(_) | Res::SelfCtor(_) => vec![],
- Res::Def(kind, def_id) => {
- AstConv::def_ids_for_value_path_segments(self, segments, self_ty, kind, def_id)
- }
+ Res::Def(kind, def_id) => <dyn AstConv<'_>>::def_ids_for_value_path_segments(
+ self, segments, self_ty, kind, def_id,
+ ),
_ => bug!("instantiate_value_path on {:?}", res),
};
@@ -1190,7 +1249,7 @@
// errors if type parameters are provided in an inappropriate place.
let generic_segs: FxHashSet<_> = path_segs.iter().map(|PathSeg(_, index)| index).collect();
- let generics_has_err = AstConv::prohibit_generics(
+ let generics_has_err = <dyn AstConv<'_>>::prohibit_generics(
self,
segments.iter().enumerate().filter_map(|(index, seg)| {
if !generic_segs.contains(&index) || is_alias_variant_ctor {
@@ -1233,7 +1292,7 @@
if let GenericArgCountResult {
correct: Err(GenericArgCountMismatch { reported: Some(_), .. }),
..
- } = AstConv::check_generic_arg_count_for_call(
+ } = <dyn AstConv<'_>>::check_generic_arg_count_for_call(
tcx,
span,
def_id,
@@ -1341,7 +1400,7 @@
) -> subst::GenericArg<'tcx> {
match (¶m.kind, arg) {
(GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
- AstConv::ast_region_to_region(self.fcx, lt, Some(param)).into()
+ <dyn AstConv<'_>>::ast_region_to_region(self.fcx, lt, Some(param)).into()
}
(GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
self.fcx.to_ty(ty).into()
@@ -1394,7 +1453,7 @@
}
let substs = self_ctor_substs.unwrap_or_else(|| {
- AstConv::create_substs_for_generic_args(
+ <dyn AstConv<'_>>::create_substs_for_generic_args(
tcx,
def_id,
&[][..],
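`maybe_lint_bare_trait` extends the `bare_trait_objects` lint to type-relative paths resolved during type checking, suggesting the `<dyn Trait>::...` form. A hypothetical sketch, assuming an inherent impl on a `dyn` type and edition 2018, where the lint is warn-by-default:

```rust
trait Greeter {}

impl dyn Greeter {
    fn hello() {
        println!("hello");
    }
}

fn main() {
    // warning: trait objects without an explicit `dyn` are deprecated
    // help: use `dyn`: `<dyn Greeter>::hello()`
    Greeter::hello();
}
```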
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
index 3326be7..c92c7f7 100644
--- a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
@@ -439,7 +439,7 @@
qpath: &QPath<'_>,
hir_id: hir::HirId,
) -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> {
- let path_span = qpath.qself_span();
+ let path_span = qpath.span();
let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id);
let variant = match def {
Res::Err => {
@@ -539,7 +539,7 @@
self.overwrite_local_ty_if_err(local, ty, pat_ty);
}
- pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>) {
+ pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>, is_last: bool) {
// Don't do all the complex logic below for `DeclItem`.
match stmt.kind {
hir::StmtKind::Item(..) => return,
@@ -561,11 +561,20 @@
hir::StmtKind::Expr(ref expr) => {
// Check with expected type of `()`.
self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit(), |err| {
- self.suggest_semicolon_at_end(expr.span, err);
+ if expr.can_have_side_effects() {
+ self.suggest_semicolon_at_end(expr.span, err);
+ }
});
}
hir::StmtKind::Semi(ref expr) => {
- self.check_expr(&expr);
+ // All of this is equivalent to calling `check_expr`, but it is inlined out here
+ // in order to capture the fact that this `match` is the last statement in its
+ // function. This is done for better suggestions to remove the `;`.
+ let expectation = match expr.kind {
+ hir::ExprKind::Match(..) if is_last => IsLast(stmt.span),
+ _ => NoExpectation,
+ };
+ self.check_expr_with_expectation(expr, expectation);
}
}
@@ -624,8 +633,8 @@
let ctxt = BreakableCtxt { coerce: Some(coerce), may_break: false };
let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || {
- for s in blk.stmts {
- self.check_stmt(s);
+ for (pos, s) in blk.stmts.iter().enumerate() {
+ self.check_stmt(s, blk.stmts.len() - 1 == pos);
}
// check the tail expression **without** holding the
@@ -866,7 +875,7 @@
match *qpath {
QPath::Resolved(ref maybe_qself, ref path) => {
let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
- let ty = AstConv::res_to_ty(self, self_ty, path, true);
+ let ty = <dyn AstConv<'_>>::res_to_ty(self, self_ty, path, true);
(path.res, ty)
}
QPath::TypeRelative(ref qself, ref segment) => {
@@ -877,8 +886,9 @@
} else {
Res::Err
};
- let result =
- AstConv::associated_path_to_ty(self, hir_id, path_span, ty, res, segment, true);
+ let result = <dyn AstConv<'_>>::associated_path_to_ty(
+ self, hir_id, path_span, ty, res, segment, true,
+ );
let ty = result.map(|(ty, _, _)| ty).unwrap_or_else(|_| self.tcx().ty_error());
let result = result.map(|(_, kind, def_id)| (kind, def_id));
@@ -991,7 +1001,7 @@
// would trigger in `is_send::<T::AssocType>();`
// from `typeck-default-trait-impl-assoc-type.rs`.
} else {
- let ty = AstConv::ast_ty_to_ty(self, hir_ty);
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, hir_ty);
let ty = self.resolve_vars_if_possible(ty);
if ty == predicate.self_ty() {
error.obligation.cause.make_mut().span = hir_ty.span;
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
index e9223f7..4da4835 100644
--- a/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
@@ -20,6 +20,7 @@
use rustc_middle::ty::subst::GenericArgKind;
use rustc_middle::ty::{self, Const, Ty, TyCtxt};
use rustc_session::Session;
+use rustc_span::symbol::Ident;
use rustc_span::{self, Span};
use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode};
@@ -183,7 +184,12 @@
}
}
- fn get_type_parameter_bounds(&self, _: Span, def_id: DefId) -> ty::GenericPredicates<'tcx> {
+ fn get_type_parameter_bounds(
+ &self,
+ _: Span,
+ def_id: DefId,
+ _: Ident,
+ ) -> ty::GenericPredicates<'tcx> {
let tcx = self.tcx;
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
let item_id = tcx.hir().ty_param_owner(hir_id);
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
index a0465ca..f90159e 100644
--- a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
@@ -10,7 +10,8 @@
use rustc_hir::lang_items::LangItem;
use rustc_hir::{ExprKind, ItemKind, Node};
use rustc_infer::infer;
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::ty::{self, Binder, Ty};
use rustc_span::symbol::kw;
use std::iter;
@@ -44,11 +45,17 @@
blk_id: hir::HirId,
) -> bool {
let expr = expr.peel_drop_temps();
- self.suggest_missing_semicolon(err, expr, expected, cause_span);
+ // If the expression is from an external macro, then do not suggest
+ // adding a semicolon, because there's nowhere to put it.
+ // See issue #81943.
+ if expr.can_have_side_effects() && !in_external_macro(self.tcx.sess, cause_span) {
+ self.suggest_missing_semicolon(err, expr, expected, cause_span);
+ }
let mut pointing_at_return_type = false;
if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
pointing_at_return_type =
self.suggest_missing_return_type(err, &fn_decl, expected, found, can_suggest);
+ self.suggest_missing_return_expr(err, expr, &fn_decl, expected, found);
}
pointing_at_return_type
}
@@ -392,10 +399,12 @@
| ExprKind::Loop(..)
| ExprKind::If(..)
| ExprKind::Match(..)
- | ExprKind::Block(..) => {
+ | ExprKind::Block(..)
+ if expression.can_have_side_effects() =>
+ {
err.span_suggestion(
cause_span.shrink_to_hi(),
- "try adding a semicolon",
+ "consider using a semicolon here",
";".to_string(),
Applicability::MachineApplicable,
);
@@ -452,7 +461,7 @@
// are not, the expectation must have been caused by something else.
debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.kind);
let sp = ty.span;
- let ty = AstConv::ast_ty_to_ty(self, ty);
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
debug!("suggest_missing_return_type: return type {:?}", ty);
debug!("suggest_missing_return_type: expected type {:?}", ty);
if ty.kind() == expected.kind() {
@@ -464,6 +473,35 @@
}
}
+ pub(in super::super) fn suggest_missing_return_expr(
+ &self,
+ err: &mut DiagnosticBuilder<'_>,
+ expr: &'tcx hir::Expr<'tcx>,
+ fn_decl: &hir::FnDecl<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) {
+ if !expected.is_unit() {
+ return;
+ }
+ let found = self.resolve_vars_with_obligations(found);
+ if let hir::FnRetTy::Return(ty) = fn_decl.output {
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
+ let ty = self.tcx.erase_late_bound_regions(Binder::bind(ty));
+ let ty = self.normalize_associated_types_in(expr.span, ty);
+ if self.can_coerce(found, ty) {
+ err.multipart_suggestion(
+ "you might have meant to return this value",
+ vec![
+ (expr.span.shrink_to_lo(), "return ".to_string()),
+ (expr.span.shrink_to_hi(), ";".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
pub(in super::super) fn suggest_missing_parentheses(
&self,
err: &mut DiagnosticBuilder<'_>,
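`suggest_missing_return_expr` fires when an expression is checked against `()` (for instance in an `if` arm that is not the tail of the function) but would coerce to the declared return type; the error then offers a `return ...;` rewrite. A minimal case it is meant to cover (hypothetical example):

```rust
fn pick(flag: bool) -> i32 {
    if flag {
        1 // error[E0308]: expected `()`, found integer
          // help: you might have meant to return this value: `return 1;`
    }
    0
}
```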
diff --git a/compiler/rustc_typeck/src/check/gather_locals.rs b/compiler/rustc_typeck/src/check/gather_locals.rs
index 825ebc1..4c5d16d 100644
--- a/compiler/rustc_typeck/src/check/gather_locals.rs
+++ b/compiler/rustc_typeck/src/check/gather_locals.rs
@@ -4,7 +4,7 @@
use rustc_hir::PatKind;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_middle::ty::Ty;
-use rustc_span::Span;
+use rustc_span::{sym, Span};
use rustc_trait_selection::traits;
use std::mem;
@@ -58,11 +58,12 @@
Some(ref ty) => {
let o_ty = self.fcx.to_ty(&ty);
- let revealed_ty = if self.fcx.tcx.features().impl_trait_in_bindings {
- self.fcx.instantiate_opaque_types_from_value(self.parent_id, o_ty, ty.span)
- } else {
- o_ty
- };
+ let revealed_ty = self.fcx.instantiate_opaque_types_from_value(
+ self.parent_id,
+ o_ty,
+ ty.span,
+ Some(sym::impl_trait_in_bindings),
+ );
let c_ty =
self.fcx.inh.infcx.canonicalize_user_type_annotation(UserType::Ty(revealed_ty));
diff --git a/compiler/rustc_typeck/src/check/inherited.rs b/compiler/rustc_typeck/src/check/inherited.rs
index 0011a3f..1dacbad 100644
--- a/compiler/rustc_typeck/src/check/inherited.rs
+++ b/compiler/rustc_typeck/src/check/inherited.rs
@@ -117,7 +117,7 @@
maybe_typeck_results: infcx.in_progress_typeck_results,
},
infcx,
- fulfillment_cx: RefCell::new(TraitEngine::new(tcx)),
+ fulfillment_cx: RefCell::new(<dyn TraitEngine<'_>>::new(tcx)),
locals: RefCell::new(Default::default()),
deferred_sized_obligations: RefCell::new(Vec::new()),
deferred_call_resolutions: RefCell::new(Default::default()),
diff --git a/compiler/rustc_typeck/src/check/intrinsic.rs b/compiler/rustc_typeck/src/check/intrinsic.rs
index e99db7a..990ed5a 100644
--- a/compiler/rustc_typeck/src/check/intrinsic.rs
+++ b/compiler/rustc_typeck/src/check/intrinsic.rs
@@ -9,7 +9,6 @@
use rustc_errors::struct_span_err;
use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
use rustc_middle::traits::{ObligationCause, ObligationCauseCode};
use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, TyCtxt};
@@ -21,7 +20,6 @@
fn equate_intrinsic_type<'tcx>(
tcx: TyCtxt<'tcx>,
it: &hir::ForeignItem<'_>,
- def_id: DefId,
n_tps: usize,
sig: ty::PolyFnSig<'tcx>,
) {
@@ -35,7 +33,7 @@
}
}
- let i_n_tps = tcx.generics_of(def_id).own_counts().types;
+ let i_n_tps = tcx.generics_of(it.def_id).own_counts().types;
if i_n_tps != n_tps {
let span = match it.kind {
hir::ForeignItemKind::Fn(_, _, ref generics) => generics.span,
@@ -51,8 +49,8 @@
}
let fty = tcx.mk_fn_ptr(sig);
- let cause = ObligationCause::new(it.span, it.hir_id, ObligationCauseCode::IntrinsicType);
- require_same_types(tcx, &cause, tcx.mk_fn_ptr(tcx.fn_sig(def_id)), fty);
+ let cause = ObligationCause::new(it.span, it.hir_id(), ObligationCauseCode::IntrinsicType);
+ require_same_types(tcx, &cause, tcx.mk_fn_ptr(tcx.fn_sig(it.def_id)), fty);
}
/// Returns `true` if the given intrinsic is unsafe to call or not.
@@ -100,8 +98,7 @@
/// and in `library/core/src/intrinsics.rs`.
pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
let param = |n| tcx.mk_ty_param(n, Symbol::intern(&format!("P{}", n)));
- let def_id = tcx.hir().local_def_id(it.hir_id).to_def_id();
- let intrinsic_name = tcx.item_name(def_id);
+ let intrinsic_name = tcx.item_name(it.def_id.to_def_id());
let name_str = intrinsic_name.as_str();
let mk_va_list_ty = |mutbl| {
@@ -370,7 +367,7 @@
};
let sig = tcx.mk_fn_sig(inputs.into_iter(), output, false, unsafety, Abi::RustIntrinsic);
let sig = ty::Binder::bind(sig);
- equate_intrinsic_type(tcx, it, def_id, n_tps, sig)
+ equate_intrinsic_type(tcx, it, n_tps, sig)
}
/// Type-check `extern "platform-intrinsic" { ... }` functions.
@@ -380,7 +377,6 @@
tcx.mk_ty_param(n, name)
};
- let def_id = tcx.hir().local_def_id(it.hir_id).to_def_id();
let name = it.ident.name;
let (n_tps, inputs, output) = match name {
@@ -402,6 +398,7 @@
| sym::simd_fpow
| sym::simd_saturating_add
| sym::simd_saturating_sub => (1, vec![param(0), param(0)], param(0)),
+ sym::simd_neg => (1, vec![param(0)], param(0)),
sym::simd_fsqrt
| sym::simd_fsin
| sym::simd_fcos
@@ -464,5 +461,5 @@
Abi::PlatformIntrinsic,
);
let sig = ty::Binder::dummy(sig);
- equate_intrinsic_type(tcx, it, def_id, n_tps, sig)
+ equate_intrinsic_type(tcx, it, n_tps, sig)
}
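
The new simd_neg entry above registers a unary platform intrinsic (one type parameter, signature `param(0) -> param(0)`). As a hedged, nightly-only sketch of how such an intrinsic is declared and used from user code (the vector type is invented, and feature gates may differ between nightlies):

    #![feature(platform_intrinsics, repr_simd)]

    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct I32x4(i32, i32, i32, i32);

    extern "platform-intrinsic" {
        fn simd_neg<T>(x: T) -> T;
    }

    fn main() {
        let v = I32x4(1, -2, 3, -4);
        // Platform intrinsics are unsafe to call; the checker above only verifies
        // that the declared signature matches the expected one.
        let n = unsafe { simd_neg(v) };
        let out: [i32; 4] = unsafe { std::mem::transmute(n) };
        assert_eq!(out, [-1, 2, -3, 4]);
    }
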
diff --git a/compiler/rustc_typeck/src/check/method/confirm.rs b/compiler/rustc_typeck/src/check/method/confirm.rs
index e5f1928..4a2dd6f 100644
--- a/compiler/rustc_typeck/src/check/method/confirm.rs
+++ b/compiler/rustc_typeck/src/check/method/confirm.rs
@@ -155,32 +155,46 @@
let mut target =
self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
- if let Some(mutbl) = pick.autoref {
- let region = self.next_region_var(infer::Autoref(self.span, pick.item));
- target = self.tcx.mk_ref(region, ty::TypeAndMut { mutbl, ty: target });
- let mutbl = match mutbl {
- hir::Mutability::Not => AutoBorrowMutability::Not,
- hir::Mutability::Mut => AutoBorrowMutability::Mut {
- // Method call receivers are the primary use case
- // for two-phase borrows.
- allow_two_phase_borrow: AllowTwoPhase::Yes,
- },
- };
- adjustments
- .push(Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)), target });
+ match &pick.autoref_or_ptr_adjustment {
+ Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl, unsize }) => {
+ let region = self.next_region_var(infer::Autoref(self.span, pick.item));
+ target = self.tcx.mk_ref(region, ty::TypeAndMut { mutbl: *mutbl, ty: target });
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // Method call receivers are the primary use case
+ // for two-phase borrows.
+ allow_two_phase_borrow: AllowTwoPhase::Yes,
+ },
+ };
+ adjustments.push(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)),
+ target,
+ });
- if let Some(unsize_target) = pick.unsize {
- target = self
- .tcx
- .mk_ref(region, ty::TypeAndMut { mutbl: mutbl.into(), ty: unsize_target });
- adjustments.push(Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target });
+ if let Some(unsize_target) = unsize {
+ target = self
+ .tcx
+ .mk_ref(region, ty::TypeAndMut { mutbl: mutbl.into(), ty: unsize_target });
+ adjustments
+ .push(Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target });
+ }
}
- } else {
- // No unsizing should be performed without autoref (at
- // least during method dispach). This is because we
- // currently only unsize `[T;N]` to `[T]`, and naturally
- // that must occur being a reference.
- assert!(pick.unsize.is_none());
+ Some(probe::AutorefOrPtrAdjustment::ToConstPtr) => {
+ target = match target.kind() {
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => {
+ assert_eq!(*mutbl, hir::Mutability::Mut);
+ self.tcx.mk_ptr(ty::TypeAndMut { mutbl: hir::Mutability::Not, ty })
+ }
+ other => panic!("Cannot adjust receiver type {:?} to const ptr", other),
+ };
+
+ adjustments.push(Adjustment {
+ kind: Adjust::Pointer(PointerCast::MutToConstPointer),
+ target,
+ });
+ }
+ None => {}
}
self.register_predicates(autoderef.into_obligations());
@@ -300,7 +314,7 @@
// variables.
let generics = self.tcx.generics_of(pick.item.def_id);
- let arg_count_correct = AstConv::check_generic_arg_count_for_call(
+ let arg_count_correct = <dyn AstConv<'_>>::check_generic_arg_count_for_call(
self.tcx,
self.span,
pick.item.def_id,
@@ -338,7 +352,8 @@
) -> subst::GenericArg<'tcx> {
match (¶m.kind, arg) {
(GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
- AstConv::ast_region_to_region(self.cfcx.fcx, lt, Some(param)).into()
+ <dyn AstConv<'_>>::ast_region_to_region(self.cfcx.fcx, lt, Some(param))
+ .into()
}
(GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
self.cfcx.to_ty(ty).into()
@@ -359,7 +374,7 @@
self.cfcx.var_for_def(self.cfcx.span, param)
}
}
- AstConv::create_substs_for_generic_args(
+ <dyn AstConv<'_>>::create_substs_for_generic_args(
self.tcx,
pick.item.def_id,
parent_substs,
diff --git a/compiler/rustc_typeck/src/check/method/probe.rs b/compiler/rustc_typeck/src/check/method/probe.rs
index 158c214..3006cab 100644
--- a/compiler/rustc_typeck/src/check/method/probe.rs
+++ b/compiler/rustc_typeck/src/check/method/probe.rs
@@ -10,6 +10,7 @@
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sync::Lrc;
+use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::def::Namespace;
use rustc_infer::infer::canonical::OriginalQueryValues;
@@ -153,28 +154,56 @@
Match,
}
+/// When adjusting a receiver we often want to do one of
+///
+/// - Add a `&` (or `&mut`), converting the receiver from `T` to `&T` (or `&mut T`)
+/// - If the receiver has type `*mut T`, convert it to `*const T`
+///
+/// This type tells us which one to do.
+///
+/// Note that in principle we could do both at the same time. For example, when the receiver has
+/// type `T`, we could autoref it to `&T`, then convert to `*const T`. Or, when it has type `*mut
+/// T`, we could convert it to `*const T`, then autoref to `&*const T`. However, currently we do
+/// (at most) one of these. Either the receiver has type `T` and we convert it to `&T` (or with
+/// `mut`), or it has type `*mut T` and we convert it to `*const T`.
+#[derive(Debug, PartialEq, Clone)]
+pub enum AutorefOrPtrAdjustment<'tcx> {
+    /// Receiver has type `T`, add `&` or `&mut` (if `T` is `mut`), and maybe also "unsize" it.
+ /// Unsizing is used to convert a `[T; N]` to `[T]`, which only makes sense when autorefing.
+ Autoref {
+ mutbl: hir::Mutability,
+
+ /// Indicates that the source expression should be "unsized" to a target type. This should
+ /// probably eventually go away in favor of just coercing method receivers.
+ unsize: Option<Ty<'tcx>>,
+ },
+ /// Receiver has type `*mut T`, convert to `*const T`
+ ToConstPtr,
+}
+
+impl<'tcx> AutorefOrPtrAdjustment<'tcx> {
+ fn get_unsize(&self) -> Option<Ty<'tcx>> {
+ match self {
+ AutorefOrPtrAdjustment::Autoref { mutbl: _, unsize } => unsize.clone(),
+ AutorefOrPtrAdjustment::ToConstPtr => None,
+ }
+ }
+}
+
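
A hedged illustration of what the `ToConstPtr` case amounts to in user code: the recorded adjustment is the `*mut T` to `*const T` conversion written out explicitly below (names are invented), rather than an autoref that would require dereferencing a raw pointer:

    fn main() {
        let mut x = 5_i32;
        let p: *mut i32 = &mut x;
        // The equivalent of the MutToConstPointer adjustment, spelled out by hand:
        let q: *const i32 = p as *const i32;
        assert!(!q.is_null());
    }
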
#[derive(Debug, PartialEq, Clone)]
pub struct Pick<'tcx> {
pub item: ty::AssocItem,
pub kind: PickKind<'tcx>,
pub import_ids: SmallVec<[LocalDefId; 1]>,
- // Indicates that the source expression should be autoderef'd N times
- //
- // A = expr | *expr | **expr | ...
+ /// Indicates that the source expression should be autoderef'd N times
+ ///
+ /// A = expr | *expr | **expr | ...
pub autoderefs: usize,
- // Indicates that an autoref is applied after the optional autoderefs
- //
- // B = A | &A | &mut A
- pub autoref: Option<hir::Mutability>,
-
- // Indicates that the source expression should be "unsized" to a
- // target type. This should probably eventually go away in favor
- // of just coercing method receivers.
- //
- // C = B | unsize(B)
- pub unsize: Option<Ty<'tcx>>,
+ /// Indicates that we want to add an autoref (and maybe also unsize it), or if the receiver is
+ /// `*mut T`, convert it to `*const T`.
+ pub autoref_or_ptr_adjustment: Option<AutorefOrPtrAdjustment<'tcx>>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -1085,24 +1114,23 @@
self.pick_by_value_method(step, self_ty).or_else(|| {
self.pick_autorefd_method(step, self_ty, hir::Mutability::Not)
.or_else(|| self.pick_autorefd_method(step, self_ty, hir::Mutability::Mut))
+ .or_else(|| self.pick_const_ptr_method(step, self_ty))
})
})
.next()
}
+ /// For each type `T` in the step list, this attempts to find a method where
+ /// the (transformed) self type is exactly `T`. We do however do one
+ /// transformation on the adjustment: if we are passing a region pointer in,
+ /// we will potentially *reborrow* it to a shorter lifetime. This allows us
+ /// to transparently pass `&mut` pointers, in particular, without consuming
+ /// them for their entire lifetime.
fn pick_by_value_method(
&mut self,
step: &CandidateStep<'tcx>,
self_ty: Ty<'tcx>,
) -> Option<PickResult<'tcx>> {
- //! For each type `T` in the step list, this attempts to find a
- //! method where the (transformed) self type is exactly `T`. We
- //! do however do one transformation on the adjustment: if we
- //! are passing a region pointer in, we will potentially
- //! *reborrow* it to a shorter lifetime. This allows us to
- //! transparently pass `&mut` pointers, in particular, without
- //! consuming them for their entire lifetime.
-
if step.unsize {
return None;
}
@@ -1114,7 +1142,10 @@
// Insert a `&*` or `&mut *` if this is a reference type:
if let ty::Ref(_, _, mutbl) = *step.self_ty.value.value.kind() {
pick.autoderefs += 1;
- pick.autoref = Some(mutbl);
+ pick.autoref_or_ptr_adjustment = Some(AutorefOrPtrAdjustment::Autoref {
+ mutbl,
+ unsize: pick.autoref_or_ptr_adjustment.and_then(|a| a.get_unsize()),
+ })
}
pick
@@ -1137,8 +1168,39 @@
self.pick_method(autoref_ty).map(|r| {
r.map(|mut pick| {
pick.autoderefs = step.autoderefs;
- pick.autoref = Some(mutbl);
- pick.unsize = step.unsize.then_some(self_ty);
+ pick.autoref_or_ptr_adjustment = Some(AutorefOrPtrAdjustment::Autoref {
+ mutbl,
+ unsize: step.unsize.then_some(self_ty),
+ });
+ pick
+ })
+ })
+ }
+
+ /// If `self_ty` is `*mut T` then this picks `*const T` methods. The reason why we have a
+ /// special case for this is because going from `*mut T` to `*const T` with autoderefs and
+ /// autorefs would require dereferencing the pointer, which is not safe.
+ fn pick_const_ptr_method(
+ &mut self,
+ step: &CandidateStep<'tcx>,
+ self_ty: Ty<'tcx>,
+ ) -> Option<PickResult<'tcx>> {
+ // Don't convert an unsized reference to ptr
+ if step.unsize {
+ return None;
+ }
+
+ let ty = match self_ty.kind() {
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl: hir::Mutability::Mut }) => ty,
+ _ => return None,
+ };
+
+ let const_self_ty = ty::TypeAndMut { ty, mutbl: hir::Mutability::Not };
+ let const_ptr_ty = self.tcx.mk_ptr(const_self_ty);
+ self.pick_method(const_ptr_ty).map(|r| {
+ r.map(|mut pick| {
+ pick.autoderefs = step.autoderefs;
+ pick.autoref_or_ptr_adjustment = Some(AutorefOrPtrAdjustment::ToConstPtr);
pick
})
})
@@ -1167,7 +1229,7 @@
//
// We suppress warning if we're picking the method only because it is a
// suggestion.
- self.emit_unstable_name_collision_hint(p, &unstable_candidates);
+ self.emit_unstable_name_collision_hint(p, &unstable_candidates, self_ty);
}
}
return Some(pick);
@@ -1246,24 +1308,46 @@
&self,
stable_pick: &Pick<'_>,
unstable_candidates: &[(&Candidate<'tcx>, Symbol)],
+ self_ty: Ty<'tcx>,
) {
self.tcx.struct_span_lint_hir(
lint::builtin::UNSTABLE_NAME_COLLISIONS,
self.fcx.body_id,
self.span,
|lint| {
- let mut diag = lint.build(
- "a method with this name may be added to the standard library in the future",
- );
- // FIXME: This should be a `span_suggestion` instead of `help`
- // However `self.span` only
- // highlights the method name, so we can't use it. Also consider reusing the code from
- // `report_method_error()`.
- diag.help(&format!(
- "call with fully qualified syntax `{}(...)` to keep using the current method",
- self.tcx.def_path_str(stable_pick.item.def_id),
+ let def_kind = stable_pick.item.kind.as_def_kind();
+ let mut diag = lint.build(&format!(
+ "{} {} with this name may be added to the standard library in the future",
+ def_kind.article(),
+ def_kind.descr(stable_pick.item.def_id),
));
-
+ match (stable_pick.item.kind, stable_pick.item.container) {
+ (ty::AssocKind::Fn, _) => {
+ // FIXME: This should be a `span_suggestion` instead of `help`
+ // However `self.span` only
+ // highlights the method name, so we can't use it. Also consider reusing
+ // the code from `report_method_error()`.
+ diag.help(&format!(
+ "call with fully qualified syntax `{}(...)` to keep using the current \
+ method",
+ self.tcx.def_path_str(stable_pick.item.def_id),
+ ));
+ }
+ (ty::AssocKind::Const, ty::AssocItemContainer::TraitContainer(def_id)) => {
+ diag.span_suggestion(
+ self.span,
+ "use the fully qualified path to the associated const",
+ format!(
+ "<{} as {}>::{}",
+ self_ty,
+ self.tcx.def_path_str(def_id),
+ stable_pick.item.ident
+ ),
+ Applicability::MachineApplicable,
+ );
+ }
+ _ => {}
+ }
if self.tcx.sess.is_nightly_build() {
for (candidate, feature) in unstable_candidates {
diag.help(&format!(
@@ -1489,8 +1573,7 @@
kind: TraitPick,
import_ids: probes[0].0.import_ids.clone(),
autoderefs: 0,
- autoref: None,
- unsize: None,
+ autoref_or_ptr_adjustment: None,
})
}
@@ -1695,7 +1778,7 @@
} else {
self.fcx
.associated_item(def_id, name, Namespace::ValueNS)
- .map_or(Vec::new(), |x| vec![x])
+ .map_or_else(Vec::new, |x| vec![x])
}
} else {
self.tcx.associated_items(def_id).in_definition_order().copied().collect()
@@ -1727,8 +1810,7 @@
},
import_ids: self.import_ids.clone(),
autoderefs: 0,
- autoref: None,
- unsize: None,
+ autoref_or_ptr_adjustment: None,
}
}
}
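
As a hedged, self-contained sketch of the fully qualified form that the new associated-const arm of the unstable-name-collision hint suggests (the trait and type names are invented):

    trait Answer {
        const VALUE: i32;
    }

    struct Widget;

    impl Answer for Widget {
        const VALUE: i32 = 42;
    }

    fn main() {
        // The `<SelfTy as Trait>::CONST` form keeps resolving to the trait's
        // associated const even if a colliding name lands in the standard library later.
        assert_eq!(<Widget as Answer>::VALUE, 42);
    }
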
diff --git a/compiler/rustc_typeck/src/check/method/suggest.rs b/compiler/rustc_typeck/src/check/method/suggest.rs
index d49c7ca..13757ac 100644
--- a/compiler/rustc_typeck/src/check/method/suggest.rs
+++ b/compiler/rustc_typeck/src/check/method/suggest.rs
@@ -11,7 +11,6 @@
use rustc_hir::lang_items::LangItem;
use rustc_hir::{ExprKind, Node, QPath};
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_middle::hir::map as hir_map;
use rustc_middle::ty::fast_reject::simplify_type;
use rustc_middle::ty::print::with_crate_prefix;
use rustc_middle::ty::{
@@ -24,6 +23,7 @@
use rustc_trait_selection::traits::Obligation;
use std::cmp::Ordering;
+use std::iter;
use super::probe::Mode;
use super::{CandidateSource, MethodError, NoMatchData};
@@ -390,7 +390,7 @@
"no {} named `{}` found for {} `{}` in the current scope",
item_kind,
item_name,
- actual.prefix_string(),
+ actual.prefix_string(self.tcx),
ty_str,
);
if let Mode::MethodCall = mode {
@@ -517,21 +517,21 @@
}
if self.is_fn_ty(&rcvr_ty, span) {
- macro_rules! report_function {
- ($span:expr, $name:expr) => {
- err.note(&format!(
- "`{}` is a function, perhaps you wish to call it",
- $name
- ));
- };
+ fn report_function<T: std::fmt::Display>(
+ err: &mut DiagnosticBuilder<'_>,
+ name: T,
+ ) {
+ err.note(
+ &format!("`{}` is a function, perhaps you wish to call it", name,),
+ );
}
if let SelfSource::MethodCall(expr) = source {
if let Ok(expr_string) = tcx.sess.source_map().span_to_snippet(expr.span) {
- report_function!(expr.span, expr_string);
+ report_function(&mut err, expr_string);
} else if let ExprKind::Path(QPath::Resolved(_, ref path)) = expr.kind {
if let Some(segment) = path.segments.last() {
- report_function!(expr.span, segment.ident);
+ report_function(&mut err, segment.ident);
}
}
}
@@ -600,7 +600,7 @@
});
if let Some(hir::Node::Item(hir::Item { kind, .. })) = node {
if let Some(g) = kind.generics() {
- let key = match &g.where_clause.predicates[..] {
+ let key = match g.where_clause.predicates {
[.., pred] => (pred.span().shrink_to_hi(), false),
[] => (
g.where_clause
@@ -649,21 +649,25 @@
ty::PredicateKind::Projection(pred) => {
let pred = bound_predicate.rebind(pred);
// `<Foo as Iterator>::Item = String`.
- let trait_ref =
- pred.skip_binder().projection_ty.trait_ref(self.tcx);
- let assoc = self
- .tcx
- .associated_item(pred.skip_binder().projection_ty.item_def_id);
- let ty = pred.skip_binder().ty;
- let obligation = format!("{}::{} = {}", trait_ref, assoc.ident, ty);
- let quiet = format!(
- "<_ as {}>::{} = {}",
- trait_ref.print_only_trait_path(),
- assoc.ident,
- ty
+ let projection_ty = pred.skip_binder().projection_ty;
+
+ let substs_with_infer_self = tcx.mk_substs(
+ iter::once(tcx.mk_ty_var(ty::TyVid { index: 0 }).into())
+ .chain(projection_ty.substs.iter().skip(1)),
);
- bound_span_label(trait_ref.self_ty(), &obligation, &quiet);
- Some((obligation, trait_ref.self_ty()))
+
+ let quiet_projection_ty = ty::ProjectionTy {
+ substs: substs_with_infer_self,
+ item_def_id: projection_ty.item_def_id,
+ };
+
+ let ty = pred.skip_binder().ty;
+
+ let obligation = format!("{} = {}", projection_ty, ty);
+ let quiet = format!("{} = {}", quiet_projection_ty, ty);
+
+ bound_span_label(projection_ty.self_ty(), &obligation, &quiet);
+ Some((obligation, projection_ty.self_ty()))
}
ty::PredicateKind::Trait(poly_trait_ref, _) => {
let p = poly_trait_ref.trait_ref;
@@ -728,7 +732,7 @@
.map(|(_, path)| path)
.collect::<Vec<_>>()
.join("\n");
- let actual_prefix = actual.prefix_string();
+ let actual_prefix = actual.prefix_string(self.tcx);
err.set_primary_message(&format!(
"the {item_kind} `{item_name}` exists for {actual_prefix} `{ty_str}`, but its trait bounds were not satisfied"
));
@@ -1142,7 +1146,7 @@
let trait_def_ids: FxHashSet<DefId> = param
.bounds
.iter()
- .filter_map(|bound| Some(bound.trait_ref()?.trait_def_id()?))
+ .filter_map(|bound| bound.trait_ref()?.trait_def_id())
.collect();
if !candidates.iter().any(|t| trait_def_ids.contains(&t.def_id)) {
err.span_suggestions(
@@ -1352,17 +1356,15 @@
// Crate-local:
- struct Visitor<'a, 'tcx> {
- map: &'a hir_map::Map<'tcx>,
+ struct Visitor<'a> {
traits: &'a mut Vec<DefId>,
}
- impl<'v, 'a, 'tcx> itemlikevisit::ItemLikeVisitor<'v> for Visitor<'a, 'tcx> {
+ impl<'v, 'a> itemlikevisit::ItemLikeVisitor<'v> for Visitor<'a> {
fn visit_item(&mut self, i: &'v hir::Item<'v>) {
match i.kind {
hir::ItemKind::Trait(..) | hir::ItemKind::TraitAlias(..) => {
- let def_id = self.map.local_def_id(i.hir_id);
- self.traits.push(def_id.to_def_id());
+ self.traits.push(i.def_id.to_def_id());
}
_ => (),
}
@@ -1375,7 +1377,7 @@
fn visit_foreign_item(&mut self, _foreign_item: &hir::ForeignItem<'_>) {}
}
- tcx.hir().krate().visit_all_item_likes(&mut Visitor { map: &tcx.hir(), traits: &mut traits });
+ tcx.hir().krate().visit_all_item_likes(&mut Visitor { traits: &mut traits });
// Cross-crate:
@@ -1445,8 +1447,8 @@
return;
}
// Find a `use` statement.
- for item_id in module.item_ids {
- let item = self.tcx.hir().expect_item(item_id.id);
+ for &item_id in module.item_ids {
+ let item = self.tcx.hir().item(item_id);
match item.kind {
hir::ItemKind::Use(..) => {
// Don't suggest placing a `use` before the prelude
@@ -1464,11 +1466,12 @@
if self.span.map_or(true, |span| item.span < span) {
if !item.span.from_expansion() {
// Don't insert between attributes and an item.
- if item.attrs.is_empty() {
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+ if attrs.is_empty() {
self.span = Some(item.span.shrink_to_lo());
} else {
// Find the first attribute on the item.
- for attr in item.attrs {
+ for attr in attrs {
if self.span.map_or(true, |span| attr.span < span) {
self.span = Some(attr.span.shrink_to_lo());
}
diff --git a/compiler/rustc_typeck/src/check/mod.rs b/compiler/rustc_typeck/src/check/mod.rs
index dc3e3b4..ad9bb70 100644
--- a/compiler/rustc_typeck/src/check/mod.rs
+++ b/compiler/rustc_typeck/src/check/mod.rs
@@ -121,9 +121,9 @@
use rustc_session::config;
use rustc_session::parse::feature_err;
use rustc_session::Session;
-use rustc_span::source_map::DUMMY_SP;
use rustc_span::symbol::{kw, Ident};
use rustc_span::{self, BytePos, MultiSpan, Span};
+use rustc_span::{source_map::DUMMY_SP, sym};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi::Abi;
use rustc_trait_selection::traits;
@@ -495,13 +495,14 @@
let fcx = if let (Some(header), Some(decl)) = (fn_header, fn_decl) {
let fn_sig = if crate::collect::get_infer_ret_ty(&decl.output).is_some() {
let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
- AstConv::ty_of_fn(
+ <dyn AstConv<'_>>::ty_of_fn(
&fcx,
header.unsafety,
header.abi,
decl,
&hir::Generics::empty(),
None,
+ None,
)
} else {
tcx.fn_sig(def_id)
@@ -526,7 +527,7 @@
let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
let expected_type = body_ty
.and_then(|ty| match ty.kind {
- hir::TyKind::Infer => Some(AstConv::ast_ty_to_ty(&fcx, ty)),
+ hir::TyKind::Infer => Some(<dyn AstConv<'_>>::ast_ty_to_ty(&fcx, ty)),
_ => None,
})
.unwrap_or_else(|| match tcx.hir().get(id) {
@@ -546,11 +547,12 @@
let expected_type = fcx.normalize_associated_types_in(body.value.span, expected_type);
fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized);
- let revealed_ty = if tcx.features().impl_trait_in_bindings {
- fcx.instantiate_opaque_types_from_value(id, expected_type, body.value.span)
- } else {
- expected_type
- };
+ let revealed_ty = fcx.instantiate_opaque_types_from_value(
+ id,
+ expected_type,
+ body.value.span,
+ Some(sym::impl_trait_in_bindings),
+ );
// Gather locals in statics (because of block expressions).
GatherLocalsVisitor::new(&fcx, id).visit_body(body);
@@ -838,7 +840,7 @@
// Obtain the level of indentation ending in `sugg_sp`.
let indentation = tcx.sess.source_map().span_to_margin(sugg_sp).unwrap_or(0);
// Make the whitespace that will make the suggestion have the right indentation.
- let padding: String = (0..indentation).map(|_| " ").collect();
+ let padding: String = std::iter::repeat(" ").take(indentation).collect();
for trait_item in missing_items {
let snippet = suggestion_signature(&trait_item, tcx);
@@ -1061,7 +1063,10 @@
E0533,
"expected unit struct, unit variant or constant, found {}{}",
res.descr(),
- tcx.sess.source_map().span_to_snippet(span).map_or(String::new(), |s| format!(" `{}`", s)),
+ tcx.sess
+ .source_map()
+ .span_to_snippet(span)
+ .map_or_else(|_| String::new(), |s| format!(" `{}`", s)),
)
.emit();
}
diff --git a/compiler/rustc_typeck/src/check/op.rs b/compiler/rustc_typeck/src/check/op.rs
index 9ab056c..567cb1a 100644
--- a/compiler/rustc_typeck/src/check/op.rs
+++ b/compiler/rustc_typeck/src/check/op.rs
@@ -681,7 +681,7 @@
format!("cannot apply unary operator `{}`", op.as_str()),
);
match actual.kind() {
- Uint(_) if op == hir::UnOp::UnNeg => {
+ Uint(_) if op == hir::UnOp::Neg => {
err.note("unsigned values cannot be negated");
if let hir::ExprKind::Unary(
@@ -711,9 +711,9 @@
Ref(_, ref lty, _) if *lty.kind() == Str => {}
_ => {
let missing_trait = match op {
- hir::UnOp::UnNeg => "std::ops::Neg",
- hir::UnOp::UnNot => "std::ops::Not",
- hir::UnOp::UnDeref => "std::ops::UnDerf",
+ hir::UnOp::Neg => "std::ops::Neg",
+ hir::UnOp::Not => "std::ops::Not",
+                        hir::UnOp::Deref => "std::ops::Deref",
};
suggest_impl_missing(&mut err, operand_ty, &missing_trait);
}
@@ -782,9 +782,9 @@
span_bug!(span, "&& and || are not overloadable")
}
}
- } else if let Op::Unary(hir::UnOp::UnNot, _) = op {
+ } else if let Op::Unary(hir::UnOp::Not, _) = op {
(sym::not, lang.not_trait())
- } else if let Op::Unary(hir::UnOp::UnNeg, _) = op {
+ } else if let Op::Unary(hir::UnOp::Neg, _) = op {
(sym::neg, lang.neg_trait())
} else {
bug!("lookup_op_method: op not supported: {:?}", op)
diff --git a/compiler/rustc_typeck/src/check/pat.rs b/compiler/rustc_typeck/src/check/pat.rs
index d7e6966..79c544b 100644
--- a/compiler/rustc_typeck/src/check/pat.rs
+++ b/compiler/rustc_typeck/src/check/pat.rs
@@ -17,6 +17,7 @@
use rustc_span::symbol::Ident;
use rustc_span::{BytePos, DUMMY_SP};
use rustc_trait_selection::traits::{ObligationCause, Pattern};
+use ty::VariantDef;
use std::cmp;
use std::collections::hash_map::Entry::{Occupied, Vacant};
@@ -150,7 +151,7 @@
///
/// Outside of this module, `check_pat_top` should always be used.
/// Conversely, inside this module, `check_pat_top` should never be used.
- #[instrument(skip(self, ti))]
+ #[instrument(level = "debug", skip(self, ti))]
fn check_pat(
&self,
pat: &'tcx Pat<'tcx>,
@@ -679,7 +680,7 @@
&self,
pat: &'tcx Pat<'tcx>,
qpath: &hir::QPath<'_>,
- fields: &'tcx [hir::FieldPat<'tcx>],
+ fields: &'tcx [hir::PatField<'tcx>],
etc: bool,
expected: Ty<'tcx>,
def_bm: BindingMode,
@@ -878,7 +879,7 @@
let sm = tcx.sess.source_map();
let path_str = sm
.span_to_snippet(sm.span_until_char(pat.span, '('))
- .map_or(String::new(), |s| format!(" `{}`", s.trim_end()));
+ .map_or_else(|_| String::new(), |s| format!(" `{}`", s.trim_end()));
let msg = format!(
"expected tuple struct or tuple variant, found {}{}",
res.descr(),
@@ -1150,7 +1151,7 @@
adt_ty: Ty<'tcx>,
pat: &'tcx Pat<'tcx>,
variant: &'tcx ty::VariantDef,
- fields: &'tcx [hir::FieldPat<'tcx>],
+ fields: &'tcx [hir::PatField<'tcx>],
etc: bool,
def_bm: BindingMode,
ti: TopInfo<'tcx>,
@@ -1264,14 +1265,64 @@
u.emit();
}
}
- (None, Some(mut err)) | (Some(mut err), None) => {
+ (None, Some(mut u)) => {
+ if let Some(mut e) = self.error_tuple_variant_as_struct_pat(pat, fields, variant) {
+ u.delay_as_bug();
+ e.emit();
+ } else {
+ u.emit();
+ }
+ }
+ (Some(mut err), None) => {
err.emit();
}
- (None, None) => {}
+ (None, None) => {
+ if let Some(mut err) =
+ self.error_tuple_variant_index_shorthand(variant, pat, fields)
+ {
+ err.emit();
+ }
+ }
}
no_field_errors
}
+ fn error_tuple_variant_index_shorthand(
+ &self,
+ variant: &VariantDef,
+ pat: &'_ Pat<'_>,
+ fields: &[hir::PatField<'_>],
+ ) -> Option<DiagnosticBuilder<'_>> {
+ // if this is a tuple struct, then all field names will be numbers
+ // so if any fields in a struct pattern use shorthand syntax, they will
+ // be invalid identifiers (for example, Foo { 0, 1 }).
+ if let (CtorKind::Fn, PatKind::Struct(qpath, field_patterns, ..)) =
+ (variant.ctor_kind, &pat.kind)
+ {
+ let has_shorthand_field_name = field_patterns.iter().any(|field| field.is_shorthand);
+ if has_shorthand_field_name {
+ let path = rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| {
+ s.print_qpath(qpath, false)
+ });
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ pat.span,
+ E0769,
+ "tuple variant `{}` written as struct variant",
+ path
+ );
+ err.span_suggestion_verbose(
+ qpath.span().shrink_to_hi().to(pat.span.shrink_to_hi()),
+ "use the tuple variant pattern syntax instead",
+ format!("({})", self.get_suggested_tuple_struct_pattern(fields, variant)),
+ Applicability::MaybeIncorrect,
+ );
+ return Some(err);
+ }
+ }
+ None
+ }
+
fn error_foreign_non_exhaustive_spat(&self, pat: &Pat<'_>, descr: &str, no_fields: bool) {
let sess = self.tcx.sess;
let sm = sess.source_map();
@@ -1395,7 +1446,7 @@
fn error_tuple_variant_as_struct_pat(
&self,
pat: &Pat<'_>,
- fields: &'tcx [hir::FieldPat<'tcx>],
+ fields: &'tcx [hir::PatField<'tcx>],
variant: &ty::VariantDef,
) -> Option<DiagnosticBuilder<'tcx>> {
if let (CtorKind::Fn, PatKind::Struct(qpath, ..)) = (variant.ctor_kind, &pat.kind) {
@@ -1411,16 +1462,7 @@
);
let (sugg, appl) = if fields.len() == variant.fields.len() {
(
- fields
- .iter()
- .map(|f| match self.tcx.sess.source_map().span_to_snippet(f.pat.span) {
- Ok(f) => f,
- Err(_) => rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| {
- s.print_pat(f.pat)
- }),
- })
- .collect::<Vec<String>>()
- .join(", "),
+ self.get_suggested_tuple_struct_pattern(fields, variant),
Applicability::MachineApplicable,
)
} else {
@@ -1429,10 +1471,10 @@
Applicability::MaybeIncorrect,
)
};
- err.span_suggestion(
- pat.span,
+ err.span_suggestion_verbose(
+ qpath.span().shrink_to_hi().to(pat.span.shrink_to_hi()),
"use the tuple variant pattern syntax instead",
- format!("{}({})", path, sugg),
+ format!("({})", sugg),
appl,
);
return Some(err);
@@ -1440,6 +1482,34 @@
None
}
+ fn get_suggested_tuple_struct_pattern(
+ &self,
+ fields: &[hir::PatField<'_>],
+ variant: &VariantDef,
+ ) -> String {
+ let variant_field_idents = variant.fields.iter().map(|f| f.ident).collect::<Vec<Ident>>();
+ fields
+ .iter()
+ .map(|field| {
+ match self.tcx.sess.source_map().span_to_snippet(field.pat.span) {
+ Ok(f) => {
+ // Field names are numbers, but numbers
+ // are not valid identifiers
+ if variant_field_idents.contains(&field.ident) {
+ String::from("_")
+ } else {
+ f
+ }
+ }
+ Err(_) => rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| {
+ s.print_pat(field.pat)
+ }),
+ }
+ })
+ .collect::<Vec<String>>()
+ .join(", ")
+ }
+
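
A hedged sketch of the pattern mistake that error_tuple_variant_index_shorthand and the reworked E0769 suggestion target (the enum is invented): writing a tuple variant with struct-pattern shorthand such as `Shape::Pair { 0, 1 }` is rejected, and the diagnostic now points to the tuple form used below:

    enum Shape {
        Pair(i32, i32),
        Empty,
    }

    fn main() {
        let s = Shape::Pair(1, 2);
        match s {
            // `Shape::Pair { 0, 1 }` would use numeric "field names", which are not
            // valid identifiers; the suggested tuple variant pattern syntax is:
            Shape::Pair(a, b) => assert_eq!((a, b), (1, 2)),
            Shape::Empty => unreachable!(),
        }
    }
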
/// Returns a diagnostic reporting a struct pattern which is missing an `..` due to
/// inaccessible fields.
///
@@ -1458,7 +1528,7 @@
fn error_no_accessible_fields(
&self,
pat: &Pat<'_>,
- fields: &'tcx [hir::FieldPat<'tcx>],
+ fields: &'tcx [hir::PatField<'tcx>],
) -> DiagnosticBuilder<'tcx> {
let mut err = self
.tcx
@@ -1504,7 +1574,7 @@
&self,
pat: &Pat<'_>,
unmentioned_fields: &[(&ty::FieldDef, Ident)],
- fields: &'tcx [hir::FieldPat<'tcx>],
+ fields: &'tcx [hir::PatField<'tcx>],
) -> DiagnosticBuilder<'tcx> {
let field_names = if unmentioned_fields.len() == 1 {
format!("field `{}`", unmentioned_fields[0].1)
diff --git a/compiler/rustc_typeck/src/check/place_op.rs b/compiler/rustc_typeck/src/check/place_op.rs
index 502cb56..5bd3851 100644
--- a/compiler/rustc_typeck/src/check/place_op.rs
+++ b/compiler/rustc_typeck/src/check/place_op.rs
@@ -103,9 +103,9 @@
let method =
self.try_overloaded_place_op(expr.span, self_ty, &[input_ty], PlaceOp::Index);
- let result = method.map(|ok| {
+ if let Some(result) = method {
debug!("try_index_step: success, using overloaded indexing");
- let method = self.register_infer_ok_obligations(ok);
+ let method = self.register_infer_ok_obligations(result);
let mut adjustments = self.adjust_steps(autoderef);
if let ty::Ref(region, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() {
@@ -128,10 +128,8 @@
self.apply_adjustments(base_expr, adjustments);
self.write_method_call(expr.hir_id, method);
- (input_ty, self.make_overloaded_place_return_type(method).ty)
- });
- if result.is_some() {
- return result;
+
+ return Some((input_ty, self.make_overloaded_place_return_type(method).ty));
}
}
@@ -203,7 +201,7 @@
while let hir::ExprKind::Field(ref expr, _)
| hir::ExprKind::Index(ref expr, _)
- | hir::ExprKind::Unary(hir::UnOp::UnDeref, ref expr) = exprs.last().unwrap().kind
+ | hir::ExprKind::Unary(hir::UnOp::Deref, ref expr) = exprs.last().unwrap().kind
{
exprs.push(&expr);
}
@@ -216,7 +214,7 @@
debug!("convert_place_derefs_to_mutable: i={} expr={:?}", i, expr);
let mut source = self.node_ty(expr.hir_id);
- if matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::UnDeref, _)) {
+ if matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::Deref, _)) {
// Clear previous flag; after a pointer indirection it does not apply any more.
inside_union = false;
}
@@ -270,7 +268,7 @@
hir::ExprKind::Index(ref base_expr, ..) => {
self.convert_place_op_to_mutable(PlaceOp::Index, expr, base_expr);
}
- hir::ExprKind::Unary(hir::UnOp::UnDeref, ref base_expr) => {
+ hir::ExprKind::Unary(hir::UnOp::Deref, ref base_expr) => {
self.convert_place_op_to_mutable(PlaceOp::Deref, expr, base_expr);
}
_ => {}
diff --git a/compiler/rustc_typeck/src/check/regionck.rs b/compiler/rustc_typeck/src/check/regionck.rs
index 88e8dd3..8f8514c 100644
--- a/compiler/rustc_typeck/src/check/regionck.rs
+++ b/compiler/rustc_typeck/src/check/regionck.rs
@@ -354,7 +354,7 @@
hir_id: hir::HirId,
) {
assert!(
- matches!(fk, intravisit::FnKind::Closure(..)),
+ matches!(fk, intravisit::FnKind::Closure),
"visit_fn invoked for something other than a closure"
);
@@ -771,21 +771,39 @@
        debug!("link_upvar_region(borrow_region={:?}, upvar_id={:?}", borrow_region, upvar_id);
// A by-reference upvar can't be borrowed for longer than the
// upvar is borrowed from the environment.
- match self.typeck_results.borrow().upvar_capture(upvar_id) {
- ty::UpvarCapture::ByRef(upvar_borrow) => {
- self.sub_regions(
- infer::ReborrowUpvar(span, upvar_id),
- borrow_region,
- upvar_borrow.region,
- );
- if let ty::ImmBorrow = upvar_borrow.kind {
- debug!("link_upvar_region: capture by shared ref");
- return;
+ let closure_local_def_id = upvar_id.closure_expr_id;
+ let mut all_captures_are_imm_borrow = true;
+ for captured_place in self
+ .typeck_results
+ .borrow()
+ .closure_min_captures
+ .get(&closure_local_def_id.to_def_id())
+ .and_then(|root_var_min_cap| root_var_min_cap.get(&upvar_id.var_path.hir_id))
+ .into_iter()
+ .flatten()
+ {
+ match captured_place.info.capture_kind {
+ ty::UpvarCapture::ByRef(upvar_borrow) => {
+ self.sub_regions(
+ infer::ReborrowUpvar(span, upvar_id),
+ borrow_region,
+ upvar_borrow.region,
+ );
+ if let ty::ImmBorrow = upvar_borrow.kind {
+ debug!("link_upvar_region: capture by shared ref");
+ } else {
+ all_captures_are_imm_borrow = false;
+ }
+ }
+ ty::UpvarCapture::ByValue(_) => {
+ all_captures_are_imm_borrow = false;
}
}
- ty::UpvarCapture::ByValue(_) => {}
}
- let fn_hir_id = self.tcx.hir().local_def_id_to_hir_id(upvar_id.closure_expr_id);
+ if all_captures_are_imm_borrow {
+ return;
+ }
+ let fn_hir_id = self.tcx.hir().local_def_id_to_hir_id(closure_local_def_id);
let ty = self.resolve_node_type(fn_hir_id);
debug!("link_upvar_region: ty={:?}", ty);
diff --git a/compiler/rustc_typeck/src/check/upvar.rs b/compiler/rustc_typeck/src/check/upvar.rs
index 04a9e65..8a4c69b 100644
--- a/compiler/rustc_typeck/src/check/upvar.rs
+++ b/compiler/rustc_typeck/src/check/upvar.rs
@@ -40,13 +40,17 @@
use rustc_hir::def_id::LocalDefId;
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_infer::infer::UpvarRegion;
-use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, ProjectionKind};
+use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection, ProjectionKind};
+use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeckResults, UpvarSubsts};
use rustc_session::lint;
use rustc_span::sym;
use rustc_span::{MultiSpan, Span, Symbol};
+use rustc_index::vec::Idx;
+use rustc_target::abi::VariantIdx;
+
/// Describe the relationship between the paths of two places
/// eg:
/// - `foo` is ancestor of `foo.bar.baz`
@@ -142,6 +146,7 @@
current_closure_kind: ty::ClosureKind::LATTICE_BOTTOM,
current_origin: None,
capture_information: Default::default(),
+ fake_reads: Default::default(),
};
euv::ExprUseVisitor::new(
&mut delegate,
@@ -177,7 +182,8 @@
debug!("seed place {:?}", place);
let upvar_id = ty::UpvarId::new(*var_hir_id, local_def_id);
- let capture_kind = self.init_capture_kind(capture_clause, upvar_id, span);
+ let capture_kind =
+ self.init_capture_kind_for_place(&place, capture_clause, upvar_id, span);
let fake_info = ty::CaptureInfo {
capture_kind_expr_id: None,
path_expr_id: None,
@@ -202,11 +208,8 @@
// If we have an origin, store it.
if let Some(origin) = delegate.current_origin.clone() {
let origin = if self.tcx.features().capture_disjoint_fields {
- origin
+ (origin.0, restrict_capture_precision(origin.1))
} else {
- // FIXME(project-rfc-2229#31): Once the changes to support reborrowing are
- // made, make sure we are selecting and restricting
- // the origin correctly.
(origin.0, Place { projections: vec![], ..origin.1 })
};
@@ -219,8 +222,6 @@
self.log_closure_min_capture_info(closure_def_id, span);
- self.min_captures_to_closure_captures_bridge(closure_def_id);
-
// Now that we've analyzed the closure, we know how each
// variable is borrowed, and we know what traits the closure
// implements (Fn vs FnMut etc). We now have some updates to do
@@ -245,6 +246,13 @@
let final_tupled_upvars_type = self.tcx.mk_tup(final_upvar_tys.iter());
self.demand_suptype(span, substs.tupled_upvars_ty(), final_tupled_upvars_type);
+ let fake_reads = delegate
+ .fake_reads
+ .into_iter()
+ .map(|(place, cause, hir_id)| (place, cause, hir_id))
+ .collect();
+ self.typeck_results.borrow_mut().closure_fake_reads.insert(closure_def_id, fake_reads);
+
// If we are also inferred the closure kind here,
// process any deferred resolutions.
let deferred_call_resolutions = self.remove_deferred_call_resolutions(closure_def_id);
@@ -260,8 +268,6 @@
// local crate or were inlined into it along with some function.
// This may change if abstract return types of some sort are
// implemented.
- let tcx = self.tcx;
-
self.typeck_results
.borrow()
.closure_min_captures_flattened(closure_id)
@@ -276,7 +282,7 @@
match capture {
ty::UpvarCapture::ByValue(_) => upvar_ty,
- ty::UpvarCapture::ByRef(borrow) => tcx.mk_ref(
+ ty::UpvarCapture::ByRef(borrow) => self.tcx.mk_ref(
borrow.region,
ty::TypeAndMut { ty: upvar_ty, mutbl: borrow.kind.to_mutbl_lossy() },
),
@@ -285,80 +291,6 @@
.collect()
}
- /// Bridge for closure analysis
- /// ----------------------------
- ///
- /// For closure with DefId `c`, the bridge converts structures required for supporting RFC 2229,
- /// to structures currently used in the compiler for handling closure captures.
- ///
- /// For example the following structure will be converted:
- ///
- /// closure_min_captures
- /// foo -> [ {foo.x, ImmBorrow}, {foo.y, MutBorrow} ]
- /// bar -> [ {bar.z, ByValue}, {bar.q, MutBorrow} ]
- ///
- /// to
- ///
- /// 1. closure_captures
- /// foo -> UpvarId(foo, c), bar -> UpvarId(bar, c)
- ///
- /// 2. upvar_capture_map
- /// UpvarId(foo,c) -> MutBorrow, UpvarId(bar, c) -> ByValue
- fn min_captures_to_closure_captures_bridge(&self, closure_def_id: DefId) {
- let mut closure_captures: FxIndexMap<hir::HirId, ty::UpvarId> = Default::default();
- let mut upvar_capture_map = ty::UpvarCaptureMap::default();
-
- if let Some(min_captures) =
- self.typeck_results.borrow().closure_min_captures.get(&closure_def_id)
- {
- for (var_hir_id, min_list) in min_captures.iter() {
- for captured_place in min_list {
- let place = &captured_place.place;
- let capture_info = captured_place.info;
-
- let upvar_id = match place.base {
- PlaceBase::Upvar(upvar_id) => upvar_id,
- base => bug!("Expected upvar, found={:?}", base),
- };
-
- assert_eq!(upvar_id.var_path.hir_id, *var_hir_id);
- assert_eq!(upvar_id.closure_expr_id, closure_def_id.expect_local());
-
- closure_captures.insert(*var_hir_id, upvar_id);
-
- let new_capture_kind =
- if let Some(capture_kind) = upvar_capture_map.get(&upvar_id) {
- // upvar_capture_map only stores the UpvarCapture (CaptureKind),
- // so we create a fake capture info with no expression.
- let fake_capture_info = ty::CaptureInfo {
- capture_kind_expr_id: None,
- path_expr_id: None,
- capture_kind: *capture_kind,
- };
- determine_capture_info(fake_capture_info, capture_info).capture_kind
- } else {
- capture_info.capture_kind
- };
- upvar_capture_map.insert(upvar_id, new_capture_kind);
- }
- }
- }
- debug!("For closure_def_id={:?}, closure_captures={:#?}", closure_def_id, closure_captures);
- debug!(
- "For closure_def_id={:?}, upvar_capture_map={:#?}",
- closure_def_id, upvar_capture_map
- );
-
- if !closure_captures.is_empty() {
- self.typeck_results
- .borrow_mut()
- .closure_captures
- .insert(closure_def_id, closure_captures);
-
- self.typeck_results.borrow_mut().upvar_capture_map.extend(upvar_capture_map);
- }
- }
-
/// Analyzes the information collected by `InferBorrowKind` to compute the min number of
/// Places (and corresponding capture kind) that we need to keep track of to support all
/// the required captured paths.
@@ -448,7 +380,7 @@
base => bug!("Expected upvar, found={:?}", base),
};
- let place = restrict_capture_precision(place, capture_info.capture_kind);
+ let place = restrict_capture_precision(place);
let min_cap_list = match root_var_min_capture_list.get_mut(&var_hir_id) {
None => {
@@ -537,7 +469,7 @@
span: Span,
body: &'tcx hir::Body<'tcx>,
) {
- let need_migrations = self.compute_2229_migrations_first_pass(
+ let need_migrations = self.compute_2229_migrations(
closure_def_id,
span,
capture_clause,
@@ -546,9 +478,7 @@
);
if !need_migrations.is_empty() {
- let need_migrations_hir_id = need_migrations.iter().map(|m| m.0).collect::<Vec<_>>();
-
- let migrations_text = migration_suggestion_for_2229(self.tcx, &need_migrations_hir_id);
+ let migrations_text = migration_suggestion_for_2229(self.tcx, &need_migrations);
let local_def_id = closure_def_id.expect_local();
let closure_hir_id = self.tcx.hir().local_def_id_to_hir_id(local_def_id);
@@ -575,15 +505,15 @@
/// - It would have been moved into the closure when `capture_disjoint_fields` wasn't
/// enabled, **and**
/// - It wasn't completely captured by the closure, **and**
- /// - The type of the root variable needs Drop.
- fn compute_2229_migrations_first_pass(
+    /// - At least one of the paths starting at this root variable that is not captured needs Drop.
+ fn compute_2229_migrations(
&self,
closure_def_id: DefId,
closure_span: Span,
closure_clause: hir::CaptureBy,
body: &'tcx hir::Body<'tcx>,
min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
- ) -> Vec<(hir::HirId, Ty<'tcx>)> {
+ ) -> Vec<hir::HirId> {
fn resolve_ty<T: TypeFoldable<'tcx>>(
fcx: &FnCtxt<'_, 'tcx>,
span: Span,
@@ -619,7 +549,7 @@
match closure_clause {
// Only migrate if closure is a move closure
- hir::CaptureBy::Value => need_migrations.push((var_hir_id, ty)),
+ hir::CaptureBy::Value => need_migrations.push(var_hir_id),
hir::CaptureBy::Ref => {}
}
@@ -627,30 +557,295 @@
continue;
};
- let is_moved = root_var_min_capture_list
+ let projections_list = root_var_min_capture_list
.iter()
- .any(|capture| matches!(capture.info.capture_kind, ty::UpvarCapture::ByValue(_)));
+ .filter_map(|captured_place| match captured_place.info.capture_kind {
+ // Only care about captures that are moved into the closure
+ ty::UpvarCapture::ByValue(..) => {
+ Some(captured_place.place.projections.as_slice())
+ }
+ ty::UpvarCapture::ByRef(..) => None,
+ })
+ .collect::<Vec<_>>();
+
+ let is_moved = !projections_list.is_empty();
let is_not_completely_captured =
root_var_min_capture_list.iter().any(|capture| capture.place.projections.len() > 0);
- if is_moved && is_not_completely_captured {
- need_migrations.push((var_hir_id, ty));
+ if is_moved
+ && is_not_completely_captured
+ && self.has_significant_drop_outside_of_captures(
+ closure_def_id,
+ closure_span,
+ ty,
+ projections_list,
+ )
+ {
+ need_migrations.push(var_hir_id);
}
}
need_migrations
}
- fn init_capture_kind(
+    /// This is a helper function to `compute_2229_migrations`. Provided the type
+ /// of a root variable and a list of captured paths starting at this root variable (expressed
+ /// using list of `Projection` slices), it returns true if there is a path that is not
+ /// captured starting at this root variable that implements Drop.
+ ///
+ /// FIXME(project-rfc-2229#35): This should return true only for significant drops.
+ /// A drop is significant if it's implemented by the user or does
+ /// anything that will have any observable behavior (other than
+ /// freeing up memory).
+ ///
+    /// The way this function works is that, at a given call, it looks at the type `base_path_ty` of
+    /// some base path, say P, and then at the list of projection slices which represent the
+    /// different captures moved into the closure starting off of P.
+ ///
+ /// This will make more sense with an example:
+ ///
+ /// ```rust
+ /// #![feature(capture_disjoint_fields)]
+ ///
+ /// struct FancyInteger(i32); // This implements Drop
+ ///
+ /// struct Point { x: FancyInteger, y: FancyInteger }
+ /// struct Color;
+ ///
+ /// struct Wrapper { p: Point, c: Color }
+ ///
+ /// fn f(w: Wrapper) {
+ /// let c = || {
+ /// // Closure captures w.p.x and w.c by move.
+ /// };
+ ///
+ /// c();
+ /// }
+ /// ```
+ ///
+ /// If `capture_disjoint_fields` wasn't enabled the closure would've moved `w` instead of the
+    /// precise paths. If we look closely, `w.p.y`, which implements Drop, isn't captured, and
+ /// therefore Drop ordering would change and we want this function to return true.
+ ///
+ /// Call stack to figure out if we need to migrate for `w` would look as follows:
+ ///
+ /// Our initial base path is just `w`, and the paths captured from it are `w[p, x]` and
+ /// `w[c]`.
+ /// Notation:
+ /// - Ty(place): Type of place
+ /// - `(a, b)`: Represents the function parameters `base_path_ty` and `captured_projs`
+ /// respectively.
+ /// ```
+ /// (Ty(w), [ &[p, x], &[c] ])
+ /// |
+ /// ----------------------------
+ /// | |
+ /// v v
+ /// (Ty(w.p), [ &[x] ]) (Ty(w.c), [ &[] ]) // I(1)
+ /// | |
+ /// v v
+ /// (Ty(w.p), [ &[x] ]) false
+ /// |
+ /// |
+ /// -------------------------------
+ /// | |
+ /// v v
+ /// (Ty((w.p).x), [ &[] ]) (Ty((w.p).y), []) // IMP 2
+ /// | |
+ /// v v
+ /// false NeedsDrop(Ty(w.p.y))
+ /// |
+ /// v
+ /// true
+ /// ```
+ ///
+ /// IMP 1 `(Ty(w.c), [ &[] ])`: Notice the single empty slice inside `captured_projs`.
+ /// This implies that the `w.c` is completely captured by the closure.
+ /// Since drop for this path will be called when the closure is
+ /// dropped we don't need to migrate for it.
+ ///
+ /// IMP 2 `(Ty((w.p).y), [])`: Notice that `captured_projs` is empty. This implies that this
+ /// path wasn't captured by the closure. Also note that even
+ /// though we didn't capture this path, the function visits it,
+ /// which is kind of the point of this function. We then return
+ /// if the type of `w.p.y` implements Drop, which in this case is
+ /// true.
+ ///
+ /// Consider another example:
+ ///
+ /// ```rust
+ /// struct X;
+ /// impl Drop for X {}
+ ///
+ /// struct Y(X);
+ /// impl Drop for Y {}
+ ///
+ /// fn foo() {
+ /// let y = Y(X);
+ /// let c = || move(y.0);
+ /// }
+ /// ```
+ ///
+ /// Note that `y.0` is captured by the closure. When this function is called for `y`, it will
+ /// return true, because even though all paths starting at `y` are captured, `y` itself
+ /// implements Drop which will be affected since `y` isn't completely captured.
+ fn has_significant_drop_outside_of_captures(
&self,
+ closure_def_id: DefId,
+ closure_span: Span,
+ base_path_ty: Ty<'tcx>,
+ captured_projs: Vec<&[Projection<'tcx>]>,
+ ) -> bool {
+ let needs_drop = |ty: Ty<'tcx>| {
+ ty.needs_drop(self.tcx, self.tcx.param_env(closure_def_id.expect_local()))
+ };
+
+ let is_drop_defined_for_ty = |ty: Ty<'tcx>| {
+ let drop_trait = self.tcx.require_lang_item(hir::LangItem::Drop, Some(closure_span));
+ let ty_params = self.tcx.mk_substs_trait(base_path_ty, &[]);
+ self.tcx.type_implements_trait((
+ drop_trait,
+ ty,
+ ty_params,
+ self.tcx.param_env(closure_def_id.expect_local()),
+ ))
+ };
+
+ let is_drop_defined_for_ty = is_drop_defined_for_ty(base_path_ty);
+
+ // If there is a case where no projection is applied on top of current place
+ // then there must be exactly one capture corresponding to such a case. Note that this
+ // represents the case of the path being completely captured by the variable.
+ //
+        // e.g. If `a.b` is captured and we are processing `a.b`, then we can't have the closure also
+        // capture `a.b.c`, because that violates min capture.
+ let is_completely_captured = captured_projs.iter().any(|projs| projs.is_empty());
+
+ assert!(!is_completely_captured || (captured_projs.len() == 1));
+
+ if is_completely_captured {
+            // The place is captured entirely, so it doesn't matter if it needs a dtor; it will be
+            // dropped when the closure is dropped.
+ return false;
+ }
+
+ if is_drop_defined_for_ty {
+ // If drop is implemented for this type then we need it to be fully captured,
+            // which we know it is not because of the previous check. Therefore we need
+            // to migrate.
+ return true;
+ }
+
+ if captured_projs.is_empty() {
+ return needs_drop(base_path_ty);
+ }
+
+ match base_path_ty.kind() {
+ // Observations:
+ // - `captured_projs` is not empty. Therefore we can call
+ // `captured_projs.first().unwrap()` safely.
+            // - All entries in `captured_projs` have at least one projection.
+ // Therefore we can call `captured_projs.first().unwrap().first().unwrap()` safely.
+
+            // We don't capture derefs in case of move captures, which would have to be applied to
+ // access any further paths.
+ ty::Adt(def, _) if def.is_box() => unreachable!(),
+ ty::Ref(..) => unreachable!(),
+ ty::RawPtr(..) => unreachable!(),
+
+ ty::Adt(def, substs) => {
+                // Multi-variant enums are captured in their entirety,
+                // which would've been handled in the case of a single empty slice in `captured_projs`.
+ assert_eq!(def.variants.len(), 1);
+
+ // Only Field projections can be applied to a non-box Adt.
+ assert!(
+ captured_projs.iter().all(|projs| matches!(
+ projs.first().unwrap().kind,
+ ProjectionKind::Field(..)
+ ))
+ );
+ def.variants.get(VariantIdx::new(0)).unwrap().fields.iter().enumerate().any(
+ |(i, field)| {
+ let paths_using_field = captured_projs
+ .iter()
+ .filter_map(|projs| {
+ if let ProjectionKind::Field(field_idx, _) =
+ projs.first().unwrap().kind
+ {
+ if (field_idx as usize) == i { Some(&projs[1..]) } else { None }
+ } else {
+ unreachable!();
+ }
+ })
+ .collect();
+
+ let after_field_ty = field.ty(self.tcx, substs);
+ self.has_significant_drop_outside_of_captures(
+ closure_def_id,
+ closure_span,
+ after_field_ty,
+ paths_using_field,
+ )
+ },
+ )
+ }
+
+ ty::Tuple(..) => {
+ // Only Field projections can be applied to a tuple.
+ assert!(
+ captured_projs.iter().all(|projs| matches!(
+ projs.first().unwrap().kind,
+ ProjectionKind::Field(..)
+ ))
+ );
+
+ base_path_ty.tuple_fields().enumerate().any(|(i, element_ty)| {
+ let paths_using_field = captured_projs
+ .iter()
+ .filter_map(|projs| {
+ if let ProjectionKind::Field(field_idx, _) = projs.first().unwrap().kind
+ {
+ if (field_idx as usize) == i { Some(&projs[1..]) } else { None }
+ } else {
+ unreachable!();
+ }
+ })
+ .collect();
+
+ self.has_significant_drop_outside_of_captures(
+ closure_def_id,
+ closure_span,
+ element_ty,
+ paths_using_field,
+ )
+ })
+ }
+
+ // Anything else would be completely captured and therefore handled already.
+ _ => unreachable!(),
+ }
+ }
+
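
To complement the doc comment above, here is a hedged, compiling version of its `Wrapper` walkthrough (the closure body is only illustrative; without `capture_disjoint_fields` the whole of `w` is still moved, and the comment describes the precise-capture behavior the analysis reasons about):

    struct FancyInteger(i32);

    impl Drop for FancyInteger {
        fn drop(&mut self) {
            println!("dropping FancyInteger({})", self.0);
        }
    }

    struct Point {
        x: FancyInteger,
        y: FancyInteger,
    }

    struct Color;

    struct Wrapper {
        p: Point,
        c: Color,
    }

    fn f(w: Wrapper) {
        let c = move || {
            // Under precise capture only `w.p.x` and `w.c` move into the closure, so
            // `w.p.y` would be dropped at the end of `f` rather than when the closure
            // is dropped; that change in drop order is what the migration detects.
            let _x = w.p.x;
            let _c = w.c;
        };
        c();
    }

    fn main() {
        f(Wrapper { p: Point { x: FancyInteger(1), y: FancyInteger(2) }, c: Color });
    }
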
+ fn init_capture_kind_for_place(
+ &self,
+ place: &Place<'tcx>,
capture_clause: hir::CaptureBy,
upvar_id: ty::UpvarId,
closure_span: Span,
) -> ty::UpvarCapture<'tcx> {
match capture_clause {
- hir::CaptureBy::Value => ty::UpvarCapture::ByValue(None),
- hir::CaptureBy::Ref => {
+            // In case of a move closure, if the data is accessed through a reference, we
+ // want to capture by ref to allow precise capture using reborrows.
+ //
+ // If the data will be moved out of this place, then the place will be truncated
+ // at the first Deref in `adjust_upvar_borrow_kind_for_consume` and then moved into
+ // the closure.
+ hir::CaptureBy::Value if !place.deref_tys().any(ty::TyS::is_ref) => {
+ ty::UpvarCapture::ByValue(None)
+ }
+ hir::CaptureBy::Value | hir::CaptureBy::Ref => {
let origin = UpvarRegion(upvar_id, closure_span);
let upvar_region = self.next_region_var(origin);
let upvar_borrow = ty::UpvarBorrow { kind: ty::ImmBorrow, region: upvar_region };
@@ -791,6 +986,52 @@
}
}
+/// Truncate the capture so that the place being borrowed is in accordance with RFC 1240,
+/// which states that it's unsafe to take a reference into a struct marked `repr(packed)`.
+fn restrict_repr_packed_field_ref_capture<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ place: &Place<'tcx>,
+) -> Place<'tcx> {
+ let pos = place.projections.iter().enumerate().position(|(i, p)| {
+ let ty = place.ty_before_projection(i);
+
+ // Return true for fields of packed structs, unless those fields have alignment 1.
+ match p.kind {
+ ProjectionKind::Field(..) => match ty.kind() {
+ ty::Adt(def, _) if def.repr.packed() => {
+ match tcx.layout_raw(param_env.and(p.ty)) {
+ Ok(layout) if layout.align.abi.bytes() == 1 => {
+ // if the alignment is 1, the type can't be further
+ // disaligned.
+ debug!(
+ "restrict_repr_packed_field_ref_capture: ({:?}) - align = 1",
+ place
+ );
+ false
+ }
+ _ => {
+ debug!("restrict_repr_packed_field_ref_capture: ({:?}) - true", place);
+ true
+ }
+ }
+ }
+
+ _ => false,
+ },
+ _ => false,
+ }
+ });
+
+ let mut place = place.clone();
+
+ if let Some(pos) = pos {
+ place.projections.truncate(pos);
+ }
+
+ place
+}
+
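
A hedged illustration of the constraint this helper enforces (RFC 1240: no references into fields of a `repr(packed)` struct); the type below is invented, and the point is that the capture is truncated to the whole struct so the field is only ever read by value:

    #[repr(packed)]
    struct Packed {
        tag: u8,
        value: u32,
    }

    fn main() {
        let p = Packed { tag: 1, value: 7 };
        // Capturing `&p.value` could create a misaligned reference, so the closure
        // captures `p` itself and copies the field out by value.
        let c = move || {
            let v = p.value;
            v
        };
        assert_eq!(c(), 7);
    }
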
struct InferBorrowKind<'a, 'tcx> {
fcx: &'a FnCtxt<'a, 'tcx>,
@@ -840,6 +1081,7 @@
/// Place { V1, [ProjectionKind::Field(Index=1, Variant=0)] } : CaptureKind { E2, MutableBorrow }
/// ```
capture_information: InferredCaptureInformation<'tcx>,
+ fake_reads: Vec<(Place<'tcx>, FakeReadCause, hir::HirId)>,
}
impl<'a, 'tcx> InferBorrowKind<'a, 'tcx> {
@@ -854,12 +1096,25 @@
place_with_id, diag_expr_id, mode
);
- // we only care about moves
- match mode {
- euv::Copy => {
+ match (self.capture_clause, mode) {
+ // In non-move closures, we only care about moves
+ (hir::CaptureBy::Ref, euv::Copy) => return,
+
+            // We want to capture Copy types that are read through a ref via a reborrow
+ (hir::CaptureBy::Value, euv::Copy)
+ if place_with_id.place.deref_tys().any(ty::TyS::is_ref) =>
+ {
return;
}
- euv::Move => {}
+
+ (hir::CaptureBy::Ref, euv::Move) | (hir::CaptureBy::Value, euv::Move | euv::Copy) => {}
+ };
+
+ let place = truncate_capture_for_move(place_with_id.place.clone());
+ let place_with_id = PlaceWithHirId { place: place.clone(), hir_id: place_with_id.hir_id };
+
+ if !self.capture_information.contains_key(&place) {
+ self.init_capture_info_for_place(&place_with_id, diag_expr_id);
}
let tcx = self.fcx.tcx;
@@ -873,13 +1128,15 @@
let usage_span = tcx.hir().span(diag_expr_id);
- // To move out of an upvar, this must be a FnOnce closure
- self.adjust_closure_kind(
- upvar_id.closure_expr_id,
- ty::ClosureKind::FnOnce,
- usage_span,
- place_with_id.place.clone(),
- );
+ if matches!(mode, euv::Move) {
+ // To move out of an upvar, this must be a FnOnce closure
+ self.adjust_closure_kind(
+ upvar_id.closure_expr_id,
+ ty::ClosureKind::FnOnce,
+ usage_span,
+ place.clone(),
+ );
+ }
let capture_info = ty::CaptureInfo {
capture_kind_expr_id: Some(diag_expr_id),
@@ -1062,8 +1319,12 @@
if let PlaceBase::Upvar(upvar_id) = place_with_id.place.base {
assert_eq!(self.closure_def_id.expect_local(), upvar_id.closure_expr_id);
- let capture_kind =
- self.fcx.init_capture_kind(self.capture_clause, upvar_id, self.closure_span);
+ let capture_kind = self.fcx.init_capture_kind_for_place(
+ &place_with_id.place,
+ self.capture_clause,
+ upvar_id,
+ self.closure_span,
+ );
let expr_id = Some(diag_expr_id);
let capture_info = ty::CaptureInfo {
@@ -1082,6 +1343,12 @@
}
impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> {
+ fn fake_read(&mut self, place: Place<'tcx>, cause: FakeReadCause, diag_expr_id: hir::HirId) {
+ if let PlaceBase::Upvar(_) = place.base {
+ self.fake_reads.push((place, cause, diag_expr_id));
+ }
+ }
+
fn consume(
&mut self,
place_with_id: &PlaceWithHirId<'tcx>,
@@ -1110,8 +1377,15 @@
place_with_id, diag_expr_id, bk
);
+ let place = restrict_repr_packed_field_ref_capture(
+ self.fcx.tcx,
+ self.fcx.param_env,
+ &place_with_id.place,
+ );
+ let place_with_id = PlaceWithHirId { place, ..*place_with_id };
+
if !self.capture_information.contains_key(&place_with_id.place) {
- self.init_capture_info_for_place(place_with_id, diag_expr_id);
+ self.init_capture_info_for_place(&place_with_id, diag_expr_id);
}
match bk {
@@ -1128,24 +1402,15 @@
fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
debug!("mutate(assignee_place={:?}, diag_expr_id={:?})", assignee_place, diag_expr_id);
- if !self.capture_information.contains_key(&assignee_place.place) {
- self.init_capture_info_for_place(assignee_place, diag_expr_id);
- }
-
- self.adjust_upvar_borrow_kind_for_mut(assignee_place, diag_expr_id);
+ self.borrow(assignee_place, diag_expr_id, ty::BorrowKind::MutBorrow);
}
}
/// Truncate projections so that following rules are obeyed by the captured `place`:
-///
-/// - No Derefs in move closure, this will result in value behind a reference getting moved.
/// - No projections are applied to raw pointers, since these require unsafe blocks. We capture
/// them completely.
/// - No Index projections are captured, since arrays are captured completely.
-fn restrict_capture_precision<'tcx>(
- mut place: Place<'tcx>,
- capture_kind: ty::UpvarCapture<'tcx>,
-) -> Place<'tcx> {
+fn restrict_capture_precision<'tcx>(mut place: Place<'tcx>) -> Place<'tcx> {
if place.projections.is_empty() {
// Nothing to do here
return place;
@@ -1157,7 +1422,6 @@
}
let mut truncated_length = usize::MAX;
- let mut first_deref_projection = usize::MAX;
for (i, proj) in place.projections.iter().enumerate() {
if proj.ty.is_unsafe_ptr() {
@@ -1171,31 +1435,30 @@
truncated_length = truncated_length.min(i);
break;
}
- ProjectionKind::Deref => {
- // We only drop Derefs in case of move closures
- // There might be an index projection or raw ptr ahead, so we don't stop here.
- first_deref_projection = first_deref_projection.min(i);
- }
+ ProjectionKind::Deref => {}
ProjectionKind::Field(..) => {} // ignore
ProjectionKind::Subslice => {} // We never capture this
}
}
- let length = place
- .projections
- .len()
- .min(truncated_length)
- // In case of capture `ByValue` we want to not capture derefs
- .min(match capture_kind {
- ty::UpvarCapture::ByValue(..) => first_deref_projection,
- ty::UpvarCapture::ByRef(..) => usize::MAX,
- });
+ let length = place.projections.len().min(truncated_length);
place.projections.truncate(length);
place
}
+/// Truncates a place so that the resultant capture doesn't move data out of a reference
+fn truncate_capture_for_move(mut place: Place<'tcx>) -> Place<'tcx> {
+ if let Some(i) = place.projections.iter().position(|proj| proj.kind == ProjectionKind::Deref) {
+ // We only drop Derefs in case of move closures
+ // There might be an index projection or raw ptr ahead, so we don't stop here.
+ place.projections.truncate(i);
+ }
+
+ place
+}
+
fn construct_place_string(tcx: TyCtxt<'_>, place: &Place<'tcx>) -> String {
let variable_name = match place.base {
PlaceBase::Upvar(upvar_id) => var_name(tcx, upvar_id.var_path.hir_id).to_string(),
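Sketching the effect of `truncate_capture_for_move` above in user terms (illustrative code, not from the patch): when a `move` closure only reads through a reference, the capture is truncated at the first Deref, so the reference itself is captured and the data behind it is not moved.

    fn main() {
        let s = String::from("hello");
        let r = &s;
        // The closure captures (copies) the reference `r`; the String stays
        // owned by `s`, so both can still be used afterwards.
        let len = move || r.len();
        println!("{} {}", len(), s.len());
    }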
@@ -1211,7 +1474,7 @@
ProjectionKind::Subslice => String::from("Subslice"),
};
if i != 0 {
- projections_str.push_str(",");
+ projections_str.push(',');
}
projections_str.push_str(proj.as_str());
}
@@ -1382,14 +1645,8 @@
// Assume of length of projections_b = m
let projections_b = &place_b.projections;
- let mut same_initial_projections = true;
-
- for (proj_a, proj_b) in projections_a.iter().zip(projections_b.iter()) {
- if proj_a != proj_b {
- same_initial_projections = false;
- break;
- }
- }
+ let same_initial_projections =
+ projections_a.iter().zip(projections_b.iter()).all(|(proj_a, proj_b)| proj_a == proj_b);
if same_initial_projections {
// First min(n, m) projections are the same
diff --git a/compiler/rustc_typeck/src/check/wfcheck.rs b/compiler/rustc_typeck/src/check/wfcheck.rs
index c90db47..00c6550 100644
--- a/compiler/rustc_typeck/src/check/wfcheck.rs
+++ b/compiler/rustc_typeck/src/check/wfcheck.rs
@@ -80,8 +80,8 @@
let item = tcx.hir().expect_item(hir_id);
debug!(
- "check_item_well_formed(it.hir_id={:?}, it.name={})",
- item.hir_id,
+ "check_item_well_formed(it.def_id={:?}, it.name={})",
+ item.def_id,
tcx.def_path_str(def_id.to_def_id())
);
@@ -105,7 +105,7 @@
// for `T`
hir::ItemKind::Impl(ref impl_) => {
let is_auto = tcx
- .impl_trait_ref(tcx.hir().local_def_id(item.hir_id))
+ .impl_trait_ref(item.def_id)
.map_or(false, |trait_ref| tcx.trait_is_auto(trait_ref.def_id));
if let (hir::Defaultness::Default { .. }, true) = (impl_.defaultness, is_auto) {
let sp = impl_.of_trait.as_ref().map_or(item.span, |t| t.path.span);
@@ -141,23 +141,23 @@
}
}
hir::ItemKind::Fn(ref sig, ..) => {
- check_item_fn(tcx, item.hir_id, item.ident, item.span, sig.decl);
+ check_item_fn(tcx, item.hir_id(), item.ident, item.span, sig.decl);
}
hir::ItemKind::Static(ref ty, ..) => {
- check_item_type(tcx, item.hir_id, ty.span, false);
+ check_item_type(tcx, item.hir_id(), ty.span, false);
}
hir::ItemKind::Const(ref ty, ..) => {
- check_item_type(tcx, item.hir_id, ty.span, false);
+ check_item_type(tcx, item.hir_id(), ty.span, false);
}
hir::ItemKind::ForeignMod { items, .. } => {
for it in items.iter() {
let it = tcx.hir().foreign_item(it.id);
match it.kind {
hir::ForeignItemKind::Fn(ref decl, ..) => {
- check_item_fn(tcx, it.hir_id, it.ident, it.span, decl)
+ check_item_fn(tcx, it.hir_id(), it.ident, it.span, decl)
}
hir::ForeignItemKind::Static(ref ty, ..) => {
- check_item_type(tcx, it.hir_id, ty.span, true)
+ check_item_type(tcx, it.hir_id(), ty.span, true)
}
hir::ForeignItemKind::Type => (),
}
@@ -197,7 +197,7 @@
_ => None,
};
check_object_unsafe_self_trait_by_name(tcx, &trait_item);
- check_associated_item(tcx, trait_item.hir_id, trait_item.span, method_sig);
+ check_associated_item(tcx, trait_item.hir_id(), trait_item.span, method_sig);
}
fn could_be_self(trait_def_id: LocalDefId, ty: &hir::Ty<'_>) -> bool {
@@ -213,9 +213,9 @@
/// Detect when an object unsafe trait is referring to itself in one of its associated items.
/// When this is done, suggest using `Self` instead.
fn check_object_unsafe_self_trait_by_name(tcx: TyCtxt<'_>, item: &hir::TraitItem<'_>) {
- let (trait_name, trait_def_id) = match tcx.hir().get(tcx.hir().get_parent_item(item.hir_id)) {
+ let (trait_name, trait_def_id) = match tcx.hir().get(tcx.hir().get_parent_item(item.hir_id())) {
hir::Node::Item(item) => match item.kind {
- hir::ItemKind::Trait(..) => (item.ident, tcx.hir().local_def_id(item.hir_id)),
+ hir::ItemKind::Trait(..) => (item.ident, item.def_id),
_ => return,
},
_ => return,
@@ -271,7 +271,7 @@
_ => None,
};
- check_associated_item(tcx, impl_item.hir_id, impl_item.span, method_sig);
+ check_associated_item(tcx, impl_item.hir_id(), impl_item.span, method_sig);
}
fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) {
@@ -432,7 +432,7 @@
}
fn for_item<'tcx>(tcx: TyCtxt<'tcx>, item: &hir::Item<'_>) -> CheckWfFcxBuilder<'tcx> {
- for_id(tcx, item.hir_id, item.span)
+ for_id(tcx, item.hir_id(), item.span)
}
fn for_id(tcx: TyCtxt<'_>, id: hir::HirId, span: Span) -> CheckWfFcxBuilder<'_> {
@@ -465,8 +465,7 @@
{
for_item(tcx, item).with_fcx(|fcx, fcx_tcx| {
let variants = lookup_fields(fcx);
- let def_id = fcx.tcx.hir().local_def_id(item.hir_id);
- let packed = fcx.tcx.adt_def(def_id).repr.packed();
+ let packed = fcx.tcx.adt_def(item.def_id).repr.packed();
for variant in &variants {
// For DST, or when drop needs to copy things around, all
@@ -482,7 +481,7 @@
// Just treat unresolved type expression as if it needs drop.
true
} else {
- ty.needs_drop(fcx_tcx, fcx_tcx.param_env(def_id))
+ ty.needs_drop(fcx_tcx, fcx_tcx.param_env(item.def_id))
}
}
};
@@ -541,7 +540,7 @@
}
}
- check_where_clauses(tcx, fcx, item.span, def_id.to_def_id(), None);
+ check_where_clauses(tcx, fcx, item.span, item.def_id.to_def_id(), None);
// No implied bounds in a struct definition.
vec![]
@@ -549,15 +548,13 @@
}
fn check_trait(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
- debug!("check_trait: {:?}", item.hir_id);
+ debug!("check_trait: {:?}", item.def_id);
- let trait_def_id = tcx.hir().local_def_id(item.hir_id);
-
- let trait_def = tcx.trait_def(trait_def_id);
+ let trait_def = tcx.trait_def(item.def_id);
if trait_def.is_marker
|| matches!(trait_def.specialization_kind, TraitSpecializationKind::Marker)
{
- for associated_def_id in &*tcx.associated_item_def_ids(trait_def_id) {
+ for associated_def_id in &*tcx.associated_item_def_ids(item.def_id) {
struct_span_err!(
tcx.sess,
tcx.def_span(*associated_def_id),
@@ -569,7 +566,7 @@
}
for_item(tcx, item).with_fcx(|fcx, _| {
- check_where_clauses(tcx, fcx, item.span, trait_def_id.to_def_id(), None);
+ check_where_clauses(tcx, fcx, item.span, item.def_id.to_def_id(), None);
vec![]
});
@@ -665,14 +662,12 @@
debug!("check_impl: {:?}", item);
for_item(tcx, item).with_fcx(|fcx, tcx| {
- let item_def_id = fcx.tcx.hir().local_def_id(item.hir_id);
-
match *ast_trait_ref {
Some(ref ast_trait_ref) => {
// `#[rustc_reservation_impl]` impls are not real impls and
// therefore don't need to be WF (the trait's `Self: Trait` predicate
// won't hold).
- let trait_ref = fcx.tcx.impl_trait_ref(item_def_id).unwrap();
+ let trait_ref = fcx.tcx.impl_trait_ref(item.def_id).unwrap();
let trait_ref =
fcx.normalize_associated_types_in(ast_trait_ref.path.span, trait_ref);
let obligations = traits::wf::trait_obligations(
@@ -688,7 +683,7 @@
}
}
None => {
- let self_ty = fcx.tcx.type_of(item_def_id);
+ let self_ty = fcx.tcx.type_of(item.def_id);
let self_ty = fcx.normalize_associated_types_in(item.span, self_ty);
fcx.register_wf_obligation(
self_ty.into(),
@@ -698,9 +693,9 @@
}
}
- check_where_clauses(tcx, fcx, item.span, item_def_id.to_def_id(), None);
+ check_where_clauses(tcx, fcx, item.span, item.def_id.to_def_id(), None);
- fcx.impl_implied_bounds(item_def_id.to_def_id(), item.span)
+ fcx.impl_implied_bounds(item.def_id.to_def_id(), item.span)
});
}
@@ -1238,15 +1233,14 @@
item: &hir::Item<'tcx>,
hir_generics: &hir::Generics<'_>,
) {
- let item_def_id = tcx.hir().local_def_id(item.hir_id);
- let ty = tcx.type_of(item_def_id);
+ let ty = tcx.type_of(item.def_id);
if tcx.has_error_field(ty) {
return;
}
- let ty_predicates = tcx.predicates_of(item_def_id);
+ let ty_predicates = tcx.predicates_of(item.def_id);
assert_eq!(ty_predicates.parent, None);
- let variances = tcx.variances_of(item_def_id);
+ let variances = tcx.variances_of(item.def_id);
let mut constrained_parameters: FxHashSet<_> = variances
.iter()
@@ -1354,22 +1348,19 @@
fn visit_item(&mut self, i: &'tcx hir::Item<'tcx>) {
debug!("visit_item: {:?}", i);
- let def_id = self.tcx.hir().local_def_id(i.hir_id);
- self.tcx.ensure().check_item_well_formed(def_id);
+ self.tcx.ensure().check_item_well_formed(i.def_id);
hir_visit::walk_item(self, i);
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
debug!("visit_trait_item: {:?}", trait_item);
- let def_id = self.tcx.hir().local_def_id(trait_item.hir_id);
- self.tcx.ensure().check_trait_item_well_formed(def_id);
+ self.tcx.ensure().check_trait_item_well_formed(trait_item.def_id);
hir_visit::walk_trait_item(self, trait_item);
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
debug!("visit_impl_item: {:?}", impl_item);
- let def_id = self.tcx.hir().local_def_id(impl_item.hir_id);
- self.tcx.ensure().check_impl_item_well_formed(def_id);
+ self.tcx.ensure().check_impl_item_well_formed(impl_item.def_id);
hir_visit::walk_impl_item(self, impl_item);
}
diff --git a/compiler/rustc_typeck/src/check/writeback.rs b/compiler/rustc_typeck/src/check/writeback.rs
index 4d18b2c..9a183ed 100644
--- a/compiler/rustc_typeck/src/check/writeback.rs
+++ b/compiler/rustc_typeck/src/check/writeback.rs
@@ -4,11 +4,15 @@
use crate::check::FnCtxt;
+use rustc_data_structures::stable_map::FxHashMap;
use rustc_errors::ErrorReported;
use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
use rustc_infer::infer::InferCtxt;
+use rustc_middle::hir::place::Place as HirPlace;
+use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast};
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder};
use rustc_middle::ty::{self, Ty, TyCtxt};
@@ -56,7 +60,7 @@
}
wbcx.visit_body(body);
wbcx.visit_min_capture_map();
- wbcx.visit_upvar_capture_map();
+ wbcx.visit_fake_reads_map();
wbcx.visit_closures();
wbcx.visit_liberated_fn_sigs();
wbcx.visit_fru_field_types();
@@ -74,9 +78,6 @@
wbcx.typeck_results.treat_byte_string_as_slice =
mem::take(&mut self.typeck_results.borrow_mut().treat_byte_string_as_slice);
- wbcx.typeck_results.closure_captures =
- mem::take(&mut self.typeck_results.borrow_mut().closure_captures);
-
if self.is_tainted_by_errors() {
// FIXME(eddyb) keep track of `ErrorReported` from where the error was emitted.
wbcx.typeck_results.tainted_by_errors = Some(ErrorReported);
@@ -138,7 +139,7 @@
// operating on scalars, we clear the overload.
fn fix_scalar_builtin_expr(&mut self, e: &hir::Expr<'_>) {
match e.kind {
- hir::ExprKind::Unary(hir::UnOp::UnNeg | hir::UnOp::UnNot, ref inner) => {
+ hir::ExprKind::Unary(hir::UnOp::Neg | hir::UnOp::Not, ref inner) => {
let inner_ty = self.fcx.node_ty(inner.hir_id);
let inner_ty = self.fcx.resolve_vars_if_possible(inner_ty);
@@ -348,9 +349,9 @@
let min_list_wb = min_list
.iter()
.map(|captured_place| {
- let locatable = captured_place.info.path_expr_id.unwrap_or(
- self.tcx().hir().local_def_id_to_hir_id(closure_def_id.expect_local()),
- );
+ let locatable = captured_place.info.path_expr_id.unwrap_or_else(|| {
+ self.tcx().hir().local_def_id_to_hir_id(closure_def_id.expect_local())
+ });
self.resolve(captured_place.clone(), &locatable)
})
@@ -363,20 +364,25 @@
self.typeck_results.closure_min_captures = min_captures_wb;
}
- fn visit_upvar_capture_map(&mut self) {
- for (upvar_id, upvar_capture) in self.fcx.typeck_results.borrow().upvar_capture_map.iter() {
- let new_upvar_capture = match *upvar_capture {
- ty::UpvarCapture::ByValue(span) => ty::UpvarCapture::ByValue(span),
- ty::UpvarCapture::ByRef(ref upvar_borrow) => {
- ty::UpvarCapture::ByRef(ty::UpvarBorrow {
- kind: upvar_borrow.kind,
- region: self.tcx().lifetimes.re_erased,
- })
- }
- };
- debug!("Upvar capture for {:?} resolved to {:?}", upvar_id, new_upvar_capture);
- self.typeck_results.upvar_capture_map.insert(*upvar_id, new_upvar_capture);
+ fn visit_fake_reads_map(&mut self) {
+ let mut resolved_closure_fake_reads: FxHashMap<
+ DefId,
+ Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>,
+ > = Default::default();
+ for (closure_def_id, fake_reads) in
+ self.fcx.typeck_results.borrow().closure_fake_reads.iter()
+ {
+ let mut resolved_fake_reads = Vec::<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>::new();
+ for (place, cause, hir_id) in fake_reads.iter() {
+ let locatable =
+ self.tcx().hir().local_def_id_to_hir_id(closure_def_id.expect_local());
+
+ let resolved_fake_read = self.resolve(place.clone(), &locatable);
+ resolved_fake_reads.push((resolved_fake_read, *cause, *hir_id));
+ }
+ resolved_closure_fake_reads.insert(*closure_def_id, resolved_fake_reads);
}
+ self.typeck_results.closure_fake_reads = resolved_closure_fake_reads;
}
fn visit_closures(&mut self) {
@@ -497,7 +503,8 @@
let mut skip_add = false;
if let ty::Opaque(defin_ty_def_id, _substs) = *definition_ty.kind() {
- if let hir::OpaqueTyOrigin::Misc = opaque_defn.origin {
+ if let hir::OpaqueTyOrigin::Misc | hir::OpaqueTyOrigin::TyAlias = opaque_defn.origin
+ {
if def_id == defin_ty_def_id {
debug!(
"skipping adding concrete definition for opaque type {:?} {:?}",
diff --git a/compiler/rustc_typeck/src/check_unused.rs b/compiler/rustc_typeck/src/check_unused.rs
index 31121ec..e1743a5 100644
--- a/compiler/rustc_typeck/src/check_unused.rs
+++ b/compiler/rustc_typeck/src/check_unused.rs
@@ -28,7 +28,7 @@
return;
}
if let hir::ItemKind::Use(ref path, _) = item.kind {
- self.check_import(item.hir_id, path.span);
+ self.check_import(item.item_id(), path.span);
}
}
@@ -45,24 +45,28 @@
}
impl CheckVisitor<'tcx> {
- fn check_import(&self, id: hir::HirId, span: Span) {
- let def_id = self.tcx.hir().local_def_id(id);
- if !self.tcx.maybe_unused_trait_import(def_id) {
+ fn check_import(&self, item_id: hir::ItemId, span: Span) {
+ if !self.tcx.maybe_unused_trait_import(item_id.def_id) {
return;
}
- if self.used_trait_imports.contains(&def_id) {
+ if self.used_trait_imports.contains(&item_id.def_id) {
return;
}
- self.tcx.struct_span_lint_hir(lint::builtin::UNUSED_IMPORTS, id, span, |lint| {
- let msg = if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
- format!("unused import: `{}`", snippet)
- } else {
- "unused import".to_owned()
- };
- lint.build(&msg).emit();
- });
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_IMPORTS,
+ item_id.hir_id(),
+ span,
+ |lint| {
+ let msg = if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ format!("unused import: `{}`", snippet)
+ } else {
+ "unused import".to_owned()
+ };
+ lint.build(&msg).emit();
+ },
+ );
}
}
@@ -109,7 +113,6 @@
// Collect all the extern crates (in a reliable order).
let mut crates_to_lint = vec![];
tcx.hir().krate().visit_all_item_likes(&mut CollectExternCrateVisitor {
- tcx,
crates_to_lint: &mut crates_to_lint,
});
@@ -189,8 +192,7 @@
}
}
-struct CollectExternCrateVisitor<'a, 'tcx> {
- tcx: TyCtxt<'tcx>,
+struct CollectExternCrateVisitor<'a> {
crates_to_lint: &'a mut Vec<ExternCrateToLint>,
}
@@ -211,12 +213,11 @@
warn_if_unused: bool,
}
-impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for CollectExternCrateVisitor<'a, 'tcx> {
+impl<'a, 'v> ItemLikeVisitor<'v> for CollectExternCrateVisitor<'a> {
fn visit_item(&mut self, item: &hir::Item<'_>) {
if let hir::ItemKind::ExternCrate(orig_name) = item.kind {
- let extern_crate_def_id = self.tcx.hir().local_def_id(item.hir_id);
self.crates_to_lint.push(ExternCrateToLint {
- def_id: extern_crate_def_id.to_def_id(),
+ def_id: item.def_id.to_def_id(),
span: item.span,
orig_name,
warn_if_unused: !item.ident.as_str().starts_with('_'),
diff --git a/compiler/rustc_typeck/src/coherence/builtin.rs b/compiler/rustc_typeck/src/coherence/builtin.rs
index 6726b9b..8cae61e 100644
--- a/compiler/rustc_typeck/src/coherence/builtin.rs
+++ b/compiler/rustc_typeck/src/coherence/builtin.rs
@@ -38,8 +38,7 @@
F: FnMut(TyCtxt<'tcx>, LocalDefId),
{
if Some(self.trait_def_id) == trait_def_id {
- for &impl_id in self.tcx.hir().trait_impls(self.trait_def_id) {
- let impl_def_id = self.tcx.hir().local_def_id(impl_id);
+ for &impl_def_id in self.tcx.hir().trait_impls(self.trait_def_id) {
f(self.tcx, impl_def_id);
}
}
@@ -247,7 +246,7 @@
))
.emit();
} else {
- let mut fulfill_cx = TraitEngine::new(infcx.tcx);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
for field in coerced_fields {
let predicate = predicate_for_trait_def(
@@ -507,7 +506,7 @@
}
};
- let mut fulfill_cx = TraitEngine::new(infcx.tcx);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
// Register an obligation for `A: Trait<B>`.
let cause = traits::ObligationCause::misc(span, impl_hir_id);
diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls.rs b/compiler/rustc_typeck/src/coherence/inherent_impls.rs
index 8a50085..cc592c7 100644
--- a/compiler/rustc_typeck/src/coherence/inherent_impls.rs
+++ b/compiler/rustc_typeck/src/coherence/inherent_impls.rs
@@ -50,8 +50,7 @@
_ => return,
};
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
- let self_ty = self.tcx.type_of(def_id);
+ let self_ty = self.tcx.type_of(item.def_id);
let lang_items = self.tcx.lang_items();
match *self_ty.kind() {
ty::Adt(def, _) => {
@@ -65,7 +64,7 @@
}
ty::Bool => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.bool_impl(),
None,
"bool",
@@ -76,7 +75,7 @@
}
ty::Char => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.char_impl(),
None,
"char",
@@ -87,7 +86,7 @@
}
ty::Str => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.str_impl(),
lang_items.str_alloc_impl(),
"str",
@@ -98,7 +97,7 @@
}
ty::Slice(slice_item) if slice_item == self.tcx.types.u8 => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.slice_u8_impl(),
lang_items.slice_u8_alloc_impl(),
"slice_u8",
@@ -109,7 +108,7 @@
}
ty::Slice(_) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.slice_impl(),
lang_items.slice_alloc_impl(),
"slice",
@@ -120,7 +119,7 @@
}
ty::Array(_, _) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.array_impl(),
None,
"array",
@@ -133,7 +132,7 @@
if matches!(inner.kind(), ty::Slice(_)) =>
{
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.const_slice_ptr_impl(),
None,
"const_slice_ptr",
@@ -146,7 +145,7 @@
if matches!(inner.kind(), ty::Slice(_)) =>
{
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.mut_slice_ptr_impl(),
None,
"mut_slice_ptr",
@@ -157,7 +156,7 @@
}
ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::Mutability::Not }) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.const_ptr_impl(),
None,
"const_ptr",
@@ -168,7 +167,7 @@
}
ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::Mutability::Mut }) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.mut_ptr_impl(),
None,
"mut_ptr",
@@ -179,7 +178,7 @@
}
ty::Int(ty::IntTy::I8) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.i8_impl(),
None,
"i8",
@@ -190,7 +189,7 @@
}
ty::Int(ty::IntTy::I16) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.i16_impl(),
None,
"i16",
@@ -201,7 +200,7 @@
}
ty::Int(ty::IntTy::I32) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.i32_impl(),
None,
"i32",
@@ -212,7 +211,7 @@
}
ty::Int(ty::IntTy::I64) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.i64_impl(),
None,
"i64",
@@ -223,7 +222,7 @@
}
ty::Int(ty::IntTy::I128) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.i128_impl(),
None,
"i128",
@@ -234,7 +233,7 @@
}
ty::Int(ty::IntTy::Isize) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.isize_impl(),
None,
"isize",
@@ -245,7 +244,7 @@
}
ty::Uint(ty::UintTy::U8) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.u8_impl(),
None,
"u8",
@@ -256,7 +255,7 @@
}
ty::Uint(ty::UintTy::U16) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.u16_impl(),
None,
"u16",
@@ -267,7 +266,7 @@
}
ty::Uint(ty::UintTy::U32) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.u32_impl(),
None,
"u32",
@@ -278,7 +277,7 @@
}
ty::Uint(ty::UintTy::U64) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.u64_impl(),
None,
"u64",
@@ -289,7 +288,7 @@
}
ty::Uint(ty::UintTy::U128) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.u128_impl(),
None,
"u128",
@@ -300,7 +299,7 @@
}
ty::Uint(ty::UintTy::Usize) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.usize_impl(),
None,
"usize",
@@ -311,7 +310,7 @@
}
ty::Float(ty::FloatTy::F32) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.f32_impl(),
lang_items.f32_runtime_impl(),
"f32",
@@ -322,7 +321,7 @@
}
ty::Float(ty::FloatTy::F64) => {
self.check_primitive_impl(
- def_id,
+ item.def_id,
lang_items.f64_impl(),
lang_items.f64_runtime_impl(),
"f64",
@@ -369,9 +368,8 @@
// Add the implementation to the mapping from implementation to base
// type def ID, if there is a base type for this implementation and
// the implementation does not have any associated traits.
- let impl_def_id = self.tcx.hir().local_def_id(item.hir_id);
let vec = self.impls_map.inherent_impls.entry(def_id).or_default();
- vec.push(impl_def_id.to_def_id());
+ vec.push(item.def_id.to_def_id());
} else {
struct_span_err!(
self.tcx.sess,
diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
index 50d8867..2965409 100644
--- a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
+++ b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
@@ -123,8 +123,7 @@
| hir::ItemKind::Struct(..)
| hir::ItemKind::Trait(..)
| hir::ItemKind::Union(..) => {
- let ty_def_id = self.tcx.hir().local_def_id(item.hir_id);
- let impls = self.tcx.inherent_impls(ty_def_id);
+ let impls = self.tcx.inherent_impls(item.def_id);
// If there is only one inherent impl block,
// there is nothing to overlap check it with
diff --git a/compiler/rustc_typeck/src/coherence/mod.rs b/compiler/rustc_typeck/src/coherence/mod.rs
index 4294450..f04782a 100644
--- a/compiler/rustc_typeck/src/coherence/mod.rs
+++ b/compiler/rustc_typeck/src/coherence/mod.rs
@@ -48,7 +48,20 @@
let did = Some(trait_def_id);
let li = tcx.lang_items();
- // Disallow *all* explicit impls of `DiscriminantKind`, `Sized` and `Unsize` for now.
+ // Disallow *all* explicit impls of `Pointee`, `DiscriminantKind`, `Sized` and `Unsize` for now.
+ if did == li.pointee_trait() {
+ let span = impl_header_span(tcx, impl_def_id);
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0322,
+ "explicit impls for the `Pointee` trait are not permitted"
+ )
+ .span_label(span, "impl of 'Pointee' not allowed")
+ .emit();
+ return;
+ }
+
if did == li.discriminant_kind_trait() {
let span = impl_header_span(tcx, impl_def_id);
struct_span_err!(
@@ -172,8 +185,7 @@
tcx.ensure().specialization_graph_of(def_id);
let impls = tcx.hir().trait_impls(def_id);
- for &hir_id in impls {
- let impl_def_id = tcx.hir().local_def_id(hir_id);
+ for &impl_def_id in impls {
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
check_impl(tcx, impl_def_id, trait_ref);
diff --git a/compiler/rustc_typeck/src/coherence/orphan.rs b/compiler/rustc_typeck/src/coherence/orphan.rs
index 9333aac..0593242 100644
--- a/compiler/rustc_typeck/src/coherence/orphan.rs
+++ b/compiler/rustc_typeck/src/coherence/orphan.rs
@@ -24,7 +24,6 @@
/// to prevent inundating the user with a bunch of similar error
/// reports.
fn visit_item(&mut self, item: &hir::Item<'_>) {
- let def_id = self.tcx.hir().local_def_id(item.hir_id);
// "Trait" impl
if let hir::ItemKind::Impl(hir::Impl {
generics, of_trait: Some(ref tr), self_ty, ..
@@ -32,13 +31,13 @@
{
debug!(
"coherence2::orphan check: trait impl {}",
- self.tcx.hir().node_to_string(item.hir_id)
+ self.tcx.hir().node_to_string(item.hir_id())
);
- let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
+ let trait_ref = self.tcx.impl_trait_ref(item.def_id).unwrap();
let trait_def_id = trait_ref.def_id;
let sm = self.tcx.sess.source_map();
let sp = sm.guess_head_span(item.span);
- match traits::orphan_check(self.tcx, def_id.to_def_id()) {
+ match traits::orphan_check(self.tcx, item.def_id.to_def_id()) {
Ok(()) => {}
Err(traits::OrphanCheckErr::NonLocalInputType(tys)) => {
let mut err = struct_span_err!(
diff --git a/compiler/rustc_typeck/src/coherence/unsafety.rs b/compiler/rustc_typeck/src/coherence/unsafety.rs
index 3a290b7..6b995b9 100644
--- a/compiler/rustc_typeck/src/coherence/unsafety.rs
+++ b/compiler/rustc_typeck/src/coherence/unsafety.rs
@@ -24,8 +24,7 @@
unsafety: hir::Unsafety,
polarity: hir::ImplPolarity,
) {
- let local_did = self.tcx.hir().local_def_id(item.hir_id);
- if let Some(trait_ref) = self.tcx.impl_trait_ref(local_did) {
+ if let Some(trait_ref) = self.tcx.impl_trait_ref(item.def_id) {
let trait_def = self.tcx.trait_def(trait_ref.def_id);
let unsafe_attr = impl_generics.and_then(|generics| {
generics.params.iter().find(|p| p.pure_wrt_drop).map(|_| "may_dangle")
diff --git a/compiler/rustc_typeck/src/collect.rs b/compiler/rustc_typeck/src/collect.rs
index c6cc54d..e513635 100644
--- a/compiler/rustc_typeck/src/collect.rs
+++ b/compiler/rustc_typeck/src/collect.rs
@@ -1,3 +1,4 @@
+// ignore-tidy-filelength
//! "Collection" is the process of determining the type and other external
//! details of each item in Rust. Collection is specifically concerned
//! with *inter-procedural* things -- for example, for a function
@@ -77,6 +78,7 @@
projection_ty_from_predicates,
explicit_predicates_of,
super_predicates_of,
+ super_predicates_that_define_assoc_type,
trait_explicit_predicates_and_bounds,
type_param_predicates,
trait_def,
@@ -141,6 +143,7 @@
generics: &[hir::GenericParam<'_>],
placeholder_types: Vec<Span>,
suggest: bool,
+ hir_ty: Option<&hir::Ty<'_>>,
) {
if placeholder_types.is_empty() {
return;
@@ -171,12 +174,40 @@
}
let mut err = bad_placeholder_type(tcx, placeholder_types);
+
+ // Suggest, but only if it is not a function in const or static
if suggest {
- err.multipart_suggestion(
- "use type parameters instead",
- sugg,
- Applicability::HasPlaceholders,
- );
+ let mut is_fn = false;
+ let mut is_const = false;
+ let mut is_static = false;
+
+ if let Some(hir_ty) = hir_ty {
+ if let hir::TyKind::BareFn(_) = hir_ty.kind {
+ is_fn = true;
+
+ // Check if parent is const or static
+ let parent_id = tcx.hir().get_parent_node(hir_ty.hir_id);
+ let parent_node = tcx.hir().get(parent_id);
+
+ if let hir::Node::Item(item) = parent_node {
+ if let hir::ItemKind::Const(_, _) = item.kind {
+ is_const = true;
+ } else if let hir::ItemKind::Static(_, _, _) = item.kind {
+ is_static = true;
+ }
+ }
+ }
+ }
+
+ // If the bare function type appears inside a const or static item,
+ // don't show the suggestion.
+ if !(is_fn && (is_const || is_static)) {
+ err.multipart_suggestion(
+ "use type parameters instead",
+ sugg,
+ Applicability::HasPlaceholders,
+ );
+ }
}
err.emit();
}
@@ -198,7 +229,7 @@
let mut visitor = PlaceholderHirTyCollector::default();
visitor.visit_item(item);
- placeholder_type_error(tcx, Some(generics.span), &generics.params[..], visitor.0, suggest);
+ placeholder_type_error(tcx, Some(generics.span), generics.params, visitor.0, suggest, None);
}
impl Visitor<'tcx> for CollectItemTypesVisitor<'tcx> {
@@ -209,7 +240,7 @@
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- convert_item(self.tcx, item.hir_id);
+ convert_item(self.tcx, item.item_id());
reject_placeholder_type_signatures_in_item(self.tcx, item);
intravisit::walk_item(self, item);
}
@@ -243,12 +274,12 @@
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
- convert_trait_item(self.tcx, trait_item.hir_id);
+ convert_trait_item(self.tcx, trait_item.trait_item_id());
intravisit::walk_trait_item(self, trait_item);
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
- convert_impl_item(self.tcx, impl_item.hir_id);
+ convert_impl_item(self.tcx, impl_item.impl_item_id());
intravisit::walk_impl_item(self, impl_item);
}
}
@@ -278,8 +309,8 @@
ItemCtxt { tcx, item_def_id }
}
- pub fn to_ty(&self, ast_ty: &'tcx hir::Ty<'tcx>) -> Ty<'tcx> {
- AstConv::ast_ty_to_ty(self, ast_ty)
+ pub fn to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ <dyn AstConv<'_>>::ast_ty_to_ty(self, ast_ty)
}
pub fn hir_id(&self) -> hir::HirId {
@@ -308,8 +339,17 @@
}
}
- fn get_type_parameter_bounds(&self, span: Span, def_id: DefId) -> ty::GenericPredicates<'tcx> {
- self.tcx.at(span).type_param_predicates((self.item_def_id, def_id.expect_local()))
+ fn get_type_parameter_bounds(
+ &self,
+ span: Span,
+ def_id: DefId,
+ assoc_name: Ident,
+ ) -> ty::GenericPredicates<'tcx> {
+ self.tcx.at(span).type_param_predicates((
+ self.item_def_id,
+ def_id.expect_local(),
+ assoc_name,
+ ))
}
fn re_infer(&self, _: Option<&ty::GenericParamDef>, _: Span) -> Option<ty::Region<'tcx>> {
@@ -375,7 +415,7 @@
| hir::ItemKind::Struct(_, generics)
| hir::ItemKind::Union(_, generics) => {
let lt_name = get_new_lifetime_name(self.tcx, poly_trait_ref, generics);
- let (lt_sp, sugg) = match &generics.params[..] {
+ let (lt_sp, sugg) = match generics.params {
[] => (generics.span, format!("<{}>", lt_name)),
[bound, ..] => {
(bound.span.shrink_to_lo(), format!("{}, ", lt_name))
@@ -495,7 +535,7 @@
/// `X: Foo` where `X` is the type parameter `def_id`.
fn type_param_predicates(
tcx: TyCtxt<'_>,
- (item_def_id, def_id): (DefId, LocalDefId),
+ (item_def_id, def_id, assoc_name): (DefId, LocalDefId, Ident),
) -> ty::GenericPredicates<'_> {
use rustc_hir::*;
@@ -520,7 +560,7 @@
let mut result = parent
.map(|parent| {
let icx = ItemCtxt::new(tcx, parent);
- icx.get_type_parameter_bounds(DUMMY_SP, def_id.to_def_id())
+ icx.get_type_parameter_bounds(DUMMY_SP, def_id.to_def_id(), assoc_name)
})
.unwrap_or_default();
let mut extend = None;
@@ -563,12 +603,18 @@
let icx = ItemCtxt::new(tcx, item_def_id);
let extra_predicates = extend.into_iter().chain(
- icx.type_parameter_bounds_in_generics(ast_generics, param_id, ty, OnlySelfBounds(true))
- .into_iter()
- .filter(|(predicate, _)| match predicate.kind().skip_binder() {
- ty::PredicateKind::Trait(data, _) => data.self_ty().is_param(index),
- _ => false,
- }),
+ icx.type_parameter_bounds_in_generics(
+ ast_generics,
+ param_id,
+ ty,
+ OnlySelfBounds(true),
+ Some(assoc_name),
+ )
+ .into_iter()
+ .filter(|(predicate, _)| match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(data, _) => data.self_ty().is_param(index),
+ _ => false,
+ }),
);
result.predicates =
tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(extra_predicates));
@@ -586,6 +632,7 @@
param_id: hir::HirId,
ty: Ty<'tcx>,
only_self_bounds: OnlySelfBounds,
+ assoc_name: Option<Ident>,
) -> Vec<(ty::Predicate<'tcx>, Span)> {
let constness = self.default_constness_for_trait_bounds();
let from_ty_params = ast_generics
@@ -596,6 +643,10 @@
_ => None,
})
.flat_map(|bounds| bounds.iter())
+ .filter(|b| match assoc_name {
+ Some(assoc_name) => self.bound_defines_assoc_item(b, assoc_name),
+ None => true,
+ })
.flat_map(|b| predicates_from_bound(self, ty, b, constness));
let from_where_clauses = ast_generics
@@ -614,12 +665,34 @@
} else {
None
};
- bp.bounds.iter().filter_map(move |b| bt.map(|bt| (bt, b)))
+ bp.bounds
+ .iter()
+ .filter(|b| match assoc_name {
+ Some(assoc_name) => self.bound_defines_assoc_item(b, assoc_name),
+ None => true,
+ })
+ .filter_map(move |b| bt.map(|bt| (bt, b)))
})
.flat_map(|(bt, b)| predicates_from_bound(self, bt, b, constness));
from_ty_params.chain(from_where_clauses).collect()
}
+
+ fn bound_defines_assoc_item(&self, b: &hir::GenericBound<'_>, assoc_name: Ident) -> bool {
+ debug!("bound_defines_assoc_item(b={:?}, assoc_name={:?})", b, assoc_name);
+
+ match b {
+ hir::GenericBound::Trait(poly_trait_ref, _) => {
+ let trait_ref = &poly_trait_ref.trait_ref;
+ if let Some(trait_did) = trait_ref.trait_def_id() {
+ self.tcx.trait_may_define_assoc_type(trait_did, assoc_name)
+ } else {
+ false
+ }
+ }
+ _ => false,
+ }
+ }
}
/// Tests whether this is the AST for a reference to the type
@@ -639,10 +712,11 @@
}
}
-fn convert_item(tcx: TyCtxt<'_>, item_id: hir::HirId) {
- let it = tcx.hir().expect_item(item_id);
- debug!("convert: item {} with id {}", it.ident, it.hir_id);
- let def_id = tcx.hir().local_def_id(item_id);
+fn convert_item(tcx: TyCtxt<'_>, item_id: hir::ItemId) {
+ let it = tcx.hir().item(item_id);
+ debug!("convert: item {} with id {}", it.ident, it.hir_id());
+ let def_id = item_id.def_id;
+
match it.kind {
// These don't define types.
hir::ItemKind::ExternCrate(_)
@@ -652,12 +726,11 @@
hir::ItemKind::ForeignMod { items, .. } => {
for item in items {
let item = tcx.hir().foreign_item(item.id);
- let def_id = tcx.hir().local_def_id(item.hir_id);
- tcx.ensure().generics_of(def_id);
- tcx.ensure().type_of(def_id);
- tcx.ensure().predicates_of(def_id);
+ tcx.ensure().generics_of(item.def_id);
+ tcx.ensure().type_of(item.def_id);
+ tcx.ensure().predicates_of(item.def_id);
if let hir::ForeignItemKind::Fn(..) = item.kind {
- tcx.ensure().fn_sig(def_id);
+ tcx.ensure().fn_sig(item.def_id);
}
}
}
@@ -728,57 +801,57 @@
}
}
-fn convert_trait_item(tcx: TyCtxt<'_>, trait_item_id: hir::HirId) {
- let trait_item = tcx.hir().expect_trait_item(trait_item_id);
- let def_id = tcx.hir().local_def_id(trait_item.hir_id);
- tcx.ensure().generics_of(def_id);
+fn convert_trait_item(tcx: TyCtxt<'_>, trait_item_id: hir::TraitItemId) {
+ let trait_item = tcx.hir().trait_item(trait_item_id);
+ tcx.ensure().generics_of(trait_item_id.def_id);
match trait_item.kind {
hir::TraitItemKind::Fn(..) => {
- tcx.ensure().type_of(def_id);
- tcx.ensure().fn_sig(def_id);
+ tcx.ensure().type_of(trait_item_id.def_id);
+ tcx.ensure().fn_sig(trait_item_id.def_id);
}
hir::TraitItemKind::Const(.., Some(_)) => {
- tcx.ensure().type_of(def_id);
+ tcx.ensure().type_of(trait_item_id.def_id);
}
hir::TraitItemKind::Const(..) => {
- tcx.ensure().type_of(def_id);
+ tcx.ensure().type_of(trait_item_id.def_id);
// Account for `const C: _;`.
let mut visitor = PlaceholderHirTyCollector::default();
visitor.visit_trait_item(trait_item);
- placeholder_type_error(tcx, None, &[], visitor.0, false);
+ placeholder_type_error(tcx, None, &[], visitor.0, false, None);
}
hir::TraitItemKind::Type(_, Some(_)) => {
- tcx.ensure().item_bounds(def_id);
- tcx.ensure().type_of(def_id);
+ tcx.ensure().item_bounds(trait_item_id.def_id);
+ tcx.ensure().type_of(trait_item_id.def_id);
// Account for `type T = _;`.
let mut visitor = PlaceholderHirTyCollector::default();
visitor.visit_trait_item(trait_item);
- placeholder_type_error(tcx, None, &[], visitor.0, false);
+ placeholder_type_error(tcx, None, &[], visitor.0, false, None);
}
hir::TraitItemKind::Type(_, None) => {
- tcx.ensure().item_bounds(def_id);
+ tcx.ensure().item_bounds(trait_item_id.def_id);
// #74612: Visit and try to find bad placeholders
// even if there is no concrete type.
let mut visitor = PlaceholderHirTyCollector::default();
visitor.visit_trait_item(trait_item);
- placeholder_type_error(tcx, None, &[], visitor.0, false);
+
+ placeholder_type_error(tcx, None, &[], visitor.0, false, None);
}
};
- tcx.ensure().predicates_of(def_id);
+ tcx.ensure().predicates_of(trait_item_id.def_id);
}
-fn convert_impl_item(tcx: TyCtxt<'_>, impl_item_id: hir::HirId) {
- let def_id = tcx.hir().local_def_id(impl_item_id);
+fn convert_impl_item(tcx: TyCtxt<'_>, impl_item_id: hir::ImplItemId) {
+ let def_id = impl_item_id.def_id;
tcx.ensure().generics_of(def_id);
tcx.ensure().type_of(def_id);
tcx.ensure().predicates_of(def_id);
- let impl_item = tcx.hir().expect_impl_item(impl_item_id);
+ let impl_item = tcx.hir().impl_item(impl_item_id);
match impl_item.kind {
hir::ImplItemKind::Fn(..) => {
tcx.ensure().fn_sig(def_id);
@@ -787,7 +860,8 @@
// Account for `type T = _;`
let mut visitor = PlaceholderHirTyCollector::default();
visitor.visit_impl_item(impl_item);
- placeholder_type_error(tcx, None, &[], visitor.0, false);
+
+ placeholder_type_error(tcx, None, &[], visitor.0, false, None);
}
hir::ImplItemKind::Const(..) => {}
}
@@ -988,54 +1062,97 @@
/// the transitive super-predicates are converted.
fn super_predicates_of(tcx: TyCtxt<'_>, trait_def_id: DefId) -> ty::GenericPredicates<'_> {
debug!("super_predicates(trait_def_id={:?})", trait_def_id);
- let trait_hir_id = tcx.hir().local_def_id_to_hir_id(trait_def_id.expect_local());
+ tcx.super_predicates_that_define_assoc_type((trait_def_id, None))
+}
- let item = match tcx.hir().get(trait_hir_id) {
- Node::Item(item) => item,
- _ => bug!("trait_node_id {} is not an item", trait_hir_id),
- };
-
- let (generics, bounds) = match item.kind {
- hir::ItemKind::Trait(.., ref generics, ref supertraits, _) => (generics, supertraits),
- hir::ItemKind::TraitAlias(ref generics, ref supertraits) => (generics, supertraits),
- _ => span_bug!(item.span, "super_predicates invoked on non-trait"),
- };
-
- let icx = ItemCtxt::new(tcx, trait_def_id);
-
- // Convert the bounds that follow the colon, e.g., `Bar + Zed` in `trait Foo: Bar + Zed`.
- let self_param_ty = tcx.types.self_param;
- let superbounds1 =
- AstConv::compute_bounds(&icx, self_param_ty, bounds, SizedByDefault::No, item.span);
-
- let superbounds1 = superbounds1.predicates(tcx, self_param_ty);
-
- // Convert any explicit superbounds in the where-clause,
- // e.g., `trait Foo where Self: Bar`.
- // In the case of trait aliases, however, we include all bounds in the where-clause,
- // so e.g., `trait Foo = where u32: PartialEq<Self>` would include `u32: PartialEq<Self>`
- // as one of its "superpredicates".
- let is_trait_alias = tcx.is_trait_alias(trait_def_id);
- let superbounds2 = icx.type_parameter_bounds_in_generics(
- generics,
- item.hir_id,
- self_param_ty,
- OnlySelfBounds(!is_trait_alias),
+/// Ensures that the super-predicates of the trait with a `DefId`
+/// of `trait_def_id` are converted and stored. This also ensures that
+/// the transitive super-predicates are converted.
+fn super_predicates_that_define_assoc_type(
+ tcx: TyCtxt<'_>,
+ (trait_def_id, assoc_name): (DefId, Option<Ident>),
+) -> ty::GenericPredicates<'_> {
+ debug!(
+ "super_predicates_that_define_assoc_type(trait_def_id={:?}, assoc_name={:?})",
+ trait_def_id, assoc_name
);
+ if trait_def_id.is_local() {
+ debug!("super_predicates_that_define_assoc_type: local trait_def_id={:?}", trait_def_id);
+ let trait_hir_id = tcx.hir().local_def_id_to_hir_id(trait_def_id.expect_local());
- // Combine the two lists to form the complete set of superbounds:
- let superbounds = &*tcx.arena.alloc_from_iter(superbounds1.into_iter().chain(superbounds2));
+ let item = match tcx.hir().get(trait_hir_id) {
+ Node::Item(item) => item,
+ _ => bug!("trait_node_id {} is not an item", trait_hir_id),
+ };
- // Now require that immediate supertraits are converted,
- // which will, in turn, reach indirect supertraits.
- for &(pred, span) in superbounds {
- debug!("superbound: {:?}", pred);
- if let ty::PredicateKind::Trait(bound, _) = pred.kind().skip_binder() {
- tcx.at(span).super_predicates_of(bound.def_id());
+ let (generics, bounds) = match item.kind {
+ hir::ItemKind::Trait(.., ref generics, ref supertraits, _) => (generics, supertraits),
+ hir::ItemKind::TraitAlias(ref generics, ref supertraits) => (generics, supertraits),
+ _ => span_bug!(item.span, "super_predicates invoked on non-trait"),
+ };
+
+ let icx = ItemCtxt::new(tcx, trait_def_id);
+
+ // Convert the bounds that follow the colon, e.g., `Bar + Zed` in `trait Foo: Bar + Zed`.
+ let self_param_ty = tcx.types.self_param;
+ let superbounds1 = if let Some(assoc_name) = assoc_name {
+ <dyn AstConv<'_>>::compute_bounds_that_match_assoc_type(
+ &icx,
+ self_param_ty,
+ &bounds,
+ SizedByDefault::No,
+ item.span,
+ assoc_name,
+ )
+ } else {
+ <dyn AstConv<'_>>::compute_bounds(
+ &icx,
+ self_param_ty,
+ &bounds,
+ SizedByDefault::No,
+ item.span,
+ )
+ };
+
+ let superbounds1 = superbounds1.predicates(tcx, self_param_ty);
+
+ // Convert any explicit superbounds in the where-clause,
+ // e.g., `trait Foo where Self: Bar`.
+ // In the case of trait aliases, however, we include all bounds in the where-clause,
+ // so e.g., `trait Foo = where u32: PartialEq<Self>` would include `u32: PartialEq<Self>`
+ // as one of its "superpredicates".
+ let is_trait_alias = tcx.is_trait_alias(trait_def_id);
+ let superbounds2 = icx.type_parameter_bounds_in_generics(
+ generics,
+ item.hir_id(),
+ self_param_ty,
+ OnlySelfBounds(!is_trait_alias),
+ assoc_name,
+ );
+
+ // Combine the two lists to form the complete set of superbounds:
+ let superbounds = &*tcx.arena.alloc_from_iter(superbounds1.into_iter().chain(superbounds2));
+
+ // Now require that immediate supertraits are converted,
+ // which will, in turn, reach indirect supertraits.
+ if assoc_name.is_none() {
+ // Now require that immediate supertraits are converted,
+ // which will, in turn, reach indirect supertraits.
+ for &(pred, span) in superbounds {
+ debug!("superbound: {:?}", pred);
+ if let ty::PredicateKind::Trait(bound, _) = pred.kind().skip_binder() {
+ tcx.at(span).super_predicates_of(bound.def_id());
+ }
+ }
}
- }
- ty::GenericPredicates { parent: None, predicates: superbounds }
+ ty::GenericPredicates { parent: None, predicates: superbounds }
+ } else {
+ // if `assoc_name` is None, then the query should've been redirected to an
+ // external provider
+ assert!(assoc_name.is_some());
+ tcx.super_predicates_of(trait_def_id)
+ }
}
fn trait_def(tcx: TyCtxt<'_>, def_id: DefId) -> ty::TraitDef {
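For orientation, a minimal sketch (ordinary Rust, not taken from the patch) of the situation the new `super_predicates_that_define_assoc_type` query and `bound_defines_assoc_item` helper serve: resolving `Self::Item` below only requires the supertrait bound that can actually define an associated type named `Item` (`Iterator`), not every super-predicate such as `Clone`.

    trait Stream: Iterator + Clone {
        fn first_item(&mut self) -> Option<Self::Item> {
            self.next()
        }
    }

    #[derive(Clone)]
    struct Counter(u32);

    impl Iterator for Counter {
        type Item = u32;
        fn next(&mut self) -> Option<u32> {
            self.0 += 1;
            Some(self.0)
        }
    }

    impl Stream for Counter {}

    fn main() {
        let mut c = Counter(0);
        println!("{:?}", c.first_item());
    }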
@@ -1331,12 +1448,12 @@
//
// Something of a hack: use the node id for the trait, also as
// the node id for the Self type parameter.
- let param_id = item.hir_id;
+ let param_id = item.def_id;
opt_self = Some(ty::GenericParamDef {
index: 0,
name: kw::SelfUpper,
- def_id: tcx.hir().local_def_id(param_id).to_def_id(),
+ def_id: param_id.to_def_id(),
pure_wrt_drop: false,
kind: ty::GenericParamDefKind::Type {
has_default: false,
@@ -1578,13 +1695,14 @@
ty::Binder::bind(fn_sig)
}
- None => AstConv::ty_of_fn(
+ None => <dyn AstConv<'_>>::ty_of_fn(
&icx,
sig.header.unsafety,
sig.header.abi,
&sig.decl,
&generics,
Some(ident.span),
+ None,
),
}
}
@@ -1594,9 +1712,15 @@
ident,
generics,
..
- }) => {
- AstConv::ty_of_fn(&icx, header.unsafety, header.abi, decl, &generics, Some(ident.span))
- }
+ }) => <dyn AstConv<'_>>::ty_of_fn(
+ &icx,
+ header.unsafety,
+ header.abi,
+ decl,
+ &generics,
+ Some(ident.span),
+ None,
+ ),
ForeignItem(&hir::ForeignItem {
kind: ForeignItemKind::Fn(ref fn_decl, _, _),
@@ -1649,7 +1773,7 @@
match tcx.hir().expect_item(hir_id).kind {
hir::ItemKind::Impl(ref impl_) => impl_.of_trait.as_ref().map(|ast_trait_ref| {
let selfty = tcx.type_of(def_id);
- AstConv::instantiate_mono_trait_ref(&icx, ast_trait_ref, selfty)
+ <dyn AstConv<'_>>::instantiate_mono_trait_ref(&icx, ast_trait_ref, selfty)
}),
_ => bug!(),
}
@@ -1900,7 +2024,7 @@
GenericParamKind::Lifetime { .. } => {
param.bounds.iter().for_each(|bound| match bound {
hir::GenericBound::Outlives(lt) => {
- let bound = AstConv::ast_region_to_region(&icx, &lt, None);
+ let bound = <dyn AstConv<'_>>::ast_region_to_region(&icx, &lt, None);
let outlives = ty::Binder::bind(ty::OutlivesPredicate(region, bound));
predicates.insert((outlives.to_predicate(tcx), lt.span));
}
@@ -1923,8 +2047,13 @@
index += 1;
let sized = SizedByDefault::Yes;
- let bounds =
- AstConv::compute_bounds(&icx, param_ty, &param.bounds, sized, param.span);
+ let bounds = <dyn AstConv<'_>>::compute_bounds(
+ &icx,
+ param_ty,
+ &param.bounds,
+ sized,
+ param.span,
+ );
predicates.extend(bounds.predicates(tcx, param_ty));
}
GenericParamKind::Const { .. } => {
@@ -1973,7 +2102,7 @@
};
let mut bounds = Bounds::default();
- let _ = AstConv::instantiate_poly_trait_ref(
+ let _ = <dyn AstConv<'_>>::instantiate_poly_trait_ref(
&icx,
&poly_trait_ref,
constness,
@@ -1985,7 +2114,7 @@
&hir::GenericBound::LangItemTrait(lang_item, span, hir_id, args) => {
let mut bounds = Bounds::default();
- AstConv::instantiate_lang_item_trait_ref(
+ <dyn AstConv<'_>>::instantiate_lang_item_trait_ref(
&icx,
lang_item,
span,
@@ -1998,7 +2127,8 @@
}
hir::GenericBound::Outlives(lifetime) => {
- let region = AstConv::ast_region_to_region(&icx, lifetime, None);
+ let region =
+ <dyn AstConv<'_>>::ast_region_to_region(&icx, lifetime, None);
predicates.insert((
ty::Binder::bind(ty::PredicateKind::TypeOutlives(
ty::OutlivesPredicate(ty, region),
@@ -2012,11 +2142,11 @@
}
hir::WherePredicate::RegionPredicate(region_pred) => {
- let r1 = AstConv::ast_region_to_region(&icx, &region_pred.lifetime, None);
+ let r1 = <dyn AstConv<'_>>::ast_region_to_region(&icx, &region_pred.lifetime, None);
predicates.extend(region_pred.bounds.iter().map(|bound| {
let (r2, span) = match bound {
hir::GenericBound::Outlives(lt) => {
- (AstConv::ast_region_to_region(&icx, lt, None), lt.span)
+ (<dyn AstConv<'_>>::ast_region_to_region(&icx, lt, None), lt.span)
}
_ => bug!(),
};
@@ -2259,13 +2389,14 @@
} else {
hir::Unsafety::Unsafe
};
- let fty = AstConv::ty_of_fn(
+ let fty = <dyn AstConv<'_>>::ty_of_fn(
&ItemCtxt::new(tcx, def_id),
unsafety,
abi,
decl,
&hir::Generics::empty(),
Some(ident.span),
+ None,
);
// Feature gate SIMD types in FFI, since I am not sure that the
@@ -2280,7 +2411,7 @@
.sess
.source_map()
.span_to_snippet(ast_ty.span)
- .map_or(String::new(), |s| format!(" `{}`", s));
+ .map_or_else(|_| String::new(), |s| format!(" `{}`", s));
tcx.sess
.struct_span_err(
ast_ty.span,
@@ -2547,7 +2678,7 @@
} else if tcx.sess.check_name(attr, sym::used) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED;
} else if tcx.sess.check_name(attr, sym::cmse_nonsecure_entry) {
- if tcx.fn_sig(id).abi() != abi::Abi::C {
+ if !matches!(tcx.fn_sig(id).abi(), abi::Abi::C { .. }) {
struct_span_err!(
tcx.sess,
attr.span,
@@ -2640,10 +2771,12 @@
codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMORY;
} else if item.has_name(sym::thread) {
codegen_fn_attrs.no_sanitize |= SanitizerSet::THREAD;
+ } else if item.has_name(sym::hwaddress) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::HWADDRESS;
} else {
tcx.sess
.struct_span_err(item.span(), "invalid argument for `no_sanitize`")
- .note("expected one of: `address`, `memory` or `thread`")
+ .note("expected one of: `address`, `hwaddress`, `memory` or `thread`")
.emit();
}
}
diff --git a/compiler/rustc_typeck/src/collect/item_bounds.rs b/compiler/rustc_typeck/src/collect/item_bounds.rs
index 537a583..a5b3644 100644
--- a/compiler/rustc_typeck/src/collect/item_bounds.rs
+++ b/compiler/rustc_typeck/src/collect/item_bounds.rs
@@ -25,10 +25,10 @@
InternalSubsts::identity_for_item(tcx, assoc_item_def_id),
);
- let bounds = AstConv::compute_bounds(
+ let bounds = <dyn AstConv<'_>>::compute_bounds(
&ItemCtxt::new(tcx, assoc_item_def_id),
item_ty,
- bounds,
+ &bounds,
SizedByDefault::Yes,
span,
);
@@ -66,10 +66,10 @@
let item_ty =
tcx.mk_opaque(opaque_def_id, InternalSubsts::identity_for_item(tcx, opaque_def_id));
- let bounds = AstConv::compute_bounds(
+ let bounds = <dyn AstConv<'_>>::compute_bounds(
&ItemCtxt::new(tcx, opaque_def_id),
item_ty,
- bounds,
+ &bounds,
SizedByDefault::Yes,
span,
)
diff --git a/compiler/rustc_typeck/src/collect/type_of.rs b/compiler/rustc_typeck/src/collect/type_of.rs
index e4eabca..3f2f244 100644
--- a/compiler/rustc_typeck/src/collect/type_of.rs
+++ b/compiler/rustc_typeck/src/collect/type_of.rs
@@ -1,4 +1,3 @@
-use crate::errors::AssocTypeOnInherentImpl;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{Applicability, ErrorReported, StashKey};
use rustc_hir as hir;
@@ -29,6 +28,73 @@
let parent_node = tcx.hir().get(parent_node_id);
match parent_node {
+ // This match arm is for when the def_id appears in a GAT whose
+ // path can't be resolved without typechecking e.g.
+ //
+ // trait Foo {
+ // type Assoc<const N: usize>;
+ // fn foo() -> Self::Assoc<3>;
+ // }
+ //
+ // In the above code we would call this query with the def_id of 3 and
+ // the parent_node we match on would be the hir node for Self::Assoc<3>
+ //
+ // `Self::Assoc<3>` can't be resolved without typechecking here as we
+ // didn't write <Self as Foo>::Assoc<3>. If we did then another match
+ // arm would handle this.
+ //
+ // I believe this match arm is only needed for GAT but I am not 100% sure - BoxyUwU
+ Node::Ty(hir_ty @ Ty { kind: TyKind::Path(QPath::TypeRelative(_, segment)), .. }) => {
+ // Find the Item containing the associated type so we can create an ItemCtxt.
+ // Using the ItemCtxt convert the HIR for the unresolved assoc type into a
+ // ty which is a fully resolved projection.
+ // For the code example above, this would mean converting Self::Assoc<3>
+ // into a ty::Projection(<Self as Foo>::Assoc<3>)
+ let item_hir_id = tcx
+ .hir()
+ .parent_iter(hir_id)
+ .filter(|(_, node)| matches!(node, Node::Item(_)))
+ .map(|(id, _)| id)
+ .next()
+ .unwrap();
+ let item_did = tcx.hir().local_def_id(item_hir_id).to_def_id();
+ let item_ctxt = &ItemCtxt::new(tcx, item_did) as &dyn crate::astconv::AstConv<'_>;
+ let ty = item_ctxt.ast_ty_to_ty(hir_ty);
+
+ // Iterate through the generics of the projection to find the one that corresponds to
+ // the def_id that this query was called with. We filter to only const args here as a
+ // precaution in case it's ever allowed to elide lifetimes in GATs. It currently isn't
+ // but it can't hurt to be safe ^^
+ if let ty::Projection(projection) = ty.kind() {
+ let generics = tcx.generics_of(projection.item_def_id);
+
+ let arg_index = segment
+ .args
+ .and_then(|args| {
+ args.args
+ .iter()
+ .filter(|arg| arg.is_const())
+ .position(|arg| arg.id() == hir_id)
+ })
+ .unwrap_or_else(|| {
+ bug!("no arg matching AnonConst in segment");
+ });
+
+ return generics
+ .params
+ .iter()
+ .filter(|param| matches!(param.kind, ty::GenericParamDefKind::Const))
+ .nth(arg_index)
+ .map(|param| param.def_id);
+ }
+
+ // I don't think it's possible to reach this but I'm not 100% sure - BoxyUwU
+ tcx.sess.delay_span_bug(
+ tcx.def_span(def_id),
+ "unexpected non-GAT usage of an anon const",
+ );
+ return None;
+ }
Node::Expr(&Expr {
kind:
ExprKind::MethodCall(segment, ..) | ExprKind::Path(QPath::TypeRelative(_, segment)),
@@ -227,7 +293,7 @@
}
ImplItemKind::TyAlias(ref ty) => {
if tcx.impl_trait_ref(tcx.hir().get_parent_did(hir_id).to_def_id()).is_none() {
- report_assoc_ty_on_inherent_impl(tcx, item.span);
+ check_feature_inherent_assoc_ty(tcx, item.span);
}
icx.to_ty(ty)
@@ -515,26 +581,23 @@
}
fn visit_item(&mut self, it: &'tcx Item<'tcx>) {
debug!("find_existential_constraints: visiting {:?}", it);
- let def_id = self.tcx.hir().local_def_id(it.hir_id);
// The opaque type itself or its children are not within its reveal scope.
- if def_id.to_def_id() != self.def_id {
- self.check(def_id);
+ if it.def_id.to_def_id() != self.def_id {
+ self.check(it.def_id);
intravisit::walk_item(self, it);
}
}
fn visit_impl_item(&mut self, it: &'tcx ImplItem<'tcx>) {
debug!("find_existential_constraints: visiting {:?}", it);
- let def_id = self.tcx.hir().local_def_id(it.hir_id);
// The opaque type itself or its children are not within its reveal scope.
- if def_id.to_def_id() != self.def_id {
- self.check(def_id);
+ if it.def_id.to_def_id() != self.def_id {
+ self.check(it.def_id);
intravisit::walk_impl_item(self, it);
}
}
fn visit_trait_item(&mut self, it: &'tcx TraitItem<'tcx>) {
debug!("find_existential_constraints: visiting {:?}", it);
- let def_id = self.tcx.hir().local_def_id(it.hir_id);
- self.check(def_id);
+ self.check(it.def_id);
intravisit::walk_trait_item(self, it);
}
}
@@ -659,11 +722,12 @@
format!("{}: {}", item_ident, ty),
Applicability::MachineApplicable,
)
- .emit();
+ .emit_unless(ty.references_error());
}
None => {
let mut diag = bad_placeholder_type(tcx, vec![span]);
- if !matches!(ty.kind(), ty::Error(_)) {
+
+ if !ty.references_error() {
diag.span_suggestion(
span,
"replace `_` with the correct type",
@@ -671,6 +735,7 @@
Applicability::MaybeIncorrect,
);
}
+
diag.emit();
}
}
@@ -682,6 +747,16 @@
})
}
-fn report_assoc_ty_on_inherent_impl(tcx: TyCtxt<'_>, span: Span) {
- tcx.sess.emit_err(AssocTypeOnInherentImpl { span });
+fn check_feature_inherent_assoc_ty(tcx: TyCtxt<'_>, span: Span) {
+ if !tcx.features().inherent_associated_types {
+ use rustc_session::parse::feature_err;
+ use rustc_span::symbol::sym;
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::inherent_associated_types,
+ span,
+ "inherent associated types are unstable",
+ )
+ .emit();
+ }
}
diff --git a/compiler/rustc_typeck/src/constrained_generic_params.rs b/compiler/rustc_typeck/src/constrained_generic_params.rs
index 95670b9..529de1a 100644
--- a/compiler/rustc_typeck/src/constrained_generic_params.rs
+++ b/compiler/rustc_typeck/src/constrained_generic_params.rs
@@ -198,7 +198,7 @@
// `<<T as Bar>::Baz as Iterator>::Output = <U as Iterator>::Output`
// Then the projection only applies if `T` is known, but it still
// does not determine `U`.
- let inputs = parameters_for(&projection.projection_ty.trait_ref(tcx), true);
+ let inputs = parameters_for(&projection.projection_ty, true);
let relies_only_on_inputs = inputs.iter().all(|p| input_parameters.contains(&p));
if !relies_only_on_inputs {
continue;
diff --git a/compiler/rustc_typeck/src/errors.rs b/compiler/rustc_typeck/src/errors.rs
index a769e48..5068242 100644
--- a/compiler/rustc_typeck/src/errors.rs
+++ b/compiler/rustc_typeck/src/errors.rs
@@ -83,13 +83,6 @@
}
#[derive(SessionDiagnostic)]
-#[error = "E0202"]
-pub struct AssocTypeOnInherentImpl {
- #[message = "associated types are not yet supported in inherent impls (see #8995)"]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
#[error = "E0203"]
pub struct MultipleRelaxedDefaultBounds {
#[message = "type parameter has more than one relaxed default bound, only one is supported"]
diff --git a/compiler/rustc_typeck/src/expr_use_visitor.rs b/compiler/rustc_typeck/src/expr_use_visitor.rs
index bd2c266..b172cb9 100644
--- a/compiler/rustc_typeck/src/expr_use_visitor.rs
+++ b/compiler/rustc_typeck/src/expr_use_visitor.rs
@@ -5,8 +5,9 @@
pub use self::ConsumeMode::*;
// Export these here so that Clippy can use them.
-pub use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId, Projection};
+pub use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection};
+use rustc_data_structures::fx::FxIndexMap;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::def_id::LocalDefId;
@@ -14,6 +15,7 @@
use rustc_index::vec::Idx;
use rustc_infer::infer::InferCtxt;
use rustc_middle::hir::place::ProjectionKind;
+use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{self, adjustment, TyCtxt};
use rustc_target::abi::VariantIdx;
@@ -51,6 +53,9 @@
// The path at `assignee_place` is being assigned to.
// `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId);
+
+ // The `place` should be a fake read because of the specified `cause`.
+ fn fake_read(&mut self, place: Place<'tcx>, cause: FakeReadCause, diag_expr_id: hir::HirId);
}
#[derive(Copy, Clone, PartialEq, Debug)]
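A simplified, standalone analogue of the new callback, using stand-in types in place of the rustc-internal Place, FakeReadCause and HirId (everything here apart from the method name is hypothetical):

    #[derive(Debug, Clone)]
    struct Place(String);

    #[derive(Debug, Clone, Copy)]
    #[allow(dead_code)]
    enum FakeReadCause { ForMatchedPlace, ForLet }

    type HirId = u32;

    trait Delegate {
        // Mirrors the new hook: `place` is only fake-read, for the given `cause`.
        fn fake_read(&mut self, place: Place, cause: FakeReadCause, diag_expr_id: HirId);
    }

    struct Logger;

    impl Delegate for Logger {
        fn fake_read(&mut self, place: Place, cause: FakeReadCause, diag_expr_id: HirId) {
            println!("fake read of {:?} ({:?}) at node {}", place, cause, diag_expr_id);
        }
    }

    fn main() {
        let mut d = Logger;
        d.fake_read(Place("discr".to_string()), FakeReadCause::ForMatchedPlace, 0);
    }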
@@ -184,7 +189,7 @@
hir::ExprKind::Type(ref subexpr, _) => self.walk_expr(subexpr),
- hir::ExprKind::Unary(hir::UnOp::UnDeref, ref base) => {
+ hir::ExprKind::Unary(hir::UnOp::Deref, ref base) => {
// *base
self.select_from_expr(base);
}
@@ -229,7 +234,61 @@
hir::ExprKind::Match(ref discr, arms, _) => {
let discr_place = return_if_err!(self.mc.cat_expr(&discr));
- self.borrow_expr(&discr, ty::ImmBorrow);
+
+ // Matching should not always be considered a use of the place, hence
+ // discr does not necessarily need to be borrowed.
+ // We only want to borrow discr if the patterns contain something other
+ // than wildcards.
+ let ExprUseVisitor { ref mc, body_owner: _, delegate: _ } = *self;
+ let mut needs_to_be_read = false;
+ for arm in arms.iter() {
+ return_if_err!(mc.cat_pattern(discr_place.clone(), &arm.pat, |place, pat| {
+ match &pat.kind {
+ PatKind::Binding(.., opt_sub_pat) => {
+ // If the opt_sub_pat is None, then the binding does not count as
+ // a wildcard for the purpose of borrowing discr.
+ if opt_sub_pat.is_none() {
+ needs_to_be_read = true;
+ }
+ }
+ PatKind::TupleStruct(..)
+ | PatKind::Path(..)
+ | PatKind::Struct(..)
+ | PatKind::Tuple(..) => {
+ // If the PatKind is a TupleStruct, Struct or Tuple then we want to check
+ // whether the Variant is a MultiVariant or a SingleVariant. We only want
+ // to borrow discr if it is a MultiVariant.
+ // If it is a SingleVariant and creates a binding we will handle that when
+ // this callback gets called again.
+ if let ty::Adt(def, _) = place.place.base_ty.kind() {
+ if def.variants.len() > 1 {
+ needs_to_be_read = true;
+ }
+ }
+ }
+ PatKind::Lit(_) => {
+ // If the PatKind is a Lit then we want
+ // to borrow discr.
+ needs_to_be_read = true;
+ }
+ _ => {}
+ }
+ }));
+ }
+
+ if needs_to_be_read {
+ self.borrow_expr(&discr, ty::ImmBorrow);
+ } else {
+ self.delegate.fake_read(
+ discr_place.place.clone(),
+ FakeReadCause::ForMatchedPlace,
+ discr_place.hir_id,
+ );
+
+ // We always want to walk the discriminant. We want to make sure, for instance,
+ // that the discriminant has been initialized.
+ self.walk_expr(&discr);
+ }
// treatment of the discriminant is handled while walking the arms.
for arm in arms {
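The surface-level distinction the comments above draw, as a small standalone example (illustrative only, not rustc code):

    #[allow(dead_code)]
    enum Multi { A, B }

    #[allow(dead_code)]
    struct Single(u8);

    fn main() {
        let m = Multi::A;
        let s = Single(0);
        let lit = 1u8;

        // Only a wildcard pattern: nothing about `s` has to be inspected, so the
        // scrutinee does not need to be borrowed.
        match s {
            _ => {}
        }

        // A multi-variant enum: choosing an arm requires reading `m`, so the
        // discriminant is borrowed (`ty::ImmBorrow` in the hunk above).
        match m {
            Multi::A => {}
            Multi::B => {}
        }

        // A literal pattern likewise forces the scrutinee to be read.
        match lit {
            1 => {}
            _ => {}
        }
    }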
@@ -397,7 +456,7 @@
fn walk_struct_expr(
&mut self,
- fields: &[hir::Field<'_>],
+ fields: &[hir::ExprField<'_>],
opt_with: &Option<&'hir hir::Expr<'_>>,
) {
// Consume the expressions supplying values for each field.
@@ -518,6 +577,11 @@
}
fn walk_arm(&mut self, discr_place: &PlaceWithHirId<'tcx>, arm: &hir::Arm<'_>) {
+ self.delegate.fake_read(
+ discr_place.place.clone(),
+ FakeReadCause::ForMatchedPlace,
+ discr_place.hir_id,
+ );
self.walk_pat(discr_place, &arm.pat);
if let Some(hir::Guard::If(ref e)) = arm.guard {
@@ -530,6 +594,11 @@
/// Walks a pat that occurs in isolation (i.e., top-level of fn argument or
/// let binding, and *not* a match arm or nested pat.)
fn walk_irrefutable_pat(&mut self, discr_place: &PlaceWithHirId<'tcx>, pat: &hir::Pat<'_>) {
+ self.delegate.fake_read(
+ discr_place.place.clone(),
+ FakeReadCause::ForLet,
+ discr_place.hir_id,
+ );
self.walk_pat(discr_place, pat);
}
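In surface terms, the two causes used here correspond to a destructuring `let` and a `match` (a hypothetical illustration, not part of the patch):

    fn main() {
        let pair = (1, 2);

        // Irrefutable pattern in a `let`: walk_irrefutable_pat reports the place
        // with FakeReadCause::ForLet.
        let (_a, _b) = pair;

        // Match arms: walk_arm reports the matched place with
        // FakeReadCause::ForMatchedPlace.
        match pair {
            (0, _) => {}
            _ => {}
        }
    }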
@@ -597,6 +666,14 @@
/// - When reporting the Place back to the Delegate, ensure that the UpvarId uses the enclosing
/// closure as the DefId.
fn walk_captures(&mut self, closure_expr: &hir::Expr<'_>) {
+ fn upvar_is_local_variable(
+ upvars: Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>>,
+ upvar_id: &hir::HirId,
+ body_owner_is_closure: bool,
+ ) -> bool {
+ upvars.map(|upvars| !upvars.contains_key(upvar_id)).unwrap_or(body_owner_is_closure)
+ }
+
debug!("walk_captures({:?})", closure_expr);
let closure_def_id = self.tcx().hir().local_def_id(closure_expr.hir_id).to_def_id();
@@ -608,6 +685,46 @@
ty::Closure(..) | ty::Generator(..)
);
+ // If we have a nested closure, we want to include the fake reads present in the nested closure.
+ if let Some(fake_reads) = self.mc.typeck_results.closure_fake_reads.get(&closure_def_id) {
+ for (fake_read, cause, hir_id) in fake_reads.iter() {
+ match fake_read.base {
+ PlaceBase::Upvar(upvar_id) => {
+ if upvar_is_local_variable(
+ upvars,
+ &upvar_id.var_path.hir_id,
+ body_owner_is_closure,
+ ) {
+ // The nested closure might be fake reading the current (enclosing) closure's local variables.
+ // The only places we want to fake read before creating the parent closure are the ones that
+ // are not local to it, i.e., not defined by it.
+ //
+ // ```rust,ignore(cannot-test-this-because-pseudo-code)
+ // let v1 = (0, 1);
+ // let c = || { // fake reads: v1
+ // let v2 = (0, 1);
+ // let e = || { // fake reads: v1, v2
+ // let (_, t1) = v1;
+ // let (_, t2) = v2;
+ // }
+ // }
+ // ```
+ // This check is performed when visiting the body of the outermost closure (`c`) and ensures
+ // that we don't add a fake read of v2 in c.
+ continue;
+ }
+ }
+ _ => {
+ bug!(
+ "Do not know how to get HirId out of Rvalue and StaticItem {:?}",
+ fake_read.base
+ );
+ }
+ };
+ self.delegate.fake_read(fake_read.clone(), *cause, *hir_id);
+ }
+ }
+
if let Some(min_captures) = self.mc.typeck_results.closure_min_captures.get(&closure_def_id)
{
for (var_hir_id, min_list) in min_captures.iter() {
diff --git a/compiler/rustc_typeck/src/impl_wf_check.rs b/compiler/rustc_typeck/src/impl_wf_check.rs
index 0bdcbaa..7713381 100644
--- a/compiler/rustc_typeck/src/impl_wf_check.rs
+++ b/compiler/rustc_typeck/src/impl_wf_check.rs
@@ -59,7 +59,7 @@
// but it's one that we must perform earlier than the rest of
// WfCheck.
for &module in tcx.hir().krate().modules.keys() {
- tcx.ensure().check_mod_impl_wf(tcx.hir().local_def_id(module));
+ tcx.ensure().check_mod_impl_wf(module);
}
}
@@ -81,11 +81,10 @@
impl ItemLikeVisitor<'tcx> for ImplWfCheck<'tcx> {
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
if let hir::ItemKind::Impl(ref impl_) = item.kind {
- let impl_def_id = self.tcx.hir().local_def_id(item.hir_id);
- enforce_impl_params_are_constrained(self.tcx, impl_def_id, impl_.items);
+ enforce_impl_params_are_constrained(self.tcx, item.def_id, impl_.items);
enforce_impl_items_are_distinct(self.tcx, impl_.items);
if self.min_specialization {
- check_min_specialization(self.tcx, impl_def_id.to_def_id(), item.span);
+ check_min_specialization(self.tcx, item.def_id.to_def_id(), item.span);
}
}
}
@@ -131,7 +130,7 @@
// Disallow unconstrained lifetimes, but only if they appear in assoc types.
let lifetimes_in_associated_types: FxHashSet<_> = impl_item_refs
.iter()
- .map(|item_ref| tcx.hir().local_def_id(item_ref.id.hir_id))
+ .map(|item_ref| item_ref.id.def_id)
.flat_map(|def_id| {
let item = tcx.associated_item(def_id);
match item.kind {
diff --git a/compiler/rustc_typeck/src/lib.rs b/compiler/rustc_typeck/src/lib.rs
index fd44baf..88b47bf 100644
--- a/compiler/rustc_typeck/src/lib.rs
+++ b/compiler/rustc_typeck/src/lib.rs
@@ -56,6 +56,7 @@
*/
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(bindings_after_at)]
#![feature(bool_to_option)]
#![feature(box_syntax)]
#![feature(crate_visibility_modifier)]
@@ -117,14 +118,19 @@
use bounds::Bounds;
fn require_c_abi_if_c_variadic(tcx: TyCtxt<'_>, decl: &hir::FnDecl<'_>, abi: Abi, span: Span) {
- if decl.c_variadic && !(abi == Abi::C || abi == Abi::Cdecl) {
- let mut err = struct_span_err!(
- tcx.sess,
- span,
- E0045,
- "C-variadic function must have C or cdecl calling convention"
- );
- err.span_label(span, "C-variadics require C or cdecl calling convention").emit();
+ match (decl.c_variadic, abi) {
+ // The function has the correct calling convention, or isn't a "C-variadic" function.
+ (false, _) | (true, Abi::C { .. }) | (true, Abi::Cdecl) => {}
+ // The function is a "C-variadic" function with an incorrect calling convention.
+ (true, _) => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0045,
+ "C-variadic function must have C or cdecl calling convention"
+ );
+ err.span_label(span, "C-variadics require C or cdecl calling convention").emit();
+ }
}
}
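A standalone sketch of the same tuple-match shape, with a simplified stand-in Abi enum (the real check above matches on rustc_target's Abi, including `Abi::C { .. }` with fields):

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum Abi { C, Cdecl, Rust }

    fn check_c_variadic(c_variadic: bool, abi: Abi) -> Result<(), String> {
        match (c_variadic, abi) {
            // Not C-variadic, or C-variadic with an accepted calling convention.
            (false, _) | (true, Abi::C) | (true, Abi::Cdecl) => Ok(()),
            // C-variadic with any other calling convention is rejected (E0045).
            (true, _) => {
                Err("C-variadic function must have C or cdecl calling convention".to_string())
            }
        }
    }

    fn main() {
        assert!(check_c_variadic(false, Abi::Rust).is_ok());
        assert!(check_c_variadic(true, Abi::C).is_ok());
        assert!(check_c_variadic(true, Abi::Rust).is_err());
    }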
@@ -136,7 +142,7 @@
) -> bool {
tcx.infer_ctxt().enter(|ref infcx| {
let param_env = ty::ParamEnv::empty();
- let mut fulfill_cx = TraitEngine::new(infcx.tcx);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
match infcx.at(&cause, param_env).eq(expected, actual) {
Ok(InferOk { obligations, .. }) => {
fulfill_cx.register_predicate_obligations(infcx, obligations);
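The `<dyn Trait>::...` call syntax used here, in isolation: when an associated function is provided by an `impl dyn Trait` block (as `TraitEngine::new` appears to be), the call names the trait-object type explicitly. A minimal sketch with a made-up trait:

    trait Engine {}

    // An inherent impl on the trait-object type itself.
    impl dyn Engine {
        fn describe() -> &'static str {
            "a trait-object engine"
        }
    }

    fn main() {
        // The old form in the diff (`TraitEngine::new(..)`) relied on a bare trait
        // path; naming the trait-object type explicitly is the non-deprecated form.
        println!("{}", <dyn Engine>::describe());
    }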
@@ -200,7 +206,8 @@
error = true;
}
- for attr in it.attrs {
+ let attrs = tcx.hir().attrs(main_id);
+ for attr in attrs {
if tcx.sess.check_name(attr, sym::track_caller) {
tcx.sess
.struct_span_err(
@@ -299,7 +306,8 @@
error = true;
}
- for attr in it.attrs {
+ let attrs = tcx.hir().attrs(start_id);
+ for attr in attrs {
if tcx.sess.check_name(attr, sym::track_caller) {
tcx.sess
.struct_span_err(
@@ -368,7 +376,7 @@
tcx.sess.track_errors(|| {
tcx.sess.time("type_collecting", || {
for &module in tcx.hir().krate().modules.keys() {
- tcx.ensure().collect_mod_item_types(tcx.hir().local_def_id(module));
+ tcx.ensure().collect_mod_item_types(module);
}
});
})?;
@@ -400,7 +408,7 @@
// NOTE: This is copy/pasted in librustdoc/core.rs and should be kept in sync.
tcx.sess.time("item_types_checking", || {
for &module in tcx.hir().krate().modules.keys() {
- tcx.ensure().check_mod_item_types(tcx.hir().local_def_id(module));
+ tcx.ensure().check_mod_item_types(module);
}
});
@@ -421,8 +429,7 @@
let env_node_id = tcx.hir().get_parent_item(hir_ty.hir_id);
let env_def_id = tcx.hir().local_def_id(env_node_id);
let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id());
-
- astconv::AstConv::ast_ty_to_ty(&item_cx, hir_ty)
+ item_cx.to_ty(hir_ty)
}
pub fn hir_trait_to_predicates<'tcx>(
@@ -437,7 +444,7 @@
let env_def_id = tcx.hir().local_def_id(env_hir_id);
let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id());
let mut bounds = Bounds::default();
- let _ = AstConv::instantiate_poly_trait_ref_inner(
+ let _ = <dyn AstConv<'_>>::instantiate_poly_trait_ref_inner(
&item_cx,
hir_trait,
DUMMY_SP,
diff --git a/compiler/rustc_typeck/src/mem_categorization.rs b/compiler/rustc_typeck/src/mem_categorization.rs
index fef52a3..14af110 100644
--- a/compiler/rustc_typeck/src/mem_categorization.rs
+++ b/compiler/rustc_typeck/src/mem_categorization.rs
@@ -303,7 +303,7 @@
let expr_ty = self.expr_ty(expr)?;
match expr.kind {
- hir::ExprKind::Unary(hir::UnOp::UnDeref, ref e_base) => {
+ hir::ExprKind::Unary(hir::UnOp::Deref, ref e_base) => {
if self.typeck_results.is_method_call(expr) {
self.cat_overloaded_place(expr, e_base)
} else {
diff --git a/compiler/rustc_typeck/src/outlives/implicit_infer.rs b/compiler/rustc_typeck/src/outlives/implicit_infer.rs
index 02008e1..6e6ecf6 100644
--- a/compiler/rustc_typeck/src/outlives/implicit_infer.rs
+++ b/compiler/rustc_typeck/src/outlives/implicit_infer.rs
@@ -2,7 +2,6 @@
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::itemlikevisit::ItemLikeVisitor;
-use rustc_hir::Node;
use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span;
@@ -53,16 +52,10 @@
impl<'cx, 'tcx> ItemLikeVisitor<'tcx> for InferVisitor<'cx, 'tcx> {
fn visit_item(&mut self, item: &hir::Item<'_>) {
- let item_did = self.tcx.hir().local_def_id(item.hir_id);
+ let item_did = item.def_id;
debug!("InferVisitor::visit_item(item={:?})", item_did);
- let hir_id = self.tcx.hir().local_def_id_to_hir_id(item_did);
- let item = match self.tcx.hir().get(hir_id) {
- Node::Item(item) => item,
- _ => bug!(),
- };
-
let mut item_required_predicates = RequiredPredicates::default();
match item.kind {
hir::ItemKind::Union(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Struct(..) => {
diff --git a/compiler/rustc_typeck/src/outlives/test.rs b/compiler/rustc_typeck/src/outlives/test.rs
index 56d42f7..d4bef0c 100644
--- a/compiler/rustc_typeck/src/outlives/test.rs
+++ b/compiler/rustc_typeck/src/outlives/test.rs
@@ -14,12 +14,10 @@
impl ItemLikeVisitor<'tcx> for OutlivesTest<'tcx> {
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- let item_def_id = self.tcx.hir().local_def_id(item.hir_id);
-
// For unit testing: check for a special "rustc_outlives"
// attribute and report an error with various results if found.
- if self.tcx.has_attr(item_def_id.to_def_id(), sym::rustc_outlives) {
- let inferred_outlives_of = self.tcx.inferred_outlives_of(item_def_id);
+ if self.tcx.has_attr(item.def_id.to_def_id(), sym::rustc_outlives) {
+ let inferred_outlives_of = self.tcx.inferred_outlives_of(item.def_id);
struct_span_err!(self.tcx.sess, item.span, E0640, "{:?}", inferred_outlives_of).emit();
}
}
diff --git a/compiler/rustc_typeck/src/variance/constraints.rs b/compiler/rustc_typeck/src/variance/constraints.rs
index 339eb5f..f5355ea 100644
--- a/compiler/rustc_typeck/src/variance/constraints.rs
+++ b/compiler/rustc_typeck/src/variance/constraints.rs
@@ -71,7 +71,7 @@
fn visit_item(&mut self, item: &hir::Item<'_>) {
match item.kind {
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
- self.visit_node_helper(item.hir_id);
+ self.visit_node_helper(item.hir_id());
if let hir::VariantData::Tuple(..) = *struct_def {
self.visit_node_helper(struct_def.ctor_hir_id().unwrap());
@@ -79,7 +79,7 @@
}
hir::ItemKind::Enum(ref enum_def, _) => {
- self.visit_node_helper(item.hir_id);
+ self.visit_node_helper(item.hir_id());
for variant in enum_def.variants {
if let hir::VariantData::Tuple(..) = variant.data {
@@ -89,7 +89,7 @@
}
hir::ItemKind::Fn(..) => {
- self.visit_node_helper(item.hir_id);
+ self.visit_node_helper(item.hir_id());
}
_ => {}
@@ -98,19 +98,19 @@
fn visit_trait_item(&mut self, trait_item: &hir::TraitItem<'_>) {
if let hir::TraitItemKind::Fn(..) = trait_item.kind {
- self.visit_node_helper(trait_item.hir_id);
+ self.visit_node_helper(trait_item.hir_id());
}
}
fn visit_impl_item(&mut self, impl_item: &hir::ImplItem<'_>) {
if let hir::ImplItemKind::Fn(..) = impl_item.kind {
- self.visit_node_helper(impl_item.hir_id);
+ self.visit_node_helper(impl_item.hir_id());
}
}
fn visit_foreign_item(&mut self, foreign_item: &hir::ForeignItem<'_>) {
if let hir::ForeignItemKind::Fn(..) = foreign_item.kind {
- self.visit_node_helper(foreign_item.hir_id);
+ self.visit_node_helper(foreign_item.hir_id());
}
}
}
@@ -207,7 +207,7 @@
}
}
- #[instrument(skip(self, current))]
+ #[instrument(level = "debug", skip(self, current))]
fn add_constraints_from_invariant_substs(
&mut self,
current: &CurrentItem,
diff --git a/compiler/rustc_typeck/src/variance/terms.rs b/compiler/rustc_typeck/src/variance/terms.rs
index 3b2a1c2..5d5baf78 100644
--- a/compiler/rustc_typeck/src/variance/terms.rs
+++ b/compiler/rustc_typeck/src/variance/terms.rs
@@ -128,11 +128,11 @@
impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for TermsContext<'a, 'tcx> {
fn visit_item(&mut self, item: &hir::Item<'_>) {
- debug!("add_inferreds for item {}", self.tcx.hir().node_to_string(item.hir_id));
+ debug!("add_inferreds for item {}", self.tcx.hir().node_to_string(item.hir_id()));
match item.kind {
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
- self.add_inferreds_for_item(item.hir_id);
+ self.add_inferreds_for_item(item.hir_id());
if let hir::VariantData::Tuple(..) = *struct_def {
self.add_inferreds_for_item(struct_def.ctor_hir_id().unwrap());
@@ -140,7 +140,7 @@
}
hir::ItemKind::Enum(ref enum_def, _) => {
- self.add_inferreds_for_item(item.hir_id);
+ self.add_inferreds_for_item(item.hir_id());
for variant in enum_def.variants {
if let hir::VariantData::Tuple(..) = variant.data {
@@ -150,7 +150,7 @@
}
hir::ItemKind::Fn(..) => {
- self.add_inferreds_for_item(item.hir_id);
+ self.add_inferreds_for_item(item.hir_id());
}
_ => {}
@@ -159,19 +159,19 @@
fn visit_trait_item(&mut self, trait_item: &hir::TraitItem<'_>) {
if let hir::TraitItemKind::Fn(..) = trait_item.kind {
- self.add_inferreds_for_item(trait_item.hir_id);
+ self.add_inferreds_for_item(trait_item.hir_id());
}
}
fn visit_impl_item(&mut self, impl_item: &hir::ImplItem<'_>) {
if let hir::ImplItemKind::Fn(..) = impl_item.kind {
- self.add_inferreds_for_item(impl_item.hir_id);
+ self.add_inferreds_for_item(impl_item.hir_id());
}
}
fn visit_foreign_item(&mut self, foreign_item: &hir::ForeignItem<'_>) {
if let hir::ForeignItemKind::Fn(..) = foreign_item.kind {
- self.add_inferreds_for_item(foreign_item.hir_id);
+ self.add_inferreds_for_item(foreign_item.hir_id());
}
}
}
diff --git a/compiler/rustc_typeck/src/variance/test.rs b/compiler/rustc_typeck/src/variance/test.rs
index d6e43b6..2a0d950 100644
--- a/compiler/rustc_typeck/src/variance/test.rs
+++ b/compiler/rustc_typeck/src/variance/test.rs
@@ -14,12 +14,10 @@
impl ItemLikeVisitor<'tcx> for VarianceTest<'tcx> {
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- let item_def_id = self.tcx.hir().local_def_id(item.hir_id);
-
// For unit testing: check for a special "rustc_variance"
// attribute and report an error with various results if found.
- if self.tcx.has_attr(item_def_id.to_def_id(), sym::rustc_variance) {
- let variances_of = self.tcx.variances_of(item_def_id);
+ if self.tcx.has_attr(item.def_id.to_def_id(), sym::rustc_variance) {
+ let variances_of = self.tcx.variances_of(item.def_id);
struct_span_err!(self.tcx.sess, item.span, E0208, "{:?}", variances_of).emit();
}
}