Rollup of 11 pull requests #122337

Closed
wants to merge 31 commits
31 commits
9a77ec9
Rename `-Zno_parallel_llvm` -> `-Zno_parallel_backend`
WaffleLapkin Feb 15, 2024
f368922
Allow codegen backends to opt-out of parallel codegen
WaffleLapkin Feb 15, 2024
8bb49e2
Propagate the resolved type of assoc const bindings via query feeding
fmease Dec 31, 2023
b94498a
Use existing query feeding workarounds
oli-obk Feb 21, 2024
858d336
Slightly simplify feeding of assoc const eq bounds
fmease Feb 23, 2024
d9a2886
add comment and test: we do not do value-based reasoning for promotio…
RalfJung Feb 29, 2024
30fa6a8
Rename `DropTreeBuilder::add_entry` to `link_entry_point`
Zalathar Mar 6, 2024
3bd8df9
Assert that `link_entry_point` sees the expected dummy terminator
Zalathar Mar 6, 2024
fbdac30
Rename `DropTree::add_entry` to `add_entry_point`
Zalathar Mar 6, 2024
5ba70bd
Replace tuples in `DropTree` with named structs
Zalathar Mar 6, 2024
d673fd8
Remove the unused `field_remapping` field from `TypeLowering`
beetrees Mar 8, 2024
bf47df8
interpret: do not call machine read hooks during validation
RalfJung Mar 9, 2024
58f6aaa
Improve diagnostics for parenthesized type arguments
wutchzone Mar 7, 2024
3830510
Ignore tests w/ current/next revisions from compare-mode=next-solver
compiler-errors Mar 11, 2024
73fc170
Store backtrace for must_produce_diag
compiler-errors Mar 10, 2024
01e6b43
Mark some next-solver-behavior tests explicitly with revisions
compiler-errors Mar 11, 2024
aea60b0
unix_sigpipe: Replace `inherit` with `sig_dfl` in syntax tests
Enselic Feb 9, 2024
816dc96
bootstrap readme: fix, improve, update
tshepang Mar 11, 2024
279465b
const-checking: add some corner case tests, and fix some nits
RalfJung Mar 2, 2024
fb802f2
promote-not: add test that distinguishes promotion from outer scope rule
RalfJung Mar 11, 2024
8f33766
Rollup merge of #116791 - WaffleLapkin:unparallel-backends, r=oli-obk
matthiaskrgr Mar 11, 2024
ccbeb90
Rollup merge of #119385 - fmease:assoc-const-eq-fixes-2, r=oli-obk,cj…
matthiaskrgr Mar 11, 2024
f477194
Rollup merge of #121893 - RalfJung:const-interior-mut-tests, r=oli-obk
matthiaskrgr Mar 11, 2024
627428c
Rollup merge of #122080 - Zalathar:drop-tree, r=oli-obk
matthiaskrgr Mar 11, 2024
23e4968
Rollup merge of #122152 - wutchzone:120892, r=fmease
matthiaskrgr Mar 11, 2024
0623371
Rollup merge of #122166 - beetrees:remove-field-remapping, r=davidtwco
matthiaskrgr Mar 11, 2024
cb48f18
Rollup merge of #122249 - RalfJung:machine-read-hook, r=oli-obk
matthiaskrgr Mar 11, 2024
1961865
Rollup merge of #122299 - compiler-errors:bt-for-must-diag, r=nnether…
matthiaskrgr Mar 11, 2024
1d4ba52
Rollup merge of #122318 - compiler-errors:next-solver-tests, r=lcnr
matthiaskrgr Mar 11, 2024
4a2a60a
Rollup merge of #122328 - Enselic:sig_dfl-not-inherit, r=davidtwco
matthiaskrgr Mar 11, 2024
2099e04
Rollup merge of #122330 - tshepang:patch-1, r=clubby789
matthiaskrgr Mar 11, 2024
13 changes: 2 additions & 11 deletions compiler/rustc_codegen_llvm/src/context.rs
@@ -77,8 +77,8 @@ pub struct CodegenCx<'ll, 'tcx> {
/// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
pub compiler_used_statics: RefCell<Vec<&'ll Value>>,

/// Mapping of non-scalar types to llvm types and field remapping if needed.
pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>,
/// Mapping of non-scalar types to llvm types.
pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>,

/// Mapping of scalar types to llvm types.
pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
@@ -105,15 +105,6 @@ pub struct CodegenCx<'ll, 'tcx> {
pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
}

pub struct TypeLowering<'ll> {
/// Associated LLVM type
pub lltype: &'ll Type,

/// If padding is used the slice maps fields from source order
/// to llvm order.
pub field_remapping: Option<SmallVec<[u32; 4]>>,
}

fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
match tls_model {
TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
30 changes: 7 additions & 23 deletions compiler/rustc_codegen_llvm/src/type_of.rs
@@ -1,5 +1,4 @@
use crate::common::*;
use crate::context::TypeLowering;
use crate::type_::Type;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
@@ -10,15 +9,13 @@ use rustc_target::abi::HasDataLayout;
use rustc_target::abi::{Abi, Align, FieldsShape};
use rustc_target::abi::{Int, Pointer, F128, F16, F32, F64};
use rustc_target::abi::{Scalar, Size, Variants};
use smallvec::{smallvec, SmallVec};

use std::fmt::Write;

fn uncached_llvm_type<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
layout: TyAndLayout<'tcx>,
defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
field_remapping: &mut Option<SmallVec<[u32; 4]>>,
) -> &'a Type {
match layout.abi {
Abi::Scalar(_) => bug!("handled elsewhere"),
@@ -71,8 +68,7 @@ fn uncached_llvm_type<'a, 'tcx>(
FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
FieldsShape::Arbitrary { .. } => match name {
None => {
let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
*field_remapping = new_field_remapping;
let (llfields, packed) = struct_llfields(cx, layout);
cx.type_struct(&llfields, packed)
}
Some(ref name) => {
@@ -87,15 +83,14 @@
fn struct_llfields<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
layout: TyAndLayout<'tcx>,
) -> (Vec<&'a Type>, bool, Option<SmallVec<[u32; 4]>>) {
) -> (Vec<&'a Type>, bool) {
debug!("struct_llfields: {:#?}", layout);
let field_count = layout.fields.count();

let mut packed = false;
let mut offset = Size::ZERO;
let mut prev_effective_align = layout.align.abi;
let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
let mut field_remapping = smallvec![0; field_count];
for i in layout.fields.index_by_increasing_offset() {
let target_offset = layout.fields.offset(i as usize);
let field = layout.field(cx, i);
@@ -120,12 +115,10 @@
result.push(cx.type_padding_filler(padding, padding_align));
debug!(" padding before: {:?}", padding);
}
field_remapping[i] = result.len() as u32;
result.push(field.llvm_type(cx));
offset = target_offset + field.size;
prev_effective_align = effective_field_align;
}
let padding_used = result.len() > field_count;
if layout.is_sized() && field_count > 0 {
if offset > layout.size {
bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
@@ -143,8 +136,7 @@
} else {
debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
}
let field_remapping = padding_used.then_some(field_remapping);
(result, packed, field_remapping)
(result, packed)
}

impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
@@ -224,7 +216,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
_ => None,
};
if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
return llty.lltype;
return llty;
}

debug!("llvm_type({:#?})", self);
@@ -236,30 +228,22 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
let normal_ty = cx.tcx.erase_regions(self.ty);

let mut defer = None;
let mut field_remapping = None;
let llty = if self.ty != normal_ty {
let mut layout = cx.layout_of(normal_ty);
if let Some(v) = variant_index {
layout = layout.for_variant(cx, v);
}
layout.llvm_type(cx)
} else {
uncached_llvm_type(cx, *self, &mut defer, &mut field_remapping)
uncached_llvm_type(cx, *self, &mut defer)
};
debug!("--> mapped {:#?} to llty={:?}", self, llty);

cx.type_lowering
.borrow_mut()
.insert((self.ty, variant_index), TypeLowering { lltype: llty, field_remapping });
cx.type_lowering.borrow_mut().insert((self.ty, variant_index), llty);

if let Some((llty, layout)) = defer {
let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
let (llfields, packed) = struct_llfields(cx, layout);
cx.set_struct_body(llty, &llfields, packed);
cx.type_lowering
.borrow_mut()
.get_mut(&(self.ty, variant_index))
.unwrap()
.field_remapping = new_field_remapping;
}
llty
}
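
For readers following the `struct_llfields` change above: after this rollup the function returns only the LLVM field types and the `packed` flag, and no longer builds a source-order-to-LLVM-order remapping table. Below is a minimal, dependency-free sketch of the surviving padding logic; the names (`LlField`, `struct_llfields_sketch`) are illustrative stand-ins, not rustc API.

#[derive(Debug)]
enum LlField {
    Padding { bytes: u64 },
    Field { index: usize, bytes: u64 },
}

// `fields` holds (target_offset, size) per field, already sorted by increasing offset,
// mirroring `layout.fields.index_by_increasing_offset()` in the real code.
fn struct_llfields_sketch(fields: &[(u64, u64)]) -> (Vec<LlField>, bool) {
    let packed = false; // the real code derives this from field vs. struct alignment
    let mut result = Vec::with_capacity(1 + fields.len() * 2);
    let mut offset = 0u64;
    for (i, &(target_offset, size)) in fields.iter().enumerate() {
        assert!(target_offset >= offset, "fields must be laid out in increasing order");
        let padding = target_offset - offset;
        if padding > 0 {
            // Stand-in for `cx.type_padding_filler(padding, padding_align)`.
            result.push(LlField::Padding { bytes: padding });
        }
        result.push(LlField::Field { index: i, bytes: size });
        offset = target_offset + size;
    }
    // Note: no `field_remapping` is returned any more; callers only need the
    // field list and the packed flag.
    (result, packed)
}

fn main() {
    // Two fields at offsets 0 and 8 with sizes 1 and 4: seven padding bytes in between.
    let (llfields, packed) = struct_llfields_sketch(&[(0, 1), (8, 4)]);
    println!("{llfields:?} packed={packed}");
}
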
9 changes: 7 additions & 2 deletions compiler/rustc_codegen_ssa/src/back/write.rs
@@ -374,6 +374,10 @@ pub struct CodegenContext<B: WriteBackendMethods> {
pub incr_comp_session_dir: Option<PathBuf>,
/// Channel back to the main control thread to send messages to
pub coordinator_send: Sender<Box<dyn Any + Send>>,
/// `true` if the codegen should be run in parallel.
///
/// Depends on [`CodegenBackend::supports_parallel()`] and `-Zno_parallel_backend`.
pub parallel: bool,
}

impl<B: WriteBackendMethods> CodegenContext<B> {
@@ -1152,6 +1156,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
target_arch: tcx.sess.target.arch.to_string(),
split_debuginfo: tcx.sess.split_debuginfo(),
split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
};

// This is the "main loop" of parallel work happening for parallel codegen.
@@ -1422,7 +1427,7 @@
.binary_search_by_key(&cost, |&(_, cost)| cost)
.unwrap_or_else(|e| e);
work_items.insert(insertion_index, (work, cost));
if !cgcx.opts.unstable_opts.no_parallel_llvm {
if cgcx.parallel {
helper.request_token();
}
}
@@ -1545,7 +1550,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
};
work_items.insert(insertion_index, (llvm_work_item, cost));

if !cgcx.opts.unstable_opts.no_parallel_llvm {
if cgcx.parallel {
helper.request_token();
}
assert_eq!(main_thread_state, MainThreadState::Codegenning);
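
As a rough illustration of the coordinator-side change above (not the real rustc types): the decision is now computed once into `CodegenContext::parallel` and consulted wherever the work loop previously re-read `-Zno_parallel_llvm`. All names below are stand-ins for illustration.

struct CodegenContext {
    // `true` only if the backend supports it and -Zno_parallel_backend is not set.
    parallel: bool,
}

#[derive(Default)]
struct Helper {
    tokens_requested: usize,
}

impl Helper {
    fn request_token(&mut self) {
        self.tokens_requested += 1;
    }
}

fn enqueue_work(cgcx: &CodegenContext, helper: &mut Helper, queue: &mut Vec<String>, work: String) {
    queue.push(work);
    // Same shape as the two hunks above: only ask the jobserver for another
    // token when parallel codegen is actually enabled.
    if cgcx.parallel {
        helper.request_token();
    }
}

fn main() {
    let cgcx = CodegenContext { parallel: false };
    let (mut helper, mut queue) = (Helper::default(), Vec::new());
    enqueue_work(&cgcx, &mut helper, &mut queue, "cgu.0".to_string());
    assert_eq!(helper.tokens_requested, 0); // a serial backend never requests extra tokens
}
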
7 changes: 7 additions & 0 deletions compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -111,6 +111,13 @@ pub trait CodegenBackend {
codegen_results: CodegenResults,
outputs: &OutputFilenames,
) -> Result<(), ErrorGuaranteed>;

/// Returns `true` if this backend can be safely called from multiple threads.
///
/// Defaults to `true`.
fn supports_parallel(&self) -> bool {
true
}
}

pub trait ExtraBackendMethods:
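
A self-contained sketch of how a hypothetical non-thread-safe backend would use the new hook. The trait below is a stand-in containing only the new method (the real `CodegenBackend` trait has many more required items), and `SerialOnlyBackend` is invented for illustration.

trait CodegenBackend {
    /// Returns `true` if this backend can be safely called from multiple threads.
    /// Defaults to `true`, so existing backends are unaffected.
    fn supports_parallel(&self) -> bool {
        true
    }
}

/// A hypothetical backend with thread-unsafe internals, opting out of parallel codegen.
struct SerialOnlyBackend;

impl CodegenBackend for SerialOnlyBackend {
    fn supports_parallel(&self) -> bool {
        false
    }
}

fn main() {
    let backend = SerialOnlyBackend;
    let no_parallel_backend = false; // would come from -Zno_parallel_backend
    // Mirrors the gating added in rustc_codegen_ssa::back::write above.
    let parallel = backend.supports_parallel() && !no_parallel_backend;
    assert!(!parallel);
}
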
16 changes: 6 additions & 10 deletions compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -380,16 +380,12 @@ pub fn eval_in_interpreter<'mir, 'tcx>(
}
Ok(mplace) => {
// Since evaluation had no errors, validate the resulting constant.

// Temporarily allow access to the static_root_alloc_id for the purpose of validation.
let static_root_alloc_id = ecx.machine.static_root_alloc_id.take();
let validation = const_validate_mplace(&ecx, &mplace, cid);
ecx.machine.static_root_alloc_id = static_root_alloc_id;
let res = const_validate_mplace(&ecx, &mplace, cid);

let alloc_id = mplace.ptr().provenance.unwrap().alloc_id();

// Validation failed, report an error.
if let Err(error) = validation {
if let Err(error) = res {
Err(const_report_error(&ecx, error, alloc_id))
} else {
// Convert to raw constant
@@ -412,10 +408,10 @@
_ if cid.promoted.is_some() => CtfeValidationMode::Promoted,
Some(mutbl) => CtfeValidationMode::Static { mutbl }, // a `static`
None => {
// In normal `const` (not promoted), the outermost allocation is always only copied,
// so having `UnsafeCell` in there is okay despite them being in immutable memory.
let allow_immutable_unsafe_cell = cid.promoted.is_none() && !inner;
CtfeValidationMode::Const { allow_immutable_unsafe_cell }
// This is a normal `const` (not promoted).
// The outermost allocation is always only copied, so having `UnsafeCell` in there
// is okay despite them being in immutable memory.
CtfeValidationMode::Const { allow_immutable_unsafe_cell: !inner }
}
};
ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
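
As a concrete illustration of the `allow_immutable_unsafe_cell` comment above (an informal reading, not an authoritative spec): the value of a non-promoted `const` is copied at every use site, so interior mutability in its outermost allocation is accepted, while a reference pointing into that immutable memory is not.

use std::cell::Cell;

// Accepted: each use of `COUNTER` copies the value, so the `UnsafeCell` inside
// `Cell` never ends up behind a shared reference to immutable CTFE memory.
const COUNTER: Cell<i32> = Cell::new(0);

// By contrast, something like `const R: &Cell<i32> = &Cell::new(0);` is rejected,
// because the reference would point at interior-mutable data in a constant.

fn main() {
    let c = COUNTER; // fresh copy of the const's value
    c.set(41);
    assert_eq!(c.get() + 1, 42);
}
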
4 changes: 4 additions & 0 deletions compiler/rustc_const_eval/src/interpret/machine.rs
@@ -391,6 +391,8 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {

/// Hook for performing extra checks on a memory read access.
///
/// This will *not* be called during validation!
///
/// Takes read-only access to the allocation so we can keep all the memory read
/// operations take `&self`. Use a `RefCell` in `AllocExtra` if you
/// need to mutate.
@@ -410,6 +412,8 @@
/// Hook for performing extra checks on any memory read access,
/// that involves an allocation, even ZST reads.
///
/// This will *not* be called during validation!
///
/// Used to prevent statics from self-initializing by reading from their own memory
/// as it is being initialized.
fn before_alloc_read(
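
To make the doc comments above more concrete, here is a toy stand-in (not the real `Machine` trait) for the pattern they describe: read hooks only get shared access, so per-allocation bookkeeping lives behind a `RefCell` in the allocation's extra data, and with this rollup such hooks are simply not invoked while validation is running.

use std::cell::RefCell;

#[derive(Default)]
struct AllocExtra {
    reads_seen: RefCell<u64>, // mutated through &self, as the doc comment suggests
}

struct ToyMachine;

impl ToyMachine {
    // Analogue of `before_memory_read`: takes the allocation extra by shared reference.
    fn before_memory_read(&self, extra: &AllocExtra) {
        *extra.reads_seen.borrow_mut() += 1;
    }
}

fn main() {
    let (machine, extra) = (ToyMachine, AllocExtra::default());
    machine.before_memory_read(&extra);
    assert_eq!(*extra.reads_seen.borrow(), 1);
}
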
43 changes: 38 additions & 5 deletions compiler/rustc_const_eval/src/interpret/memory.rs
@@ -8,6 +8,7 @@

use std::assert_matches::assert_matches;
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::VecDeque;
use std::fmt;
use std::ptr;
@@ -111,6 +112,11 @@ pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// that do not exist any more.
// FIXME: this should not be public, but interning currently needs access to it
pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>,

/// This stores whether we are currently doing reads purely for the purpose of validation.
/// Those reads do not trigger the machine's hooks for memory reads.
/// Needless to say, this must only be set with great care!
validation_in_progress: Cell<bool>,
}

/// A reference to some allocation that was already bounds-checked for the given region
@@ -137,6 +143,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
alloc_map: M::MemoryMap::default(),
extra_fn_ptr_map: FxIndexMap::default(),
dead_alloc_map: FxIndexMap::default(),
validation_in_progress: Cell::new(false),
}
}

@@ -624,18 +631,28 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
size,
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, prov| {
// We want to call the hook on *all* accesses that involve an AllocId,
// including zero-sized accesses. That means we have to do it here
// rather than below in the `Some` branch.
M::before_alloc_read(self, alloc_id)?;
if !self.memory.validation_in_progress.get() {
// We want to call the hook on *all* accesses that involve an AllocId,
// including zero-sized accesses. That means we have to do it here
// rather than below in the `Some` branch.
M::before_alloc_read(self, alloc_id)?;
}
let alloc = self.get_alloc_raw(alloc_id)?;
Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
},
)?;

if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
let range = alloc_range(offset, size);
M::before_memory_read(self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
if !self.memory.validation_in_progress.get() {
M::before_memory_read(
self.tcx,
&self.machine,
&alloc.extra,
(alloc_id, prov),
range,
)?;
}
Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
} else {
Ok(None)
@@ -909,6 +926,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
})
}

/// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
/// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
pub(super) fn run_for_validation<R>(&self, f: impl FnOnce() -> R) -> R {
assert!(
self.memory.validation_in_progress.replace(true) == false,
"`validation_in_progress` was already set"
);
let res = f();
assert!(
self.memory.validation_in_progress.replace(false) == true,
"`validation_in_progress` was unset by someone else"
);
res
}
}

#[doc(hidden)]
@@ -1154,6 +1186,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
let src_alloc = self.get_alloc_raw(src_alloc_id)?;
let src_range = alloc_range(src_offset, size);
assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");
M::before_memory_read(
tcx,
&self.machine,
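
The `validation_in_progress` plumbing above boils down to a small non-reentrant flag pattern. The sketch below reproduces it with stand-in types only (no interpreter machinery), including the check that suppresses the read hooks while the flag is set:

use std::cell::Cell;

struct Memory {
    validation_in_progress: Cell<bool>,
}

impl Memory {
    fn run_for_validation<R>(&self, f: impl FnOnce() -> R) -> R {
        // `Cell::replace` returns the previous value, so nesting is caught immediately.
        assert!(
            !self.validation_in_progress.replace(true),
            "`validation_in_progress` was already set"
        );
        let res = f();
        assert!(
            self.validation_in_progress.replace(false),
            "`validation_in_progress` was unset by someone else"
        );
        res
    }

    fn read_hooks_enabled(&self) -> bool {
        // Mirrors the `if !self.memory.validation_in_progress.get()` checks above.
        !self.validation_in_progress.get()
    }
}

fn main() {
    let mem = Memory { validation_in_progress: Cell::new(false) };
    assert!(mem.read_hooks_enabled());
    mem.run_for_validation(|| assert!(!mem.read_hooks_enabled()));
    assert!(mem.read_hooks_enabled());
}
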
2 changes: 1 addition & 1 deletion compiler/rustc_const_eval/src/interpret/validity.rs
@@ -971,7 +971,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };

// Run it.
match visitor.visit_value(op) {
match self.run_for_validation(|| visitor.visit_value(op)) {
Ok(()) => Ok(()),
// Pass through validation failures and "invalid program" issues.
Err(err)
@@ -30,7 +30,7 @@ type QualifResults<'mir, 'tcx, Q> =
rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>;

#[derive(Default)]
pub struct Qualifs<'mir, 'tcx> {
pub(crate) struct Qualifs<'mir, 'tcx> {
has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
@@ -284,6 +284,7 @@ where
if Q::in_adt_inherently(cx, def, args) {
return true;
}
// Don't do any value-based reasoning for unions.
if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) {
return true;
}
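
An informal illustration of the comment added above (a reading of the qualif analysis, not a specification): for a union, qualification is decided from the type alone, so a value is treated as possibly interior-mutable even when only the plain field was written. The union and const below are invented for illustration.

use std::cell::UnsafeCell;
use std::mem::ManuallyDrop;

#[allow(dead_code)]
union Ambivalent {
    plain: u8,
    cell: ManuallyDrop<UnsafeCell<u8>>,
}

// Only `plain` is ever written here...
const C: Ambivalent = Ambivalent { plain: 0 };

fn main() {
    // ...but because `Ambivalent`'s type may contain an `UnsafeCell`, the
    // analysis does not reason about which field was actually stored, and a
    // value of this type is not treated as free of interior mutability.
    let _value = C;
}
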