Commit ad2d91c
Rollup merge of #141507 - RalfJung:atomic-intrinsics, r=bjorn3
atomic_load intrinsic: use const generic parameter for ordering

We have a gazillion intrinsics for the atomics because we encode the ordering into the intrinsic name rather than making it a parameter. This is particularly bad for those operations that take two orderings. Let's fix that!

This PR only converts `load`, to see if there's any feedback that would fundamentally change the strategy we pursue for the const generic intrinsics. The first two commits are preparation and could be a separate PR if you prefer.

`@BoxyUwU` -- I hope this is a use of const generics that is unlikely to explode? All we need is a const generic of enum type. We could funnel it through an integer if we had to, but an enum is obviously nicer...

`@bjorn3` it seems like the cranelift backend entirely ignores the ordering?
2 parents a87bc9d + a387c86 commit ad2d91c

20 files changed: +210 additions, -105 deletions
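Before the per-file diffs, here is a minimal, nightly-only sketch of the shape the commit message describes: the ordering becomes a const generic parameter of enum type rather than part of the intrinsic name. Everything below is illustrative (the names, the derives, and the placeholder body), not the actual `core::intrinsics` declaration; only the discriminant values mirror the ones added to `rustc_middle` in this commit.

```rust
// Sketch only: assumes a nightly toolchain with `adt_const_params`.
#![feature(adt_const_params)]
use std::marker::ConstParamTy;

// Stand-in for the library-side ordering enum.
#[derive(Debug, PartialEq, Eq, Clone, Copy, ConstParamTy)]
enum AtomicOrdering {
    Relaxed = 0,
    Release = 1,
    Acquire = 2,
    AcqRel = 3,
    SeqCst = 4,
}

// Hypothetical stand-in for the intrinsic: one generic function instead of one
// intrinsic per ordering (atomic_load_relaxed, atomic_load_acquire, ...).
unsafe fn atomic_load<T: Copy, const ORD: AtomicOrdering>(src: *const T) -> T {
    let _ordering = ORD; // the real intrinsic would forward this to the backend
    unsafe { src.read() }
}

fn main() {
    let x = 42u32;
    let v = unsafe { atomic_load::<u32, { AtomicOrdering::Acquire }>(&x) };
    assert_eq!(v, 42);
}
```

A single `atomic_load` with a const generic ordering can then replace the whole `atomic_load_<ordering>` family, which is what the diffs below wire through type checking and the backends.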

compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs

Lines changed: 2 additions & 1 deletion
@@ -870,11 +870,12 @@ fn codegen_regular_intrinsic_call<'tcx>(
             // FIXME use a compiler fence once Cranelift supports it
             fx.bcx.ins().fence();
         }
-        _ if intrinsic.as_str().starts_with("atomic_load") => {
+        sym::atomic_load => {
             intrinsic_args!(fx, args => (ptr); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
             let ty = generic_args.type_at(0);
+            let _ord = generic_args.const_at(1).to_value(); // FIXME: forward this to cranelift once they support that
             match ty.kind() {
                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                     // FIXME implement 128bit atomics

compiler/rustc_codegen_gcc/src/builder.rs

Lines changed: 5 additions & 5 deletions
@@ -12,7 +12,7 @@ use rustc_abi::{Align, HasDataLayout, Size, TargetDataLayout, WrappingRange};
 use rustc_apfloat::{Float, Round, Status, ieee};
 use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::common::{
-    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
+    AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
 };
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
@@ -26,7 +26,7 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::ty::layout::{
     FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, HasTypingEnv, LayoutError, LayoutOfHelpers,
 };
-use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::ty::{self, AtomicOrdering, Instance, Ty, TyCtxt};
 use rustc_span::Span;
 use rustc_span::def_id::DefId;
 use rustc_target::callconv::FnAbi;
@@ -75,7 +75,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
         let load_ordering = match order {
             // TODO(antoyo): does this make sense?
-            AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+            AtomicOrdering::AcqRel | AtomicOrdering::Release => AtomicOrdering::Acquire,
             _ => order,
         };
         let previous_value =
@@ -2474,8 +2474,8 @@ impl ToGccOrdering for AtomicOrdering {
             AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
             AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
             AtomicOrdering::Release => __ATOMIC_RELEASE,
-            AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
-            AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
+            AtomicOrdering::AcqRel => __ATOMIC_ACQ_REL,
+            AtomicOrdering::SeqCst => __ATOMIC_SEQ_CST,
         };
         ordering as i32
     }

compiler/rustc_codegen_llvm/src/builder.rs

Lines changed: 6 additions & 6 deletions
@@ -612,7 +612,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         &mut self,
         ty: &'ll Type,
         ptr: &'ll Value,
-        order: rustc_codegen_ssa::common::AtomicOrdering,
+        order: rustc_middle::ty::AtomicOrdering,
         size: Size,
     ) -> &'ll Value {
         unsafe {
@@ -851,7 +851,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         &mut self,
         val: &'ll Value,
         ptr: &'ll Value,
-        order: rustc_codegen_ssa::common::AtomicOrdering,
+        order: rustc_middle::ty::AtomicOrdering,
         size: Size,
     ) {
         debug!("Store {:?} -> {:?}", val, ptr);
@@ -1307,8 +1307,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         dst: &'ll Value,
         cmp: &'ll Value,
         src: &'ll Value,
-        order: rustc_codegen_ssa::common::AtomicOrdering,
-        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
+        order: rustc_middle::ty::AtomicOrdering,
+        failure_order: rustc_middle::ty::AtomicOrdering,
         weak: bool,
     ) -> (&'ll Value, &'ll Value) {
         let weak = if weak { llvm::True } else { llvm::False };
@@ -1334,7 +1334,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         op: rustc_codegen_ssa::common::AtomicRmwBinOp,
         dst: &'ll Value,
         mut src: &'ll Value,
-        order: rustc_codegen_ssa::common::AtomicOrdering,
+        order: rustc_middle::ty::AtomicOrdering,
     ) -> &'ll Value {
         // The only RMW operation that LLVM supports on pointers is compare-exchange.
         let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
@@ -1360,7 +1360,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn atomic_fence(
         &mut self,
-        order: rustc_codegen_ssa::common::AtomicOrdering,
+        order: rustc_middle::ty::AtomicOrdering,
         scope: SynchronizationScope,
     ) {
         let single_threaded = match scope {

compiler/rustc_codegen_llvm/src/llvm/ffi.rs

Lines changed: 4 additions & 4 deletions
@@ -426,14 +426,14 @@ pub(crate) enum AtomicOrdering {
 }
 
 impl AtomicOrdering {
-    pub(crate) fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self {
-        use rustc_codegen_ssa::common::AtomicOrdering as Common;
+    pub(crate) fn from_generic(ao: rustc_middle::ty::AtomicOrdering) -> Self {
+        use rustc_middle::ty::AtomicOrdering as Common;
         match ao {
             Common::Relaxed => Self::Monotonic,
             Common::Acquire => Self::Acquire,
             Common::Release => Self::Release,
-            Common::AcquireRelease => Self::AcquireRelease,
-            Common::SequentiallyConsistent => Self::SequentiallyConsistent,
+            Common::AcqRel => Self::AcquireRelease,
+            Common::SeqCst => Self::SequentiallyConsistent,
         }
     }
 }

compiler/rustc_codegen_ssa/src/common.rs

Lines changed: 0 additions & 9 deletions
@@ -59,15 +59,6 @@ pub enum AtomicRmwBinOp {
     AtomicUMin,
 }
 
-#[derive(Copy, Clone, Debug)]
-pub enum AtomicOrdering {
-    Relaxed,
-    Acquire,
-    Release,
-    AcquireRelease,
-    SequentiallyConsistent,
-}
-
 #[derive(Copy, Clone, Debug)]
 pub enum SynchronizationScope {
     SingleThread,

compiler/rustc_codegen_ssa/src/mir/intrinsic.rs

Lines changed: 54 additions & 37 deletions
@@ -99,6 +99,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
 
+        let ret_llval = |bx: &mut Bx, llval| {
+            if result.layout.ty.is_bool() {
+                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
+                    .val
+                    .store(bx, result);
+            } else if !result.layout.ty.is_unit() {
+                bx.store_to_place(llval, result.val);
+            }
+            Ok(())
+        };
+
         let llval = match name {
             sym::abort => {
                 bx.abort();
@@ -334,9 +345,48 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // This requires that atomic intrinsics follow a specific naming pattern:
             // "atomic_<operation>[_<ordering>]"
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
-                use crate::common::AtomicOrdering::*;
+                use rustc_middle::ty::AtomicOrdering::*;
+
                 use crate::common::{AtomicRmwBinOp, SynchronizationScope};
 
+                let invalid_monomorphization = |ty| {
+                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
+                        span,
+                        name,
+                        ty,
+                    });
+                };
+
+                let parse_const_generic_ordering = |ord: ty::Value<'tcx>| {
+                    let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
+                    discr.to_atomic_ordering()
+                };
+
+                // Some intrinsics have the ordering already converted to a const generic parameter, we handle those first.
+                match name {
+                    sym::atomic_load => {
+                        let ty = fn_args.type_at(0);
+                        let ordering = fn_args.const_at(1).to_value();
+                        if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
+                            invalid_monomorphization(ty);
+                            return Ok(());
+                        }
+                        let layout = bx.layout_of(ty);
+                        let source = args[0].immediate();
+                        let llval = bx.atomic_load(
+                            bx.backend_type(layout),
+                            source,
+                            parse_const_generic_ordering(ordering),
+                            layout.size,
+                        );
+
+                        return ret_llval(bx, llval);
+                    }
+
+                    // The rest falls back to below.
+                    _ => {}
+                }
+
                 let Some((instruction, ordering)) = atomic.split_once('_') else {
                     bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
                 };
@@ -345,19 +395,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     "relaxed" => Relaxed,
                     "acquire" => Acquire,
                     "release" => Release,
-                    "acqrel" => AcquireRelease,
-                    "seqcst" => SequentiallyConsistent,
+                    "acqrel" => AcqRel,
+                    "seqcst" => SeqCst,
                     _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
                 };
 
-                let invalid_monomorphization = |ty| {
-                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
-                        span,
-                        name,
-                        ty,
-                    });
-                };
-
                 match instruction {
                     "cxchg" | "cxchgweak" => {
                         let Some((success, failure)) = ordering.split_once('_') else {
@@ -390,24 +432,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         return Ok(());
                     }
 
-                    "load" => {
-                        let ty = fn_args.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                            let layout = bx.layout_of(ty);
-                            let size = layout.size;
-                            let source = args[0].immediate();
-                            bx.atomic_load(
-                                bx.backend_type(layout),
-                                source,
-                                parse_ordering(bx, ordering),
-                                size,
-                            )
-                        } else {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                    }
-
                     "store" => {
                         let ty = fn_args.type_at(0);
                         if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
@@ -538,14 +562,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         };
 
-        if result.layout.ty.is_bool() {
-            OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
-                .val
-                .store(bx, result);
-        } else if !result.layout.ty.is_unit() {
-            bx.store_to_place(llval, result.val);
-        }
-        Ok(())
+        ret_llval(bx, llval)
     }
 }
 
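As a side note, here is a self-contained sketch of the dispatch structure introduced above, with all names invented for illustration (they are not rustc APIs): intrinsics whose ordering arrives as a const generic argument are handled first, and everything else still falls back to parsing the ordering out of the `atomic_<operation>_<ordering>` name.

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum AtomicOrdering {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

// Legacy path: recover the ordering from the name suffix.
fn parse_suffix(ord: &str) -> Option<AtomicOrdering> {
    Some(match ord {
        "relaxed" => AtomicOrdering::Relaxed,
        "acquire" => AtomicOrdering::Acquire,
        "release" => AtomicOrdering::Release,
        "acqrel" => AtomicOrdering::AcqRel,
        "seqcst" => AtomicOrdering::SeqCst,
        _ => return None,
    })
}

// `const_ord` stands in for the ordering recovered from the const generic argument
// (the real code reads it out of a valtree via `to_atomic_ordering`).
fn dispatch(name: &str, const_ord: Option<AtomicOrdering>) -> Option<(&str, AtomicOrdering)> {
    let atomic = name.strip_prefix("atomic_")?;
    // New form: "atomic_<operation>", ordering passed as a const generic.
    if let Some(ord) = const_ord {
        return Some((atomic, ord));
    }
    // Legacy form: "atomic_<operation>_<ordering>".
    let (op, ord) = atomic.split_once('_')?;
    Some((op, parse_suffix(ord)?))
}

fn main() {
    assert_eq!(
        dispatch("atomic_load", Some(AtomicOrdering::Acquire)),
        Some(("load", AtomicOrdering::Acquire))
    );
    assert_eq!(
        dispatch("atomic_store_seqcst", None),
        Some(("store", AtomicOrdering::SeqCst))
    );
}
```
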
compiler/rustc_codegen_ssa/src/traits/builder.rs

Lines changed: 2 additions & 4 deletions
@@ -4,7 +4,7 @@ use std::ops::Deref;
 use rustc_abi::{Align, Scalar, Size, WrappingRange};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
-use rustc_middle::ty::{Instance, Ty};
+use rustc_middle::ty::{AtomicOrdering, Instance, Ty};
 use rustc_session::config::OptLevel;
 use rustc_span::Span;
 use rustc_target::callconv::FnAbi;
@@ -19,9 +19,7 @@ use super::misc::MiscCodegenMethods;
 use super::type_::{ArgAbiBuilderMethods, BaseTypeCodegenMethods, LayoutTypeCodegenMethods};
 use super::{CodegenMethods, StaticBuilderMethods};
 use crate::MemFlags;
-use crate::common::{
-    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
-};
+use crate::common::{AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
 use crate::mir::operand::{OperandRef, OperandValue};
 use crate::mir::place::{PlaceRef, PlaceValue};

compiler/rustc_hir_analysis/src/check/intrinsic.rs

Lines changed: 7 additions & 6 deletions
@@ -204,24 +204,25 @@ pub(crate) fn check_intrinsic_type(
 
         // Each atomic op has variants with different suffixes (`_seq_cst`, `_acquire`, etc.). Use
         // string ops to strip the suffixes, because the variants all get the same treatment here.
-        let (n_tps, inputs, output) = match split[1] {
+        let (n_tps, n_cts, inputs, output) = match split[1] {
             "cxchg" | "cxchgweak" => (
                 1,
+                0,
                 vec![Ty::new_mut_ptr(tcx, param(0)), param(0), param(0)],
                 Ty::new_tup(tcx, &[param(0), tcx.types.bool]),
             ),
-            "load" => (1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
-            "store" => (1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
+            "load" => (1, 1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
+            "store" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
 
             "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax"
-            | "umin" => (1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
-            "fence" | "singlethreadfence" => (0, Vec::new(), tcx.types.unit),
+            | "umin" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
+            "fence" | "singlethreadfence" => (0, 0, Vec::new(), tcx.types.unit),
             op => {
                 tcx.dcx().emit_err(UnrecognizedAtomicOperation { span, op });
                 return;
            }
        };
-        (n_tps, 0, 0, inputs, output, hir::Safety::Unsafe)
+        (n_tps, 0, n_cts, inputs, output, hir::Safety::Unsafe)
     } else if intrinsic_name == sym::contract_check_ensures {
         // contract_check_ensures::<Ret, C>(Ret, C) -> Ret
         // where C: for<'a> Fn(&'a Ret) -> bool,

compiler/rustc_middle/src/ty/consts/int.rs

Lines changed: 33 additions & 1 deletion
@@ -26,6 +26,19 @@ impl ConstInt {
     }
 }
 
+/// An enum to represent the compiler-side view of `intrinsics::AtomicOrdering`.
+/// This lives here because there's a method in this file that needs it and it is entirely unclear
+/// where else to put this...
+#[derive(Debug, Copy, Clone)]
+pub enum AtomicOrdering {
+    // These values must match `intrinsics::AtomicOrdering`!
+    Relaxed = 0,
+    Release = 1,
+    Acquire = 2,
+    AcqRel = 3,
+    SeqCst = 4,
+}
+
 impl std::fmt::Debug for ConstInt {
     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let Self { int, signed, is_ptr_sized_integral } = *self;
@@ -318,6 +331,25 @@ impl ScalarInt {
         self.to_uint(tcx.data_layout.pointer_size).try_into().unwrap()
     }
 
+    #[inline]
+    pub fn to_atomic_ordering(self) -> AtomicOrdering {
+        use AtomicOrdering::*;
+        let val = self.to_u32();
+        if val == Relaxed as u32 {
+            Relaxed
+        } else if val == Release as u32 {
+            Release
+        } else if val == Acquire as u32 {
+            Acquire
+        } else if val == AcqRel as u32 {
+            AcqRel
+        } else if val == SeqCst as u32 {
+            SeqCst
+        } else {
+            panic!("not a valid atomic ordering")
+        }
+    }
+
     /// Converts the `ScalarInt` to `bool`.
     /// Panics if the `size` of the `ScalarInt` is not equal to 1 byte.
     /// Errors if it is not a valid `bool`.
@@ -488,7 +520,7 @@ from_scalar_int_for_x_signed!(i8, i16, i32, i64, i128);
 impl From<std::cmp::Ordering> for ScalarInt {
     #[inline]
     fn from(c: std::cmp::Ordering) -> Self {
-        // Here we rely on `Ordering` having the same values in host and target!
+        // Here we rely on `cmp::Ordering` having the same values in host and target!
         ScalarInt::from(c as i8)
     }
 }
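For illustration, here is a standalone sketch of the discriminant round-trip that `to_atomic_ordering` performs; the essential invariant is that the compiler-side enum uses the same discriminants as the library-side `intrinsics::AtomicOrdering`. The names below are local to this sketch, not rustc items.

```rust
#[derive(Debug, Copy, Clone, PartialEq)]
enum AtomicOrdering {
    Relaxed = 0,
    Release = 1,
    Acquire = 2,
    AcqRel = 3,
    SeqCst = 4,
}

// Same structure as the compiler's conversion: compare the raw value against
// each variant's discriminant and panic on anything out of range.
fn from_discriminant(val: u32) -> AtomicOrdering {
    use AtomicOrdering::*;
    if val == Relaxed as u32 {
        Relaxed
    } else if val == Release as u32 {
        Release
    } else if val == Acquire as u32 {
        Acquire
    } else if val == AcqRel as u32 {
        AcqRel
    } else if val == SeqCst as u32 {
        SeqCst
    } else {
        panic!("not a valid atomic ordering")
    }
}

fn main() {
    assert_eq!(from_discriminant(AtomicOrdering::AcqRel as u32), AtomicOrdering::AcqRel);
}
```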

compiler/rustc_middle/src/ty/mod.rs

Lines changed: 2 additions & 2 deletions
@@ -74,8 +74,8 @@ pub use self::closure::{
     place_to_string_for_capture,
 };
 pub use self::consts::{
-    AnonConstKind, Const, ConstInt, ConstKind, Expr, ExprKind, ScalarInt, UnevaluatedConst,
-    ValTree, ValTreeKind, Value,
+    AnonConstKind, AtomicOrdering, Const, ConstInt, ConstKind, Expr, ExprKind, ScalarInt,
+    UnevaluatedConst, ValTree, ValTreeKind, Value,
 };
 pub use self::context::{
     CtxtInterners, CurrentGcx, DeducedParamAttrs, Feed, FreeRegionInfo, GlobalCtxt, Lift, TyCtxt,

compiler/rustc_span/src/symbol.rs

Lines changed: 1 addition & 0 deletions
@@ -515,6 +515,7 @@ symbols! {
         async_iterator_poll_next,
         async_trait_bounds,
         atomic,
+        atomic_load,
         atomic_mod,
         atomics,
         att_syntax,
