diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index b37797fef4ce3..9efbb34b515b6 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -2,6 +2,7 @@ use super::place::PlaceRef;
 use super::{FunctionCx, LocalRef};
 
 use crate::base;
+use crate::common::TypeKind;
 use crate::glue;
 use crate::traits::*;
 use crate::MemFlags;
@@ -236,19 +237,47 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         };
 
         match (&mut val, field.abi) {
-            (OperandValue::Immediate(llval), _) => {
+            (
+                OperandValue::Immediate(llval),
+                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. },
+            ) => {
                 // Bools in union fields needs to be truncated.
                 *llval = bx.to_immediate(*llval, field);
                 // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                *llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(field));
+                let ty = bx.cx().immediate_backend_type(field);
+                if bx.type_kind(ty) == TypeKind::Pointer {
+                    *llval = bx.pointercast(*llval, ty);
+                }
             }
             (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
                 // Bools in union fields needs to be truncated.
                 *a = bx.to_immediate_scalar(*a, a_abi);
                 *b = bx.to_immediate_scalar(*b, b_abi);
                 // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                *a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(field, 0, true));
-                *b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(field, 1, true));
+                let a_ty = bx.cx().scalar_pair_element_backend_type(field, 0, true);
+                let b_ty = bx.cx().scalar_pair_element_backend_type(field, 1, true);
+                if bx.type_kind(a_ty) == TypeKind::Pointer {
+                    *a = bx.pointercast(*a, a_ty);
+                }
+                if bx.type_kind(b_ty) == TypeKind::Pointer {
+                    *b = bx.pointercast(*b, b_ty);
+                }
+            }
+            // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
+            (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
+                assert!(matches!(self.layout.abi, Abi::Vector { .. }));
+
+                let llty = bx.cx().backend_type(self.layout);
+                let llfield_ty = bx.cx().backend_type(field);
+
+                // Can't bitcast an aggregate, so round trip through memory.
+                let lltemp = bx.alloca(llfield_ty, field.align.abi);
+                let llptr = bx.pointercast(lltemp, bx.cx().type_ptr_to(llty));
+                bx.store(*llval, llptr, field.align.abi);
+                *llval = bx.load(llfield_ty, lltemp, field.align.abi);
+            }
+            (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
+                bug!()
             }
             (OperandValue::Pair(..), _) => bug!(),
             (OperandValue::Ref(..), _) => bug!(),
diff --git a/tests/ui/simd/issue-105439.rs b/tests/ui/simd/issue-105439.rs
new file mode 100644
index 0000000000000..35ca76e989b91
--- /dev/null
+++ b/tests/ui/simd/issue-105439.rs
@@ -0,0 +1,25 @@
+// run-pass
+// compile-flags: -O -Zverify-llvm-ir
+
+#![feature(repr_simd)]
+#![feature(platform_intrinsics)]
+
+#[allow(non_camel_case_types)]
+#[derive(Clone, Copy)]
+#[repr(simd)]
+struct i32x4([i32; 4]);
+
+extern "platform-intrinsic" {
+    pub(crate) fn simd_add<T>(x: T, y: T) -> T;
+}
+
+#[inline(always)]
+fn to_array(a: i32x4) -> [i32; 4] {
+    a.0
+}
+
+fn main() {
+    let a = i32x4([1, 2, 3, 4]);
+    let b = unsafe { simd_add(a, a) };
+    assert_eq!(to_array(b), [2, 4, 6, 8]);
+}