
Let codegen_transmute_operand just handle everything #143860

Open · wants to merge 1 commit into master
7 changes: 0 additions & 7 deletions compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -347,13 +347,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {

let val = if field.is_zst() {
OperandValue::ZeroSized
} else if let BackendRepr::SimdVector { .. } = self.layout.backend_repr {
// codegen_transmute_operand doesn't support SIMD, but since the previous
// check handled ZSTs, the only possible field access into something SIMD
// is to the `non_1zst_field` that's the same SIMD. (Other things, even
// just padding, would change the wrapper's representation type.)
assert_eq!(field.size, self.layout.size);
self.val
} else if field.size == self.layout.size {
assert_eq!(offset.bytes(), 0);
fx.codegen_transmute_operand(bx, *self, field)
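For context, a hedged source-level sketch (illustrative only, not taken from the PR) of the pattern this branch now handles uniformly: when a wrapper's single non-ZST field has the wrapper's full size at offset 0, reading the field is effectively a whole-value transmute, which is exactly what the `field.size == self.layout.size` arm hands to `codegen_transmute_operand`.

// Illustration: a #[repr(transparent)] wrapper guarantees the field shares
// the wrapper's size and sits at offset 0, so this field read goes down
// the size-equality branch shown in the diff above.
#[repr(transparent)]
pub struct Wrapper<T>(pub T);

pub fn unwrap_field<T>(w: Wrapper<T>) -> T {
    w.0
}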
85 changes: 43 additions & 42 deletions compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -2,12 +2,12 @@ use rustc_abi::{self as abi, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, codegen_tag_value};
use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
@@ -229,6 +229,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
operand: OperandRef<'tcx, Bx::Value>,
cast: TyAndLayout<'tcx>,
) -> OperandValue<Bx::Value> {
if let abi::BackendRepr::Memory { .. } = cast.backend_repr
&& !cast.is_zst()
{
span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
}

// `Layout` is interned, so we can do a cheap check for things that are
// exactly the same and thus don't need any handling.
if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
return operand.val;
}

// Check for transmutes that are always UB.
if operand.layout.size != cast.size
|| operand.layout.is_uninhabited()
@@ -241,11 +253,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
return OperandValue::poison(bx, cast);
}

let cx = bx.cx();
match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
_ if cast.is_zst() => OperandValue::ZeroSized,
(_, _, abi::BackendRepr::Memory { .. }) => {
bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
}
(OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
assert_eq!(source_place_val.llextra, None);
// The existing alignment is part of `source_place_val`,
@@ -256,16 +266,38 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Immediate(imm),
abi::BackendRepr::Scalar(from_scalar),
abi::BackendRepr::Scalar(to_scalar),
) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
) if from_scalar.size(cx) == to_scalar.size(cx) => {
OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
}
(
OperandValue::Pair(imm_a, imm_b),
abi::BackendRepr::ScalarPair(in_a, in_b),
abi::BackendRepr::ScalarPair(out_a, out_b),
) => OperandValue::Pair(
transmute_scalar(bx, imm_a, in_a, out_a),
transmute_scalar(bx, imm_b, in_b, out_b),
),
_ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
OperandValue::Pair(
transmute_scalar(bx, imm_a, in_a, out_a),
transmute_scalar(bx, imm_b, in_b, out_b),
)
}
_ => {
// For any other potentially-tricky cases, make a temporary instead.
// If anything else wants the target local to be in memory this won't
// be hit, as `codegen_transmute` will get called directly. Thus this
// is only for places where everything else wants the operand form,
// and thus it's not worth making those places get it from memory.
//
// Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
// and endianness issues, as do SimdVector ones to avoid worrying
// about things like f32x8 ⇌ ptrx4 that would need multiple steps.
let align = Ord::max(operand.layout.align.abi, cast.align.abi);
let size = Ord::max(operand.layout.size, cast.size);
let temp = PlaceValue::alloca(bx, size, align);
bx.lifetime_start(temp.llval, size);
operand.val.store(bx, temp.with_type(operand.layout));
let val = bx.load_operand(temp.with_type(cast)).val;
bx.lifetime_end(temp.llval, size);
val
}
}
}
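A rough model of this fallback in plain Rust (a sketch under assumptions; `transmute_via_temp` and `Buf` are invented names, not compiler APIs): write the source value into a temporary that is at least as large and as aligned as either layout, then read it back at the destination type.

use core::mem::ManuallyDrop;

// Plays the role of the max-size / max-align alloca: a union is at least
// as large and as aligned as each of its fields.
union Buf<S, D> {
    src: ManuallyDrop<S>,
    dst: ManuallyDrop<D>,
}

// Same caveats as `mem::transmute`: the caller must ensure the stored
// bytes are valid at type `D`.
unsafe fn transmute_via_temp<S, D>(src: S) -> D {
    // Corresponds to `operand.val.store(...)` into the temporary ...
    let temp = Buf::<S, D> { src: ManuallyDrop::new(src) };
    // ... and to `load_operand` at the cast layout.
    unsafe { ManuallyDrop::into_inner(temp.dst) }
}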

@@ -949,37 +981,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
/// layout in this code when the right thing will happen anyway.
pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
match *rvalue {
mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
let operand_ty = operand.ty(self.mir, self.cx.tcx());
let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
match (operand_layout.backend_repr, cast_layout.backend_repr) {
// When the output will be in memory anyway, just use its place
// (instead of the operand path) unless it's the trivial ZST case.
(_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),

// Otherwise (for a non-memory output) if the input is memory
// then we can just read the value from the place.
(abi::BackendRepr::Memory { .. }, _) => true,

// When we have scalar immediates, we can only convert things
// where the sizes match, to avoid endianness questions.
(abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
a.size(self.cx) == b.size(self.cx),
(abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),

// Mixing Scalars and ScalarPairs can get quite complicated when
// padding and undef get involved, so leave that to the memory path.
(abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
(abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,

// SIMD vectors aren't worth the trouble of dealing with complex
// cases like from vectors of f32 to vectors of pointers or
// from fat pointers to vectors of u16. (See #143194 #110021 ...)
(abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
}
}
mir::Rvalue::Ref(..) |
mir::Rvalue::CopyForDeref(..) |
mir::Rvalue::RawPtr(..) |
26 changes: 16 additions & 10 deletions tests/codegen/intrinsics/transmute.rs
@@ -191,22 +191,28 @@ pub unsafe fn check_byte_from_bool(x: bool) -> u8 {
// CHECK-LABEL: @check_to_pair(
#[no_mangle]
pub unsafe fn check_to_pair(x: u64) -> Option<i32> {
// CHECK: %_0 = alloca [8 x i8], align 4
// CHECK: store i64 %x, ptr %_0, align 4
// CHECK: %[[TEMP:.+]] = alloca [8 x i8], align 8
// CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr %[[TEMP]])
// CHECK: store i64 %x, ptr %[[TEMP]], align 8
// CHECK: %[[PAIR0:.+]] = load i32, ptr %[[TEMP]], align 8
// CHECK: %[[PAIR1P:.+]] = getelementptr inbounds i8, ptr %[[TEMP]], i64 4
// CHECK: %[[PAIR1:.+]] = load i32, ptr %[[PAIR1P]], align 4
// CHECK: call void @llvm.lifetime.end.p0(i64 8, ptr %[[TEMP]])
// CHECK: insertvalue {{.+}}, i32 %[[PAIR0]], 0
// CHECK: insertvalue {{.+}}, i32 %[[PAIR1]], 1
transmute(x)
}

// CHECK-LABEL: @check_from_pair(
#[no_mangle]
pub unsafe fn check_from_pair(x: Option<i32>) -> u64 {
// The two arguments are of types that are only 4-aligned, but they're
// immediates so we can write using the destination alloca's alignment.
const { assert!(std::mem::align_of::<Option<i32>>() == 4) };

// CHECK: %_0 = alloca [8 x i8], align 8
// CHECK: store i32 %x.0, ptr %_0, align 8
// CHECK: store i32 %x.1, ptr %0, align 4
// CHECK: %[[R:.+]] = load i64, ptr %_0, align 8
// CHECK: %[[TEMP:.+]] = alloca [8 x i8], align 8
// CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr %[[TEMP]])
// CHECK: store i32 %x.0, ptr %[[TEMP]], align 8
// CHECK: %[[PAIR1P:.+]] = getelementptr inbounds i8, ptr %[[TEMP]], i64 4
// CHECK: store i32 %x.1, ptr %[[PAIR1P]], align 4
// CHECK: %[[R:.+]] = load i64, ptr %[[TEMP]], align 8
// CHECK: call void @llvm.lifetime.end.p0(i64 8, ptr %[[TEMP]])
// CHECK: ret i64 %[[R]]
transmute(x)
}
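As a usage-level companion to these tests (an illustrative snippet, not part of the test file), the Scalar/ScalarPair transmutes they exercise round-trip a fully initialized value:

// Option<i32> is a ScalarPair and u64 a Scalar of the same size; starting
// from Some(7) keeps every byte initialized in both directions.
fn main() {
    let a: Option<i32> = Some(7);
    let bits: u64 = unsafe { std::mem::transmute::<Option<i32>, u64>(a) };
    let b: Option<i32> = unsafe { std::mem::transmute::<u64, Option<i32>>(bits) };
    assert_eq!(a, b);
}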