Commit 283a074

Auto merge of #143860 - scottmcm:transmute-always-rvalue, r=WaffleLapkin
Let `codegen_transmute_operand` just handle everything

When combined with #143720, this means `rvalue_creates_operand` can just return `true` for *every* `Rvalue`. (A future PR could consider removing it, though just letting it optimize out is fine for now.)

It's nicer anyway, IMHO, because it avoids needing the layout checks to be consistent in the two places, and thus is an overall reduction in code. Plus it's a more helpful building block when used in other places this way.

(TBH, it probably would have been better to have it this way the whole time, but I clearly didn't understand `rvalue_creates_operand` when I originally wrote #109843.)
2 parents: ce5fdd7 + b7e025c
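A hedged, user-level sketch of the kind of cast this affects (ordinary `std` items only, nothing from the PR itself): a same-size Scalar ⇌ ScalarPair transmute is one of the cases `rvalue_creates_operand` used to reject, and which `codegen_transmute_operand` now handles itself through a small stack temporary.

```rust
// Minimal illustration, assuming the usual layout choices: `u64` has a
// Scalar backend repr while `(u32, u32)` is a ScalarPair, so this
// transmute lands in the new catch-all arm that round-trips through a
// stack temporary instead of forcing the whole local into memory.
fn split(x: u64) -> (u32, u32) {
    // SAFETY: both types are 8 bytes and every bit pattern is valid for each.
    unsafe { std::mem::transmute::<u64, (u32, u32)>(x) }
}

fn main() {
    let (a, b) = split(0x1122_3344_5566_7788);
    // Which half lands where depends on endianness, which is one reason
    // this case goes through memory rather than bit-shifting.
    println!("{a:#010x} {b:#010x}");
}
```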

File tree: 5 files changed (+258 −106 lines)

compiler/rustc_codegen_ssa/src/mir/analyze.rs

Lines changed: 0 additions & 5 deletions
```diff
@@ -170,11 +170,6 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'b, 'tcx>> Visitor<'tcx> for LocalAnalyzer
 
         if let Some(local) = place.as_local() {
             self.define(local, DefLocation::Assignment(location));
-            if self.locals[local] != LocalKind::Memory {
-                if !self.fx.rvalue_creates_operand(rvalue) {
-                    self.locals[local] = LocalKind::Memory;
-                }
-            }
         } else {
             self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
         }
```

compiler/rustc_codegen_ssa/src/mir/operand.rs

Lines changed: 0 additions & 7 deletions
```diff
@@ -335,13 +335,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
         let val = if field.is_zst() {
             OperandValue::ZeroSized
-        } else if let BackendRepr::SimdVector { .. } = self.layout.backend_repr {
-            // codegen_transmute_operand doesn't support SIMD, but since the previous
-            // check handled ZSTs, the only possible field access into something SIMD
-            // is to the `non_1zst_field` that's the same SIMD. (Other things, even
-            // just padding, would change the wrapper's representation type.)
-            assert_eq!(field.size, self.layout.size);
-            self.val
         } else if field.size == self.layout.size {
             assert_eq!(offset.bytes(), 0);
             fx.codegen_transmute_operand(bx, *self, field)
```
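The deleted branch was about field accesses into values whose own backend repr is a SIMD vector, as in the nightly-only sketch below (`F32x4` and `Wrapper` are illustrative names, not from the PR). Such an access can only extract a same-size SIMD field, and after this change it simply flows into `codegen_transmute_operand`, which now accepts it.

```rust
// Nightly-only sketch: requires the repr_simd feature gate.
#![feature(repr_simd)]

#[repr(simd)]
#[derive(Copy, Clone)]
struct F32x4([f32; 4]);

// A transparent wrapper inherits the SIMD backend repr, so `w.0` below is
// a field access into "something SIMD" whose field has the wrapper's size.
#[repr(transparent)]
#[derive(Copy, Clone)]
struct Wrapper(F32x4);

fn extract(w: Wrapper) -> F32x4 {
    w.0
}

fn main() {
    let _v = extract(Wrapper(F32x4([1.0, 2.0, 3.0, 4.0])));
}
```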

compiler/rustc_codegen_ssa/src/mir/rvalue.rs

Lines changed: 66 additions & 84 deletions
```diff
@@ -2,12 +2,12 @@ use rustc_abi::{self as abi, FIRST_VARIANT};
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
-use rustc_middle::{bug, mir};
+use rustc_middle::{bug, mir, span_bug};
 use rustc_session::config::OptLevel;
 use tracing::{debug, instrument};
 
 use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
-use super::place::{PlaceRef, codegen_tag_value};
+use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
 use super::{FunctionCx, LocalRef};
 use crate::common::{IntPredicate, TypeKind};
 use crate::traits::*;
```
```diff
@@ -180,7 +180,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             _ => {
-                assert!(self.rvalue_creates_operand(rvalue));
                 let temp = self.codegen_rvalue_operand(bx, rvalue);
                 temp.val.store(bx, dest);
             }
```
```diff
@@ -218,17 +217,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     /// Transmutes an `OperandValue` to another `OperandValue`.
     ///
-    /// This is supported only for cases where [`Self::rvalue_creates_operand`]
-    /// returns `true`, and will ICE otherwise. (In particular, anything that
-    /// would need to `alloca` in order to return a `PlaceValue` will ICE,
-    /// expecting those to go via [`Self::codegen_transmute`] instead where
-    /// the destination place is already allocated.)
+    /// This is supported for all cases where the `cast` type is SSA,
+    /// but for non-ZSTs with [`abi::BackendRepr::Memory`] it ICEs.
     pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
         operand: OperandRef<'tcx, Bx::Value>,
         cast: TyAndLayout<'tcx>,
     ) -> OperandValue<Bx::Value> {
+        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
+            && !cast.is_zst()
+        {
+            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
+        }
+
+        // `Layout` is interned, so we can do a cheap check for things that are
+        // exactly the same and thus don't need any handling.
+        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
+            return operand.val;
+        }
+
         // Check for transmutes that are always UB.
         if operand.layout.size != cast.size
             || operand.layout.is_uninhabited()
```
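The new early-return relies on `Layout` being interned, so equality can start with a cheap identity check. The snippet below is a generic illustration of that interning idea (a hypothetical `intern` helper over `Rc<str>`, not rustc's actual interner):

```rust
// Interning sketch: equal values share one allocation, so "same pointer"
// proves "equal" without a deep comparison.
use std::collections::HashSet;
use std::rc::Rc;

fn intern(set: &mut HashSet<Rc<str>>, s: &str) -> Rc<str> {
    if let Some(existing) = set.get(s) {
        return Rc::clone(existing);
    }
    let interned: Rc<str> = Rc::from(s);
    set.insert(Rc::clone(&interned));
    interned
}

fn main() {
    let mut set = HashSet::new();
    let a = intern(&mut set, "layout-of-i32");
    let b = intern(&mut set, "layout-of-i32");
    // The cheap check: both interned copies are the same allocation.
    assert!(Rc::ptr_eq(&a, &b));
}
```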
```diff
@@ -241,11 +249,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             return OperandValue::poison(bx, cast);
         }
 
+        // To or from pointers takes different methods, so we use this to restrict
+        // the SimdVector case to types which can be `bitcast` between each other.
+        #[inline]
+        fn vector_can_bitcast(x: abi::Scalar) -> bool {
+            matches!(
+                x,
+                abi::Scalar::Initialized {
+                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
+                    ..
+                }
+            )
+        }
+
+        let cx = bx.cx();
         match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
             _ if cast.is_zst() => OperandValue::ZeroSized,
-            (_, _, abi::BackendRepr::Memory { .. }) => {
-                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
-            }
             (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                 assert_eq!(source_place_val.llextra, None);
                 // The existing alignment is part of `source_place_val`,
```
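The guard admits only integer and float elements because pointer elements would need `ptrtoint`/`inttoptr` rather than a plain `bitcast`. A hedged, x86_64-only illustration of a transmute that qualifies (using stable `std::arch` types, nothing from the PR):

```rust
// `__m128` (float vector) and `__m128i` (integer vector) both have
// SimdVector backend reprs with bitcast-able elements, so this transmute
// is eligible for the single-`bitcast` arm guarded by `vector_can_bitcast`.
#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::{__m128, __m128i, _mm_cvtsi128_si32, _mm_set1_ps};
    unsafe {
        let v: __m128 = _mm_set1_ps(1.0);
        let bits: __m128i = std::mem::transmute::<__m128, __m128i>(v);
        // The low lane holds the IEEE-754 bits of 1.0f32.
        assert_eq!(_mm_cvtsi128_si32(bits), 0x3F80_0000);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```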
```diff
@@ -256,16 +275,46 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 OperandValue::Immediate(imm),
                 abi::BackendRepr::Scalar(from_scalar),
                 abi::BackendRepr::Scalar(to_scalar),
-            ) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
+            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
+                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
+            }
+            (
+                OperandValue::Immediate(imm),
+                abi::BackendRepr::SimdVector { element: from_scalar, .. },
+                abi::BackendRepr::SimdVector { element: to_scalar, .. },
+            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
+                let to_backend_ty = bx.cx().immediate_backend_type(cast);
+                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
+            }
             (
                 OperandValue::Pair(imm_a, imm_b),
                 abi::BackendRepr::ScalarPair(in_a, in_b),
                 abi::BackendRepr::ScalarPair(out_a, out_b),
-            ) => OperandValue::Pair(
-                transmute_scalar(bx, imm_a, in_a, out_a),
-                transmute_scalar(bx, imm_b, in_b, out_b),
-            ),
-            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
+            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
+                OperandValue::Pair(
+                    transmute_scalar(bx, imm_a, in_a, out_a),
+                    transmute_scalar(bx, imm_b, in_b, out_b),
+                )
+            }
+            _ => {
+                // For any other potentially-tricky cases, make a temporary instead.
+                // If anything else wants the target local to be in memory this won't
+                // be hit, as `codegen_transmute` will get called directly. Thus this
+                // is only for places where everything else wants the operand form,
+                // and thus it's not worth making those places get it from memory.
+                //
+                // Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
+                // and endianness issues, as do SimdVector ones to avoid worrying
+                // about things like f32x8 ⇌ ptrx4 that would need multiple steps.
+                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
+                let size = Ord::max(operand.layout.size, cast.size);
+                let temp = PlaceValue::alloca(bx, size, align);
+                bx.lifetime_start(temp.llval, size);
+                operand.val.store(bx, temp.with_type(operand.layout));
+                let val = bx.load_operand(temp.with_type(cast)).val;
+                bx.lifetime_end(temp.llval, size);
+                val
+            }
         }
     }
```
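As a user-level companion to the arms above (hedged: layouts of `repr(Rust)` types are compiler-chosen, and the wide-pointer field order below is a de-facto assumption, not a guarantee): a same-size scalar transmute takes the `transmute_scalar` arm directly, and a ScalarPair with a pointer half still qualifies because `transmute_scalar` can lower that component with `ptrtoint`.

```rust
// Scalar -> Scalar of equal size: a direct value conversion, no memory.
fn f64_bits(x: f64) -> u64 {
    unsafe { std::mem::transmute::<f64, u64>(x) }
}

// ScalarPair -> ScalarPair with matching component sizes: the pointer
// component needs ptrtoint rather than bitcast, which the per-component
// `transmute_scalar` calls handle.
fn str_words(s: &str) -> (usize, usize) {
    unsafe { std::mem::transmute::<&str, (usize, usize)>(s) }
}

fn main() {
    assert_eq!(f64_bits(1.0), 0x3FF0_0000_0000_0000);
    let (_addr, len) = str_words("hello");
    // Assumes the de-facto (data pointer, length) order of wide pointers.
    assert_eq!(len, 5);
}
```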

```diff
@@ -326,8 +375,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         bx: &mut Bx,
         rvalue: &mir::Rvalue<'tcx>,
     ) -> OperandRef<'tcx, Bx::Value> {
-        assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {rvalue:?} to operand",);
-
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                 let operand = self.codegen_operand(bx, source);
```
```diff
@@ -653,8 +700,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let ty = self.monomorphize(ty);
         let layout = self.cx.layout_of(ty);
 
-        // `rvalue_creates_operand` has arranged that we only get here if
-        // we can build the aggregate immediate from the field immediates.
         let mut builder = OperandRefBuilder::new(layout);
         for (field_idx, field) in fields.iter_enumerated() {
             let op = self.codegen_operand(bx, field);
```
```diff
@@ -955,69 +1000,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         OperandValue::Pair(val, of)
     }
-
-    /// Returns `true` if the `rvalue` can be computed into an [`OperandRef`],
-    /// rather than needing a full `PlaceRef` for the assignment destination.
-    ///
-    /// This is used by the [`super::analyze`] code to decide which MIR locals
-    /// can stay as SSA values (as opposed to generating `alloca` slots for them).
-    /// As such, some paths here return `true` even where the specific rvalue
-    /// will not actually take the operand path because the result type is such
-    /// that it always gets an `alloca`, but where it's not worth re-checking the
-    /// layout in this code when the right thing will happen anyway.
-    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
-        match *rvalue {
-            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
-                let operand_ty = operand.ty(self.mir, self.cx.tcx());
-                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
-                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
-                match (operand_layout.backend_repr, cast_layout.backend_repr) {
-                    // When the output will be in memory anyway, just use its place
-                    // (instead of the operand path) unless it's the trivial ZST case.
-                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
-
-                    // Otherwise (for a non-memory output) if the input is memory
-                    // then we can just read the value from the place.
-                    (abi::BackendRepr::Memory { .. }, _) => true,
-
-                    // When we have scalar immediates, we can only convert things
-                    // where the sizes match, to avoid endianness questions.
-                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
-                        a.size(self.cx) == b.size(self.cx),
-                    (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
-                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
-
-                    // Mixing Scalars and ScalarPairs can get quite complicated when
-                    // padding and undef get involved, so leave that to the memory path.
-                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
-                    (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
-
-                    // SIMD vectors aren't worth the trouble of dealing with complex
-                    // cases like from vectors of f32 to vectors of pointers or
-                    // from fat pointers to vectors of u16. (See #143194 #110021 ...)
-                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
-                }
-            }
-            mir::Rvalue::Ref(..) |
-            mir::Rvalue::CopyForDeref(..) |
-            mir::Rvalue::RawPtr(..) |
-            mir::Rvalue::Len(..) |
-            mir::Rvalue::Cast(..) | // (*)
-            mir::Rvalue::ShallowInitBox(..) | // (*)
-            mir::Rvalue::BinaryOp(..) |
-            mir::Rvalue::UnaryOp(..) |
-            mir::Rvalue::Discriminant(..) |
-            mir::Rvalue::NullaryOp(..) |
-            mir::Rvalue::ThreadLocalRef(_) |
-            mir::Rvalue::Use(..) |
-            mir::Rvalue::Repeat(..) | // (*)
-            mir::Rvalue::Aggregate(..) | // (*)
-            mir::Rvalue::WrapUnsafeBinder(..) => // (*)
-                true,
-        }
-
-        // (*) this is only true if the type is suitable
-    }
 }
 
 /// Transmutes a single scalar value `imm` from `from_scalar` to `to_scalar`.
```
