Commit e685530
deduplicate some copy_op code
1 parent 924a4cd

6 files changed, +74 -87 lines
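
Summary: the separate `copy_op_transmute` entry point is removed and its behaviour is folded into `copy_op` behind a new `allow_transmute: bool` parameter, with every call site updated to pass the flag explicitly. As rough orientation, here is a standalone sketch of that pattern; all types and names in it (`Layout`, `Slot`, `Interp`) are invented for illustration and are not the interpreter's OpTy/PlaceTy/InterpCx.

#[derive(Clone, Copy, PartialEq, Debug)]
struct Layout { size: usize }

#[derive(Debug)]
struct Slot { layout: Layout, bytes: Vec<u8> }

struct Interp;

impl Interp {
    // One entry point instead of `copy_op` + `copy_op_transmute`: the flag says
    // whether the source and destination layouts are allowed to disagree.
    fn copy_op(&mut self, src: &Slot, dest: &mut Slot, allow_transmute: bool) -> Result<(), String> {
        if !allow_transmute && src.layout != dest.layout {
            return Err("type mismatch when copying".into());
        }
        // Even a transmuting copy must preserve the size.
        assert_eq!(src.layout.size, dest.layout.size);
        dest.bytes.copy_from_slice(&src.bytes);
        Ok(())
    }
}

fn main() {
    let src = Slot { layout: Layout { size: 4 }, bytes: vec![1, 2, 3, 4] };
    let mut dest = Slot { layout: Layout { size: 4 }, bytes: vec![0; 4] };
    Interp.copy_op(&src, &mut dest, /*allow_transmute*/ false).unwrap();
    assert_eq!(dest.bytes, [1, 2, 3, 4]);
}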

Diff for: compiler/rustc_const_eval/src/interpret/cast.rs (+1 -1)

@@ -359,7 +359,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let src_field = self.operand_field(src, i)?;
                 let dst_field = self.place_field(dest, i)?;
                 if src_field.layout.ty == cast_ty_field.ty {
-                    self.copy_op(&src_field, &dst_field)?;
+                    self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
                 } else {
                     self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
                 }
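
The hunk above sits in the interpreter's unsizing logic, where an aggregate is unsized field by field and a field whose type does not change is copied with a plain, non-transmuting `copy_op`. As a hedged, user-level illustration, an unsizing coercion of a Box is one construct that can reach this kind of field-by-field copy when the program is run under Miri, which shares this interpreter:

fn main() {
    // Ordinary Rust, for illustration only: coercing Box<[u8; 3]> to Box<[u8]>
    // unsizes an aggregate whose last field changes type while the rest is copied as-is.
    let sized: Box<[u8; 3]> = Box::new([1, 2, 3]);
    let unsized_box: Box<[u8]> = sized;
    assert_eq!(unsized_box.len(), 3);
}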

Diff for: compiler/rustc_const_eval/src/interpret/eval_context.rs (+1 -1)

@@ -800,7 +800,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .local_to_op(self.frame(), mir::RETURN_PLACE, None)
             .expect("return place should always be live");
         let dest = self.frame().return_place;
-        let err = self.copy_op_transmute(&op, &dest);
+        let err = self.copy_op(&op, &dest, /*allow_transmute*/ true);
         trace!("return value: {:?}", self.dump_place(*dest));
         // We delay actually short-circuiting on this error until *after* the stack frame is
         // popped, since we want this error to be attributed to the caller, whose type defines

Diff for: compiler/rustc_const_eval/src/interpret/intrinsics.rs (+9 -5)

@@ -174,7 +174,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let val =
                     self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                 let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
-                self.copy_op(&val, dest)?;
+                self.copy_op(&val, dest, /*allow_transmute*/ false)?;
             }

             sym::ctpop
@@ -394,7 +394,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }

             sym::transmute => {
-                self.copy_op_transmute(&args[0], dest)?;
+                self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
             }
             sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
                 let ty = instance.substs.type_at(0);
@@ -461,7 +461,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     let place = self.mplace_index(&dest, i)?;
                     let value =
                         if i == index { *elem } else { self.mplace_index(&input, i)?.into() };
-                    self.copy_op(&value, &place.into())?;
+                    self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
                 }
             }
             sym::simd_extract => {
@@ -473,11 +473,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     index,
                     input_len
                 );
-                self.copy_op(&self.mplace_index(&input, index)?.into(), dest)?;
+                self.copy_op(
+                    &self.mplace_index(&input, index)?.into(),
+                    dest,
+                    /*allow_transmute*/ false,
+                )?;
             }
             sym::likely | sym::unlikely | sym::black_box => {
                 // These just return their argument
-                self.copy_op(&args[0], dest)?;
+                self.copy_op(&args[0], dest, /*allow_transmute*/ false)?;
             }
             sym::assume => {
                 let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
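
The `sym::transmute` arm now states directly that a transmute is just a copy whose source and destination layouts are allowed to disagree. A user-level view of what the interpreter then evaluates (ordinary Rust; the constant name is made up for illustration):

use std::mem::transmute;

// Const evaluation of this transmute reduces to a size-preserving copy between
// mismatched layouts, i.e. the allow_transmute = true path added above.
const ONE_F32_BITS: u32 = unsafe { transmute::<f32, u32>(1.0_f32) };

fn main() {
    // 1.0_f32 has the IEEE-754 single-precision bit pattern 0x3F80_0000.
    assert_eq!(ONE_F32_BITS, 0x3F80_0000);
}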

Diff for: compiler/rustc_const_eval/src/interpret/place.rs (+59 -76)

@@ -10,8 +10,9 @@ use rustc_macros::HashStable;
 use rustc_middle::mir;
 use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
-use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
-use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants};
+use rustc_target::abi::{
+    Abi, Align, FieldsShape, HasDataLayout, Size, TagEncoding, VariantIdx, Variants,
+};

 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
@@ -772,51 +773,51 @@ where
             }
             Place::Ptr(mplace) => mplace, // already referring to memory
         };
-        let dest = MPlaceTy { mplace, layout: dest.layout, align: dest.align };

         // This is already in memory, write there.
-        self.write_immediate_to_mplace_no_validate(src, &dest)
+        self.write_immediate_to_mplace_no_validate(src, dest.layout, dest.align, mplace)
     }

     /// Write an immediate to memory.
     /// If you use this you are responsible for validating that things got copied at the
-    /// right type.
+    /// right layout.
     fn write_immediate_to_mplace_no_validate(
         &mut self,
         value: Immediate<M::PointerTag>,
-        dest: &MPlaceTy<'tcx, M::PointerTag>,
+        layout: TyAndLayout<'tcx>,
+        align: Align,
+        dest: MemPlace<M::PointerTag>,
     ) -> InterpResult<'tcx> {
         // Note that it is really important that the type here is the right one, and matches the
         // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
         // to handle padding properly, which is only correct if we never look at this data with the
         // wrong type.

         let tcx = *self.tcx;
-        let Some(mut alloc) = self.get_place_alloc_mut(dest)? else {
+        let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })? else {
             // zero-sized access
             return Ok(());
         };

         match value {
             Immediate::Scalar(scalar) => {
-                let Abi::Scalar(s) = dest.layout.abi else { span_bug!(
+                let Abi::Scalar(s) = layout.abi else { span_bug!(
                     self.cur_span(),
-                    "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
-                    dest.layout
+                    "write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
                 )
                 };
                 let size = s.size(&tcx);
-                assert_eq!(size, dest.layout.size, "abi::Scalar size does not match layout size");
+                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                 alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
                 // We checked `ptr_align` above, so all fields will have the alignment they need.
                 // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
-                let Abi::ScalarPair(a, b) = dest.layout.abi else { span_bug!(
+                let Abi::ScalarPair(a, b) = layout.abi else { span_bug!(
                     self.cur_span(),
                     "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
-                    dest.layout
+                    layout
                 )
                 };
                 let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
@@ -858,16 +859,17 @@ where
         Ok(())
     }

-    /// Copies the data from an operand to a place. This does not support transmuting!
-    /// Use `copy_op_transmute` if the layouts could disagree.
+    /// Copies the data from an operand to a place.
+    /// `allow_transmute` indicates whether the layouts may disagree.
     #[inline(always)]
     #[instrument(skip(self), level = "debug")]
     pub fn copy_op(
         &mut self,
         src: &OpTy<'tcx, M::PointerTag>,
         dest: &PlaceTy<'tcx, M::PointerTag>,
+        allow_transmute: bool,
     ) -> InterpResult<'tcx> {
-        self.copy_op_no_validate(src, dest)?;
+        self.copy_op_no_validate(src, dest, allow_transmute)?;

         if M::enforce_validity(self) {
             // Data got changed, better make sure it matches the type!
@@ -877,19 +879,22 @@ where
         Ok(())
     }

-    /// Copies the data from an operand to a place. This does not support transmuting!
-    /// Use `copy_op_transmute` if the layouts could disagree.
+    /// Copies the data from an operand to a place.
+    /// `allow_transmute` indicates whether the layouts may disagree.
     /// Also, if you use this you are responsible for validating that things get copied at the
     /// right type.
     #[instrument(skip(self), level = "debug")]
     fn copy_op_no_validate(
         &mut self,
         src: &OpTy<'tcx, M::PointerTag>,
         dest: &PlaceTy<'tcx, M::PointerTag>,
+        allow_transmute: bool,
     ) -> InterpResult<'tcx> {
         // We do NOT compare the types for equality, because well-typed code can
         // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
-        if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
+        let layout_compat =
+            mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
+        if !allow_transmute && !layout_compat {
             span_bug!(
                 self.cur_span(),
                 "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
@@ -898,82 +903,55 @@ where
             );
         }

-        // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
+        // Let us see if the layout is simple so we take a shortcut,
+        // avoid force_allocation.
         let src = match self.read_immediate_raw(src, /*force*/ false)? {
             Ok(src_val) => {
                 assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
+                assert!(
+                    !dest.layout.is_unsized(),
+                    "the src is sized, so the dest must also be sized"
+                );
+                assert_eq!(src.layout.size, dest.layout.size);
                 // Yay, we got a value that we can write directly.
-                return self.write_immediate_no_validate(*src_val, dest);
+                return if layout_compat {
+                    self.write_immediate_no_validate(*src_val, dest)
+                } else {
+                    // This is tricky. The problematic case is `ScalarPair`: the `src_val` was
+                    // loaded using the offsets defined by `src.layout`. When we put this back into
+                    // the destination, we have to use the same offsets! So (a) we make sure we
+                    // write back to memory, and (b) we use `dest` *with the source layout*.
+                    let dest_mem = self.force_allocation(dest)?;
+                    self.write_immediate_to_mplace_no_validate(
+                        *src_val,
+                        src.layout,
+                        dest_mem.align,
+                        *dest_mem,
+                    )
+                };
             }
             Err(mplace) => mplace,
         };
         // Slow path, this does not fit into an immediate. Just memcpy.
         trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

-        let dest = self.force_allocation(dest)?;
+        let dest = self.force_allocation(&dest)?;
         let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
             span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
         };
         if cfg!(debug_assertions) {
             let src_size = self.size_and_align_of_mplace(&src)?.unwrap().0;
             assert_eq!(src_size, dest_size, "Cannot copy differently-sized data");
+        } else {
+            // As a cheap approximation, we compare the fixed parts of the size.
+            assert_eq!(src.layout.size, dest.layout.size);
         }

         self.mem_copy(
             src.ptr, src.align, dest.ptr, dest.align, dest_size, /*nonoverlapping*/ false,
         )
     }

-    /// Copies the data from an operand to a place. The layouts may disagree, but they must
-    /// have the same size.
-    pub fn copy_op_transmute(
-        &mut self,
-        src: &OpTy<'tcx, M::PointerTag>,
-        dest: &PlaceTy<'tcx, M::PointerTag>,
-    ) -> InterpResult<'tcx> {
-        if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
-            // Fast path: Just use normal `copy_op`. This is faster because it tries
-            // `read_immediate_raw` first before doing `force_allocation`.
-            return self.copy_op(src, dest);
-        }
-        // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
-        // to avoid that here.
-        assert!(
-            !src.layout.is_unsized() && !dest.layout.is_unsized(),
-            "Cannot transmute unsized data"
-        );
-        // We still require the sizes to match.
-        if src.layout.size != dest.layout.size {
-            span_bug!(
-                self.cur_span(),
-                "size-changing transmute, should have been caught by transmute checking: {:#?}\ndest: {:#?}",
-                src,
-                dest
-            );
-        }
-
-        // The hard case is `ScalarPair`. `src` is already read from memory in this case,
-        // using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
-        // We have to write them to `dest` at the offsets they were *read at*, which is
-        // not necessarily the same as the offsets in `dest.layout`!
-        // Hence we do the copy with the source layout on both sides. We also make sure to write
-        // into memory, because if `dest` is a local we would not even have a way to write
-        // at the `src` offsets; the fact that we came from a different layout would
-        // just be lost.
-        let dest = self.force_allocation(dest)?;
-        self.copy_op_no_validate(
-            src,
-            &PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout, align: dest.align }),
-        )?;
-
-        if M::enforce_validity(self) {
-            // Data got changed, better make sure it matches the type!
-            self.validate_operand(&dest.into())?;
-        }
-
-        Ok(())
-    }
-
     /// Ensures that a place is in memory, and returns where it is.
     /// If the place currently refers to a local that doesn't yet have a matching allocation,
     /// create such an allocation.
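
Both the removed `copy_op_transmute` and the new inline comment in `copy_op_no_validate` hinge on the same subtlety: a `ScalarPair` immediate is read at the byte offsets given by the *source* layout, so a transmuting copy has to write it back at those same offsets rather than at the destination layout's offsets. A standalone sketch of why this matters, using invented offsets and mock types rather than the interpreter's own:

#[derive(Clone, Copy, Debug)]
struct PairLayout { off_a: usize, off_b: usize, size: usize }

// Write two 4-byte scalars into a buffer at the offsets a given layout prescribes.
fn write_pair(bytes: &mut [u8], layout: PairLayout, a: u32, b: u32) {
    bytes[layout.off_a..layout.off_a + 4].copy_from_slice(&a.to_le_bytes());
    bytes[layout.off_b..layout.off_b + 4].copy_from_slice(&b.to_le_bytes());
}

fn main() {
    // Hypothetical layouts of the same size whose scalar fields sit at different offsets.
    let src_layout = PairLayout { off_a: 0, off_b: 8, size: 12 };
    let dest_layout = PairLayout { off_a: 0, off_b: 4, size: 12 };

    // The immediate (a, b) was loaded from the source allocation at src_layout's offsets.
    let (a, b) = (0x1111_1111_u32, 0x2222_2222_u32);

    // Transmuting copy: write back with the *source* layout, as the new code does,
    // so each scalar lands on the bytes it originally occupied.
    let mut dest = vec![0u8; dest_layout.size];
    write_pair(&mut dest, src_layout, a, b);

    // Writing at the destination layout's own offsets would scatter the bytes differently.
    let mut wrong = vec![0u8; dest_layout.size];
    write_pair(&mut wrong, dest_layout, a, b);
    assert_ne!(dest, wrong);
}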
@@ -997,18 +975,23 @@ where
                 if local_layout.is_unsized() {
                     throw_unsup_format!("unsized locals are not supported");
                 }
-                let mplace = self.allocate(local_layout, MemoryKind::Stack)?;
+                let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
                 if !matches!(local_val, Immediate::Uninit) {
                     // Preserve old value. (As an optimization, we can skip this if it was uninit.)
                     // We don't have to validate as we can assume the local
                     // was already valid for its type.
-                    self.write_immediate_to_mplace_no_validate(local_val, &mplace)?;
+                    self.write_immediate_to_mplace_no_validate(
+                        local_val,
+                        local_layout,
+                        local_layout.align.abi,
+                        mplace,
+                    )?;
                 }
                 // Now we can call `access_mut` again, asserting it goes well,
                 // and actually overwrite things.
                 *M::access_local_mut(self, frame, local).unwrap() =
-                    Operand::Indirect(*mplace);
-                *mplace
+                    Operand::Indirect(mplace);
+                mplace
             }
             &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
         }

Diff for: compiler/rustc_const_eval/src/interpret/step.rs (+3 -3)

@@ -169,7 +169,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Use(ref operand) => {
                 // Avoid recomputing the layout
                 let op = self.eval_operand(operand, Some(dest.layout))?;
-                self.copy_op(&op, &dest)?;
+                self.copy_op(&op, &dest, /*allow_transmute*/ false)?;
             }

             BinaryOp(bin_op, box (ref left, ref right)) => {
@@ -204,7 +204,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 for (field_index, operand) in operands.iter().enumerate() {
                     let op = self.eval_operand(operand, None)?;
                     let field_dest = self.place_field(&dest, field_index)?;
-                    self.copy_op(&op, &field_dest)?;
+                    self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
                 }
             }

@@ -220,7 +220,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 } else {
                     // Write the src to the first element.
                     let first = self.mplace_field(&dest, 0)?;
-                    self.copy_op(&src, &first.into())?;
+                    self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;

                     // This is performance-sensitive code for big static/const arrays! So we
                     // avoid writing each operand individually and instead just make many copies
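
The last hunk above handles the `Repeat` rvalue, i.e. `[x; N]`: the element is written once with a non-transmuting `copy_op` and then replicated with bulk copies, which is why the comment calls it performance-sensitive for big static/const arrays. A small constant that takes this path during const evaluation (ordinary Rust; the constant name is invented):

// `[0xABCD; 8]` lowers to a Repeat rvalue; the interpreter writes the first
// element with copy_op and produces the rest by copying it in bulk.
const TABLE: [u16; 8] = [0xABCD; 8];

fn main() {
    assert!(TABLE.iter().all(|&v| v == 0xABCD));
}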

Diff for: compiler/rustc_const_eval/src/interpret/terminator.rs (+1 -1)

@@ -321,7 +321,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
         // is true for all `copy_op`, but there are a lot of special cases for argument passing
         // specifically.)
-        self.copy_op_transmute(&caller_arg, callee_arg)
+        self.copy_op(&caller_arg, callee_arg, /*allow_transmute*/ true)
     }

     /// Call this function -- pushing the stack frame and initializing the arguments.
