@@ -2,7 +2,7 @@ use rustc_apfloat::{Float, FloatConvert};
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
-use rustc_middle::ty::{self, FloatTy, Ty};
+use rustc_middle::ty::{self, FloatTy, ScalarInt, Ty};
 use rustc_span::symbol::sym;
 use rustc_target::abi::Abi;
@@ -146,14 +146,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     fn binary_int_op(
         &self,
         bin_op: mir::BinOp,
-        // passing in raw bits
-        l: u128,
-        left_layout: TyAndLayout<'tcx>,
-        r: u128,
-        right_layout: TyAndLayout<'tcx>,
+        left: &ImmTy<'tcx, M::Provenance>,
+        right: &ImmTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
         use rustc_middle::mir::BinOp::*;
 
+        // This checks the size, so that we can just assert it below.
+        let l = left.to_scalar_int()?;
+        let r = right.to_scalar_int()?;
+        // Prepare to convert the values to signed or unsigned form.
+        let l_signed = || l.assert_int(left.layout.size);
+        let l_unsigned = || l.assert_uint(left.layout.size);
+        let r_signed = || r.assert_int(right.layout.size);
+        let r_unsigned = || r.assert_uint(right.layout.size);
+
         let throw_ub_on_overflow = match bin_op {
             AddUnchecked => Some(sym::unchecked_add),
             SubUnchecked => Some(sym::unchecked_sub),
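The closures added above provide lazy signed (`assert_int`) and unsigned (`assert_uint`) views of the same raw operand bits. As a rough standalone illustration of that distinction, here is a sketch with hypothetical free functions `view_signed`/`view_unsigned` over plain `u128` bits; this is not the interpreter's `ScalarInt` API, just the sign-extension idea it encapsulates:

```rust
// Hypothetical helpers for illustration only.
fn view_unsigned(bits: u128, size_bits: u32) -> u128 {
    let shift = 128 - size_bits;
    // Zero-extension: drop everything above the value's width.
    bits << shift >> shift
}

fn view_signed(bits: u128, size_bits: u32) -> i128 {
    let shift = 128 - size_bits;
    // Sign-extension: move the value's sign bit up to bit 127, then arithmetic-shift back down.
    ((bits << shift) as i128) >> shift
}

fn main() {
    // The same 8-bit pattern 0xff reads as 255 unsigned and -1 signed.
    assert_eq!(view_unsigned(0xff, 8), 255);
    assert_eq!(view_signed(0xff, 8), -1);
}
```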
@@ -165,69 +171,72 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         // Shift ops can have an RHS with a different numeric type.
         if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
-            let size = left_layout.size.bits();
+            let size = left.layout.size.bits();
             // The shift offset is implicitly masked to the type size. (This is the one MIR operator
             // that does *not* directly map to a single LLVM operation.) Compute how much we
             // actually shift and whether there was an overflow due to shifting too much.
-            let (shift_amount, overflow) = if right_layout.abi.is_signed() {
-                let shift_amount = self.sign_extend(r, right_layout) as i128;
+            let (shift_amount, overflow) = if right.layout.abi.is_signed() {
+                let shift_amount = r_signed();
                 let overflow = shift_amount < 0 || shift_amount >= i128::from(size);
+                // Deliberately wrapping `as` casts: shift_amount *can* be negative, but the result
+                // of the `as` will be equal modulo `size` (since it is a power of two).
                 let masked_amount = (shift_amount as u128) % u128::from(size);
-                debug_assert_eq!(overflow, shift_amount != (masked_amount as i128));
+                assert_eq!(overflow, shift_amount != (masked_amount as i128));
                 (masked_amount, overflow)
             } else {
-                let shift_amount = r;
+                let shift_amount = r_unsigned();
                 let masked_amount = shift_amount % u128::from(size);
                 (masked_amount, shift_amount != masked_amount)
             };
             let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit
             // Compute the shifted result.
-            let result = if left_layout.abi.is_signed() {
-                let l = self.sign_extend(l, left_layout) as i128;
+            let result = if left.layout.abi.is_signed() {
+                let l = l_signed();
                 let result = match bin_op {
                     Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
                     Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 };
-                result as u128
+                ScalarInt::truncate_from_int(result, left.layout.size).0
             } else {
-                match bin_op {
+                let l = l_unsigned();
+                let result = match bin_op {
                     Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
                     Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
-                }
+                };
+                ScalarInt::truncate_from_uint(result, left.layout.size).0
             };
-            let truncated = self.truncate(result, left_layout);
 
             if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                 throw_ub_custom!(
                     fluent::const_eval_overflow_shift,
-                    val = if right_layout.abi.is_signed() {
-                        (self.sign_extend(r, right_layout) as i128).to_string()
+                    val = if right.layout.abi.is_signed() {
+                        r_signed().to_string()
                     } else {
-                        r.to_string()
+                        r_unsigned().to_string()
                     },
                     name = intrinsic_name
                 );
             }
 
-            return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
+            return Ok((ImmTy::from_scalar_int(result, left.layout), overflow));
         }
 
         // For the remaining ops, the types must be the same on both sides
-        if left_layout.ty != right_layout.ty {
+        if left.layout.ty != right.layout.ty {
             span_bug!(
                 self.cur_span(),
                 "invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})",
-                l_ty = left_layout.ty,
-                r_ty = right_layout.ty,
+                l_ty = left.layout.ty,
+                r_ty = right.layout.ty,
             )
         }
 
-        let size = left_layout.size;
+        let size = left.layout.size;
 
         // Operations that need special treatment for signed integers
-        if left_layout.abi.is_signed() {
+        if left.layout.abi.is_signed() {
             let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                 Lt => Some(i128::lt),
                 Le => Some(i128::le),
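The shift branch in the hunk above masks the RHS to the bit width of the LHS type and reports overflow whenever masking changed the value, including the deliberately wrapping cast for a negative signed RHS. A minimal standalone sketch of that masking rule follows; `masked_shift_amount` is a hypothetical helper, not interpreter code:

```rust
// Returns (effective shift amount, whether the requested amount was out of range).
fn masked_shift_amount(shift_amount: i128, lhs_bits: u32) -> (u32, bool) {
    // Out-of-range (overflow for the checked shift intrinsics) means negative or >= width.
    let overflow = shift_amount < 0 || shift_amount >= i128::from(lhs_bits);
    // Wrapping cast: a negative amount becomes `shift_amount mod 2^128`, which is still
    // congruent to `shift_amount` modulo the power-of-two bit width.
    let masked = (shift_amount as u128) % u128::from(lhs_bits);
    (masked as u32, overflow)
}

fn main() {
    // Shifting a 32-bit value by 33 actually shifts by 33 % 32 == 1, and counts as overflow.
    assert_eq!(masked_shift_amount(33, 32), (1, true));
    // A shift amount of -1 masks to 31 for a 32-bit LHS.
    assert_eq!(masked_shift_amount(-1, 32), (31, true));
    // In-range amounts pass through unchanged.
    assert_eq!(masked_shift_amount(7, 32), (7, false));
}
```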
@@ -236,18 +245,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 _ => None,
             };
             if let Some(op) = op {
-                let l = self.sign_extend(l, left_layout) as i128;
-                let r = self.sign_extend(r, right_layout) as i128;
-                return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false));
+                return Ok((ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx), false));
             }
             if bin_op == Cmp {
-                let l = self.sign_extend(l, left_layout) as i128;
-                let r = self.sign_extend(r, right_layout) as i128;
-                return Ok(self.three_way_compare(l, r));
+                return Ok(self.three_way_compare(l_signed(), r_signed()));
             }
             let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
-                Div if r == 0 => throw_ub!(DivisionByZero),
-                Rem if r == 0 => throw_ub!(RemainderByZero),
+                Div if r.is_null() => throw_ub!(DivisionByZero),
+                Rem if r.is_null() => throw_ub!(RemainderByZero),
                 Div => Some(i128::overflowing_div),
                 Rem => Some(i128::overflowing_rem),
                 Add | AddUnchecked => Some(i128::overflowing_add),
@@ -256,8 +261,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 _ => None,
             };
             if let Some(op) = op {
-                let l = self.sign_extend(l, left_layout) as i128;
-                let r = self.sign_extend(r, right_layout) as i128;
+                let l = l_signed();
+                let r = r_signed();
 
                 // We need a special check for overflowing Rem and Div since they are *UB*
                 // on overflow, which can happen with "int_min $OP -1".
@@ -272,17 +277,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
 
                 let (result, oflo) = op(l, r);
-                // This may be out-of-bounds for the result type, so we have to truncate ourselves.
+                // This may be out-of-bounds for the result type, so we have to truncate.
                 // If that truncation loses any information, we have an overflow.
-                let result = result as u128;
-                let truncated = self.truncate(result, left_layout);
-                let overflow = oflo || self.sign_extend(truncated, left_layout) != result;
+                let (result, lossy) = ScalarInt::truncate_from_int(result, left.layout.size);
+                let overflow = oflo || lossy;
                 if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                     throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
                 }
-                return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
+                return Ok((ImmTy::from_scalar_int(result, left.layout), overflow));
             }
         }
+        // From here on it's okay to treat everything as unsigned.
+        let l = l_unsigned();
+        let r = r_unsigned();
 
         if bin_op == Cmp {
             return Ok(self.three_way_compare(l, r));
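The signed arithmetic path above now detects overflow by truncating the full `i128` result to the target width and treating a lossy truncation as overflow. Here is a standalone sketch of that check with a hypothetical `truncate_from_int` free function; it mirrors the role, but not the actual signature, of `ScalarInt::truncate_from_int`:

```rust
// Returns the low `target_bits` bits of `value` plus a flag saying whether the
// truncation lost information (i.e. the value does not fit the signed target type).
fn truncate_from_int(value: i128, target_bits: u32) -> (u128, bool) {
    let shift = 128 - target_bits;
    // Keep only the low `target_bits` bits of the two's-complement representation.
    let truncated = (value as u128) << shift >> shift;
    // Sign-extend the truncated bits back to i128; if that does not reproduce the
    // original value, the truncation was lossy.
    let sign_extended = ((truncated << shift) as i128) >> shift;
    (truncated, sign_extended != value)
}

fn main() {
    // 200 does not fit in i8: the truncation is lossy, so this counts as overflow.
    assert_eq!(truncate_from_int(200, 8), (200, true));
    // -1 fits in i8; its 8-bit two's-complement pattern is 0xff and nothing was lost.
    assert_eq!(truncate_from_int(-1, 8), (0xff, false));
}
```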
@@ -297,12 +304,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Gt => ImmTy::from_bool(l > r, *self.tcx),
             Ge => ImmTy::from_bool(l >= r, *self.tcx),
 
-            BitOr => ImmTy::from_uint(l | r, left_layout),
-            BitAnd => ImmTy::from_uint(l & r, left_layout),
-            BitXor => ImmTy::from_uint(l ^ r, left_layout),
+            BitOr => ImmTy::from_uint(l | r, left.layout),
+            BitAnd => ImmTy::from_uint(l & r, left.layout),
+            BitXor => ImmTy::from_uint(l ^ r, left.layout),
 
             Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
-                assert!(!left_layout.abi.is_signed());
+                assert!(!left.layout.abi.is_signed());
                 let op: fn(u128, u128) -> (u128, bool) = match bin_op {
                     Add | AddUnchecked => u128::overflowing_add,
                     Sub | SubUnchecked => u128::overflowing_sub,
@@ -316,21 +323,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (result, oflo) = op(l, r);
                 // Truncate to target type.
                 // If that truncation loses any information, we have an overflow.
-                let truncated = self.truncate(result, left_layout);
-                let overflow = oflo || truncated != result;
+                let (result, lossy) = ScalarInt::truncate_from_uint(result, left.layout.size);
+                let overflow = oflo || lossy;
                 if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                     throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
                 }
-                return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
+                return Ok((ImmTy::from_scalar_int(result, left.layout), overflow));
             }
 
             _ => span_bug!(
                 self.cur_span(),
                 "invalid binary op {:?}: {:?}, {:?} (both {})",
                 bin_op,
-                l,
-                r,
-                right_layout.ty,
+                left,
+                right,
+                right.layout.ty,
             ),
         };
 
@@ -427,9 +434,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     right.layout.ty
                 );
 
-                let l = left.to_scalar().to_bits(left.layout.size)?;
-                let r = right.to_scalar().to_bits(right.layout.size)?;
-                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
+                self.binary_int_op(bin_op, left, right)
             }
             _ if left.layout.ty.is_any_ptr() => {
                 // The RHS type must be a `pointer` *or an integer type* (for `Offset`).