use super::HashMap;
use crate::frontend::FunctionBuilder;
use alloc::vec::Vec;
+ use core::convert::TryFrom;
use cranelift_codegen::ir::condcodes::IntCC;
use cranelift_codegen::ir::*;
use log::debug;

- type EntryIndex = u64;
+ type EntryIndex = u128;
+
+ /*
+ FIXME add test for
+
+ fn main() {
+     let options = [1u128];
+     match options[0] {
+         1 => (),
+         0 => loop {},
+         v => panic(v),
+     };
+ }
+
+ fn panic(v: u128) -> ! {
+     panic!("{}", v)
+ }
+ */

/// Unlike with `br_table`, `Switch` cases may be sparse or non-0-based.
/// They emit efficient code using branches, jump tables, or a combination of both.
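For orientation, `Switch` is driven through `cranelift_frontend::FunctionBuilder`: cases are registered with `set_entry` and lowered with `emit`. The following is a minimal usage sketch, illustrative only and not part of this change; the block names and the discriminant value are made up.

    use cranelift_codegen::ir::{types, Function, InstBuilder};
    use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Switch};

    fn build_example_switch() {
        let mut func = Function::new();
        let mut func_ctx = FunctionBuilderContext::new();
        let mut builder = FunctionBuilder::new(&mut func, &mut func_ctx);

        let entry = builder.create_block();
        builder.switch_to_block(entry);

        // Target blocks for the individual cases plus a fallback.
        let block0 = builder.create_block();
        let block1 = builder.create_block();
        let fallback = builder.create_block();

        let val = builder.ins().iconst(types::I32, 1);

        // Entries may be sparse and non-0-based; `emit` picks branches and/or jump tables.
        let mut switch = Switch::new();
        switch.set_entry(0, block0);
        switch.set_entry(7, block1);
        switch.emit(&mut builder, val, fallback);

        // The target blocks still have to be filled and sealed before `builder.finalize()`.
    }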
@@ -152,11 +170,20 @@ impl Switch {
            let left_block = bx.create_block();
            let right_block = bx.create_block();

-             let should_take_right_side = bx.ins().icmp_imm(
-                 IntCC::UnsignedGreaterThanOrEqual,
-                 val,
-                 right[0].first_index as i64,
-             );
+             let first_index = right[0].first_index;
+             let should_take_right_side = if let Ok(index) = u64::try_from(first_index) {
+                 bx.ins().icmp_imm(
+                     IntCC::UnsignedGreaterThanOrEqual,
+                     val,
+                     index as i64,
+                 )
+             } else {
+                 // `icmp_imm` only takes a 64-bit immediate, so materialize the 128-bit
+                 // bound from two 64-bit halves with `iconcat` and use a plain `icmp`.
+                 let (lsb, msb) = (first_index as u64, (first_index >> 64) as u64);
+                 let lsb = bx.ins().iconst(types::I64, lsb as i64);
+                 let msb = bx.ins().iconst(types::I64, msb as i64);
+                 let index = bx.ins().iconcat(lsb, msb);
+                 bx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, val, index)
+             };

            bx.ins().brnz(should_take_right_side, right_block, &[]);
            bx.ins().jump(left_block, &[]);

@@ -200,7 +227,15 @@ impl Switch {
            }
            (1, _) => {
                ins_fallthrough_jump(was_branch, bx);
-                 let is_good_val = bx.ins().icmp_imm(IntCC::Equal, val, first_index as i64);
+                 let is_good_val = if let Ok(first_index) = u64::try_from(first_index) {
+                     bx.ins().icmp_imm(IntCC::Equal, val, first_index as i64)
+                 } else {
+                     let (lsb, msb) = (first_index as u64, (first_index >> 64) as u64);
+                     let lsb = bx.ins().iconst(types::I64, lsb as i64);
+                     let msb = bx.ins().iconst(types::I64, msb as i64);
+                     let index = bx.ins().iconcat(lsb, msb);
+                     bx.ins().icmp(IntCC::Equal, val, index)
+                 };
                bx.ins().brnz(is_good_val, blocks[0], &[]);
            }
            (_, 0) => {
@@ -217,11 +252,19 @@ impl Switch {
            (_, _) => {
                ins_fallthrough_jump(was_branch, bx);
                let jt_block = bx.create_block();
-                 let is_good_val = bx.ins().icmp_imm(
-                     IntCC::UnsignedGreaterThanOrEqual,
-                     val,
-                     first_index as i64,
-                 );
+                 let is_good_val = if let Ok(first_index) = u64::try_from(first_index) {
+                     bx.ins().icmp_imm(
+                         IntCC::UnsignedGreaterThanOrEqual,
+                         val,
+                         first_index as i64,
+                     )
+                 } else {
+                     let (lsb, msb) = (first_index as u64, (first_index >> 64) as u64);
+                     let lsb = bx.ins().iconst(types::I64, lsb as i64);
+                     let msb = bx.ins().iconst(types::I64, msb as i64);
+                     let index = bx.ins().iconcat(lsb, msb);
+                     bx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, val, index)
+                 };
                bx.ins().brnz(is_good_val, jt_block, &[]);
                bx.seal_block(jt_block);
                cases_and_jt_blocks.push((first_index, jt_block, blocks));
@@ -241,6 +284,10 @@ impl Switch {
        cases_and_jt_blocks: Vec<(EntryIndex, Block, Vec<Block>)>,
    ) {
        for (first_index, jt_block, blocks) in cases_and_jt_blocks.into_iter().rev() {
+             // There are currently no 128bit systems supported by rustc, but once there are,
+             // ensure that we don't silently ignore a part of the jump table for 128bit
+             // integers on 128bit systems.
+             assert!(
+                 core::mem::size_of::<usize>() * 8 <= 64,
+                 "128bit jump tables are not yet supported"
+             );
+
            let mut jt_data = JumpTableData::new();
            for block in blocks {
                jt_data.push_entry(block);
@@ -251,8 +298,31 @@ impl Switch {
            let discr = if first_index == 0 {
                val
            } else {
-                 bx.ins().iadd_imm(val, (first_index as i64).wrapping_neg())
+                 if let Ok(first_index) = u64::try_from(first_index) {
+                     bx.ins().iadd_imm(val, (first_index as i64).wrapping_neg())
+                 } else {
+                     let (lsb, msb) = (first_index as u64, (first_index >> 64) as u64);
+                     let lsb = bx.ins().iconst(types::I64, lsb as i64);
+                     let msb = bx.ins().iconst(types::I64, msb as i64);
+                     let index = bx.ins().iconcat(lsb, msb);
+                     bx.ins().isub(val, index)
+                 }
            };
+
+             let discr = if bx.func.dfg.value_type(discr).bits() > 64 {
+                 // Check for overflow of cast to u64.
+                 let new_block = bx.create_block();
+                 let bigger_than_u64 = bx.ins().icmp_imm(IntCC::UnsignedGreaterThan, discr, u64::max_value() as i64);
+                 bx.ins().brnz(bigger_than_u64, otherwise, &[]);
+                 bx.ins().jump(new_block, &[]);
+                 bx.switch_to_block(new_block);
+
+                 // Cast to u64, as br_table is not implemented for integers bigger than 64bits.
+                 bx.ins().ireduce(types::I64, discr)
+             } else {
+                 discr
+             };
+
            bx.ins().br_table(discr, otherwise, jump_table);
        }
    }
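The FIXME near the top asks for a test. At the `Switch` level, one way to exercise the new 128-bit paths might look roughly like the sketch below; this is illustrative only and not part of this commit. It assumes the module's existing imports plus `FunctionBuilderContext` from `crate::frontend`, and uses an entry index above `u64::MAX` to force the `iconcat` comparison path.

    use crate::frontend::FunctionBuilderContext;

    #[test]
    fn switch_on_i128_discriminant() {
        let mut func = Function::new();
        let mut func_ctx = FunctionBuilderContext::new();
        let mut bx = FunctionBuilder::new(&mut func, &mut func_ctx);

        let entry = bx.create_block();
        bx.switch_to_block(entry);

        // Build an i128 discriminant the same way the lowering does: two i64 halves + `iconcat`.
        let lsb = bx.ins().iconst(types::I64, 42);
        let msb = bx.ins().iconst(types::I64, 0);
        let val = bx.ins().iconcat(lsb, msb);

        let small_case = bx.create_block();
        let huge_case = bx.create_block();
        let fallback = bx.create_block();

        let mut switch = Switch::new();
        switch.set_entry(42, small_case);
        // An index that does not fit in u64 exercises the new `iconcat`/`icmp` comparisons.
        switch.set_entry(1u128 << 70, huge_case);
        switch.emit(&mut bx, val, fallback);
    }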