
Commit e151964

Start moving away from the intrinsic_match macro
It isn't clear to most people other than me, and it blocks formatting by rustfmt.
1 parent 3deceed commit e151964
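
To make the shape of the change concrete, here is a minimal, self-contained sketch of the pattern the commit moves toward: a plain `match` on the intrinsic name plus a small argument-destructuring macro. All names here (`FakeValue`, `fake_codegen_operand`, the `example.*` intrinsic strings) are stand-ins rather than the real cg_clif items, and `panic!` stands in for `bug!`; the real `intrinsic_args!` definition appears in the `src/intrinsics/mod.rs` diff below.

```rust
// Stand-alone analogue of the new structure. Because dispatch is an ordinary
// `match` and the helper expands to plain `let` bindings, rustfmt can format
// every arm, which the old `intrinsic_match!` DSL prevented.

#[derive(Debug, Clone, Copy)]
struct FakeValue(i64);

// Stand-in for `codegen_operand`, which lowers a MIR operand in the real code.
fn fake_codegen_operand(op: &i64) -> FakeValue {
    FakeValue(*op)
}

// Analogue of the new `intrinsic_args!`: bind each operand by slice pattern
// and lower it, or fail loudly on an unexpected arity.
macro_rules! fake_intrinsic_args {
    ($args:expr => ($($arg:ident),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(fake_codegen_operand($arg)),*)
        } else {
            panic!("wrong number of args for intrinsic {}", $intrinsic);
        };
    };
}

fn codegen_intrinsic_call(intrinsic: &str, args: &[i64]) {
    match intrinsic {
        "example.movemask" => {
            fake_intrinsic_args!(args => (a); intrinsic);
            println!("lowering {intrinsic} with {a:?}");
        }
        "example.add_with_carry" => {
            fake_intrinsic_args!(args => (c_in, a, b); intrinsic);
            println!("lowering {intrinsic} with {c_in:?} {a:?} {b:?}");
        }
        _ => println!("unsupported intrinsic {intrinsic}; would trap here"),
    }
}

fn main() {
    codegen_intrinsic_call("example.movemask", &[0b1011]);
    codegen_intrinsic_call("example.add_with_carry", &[1, 2, 3]);
    codegen_intrinsic_call("example.unknown", &[]);
}
```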

3 files changed: +346 −204 lines

Diff for: src/intrinsics/llvm.rs

+83 −54
```diff
@@ -13,23 +13,20 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
     ret: CPlace<'tcx>,
     target: Option<BasicBlock>,
 ) {
-    intrinsic_match! {
-        fx, intrinsic, args,
-        _ => {
-            fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
-            crate::trap::trap_unimplemented(fx, intrinsic);
-        };
-
+    match intrinsic {
         // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
-        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd", (c a) {
+        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
+            intrinsic_args!(fx, args => (a); intrinsic);
+
             let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
             let lane_ty = fx.clif_type(lane_ty).unwrap();
             assert!(lane_count <= 32);

             let mut res = fx.bcx.ins().iconst(types::I32, 0);

             for lane in (0..lane_count).rev() {
-                let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+                let a_lane =
+                    a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);

                 // cast float to int
                 let a_lane = match lane_ty {
@@ -49,18 +46,33 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(

             let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
             ret.write_cvalue(fx, res);
-        };
-        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd", (c x, c y, o kind) {
-            let kind = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
-            let flt_cc = match kind.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind)) {
+        }
+        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+            let (x, y, kind) = match args {
+                [x, y, kind] => (x, y, kind),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let x = codegen_operand(fx, x);
+            let y = codegen_operand(fx, y);
+            let kind = crate::constant::mir_operand_get_const_val(fx, kind)
+                .expect("llvm.x86.sse2.cmp.* kind not const");
+
+            let flt_cc = match kind
+                .try_to_bits(Size::from_bytes(1))
+                .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+            {
                 0 => FloatCC::Equal,
                 1 => FloatCC::LessThan,
                 2 => FloatCC::LessThanOrEqual,
                 7 => {
-                    unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
+                    unimplemented!(
+                        "Compares corresponding elements in `a` and `b` to see if neither is `NaN`."
+                    );
                 }
                 3 => {
-                    unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
+                    unimplemented!(
+                        "Compares corresponding elements in `a` and `b` to see if either is `NaN`."
+                    );
                 }
                 4 => FloatCC::NotEqual,
                 5 => {
@@ -79,50 +91,67 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
                };
                bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
            });
-        };
-        "llvm.x86.sse2.psrli.d", (c a, o imm8) {
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
-                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
-                    imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                    _ => fx.bcx.ins().iconst(types::I32, 0),
-                }
+        }
+        "llvm.x86.sse2.psrli.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
            });
-        };
-        "llvm.x86.sse2.pslli.d", (c a, o imm8) {
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
-                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
-                    imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                    _ => fx.bcx.ins().iconst(types::I32, 0),
-                }
+        }
+        "llvm.x86.sse2.pslli.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
            });
-        };
-        "llvm.x86.sse2.storeu.dq", (v mem_addr, c a) {
+        }
+        "llvm.x86.sse2.storeu.dq" => {
+            intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
+            let mem_addr = mem_addr.load_scalar(fx);
+
             // FIXME correctly handle the unalignment
             let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
             dest.write_cvalue(fx, a);
-        };
-        "llvm.x86.addcarry.64", (v c_in, c a, c b) {
-            llvm_add_sub(
-                fx,
-                BinOp::Add,
-                ret,
-                c_in,
-                a,
-                b
-            );
-        };
-        "llvm.x86.subborrow.64", (v b_in, c a, c b) {
-            llvm_add_sub(
-                fx,
-                BinOp::Sub,
-                ret,
-                b_in,
-                a,
-                b
-            );
-        };
+        }
+        "llvm.x86.addcarry.64" => {
+            intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
+            let c_in = c_in.load_scalar(fx);
+
+            llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+        }
+        "llvm.x86.subborrow.64" => {
+            intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
+            let b_in = b_in.load_scalar(fx);
+
+            llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+        }
+        _ => {
+            fx.tcx
+                .sess
+                .warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+            crate::trap::trap_unimplemented(fx, intrinsic);
+        }
     }

     let dest = target.expect("all llvm intrinsics used by stdlib should return");
```
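Note that the `cmp.ps`/`cmp.pd` and shift-immediate arms above do not use `intrinsic_args!` for every operand: the `kind` and `imm8` operands must stay as unlowered MIR operands so their constant value can be read with `mir_operand_get_const_val`, so those arms destructure `args` by hand with a slice pattern. A minimal stand-alone sketch of that shape, with stand-in types, `lower` in place of `codegen_operand`, and `panic!` in place of `bug!`:

```rust
// Stand-in for `codegen_operand`, which lowers a MIR operand in the real code.
fn lower(op: &i64) -> i64 {
    *op
}

fn shift_right_immediate(intrinsic: &str, args: &[i64]) -> i64 {
    // Slice pattern with a diverging fallback, mirroring the new arms:
    // `a` gets lowered, `imm8` is kept aside and inspected as a constant.
    let (a, imm8) = match args {
        [a, imm8] => (a, imm8),
        _ => panic!("wrong number of args for intrinsic {intrinsic}"),
    };
    let a = lower(a);

    // The real code reads `imm8` from a constant MIR operand; here it is a plain value.
    if *imm8 < 32 { a >> *imm8 } else { 0 }
}

fn main() {
    assert_eq!(shift_right_immediate("example.psrli", &[8, 1]), 4);
    println!("ok");
}
```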

Diff for: src/intrinsics/mod.rs

+11 −3
```diff
@@ -11,9 +11,6 @@ macro_rules! intrinsic_pat {
     (kw.$name:ident) => {
         kw::$name
     };
-    ($name:literal) => {
-        $name
-    };
 }

 macro_rules! intrinsic_arg {
@@ -26,6 +23,17 @@ macro_rules! intrinsic_arg {
     };
 }

+macro_rules! intrinsic_args {
+    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
+        #[allow(unused_parens)]
+        let ($($arg),*) = if let [$($arg),*] = $args {
+            ($(codegen_operand($fx, $arg)),*)
+        } else {
+            bug!("wrong number of args for intrinsic {}", $intrinsic);
+        };
+    }
+}
+
 macro_rules! intrinsic_match {
     ($fx:expr, $intrinsic:expr, $args:expr,
     _ => $unknown:block;
```