
Commit 6573765

Replace division implementations with code from the specialized-div-rem crate
1 parent 6de4f8f commit 6573765

File tree

8 files changed: +1160 −311 lines changed

src/int/mod.rs

+3 −13

```diff
@@ -1,22 +1,12 @@
 use core::ops;
 
-macro_rules! hty {
-    ($ty:ty) => {
-        <$ty as LargeInt>::HighHalf
-    };
-}
-
-macro_rules! os_ty {
-    ($ty:ty) => {
-        <$ty as Int>::OtherSign
-    };
-}
-
 pub mod addsub;
 pub mod mul;
-pub mod sdiv;
 pub mod shift;
+
+mod specialized_div_rem;
 pub mod udiv;
+pub mod sdiv;
 
 /// Trait for some basic operations on integers
 pub trait Int:
```
src/int/sdiv.rs

+56 −73

```diff
@@ -1,101 +1,84 @@
-use int::Int;
+use super::specialized_div_rem::*;
 
-trait Div: Int {
-    /// Returns `a / b`
-    fn div(self, other: Self) -> Self {
-        let s_a = self >> (Self::BITS - 1);
-        let s_b = other >> (Self::BITS - 1);
-        // NOTE it's OK to overflow here because of the `.unsigned()` below.
-        // This whole operation is computing the absolute value of the inputs
-        // So some overflow will happen when dealing with e.g. `i64::MIN`
-        // where the absolute value is `(-i64::MIN) as u64`
-        let a = (self ^ s_a).wrapping_sub(s_a);
-        let b = (other ^ s_b).wrapping_sub(s_b);
-        let s = s_a ^ s_b;
-
-        let r = a.unsigned().aborting_div(b.unsigned());
-        (Self::from_unsigned(r) ^ s) - s
-    }
-}
-
-impl Div for i32 {}
-impl Div for i64 {}
-impl Div for i128 {}
-
-trait Mod: Int {
-    /// Returns `a % b`
-    fn mod_(self, other: Self) -> Self {
-        let s = other >> (Self::BITS - 1);
-        // NOTE(wrapping_sub) see comment in the `div`
-        let b = (other ^ s).wrapping_sub(s);
-        let s = self >> (Self::BITS - 1);
-        let a = (self ^ s).wrapping_sub(s);
-
-        let r = a.unsigned().aborting_rem(b.unsigned());
-        (Self::from_unsigned(r) ^ s) - s
-    }
-}
-
-impl Mod for i32 {}
-impl Mod for i64 {}
-impl Mod for i128 {}
-
-trait Divmod: Int {
-    /// Returns `a / b` and sets `*rem = n % d`
-    fn divmod<F>(self, other: Self, rem: &mut Self, div: F) -> Self
-    where
-        F: Fn(Self, Self) -> Self,
-    {
-        let r = div(self, other);
-        // NOTE won't overflow because it's using the result from the
-        // previous division
-        *rem = self - r.wrapping_mul(other);
-        r
-    }
-}
-
-impl Divmod for i32 {}
-impl Divmod for i64 {}
+
+// NOTE: there are panics inside the specialized_div_rem functions if division by 0
+// is encountered, however these should be unreachable and optimized away unless
+// uses of `std/core::intrinsics::unchecked_div/rem` do not have a 0 check in front
+// of them.
 
 intrinsics! {
     #[maybe_use_optimized_c_shim]
     #[arm_aeabi_alias = __aeabi_idiv]
+    /// Returns `n / d`
     pub extern "C" fn __divsi3(a: i32, b: i32) -> i32 {
-        a.div(b)
+        i32_div_rem_binary_long(a, b).0
     }
 
     #[maybe_use_optimized_c_shim]
-    pub extern "C" fn __divdi3(a: i64, b: i64) -> i64 {
-        a.div(b)
+    /// Returns `n % d`
+    pub extern "C" fn __modsi3(a: i32, b: i32) -> i32 {
+        i32_div_rem_binary_long(a, b).1
     }
 
-    #[win64_128bit_abi_hack]
-    pub extern "C" fn __divti3(a: i128, b: i128) -> i128 {
-        a.div(b)
+    #[maybe_use_optimized_c_shim]
+    /// Returns `n / d` and sets `*rem = n % d`
+    pub extern "C" fn __divmodsi4(a: i32, b: i32, rem: &mut i32) -> i32 {
+        let quo_rem = i32_div_rem_binary_long(a, b);
+        *rem = quo_rem.1;
+        quo_rem.0
     }
 
+    // `_delegate` is most efficient in the 64 bit range
+
     #[maybe_use_optimized_c_shim]
-    pub extern "C" fn __modsi3(a: i32, b: i32) -> i32 {
-        a.mod_(b)
+    /// Returns `n / d`
+    pub extern "C" fn __divdi3(a: i64, b: i64) -> i64 {
+        i64_div_rem_delegate(a, b).0
     }
 
     #[maybe_use_optimized_c_shim]
+    /// Returns `n % d`
     pub extern "C" fn __moddi3(a: i64, b: i64) -> i64 {
-        a.mod_(b)
+        i64_div_rem_delegate(a, b).1
+    }
+
+    #[aapcs_on_arm]
+    /// Returns `n / d` and sets `*rem = n % d`
+    pub extern "C" fn __divmoddi4(a: i64, b: i64, rem: &mut i64) -> i64 {
+        let quo_rem = i64_div_rem_delegate(a, b);
+        *rem = quo_rem.1;
+        quo_rem.0
+    }
+}
+
+// `_trifecta` is efficient for large divisions, even when division
+// hardware is not available at all.
+
+#[cfg(not(target_arch = "x86_64"))]
+intrinsics! {
+    #[win64_128bit_abi_hack]
+    /// Returns `n / d`
+    pub extern "C" fn __divti3(a: i128, b: i128) -> i128 {
+        i128_div_rem_trifecta(a, b).0
     }
 
     #[win64_128bit_abi_hack]
+    /// Returns `n % d`
     pub extern "C" fn __modti3(a: i128, b: i128) -> i128 {
-        a.mod_(b)
+        i128_div_rem_trifecta(a, b).1
     }
+}
 
-    #[maybe_use_optimized_c_shim]
-    pub extern "C" fn __divmodsi4(a: i32, b: i32, rem: &mut i32) -> i32 {
-        a.divmod(b, rem, |a, b| __divsi3(a, b))
+#[cfg(target_arch = "x86_64")]
+intrinsics! {
+    #[win64_128bit_abi_hack]
+    /// Returns `a / b`
+    pub extern "C" fn __divti3(a: i128, b: i128) -> i128 {
+        i128_div_rem_asymmetric(a, b).0
     }
 
-    #[aapcs_on_arm]
-    pub extern "C" fn __divmoddi4(a: i64, b: i64, rem: &mut i64) -> i64 {
-        a.divmod(b, rem, |a, b| __divdi3(a, b))
+    #[win64_128bit_abi_hack]
+    /// Returns `a % b`
+    pub extern "C" fn __modti3(a: i128, b: i128) -> i128 {
+        i128_div_rem_asymmetric(a, b).1
     }
 }
```
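All three widths follow the same pattern: each intrinsic calls one `specialized_div_rem` function that returns a `(quotient, remainder)` tuple and picks out `.0` or `.1`, with the quotient rounded toward zero and the remainder taking the sign of the dividend (the truncated-division convention of Rust's `/` and `%`). A hypothetical sanity check, not part of this commit and assuming the intrinsics are reachable from some test harness, could compare the i32 pair against the built-in operators:

```rust
// Hypothetical test, not part of this commit: checks that the (quotient, remainder)
// pair produced by the new i32 intrinsics matches the built-in truncated-division
// operators for a few representative sign combinations.
#[test]
fn divmodsi4_matches_builtin_ops() {
    for &(a, b) in &[(7i32, 3), (-7, 3), (7, -3), (-7, -3), (i32::MIN, 3)] {
        let mut rem = 0;
        let quo = __divmodsi4(a, b, &mut rem);
        assert_eq!(quo, a.wrapping_div(b)); // quotient rounds toward zero
        assert_eq!(rem, a.wrapping_rem(b)); // remainder keeps the dividend's sign
    }
}
```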
+174 (new file)

```rust
macro_rules! impl_asymmetric {
    (
        $unsigned_name:ident, // name of the unsigned function
        $signed_name:ident, // name of the signed function
        $half_division:ident, // function for division of a $uX by a $uX
        $asymmetric_division:ident, // function for division of a $uD by a $uX
        $n_h:expr, // the number of bits in $iH or $uH
        $uH:ident, // unsigned integer with half the bit width of $uX
        $uX:ident, // unsigned integer with half the bit width of $uD
        $uD:ident, // unsigned integer with double the bit width of $uX
        $iD:ident, // signed version of $uD
        $($unsigned_attr:meta),*; // attributes for the unsigned function
        $($signed_attr:meta),* // attributes for the signed function
    ) => {
        /// Computes the quotient and remainder of `duo` divided by `div` and returns them as a
        /// tuple.
        ///
        /// This is optimized for dividing integers with the same bitwidth as the largest operand in
        /// an asymmetrically sized division. For example, the x86-64 `divq` assembly instruction
        /// can divide a 128 bit integer by a 64 bit integer if the quotient fits in 64 bits.
        ///
        /// # Panics
        ///
        /// When attempting to divide by zero, this function will panic.
        $(
            #[$unsigned_attr]
        )*
        pub fn $unsigned_name(duo: $uD, div: $uD) -> ($uD, $uD) {
            #[inline(always)]
            fn carrying_mul(lhs: $uX, rhs: $uX) -> ($uX, $uX) {
                let tmp = (lhs as $uD).wrapping_mul(rhs as $uD);
                (tmp as $uX, (tmp >> ($n_h * 2)) as $uX)
            }
            #[inline(always)]
            fn carrying_mul_add(lhs: $uX, mul: $uX, add: $uX) -> ($uX, $uX) {
                let tmp = (lhs as $uD).wrapping_mul(mul as $uD).wrapping_add(add as $uD);
                (tmp as $uX, (tmp >> ($n_h * 2)) as $uX)
            }

            let n: u32 = $n_h * 2;

            // Many of these subalgorithms are taken from trifecta.rs, see that for better
            // documentation

            let duo_lo = duo as $uX;
            let duo_hi = (duo >> n) as $uX;
            let div_lo = div as $uX;
            let div_hi = (div >> n) as $uX;
            if div_hi == 0 {
                if div_lo == 0 {
                    panic!("division by zero");
                }
                if duo_hi < div_lo {
                    // plain $uD by $uX division that will fit into $uX
                    let tmp = unsafe { $asymmetric_division(duo, div_lo) };
                    return (tmp.0 as $uD, tmp.1 as $uD)
                } else if (div_lo >> $n_h) == 0 {
                    // Short division of $uD by a $uH.
                    let div_0 = div_lo as $uH as $uX;
                    let (quo_hi, rem_3) = $half_division(duo_hi, div_0);

                    let duo_mid =
                        ((duo >> $n_h) as $uH as $uX)
                        | (rem_3 << $n_h);
                    let (quo_1, rem_2) = $half_division(duo_mid, div_0);

                    let duo_lo =
                        (duo as $uH as $uX)
                        | (rem_2 << $n_h);
                    let (quo_0, rem_1) = $half_division(duo_lo, div_0);

                    return (
                        (quo_0 as $uD)
                        | ((quo_1 as $uD) << $n_h)
                        | ((quo_hi as $uD) << n),
                        rem_1 as $uD
                    )
                } else {
                    // Short division using the $uD by $uX division
                    let (quo_hi, rem_hi) = $half_division(duo_hi, div_lo);
                    let tmp = unsafe {
                        $asymmetric_division((duo_lo as $uD) | ((rem_hi as $uD) << n), div_lo)
                    };
                    return ((tmp.0 as $uD) | ((quo_hi as $uD) << n), tmp.1 as $uD)
                }
            }

            let duo_lz = duo_hi.leading_zeros();
            let div_lz = div_hi.leading_zeros();
            let rel_leading_sb = div_lz.wrapping_sub(duo_lz);
            if rel_leading_sb < $n_h {
                // Some x86_64 CPUs have bad `divq` implementations that make putting
                // a `mul` or `mul - 1` algorithm here beneficial
                let shift = n.wrapping_sub(duo_lz);
                let duo_sig_n = (duo >> shift) as $uX;
                let div_sig_n = (div >> shift) as $uX;
                let mul = $half_division(duo_sig_n, div_sig_n).0;
                let div_lo = div as $uX;
                let div_hi = (div >> n) as $uX;
                let (tmp_lo, carry) = carrying_mul(mul, div_lo);
                let (tmp_hi, overflow) = carrying_mul_add(mul, div_hi, carry);
                let tmp = (tmp_lo as $uD) | ((tmp_hi as $uD) << n);
                if ((overflow & 1) != 0) || (duo < tmp) {
                    return (
                        mul.wrapping_sub(1) as $uD,
                        duo.wrapping_add(div.wrapping_sub(tmp))
                    )
                } else {
                    return (
                        mul as $uD,
                        duo.wrapping_sub(tmp)
                    )
                }
            } else {
                // This has been adapted from
                // https://www.codeproject.com/tips/785014/uint-division-modulus which was in turn
                // adapted from www.hackersdelight.org

                // This is similar to the `mul` or `mul - 1` algorithm in that it uses only more
                // significant parts of `duo` and `div` to divide a large integer with a smaller
                // division instruction.
                let tmp = unsafe {
                    $asymmetric_division(duo >> 1, ((div << div_lz) >> n) as $uX)
                };
                let mut quo = tmp.0 >> ((n - 1) - div_lz);
                if quo != 0 {
                    quo -= 1;
                }
                // Note that this is a large $uD multiplication being used here
                let mut rem = duo - ((quo as $uD) * div);

                if rem >= div {
                    quo += 1;
                    rem -= div;
                }
                return (quo as $uD, rem)
            }
        }

        /// Computes the quotient and remainder of `duo` divided by `div` and returns them as a
        /// tuple.
        ///
        /// This is optimized for dividing integers with the same bitwidth as the largest operand in
        /// an asymmetrically sized division. For example, the x86-64 `divq` assembly instruction
        /// can divide a 128 bit integer by a 64 bit integer if the quotient fits in 64 bits.
        ///
        /// # Panics
        ///
        /// When attempting to divide by zero, this function will panic.
        $(
            #[$signed_attr]
        )*
        pub fn $signed_name(duo: $iD, div: $iD) -> ($iD, $iD) {
            match (duo < 0, div < 0) {
                (false, false) => {
                    let t = $unsigned_name(duo as $uD, div as $uD);
                    (t.0 as $iD, t.1 as $iD)
                },
                (true, false) => {
                    let t = $unsigned_name(duo.wrapping_neg() as $uD, div as $uD);
                    ((t.0 as $iD).wrapping_neg(), (t.1 as $iD).wrapping_neg())
                },
                (false, true) => {
                    let t = $unsigned_name(duo as $uD, div.wrapping_neg() as $uD);
                    ((t.0 as $iD).wrapping_neg(), t.1 as $iD)
                },
                (true, true) => {
                    let t = $unsigned_name(duo.wrapping_neg() as $uD, div.wrapping_neg() as $uD);
                    (t.0 as $iD, (t.1 as $iD).wrapping_neg())
                },
            }
        }
    }
}
```
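The `$asymmetric_division` parameter is expected to be an `unsafe fn($uD, $uX) -> ($uX, $uX)` that performs the narrowing division the doc comment describes. As an illustration only (this helper, its name, and its exact `asm!` options are assumptions, not part of the commit), such a function could be built on the x86-64 `div` instruction, which divides the 128-bit value in RDX:RAX by a 64-bit operand:

```rust
/// Hypothetical sketch of an `$asymmetric_division` helper: divides a u128 by a u64
/// using the x86-64 `div` (a.k.a. `divq`) instruction.
///
/// Safety: the caller must guarantee `div != 0` and `(duo >> 64) < div as u128`;
/// otherwise the instruction raises a divide error because the quotient would not
/// fit in 64 bits.
#[cfg(target_arch = "x86_64")]
unsafe fn u128_div_rem_by_u64(duo: u128, div: u64) -> (u64, u64) {
    let duo_lo = duo as u64;
    let duo_hi = (duo >> 64) as u64;
    let quo: u64;
    let rem: u64;
    // `div r64` divides RDX:RAX by the operand, leaving the quotient in RAX
    // and the remainder in RDX.
    core::arch::asm!(
        "div {0}",
        in(reg) div,
        inlateout("rax") duo_lo => quo,
        inlateout("rdx") duo_hi => rem,
        options(nomem, nostack)
    );
    (quo, rem)
}
```

Inside the macro, the safe `$half_division` plays the analogous role for $uX-by-$uX division; the pairing of the two is what makes the `_asymmetric` variant worthwhile on x86-64, while other targets use the `_trifecta` or `_delegate` variants as shown in sdiv.rs above.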
