@@ -5,6 +5,7 @@ use core::ops::{BitAnd, BitOr, BitXor};
 use core::ops::{Div, Rem, Sub};
 use core::ops::{Shl, Shr};

+mod assign;
 mod deref;

 impl<I, T, const LANES: usize> core::ops::Index<I> for Simd<T, LANES>
@@ -65,25 +66,6 @@ macro_rules! impl_ref_ops {
         }
     };

-    // binary assignment op
-    {
-        impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty
-        where
-            LaneCount<$lanes2:ident>: SupportedLaneCount,
-        {
-            $(#[$attrs:meta])*
-            fn $fn:ident(&mut $self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) $body:tt
-        }
-    } => {
-        impl<const $lanes: usize> core::ops::$trait<$rhs> for $type
-        where
-            LaneCount<$lanes2>: SupportedLaneCount,
-        {
-            $(#[$attrs])*
-            fn $fn(&mut $self_tok, $rhs_arg: $rhs_arg_ty) $body
-        }
-    };
-
     // unary op
     {
         impl<const $lanes:ident: usize> core::ops::$trait:ident for $type:ty
@@ -107,34 +89,34 @@ macro_rules! impl_op {
 /// Automatically implements operators over vectors and scalars for a particular vector.
 macro_rules! impl_op {
     { impl Add for $scalar:ty } => {
-        impl_op! { @binary $scalar, Add::add, AddAssign::add_assign, simd_add }
+        impl_op! { @binary $scalar, Add::add, simd_add }
     };
     { impl Sub for $scalar:ty } => {
-        impl_op! { @binary $scalar, Sub::sub, SubAssign::sub_assign, simd_sub }
+        impl_op! { @binary $scalar, Sub::sub, simd_sub }
     };
     { impl Mul for $scalar:ty } => {
-        impl_op! { @binary $scalar, Mul::mul, MulAssign::mul_assign, simd_mul }
+        impl_op! { @binary $scalar, Mul::mul, simd_mul }
     };
     { impl Div for $scalar:ty } => {
-        impl_op! { @binary $scalar, Div::div, DivAssign::div_assign, simd_div }
+        impl_op! { @binary $scalar, Div::div, simd_div }
     };
     { impl Rem for $scalar:ty } => {
-        impl_op! { @binary $scalar, Rem::rem, RemAssign::rem_assign, simd_rem }
+        impl_op! { @binary $scalar, Rem::rem, simd_rem }
     };
     { impl Shl for $scalar:ty } => {
-        impl_op! { @binary $scalar, Shl::shl, ShlAssign::shl_assign, simd_shl }
+        impl_op! { @binary $scalar, Shl::shl, simd_shl }
     };
     { impl Shr for $scalar:ty } => {
-        impl_op! { @binary $scalar, Shr::shr, ShrAssign::shr_assign, simd_shr }
+        impl_op! { @binary $scalar, Shr::shr, simd_shr }
     };
     { impl BitAnd for $scalar:ty } => {
-        impl_op! { @binary $scalar, BitAnd::bitand, BitAndAssign::bitand_assign, simd_and }
+        impl_op! { @binary $scalar, BitAnd::bitand, simd_and }
     };
     { impl BitOr for $scalar:ty } => {
-        impl_op! { @binary $scalar, BitOr::bitor, BitOrAssign::bitor_assign, simd_or }
+        impl_op! { @binary $scalar, BitOr::bitor, simd_or }
     };
     { impl BitXor for $scalar:ty } => {
-        impl_op! { @binary $scalar, BitXor::bitxor, BitXorAssign::bitxor_assign, simd_xor }
+        impl_op! { @binary $scalar, BitXor::bitxor, simd_xor }
     };

     { impl Not for $scalar:ty } => {
@@ -166,7 +148,7 @@ macro_rules! impl_op {
     };

     // generic binary op with assignment when output is `Self`
-    { @binary $scalar:ty, $trait:ident :: $trait_fn:ident, $assign_trait:ident :: $assign_trait_fn:ident, $intrinsic:ident } => {
+    { @binary $scalar:ty, $trait:ident :: $trait_fn:ident, $intrinsic:ident } => {
         impl_ref_ops! {
             impl<const LANES: usize> core::ops::$trait<Self> for Simd<$scalar, LANES>
             where
@@ -210,32 +192,6 @@ macro_rules! impl_op {
                 }
             }
         }
-
-        impl_ref_ops! {
-            impl<const LANES: usize> core::ops::$assign_trait<Self> for Simd<$scalar, LANES>
-            where
-                LaneCount<LANES>: SupportedLaneCount,
-            {
-                #[inline]
-                fn $assign_trait_fn(&mut self, rhs: Self) {
-                    unsafe {
-                        *self = intrinsics::$intrinsic(*self, rhs);
-                    }
-                }
-            }
-        }
-
-        impl_ref_ops! {
-            impl<const LANES: usize> core::ops::$assign_trait<$scalar> for Simd<$scalar, LANES>
-            where
-                LaneCount<LANES>: SupportedLaneCount,
-            {
-                #[inline]
-                fn $assign_trait_fn(&mut self, rhs: $scalar) {
-                    core::ops::$assign_trait::$assign_trait_fn(self, Self::splat(rhs));
-                }
-            }
-        }
     };
 }

@@ -331,30 +287,6 @@ macro_rules! impl_unsigned_int_ops {
                 }
             }

-            impl_ref_ops! {
-                impl<const LANES: usize> core::ops::DivAssign<Self> for Simd<$scalar, LANES>
-                where
-                    LaneCount<LANES>: SupportedLaneCount,
-                {
-                    #[inline]
-                    fn div_assign(&mut self, rhs: Self) {
-                        *self = *self / rhs;
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl<const LANES: usize> core::ops::DivAssign<$scalar> for Simd<$scalar, LANES>
-                where
-                    LaneCount<LANES>: SupportedLaneCount,
-                {
-                    #[inline]
-                    fn div_assign(&mut self, rhs: $scalar) {
-                        *self = *self / rhs;
-                    }
-                }
-            }
-
             // remainder panics on zero divisor
             impl_ref_ops! {
                 impl<const LANES: usize> core::ops::Rem<Self> for Simd<$scalar, LANES>
@@ -421,30 +353,6 @@ macro_rules! impl_unsigned_int_ops {
                 }
             }

-            impl_ref_ops! {
-                impl<const LANES: usize> core::ops::RemAssign<Self> for Simd<$scalar, LANES>
-                where
-                    LaneCount<LANES>: SupportedLaneCount,
-                {
-                    #[inline]
-                    fn rem_assign(&mut self, rhs: Self) {
-                        *self = *self % rhs;
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl<const LANES: usize> core::ops::RemAssign<$scalar> for Simd<$scalar, LANES>
-                where
-                    LaneCount<LANES>: SupportedLaneCount,
-                {
-                    #[inline]
-                    fn rem_assign(&mut self, rhs: $scalar) {
-                        *self = *self % rhs;
-                    }
-                }
-            }
-
             // shifts panic on overflow
             impl_ref_ops! {
                 impl<const LANES: usize> core::ops::Shl<Self> for Simd<$scalar, LANES>
@@ -486,31 +394,6 @@ macro_rules! impl_unsigned_int_ops {
                 }
             }

-
-            impl_ref_ops! {
-                impl<const LANES: usize> core::ops::ShlAssign<Self> for Simd<$scalar, LANES>
-                where
-                    LaneCount<LANES>: SupportedLaneCount,
-                {
-                    #[inline]
-                    fn shl_assign(&mut self, rhs: Self) {
-                        *self = *self << rhs;
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl<const LANES: usize> core::ops::ShlAssign<$scalar> for Simd<$scalar, LANES>
-                where
-                    LaneCount<LANES>: SupportedLaneCount,
-                {
-                    #[inline]
-                    fn shl_assign(&mut self, rhs: $scalar) {
-                        *self = *self << rhs;
-                    }
-                }
-            }
-
             impl_ref_ops! {
                 impl<const LANES: usize> core::ops::Shr<Self> for Simd<$scalar, LANES>
                 where
@@ -550,31 +433,6 @@ macro_rules! impl_unsigned_int_ops {
                     }
                 }
             }
-
-
-            impl_ref_ops! {
-                impl<const LANES: usize> core::ops::ShrAssign<Self> for Simd<$scalar, LANES>
-                where
-                    LaneCount<LANES>: SupportedLaneCount,
-                {
-                    #[inline]
-                    fn shr_assign(&mut self, rhs: Self) {
-                        *self = *self >> rhs;
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl<const LANES: usize> core::ops::ShrAssign<$scalar> for Simd<$scalar, LANES>
-                where
-                    LaneCount<LANES>: SupportedLaneCount,
-                {
-                    #[inline]
-                    fn shr_assign(&mut self, rhs: $scalar) {
-                        *self = *self >> rhs;
-                    }
-                }
-            }
         )*
     };
 }
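
Not shown in this diff: the `*Assign` impls removed above are expected to reappear in the new `assign` module added by the `mod assign;` line. Below is a minimal sketch of the shape such a relocated impl can take, assuming the crate's `Simd`, `SimdElement`, `LaneCount`, and `SupportedLaneCount` items are in scope; the idea is to forward each compound-assignment trait to the already-implemented binary operator, generic over the right-hand side so both the vector and the scalar operand forms keep working:

use core::ops::{Add, AddAssign};

// Hypothetical forwarding impl (illustration only, not the actual
// contents of assign.rs): `+=` reuses the existing `+` impls, both
// `Simd + Simd` and `Simd + scalar`, so the unsafe intrinsic calls
// stay in one place inside the binary-operator impls.
impl<T, U, const LANES: usize> AddAssign<U> for Simd<T, LANES>
where
    Self: Add<U, Output = Self>,
    T: SimdElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn add_assign(&mut self, rhs: U) {
        // `Simd` is `Copy`, so read, combine, and write back.
        *self = *self + rhs;
    }
}

Under that assumption, existing call sites such as `v += Simd::splat(2)` and `v += 3` keep compiling unchanged, since both right-hand-side types already satisfy the `Add` bound.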