@@ -8545,9 +8545,6 @@ pub unsafe fn _mm_movm_epi8(k: __mmask16) -> __m128i {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kadd_mask32&expand=3207)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(all(test, target_arch = "x86"), assert_instr(add))]
- #[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(lea))] // generate normal lea/add code instead of kaddd
- //llvm.x86.avx512.kadd.d
pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
    transmute(a + b)
}
@@ -8557,9 +8554,6 @@ pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kadd_mask64&expand=3208)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(all(test, target_arch = "x86"), assert_instr(add))]
- #[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(lea))] // generate normal lea/add code instead of kaddd
- //llvm.x86.avx512.kadd.d
pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
    transmute(a + b)
}
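For reference, both kadd intrinsics are plain integer addition: __mmask32 and __mmask64 are aliases for u32 and u64, so no kaddd/kaddq instruction is needed for correctness. A minimal sketch of exercising them, assuming an x86_64 target and a nightly toolchain where these unstable AVX-512 intrinsics and avx512bw runtime detection are available:

    #![feature(stdsimd)] // assumed gate for the unstable AVX-512 intrinsics
    use std::arch::x86_64::*;

    fn main() {
        if is_x86_feature_detected!("avx512bw") {
            // Mask "addition" behaves like ordinary unsigned addition.
            let r32 = unsafe { _kadd_mask32(0b0011, 0b0101) };
            assert_eq!(r32, 0b1000);
            let r64 = unsafe { _kadd_mask64(3, 5) };
            assert_eq!(r64, 8);
        }
    }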
@@ -8569,7 +8563,6 @@ pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kand_mask32&expand=3213)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandd
pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
    transmute(a & b)
}
@@ -8579,7 +8572,6 @@ pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kand_mask64&expand=3214)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandq
pub unsafe fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
    transmute(a & b)
}
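Likewise, mask AND is bitwise AND on the underlying integers. A short sketch under the same nightly/x86_64 assumptions as above:

    #![feature(stdsimd)]
    use std::arch::x86_64::*;

    fn main() {
        if is_x86_feature_detected!("avx512bw") {
            // AND keeps only the lanes set in both masks.
            assert_eq!(unsafe { _kand_mask32(0b1100, 0b1010) }, 0b1000);
            assert_eq!(unsafe { _kand_mask64(0b1100, 0b1010) }, 0b1000);
        }
    }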
@@ -8607,7 +8599,6 @@ pub unsafe fn _knot_mask64(a: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kandn_mask32&expand=3219)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(not))] // generate normal and code instead of kandnd
pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
    transmute(_knot_mask32(a) & b)
}
@@ -8617,7 +8608,6 @@ pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kandn_mask64&expand=3220)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(not))] // generate normal and code instead of kandnq
pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
    transmute(_knot_mask64(a) & b)
}
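The kandn pair computes NOT(a) AND b, so operand order matters. A sketch, under the same assumptions as the earlier examples:

    #![feature(stdsimd)]
    use std::arch::x86_64::*;

    fn main() {
        if is_x86_feature_detected!("avx512bw") {
            // NOT(0b1100) AND 0b1010 keeps only bit 1.
            assert_eq!(unsafe { _kandn_mask32(0b1100, 0b1010) }, 0b0010);
            assert_eq!(unsafe { _kandn_mask64(0b1100, 0b1010) }, 0b0010);
        }
    }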
@@ -8627,7 +8617,6 @@ pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kor_mask32&expand=3240)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(or))] // generate normal and code instead of kord
pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
    transmute(a | b)
}
@@ -8637,7 +8626,6 @@ pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kor_mask64&expand=3241)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(or))] // generate normal and code instead of korq
pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
    transmute(a | b)
}
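Mask OR follows the same pattern, bitwise OR on the scalar representation; a brief sketch, same assumptions:

    #![feature(stdsimd)]
    use std::arch::x86_64::*;

    fn main() {
        if is_x86_feature_detected!("avx512bw") {
            // OR merges the lanes set in either mask.
            assert_eq!(unsafe { _kor_mask32(0b1100, 0b1010) }, 0b1110);
            assert_eq!(unsafe { _kor_mask64(0b1100, 0b1010) }, 0b1110);
        }
    }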
@@ -8647,7 +8635,6 @@ pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxor_mask32&expand=3292)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kxord
pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
    transmute(a ^ b)
}
@@ -8657,7 +8644,6 @@ pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxor_mask64&expand=3293)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kxorq
pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
    transmute(a ^ b)
}
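Mask XOR is bitwise XOR on the underlying integers; a sketch under the same assumptions:

    #![feature(stdsimd)]
    use std::arch::x86_64::*;

    fn main() {
        if is_x86_feature_detected!("avx512bw") {
            // XOR keeps the lanes where the two masks differ.
            assert_eq!(unsafe { _kxor_mask32(0b1100, 0b1010) }, 0b0110);
            assert_eq!(unsafe { _kxor_mask64(0b1100, 0b1010) }, 0b0110);
        }
    }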
@@ -8667,7 +8653,6 @@ pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxnor_mask32&expand=3286)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kxnord
pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
    transmute(_knot_mask32(a ^ b))
}
@@ -8677,7 +8662,6 @@ pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxnor_mask64&expand=3287)
#[inline]
#[target_feature(enable = "avx512bw")]
- #[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kxnorq
pub unsafe fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
    transmute(_knot_mask64(a ^ b))
}
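Finally, kxnor complements the XOR, so a result bit is set wherever the two masks agree, including the otherwise untouched high bits; for example, _kxnor_mask64(0, 0) is all ones. A sketch under the same nightly/x86_64 assumptions:

    #![feature(stdsimd)]
    use std::arch::x86_64::*;

    fn main() {
        if is_x86_feature_detected!("avx512bw") {
            // XNOR: a bit is set where a and b agree.
            assert_eq!(unsafe { _kxnor_mask32(0b1100, 0b1010) }, !0b0110u32);
            assert_eq!(unsafe { _kxnor_mask64(0, 0) }, u64::MAX);
        }
    }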