From 581c80817dbb67aa77fc2e836ba57d51e6a5ae6e Mon Sep 17 00:00:00 2001
From: Eric Huss
Date: Sun, 9 Feb 2025 09:12:30 -0800
Subject: [PATCH 1/9] Apply missing_unsafe_on_extern

---
 crates/core_arch/src/aarch64/mte.rs | 2 +-
 .../core_arch/src/aarch64/neon/generated.rs | 696 ++++-----
 crates/core_arch/src/aarch64/neon/mod.rs | 12 +-
 crates/core_arch/src/aarch64/prefetch.rs | 2 +-
 crates/core_arch/src/aarch64/tme.rs | 2 +-
 crates/core_arch/src/arm/dsp.rs | 2 +-
 crates/core_arch/src/arm/neon.rs | 2 +-
 crates/core_arch/src/arm/sat.rs | 2 +-
 crates/core_arch/src/arm/simd32.rs | 2 +-
 .../core_arch/src/arm_shared/barrier/mod.rs | 2 +-
 crates/core_arch/src/arm_shared/crc.rs | 2 +-
 crates/core_arch/src/arm_shared/crypto.rs | 2 +-
 crates/core_arch/src/arm_shared/hints.rs | 2 +-
 .../src/arm_shared/neon/generated.rs | 1336 ++++++++---------
 crates/core_arch/src/arm_shared/neon/mod.rs | 8 +-
 .../src/loongarch64/lasx/generated.rs | 2 +-
 .../src/loongarch64/lsx/generated.rs | 2 +-
 crates/core_arch/src/loongarch64/mod.rs | 2 +-
 crates/core_arch/src/mips/msa.rs | 2 +-
 crates/core_arch/src/nvptx/mod.rs | 4 +-
 crates/core_arch/src/nvptx/packed.rs | 2 +-
 crates/core_arch/src/powerpc/altivec.rs | 6 +-
 crates/core_arch/src/powerpc/vsx.rs | 2 +-
 crates/core_arch/src/powerpc64/vsx.rs | 2 +-
 crates/core_arch/src/riscv32/zk.rs | 2 +-
 crates/core_arch/src/riscv64/zk.rs | 2 +-
 crates/core_arch/src/riscv_shared/zb.rs | 4 +-
 crates/core_arch/src/riscv_shared/zk.rs | 6 +-
 crates/core_arch/src/s390x/vector.rs | 2 +-
 crates/core_arch/src/wasm32/atomic.rs | 2 +-
 crates/core_arch/src/wasm32/memory.rs | 2 +-
 crates/core_arch/src/wasm32/mod.rs | 2 +-
 crates/core_arch/src/wasm32/relaxed_simd.rs | 2 +-
 crates/core_arch/src/wasm32/simd128.rs | 2 +-
 crates/core_arch/src/x86/adx.rs | 2 +-
 crates/core_arch/src/x86/aes.rs | 2 +-
 crates/core_arch/src/x86/avx.rs | 2 +-
 crates/core_arch/src/x86/avx2.rs | 2 +-
 crates/core_arch/src/x86/avx512bf16.rs | 2 +-
 crates/core_arch/src/x86/avx512bitalg.rs | 2 +-
 crates/core_arch/src/x86/avx512bw.rs | 2 +-
 crates/core_arch/src/x86/avx512cd.rs | 2 +-
 crates/core_arch/src/x86/avx512dq.rs | 2 +-
 crates/core_arch/src/x86/avx512f.rs | 2 +-
 crates/core_arch/src/x86/avx512fp16.rs | 2 +-
 crates/core_arch/src/x86/avx512ifma.rs | 2 +-
 crates/core_arch/src/x86/avx512vbmi.rs | 2 +-
 crates/core_arch/src/x86/avx512vbmi2.rs | 2 +-
 crates/core_arch/src/x86/avx512vnni.rs | 2 +-
 crates/core_arch/src/x86/avxneconvert.rs | 2 +-
 crates/core_arch/src/x86/bmi1.rs | 2 +-
 crates/core_arch/src/x86/bmi2.rs | 2 +-
 crates/core_arch/src/x86/f16c.rs | 2 +-
 crates/core_arch/src/x86/fxsr.rs | 2 +-
 crates/core_arch/src/x86/gfni.rs | 2 +-
 crates/core_arch/src/x86/pclmulqdq.rs | 2 +-
 crates/core_arch/src/x86/rdrand.rs | 2 +-
 crates/core_arch/src/x86/rdtsc.rs | 2 +-
 crates/core_arch/src/x86/rtm.rs | 2 +-
 crates/core_arch/src/x86/sha.rs | 2 +-
 crates/core_arch/src/x86/sse.rs | 2 +-
 crates/core_arch/src/x86/sse2.rs | 2 +-
 crates/core_arch/src/x86/sse3.rs | 2 +-
 crates/core_arch/src/x86/sse41.rs | 2 +-
 crates/core_arch/src/x86/sse42.rs | 2 +-
 crates/core_arch/src/x86/sse4a.rs | 2 +-
 crates/core_arch/src/x86/ssse3.rs | 2 +-
 crates/core_arch/src/x86/tbm.rs | 2 +-
 crates/core_arch/src/x86/vaes.rs | 2 +-
 crates/core_arch/src/x86/vpclmulqdq.rs | 2 +-
 crates/core_arch/src/x86/xsave.rs | 2 +-
 crates/core_arch/src/x86_64/adx.rs | 2 +-
 crates/core_arch/src/x86_64/amx.rs | 2 +-
 crates/core_arch/src/x86_64/avx512f.rs | 2 +-
 crates/core_arch/src/x86_64/avx512fp16.rs | 2 +-
 crates/core_arch/src/x86_64/bmi.rs | 2 +-
 crates/core_arch/src/x86_64/bmi2.rs | 2 +-
 crates/core_arch/src/x86_64/fxsr.rs | 2 +-
 crates/core_arch/src/x86_64/rdrand.rs | 2 +-
 crates/core_arch/src/x86_64/sse.rs | 2 +-
 crates/core_arch/src/x86_64/sse2.rs | 2 +-
 crates/core_arch/src/x86_64/sse42.rs | 2 +-
 crates/core_arch/src/x86_64/tbm.rs | 2 +-
 crates/core_arch/src/x86_64/xsave.rs | 2 +-
 crates/std_detect/src/detect/cache.rs | 2 +-
 .../src/detect/os/windows/aarch64.rs | 2 +-
 crates/stdarch-gen-arm/src/intrinsic.rs | 2 +-
 crates/stdarch-gen-loongarch/src/main.rs | 2 +-
 88 files changed, 1116 insertions(+), 1116 deletions(-)

diff --git a/crates/core_arch/src/aarch64/mte.rs b/crates/core_arch/src/aarch64/mte.rs
index 014a9feafb..de5a1b4880 100644
--- a/crates/core_arch/src/aarch64/mte.rs
+++ b/crates/core_arch/src/aarch64/mte.rs
@@ -2,7 +2,7 @@
 //!
 //! [ACLE documentation](https://arm-software.github.io/acle/main/acle.html#markdown-toc-mte-intrinsics)
-extern "unadjusted" {
+unsafe extern "unadjusted" {
     #[cfg_attr(
         any(target_arch = "aarch64", target_arch = "arm64ec"),
         link_name = "llvm.aarch64.irg"
     )
diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs
index 027dcfc874..90a70ee4d7 100644
--- a/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/crates/core_arch/src/aarch64/neon/generated.rs
@@ -108,7 +108,7 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(fabd))]
 pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
-    extern "unadjusted" {
+    unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
             link_name = "llvm.aarch64.neon.fabd.v1f64"
@@ -126,7 +126,7 @@ pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(fabd))]
 pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
-    extern "unadjusted" {
+    unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
             link_name = "llvm.aarch64.neon.fabd.v2f64"
@@ -291,7 +291,7 @@ pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(saddlv))]
 pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 {
-    extern "unadjusted" {
+    unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
             link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
@@ -309,7 +309,7 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(saddlv))]
 pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 {
-    extern "unadjusted" {
+    unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
             link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
@@ -327,7 +327,7 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(saddlv))]
 pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 {
-    extern "unadjusted" {
+    unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
             link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
@@ -345,7 +345,7 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(saddlp))]
 pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 {
-    extern "unadjusted" {
+    unsafe extern
"unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i64.v2i32" @@ -363,7 +363,7 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16" @@ -381,7 +381,7 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16" @@ -399,7 +399,7 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32" @@ -417,7 +417,7 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlp))] pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32" @@ -435,7 +435,7 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f32.v2f32" @@ -453,7 +453,7 @@ pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f32.v4f32" @@ -471,7 +471,7 @@ pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f64.v2f64" @@ -489,7 +489,7 @@ pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v16i8" @@ -507,7 +507,7 @@ pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + 
unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v8i16" @@ -525,7 +525,7 @@ pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v4i32" @@ -543,7 +543,7 @@ pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v2i64" @@ -561,7 +561,7 @@ pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v16i8" @@ -579,7 +579,7 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v8i16" @@ -597,7 +597,7 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v4i32" @@ -615,7 +615,7 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v2i64" @@ -633,7 +633,7 @@ pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32" @@ -651,7 +651,7 @@ pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: 
float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32" @@ -669,7 +669,7 @@ pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64" @@ -687,7 +687,7 @@ pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32" @@ -705,7 +705,7 @@ pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32" @@ -723,7 +723,7 @@ pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64" @@ -741,7 +741,7 @@ pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v1i64.v1f64" @@ -759,7 +759,7 @@ pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v2i64.v2f64" @@ -777,7 +777,7 @@ pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.i64.f64" @@ -795,7 +795,7 @@ pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 { #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { - extern "unadjusted" { + unsafe extern 
"unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.i32.f32" @@ -813,7 +813,7 @@ pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64" @@ -831,7 +831,7 @@ pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64" @@ -849,7 +849,7 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.i64.f64" @@ -867,7 +867,7 @@ pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.i32.f32" @@ -2585,7 +2585,7 @@ pub unsafe fn vcltzd_s64(a: i64) -> u64 { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" @@ -2603,7 +2603,7 @@ pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" @@ -2621,7 +2621,7 @@ pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64" @@ -2729,7 +2729,7 @@ pub unsafe fn vcmlaq_laneq_f32( #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32" @@ -2747,7 
+2747,7 @@ pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32" @@ -2765,7 +2765,7 @@ pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64" @@ -2873,7 +2873,7 @@ pub unsafe fn vcmlaq_rot180_laneq_f32( #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" @@ -2891,7 +2891,7 @@ pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" @@ -2909,7 +2909,7 @@ pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64" @@ -3017,7 +3017,7 @@ pub unsafe fn vcmlaq_rot270_laneq_f32( #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32" @@ -3035,7 +3035,7 @@ pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32" @@ -3053,7 +3053,7 @@ pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern 
"unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64" @@ -6405,7 +6405,7 @@ pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64" @@ -6425,7 +6425,7 @@ pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64" @@ -6445,7 +6445,7 @@ pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64" @@ -6465,7 +6465,7 @@ pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" @@ -6485,7 +6485,7 @@ pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64" @@ -6505,7 +6505,7 @@ pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64" @@ -6525,7 +6525,7 @@ pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64" @@ -6545,7 +6545,7 @@ pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" @@ -6563,7 +6563,7 @@ pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, 
assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptosi.sat.v1i64.v1f64" @@ -6581,7 +6581,7 @@ pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptosi.sat.v2i64.v2f64" @@ -6599,7 +6599,7 @@ pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v1i64.v1f64" @@ -6617,7 +6617,7 @@ pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v2i64.v2f64" @@ -6635,7 +6635,7 @@ pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32" @@ -6653,7 +6653,7 @@ pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32" @@ -6671,7 +6671,7 @@ pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64" @@ -6689,7 +6689,7 @@ pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64" @@ -6707,7 +6707,7 @@ pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = 
"llvm.aarch64.neon.fcvtau.v2i32.v2f32" @@ -6725,7 +6725,7 @@ pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" @@ -6743,7 +6743,7 @@ pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64" @@ -6761,7 +6761,7 @@ pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" @@ -6779,7 +6779,7 @@ pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.i32.f32" @@ -6797,7 +6797,7 @@ pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.i64.f64" @@ -6815,7 +6815,7 @@ pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i32.f32" @@ -6833,7 +6833,7 @@ pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i64.f64" @@ -6873,7 +6873,7 @@ pub unsafe fn vcvts_f32_s32(a: i32) -> f32 { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32" @@ -6891,7 +6891,7 @@ pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch 
= "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32" @@ -6909,7 +6909,7 @@ pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64" @@ -6927,7 +6927,7 @@ pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64" @@ -6945,7 +6945,7 @@ pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" @@ -6963,7 +6963,7 @@ pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" @@ -6981,7 +6981,7 @@ pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64" @@ -6999,7 +6999,7 @@ pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64" @@ -7017,7 +7017,7 @@ pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.i32.f32" @@ -7035,7 +7035,7 @@ pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.i64.f64" @@ -7053,7 +7053,7 @@ pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { - extern "unadjusted" { + 
unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i32.f32" @@ -7071,7 +7071,7 @@ pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i64.f64" @@ -7089,7 +7089,7 @@ pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32" @@ -7107,7 +7107,7 @@ pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32" @@ -7125,7 +7125,7 @@ pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64" @@ -7143,7 +7143,7 @@ pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64" @@ -7161,7 +7161,7 @@ pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" @@ -7179,7 +7179,7 @@ pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" @@ -7197,7 +7197,7 @@ pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64" @@ -7215,7 +7215,7 @@ pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", 
since = "1.59.0")] pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" @@ -7233,7 +7233,7 @@ pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.i32.f32" @@ -7251,7 +7251,7 @@ pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.i64.f64" @@ -7269,7 +7269,7 @@ pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i32.f32" @@ -7287,7 +7287,7 @@ pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i64.f64" @@ -7305,7 +7305,7 @@ pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32" @@ -7323,7 +7323,7 @@ pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32" @@ -7341,7 +7341,7 @@ pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64" @@ -7359,7 +7359,7 @@ pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64" @@ -7377,7 +7377,7 @@ pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtpu))] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" @@ -7395,7 +7395,7 @@ pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" @@ -7413,7 +7413,7 @@ pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64" @@ -7431,7 +7431,7 @@ pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" @@ -7449,7 +7449,7 @@ pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.i32.f32" @@ -7467,7 +7467,7 @@ pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.i64.f64" @@ -7485,7 +7485,7 @@ pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i32.f32" @@ -7503,7 +7503,7 @@ pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i64.f64" @@ -7545,7 +7545,7 @@ pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32" @@ -7565,7 +7565,7 @@ pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { #[stable(feature = "neon_intrinsics", 
since = "1.59.0")] pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64" @@ -7585,7 +7585,7 @@ pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32" @@ -7605,7 +7605,7 @@ pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64" @@ -7625,7 +7625,7 @@ pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32" @@ -7645,7 +7645,7 @@ pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64" @@ -7665,7 +7665,7 @@ pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32" @@ -7685,7 +7685,7 @@ pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64" @@ -7747,7 +7747,7 @@ pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 { #[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64" @@ -8323,7 +8323,7 @@ pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v16i8" @@ -8341,7 +8341,7 @@ pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] 
#[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v8i16" @@ -8359,7 +8359,7 @@ pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v4i32" @@ -8377,7 +8377,7 @@ pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v2i64" @@ -8395,7 +8395,7 @@ pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v16i8" @@ -8413,7 +8413,7 @@ pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v8i16" @@ -8431,7 +8431,7 @@ pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v4i32" @@ -8449,7 +8449,7 @@ pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v2i64" @@ -8501,7 +8501,7 @@ pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_ #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmadd))] pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.v1f64" @@ -8650,7 +8650,7 @@ pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t 
#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f64" @@ -8670,7 +8670,7 @@ pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmla))] pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.v2f64" @@ -8717,7 +8717,7 @@ pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f32" @@ -8738,7 +8738,7 @@ pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f32" @@ -8759,7 +8759,7 @@ pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) - #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f64" @@ -9009,7 +9009,7 @@ pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64" @@ -9027,7 +9027,7 @@ pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64" @@ -9045,7 +9045,7 @@ pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64" @@ -9063,7 +9063,7 @@ pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = 
"llvm.aarch64.neon.ld1x2.v2f64.p0f64" @@ -9081,7 +9081,7 @@ pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64" @@ -9099,7 +9099,7 @@ pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64" @@ -9117,7 +9117,7 @@ pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v1f64.p0f64" @@ -9135,7 +9135,7 @@ pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64" @@ -9153,7 +9153,7 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64" @@ -9171,7 +9171,7 @@ pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v1f64.p0v1f64" @@ -9191,7 +9191,7 @@ pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> float64x1x2_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8" @@ -9211,7 +9211,7 @@ pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> int64x1x2_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8" @@ -9277,7 +9277,7 @@ pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_f64(a: *const f64) -> 
float64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64" @@ -9295,7 +9295,7 @@ pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64" @@ -9315,7 +9315,7 @@ pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8" @@ -9336,7 +9336,7 @@ pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8" @@ -9356,7 +9356,7 @@ pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> in #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8" @@ -9448,7 +9448,7 @@ pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v1f64.p0f64" @@ -9466,7 +9466,7 @@ pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64" @@ -9484,7 +9484,7 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64" @@ -9502,7 +9502,7 @@ pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v1f64.p0v1f64" @@ -9522,7 +9522,7 @@ pub unsafe fn vld3_f64(a: *const 
f64) -> float64x1x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> float64x1x3_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8" @@ -9561,7 +9561,7 @@ pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> int64x1x3_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8" @@ -9620,7 +9620,7 @@ pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64" @@ -9638,7 +9638,7 @@ pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64" @@ -9658,7 +9658,7 @@ pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8" @@ -9697,7 +9697,7 @@ pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8" @@ -9723,7 +9723,7 @@ pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> in #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8" @@ -9808,7 +9808,7 @@ pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v1f64.p0f64" @@ -9826,7 +9826,7 @@ pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { - extern "unadjusted" { + unsafe extern 
"unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64" @@ -9844,7 +9844,7 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64" @@ -9862,7 +9862,7 @@ pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v1f64.p0v1f64" @@ -9882,7 +9882,7 @@ pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> float64x1x4_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8" @@ -9909,7 +9909,7 @@ pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> int64x1x4_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8" @@ -9982,7 +9982,7 @@ pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64" @@ -10000,7 +10000,7 @@ pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64" @@ -10020,7 +10020,7 @@ pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8" @@ -10047,7 +10047,7 @@ pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8" @@ -10074,7 +10074,7 @@ pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> in #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8" @@ -10173,7 +10173,7 @@ pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmax))] pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmax.v1f64" @@ -10191,7 +10191,7 @@ pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmax))] pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmax.v2f64" @@ -10209,7 +10209,7 @@ pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnm))] pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnm.v1f64" @@ -10227,7 +10227,7 @@ pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnm))] pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnm.v2f64" @@ -10245,7 +10245,7 @@ pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" @@ -10263,7 +10263,7 @@ pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" @@ -10281,7 +10281,7 @@ pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmv))] pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" @@ -10299,7 +10299,7 @@ pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmin))] pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { 
#[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmin.v1f64" @@ -10317,7 +10317,7 @@ pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmin))] pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmin.v2f64" @@ -10335,7 +10335,7 @@ pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminnm))] pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnm.v1f64" @@ -10353,7 +10353,7 @@ pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminnm))] pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnm.v2f64" @@ -10371,7 +10371,7 @@ pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" @@ -10389,7 +10389,7 @@ pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" @@ -10407,7 +10407,7 @@ pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fminnmv))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" @@ -11707,7 +11707,7 @@ pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(pmull))] pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.pmull64" @@ -11793,7 +11793,7 @@ pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v2f32" @@ -11811,7 +11811,7 @@ pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v4f32" @@ -11829,7 +11829,7 @@ pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v1f64" @@ -11847,7 +11847,7 @@ pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v2f64" @@ -11962,7 +11962,7 @@ pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.f64" @@ -11980,7 +11980,7 @@ pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.f32" @@ -12144,7 +12144,7 @@ pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddp.v4f32" @@ -12162,7 +12162,7 @@ pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddp.v2f64" @@ -12180,7 +12180,7 @@ pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v2f32" @@ -12198,7 +12198,7 @@ pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern 
"unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v4f32" @@ -12216,7 +12216,7 @@ pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v2f64" @@ -12234,7 +12234,7 @@ pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" @@ -12252,7 +12252,7 @@ pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" @@ -12270,7 +12270,7 @@ pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" @@ -12288,7 +12288,7 @@ pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" @@ -12306,7 +12306,7 @@ pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v2f32" @@ -12324,7 +12324,7 @@ pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v4f32" @@ -12342,7 +12342,7 @@ pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v2f64" @@ -12360,7 +12360,7 @@ pub unsafe fn 
vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" @@ -12378,7 +12378,7 @@ pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" @@ -12396,7 +12396,7 @@ pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f64.v2f64" @@ -12414,7 +12414,7 @@ pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f32.v2f32" @@ -12432,7 +12432,7 @@ pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v1i64" @@ -12450,7 +12450,7 @@ pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v2i64" @@ -12490,7 +12490,7 @@ pub unsafe fn vqabsh_s16(a: i16) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] pub unsafe fn vqabss_s32(a: i32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.i32" @@ -12508,7 +12508,7 @@ pub unsafe fn vqabss_s32(a: i32) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] pub unsafe fn vqabsd_s64(a: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.i64" @@ -12578,7 +12578,7 @@ pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqadd))] pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), 
link_name = "llvm.aarch64.neon.sqadd.i32" @@ -12596,7 +12596,7 @@ pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqadd))] pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.i64" @@ -12614,7 +12614,7 @@ pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqadd))] pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.i32" @@ -12632,7 +12632,7 @@ pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqadd))] pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.i64" @@ -13437,7 +13437,7 @@ pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { #[cfg_attr(test, assert_instr(sqdmull))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqdmulls.scalar" @@ -13529,7 +13529,7 @@ pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(sqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovnd_s64(a: i64) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64" @@ -13547,7 +13547,7 @@ pub unsafe fn vqmovnd_s64(a: i64) -> i32 { #[cfg_attr(test, assert_instr(uqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovnd_u64(a: u64) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64" @@ -13679,7 +13679,7 @@ pub unsafe fn vqmovund_s64(a: i64) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v1i64" @@ -13697,7 +13697,7 @@ pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v2i64" @@ -13929,7 +13929,7 @@ pub unsafe fn vqrdmlahq_laneq_s32( #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = 
"llvm.aarch64.neon.sqrdmlah.v4i16" @@ -13947,7 +13947,7 @@ pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v8i16" @@ -13965,7 +13965,7 @@ pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v2i32" @@ -13983,7 +13983,7 @@ pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v4i32" @@ -14251,7 +14251,7 @@ pub unsafe fn vqrdmlshq_laneq_s32( #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16" @@ -14269,7 +14269,7 @@ pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16" @@ -14287,7 +14287,7 @@ pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32" @@ -14305,7 +14305,7 @@ pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32" @@ -14529,7 +14529,7 @@ pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { #[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = 
"llvm.aarch64.neon.sqrshl.i64" @@ -14547,7 +14547,7 @@ pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { #[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshl.i32" @@ -14565,7 +14565,7 @@ pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { #[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.i32" @@ -14583,7 +14583,7 @@ pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { #[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.i64" @@ -15032,7 +15032,7 @@ pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { #[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshl.i64" @@ -15050,7 +15050,7 @@ pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { #[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.i64" @@ -15208,7 +15208,7 @@ pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.i32" @@ -15228,7 +15228,7 @@ pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.i32" @@ -15432,7 +15432,7 @@ pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqsub))] pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.i32" @@ -15450,7 +15450,7 @@ pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqsub))] pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.i64" @@ -15468,7 +15468,7 @@ pub unsafe fn vqsubd_s64(a: i64, b: 
i64) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqsub))] pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.i32" @@ -15486,7 +15486,7 @@ pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqsub))] pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.i64" @@ -15504,7 +15504,7 @@ pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(rax1))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.rax1" @@ -15522,7 +15522,7 @@ pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rbit.v8i8" @@ -15540,7 +15540,7 @@ pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rbit.v16i8" @@ -15602,7 +15602,7 @@ pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.v1f64" @@ -15620,7 +15620,7 @@ pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.v2f64" @@ -15638,7 +15638,7 @@ pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecped_f64(a: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.f64" @@ -15656,7 +15656,7 @@ pub unsafe fn vrecped_f64(a: f64) -> f64 { #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpes_f32(a: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.f32" @@ -15674,7 +15674,7 @@ pub unsafe fn vrecpes_f32(a: f32) -> f32 { #[cfg_attr(test, 
assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.v1f64" @@ -15692,7 +15692,7 @@ pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.v2f64" @@ -15710,7 +15710,7 @@ pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.f64" @@ -15728,7 +15728,7 @@ pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.f32" @@ -15746,7 +15746,7 @@ pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { #[cfg_attr(test, assert_instr(frecpx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpxd_f64(a: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpx.f64" @@ -15764,7 +15764,7 @@ pub unsafe fn vrecpxd_f64(a: f64) -> f64 { #[cfg_attr(test, assert_instr(frecpx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpxs_f32(a: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpx.f32" @@ -16464,7 +16464,7 @@ pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v2f32" @@ -16482,7 +16482,7 @@ pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v4f32" @@ -16500,7 +16500,7 @@ pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( 
any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v2f64" @@ -16518,7 +16518,7 @@ pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint32x.f64" @@ -16536,7 +16536,7 @@ pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v2f32" @@ -16554,7 +16554,7 @@ pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v4f32" @@ -16572,7 +16572,7 @@ pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v2f64" @@ -16590,7 +16590,7 @@ pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint32z.f64" @@ -16608,7 +16608,7 @@ pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v2f32" @@ -16626,7 +16626,7 @@ pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v4f32" @@ -16644,7 +16644,7 @@ pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( 
any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v2f64" @@ -16662,7 +16662,7 @@ pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint64x.f64" @@ -16680,7 +16680,7 @@ pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v2f32" @@ -16698,7 +16698,7 @@ pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v4f32" @@ -16716,7 +16716,7 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v2f64" @@ -16734,7 +16734,7 @@ pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint64z.f64" @@ -16752,7 +16752,7 @@ pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v2f32" @@ -16770,7 +16770,7 @@ pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v4f32" @@ -16788,7 +16788,7 @@ pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v1f64" @@ -16806,7 +16806,7 @@ pub unsafe fn vrnd_f64(a: float64x1_t) -> 
float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v2f64" @@ -16824,7 +16824,7 @@ pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v2f32" @@ -16842,7 +16842,7 @@ pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v4f32" @@ -16860,7 +16860,7 @@ pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v1f64" @@ -16878,7 +16878,7 @@ pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v2f64" @@ -16896,7 +16896,7 @@ pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v2f32" @@ -16914,7 +16914,7 @@ pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v4f32" @@ -16932,7 +16932,7 @@ pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v1f64" @@ -16950,7 +16950,7 @@ pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v2f64" @@ -16968,7 +16968,7 @@ pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v2f32" @@ -16986,7 +16986,7 @@ pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v4f32" @@ -17004,7 +17004,7 @@ pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v1f64" @@ -17022,7 +17022,7 @@ pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v2f64" @@ -17040,7 +17040,7 @@ pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v1f64" @@ -17058,7 +17058,7 @@ pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v2f64" @@ -17076,7 +17076,7 @@ pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] pub unsafe fn vrndns_f32(a: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.roundeven.f32" @@ -17094,7 +17094,7 @@ pub unsafe fn vrndns_f32(a: f32) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v2f32" @@ -17112,7 +17112,7 @@ pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v4f32" @@ -17130,7 +17130,7 @@ pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, 
assert_instr(frintp))] pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v1f64" @@ -17148,7 +17148,7 @@ pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v2f64" @@ -17166,7 +17166,7 @@ pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v2f32" @@ -17184,7 +17184,7 @@ pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v4f32" @@ -17202,7 +17202,7 @@ pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v1f64" @@ -17220,7 +17220,7 @@ pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v2f64" @@ -17238,7 +17238,7 @@ pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(srshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srshl.i64" @@ -17256,7 +17256,7 @@ pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { #[cfg_attr(test, assert_instr(urshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.i64" @@ -17386,7 +17386,7 @@ pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.v1f64" @@ -17404,7 +17404,7 @@ pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub 
unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.v2f64" @@ -17422,7 +17422,7 @@ pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrted_f64(a: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.f64" @@ -17440,7 +17440,7 @@ pub unsafe fn vrsqrted_f64(a: f64) -> f64 { #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.f32" @@ -17458,7 +17458,7 @@ pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.v1f64" @@ -17476,7 +17476,7 @@ pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.v2f64" @@ -17494,7 +17494,7 @@ pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.f64" @@ -17512,7 +17512,7 @@ pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.f32" @@ -17656,7 +17656,7 @@ pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64 #[cfg_attr(test, assert_instr(sha512h2))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h2" @@ -17674,7 +17674,7 @@ pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uin #[cfg_attr(test, assert_instr(sha512h))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h" @@ 
-17692,7 +17692,7 @@ pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint #[cfg_attr(test, assert_instr(sha512su0))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su0" @@ -17710,7 +17710,7 @@ pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(sha512su1))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su1" @@ -17946,7 +17946,7 @@ pub unsafe fn vslid_n_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(sm3partw1))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw1" @@ -17964,7 +17964,7 @@ pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[cfg_attr(test, assert_instr(sm3partw2))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw2" @@ -17982,7 +17982,7 @@ pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[cfg_attr(test, assert_instr(sm3ss1))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3ss1" @@ -18000,7 +18000,7 @@ pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint3 #[cfg_attr(test, assert_instr(sm4ekey))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4ekey" @@ -18018,7 +18018,7 @@ pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(sm4e))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4e" @@ -18058,7 +18058,7 @@ pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 { #[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.i64" @@ -18076,7 +18076,7 @@ pub 
unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { #[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.i32" @@ -18164,7 +18164,7 @@ pub unsafe fn vsrid_n_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v1f64.p0f64" @@ -18182,7 +18182,7 @@ pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64" @@ -18200,7 +18200,7 @@ pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v1f64.p0f64" @@ -18218,7 +18218,7 @@ pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64" @@ -18236,7 +18236,7 @@ pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v1f64.p0f64" @@ -18260,7 +18260,7 @@ pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64" @@ -18310,7 +18310,7 @@ pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v1f64.p0i8" @@ -18330,7 +18330,7 @@ pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), 
link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8" @@ -18350,7 +18350,7 @@ pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8" @@ -18394,7 +18394,7 @@ pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2f64.p0i8" @@ -18412,7 +18412,7 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2i64.p0i8" @@ -18432,7 +18432,7 @@ pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8" @@ -18452,7 +18452,7 @@ pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8" @@ -18472,7 +18472,7 @@ pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8" @@ -18564,7 +18564,7 @@ pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v1f64.p0i8" @@ -18584,7 +18584,7 @@ pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8" @@ -18604,7 +18604,7 @@ pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" 
{ #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8" @@ -18648,7 +18648,7 @@ pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2f64.p0i8" @@ -18666,7 +18666,7 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2i64.p0i8" @@ -18686,7 +18686,7 @@ pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8" @@ -18706,7 +18706,7 @@ pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8" @@ -18726,7 +18726,7 @@ pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8" @@ -18818,7 +18818,7 @@ pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v1f64.p0i8" @@ -18838,7 +18838,7 @@ pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8" @@ -18865,7 +18865,7 @@ pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8" @@ -18916,7 +18916,7 @@ pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_f64(a: *mut 
f64, b: float64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2f64.p0i8" @@ -18934,7 +18934,7 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2i64.p0i8" @@ -18954,7 +18954,7 @@ pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8" @@ -18981,7 +18981,7 @@ pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8" @@ -19008,7 +19008,7 @@ pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8" @@ -19981,7 +19981,7 @@ pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 { #[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.i64" @@ -19999,7 +19999,7 @@ pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { #[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.i32" diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs index ccb231e120..a238bae6a1 100644 --- a/crates/core_arch/src/aarch64/neon/mod.rs +++ b/crates/core_arch/src/aarch64/neon/mod.rs @@ -72,7 +72,7 @@ pub struct float64x2x4_t( ); #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { // absolute value #[link_name = "llvm.aarch64.neon.abs.i64"] fn vabsd_s64_(a: i64) -> i64; @@ -3423,7 +3423,7 @@ pub unsafe fn vsm3tt1aq_u32( ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); #[allow(improper_ctypes)] - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt1a" @@ -3446,7 +3446,7 @@ pub unsafe fn vsm3tt1bq_u32( ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); #[allow(improper_ctypes)] - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", 
target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt1b" @@ -3469,7 +3469,7 @@ pub unsafe fn vsm3tt2aq_u32( ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); #[allow(improper_ctypes)] - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt2a" @@ -3492,7 +3492,7 @@ pub unsafe fn vsm3tt2bq_u32( ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); #[allow(improper_ctypes)] - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt2b" @@ -3511,7 +3511,7 @@ pub unsafe fn vsm3tt2bq_u32( pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(IMM6, 6); #[allow(improper_ctypes)] - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.xar" diff --git a/crates/core_arch/src/aarch64/prefetch.rs b/crates/core_arch/src/aarch64/prefetch.rs index 1fde2ac4df..4dcbc9549f 100644 --- a/crates/core_arch/src/aarch64/prefetch.rs +++ b/crates/core_arch/src/aarch64/prefetch.rs @@ -1,7 +1,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.prefetch"] fn prefetch(p: *const i8, rw: i32, loc: i32, ty: i32); } diff --git a/crates/core_arch/src/aarch64/tme.rs b/crates/core_arch/src/aarch64/tme.rs index 29cda807c1..207633c1f8 100644 --- a/crates/core_arch/src/aarch64/tme.rs +++ b/crates/core_arch/src/aarch64/tme.rs @@ -17,7 +17,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.aarch64.tstart"] fn aarch64_tstart() -> u64; #[link_name = "llvm.aarch64.tcommit"] diff --git a/crates/core_arch/src/arm/dsp.rs b/crates/core_arch/src/arm/dsp.rs index fa91b549f1..9c03b38884 100644 --- a/crates/core_arch/src/arm/dsp.rs +++ b/crates/core_arch/src/arm/dsp.rs @@ -23,7 +23,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.arm.smulbb"] fn arm_smulbb(a: i32, b: i32) -> i32; diff --git a/crates/core_arch/src/arm/neon.rs b/crates/core_arch/src/arm/neon.rs index ff67812365..ffeb2c6fe1 100644 --- a/crates/core_arch/src/arm/neon.rs +++ b/crates/core_arch/src/arm/neon.rs @@ -5,7 +5,7 @@ use crate::mem::{align_of, transmute}; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.arm.neon.vbsl.v8i8"] fn vbsl_s8_(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; #[link_name = "llvm.arm.neon.vbsl.v16i8"] diff --git a/crates/core_arch/src/arm/sat.rs b/crates/core_arch/src/arm/sat.rs index 38ec5f4a4c..bd38f59e64 100644 --- a/crates/core_arch/src/arm/sat.rs +++ b/crates/core_arch/src/arm/sat.rs @@ -27,7 +27,7 @@ pub unsafe fn __usat(x: i32) -> u32 { arm_usat(x, WIDTH as i32) } -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.arm.ssat"] fn arm_ssat(x: i32, y: i32) -> i32; diff --git a/crates/core_arch/src/arm/simd32.rs b/crates/core_arch/src/arm/simd32.rs index cb69b2de8b..0d7c2328e2 100644 --- a/crates/core_arch/src/arm/simd32.rs +++ b/crates/core_arch/src/arm/simd32.rs @@ -93,7 +93,7 @@ macro_rules! 
dsp_call { }; } -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.arm.qadd8"] fn arm_qadd8(a: i32, b: i32) -> i32; diff --git a/crates/core_arch/src/arm_shared/barrier/mod.rs b/crates/core_arch/src/arm_shared/barrier/mod.rs index 82e8e6e717..e198b63521 100644 --- a/crates/core_arch/src/arm_shared/barrier/mod.rs +++ b/crates/core_arch/src/arm_shared/barrier/mod.rs @@ -144,7 +144,7 @@ where arg.__isb() } -extern "unadjusted" { +unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.dmb" diff --git a/crates/core_arch/src/arm_shared/crc.rs b/crates/core_arch/src/arm_shared/crc.rs index 9d584443c6..1c10af05f6 100644 --- a/crates/core_arch/src/arm_shared/crc.rs +++ b/crates/core_arch/src/arm_shared/crc.rs @@ -1,4 +1,4 @@ -extern "unadjusted" { +unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32b" diff --git a/crates/core_arch/src/arm_shared/crypto.rs b/crates/core_arch/src/arm_shared/crypto.rs index 2f2150b4be..2dcd6fdaf0 100644 --- a/crates/core_arch/src/arm_shared/crypto.rs +++ b/crates/core_arch/src/arm_shared/crypto.rs @@ -1,7 +1,7 @@ use crate::core_arch::arm_shared::{uint32x4_t, uint8x16_t}; #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.aese" diff --git a/crates/core_arch/src/arm_shared/hints.rs b/crates/core_arch/src/arm_shared/hints.rs index e7524d6458..54fd78270a 100644 --- a/crates/core_arch/src/arm_shared/hints.rs +++ b/crates/core_arch/src/arm_shared/hints.rs @@ -107,7 +107,7 @@ pub unsafe fn __nop() { crate::arch::asm!("nop", options(nomem, nostack, preserves_flags)); } -extern "unadjusted" { +unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.hint" diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index aaddbff95c..48c162de4b 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -180,7 +180,7 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -211,7 +211,7 @@ pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -242,7 +242,7 @@ pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v8i8" @@ -273,7 +273,7 @@ pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> 
int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v16i8" @@ -304,7 +304,7 @@ pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v4i16" @@ -335,7 +335,7 @@ pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v8i16" @@ -366,7 +366,7 @@ pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v2i32" @@ -397,7 +397,7 @@ pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v4i32" @@ -428,7 +428,7 @@ pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v8i8" @@ -459,7 +459,7 @@ pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v16i8" @@ -490,7 +490,7 @@ pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v4i16" @@ -521,7 +521,7 @@ pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v8i16" @@ -552,7 +552,7 @@ pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v2i32" @@ -583,7 +583,7 @@ pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v4i32" @@ -1330,7 +1330,7 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -1361,7 +1361,7 @@ pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -1392,7 +1392,7 @@ pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -1423,7 +1423,7 @@ pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -2880,7 +2880,7 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -2911,7 +2911,7 @@ pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -2942,7 +2942,7 @@ pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vcls.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -2973,7 +2973,7 @@ pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -3004,7 +3004,7 @@ pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -3035,7 +3035,7 @@ pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4161,7 +4161,7 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" @@ -4182,7 +4182,7 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" @@ -4203,7 +4203,7 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" @@ -4224,7 +4224,7 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" @@ -4245,7 +4245,7 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" @@ -4266,7 +4266,7 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern 
"unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" @@ -4287,7 +4287,7 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" @@ -4308,7 +4308,7 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" @@ -4329,7 +4329,7 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" @@ -4350,7 +4350,7 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" @@ -4371,7 +4371,7 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" @@ -4392,7 +4392,7 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" @@ -4413,7 +4413,7 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" @@ -4434,7 +4434,7 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" @@ -4455,7 +4455,7 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern 
"unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" @@ -4476,7 +4476,7 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" @@ -4506,7 +4506,7 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4537,7 +4537,7 @@ pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4568,7 +4568,7 @@ pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4599,7 +4599,7 @@ pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4750,7 +4750,7 @@ pub unsafe fn vdotq_lane_u32( unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4781,7 +4781,7 @@ pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4812,7 +4812,7 @@ pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4843,7 +4843,7 @@ 
pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -7282,7 +7282,7 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; @@ -7310,7 +7310,7 @@ pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; @@ -7478,7 +7478,7 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v8i8" @@ -7509,7 +7509,7 @@ pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v16i8" @@ -7540,7 +7540,7 @@ pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v4i16" @@ -7571,7 +7571,7 @@ pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v8i16" @@ -7602,7 +7602,7 @@ pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v2i32" @@ -7633,7 +7633,7 @@ pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v4i32" @@ -7664,7 +7664,7 @@ pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v8i8" @@ -7695,7 +7695,7 @@ pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v16i8" @@ -7726,7 +7726,7 @@ pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v4i16" @@ -7757,7 +7757,7 @@ pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v8i16" @@ -7788,7 +7788,7 @@ pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v2i32" @@ -7819,7 +7819,7 @@ pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v4i32" @@ -7850,7 +7850,7 @@ pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v4i16" @@ -7881,7 +7881,7 @@ pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v8i16" @@ -7912,7 +7912,7 @@ pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v2i32" @@ -7943,7 +7943,7 @@ pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v4i32" @@ -7974,7 +7974,7 @@ pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v8i8" @@ -8005,7 +8005,7 @@ pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v16i8" @@ -8036,7 +8036,7 @@ pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v8i8" @@ -8067,7 +8067,7 @@ pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v16i8" @@ -8098,7 +8098,7 @@ pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v4i16" @@ -8129,7 +8129,7 @@ pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v8i16" @@ -8160,7 +8160,7 @@ pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v2i32" @@ -8191,7 +8191,7 @@ pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v4i32" @@ -8222,7 +8222,7 @@ pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0f32" @@ -8253,7 +8253,7 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0f32" @@ -8284,7 +8284,7 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0f32" @@ -8315,7 +8315,7 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0f32" @@ -8346,7 +8346,7 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0f32" @@ -8377,7 +8377,7 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0f32" @@ -8546,7 +8546,7 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0i8" @@ -8577,7 +8577,7 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0i8" @@ -8608,7 +8608,7 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn 
vld1_s8_x4(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0i8" @@ -8639,7 +8639,7 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0i8" @@ -8670,7 +8670,7 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0i8" @@ -8701,7 +8701,7 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0i8" @@ -8732,7 +8732,7 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0i16" @@ -8763,7 +8763,7 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0i16" @@ -8794,7 +8794,7 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0i16" @@ -8825,7 +8825,7 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0i16" @@ -8856,7 +8856,7 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0i16" @@ -8887,7 +8887,7 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = 
"aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0i16" @@ -8918,7 +8918,7 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0i32" @@ -8949,7 +8949,7 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0i32" @@ -8980,7 +8980,7 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0i32" @@ -9011,7 +9011,7 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0i32" @@ -9042,7 +9042,7 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0i32" @@ -9073,7 +9073,7 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0i32" @@ -9104,7 +9104,7 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0i64" @@ -9135,7 +9135,7 @@ pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0i64" @@ -9166,7 +9166,7 @@ pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0i64" @@ -9197,7 +9197,7 @@ pub 
unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0i64" @@ -9228,7 +9228,7 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0i64" @@ -9259,7 +9259,7 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0i64" @@ -10107,7 +10107,7 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0i8")] fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } @@ -10123,7 +10123,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0i8")] fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } @@ -10139,7 +10139,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0i8")] fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } @@ -10155,7 +10155,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0i8")] fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } @@ -10171,7 +10171,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0i8")] fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } @@ -10187,7 +10187,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, 
assert_instr(vld2))] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0i8")] fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } @@ -10203,7 +10203,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0i8")] fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } @@ -10219,7 +10219,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0i8")] fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } @@ -10235,7 +10235,7 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2f32.p0f32" @@ -10254,7 +10254,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v4f32.p0f32" @@ -10273,7 +10273,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v8i8.p0i8" @@ -10292,7 +10292,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v16i8.p0i8" @@ -10311,7 +10311,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v4i16.p0i16" @@ -10330,7 +10330,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", 
target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v8i16.p0i16" @@ -10349,7 +10349,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2i32.p0i32" @@ -10368,7 +10368,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v4i32.p0i32" @@ -10410,7 +10410,7 @@ pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0i8")] fn _vld2_dup_s64(ptr: *const i8, size: i32) -> int64x1x2_t; } @@ -10426,7 +10426,7 @@ pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v1i64.p0i64" @@ -10698,7 +10698,7 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32.p0i8")] fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } @@ -10714,7 +10714,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32.p0i8")] fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } @@ -10730,7 +10730,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8.p0i8")] fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } @@ -10746,7 +10746,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8.p0i8")] fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } @@ -10762,7 +10762,7 @@ pub unsafe fn vld2q_s8(a: *const 
i8) -> int8x16x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16.p0i8")] fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } @@ -10778,7 +10778,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16.p0i8")] fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } @@ -10794,7 +10794,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32.p0i8")] fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } @@ -10810,7 +10810,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32.p0i8")] fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } @@ -10826,7 +10826,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2f32.p0v2f32" @@ -10845,7 +10845,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v4f32.p0v4f32" @@ -10864,7 +10864,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v8i8.p0v8i8" @@ -10883,7 +10883,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v16i8.p0v16i8" @@ -10902,7 +10902,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern 
"unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v4i16.p0v4i16" @@ -10921,7 +10921,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v8i16.p0v8i16" @@ -10940,7 +10940,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2i32.p0v2i32" @@ -10959,7 +10959,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v4i32.p0v4i32" @@ -10980,7 +10980,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0i8" @@ -11001,7 +11001,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0i8" @@ -11023,7 +11023,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0i8" @@ -11044,7 +11044,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0i8" @@ -11065,7 +11065,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0i8" @@ -11086,7 +11086,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: 
int16x8x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0i8" @@ -11107,7 +11107,7 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0i8" @@ -11128,7 +11128,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0i8")] fn _vld2_lane_f32( ptr: *const i8, @@ -11152,7 +11152,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0i8")] fn _vld2q_lane_f32( ptr: *const i8, @@ -11176,7 +11176,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0i8")] fn _vld2q_lane_s16( ptr: *const i8, @@ -11200,7 +11200,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0i8")] fn _vld2q_lane_s32( ptr: *const i8, @@ -11224,7 +11224,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0i8")] fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) -> int8x8x2_t; @@ -11243,7 +11243,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0i8")] fn _vld2_lane_s16( ptr: *const i8, @@ -11267,7 +11267,7 @@ pub unsafe fn 
vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0i8")] fn _vld2_lane_s32( ptr: *const i8, @@ -11512,7 +11512,7 @@ pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64.p0i8")] fn _vld2_s64(ptr: *const i8, size: i32) -> int64x1x2_t; } @@ -11528,7 +11528,7 @@ pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v1i64.p0v1i64" @@ -11800,7 +11800,7 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2f32.p0f32" @@ -11819,7 +11819,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v4f32.p0f32" @@ -11838,7 +11838,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v8i8.p0i8" @@ -11857,7 +11857,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v16i8.p0i8" @@ -11876,7 +11876,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v4i16.p0i16" @@ -11895,7 +11895,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern 
"unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v8i16.p0i16" @@ -11914,7 +11914,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2i32.p0i32" @@ -11933,7 +11933,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v4i32.p0i32" @@ -11952,7 +11952,7 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v1i64.p0i64" @@ -11971,7 +11971,7 @@ pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0i8")] fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } @@ -11987,7 +11987,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0i8")] fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } @@ -12003,7 +12003,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0i8")] fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } @@ -12019,7 +12019,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0i8")] fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } @@ -12035,7 +12035,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0i8")] fn 
_vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } @@ -12051,7 +12051,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0i8")] fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } @@ -12067,7 +12067,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0i8")] fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } @@ -12083,7 +12083,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0i8")] fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } @@ -12122,7 +12122,7 @@ pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0i8")] fn _vld3_dup_s64(ptr: *const i8, size: i32) -> int64x1x3_t; } @@ -12391,7 +12391,7 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2f32.p0v2f32" @@ -12410,7 +12410,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v4f32.p0v4f32" @@ -12429,7 +12429,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v8i8.p0v8i8" @@ -12448,7 +12448,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v16i8.p0v16i8" @@ -12467,7 +12467,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s16(a: *const 
i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v4i16.p0v4i16" @@ -12486,7 +12486,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v8i16.p0v8i16" @@ -12505,7 +12505,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2i32.p0v2i32" @@ -12524,7 +12524,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v4i32.p0v4i32" @@ -12543,7 +12543,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0i8")] fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } @@ -12559,7 +12559,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0i8")] fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } @@ -12575,7 +12575,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0i8")] fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } @@ -12591,7 +12591,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0i8")] fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } @@ -12607,7 +12607,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0i8")] fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } @@ -12623,7 +12623,7 @@ pub unsafe fn vld3_s16(a: 
*const i16) -> int16x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0i8")] fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } @@ -12639,7 +12639,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0i8")] fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } @@ -12655,7 +12655,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0i8")] fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } @@ -12673,7 +12673,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0i8" @@ -12700,7 +12700,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0i8" @@ -12727,7 +12727,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0i8")] fn _vld3_lane_f32( ptr: *const i8, @@ -12752,7 +12752,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0i8" @@ -12779,7 +12779,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0i8" @@ -12806,7 +12806,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0i8" @@ -12833,7 +12833,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0i8" @@ -12860,7 +12860,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0i8" @@ -12887,7 +12887,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0i8")] fn _vld3_lane_s8( ptr: *const i8, @@ -12912,7 +12912,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0i8")] fn _vld3_lane_s16( ptr: *const i8, @@ -12937,7 +12937,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0i8")] fn _vld3q_lane_s16( ptr: *const i8, @@ -12962,7 +12962,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0i8")] fn _vld3_lane_s32( ptr: *const i8, @@ -12987,7 +12987,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0i8")] fn _vld3q_lane_s32( ptr: *const i8, @@ -13233,7 +13233,7 @@ pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_s64(a: *const i64) -> 
int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v1i64.p0v1i64" @@ -13252,7 +13252,7 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0i8")] fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; } @@ -13523,7 +13523,7 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0i8")] fn _vld3q_lane_f32( ptr: *const i8, @@ -13546,7 +13546,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0i8")] fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } @@ -13562,7 +13562,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0i8")] fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } @@ -13578,7 +13578,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0i8")] fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } @@ -13594,7 +13594,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0i8")] fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } @@ -13610,7 +13610,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0i8")] fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } @@ -13626,7 +13626,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> 
int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0i8")] fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } @@ -13642,7 +13642,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0i8")] fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } @@ -13658,7 +13658,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0i8")] fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } @@ -13674,7 +13674,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32" @@ -13693,7 +13693,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32" @@ -13712,7 +13712,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8" @@ -13731,7 +13731,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8" @@ -13750,7 +13750,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16" @@ -13769,7 +13769,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = 
"llvm.aarch64.neon.ld4r.v8i16.p0i16" @@ -13788,7 +13788,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32" @@ -13807,7 +13807,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32" @@ -13826,7 +13826,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64" @@ -13868,7 +13868,7 @@ pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { #[cfg_attr(test, assert_instr(nop))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0i8")] fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } @@ -14137,7 +14137,7 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2f32.p0v2f32" @@ -14156,7 +14156,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v4f32.p0v4f32" @@ -14175,7 +14175,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v8i8.p0v8i8" @@ -14194,7 +14194,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v16i8.p0v16i8" @@ -14213,7 +14213,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - 
extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v4i16.p0v4i16" @@ -14232,7 +14232,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v8i16.p0v8i16" @@ -14251,7 +14251,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2i32.p0v2i32" @@ -14270,7 +14270,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v4i32.p0v4i32" @@ -14289,7 +14289,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0i8")] fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } @@ -14305,7 +14305,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0i8")] fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } @@ -14321,7 +14321,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0i8")] fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } @@ -14337,7 +14337,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0i8")] fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } @@ -14353,7 +14353,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0i8")] fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } @@ 
-14369,7 +14369,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0i8")] fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } @@ -14385,7 +14385,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0i8")] fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } @@ -14401,7 +14401,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0i8")] fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } @@ -14419,7 +14419,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0i8" @@ -14447,7 +14447,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0i8" @@ -14475,7 +14475,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0i8" @@ -14503,7 +14503,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0i8" @@ -14531,7 +14531,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0i8" @@ -14559,7 +14559,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0i8" @@ -14587,7 +14587,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0i8" @@ -14615,7 +14615,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0i8")] fn _vld4_lane_f32( ptr: *const i8, @@ -14641,7 +14641,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0i8")] fn _vld4q_lane_f32( ptr: *const i8, @@ -14667,7 +14667,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0i8")] fn _vld4_lane_s8( ptr: *const i8, @@ -14693,7 +14693,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0i8")] fn _vld4_lane_s16( ptr: *const i8, @@ -14719,7 +14719,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0i8")] fn _vld4q_lane_s16( ptr: *const i8, @@ -14745,7 +14745,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0i8")] fn _vld4_lane_s32( ptr: *const i8, @@ -14771,7 +14771,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800")] pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0i8")] fn _vld4q_lane_s32( ptr: *const i8, @@ -15018,7 +15018,7 @@ pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v1i64.p0v1i64" @@ -15037,7 +15037,7 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0i8")] fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } @@ -15317,7 +15317,7 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15348,7 +15348,7 @@ pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15379,7 +15379,7 @@ pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15410,7 +15410,7 @@ pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15441,7 +15441,7 @@ pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15472,7 +15472,7 @@ pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { 
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15503,7 +15503,7 @@ pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15534,7 +15534,7 @@ pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15565,7 +15565,7 @@ pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15596,7 +15596,7 @@ pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15627,7 +15627,7 @@ pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15658,7 +15658,7 @@ pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15689,7 +15689,7 @@ pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15720,7 +15720,7 @@ pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), 
@@ -15751,7 +15751,7 @@ pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15782,7 +15782,7 @@ pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15813,7 +15813,7 @@ pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15844,7 +15844,7 @@ pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15875,7 +15875,7 @@ pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15906,7 +15906,7 @@ pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15937,7 +15937,7 @@ pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15968,7 +15968,7 @@ pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -15999,7 +15999,7 @@ pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -16030,7 +16030,7 @@ pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -16061,7 +16061,7 @@ pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -16092,7 +16092,7 @@ pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -16123,7 +16123,7 @@ pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -16154,7 +16154,7 @@ pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -16185,7 +16185,7 @@ pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -16216,7 +16216,7 @@ pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -16247,7 +16247,7 @@ pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern 
"unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -16278,7 +16278,7 @@ pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -20615,7 +20615,7 @@ pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -20646,7 +20646,7 @@ pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -21257,7 +21257,7 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.pmull.v8i8" @@ -21288,7 +21288,7 @@ pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smull.v4i16" @@ -21319,7 +21319,7 @@ pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smull.v2i32" @@ -21350,7 +21350,7 @@ pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smull.v8i8" @@ -21381,7 +21381,7 @@ pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umull.v8i8" @@ -21412,7 +21412,7 @@ pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umull.v4i16" @@ -21443,7 +21443,7 @@ pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umull.v2i32" @@ -22026,7 +22026,7 @@ pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -22057,7 +22057,7 @@ pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v8i8" @@ -22088,7 +22088,7 @@ pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v16i8" @@ -22119,7 +22119,7 @@ pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v4i16" @@ -22150,7 +22150,7 @@ pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v8i16" @@ -22181,7 +22181,7 @@ pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v2i32" @@ -22212,7 +22212,7 @@ pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v4i32" @@ -22243,7 +22243,7 @@ pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - 
extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v8i8" @@ -22274,7 +22274,7 @@ pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v16i8" @@ -22305,7 +22305,7 @@ pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v4i16" @@ -22336,7 +22336,7 @@ pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v8i16" @@ -22367,7 +22367,7 @@ pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v2i32" @@ -22398,7 +22398,7 @@ pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v4i32" @@ -22429,7 +22429,7 @@ pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v1i64" @@ -22460,7 +22460,7 @@ pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v2i64" @@ -22491,7 +22491,7 @@ pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v8i8" @@ -22522,7 +22522,7 @@ pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { 
- extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v16i8" @@ -22553,7 +22553,7 @@ pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v4i16" @@ -22584,7 +22584,7 @@ pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v8i16" @@ -22615,7 +22615,7 @@ pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v2i32" @@ -22646,7 +22646,7 @@ pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v4i32" @@ -22677,7 +22677,7 @@ pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v1i64" @@ -22708,7 +22708,7 @@ pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v2i64" @@ -23235,7 +23235,7 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23266,7 +23266,7 @@ pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23297,7 +23297,7 @@ pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23328,7 +23328,7 @@ pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23457,7 +23457,7 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23488,7 +23488,7 @@ pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23519,7 +23519,7 @@ pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23550,7 +23550,7 @@ pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23581,7 +23581,7 @@ pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23612,7 +23612,7 @@ pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23643,7 +23643,7 @@ pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vqmovnu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23674,7 +23674,7 @@ pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23705,7 +23705,7 @@ pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23736,7 +23736,7 @@ pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23767,7 +23767,7 @@ pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -23798,7 +23798,7 @@ pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v8i8" @@ -23829,7 +23829,7 @@ pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v16i8" @@ -23860,7 +23860,7 @@ pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v4i16" @@ -23891,7 +23891,7 @@ pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v8i16" @@ -23922,7 +23922,7 @@ pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v2i32" @@ 
-23953,7 +23953,7 @@ pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v4i32" @@ -24310,7 +24310,7 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24341,7 +24341,7 @@ pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24372,7 +24372,7 @@ pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24403,7 +24403,7 @@ pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24434,7 +24434,7 @@ pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24465,7 +24465,7 @@ pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24496,7 +24496,7 @@ pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24527,7 +24527,7 @@ pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub 
unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24558,7 +24558,7 @@ pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24589,7 +24589,7 @@ pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24620,7 +24620,7 @@ pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24651,7 +24651,7 @@ pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24682,7 +24682,7 @@ pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24713,7 +24713,7 @@ pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24744,7 +24744,7 @@ pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24775,7 +24775,7 @@ pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + 
unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24806,7 +24806,7 @@ pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24837,7 +24837,7 @@ pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24868,7 +24868,7 @@ pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24899,7 +24899,7 @@ pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -24921,7 +24921,7 @@ pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -24947,7 +24947,7 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -24968,7 +24968,7 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -24986,7 +24986,7 @@ pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( 
any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrn.v8i8" @@ -25007,7 +25007,7 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrn.v4i16" @@ -25028,7 +25028,7 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrn.v2i32" @@ -25049,7 +25049,7 @@ pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -25077,7 +25077,7 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -25099,7 +25099,7 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -25121,7 +25121,7 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v8i8" @@ -25142,7 +25142,7 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v4i16" @@ -25163,7 +25163,7 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v2i32" @@ -25184,7 +25184,7 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -25211,7 +25211,7 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -25233,7 +25233,7 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -25251,7 +25251,7 @@ pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v8i8" @@ -25272,7 +25272,7 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v4i16" @@ -25293,7 +25293,7 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v2i32" @@ -25723,7 +25723,7 @@ pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -25754,7 +25754,7 @@ pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -25785,7 +25785,7 @@ pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" 
{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -25816,7 +25816,7 @@ pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -25847,7 +25847,7 @@ pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -25878,7 +25878,7 @@ pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -25909,7 +25909,7 @@ pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -25940,7 +25940,7 @@ pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -25971,7 +25971,7 @@ pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -26002,7 +26002,7 @@ pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -26033,7 +26033,7 @@ pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", 
target_arch = "arm64ec"), @@ -26064,7 +26064,7 @@ pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -26095,7 +26095,7 @@ pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -26126,7 +26126,7 @@ pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -26157,7 +26157,7 @@ pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -26188,7 +26188,7 @@ pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -26210,7 +26210,7 @@ pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; } @@ -26236,7 +26236,7 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; } @@ -26263,7 +26263,7 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; } @@ -26285,7 +26285,7 @@ pub unsafe fn vqshlu_n_s16(a: 
int16x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; } @@ -26311,7 +26311,7 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; } @@ -26329,7 +26329,7 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; } @@ -26351,7 +26351,7 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; } @@ -26369,7 +26369,7 @@ pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; } @@ -26387,7 +26387,7 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i8" @@ -26416,7 +26416,7 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v16i8" @@ -26446,7 +26446,7 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i16" @@ -26471,7 +26471,7 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" 
{ + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i16" @@ -26500,7 +26500,7 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i32" @@ -26521,7 +26521,7 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i32" @@ -26546,7 +26546,7 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v1i64" @@ -26567,7 +26567,7 @@ pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i64" @@ -26588,7 +26588,7 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -26614,7 +26614,7 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -26635,7 +26635,7 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -26653,7 +26653,7 @@ pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.v8i8" @@ -26674,7 +26674,7 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.v4i16" @@ -26695,7 +26695,7 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.v2i32" @@ -26716,7 +26716,7 @@ pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -26744,7 +26744,7 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -26766,7 +26766,7 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -26788,7 +26788,7 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v8i8" @@ -26809,7 +26809,7 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v4i16" @@ -26830,7 +26830,7 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v2i32" @@ -26851,7 +26851,7 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] fn 
_vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -26878,7 +26878,7 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -26900,7 +26900,7 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -26918,7 +26918,7 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v8i8" @@ -26939,7 +26939,7 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v4i16" @@ -26960,7 +26960,7 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v2i32" @@ -26990,7 +26990,7 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i8" @@ -27021,7 +27021,7 @@ pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v16i8" @@ -27052,7 +27052,7 @@ pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i16" @@ -27083,7 +27083,7 @@ pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern 
"unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i16" @@ -27114,7 +27114,7 @@ pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i32" @@ -27145,7 +27145,7 @@ pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i32" @@ -27176,7 +27176,7 @@ pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v1i64" @@ -27207,7 +27207,7 @@ pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i64" @@ -27238,7 +27238,7 @@ pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i8" @@ -27269,7 +27269,7 @@ pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v16i8" @@ -27300,7 +27300,7 @@ pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i16" @@ -27331,7 +27331,7 @@ pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i16" @@ -27362,7 +27362,7 @@ pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) 
-> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i32" @@ -27393,7 +27393,7 @@ pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i32" @@ -27424,7 +27424,7 @@ pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v1i64" @@ -27455,7 +27455,7 @@ pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i64" @@ -27486,7 +27486,7 @@ pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -27517,7 +27517,7 @@ pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -27548,7 +27548,7 @@ pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -27579,7 +27579,7 @@ pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -27610,7 +27610,7 @@ pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -27641,7 +27641,7 @@ pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34020,7 +34020,7 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v8i8" @@ -34051,7 +34051,7 @@ pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v16i8" @@ -34082,7 +34082,7 @@ pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v4i16" @@ -34113,7 +34113,7 @@ pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v8i16" @@ -34144,7 +34144,7 @@ pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v2i32" @@ -34175,7 +34175,7 @@ pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v4i32" @@ -34206,7 +34206,7 @@ pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v8i8" @@ -34237,7 +34237,7 @@ pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v16i8" @@ -34268,7 +34268,7 @@ pub 
unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v4i16" @@ -34299,7 +34299,7 @@ pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v8i16" @@ -34330,7 +34330,7 @@ pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v2i32" @@ -34361,7 +34361,7 @@ pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v4i32" @@ -34392,7 +34392,7 @@ pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v2f32" @@ -34423,7 +34423,7 @@ pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v4f32" @@ -34454,7 +34454,7 @@ pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34485,7 +34485,7 @@ pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34516,7 +34516,7 @@ pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vrshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34547,7 +34547,7 @@ pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34578,7 +34578,7 @@ pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34609,7 +34609,7 @@ pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34640,7 +34640,7 @@ pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34671,7 +34671,7 @@ pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34702,7 +34702,7 @@ pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34733,7 +34733,7 @@ pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34764,7 +34764,7 @@ pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34795,7 +34795,7 @@ 
pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34826,7 +34826,7 @@ pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34857,7 +34857,7 @@ pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34888,7 +34888,7 @@ pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -34919,7 +34919,7 @@ pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -35341,7 +35341,7 @@ pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -35367,7 +35367,7 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -35388,7 +35388,7 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -35406,7 +35406,7 @@ pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rshrn.v8i8" @@ -35427,7 +35427,7 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rshrn.v4i16" @@ -35448,7 +35448,7 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rshrn.v2i32" @@ -35553,7 +35553,7 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -35584,7 +35584,7 @@ pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -35615,7 +35615,7 @@ pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -35646,7 +35646,7 @@ pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -35677,7 +35677,7 @@ pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -35708,7 +35708,7 @@ pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] #[cfg_attr( any(target_arch = 
"aarch64", target_arch = "arm64ec"), @@ -36139,7 +36139,7 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -36170,7 +36170,7 @@ pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -36201,7 +36201,7 @@ pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37301,7 +37301,7 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37332,7 +37332,7 @@ pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37363,7 +37363,7 @@ pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37394,7 +37394,7 @@ pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37425,7 +37425,7 @@ pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37456,7 +37456,7 @@ pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37487,7 +37487,7 @@ pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37518,7 +37518,7 @@ pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37549,7 +37549,7 @@ pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37580,7 +37580,7 @@ pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37611,7 +37611,7 @@ pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37642,7 +37642,7 @@ pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37673,7 +37673,7 @@ pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37704,7 +37704,7 @@ pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern 
"unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37735,7 +37735,7 @@ pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -37766,7 +37766,7 @@ pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38934,7 +38934,7 @@ pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v2f32")] fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); } @@ -38950,7 +38950,7 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v4f32")] fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); } @@ -38966,7 +38966,7 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32" @@ -38985,7 +38985,7 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32" @@ -39004,7 +39004,7 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v2f32")] fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); } @@ -39020,7 +39020,7 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern 
"unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v4f32")] fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); } @@ -39036,7 +39036,7 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32" @@ -39055,7 +39055,7 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32" @@ -39074,7 +39074,7 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32")] fn _vst1_f32_x4( ptr: *mut f32, @@ -39096,7 +39096,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32")] fn _vst1q_f32_x4( ptr: *mut f32, @@ -39118,7 +39118,7 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32" @@ -39143,7 +39143,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32" @@ -39881,7 +39881,7 @@ pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8" @@ -39900,7 +39900,7 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8" @@ -39919,7 +39919,7 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16" @@ -39938,7 +39938,7 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16" @@ -39957,7 +39957,7 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32" @@ -39976,7 +39976,7 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32" @@ -39995,7 +39995,7 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v1i64.p0i64" @@ -40014,7 +40014,7 @@ pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64" @@ -40033,7 +40033,7 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v8i8")] fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); } @@ -40049,7 +40049,7 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v16i8")] fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); } @@ -40065,7 +40065,7 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vst1x2.p0i16.v4i16")] fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); } @@ -40081,7 +40081,7 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v8i16")] fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); } @@ -40097,7 +40097,7 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v2i32")] fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); } @@ -40113,7 +40113,7 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v4i32")] fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); } @@ -40129,7 +40129,7 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v1i64")] fn _vst1_s64_x2(ptr: *mut i64, a: int64x1_t, b: int64x1_t); } @@ -40145,7 +40145,7 @@ pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v2i64")] fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); } @@ -40161,7 +40161,7 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8" @@ -40180,7 +40180,7 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8" @@ -40199,7 +40199,7 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16" @@ -40218,7 +40218,7 
@@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16" @@ -40237,7 +40237,7 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32" @@ -40256,7 +40256,7 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32" @@ -40275,7 +40275,7 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v1i64.p0i64" @@ -40294,7 +40294,7 @@ pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64" @@ -40313,7 +40313,7 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8")] fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); } @@ -40329,7 +40329,7 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8")] fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); } @@ -40345,7 +40345,7 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16")] fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); } @@ -40361,7 +40361,7 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] 
pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16")] fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); } @@ -40377,7 +40377,7 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32")] fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); } @@ -40393,7 +40393,7 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32")] fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); } @@ -40409,7 +40409,7 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v1i64")] fn _vst1_s64_x3(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t); } @@ -40425,7 +40425,7 @@ pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64")] fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); } @@ -40441,7 +40441,7 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8" @@ -40460,7 +40460,7 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8" @@ -40479,7 +40479,7 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16" @@ -40498,7 +40498,7 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { - 
extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16" @@ -40517,7 +40517,7 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32" @@ -40536,7 +40536,7 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32" @@ -40555,7 +40555,7 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v1i64.p0i64" @@ -40574,7 +40574,7 @@ pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64" @@ -40593,7 +40593,7 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8")] fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); } @@ -40609,7 +40609,7 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8")] fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); } @@ -40625,7 +40625,7 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16")] fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); } @@ -40641,7 +40641,7 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vst1x4.p0i16.v8i16")] fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); } @@ -40657,7 +40657,7 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32")] fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); } @@ -40673,7 +40673,7 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32")] fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); } @@ -40689,7 +40689,7 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v1i64")] fn _vst1_s64_x4(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t); } @@ -40705,7 +40705,7 @@ pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64")] fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); } @@ -41574,7 +41574,7 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2f32.p0i8" @@ -41593,7 +41593,7 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v4f32.p0i8" @@ -41612,7 +41612,7 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v8i8.p0i8" @@ -41631,7 +41631,7 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = 
"arm64ec"), link_name = "llvm.aarch64.neon.st2.v16i8.p0i8" @@ -41650,7 +41650,7 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v4i16.p0i8" @@ -41669,7 +41669,7 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v8i16.p0i8" @@ -41688,7 +41688,7 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2i32.p0i8" @@ -41707,7 +41707,7 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v4i32.p0i8" @@ -41726,7 +41726,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2f32")] fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); } @@ -41742,7 +41742,7 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4f32")] fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); } @@ -41758,7 +41758,7 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i8")] fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); } @@ -41774,7 +41774,7 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v16i8")] fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); } @@ -41790,7 +41790,7 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i16")] fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); } @@ -41806,7 +41806,7 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i16")] fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); } @@ -41822,7 +41822,7 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2i32")] fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); } @@ -41838,7 +41838,7 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i32")] fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); } @@ -41856,7 +41856,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8" @@ -41877,7 +41877,7 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8" @@ -41898,7 +41898,7 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8" @@ -41919,7 +41919,7 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8" @@ -41940,7 +41940,7 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern 
"unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8" @@ -41961,7 +41961,7 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8" @@ -41982,7 +41982,7 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8" @@ -42003,7 +42003,7 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2f32")] fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); } @@ -42021,7 +42021,7 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4f32")] fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); } @@ -42039,7 +42039,7 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i8")] fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); } @@ -42057,7 +42057,7 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i16")] fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); } @@ -42075,7 +42075,7 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i16")] fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); } @@ -42093,7 +42093,7 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern 
"unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2i32")] fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); } @@ -42111,7 +42111,7 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i32")] fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); } @@ -42350,7 +42350,7 @@ pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v1i64")] fn _vst2_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, size: i32); } @@ -42366,7 +42366,7 @@ pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v1i64.p0i8" @@ -42638,7 +42638,7 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")] fn _vst3_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32); } @@ -42654,7 +42654,7 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")] fn _vst3q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32); } @@ -42670,7 +42670,7 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")] fn _vst3_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32); } @@ -42686,7 +42686,7 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")] fn _vst3q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32); } @@ -42702,7 +42702,7 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe 
fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")] fn _vst3_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32); } @@ -42718,7 +42718,7 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")] fn _vst3q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32); } @@ -42734,7 +42734,7 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")] fn _vst3_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32); } @@ -42750,7 +42750,7 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")] fn _vst3q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32); } @@ -42766,7 +42766,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2f32.p0i8" @@ -42785,7 +42785,7 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v4f32.p0i8" @@ -42804,7 +42804,7 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v8i8.p0i8" @@ -42823,7 +42823,7 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v16i8.p0i8" @@ -42842,7 +42842,7 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = 
"arm64ec"), link_name = "llvm.aarch64.neon.st3.v4i16.p0i8" @@ -42861,7 +42861,7 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v8i16.p0i8" @@ -42880,7 +42880,7 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2i32.p0i8" @@ -42899,7 +42899,7 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v4i32.p0i8" @@ -42920,7 +42920,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] fn _vst3_lane_f32( ptr: *mut i8, @@ -42945,7 +42945,7 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] fn _vst3q_lane_f32( ptr: *mut i8, @@ -42970,7 +42970,7 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); } @@ -42988,7 +42988,7 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] fn _vst3_lane_s16( ptr: *mut i8, @@ -43013,7 +43013,7 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] fn _vst3q_lane_s16( ptr: *mut i8, @@ -43038,7 +43038,7 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] 
pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] fn _vst3_lane_s32( ptr: *mut i8, @@ -43063,7 +43063,7 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] fn _vst3q_lane_s32( ptr: *mut i8, @@ -43088,7 +43088,7 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8" @@ -43109,7 +43109,7 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8" @@ -43130,7 +43130,7 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8" @@ -43151,7 +43151,7 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8" @@ -43172,7 +43172,7 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8" @@ -43193,7 +43193,7 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8" @@ -43214,7 +43214,7 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8" @@ -43456,7 
+43456,7 @@ pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v1i64.p0i8" @@ -43475,7 +43475,7 @@ pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v1i64")] fn _vst3_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, c: int64x1_t, size: i32); } @@ -43744,7 +43744,7 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")] fn _vst4_f32( ptr: *mut i8, @@ -43767,7 +43767,7 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")] fn _vst4q_f32( ptr: *mut i8, @@ -43790,7 +43790,7 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")] fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); } @@ -43806,7 +43806,7 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")] fn _vst4q_s8( ptr: *mut i8, @@ -43829,7 +43829,7 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")] fn _vst4_s16( ptr: *mut i8, @@ -43852,7 +43852,7 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")] fn _vst4q_s16( ptr: *mut i8, @@ -43875,7 +43875,7 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s32(a: *mut i32, b: 
int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")] fn _vst4_s32( ptr: *mut i8, @@ -43898,7 +43898,7 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")] fn _vst4q_s32( ptr: *mut i8, @@ -43921,7 +43921,7 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2f32.p0i8" @@ -43940,7 +43940,7 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v4f32.p0i8" @@ -43959,7 +43959,7 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v8i8.p0i8" @@ -43978,7 +43978,7 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v16i8.p0i8" @@ -43997,7 +43997,7 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v4i16.p0i8" @@ -44016,7 +44016,7 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v8i16.p0i8" @@ -44035,7 +44035,7 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2i32.p0i8" @@ -44054,7 +44054,7 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s32(a: *mut 
i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v4i32.p0i8" @@ -44075,7 +44075,7 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] fn _vst4_lane_f32( ptr: *mut i8, @@ -44101,7 +44101,7 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] fn _vst4q_lane_f32( ptr: *mut i8, @@ -44127,7 +44127,7 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] fn _vst4_lane_s8( ptr: *mut i8, @@ -44153,7 +44153,7 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] fn _vst4_lane_s16( ptr: *mut i8, @@ -44179,7 +44179,7 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] fn _vst4q_lane_s16( ptr: *mut i8, @@ -44205,7 +44205,7 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] fn _vst4_lane_s32( ptr: *mut i8, @@ -44231,7 +44231,7 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] fn _vst4q_lane_s32( ptr: *mut i8, @@ -44257,7 +44257,7 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8" @@ -44285,7 +44285,7 
@@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8" @@ -44313,7 +44313,7 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8" @@ -44334,7 +44334,7 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8" @@ -44362,7 +44362,7 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8" @@ -44390,7 +44390,7 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8" @@ -44418,7 +44418,7 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8" @@ -44667,7 +44667,7 @@ pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v1i64")] fn _vst4_s64( ptr: *mut i8, @@ -44690,7 +44690,7 @@ pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v1i64.p0i8" @@ -46961,7 +46961,7 @@ pub unsafe fn vusdotq_lane_s32( unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), 
link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" @@ -46992,7 +46992,7 @@ pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs index 7f95e9ed98..2a28b71b29 100644 --- a/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/crates/core_arch/src/arm_shared/neon/mod.rs @@ -1145,7 +1145,7 @@ impl_sign_conversions_neon! { } #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { // absolute value (64-bit) #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")] #[cfg_attr( @@ -9921,7 +9921,7 @@ pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { )] pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9951,7 +9951,7 @@ pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t )] pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9981,7 +9981,7 @@ pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x )] pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), diff --git a/crates/core_arch/src/loongarch64/lasx/generated.rs b/crates/core_arch/src/loongarch64/lasx/generated.rs index 15bef7d7b5..2e56d8fb9b 100644 --- a/crates/core_arch/src/loongarch64/lasx/generated.rs +++ b/crates/core_arch/src/loongarch64/lasx/generated.rs @@ -9,7 +9,7 @@ use super::types::*; #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.loongarch.lasx.xvsll.b"] fn __lasx_xvsll_b(a: v32i8, b: v32i8) -> v32i8; #[link_name = "llvm.loongarch.lasx.xvsll.h"] diff --git a/crates/core_arch/src/loongarch64/lsx/generated.rs b/crates/core_arch/src/loongarch64/lsx/generated.rs index b00e00f1a4..2bc364f3e0 100644 --- a/crates/core_arch/src/loongarch64/lsx/generated.rs +++ b/crates/core_arch/src/loongarch64/lsx/generated.rs @@ -9,7 +9,7 @@ use super::types::*; #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.loongarch.lsx.vsll.b"] fn __lsx_vsll_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.loongarch.lsx.vsll.h"] diff --git a/crates/core_arch/src/loongarch64/mod.rs b/crates/core_arch/src/loongarch64/mod.rs index 8d3f70bad2..ed4bcc06f7 100644 --- a/crates/core_arch/src/loongarch64/mod.rs +++ b/crates/core_arch/src/loongarch64/mod.rs @@ -41,7 +41,7 @@ pub unsafe fn rdtimeh_w() -> (i32, isize) { } #[allow(improper_ctypes)] 
-extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.loongarch.crc.w.b.w"] fn __crc_w_b_w(a: i32, b: i32) -> i32; #[link_name = "llvm.loongarch.crc.w.h.w"] diff --git a/crates/core_arch/src/mips/msa.rs b/crates/core_arch/src/mips/msa.rs index 8bee1def40..563e121a7b 100644 --- a/crates/core_arch/src/mips/msa.rs +++ b/crates/core_arch/src/mips/msa.rs @@ -45,7 +45,7 @@ types! { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.mips.add.a.b"] fn msa_add_a_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.add.a.h"] diff --git a/crates/core_arch/src/nvptx/mod.rs b/crates/core_arch/src/nvptx/mod.rs index b971a32bc8..8d16dfb53d 100644 --- a/crates/core_arch/src/nvptx/mod.rs +++ b/crates/core_arch/src/nvptx/mod.rs @@ -19,7 +19,7 @@ mod packed; pub use packed::*; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.nvvm.barrier0"] fn syncthreads() -> (); #[link_name = "llvm.nvvm.read.ptx.sreg.ntid.x"] @@ -147,7 +147,7 @@ pub unsafe fn trap() -> ! { } // Basic CUDA syscall declarations. -extern "C" { +unsafe extern "C" { /// Print formatted output from a kernel to a host-side output stream. /// /// Syscall arguments: diff --git a/crates/core_arch/src/nvptx/packed.rs b/crates/core_arch/src/nvptx/packed.rs index 03cfa2eb5c..ceccd255d7 100644 --- a/crates/core_arch/src/nvptx/packed.rs +++ b/crates/core_arch/src/nvptx/packed.rs @@ -7,7 +7,7 @@ use crate::intrinsics::simd::*; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.minnum.v2f16"] fn llvm_f16x2_minnum(a: f16x2, b: f16x2) -> f16x2; #[link_name = "llvm.minimum.v2f16"] diff --git a/crates/core_arch/src/powerpc/altivec.rs b/crates/core_arch/src/powerpc/altivec.rs index cbe7b26402..274ec88dde 100644 --- a/crates/core_arch/src/powerpc/altivec.rs +++ b/crates/core_arch/src/powerpc/altivec.rs @@ -48,7 +48,7 @@ types! { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.ppc.altivec.lvx"] fn lvx(p: *const i8) -> vector_unsigned_int; @@ -688,7 +688,7 @@ mod sealed { let addr = (b as *const u8).offset(a); // Workaround ptr::copy_nonoverlapping not being inlined - extern "rust-intrinsic" { + unsafe extern "rust-intrinsic" { #[rustc_nounwind] pub fn copy_nonoverlapping(src: *const T, dst: *mut T, count: usize); } @@ -743,7 +743,7 @@ mod sealed { let addr = (b as *mut u8).offset(a); // Workaround ptr::copy_nonoverlapping not being inlined - extern "rust-intrinsic" { + unsafe extern "rust-intrinsic" { #[rustc_nounwind] pub fn copy_nonoverlapping(src: *const T, dst: *mut T, count: usize); } diff --git a/crates/core_arch/src/powerpc/vsx.rs b/crates/core_arch/src/powerpc/vsx.rs index c107575aee..ca9fcaabe8 100644 --- a/crates/core_arch/src/powerpc/vsx.rs +++ b/crates/core_arch/src/powerpc/vsx.rs @@ -35,7 +35,7 @@ types! 
{ } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.ppc.altivec.vperm"] fn vperm( a: vector_signed_int, diff --git a/crates/core_arch/src/powerpc64/vsx.rs b/crates/core_arch/src/powerpc64/vsx.rs index d74d1b0495..7b42be8653 100644 --- a/crates/core_arch/src/powerpc64/vsx.rs +++ b/crates/core_arch/src/powerpc64/vsx.rs @@ -17,7 +17,7 @@ use stdarch_test::assert_instr; use crate::mem::transmute; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.ppc.vsx.lxvl"] fn lxvl(a: *const u8, l: usize) -> vector_signed_int; diff --git a/crates/core_arch/src/riscv32/zk.rs b/crates/core_arch/src/riscv32/zk.rs index 15eecc2ea4..6ea3793ce9 100644 --- a/crates/core_arch/src/riscv32/zk.rs +++ b/crates/core_arch/src/riscv32/zk.rs @@ -1,7 +1,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.riscv.aes32esi"] fn _aes32esi(rs1: i32, rs2: i32, bs: i32) -> i32; diff --git a/crates/core_arch/src/riscv64/zk.rs b/crates/core_arch/src/riscv64/zk.rs index f89412fce7..37ee65876f 100644 --- a/crates/core_arch/src/riscv64/zk.rs +++ b/crates/core_arch/src/riscv64/zk.rs @@ -1,7 +1,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.riscv.aes64es"] fn _aes64es(rs1: i64, rs2: i64) -> i64; diff --git a/crates/core_arch/src/riscv_shared/zb.rs b/crates/core_arch/src/riscv_shared/zb.rs index 841b707989..d7ae05d46d 100644 --- a/crates/core_arch/src/riscv_shared/zb.rs +++ b/crates/core_arch/src/riscv_shared/zb.rs @@ -2,7 +2,7 @@ use stdarch_test::assert_instr; #[cfg(target_arch = "riscv32")] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.riscv.orc.b.i32"] fn _orc_b_32(rs: i32) -> i32; @@ -17,7 +17,7 @@ extern "unadjusted" { } #[cfg(target_arch = "riscv64")] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.riscv.orc.b.i64"] fn _orc_b_64(rs1: i64) -> i64; diff --git a/crates/core_arch/src/riscv_shared/zk.rs b/crates/core_arch/src/riscv_shared/zk.rs index 2d0e8602f4..f333c826c5 100644 --- a/crates/core_arch/src/riscv_shared/zk.rs +++ b/crates/core_arch/src/riscv_shared/zk.rs @@ -1,7 +1,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.riscv.sm4ed"] fn _sm4ed(rs1: i32, rs2: i32, bs: i32) -> i32; @@ -28,7 +28,7 @@ extern "unadjusted" { } #[cfg(target_arch = "riscv32")] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.riscv.xperm8.i32"] fn _xperm8_32(rs1: i32, rs2: i32) -> i32; @@ -37,7 +37,7 @@ extern "unadjusted" { } #[cfg(target_arch = "riscv64")] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.riscv.xperm8.i64"] fn _xperm8_64(rs1: i64, rs2: i64) -> i64; diff --git a/crates/core_arch/src/s390x/vector.rs b/crates/core_arch/src/s390x/vector.rs index 56996cef5c..5449d047f3 100644 --- a/crates/core_arch/src/s390x/vector.rs +++ b/crates/core_arch/src/s390x/vector.rs @@ -53,7 +53,7 @@ types! 
{ #[allow(improper_ctypes)] #[rustfmt::skip] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.smax.v16i8"] fn vmxb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char; #[link_name = "llvm.smax.v8i16"] fn vmxh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_short; #[link_name = "llvm.smax.v4i32"] fn vmxf(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int; diff --git a/crates/core_arch/src/wasm32/atomic.rs b/crates/core_arch/src/wasm32/atomic.rs index 0d5aba2f12..cdd2302d7f 100644 --- a/crates/core_arch/src/wasm32/atomic.rs +++ b/crates/core_arch/src/wasm32/atomic.rs @@ -1,7 +1,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "C" { +unsafe extern "C" { #[link_name = "llvm.wasm.memory.atomic.wait32"] fn llvm_atomic_wait_i32(ptr: *mut i32, exp: i32, timeout: i64) -> i32; #[link_name = "llvm.wasm.memory.atomic.wait64"] diff --git a/crates/core_arch/src/wasm32/memory.rs b/crates/core_arch/src/wasm32/memory.rs index 882e068152..348add286a 100644 --- a/crates/core_arch/src/wasm32/memory.rs +++ b/crates/core_arch/src/wasm32/memory.rs @@ -1,7 +1,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "C" { +unsafe extern "C" { #[link_name = "llvm.wasm.memory.grow"] fn llvm_memory_grow(mem: u32, pages: usize) -> usize; #[link_name = "llvm.wasm.memory.size"] diff --git a/crates/core_arch/src/wasm32/mod.rs b/crates/core_arch/src/wasm32/mod.rs index e6d79ea1ef..f287d6088f 100644 --- a/crates/core_arch/src/wasm32/mod.rs +++ b/crates/core_arch/src/wasm32/mod.rs @@ -174,7 +174,7 @@ pub fn f64_sqrt(a: f64) -> f64 { unsafe { crate::intrinsics::sqrtf64(a) } } -extern "C-unwind" { +unsafe extern "C-unwind" { #[link_name = "llvm.wasm.throw"] fn wasm_throw(tag: i32, ptr: *mut u8) -> !; } diff --git a/crates/core_arch/src/wasm32/relaxed_simd.rs b/crates/core_arch/src/wasm32/relaxed_simd.rs index e846379ef0..1a15f581b3 100644 --- a/crates/core_arch/src/wasm32/relaxed_simd.rs +++ b/crates/core_arch/src/wasm32/relaxed_simd.rs @@ -5,7 +5,7 @@ use crate::core_arch::simd; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.wasm.relaxed.swizzle"] fn llvm_relaxed_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16; #[link_name = "llvm.wasm.relaxed.trunc.signed"] diff --git a/crates/core_arch/src/wasm32/simd128.rs b/crates/core_arch/src/wasm32/simd128.rs index a009c6f3b7..c54d996622 100644 --- a/crates/core_arch/src/wasm32/simd128.rs +++ b/crates/core_arch/src/wasm32/simd128.rs @@ -73,7 +73,7 @@ conversions! 
{ } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.wasm.swizzle"] fn llvm_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16; diff --git a/crates/core_arch/src/x86/adx.rs b/crates/core_arch/src/x86/adx.rs index 587490ec88..5ba7664616 100644 --- a/crates/core_arch/src/x86/adx.rs +++ b/crates/core_arch/src/x86/adx.rs @@ -2,7 +2,7 @@ use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.x86.addcarry.32"] fn llvm_addcarry_u32(a: u8, b: u32, c: u32) -> (u8, u32); #[link_name = "llvm.x86.addcarryx.u32"] diff --git a/crates/core_arch/src/x86/aes.rs b/crates/core_arch/src/x86/aes.rs index 0346c8e05b..789081cdb5 100644 --- a/crates/core_arch/src/x86/aes.rs +++ b/crates/core_arch/src/x86/aes.rs @@ -13,7 +13,7 @@ use crate::core_arch::x86::__m128i; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.aesni.aesdec"] fn aesdec(a: __m128i, round_key: __m128i) -> __m128i; #[link_name = "llvm.x86.aesni.aesdeclast"] diff --git a/crates/core_arch/src/x86/avx.rs b/crates/core_arch/src/x86/avx.rs index fd37c6c077..f78ca6d839 100644 --- a/crates/core_arch/src/x86/avx.rs +++ b/crates/core_arch/src/x86/avx.rs @@ -2984,7 +2984,7 @@ pub unsafe fn _mm256_cvtss_f32(a: __m256) -> f32 { // LLVM intrinsics used in the above functions #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx.round.pd.256"] fn roundpd256(a: __m256d, b: i32) -> __m256d; #[link_name = "llvm.x86.avx.round.ps.256"] diff --git a/crates/core_arch/src/x86/avx2.rs b/crates/core_arch/src/x86/avx2.rs index 38aa5548a3..0bc89ba45c 100644 --- a/crates/core_arch/src/x86/avx2.rs +++ b/crates/core_arch/src/x86/avx2.rs @@ -3638,7 +3638,7 @@ pub unsafe fn _mm256_extract_epi16(a: __m256i) -> i32 { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx2.phadd.w"] fn phaddw(a: i16x16, b: i16x16) -> i16x16; #[link_name = "llvm.x86.avx2.phadd.d"] diff --git a/crates/core_arch/src/x86/avx512bf16.rs b/crates/core_arch/src/x86/avx512bf16.rs index bd0fe2f1e2..6789fb1c31 100644 --- a/crates/core_arch/src/x86/avx512bf16.rs +++ b/crates/core_arch/src/x86/avx512bf16.rs @@ -10,7 +10,7 @@ use crate::intrinsics::simd::*; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512bf16.cvtne2ps2bf16.128"] fn cvtne2ps2bf16(a: f32x4, b: f32x4) -> i16x8; #[link_name = "llvm.x86.avx512bf16.cvtne2ps2bf16.256"] diff --git a/crates/core_arch/src/x86/avx512bitalg.rs b/crates/core_arch/src/x86/avx512bitalg.rs index 0605b2dcca..819a52110d 100644 --- a/crates/core_arch/src/x86/avx512bitalg.rs +++ b/crates/core_arch/src/x86/avx512bitalg.rs @@ -27,7 +27,7 @@ use crate::mem::transmute; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512.mask.vpshufbitqmb.512"] fn bitshuffle_512(data: i8x64, indices: i8x64, mask: __mmask64) -> __mmask64; #[link_name = "llvm.x86.avx512.mask.vpshufbitqmb.256"] diff --git a/crates/core_arch/src/x86/avx512bw.rs b/crates/core_arch/src/x86/avx512bw.rs index a51e15c8ea..5e79142ae6 100644 --- a/crates/core_arch/src/x86/avx512bw.rs +++ b/crates/core_arch/src/x86/avx512bw.rs @@ -11255,7 +11255,7 @@ pub unsafe fn _mm_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512.pmul.hr.sw.512"] fn 
vpmulhrsw(a: i16x32, b: i16x32) -> i16x32; diff --git a/crates/core_arch/src/x86/avx512cd.rs b/crates/core_arch/src/x86/avx512cd.rs index f8e3a37307..71eceab6bd 100644 --- a/crates/core_arch/src/x86/avx512cd.rs +++ b/crates/core_arch/src/x86/avx512cd.rs @@ -491,7 +491,7 @@ pub unsafe fn _mm_maskz_lzcnt_epi64(k: __mmask8, a: __m128i) -> __m128i { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512.conflict.d.512"] fn vpconflictd(a: i32x16) -> i32x16; #[link_name = "llvm.x86.avx512.conflict.d.256"] diff --git a/crates/core_arch/src/x86/avx512dq.rs b/crates/core_arch/src/x86/avx512dq.rs index 757231279e..9e5ac789af 100644 --- a/crates/core_arch/src/x86/avx512dq.rs +++ b/crates/core_arch/src/x86/avx512dq.rs @@ -6738,7 +6738,7 @@ pub unsafe fn _mm_mask_fpclass_ss_mask(k1: __mmask8, a: __m128) } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512.sitofp.round.v2f64.v2i64"] fn vcvtqq2pd_128(a: i64x2, rounding: i32) -> f64x2; #[link_name = "llvm.x86.avx512.sitofp.round.v4f64.v4i64"] diff --git a/crates/core_arch/src/x86/avx512f.rs b/crates/core_arch/src/x86/avx512f.rs index 639dcde3be..5c45d815f2 100644 --- a/crates/core_arch/src/x86/avx512f.rs +++ b/crates/core_arch/src/x86/avx512f.rs @@ -40275,7 +40275,7 @@ pub const _MM_PERM_DDDC: _MM_PERM_ENUM = 0xFE; pub const _MM_PERM_DDDD: _MM_PERM_ENUM = 0xFF; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512.sqrt.ps.512"] fn vsqrtps(a: f32x16, rounding: i32) -> f32x16; #[link_name = "llvm.x86.avx512.sqrt.pd.512"] diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs index 5ed0397545..b9e269f652 100644 --- a/crates/core_arch/src/x86/avx512fp16.rs +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -15990,7 +15990,7 @@ pub unsafe fn _mm_cvtsi16_si128(a: i16) -> __m128i { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"] fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8; #[link_name = "llvm.x86.avx512fp16.vcomi.sh"] diff --git a/crates/core_arch/src/x86/avx512ifma.rs b/crates/core_arch/src/x86/avx512ifma.rs index a1cb339b38..12123c2162 100644 --- a/crates/core_arch/src/x86/avx512ifma.rs +++ b/crates/core_arch/src/x86/avx512ifma.rs @@ -399,7 +399,7 @@ pub unsafe fn _mm_maskz_madd52lo_epu64(k: __mmask8, a: __m128i, b: __m128i, c: _ } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512.vpmadd52l.uq.128"] fn vpmadd52luq_128(z: __m128i, x: __m128i, y: __m128i) -> __m128i; #[link_name = "llvm.x86.avx512.vpmadd52h.uq.128"] diff --git a/crates/core_arch/src/x86/avx512vbmi.rs b/crates/core_arch/src/x86/avx512vbmi.rs index 3c16c9c424..b9bded92d6 100644 --- a/crates/core_arch/src/x86/avx512vbmi.rs +++ b/crates/core_arch/src/x86/avx512vbmi.rs @@ -431,7 +431,7 @@ pub unsafe fn _mm_maskz_multishift_epi64_epi8(k: __mmask16, a: __m128i, b: __m12 } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512.vpermi2var.qi.512"] fn vpermi2b(a: i8x64, idx: i8x64, b: i8x64) -> i8x64; #[link_name = "llvm.x86.avx512.vpermi2var.qi.256"] diff --git a/crates/core_arch/src/x86/avx512vbmi2.rs b/crates/core_arch/src/x86/avx512vbmi2.rs index a14d0d7081..f5a9cce3e6 100644 --- a/crates/core_arch/src/x86/avx512vbmi2.rs +++ b/crates/core_arch/src/x86/avx512vbmi2.rs @@ -2053,7 +2053,7 @@ pub unsafe fn _mm_maskz_shrdi_epi16( } #[allow(improper_ctypes)] -extern "C" { +unsafe 
extern "C" { #[link_name = "llvm.x86.avx512.mask.compress.store.w.512"] fn vcompressstorew(mem: *mut i8, data: i16x32, mask: u32); #[link_name = "llvm.x86.avx512.mask.compress.store.w.256"] diff --git a/crates/core_arch/src/x86/avx512vnni.rs b/crates/core_arch/src/x86/avx512vnni.rs index 8d207d1638..1e1639b700 100644 --- a/crates/core_arch/src/x86/avx512vnni.rs +++ b/crates/core_arch/src/x86/avx512vnni.rs @@ -1011,7 +1011,7 @@ pub unsafe fn _mm256_dpwuuds_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m2 } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512.vpdpwssd.512"] fn vpdpwssd(src: i32x16, a: i32x16, b: i32x16) -> i32x16; #[link_name = "llvm.x86.avx512.vpdpwssd.256"] diff --git a/crates/core_arch/src/x86/avxneconvert.rs b/crates/core_arch/src/x86/avxneconvert.rs index b31923c169..4520529934 100644 --- a/crates/core_arch/src/x86/avxneconvert.rs +++ b/crates/core_arch/src/x86/avxneconvert.rs @@ -233,7 +233,7 @@ pub unsafe fn _mm256_cvtneps_avx_pbh(a: __m256) -> __m128bh { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.vbcstnebf162ps128"] fn bcstnebf162ps_128(a: *const bf16) -> __m128; #[link_name = "llvm.x86.vbcstnebf162ps256"] diff --git a/crates/core_arch/src/x86/bmi1.rs b/crates/core_arch/src/x86/bmi1.rs index 620358c982..b8eca101de 100644 --- a/crates/core_arch/src/x86/bmi1.rs +++ b/crates/core_arch/src/x86/bmi1.rs @@ -124,7 +124,7 @@ pub unsafe fn _mm_tzcnt_32(x: u32) -> i32 { x.trailing_zeros() as i32 } -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.bmi.bextr.32"] fn x86_bmi_bextr_32(x: u32, y: u32) -> u32; } diff --git a/crates/core_arch/src/x86/bmi2.rs b/crates/core_arch/src/x86/bmi2.rs index efe7199e98..2f7b31c43b 100644 --- a/crates/core_arch/src/x86/bmi2.rs +++ b/crates/core_arch/src/x86/bmi2.rs @@ -66,7 +66,7 @@ pub unsafe fn _pext_u32(a: u32, mask: u32) -> u32 { x86_bmi2_pext_32(a, mask) } -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.bmi.bzhi.32"] fn x86_bmi2_bzhi_32(x: u32, y: u32) -> u32; #[link_name = "llvm.x86.bmi.pdep.32"] diff --git a/crates/core_arch/src/x86/f16c.rs b/crates/core_arch/src/x86/f16c.rs index 9feb0f44f9..ff3a544117 100644 --- a/crates/core_arch/src/x86/f16c.rs +++ b/crates/core_arch/src/x86/f16c.rs @@ -8,7 +8,7 @@ use crate::core_arch::{simd::*, x86::*}; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.x86.vcvtph2ps.128"] fn llvm_vcvtph2ps_128(a: i16x8) -> f32x4; #[link_name = "llvm.x86.vcvtph2ps.256"] diff --git a/crates/core_arch/src/x86/fxsr.rs b/crates/core_arch/src/x86/fxsr.rs index 691aafd655..71fd52ca14 100644 --- a/crates/core_arch/src/x86/fxsr.rs +++ b/crates/core_arch/src/x86/fxsr.rs @@ -4,7 +4,7 @@ use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.fxsave"] fn fxsave(p: *mut u8); #[link_name = "llvm.x86.fxrstor"] diff --git a/crates/core_arch/src/x86/gfni.rs b/crates/core_arch/src/x86/gfni.rs index 206b528f28..42387e3d21 100644 --- a/crates/core_arch/src/x86/gfni.rs +++ b/crates/core_arch/src/x86/gfni.rs @@ -23,7 +23,7 @@ use crate::mem::transmute; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.vgf2p8affineinvqb.512"] fn vgf2p8affineinvqb_512(x: i8x64, a: i8x64, imm8: u8) -> i8x64; #[link_name = "llvm.x86.vgf2p8affineinvqb.256"] diff --git a/crates/core_arch/src/x86/pclmulqdq.rs b/crates/core_arch/src/x86/pclmulqdq.rs 
index cf192eedfe..e346464fb3 100644 --- a/crates/core_arch/src/x86/pclmulqdq.rs +++ b/crates/core_arch/src/x86/pclmulqdq.rs @@ -11,7 +11,7 @@ use crate::core_arch::x86::__m128i; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.pclmulqdq"] fn pclmulqdq(a: __m128i, round_key: __m128i, imm8: u8) -> __m128i; } diff --git a/crates/core_arch/src/x86/rdrand.rs b/crates/core_arch/src/x86/rdrand.rs index cfb7fc7969..5009791521 100644 --- a/crates/core_arch/src/x86/rdrand.rs +++ b/crates/core_arch/src/x86/rdrand.rs @@ -4,7 +4,7 @@ #![allow(clippy::module_name_repetitions)] #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.x86.rdrand.16"] fn x86_rdrand16_step() -> (u16, i32); #[link_name = "llvm.x86.rdrand.32"] diff --git a/crates/core_arch/src/x86/rdtsc.rs b/crates/core_arch/src/x86/rdtsc.rs index c8f3c418ce..e714aa863b 100644 --- a/crates/core_arch/src/x86/rdtsc.rs +++ b/crates/core_arch/src/x86/rdtsc.rs @@ -50,7 +50,7 @@ pub unsafe fn __rdtscp(aux: *mut u32) -> u64 { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.rdtsc"] fn rdtsc() -> u64; #[link_name = "llvm.x86.rdtscp"] diff --git a/crates/core_arch/src/x86/rtm.rs b/crates/core_arch/src/x86/rtm.rs index 65a9f0e3cb..b807305d6a 100644 --- a/crates/core_arch/src/x86/rtm.rs +++ b/crates/core_arch/src/x86/rtm.rs @@ -16,7 +16,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.xbegin"] fn x86_xbegin() -> i32; #[link_name = "llvm.x86.xend"] diff --git a/crates/core_arch/src/x86/sha.rs b/crates/core_arch/src/x86/sha.rs index 144677818a..13aee75c00 100644 --- a/crates/core_arch/src/x86/sha.rs +++ b/crates/core_arch/src/x86/sha.rs @@ -1,7 +1,7 @@ use crate::core_arch::{simd::*, x86::*}; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.sha1msg1"] fn sha1msg1(a: i32x4, b: i32x4) -> i32x4; #[link_name = "llvm.x86.sha1msg2"] diff --git a/crates/core_arch/src/x86/sse.rs b/crates/core_arch/src/x86/sse.rs index ee03628cba..6753caa21b 100644 --- a/crates/core_arch/src/x86/sse.rs +++ b/crates/core_arch/src/x86/sse.rs @@ -1907,7 +1907,7 @@ pub unsafe fn _MM_TRANSPOSE4_PS( } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.sse.rcp.ss"] fn rcpss(a: __m128) -> __m128; #[link_name = "llvm.x86.sse.rcp.ps"] diff --git a/crates/core_arch/src/x86/sse2.rs b/crates/core_arch/src/x86/sse2.rs index d63ac8e7e1..2f5d234c74 100644 --- a/crates/core_arch/src/x86/sse2.rs +++ b/crates/core_arch/src/x86/sse2.rs @@ -2945,7 +2945,7 @@ pub unsafe fn _mm_unpacklo_pd(a: __m128d, b: __m128d) -> __m128d { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.sse2.pause"] fn pause(); #[link_name = "llvm.x86.sse2.clflush"] diff --git a/crates/core_arch/src/x86/sse3.rs b/crates/core_arch/src/x86/sse3.rs index 94553844e1..35960441fd 100644 --- a/crates/core_arch/src/x86/sse3.rs +++ b/crates/core_arch/src/x86/sse3.rs @@ -148,7 +148,7 @@ pub unsafe fn _mm_moveldup_ps(a: __m128) -> __m128 { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.sse3.hadd.pd"] fn haddpd(a: __m128d, b: __m128d) -> __m128d; #[link_name = "llvm.x86.sse3.hadd.ps"] diff --git a/crates/core_arch/src/x86/sse41.rs b/crates/core_arch/src/x86/sse41.rs index 17d3e719ba..a4c7b587dc 100644 --- a/crates/core_arch/src/x86/sse41.rs +++ b/crates/core_arch/src/x86/sse41.rs @@ -1087,7 
+1087,7 @@ pub unsafe fn _mm_stream_load_si128(mem_addr: *const __m128i) -> __m128i { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.sse41.insertps"] fn insertps(a: __m128, b: __m128, imm8: u8) -> __m128; #[link_name = "llvm.x86.sse41.packusdw"] diff --git a/crates/core_arch/src/x86/sse42.rs b/crates/core_arch/src/x86/sse42.rs index 36350a1d93..206e9014b0 100644 --- a/crates/core_arch/src/x86/sse42.rs +++ b/crates/core_arch/src/x86/sse42.rs @@ -568,7 +568,7 @@ pub unsafe fn _mm_cmpgt_epi64(a: __m128i, b: __m128i) -> __m128i { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { // SSE 4.2 string and text comparison ops #[link_name = "llvm.x86.sse42.pcmpestrm128"] fn pcmpestrm128(a: i8x16, la: i32, b: i8x16, lb: i32, imm8: i8) -> u8x16; diff --git a/crates/core_arch/src/x86/sse4a.rs b/crates/core_arch/src/x86/sse4a.rs index 61989035c2..16642a0df0 100644 --- a/crates/core_arch/src/x86/sse4a.rs +++ b/crates/core_arch/src/x86/sse4a.rs @@ -6,7 +6,7 @@ use crate::core_arch::{simd::*, x86::*}; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.sse4a.extrq"] fn extrq(x: i64x2, y: i8x16) -> i64x2; #[link_name = "llvm.x86.sse4a.extrqi"] diff --git a/crates/core_arch/src/x86/ssse3.rs b/crates/core_arch/src/x86/ssse3.rs index 5a35d5cb3c..ce8d749c80 100644 --- a/crates/core_arch/src/x86/ssse3.rs +++ b/crates/core_arch/src/x86/ssse3.rs @@ -293,7 +293,7 @@ pub unsafe fn _mm_sign_epi32(a: __m128i, b: __m128i) -> __m128i { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.ssse3.pshuf.b.128"] fn pshufb128(a: u8x16, b: u8x16) -> u8x16; diff --git a/crates/core_arch/src/x86/tbm.rs b/crates/core_arch/src/x86/tbm.rs index ecd788b5b0..a245e69328 100644 --- a/crates/core_arch/src/x86/tbm.rs +++ b/crates/core_arch/src/x86/tbm.rs @@ -13,7 +13,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.tbm.bextri.u32"] fn bextri_u32(a: u32, control: u32) -> u32; } diff --git a/crates/core_arch/src/x86/vaes.rs b/crates/core_arch/src/x86/vaes.rs index 2c3cead874..0a7abbea8a 100644 --- a/crates/core_arch/src/x86/vaes.rs +++ b/crates/core_arch/src/x86/vaes.rs @@ -14,7 +14,7 @@ use crate::core_arch::x86::__m512i; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.aesni.aesenc.256"] fn aesenc_256(a: __m256i, round_key: __m256i) -> __m256i; #[link_name = "llvm.x86.aesni.aesenclast.256"] diff --git a/crates/core_arch/src/x86/vpclmulqdq.rs b/crates/core_arch/src/x86/vpclmulqdq.rs index 37bbd502e5..2afc4a0bd7 100644 --- a/crates/core_arch/src/x86/vpclmulqdq.rs +++ b/crates/core_arch/src/x86/vpclmulqdq.rs @@ -12,7 +12,7 @@ use crate::core_arch::x86::__m512i; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.pclmulqdq.256"] fn pclmulqdq_256(a: __m256i, round_key: __m256i, imm8: u8) -> __m256i; #[link_name = "llvm.x86.pclmulqdq.512"] diff --git a/crates/core_arch/src/x86/xsave.rs b/crates/core_arch/src/x86/xsave.rs index a05fd05b9f..b5dc73c068 100644 --- a/crates/core_arch/src/x86/xsave.rs +++ b/crates/core_arch/src/x86/xsave.rs @@ -5,7 +5,7 @@ use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.xsave"] fn xsave(p: *mut u8, hi: u32, lo: u32); #[link_name = "llvm.x86.xrstor"] diff --git a/crates/core_arch/src/x86_64/adx.rs 
b/crates/core_arch/src/x86_64/adx.rs index f1d5853125..bdc534b5a5 100644 --- a/crates/core_arch/src/x86_64/adx.rs +++ b/crates/core_arch/src/x86_64/adx.rs @@ -2,7 +2,7 @@ use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.x86.addcarry.64"] fn llvm_addcarry_u64(a: u8, b: u64, c: u64) -> (u8, u64); #[link_name = "llvm.x86.addcarryx.u64"] diff --git a/crates/core_arch/src/x86_64/amx.rs b/crates/core_arch/src/x86_64/amx.rs index 69c62e8801..7c437e9704 100644 --- a/crates/core_arch/src/x86_64/amx.rs +++ b/crates/core_arch/src/x86_64/amx.rs @@ -252,7 +252,7 @@ pub unsafe fn _tile_cmmrlfp16ps() { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.ldtilecfg"] fn ldtilecfg(mem_addr: *const u8); #[link_name = "llvm.x86.sttilecfg"] diff --git a/crates/core_arch/src/x86_64/avx512f.rs b/crates/core_arch/src/x86_64/avx512f.rs index 8a8a82b735..c1c79585b0 100644 --- a/crates/core_arch/src/x86_64/avx512f.rs +++ b/crates/core_arch/src/x86_64/avx512f.rs @@ -479,7 +479,7 @@ pub unsafe fn _mm_cvtt_roundss_u64(a: __m128) -> u64 { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512.vcvtss2si64"] fn vcvtss2si64(a: f32x4, rounding: i32) -> i64; #[link_name = "llvm.x86.avx512.vcvtss2usi64"] diff --git a/crates/core_arch/src/x86_64/avx512fp16.rs b/crates/core_arch/src/x86_64/avx512fp16.rs index aca3b01556..dbf88ab57f 100644 --- a/crates/core_arch/src/x86_64/avx512fp16.rs +++ b/crates/core_arch/src/x86_64/avx512fp16.rs @@ -199,7 +199,7 @@ pub unsafe fn _mm_cvtt_roundsh_u64(a: __m128h) -> u64 { } #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.avx512fp16.vcvtsi642sh"] fn vcvtsi642sh(a: __m128h, b: i64, rounding: i32) -> __m128h; #[link_name = "llvm.x86.avx512fp16.vcvtusi642sh"] diff --git a/crates/core_arch/src/x86_64/bmi.rs b/crates/core_arch/src/x86_64/bmi.rs index 3345b361c7..167ad26bfc 100644 --- a/crates/core_arch/src/x86_64/bmi.rs +++ b/crates/core_arch/src/x86_64/bmi.rs @@ -116,7 +116,7 @@ pub unsafe fn _mm_tzcnt_64(x: u64) -> i64 { x.trailing_zeros() as i64 } -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.bmi.bextr.64"] fn x86_bmi_bextr_64(x: u64, y: u64) -> u64; } diff --git a/crates/core_arch/src/x86_64/bmi2.rs b/crates/core_arch/src/x86_64/bmi2.rs index efc252d8f4..5ca4c6bde4 100644 --- a/crates/core_arch/src/x86_64/bmi2.rs +++ b/crates/core_arch/src/x86_64/bmi2.rs @@ -68,7 +68,7 @@ pub unsafe fn _pext_u64(a: u64, mask: u64) -> u64 { x86_bmi2_pext_64(a, mask) } -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.bmi.bzhi.64"] fn x86_bmi2_bzhi_64(x: u64, y: u64) -> u64; #[link_name = "llvm.x86.bmi.pdep.64"] diff --git a/crates/core_arch/src/x86_64/fxsr.rs b/crates/core_arch/src/x86_64/fxsr.rs index 65ebc2da8e..a24b44fb1f 100644 --- a/crates/core_arch/src/x86_64/fxsr.rs +++ b/crates/core_arch/src/x86_64/fxsr.rs @@ -4,7 +4,7 @@ use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.fxsave64"] fn fxsave64(p: *mut u8); #[link_name = "llvm.x86.fxrstor64"] diff --git a/crates/core_arch/src/x86_64/rdrand.rs b/crates/core_arch/src/x86_64/rdrand.rs index c5bb929759..42e907b4e4 100644 --- a/crates/core_arch/src/x86_64/rdrand.rs +++ b/crates/core_arch/src/x86_64/rdrand.rs @@ -5,7 +5,7 @@ #![allow(clippy::module_name_repetitions)] #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { #[link_name = "llvm.x86.rdrand.64"] fn 
x86_rdrand64_step() -> (u64, i32); #[link_name = "llvm.x86.rdseed.64"] diff --git a/crates/core_arch/src/x86_64/sse.rs b/crates/core_arch/src/x86_64/sse.rs index 75d0a7b46a..c5f70ccb39 100644 --- a/crates/core_arch/src/x86_64/sse.rs +++ b/crates/core_arch/src/x86_64/sse.rs @@ -6,7 +6,7 @@ use crate::core_arch::x86::*; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.sse.cvtss2si64"] fn cvtss2si64(a: __m128) -> i64; #[link_name = "llvm.x86.sse.cvttss2si64"] diff --git a/crates/core_arch/src/x86_64/sse2.rs b/crates/core_arch/src/x86_64/sse2.rs index 5c814b0941..b1cd12506a 100644 --- a/crates/core_arch/src/x86_64/sse2.rs +++ b/crates/core_arch/src/x86_64/sse2.rs @@ -6,7 +6,7 @@ use crate::core_arch::x86::*; use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.sse2.cvtsd2si64"] fn cvtsd2si64(a: __m128d) -> i64; #[link_name = "llvm.x86.sse2.cvttsd2si64"] diff --git a/crates/core_arch/src/x86_64/sse42.rs b/crates/core_arch/src/x86_64/sse42.rs index 164def4336..6c3111b719 100644 --- a/crates/core_arch/src/x86_64/sse42.rs +++ b/crates/core_arch/src/x86_64/sse42.rs @@ -4,7 +4,7 @@ use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.sse42.crc32.64.64"] fn crc32_64_64(crc: u64, v: u64) -> u64; } diff --git a/crates/core_arch/src/x86_64/tbm.rs b/crates/core_arch/src/x86_64/tbm.rs index 119f637ebd..002e005916 100644 --- a/crates/core_arch/src/x86_64/tbm.rs +++ b/crates/core_arch/src/x86_64/tbm.rs @@ -13,7 +13,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.tbm.bextri.u64"] fn bextri_u64(a: u64, control: u64) -> u64; } diff --git a/crates/core_arch/src/x86_64/xsave.rs b/crates/core_arch/src/x86_64/xsave.rs index 9177069100..d8e72969ed 100644 --- a/crates/core_arch/src/x86_64/xsave.rs +++ b/crates/core_arch/src/x86_64/xsave.rs @@ -6,7 +6,7 @@ use stdarch_test::assert_instr; #[allow(improper_ctypes)] -extern "C" { +unsafe extern "C" { #[link_name = "llvm.x86.xsave64"] fn xsave64(p: *mut u8, hi: u32, lo: u32); #[link_name = "llvm.x86.xrstor64"] diff --git a/crates/std_detect/src/detect/cache.rs b/crates/std_detect/src/detect/cache.rs index cee07b0ec7..3056c28a80 100644 --- a/crates/std_detect/src/detect/cache.rs +++ b/crates/std_detect/src/detect/cache.rs @@ -140,7 +140,7 @@ cfg_if::cfg_if! 
{ if #[cfg(windows)] { use alloc::vec; #[link(name = "kernel32")] - extern "system" { + unsafe extern "system" { fn GetEnvironmentVariableA(name: *const u8, buffer: *mut u8, size: u32) -> u32; } let len = unsafe { GetEnvironmentVariableA(RUST_STD_DETECT_UNSTABLE.as_ptr().cast::(), core::ptr::null_mut(), 0) }; diff --git a/crates/std_detect/src/detect/os/windows/aarch64.rs b/crates/std_detect/src/detect/os/windows/aarch64.rs index faded671cc..ec31701aec 100644 --- a/crates/std_detect/src/detect/os/windows/aarch64.rs +++ b/crates/std_detect/src/detect/os/windows/aarch64.rs @@ -20,7 +20,7 @@ pub(crate) fn detect_features() -> cache::Initializer { const PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE: u32 = 44; const PF_ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE: u32 = 45; - extern "system" { + unsafe extern "system" { pub fn IsProcessorFeaturePresent(ProcessorFeature: DWORD) -> BOOL; } diff --git a/crates/stdarch-gen-arm/src/intrinsic.rs b/crates/stdarch-gen-arm/src/intrinsic.rs index c3f2ad7b91..60470dd77e 100644 --- a/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/crates/stdarch-gen-arm/src/intrinsic.rs @@ -663,7 +663,7 @@ impl ToTokens for LLVMLink { let signature = self.signature.as_ref().unwrap(); let links = self.links.as_ref().unwrap(); tokens.append_all(quote! { - extern "unadjusted" { + unsafe extern "unadjusted" { #(#links)* #signature; } diff --git a/crates/stdarch-gen-loongarch/src/main.rs b/crates/stdarch-gen-loongarch/src/main.rs index d0b9261347..160926293b 100644 --- a/crates/stdarch-gen-loongarch/src/main.rs +++ b/crates/stdarch-gen-loongarch/src/main.rs @@ -163,7 +163,7 @@ use super::types::*; out.push_str( r#" #[allow(improper_ctypes)] -extern "unadjusted" { +unsafe extern "unadjusted" { "#, ); From 22068b9ed3acb47e00f7c9cc9fa86b6dfd62bf22 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sun, 9 Feb 2025 09:48:01 -0800 Subject: [PATCH 2/9] Apply keyword_idents_2024 --- examples/hex.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/hex.rs b/examples/hex.rs index 2c10f8d347..add2bc47f6 100644 --- a/examples/hex.rs +++ b/examples/hex.rs @@ -329,7 +329,7 @@ mod benches { ) { let mut rng = rand::thread_rng(); let input = std::iter::repeat(()) - .map(|()| rng.gen::()) + .map(|()| rng.r#gen::()) .take(len) .collect::>(); let mut dst = vec![0; input.len() * 2]; From a56a862d26ddeda775c26281fc44e346e253544b Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sun, 9 Feb 2025 10:04:29 -0800 Subject: [PATCH 3/9] Apply rust_2024_incompatible_pat --- crates/stdarch-gen-arm/src/wildstring.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/stdarch-gen-arm/src/wildstring.rs b/crates/stdarch-gen-arm/src/wildstring.rs index 2eb467b962..095b18b846 100644 --- a/crates/stdarch-gen-arm/src/wildstring.rs +++ b/crates/stdarch-gen-arm/src/wildstring.rs @@ -90,7 +90,7 @@ impl WildString { self.iter_mut().try_for_each(|wp| -> Result<(), String> { if let WildStringPart::Wildcard(w) = wp { match w { - Wildcard::NEONType(_, _, ref maybe_suffix_kind) => { + &mut Wildcard::NEONType(_, _, ref maybe_suffix_kind) => { if let Some(suffix_kind) = maybe_suffix_kind { let x = ctx.provide_type_wildcard(w).unwrap(); *wp = WildStringPart::String(make_neon_suffix(x, *suffix_kind)) @@ -111,7 +111,7 @@ impl WildString { self.iter_mut().try_for_each(|wp| -> Result<(), String> { if let WildStringPart::Wildcard(w) = wp { match w { - Wildcard::NEONType(_, _, ref maybe_suffix_kind) => { + &mut Wildcard::NEONType(_, _, ref maybe_suffix_kind) => { if let Some(suffix_kind) = maybe_suffix_kind 
{ let x = ctx.provide_type_wildcard(w).unwrap(); *wp = WildStringPart::String(make_neon_suffix(x, *suffix_kind)) From 60bf1cd548c8647e5c7db4850d96d8622787e2a6 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sun, 9 Feb 2025 10:53:07 -0800 Subject: [PATCH 4/9] Apply unsafe_attr_outside_unsafe --- crates/assert-instr-macro/src/lib.rs | 4 ++-- examples/wasm.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/assert-instr-macro/src/lib.rs b/crates/assert-instr-macro/src/lib.rs index 4821a31617..43dce850cf 100644 --- a/crates/assert-instr-macro/src/lib.rs +++ b/crates/assert-instr-macro/src/lib.rs @@ -143,7 +143,7 @@ pub fn assert_instr( quote! { #attrs #maybe_allow_deprecated - #[no_mangle] + #[unsafe(no_mangle)] #[inline(never)] pub unsafe extern #abi fn #shim_name(#(#inputs),*) #ret { #name::<#(#const_vals),*>(#(#input_vals),*) @@ -156,7 +156,7 @@ pub fn assert_instr( #attrs #maybe_allow_deprecated - #[no_mangle] + #[unsafe(no_mangle)] #[inline(never)] pub unsafe extern #abi fn #shim_name(#(#inputs),*) #ret { // The compiler in optimized mode by default runs a pass called diff --git a/examples/wasm.rs b/examples/wasm.rs index 8a95ed54e1..7d75fefea2 100644 --- a/examples/wasm.rs +++ b/examples/wasm.rs @@ -8,7 +8,7 @@ use core_arch::arch::wasm32::*; static mut HEAD: *mut *mut u8 = 0 as _; -#[no_mangle] +#[unsafe(no_mangle)] pub unsafe extern "C" fn page_alloc() -> *mut u8 { if !HEAD.is_null() { let next = *HEAD; @@ -27,14 +27,14 @@ pub unsafe extern "C" fn page_alloc() -> *mut u8 { ((ret as u32) * page_size()) as *mut u8 } -#[no_mangle] +#[unsafe(no_mangle)] pub unsafe extern "C" fn page_free(page: *mut u8) { let page = page as *mut *mut u8; *page = HEAD as *mut u8; HEAD = page; } -#[no_mangle] +#[unsafe(no_mangle)] pub unsafe extern "C" fn memory_used() -> usize { (page_size() * (memory_size(0) as u32)) as usize } From 12ce6d5fb2a7f13912546d668468d41a23397486 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sun, 9 Feb 2025 10:13:17 -0800 Subject: [PATCH 5/9] Allow unsafe_op_in_unsafe_fn Because stdarch has a really large number of unsafe functions with single-line calls, `unsafe_op_in_unsafe_fn` would end up adding a lot of noise, so for now we will allow it to migrate to 2024. --- crates/core_arch/src/lib.rs | 1 + crates/std_detect/src/lib.rs | 1 + examples/connect5.rs | 1 + examples/hex.rs | 1 + examples/wasm.rs | 1 + 5 files changed, 5 insertions(+) diff --git a/crates/core_arch/src/lib.rs b/crates/core_arch/src/lib.rs index acec8d3f76..b85200a73a 100644 --- a/crates/core_arch/src/lib.rs +++ b/crates/core_arch/src/lib.rs @@ -3,6 +3,7 @@ #![allow(dead_code)] #![allow(unused_features)] #![allow(internal_features)] +#![allow(unsafe_op_in_unsafe_fn)] #![deny(rust_2018_idioms)] #![feature( custom_inner_attributes, diff --git a/crates/std_detect/src/lib.rs b/crates/std_detect/src/lib.rs index ab1b77bad5..7a07452992 100644 --- a/crates/std_detect/src/lib.rs +++ b/crates/std_detect/src/lib.rs @@ -18,6 +18,7 @@ #![feature(staged_api, doc_cfg, allow_internal_unstable)] #![deny(rust_2018_idioms)] #![allow(clippy::shadow_reuse)] +#![allow(unsafe_op_in_unsafe_fn)] #![cfg_attr(test, allow(unused_imports))] #![no_std] #![allow(internal_features)] diff --git a/examples/connect5.rs b/examples/connect5.rs index 6f33c5a140..88bb18878c 100644 --- a/examples/connect5.rs +++ b/examples/connect5.rs @@ -29,6 +29,7 @@ //! each move. 
#![allow(internal_features)] +#![allow(unsafe_op_in_unsafe_fn)] #![feature(avx512_target_feature)] #![cfg_attr(target_arch = "x86", feature(stdarch_x86_avx512, stdarch_internal))] #![cfg_attr(target_arch = "x86_64", feature(stdarch_x86_avx512, stdarch_internal))] diff --git a/examples/hex.rs b/examples/hex.rs index add2bc47f6..be42e2e41c 100644 --- a/examples/hex.rs +++ b/examples/hex.rs @@ -29,6 +29,7 @@ clippy::cast_sign_loss, clippy::missing_docs_in_private_items )] +#![allow(unsafe_op_in_unsafe_fn)] use std::{ io::{self, Read}, diff --git a/examples/wasm.rs b/examples/wasm.rs index 7d75fefea2..8ad38f3a03 100644 --- a/examples/wasm.rs +++ b/examples/wasm.rs @@ -1,6 +1,7 @@ //! A simple slab allocator for pages in wasm #![cfg(target_arch = "wasm32")] +#![allow(unsafe_op_in_unsafe_fn)] use std::ptr; From bc7c7c4f1ff059c5bac2fe88942de4f1d89d2d62 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sun, 9 Feb 2025 11:32:20 -0800 Subject: [PATCH 6/9] Update documentation for unsafe_op_in_unsafe_fn For the documentation side, we probably want to show the Rust 2024 style. --- crates/core_arch/src/core_arch_docs.md | 84 +++++++++++++------------- crates/core_arch/src/x86/mod.rs | 24 ++++---- crates/core_arch/src/x86/sse41.rs | 4 +- 3 files changed, 57 insertions(+), 55 deletions(-) diff --git a/crates/core_arch/src/core_arch_docs.md b/crates/core_arch/src/core_arch_docs.md index db97058abc..93a2e3d33e 100644 --- a/crates/core_arch/src/core_arch_docs.md +++ b/crates/core_arch/src/core_arch_docs.md @@ -131,7 +131,7 @@ unsafe fn foo_avx2() { #[cfg(target_arch = "x86_64")] use std::arch::x86_64::_mm256_add_epi64; - _mm256_add_epi64(...); + unsafe { _mm256_add_epi64(...); } } ``` @@ -287,47 +287,49 @@ unsafe fn hex_encode_sse41(mut src: &[u8], dst: &mut [u8]) { #[cfg(target_arch = "x86_64")] use std::arch::x86_64::*; - let ascii_zero = _mm_set1_epi8(b'0' as i8); - let nines = _mm_set1_epi8(9); - let ascii_a = _mm_set1_epi8((b'a' - 9 - 1) as i8); - let and4bits = _mm_set1_epi8(0xf); - - let mut i = 0_isize; - while src.len() >= 16 { - let invec = _mm_loadu_si128(src.as_ptr() as *const _); - - let masked1 = _mm_and_si128(invec, and4bits); - let masked2 = _mm_and_si128(_mm_srli_epi64(invec, 4), and4bits); - - // return 0xff corresponding to the elements > 9, or 0x00 otherwise - let cmpmask1 = _mm_cmpgt_epi8(masked1, nines); - let cmpmask2 = _mm_cmpgt_epi8(masked2, nines); - - // add '0' or the offset depending on the masks - let masked1 = _mm_add_epi8( - masked1, - _mm_blendv_epi8(ascii_zero, ascii_a, cmpmask1), - ); - let masked2 = _mm_add_epi8( - masked2, - _mm_blendv_epi8(ascii_zero, ascii_a, cmpmask2), - ); - - // interleave masked1 and masked2 bytes - let res1 = _mm_unpacklo_epi8(masked2, masked1); - let res2 = _mm_unpackhi_epi8(masked2, masked1); - - _mm_storeu_si128(dst.as_mut_ptr().offset(i * 2) as *mut _, res1); - _mm_storeu_si128( - dst.as_mut_ptr().offset(i * 2 + 16) as *mut _, - res2, - ); - src = &src[16..]; - i += 16; - } + unsafe { + let ascii_zero = _mm_set1_epi8(b'0' as i8); + let nines = _mm_set1_epi8(9); + let ascii_a = _mm_set1_epi8((b'a' - 9 - 1) as i8); + let and4bits = _mm_set1_epi8(0xf); + + let mut i = 0_isize; + while src.len() >= 16 { + let invec = _mm_loadu_si128(src.as_ptr() as *const _); + + let masked1 = _mm_and_si128(invec, and4bits); + let masked2 = _mm_and_si128(_mm_srli_epi64(invec, 4), and4bits); + + // return 0xff corresponding to the elements > 9, or 0x00 otherwise + let cmpmask1 = _mm_cmpgt_epi8(masked1, nines); + let cmpmask2 = _mm_cmpgt_epi8(masked2, nines); + + // 
add '0' or the offset depending on the masks + let masked1 = _mm_add_epi8( + masked1, + _mm_blendv_epi8(ascii_zero, ascii_a, cmpmask1), + ); + let masked2 = _mm_add_epi8( + masked2, + _mm_blendv_epi8(ascii_zero, ascii_a, cmpmask2), + ); + + // interleave masked1 and masked2 bytes + let res1 = _mm_unpacklo_epi8(masked2, masked1); + let res2 = _mm_unpackhi_epi8(masked2, masked1); + + _mm_storeu_si128(dst.as_mut_ptr().offset(i * 2) as *mut _, res1); + _mm_storeu_si128( + dst.as_mut_ptr().offset(i * 2 + 16) as *mut _, + res2, + ); + src = &src[16..]; + i += 16; + } - let i = i as usize; - hex_encode_fallback(src, &mut dst[i * 2..]); + let i = i as usize; + hex_encode_fallback(src, &mut dst[i * 2..]); + } } fn hex_encode_fallback(src: &[u8], dst: &mut [u8]) { diff --git a/crates/core_arch/src/x86/mod.rs b/crates/core_arch/src/x86/mod.rs index bd0af1cb79..a538304869 100644 --- a/crates/core_arch/src/x86/mod.rs +++ b/crates/core_arch/src/x86/mod.rs @@ -47,11 +47,11 @@ types! { /// /// # fn main() { /// # #[target_feature(enable = "sse2")] - /// # unsafe fn foo() { + /// # unsafe fn foo() { unsafe { /// let all_bytes_zero = _mm_setzero_si128(); /// let all_bytes_one = _mm_set1_epi8(1); /// let four_i32 = _mm_set_epi32(1, 2, 3, 4); - /// # } + /// # }} /// # if is_x86_feature_detected!("sse2") { unsafe { foo() } } /// # } /// ``` @@ -89,11 +89,11 @@ types! { /// /// # fn main() { /// # #[target_feature(enable = "sse")] - /// # unsafe fn foo() { + /// # unsafe fn foo() { unsafe { /// let four_zeros = _mm_setzero_ps(); /// let four_ones = _mm_set1_ps(1.0); /// let four_floats = _mm_set_ps(1.0, 2.0, 3.0, 4.0); - /// # } + /// # }} /// # if is_x86_feature_detected!("sse") { unsafe { foo() } } /// # } /// ``` @@ -131,11 +131,11 @@ types! { /// /// # fn main() { /// # #[target_feature(enable = "sse")] - /// # unsafe fn foo() { + /// # unsafe fn foo() { unsafe { /// let two_zeros = _mm_setzero_pd(); /// let two_ones = _mm_set1_pd(1.0); /// let two_floats = _mm_set_pd(1.0, 2.0); - /// # } + /// # }} /// # if is_x86_feature_detected!("sse") { unsafe { foo() } } /// # } /// ``` @@ -177,11 +177,11 @@ types! { /// /// # fn main() { /// # #[target_feature(enable = "avx")] - /// # unsafe fn foo() { + /// # unsafe fn foo() { unsafe { /// let all_bytes_zero = _mm256_setzero_si256(); /// let all_bytes_one = _mm256_set1_epi8(1); /// let eight_i32 = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); - /// # } + /// # }} /// # if is_x86_feature_detected!("avx") { unsafe { foo() } } /// # } /// ``` @@ -219,11 +219,11 @@ types! { /// /// # fn main() { /// # #[target_feature(enable = "avx")] - /// # unsafe fn foo() { + /// # unsafe fn foo() { unsafe { /// let eight_zeros = _mm256_setzero_ps(); /// let eight_ones = _mm256_set1_ps(1.0); /// let eight_floats = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - /// # } + /// # }} /// # if is_x86_feature_detected!("avx") { unsafe { foo() } } /// # } /// ``` @@ -261,11 +261,11 @@ types! 
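Alongside the documentation rewrite in this patch, a minimal, hypothetical doctest-shaped example (not an excerpt from core_arch_docs.md; only the SSE2 intrinsic names are real) showing the shape the updated examples take: runtime feature detection outside, and a `#[target_feature]` `unsafe fn` whose intrinsic calls sit in an explicit `unsafe` block.

```rust
// Hypothetical sketch of the documentation pattern, not taken from the
// crate docs; the intrinsics are real SSE2 intrinsics from std::arch.
#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;

    #[target_feature(enable = "sse2")]
    unsafe fn demo() {
        // Rust 2024 style: wrap the intrinsic calls in an explicit `unsafe`
        // block instead of relying on the implicit unsafe-fn body.
        unsafe {
            let zeros = _mm_setzero_si128();
            let ones = _mm_set1_epi8(1);
            let _sum = _mm_add_epi8(zeros, ones);
        }
    }

    if is_x86_feature_detected!("sse2") {
        // SAFETY: the sse2 feature was just detected at runtime.
        unsafe { demo() }
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```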
{ /// /// # fn main() { /// # #[target_feature(enable = "avx")] - /// # unsafe fn foo() { + /// # unsafe fn foo() { unsafe { /// let four_zeros = _mm256_setzero_pd(); /// let four_ones = _mm256_set1_pd(1.0); /// let four_floats = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); - /// # } + /// # }} /// # if is_x86_feature_detected!("avx") { unsafe { foo() } } /// # } /// ``` diff --git a/crates/core_arch/src/x86/sse41.rs b/crates/core_arch/src/x86/sse41.rs index a4c7b587dc..21a580e44f 100644 --- a/crates/core_arch/src/x86/sse41.rs +++ b/crates/core_arch/src/x86/sse41.rs @@ -177,13 +177,13 @@ pub unsafe fn _mm_blend_ps(a: __m128, b: __m128) -> __m128 { /// # fn main() { /// # if is_x86_feature_detected!("sse4.1") { /// # #[target_feature(enable = "sse4.1")] -/// # unsafe fn worker() { +/// # unsafe fn worker() { unsafe { /// let mut float_store = vec![1.0, 1.0, 2.0, 3.0]; /// let simd_floats = _mm_set_ps(2.5, 5.0, 7.5, 10.0); /// let x: i32 = _mm_extract_ps::<2>(simd_floats); /// float_store.push(f32::from_bits(x as u32)); /// assert_eq!(float_store, vec![1.0, 1.0, 2.0, 3.0, 5.0]); -/// # } +/// # }} /// # unsafe { worker() } /// # } /// # } From 999ec72982fbe4e5bd9003c79734240bfb4c2b9d Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sun, 9 Feb 2025 10:13:55 -0800 Subject: [PATCH 7/9] Update all crates to Rust 2024 --- crates/assert-instr-macro/Cargo.toml | 2 +- crates/core_arch/Cargo.toml | 2 +- crates/intrinsic-test/Cargo.toml | 2 +- crates/simd-test-macro/Cargo.toml | 2 +- crates/std_detect/Cargo.toml | 2 +- crates/stdarch-gen-arm/Cargo.toml | 2 +- crates/stdarch-gen-loongarch/Cargo.toml | 2 +- crates/stdarch-test/Cargo.toml | 2 +- crates/stdarch-verify/Cargo.toml | 4 ++-- examples/Cargo.toml | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/assert-instr-macro/Cargo.toml b/crates/assert-instr-macro/Cargo.toml index 881c8109c1..47410a6679 100644 --- a/crates/assert-instr-macro/Cargo.toml +++ b/crates/assert-instr-macro/Cargo.toml @@ -2,7 +2,7 @@ name = "assert-instr-macro" version = "0.1.0" authors = ["Alex Crichton "] -edition = "2021" +edition = "2024" [lib] proc-macro = true diff --git a/crates/core_arch/Cargo.toml b/crates/core_arch/Cargo.toml index 8237ea257b..296abc9ecf 100644 --- a/crates/core_arch/Cargo.toml +++ b/crates/core_arch/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" keywords = ["core", "simd", "arch", "intrinsics"] categories = ["hardware-support", "no-std"] license = "MIT OR Apache-2.0" -edition = "2021" +edition = "2024" [badges] is-it-maintained-issue-resolution = { repository = "rust-lang/stdarch" } diff --git a/crates/intrinsic-test/Cargo.toml b/crates/intrinsic-test/Cargo.toml index e5c6186436..a358bea4b8 100644 --- a/crates/intrinsic-test/Cargo.toml +++ b/crates/intrinsic-test/Cargo.toml @@ -6,7 +6,7 @@ authors = ["Jamie Cunliffe ", "Adam Gemmell "] license = "MIT OR Apache-2.0" -edition = "2021" +edition = "2024" [dependencies] lazy_static = "1.4.0" diff --git a/crates/simd-test-macro/Cargo.toml b/crates/simd-test-macro/Cargo.toml index c9e692d8e2..8f9f9b1327 100644 --- a/crates/simd-test-macro/Cargo.toml +++ b/crates/simd-test-macro/Cargo.toml @@ -2,7 +2,7 @@ name = "simd-test-macro" version = "0.1.0" authors = ["Alex Crichton "] -edition = "2021" +edition = "2024" [lib] proc-macro = true diff --git a/crates/std_detect/Cargo.toml b/crates/std_detect/Cargo.toml index 88d30a1fea..6f4f1c7b61 100644 --- a/crates/std_detect/Cargo.toml +++ b/crates/std_detect/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" keywords = ["std", "run-time", "feature", 
"detection"] categories = ["hardware-support"] license = "MIT OR Apache-2.0" -edition = "2021" +edition = "2024" [badges] is-it-maintained-issue-resolution = { repository = "rust-lang/stdarch" } diff --git a/crates/stdarch-gen-arm/Cargo.toml b/crates/stdarch-gen-arm/Cargo.toml index 4112018666..899296d25e 100644 --- a/crates/stdarch-gen-arm/Cargo.toml +++ b/crates/stdarch-gen-arm/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Luca Vizzarro ", "Jacob Bramley ", "James Barford-Evans "] license = "MIT OR Apache-2.0" -edition = "2021" +edition = "2024" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crates/stdarch-gen-loongarch/Cargo.toml b/crates/stdarch-gen-loongarch/Cargo.toml index 66419363c2..d3ac607c55 100644 --- a/crates/stdarch-gen-loongarch/Cargo.toml +++ b/crates/stdarch-gen-loongarch/Cargo.toml @@ -2,7 +2,7 @@ name = "stdarch-gen-loongarch" version = "0.1.0" authors = ["ZHAI Xiang ", "WANG Rui "] -edition = "2021" +edition = "2024" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crates/stdarch-test/Cargo.toml b/crates/stdarch-test/Cargo.toml index 3682fcd7ed..e4791e4ec5 100644 --- a/crates/stdarch-test/Cargo.toml +++ b/crates/stdarch-test/Cargo.toml @@ -2,7 +2,7 @@ name = "stdarch-test" version = "0.1.0" authors = ["Alex Crichton "] -edition = "2021" +edition = "2024" [dependencies] assert-instr-macro = { path = "../assert-instr-macro" } diff --git a/crates/stdarch-verify/Cargo.toml b/crates/stdarch-verify/Cargo.toml index 83a48e9468..c82a1262d0 100644 --- a/crates/stdarch-verify/Cargo.toml +++ b/crates/stdarch-verify/Cargo.toml @@ -2,7 +2,7 @@ name = "stdarch-verify" version = "0.1.0" authors = ["Alex Crichton "] -edition = "2021" +edition = "2024" [dependencies] proc-macro2 = "1.0" @@ -16,4 +16,4 @@ test = false [dev-dependencies] serde = { version = "1.0", features = ['derive'] } serde_json = "1.0.96" -quick-xml = { version = "0.33.0", features = ["serialize", "overlapped-lists"] } \ No newline at end of file +quick-xml = { version = "0.33.0", features = ["serialize", "overlapped-lists"] } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index d9034dd803..61184494e1 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -7,7 +7,7 @@ authors = [ "Gonzalo Brito Gadeschi ", ] description = "Examples of the stdarch crate." 
-edition = "2021" +edition = "2024" default-run = "hex" [dependencies] From f6b9432aa4f15e286987e8984c5b7db1bf746acf Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sun, 9 Feb 2025 10:19:55 -0800 Subject: [PATCH 8/9] Format with style edition 2024 --- crates/assert-instr-macro/src/lib.rs | 2 +- crates/core_arch/src/aarch64/neon/mod.rs | 258 ++------ crates/core_arch/src/arm/dsp.rs | 2 +- crates/core_arch/src/arm/simd32.rs | 2 +- crates/core_arch/src/arm_shared/crypto.rs | 6 +- crates/core_arch/src/arm_shared/neon/mod.rs | 560 +++--------------- crates/core_arch/src/macros.rs | 12 +- crates/core_arch/src/mod.rs | 2 +- crates/core_arch/src/powerpc/altivec.rs | 8 +- crates/core_arch/src/wasm32/simd128.rs | 4 +- crates/core_arch/src/x86/avx2.rs | 4 +- crates/core_arch/src/x86/avx512bitalg.rs | 8 +- crates/core_arch/src/x86/avx512bw.rs | 4 +- crates/core_arch/src/x86/avx512dq.rs | 2 +- crates/core_arch/src/x86/avx512f.rs | 8 +- crates/core_arch/src/x86/avx512fp16.rs | 22 +- crates/core_arch/src/x86/avx512vpopcntdq.rs | 2 +- crates/core_arch/src/x86/sse2.rs | 6 +- crates/core_arch/src/x86/test.rs | 2 +- crates/core_arch/src/x86_64/amx.rs | 2 +- crates/intrinsic-test/src/argument.rs | 2 +- crates/intrinsic-test/src/types.rs | 2 +- crates/std_detect/src/detect/os/aarch64.rs | 2 +- .../src/detect/os/darwin/aarch64.rs | 2 +- .../std_detect/src/detect/os/freebsd/arm.rs | 2 +- .../src/detect/os/freebsd/powerpc.rs | 2 +- .../std_detect/src/detect/os/linux/aarch64.rs | 2 +- crates/std_detect/src/detect/os/linux/arm.rs | 2 +- .../src/detect/os/linux/loongarch.rs | 2 +- crates/std_detect/src/detect/os/linux/mips.rs | 2 +- .../std_detect/src/detect/os/linux/powerpc.rs | 2 +- .../std_detect/src/detect/os/linux/riscv.rs | 2 +- .../std_detect/src/detect/os/linux/s390x.rs | 2 +- .../src/detect/os/windows/aarch64.rs | 2 +- crates/std_detect/src/detect/os/x86.rs | 2 +- crates/stdarch-gen-arm/src/assert_instr.rs | 4 +- crates/stdarch-gen-arm/src/context.rs | 4 +- crates/stdarch-gen-arm/src/expression.rs | 2 +- crates/stdarch-gen-arm/src/input.rs | 4 +- crates/stdarch-gen-arm/src/intrinsic.rs | 24 +- .../stdarch-gen-arm/src/load_store_tests.rs | 5 +- crates/stdarch-gen-arm/src/typekinds.rs | 2 +- crates/stdarch-gen-arm/src/wildstring.rs | 2 +- crates/stdarch-gen-loongarch/src/main.rs | 32 +- examples/connect5.rs | 6 +- 45 files changed, 228 insertions(+), 803 deletions(-) diff --git a/crates/assert-instr-macro/src/lib.rs b/crates/assert-instr-macro/src/lib.rs index 43dce850cf..acc764e7aa 100644 --- a/crates/assert-instr-macro/src/lib.rs +++ b/crates/assert-instr-macro/src/lib.rs @@ -202,7 +202,7 @@ struct Invoc { impl syn::parse::Parse for Invoc { fn parse(input: syn::parse::ParseStream<'_>) -> syn::Result { - use syn::{ext::IdentExt, Token}; + use syn::{Token, ext::IdentExt}; let mut instr = String::new(); while !input.is_empty() { diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs index a238bae6a1..1b82a90719 100644 --- a/crates/core_arch/src/aarch64/neon/mod.rs +++ b/crates/core_arch/src/aarch64/neon/mod.rs @@ -300,7 +300,7 @@ unsafe extern "unadjusted" { #[link_name = "llvm.aarch64.neon.tbl4.v8i8"] fn vqtbl4(a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, a3: int8x16_t, b: uint8x8_t) - -> int8x8_t; + -> int8x8_t; #[link_name = "llvm.aarch64.neon.tbl4.v16i8"] fn vqtbl4q( a0: int8x16_t, @@ -4448,26 +4448,14 @@ mod tests { unsafe fn test_vceq_u64() { test_cmp_u64( |i, j| vceq_u64(i, j), - |a: u64, b: u64| -> u64 { - if a == b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, 
+ |a: u64, b: u64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u64() { testq_cmp_u64( |i, j| vceqq_u64(i, j), - |a: u64, b: u64| -> u64 { - if a == b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4475,26 +4463,14 @@ mod tests { unsafe fn test_vceq_s64() { test_cmp_s64( |i, j| vceq_s64(i, j), - |a: i64, b: i64| -> u64 { - if a == b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s64() { testq_cmp_s64( |i, j| vceqq_s64(i, j), - |a: i64, b: i64| -> u64 { - if a == b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4502,26 +4478,14 @@ mod tests { unsafe fn test_vceq_p64() { test_cmp_p64( |i, j| vceq_p64(i, j), - |a: u64, b: u64| -> u64 { - if a == b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_p64() { testq_cmp_p64( |i, j| vceqq_p64(i, j), - |a: u64, b: u64| -> u64 { - if a == b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4529,26 +4493,14 @@ mod tests { unsafe fn test_vceq_f64() { test_cmp_f64( |i, j| vceq_f64(i, j), - |a: f64, b: f64| -> u64 { - if a == b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_f64() { testq_cmp_f64( |i, j| vceqq_f64(i, j), - |a: f64, b: f64| -> u64 { - if a == b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4556,26 +4508,14 @@ mod tests { unsafe fn test_vcgt_s64() { test_cmp_s64( |i, j| vcgt_s64(i, j), - |a: i64, b: i64| -> u64 { - if a > b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_s64() { testq_cmp_s64( |i, j| vcgtq_s64(i, j), - |a: i64, b: i64| -> u64 { - if a > b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4583,26 +4523,14 @@ mod tests { unsafe fn test_vcgt_u64() { test_cmp_u64( |i, j| vcgt_u64(i, j), - |a: u64, b: u64| -> u64 { - if a > b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u64() { testq_cmp_u64( |i, j| vcgtq_u64(i, j), - |a: u64, b: u64| -> u64 { - if a > b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4610,26 +4538,14 @@ mod tests { unsafe fn test_vcgt_f64() { test_cmp_f64( |i, j| vcgt_f64(i, j), - |a: f64, b: f64| -> u64 { - if a > b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_f64() { testq_cmp_f64( |i, j| vcgtq_f64(i, j), - |a: f64, b: f64| -> u64 { - if a > b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4637,26 +4553,14 @@ 
mod tests { unsafe fn test_vclt_s64() { test_cmp_s64( |i, j| vclt_s64(i, j), - |a: i64, b: i64| -> u64 { - if a < b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s64() { testq_cmp_s64( |i, j| vcltq_s64(i, j), - |a: i64, b: i64| -> u64 { - if a < b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4664,26 +4568,14 @@ mod tests { unsafe fn test_vclt_u64() { test_cmp_u64( |i, j| vclt_u64(i, j), - |a: u64, b: u64| -> u64 { - if a < b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u64() { testq_cmp_u64( |i, j| vcltq_u64(i, j), - |a: u64, b: u64| -> u64 { - if a < b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4691,26 +4583,14 @@ mod tests { unsafe fn test_vltq_f64() { test_cmp_f64( |i, j| vclt_f64(i, j), - |a: f64, b: f64| -> u64 { - if a < b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_f64() { testq_cmp_f64( |i, j| vcltq_f64(i, j), - |a: f64, b: f64| -> u64 { - if a < b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4718,26 +4598,14 @@ mod tests { unsafe fn test_vcle_s64() { test_cmp_s64( |i, j| vcle_s64(i, j), - |a: i64, b: i64| -> u64 { - if a <= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s64() { testq_cmp_s64( |i, j| vcleq_s64(i, j), - |a: i64, b: i64| -> u64 { - if a <= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4745,26 +4613,14 @@ mod tests { unsafe fn test_vcle_u64() { test_cmp_u64( |i, j| vcle_u64(i, j), - |a: u64, b: u64| -> u64 { - if a <= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u64() { testq_cmp_u64( |i, j| vcleq_u64(i, j), - |a: u64, b: u64| -> u64 { - if a <= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4772,26 +4628,14 @@ mod tests { unsafe fn test_vleq_f64() { test_cmp_f64( |i, j| vcle_f64(i, j), - |a: f64, b: f64| -> u64 { - if a <= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_f64() { testq_cmp_f64( |i, j| vcleq_f64(i, j), - |a: f64, b: f64| -> u64 { - if a <= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4799,26 +4643,14 @@ mod tests { unsafe fn test_vcge_s64() { test_cmp_s64( |i, j| vcge_s64(i, j), - |a: i64, b: i64| -> u64 { - if a >= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s64() { testq_cmp_s64( |i, j| vcgeq_s64(i, j), - |a: i64, b: i64| -> u64 
{ - if a >= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: i64, b: i64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4826,26 +4658,14 @@ mod tests { unsafe fn test_vcge_u64() { test_cmp_u64( |i, j| vcge_u64(i, j), - |a: u64, b: u64| -> u64 { - if a >= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u64() { testq_cmp_u64( |i, j| vcgeq_u64(i, j), - |a: u64, b: u64| -> u64 { - if a >= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: u64, b: u64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } @@ -4853,26 +4673,14 @@ mod tests { unsafe fn test_vgeq_f64() { test_cmp_f64( |i, j| vcge_f64(i, j), - |a: f64, b: f64| -> u64 { - if a >= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_f64() { testq_cmp_f64( |i, j| vcgeq_f64(i, j), - |a: f64, b: f64| -> u64 { - if a >= b { - 0xFFFFFFFFFFFFFFFF - } else { - 0 - } - }, + |a: f64, b: f64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, ); } diff --git a/crates/core_arch/src/arm/dsp.rs b/crates/core_arch/src/arm/dsp.rs index 9c03b38884..22517e5929 100644 --- a/crates/core_arch/src/arm/dsp.rs +++ b/crates/core_arch/src/arm/dsp.rs @@ -244,7 +244,7 @@ pub unsafe fn __smlawt(a: i32, b: i32, c: i32) -> i32 { mod tests { use crate::core_arch::{ arm::*, - simd::{i16x2, i8x4, u8x4}, + simd::{i8x4, i16x2, u8x4}, }; use std::mem::transmute; use stdarch_test::simd_test; diff --git a/crates/core_arch/src/arm/simd32.rs b/crates/core_arch/src/arm/simd32.rs index 0d7c2328e2..2a9908ab2b 100644 --- a/crates/core_arch/src/arm/simd32.rs +++ b/crates/core_arch/src/arm/simd32.rs @@ -500,7 +500,7 @@ pub unsafe fn __usada8(a: int8x4_t, b: int8x4_t, c: u32) -> u32 { #[cfg(test)] mod tests { - use crate::core_arch::simd::{i16x2, i8x4, u8x4}; + use crate::core_arch::simd::{i8x4, i16x2, u8x4}; use std::mem::transmute; use stdarch_test::simd_test; diff --git a/crates/core_arch/src/arm_shared/crypto.rs b/crates/core_arch/src/arm_shared/crypto.rs index 2dcd6fdaf0..07c96008d9 100644 --- a/crates/core_arch/src/arm_shared/crypto.rs +++ b/crates/core_arch/src/arm_shared/crypto.rs @@ -1,4 +1,4 @@ -use crate::core_arch::arm_shared::{uint32x4_t, uint8x16_t}; +use crate::core_arch::arm_shared::{uint8x16_t, uint32x4_t}; #[allow(improper_ctypes)] unsafe extern "unadjusted" { @@ -418,7 +418,9 @@ mod tests { let r: u8x16 = mem::transmute(vaesimcq_u8(data)); assert_eq!( r, - u8x16::new(43, 60, 33, 50, 103, 80, 125, 70, 43, 60, 33, 50, 103, 80, 125, 70) + u8x16::new( + 43, 60, 33, 50, 103, 80, 125, 70, 43, 60, 33, 50, 103, 80, 125, 70 + ) ); } diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs index 2a28b71b29..b7854051ae 100644 --- a/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/crates/core_arch/src/arm_shared/neon/mod.rs @@ -13818,78 +13818,42 @@ mod tests { unsafe fn test_vceq_s8() { test_cmp_s8( |i, j| vceq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a == b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a == b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s8() { testq_cmp_s8( |i, j| vceqq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a == b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a == b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_s16() { 
test_cmp_s16( |i, j| vceq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a == b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a == b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s16() { testq_cmp_s16( |i, j| vceqq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a == b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a == b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_s32() { test_cmp_s32( |i, j| vceq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s32() { testq_cmp_s32( |i, j| vceqq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } @@ -13897,78 +13861,42 @@ mod tests { unsafe fn test_vceq_u8() { test_cmp_u8( |i, j| vceq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a == b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a == b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u8() { testq_cmp_u8( |i, j| vceqq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a == b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a == b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_u16() { test_cmp_u16( |i, j| vceq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a == b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a == b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u16() { testq_cmp_u16( |i, j| vceqq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a == b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a == b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_u32() { test_cmp_u32( |i, j| vceq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u32() { testq_cmp_u32( |i, j| vceqq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } @@ -13976,26 +13904,14 @@ mod tests { unsafe fn test_vceq_f32() { test_cmp_f32( |i, j| vcge_f32(i, j), - |a: f32, b: f32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_f32() { testq_cmp_f32( |i, j| vcgeq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14003,78 +13919,42 @@ mod tests { unsafe fn test_vcgt_s8() { test_cmp_s8( |i, j| vcgt_s8(i, j), - |a: i8, b: i8| -> u8 { - if a > b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a > b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_s8() { testq_cmp_s8( |i, j| vcgtq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a > b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a > b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_s16() { test_cmp_s16( |i, j| vcgt_s16(i, j), - |a: i16, b: i16| -> u16 { - if a > b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a > b { 0xFFFF } else { 0 } }, ); } 
#[simd_test(enable = "neon")] unsafe fn test_vcgtq_s16() { testq_cmp_s16( |i, j| vcgtq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a > b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a > b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_s32() { test_cmp_s32( |i, j| vcgt_s32(i, j), - |a: i32, b: i32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_s32() { testq_cmp_s32( |i, j| vcgtq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14082,78 +13962,42 @@ mod tests { unsafe fn test_vcgt_u8() { test_cmp_u8( |i, j| vcgt_u8(i, j), - |a: u8, b: u8| -> u8 { - if a > b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a > b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u8() { testq_cmp_u8( |i, j| vcgtq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a > b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a > b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_u16() { test_cmp_u16( |i, j| vcgt_u16(i, j), - |a: u16, b: u16| -> u16 { - if a > b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a > b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u16() { testq_cmp_u16( |i, j| vcgtq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a > b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a > b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_u32() { test_cmp_u32( |i, j| vcgt_u32(i, j), - |a: u32, b: u32| -> u32 { - if a > b { - 0xFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a > b { 0xFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u32() { testq_cmp_u32( |i, j| vcgtq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14161,26 +14005,14 @@ mod tests { unsafe fn test_vcgt_f32() { test_cmp_f32( |i, j| vcgt_f32(i, j), - |a: f32, b: f32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_f32() { testq_cmp_f32( |i, j| vcgtq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14188,78 +14020,42 @@ mod tests { unsafe fn test_vclt_s8() { test_cmp_s8( |i, j| vclt_s8(i, j), - |a: i8, b: i8| -> u8 { - if a < b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a < b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s8() { testq_cmp_s8( |i, j| vcltq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a < b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a < b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_s16() { test_cmp_s16( |i, j| vclt_s16(i, j), - |a: i16, b: i16| -> u16 { - if a < b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a < b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s16() { testq_cmp_s16( |i, j| vcltq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a < b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a < b { 
0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_s32() { test_cmp_s32( |i, j| vclt_s32(i, j), - |a: i32, b: i32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s32() { testq_cmp_s32( |i, j| vcltq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14267,78 +14063,42 @@ mod tests { unsafe fn test_vclt_u8() { test_cmp_u8( |i, j| vclt_u8(i, j), - |a: u8, b: u8| -> u8 { - if a < b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a < b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u8() { testq_cmp_u8( |i, j| vcltq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a < b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a < b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_u16() { test_cmp_u16( |i, j| vclt_u16(i, j), - |a: u16, b: u16| -> u16 { - if a < b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a < b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u16() { testq_cmp_u16( |i, j| vcltq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a < b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a < b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_u32() { test_cmp_u32( |i, j| vclt_u32(i, j), - |a: u32, b: u32| -> u32 { - if a < b { - 0xFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a < b { 0xFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u32() { testq_cmp_u32( |i, j| vcltq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14346,26 +14106,14 @@ mod tests { unsafe fn test_vclt_f32() { test_cmp_f32( |i, j| vclt_f32(i, j), - |a: f32, b: f32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_f32() { testq_cmp_f32( |i, j| vcltq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14373,78 +14121,42 @@ mod tests { unsafe fn test_vcle_s8() { test_cmp_s8( |i, j| vcle_s8(i, j), - |a: i8, b: i8| -> u8 { - if a <= b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a <= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s8() { testq_cmp_s8( |i, j| vcleq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a <= b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a <= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_s16() { test_cmp_s16( |i, j| vcle_s16(i, j), - |a: i16, b: i16| -> u16 { - if a <= b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s16() { testq_cmp_s16( |i, j| vcleq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a <= b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_s32() { test_cmp_s32( |i, j| vcle_s32(i, j), - |a: i32, b: i32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + 
|a: i32, b: i32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s32() { testq_cmp_s32( |i, j| vcleq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14452,78 +14164,42 @@ mod tests { unsafe fn test_vcle_u8() { test_cmp_u8( |i, j| vcle_u8(i, j), - |a: u8, b: u8| -> u8 { - if a <= b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a <= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u8() { testq_cmp_u8( |i, j| vcleq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a <= b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a <= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_u16() { test_cmp_u16( |i, j| vcle_u16(i, j), - |a: u16, b: u16| -> u16 { - if a <= b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u16() { testq_cmp_u16( |i, j| vcleq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a <= b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_u32() { test_cmp_u32( |i, j| vcle_u32(i, j), - |a: u32, b: u32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u32() { testq_cmp_u32( |i, j| vcleq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14531,26 +14207,14 @@ mod tests { unsafe fn test_vcle_f32() { test_cmp_f32( |i, j| vcle_f32(i, j), - |a: f32, b: f32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_f32() { testq_cmp_f32( |i, j| vcleq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14558,78 +14222,42 @@ mod tests { unsafe fn test_vcge_s8() { test_cmp_s8( |i, j| vcge_s8(i, j), - |a: i8, b: i8| -> u8 { - if a >= b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a >= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s8() { testq_cmp_s8( |i, j| vcgeq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a >= b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a >= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_s16() { test_cmp_s16( |i, j| vcge_s16(i, j), - |a: i16, b: i16| -> u16 { - if a >= b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s16() { testq_cmp_s16( |i, j| vcgeq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a >= b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_s32() { test_cmp_s32( |i, j| vcge_s32(i, j), - |a: i32, b: i32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s32() { testq_cmp_s32( |i, j| vcgeq_s32(i, j), - |a: i32, b: 
i32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14637,78 +14265,42 @@ mod tests { unsafe fn test_vcge_u8() { test_cmp_u8( |i, j| vcge_u8(i, j), - |a: u8, b: u8| -> u8 { - if a >= b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a >= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u8() { testq_cmp_u8( |i, j| vcgeq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a >= b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a >= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_u16() { test_cmp_u16( |i, j| vcge_u16(i, j), - |a: u16, b: u16| -> u16 { - if a >= b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u16() { testq_cmp_u16( |i, j| vcgeq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a >= b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_u32() { test_cmp_u32( |i, j| vcge_u32(i, j), - |a: u32, b: u32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u32() { testq_cmp_u32( |i, j| vcgeq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -14716,26 +14308,14 @@ mod tests { unsafe fn test_vcge_f32() { test_cmp_f32( |i, j| vcge_f32(i, j), - |a: f32, b: f32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_f32() { testq_cmp_f32( |i, j| vcgeq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } diff --git a/crates/core_arch/src/macros.rs b/crates/core_arch/src/macros.rs index e2ddcde98a..f9c650b2ca 100644 --- a/crates/core_arch/src/macros.rs +++ b/crates/core_arch/src/macros.rs @@ -131,17 +131,11 @@ macro_rules! simd_shuffle { #[allow(unused)] macro_rules! simd_insert { - ($x:expr, $idx:expr, $val:expr $(,)?) => {{ - $crate::intrinsics::simd::simd_insert($x, const { $idx }, $val) - }}; + ($x:expr, $idx:expr, $val:expr $(,)?) => {{ $crate::intrinsics::simd::simd_insert($x, const { $idx }, $val) }}; } #[allow(unused)] macro_rules! simd_extract { - ($x:expr, $idx:expr $(,)?) => {{ - $crate::intrinsics::simd::simd_extract($x, const { $idx }) - }}; - ($x:expr, $idx:expr, $ty:ty $(,)?) => {{ - $crate::intrinsics::simd::simd_extract::<_, $ty>($x, const { $idx }) - }}; + ($x:expr, $idx:expr $(,)?) => {{ $crate::intrinsics::simd::simd_extract($x, const { $idx }) }}; + ($x:expr, $idx:expr, $ty:ty $(,)?) 
=> {{ $crate::intrinsics::simd::simd_extract::<_, $ty>($x, const { $idx }) }}; } diff --git a/crates/core_arch/src/mod.rs b/crates/core_arch/src/mod.rs index 87ce18ca7b..d6714a1649 100644 --- a/crates/core_arch/src/mod.rs +++ b/crates/core_arch/src/mod.rs @@ -72,8 +72,8 @@ pub mod arch { #[doc(cfg(any(target_arch = "riscv32")))] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub mod riscv32 { - pub use crate::core_arch::riscv32::*; pub use crate::core_arch::riscv_shared::*; + pub use crate::core_arch::riscv32::*; } /// Platform-specific intrinsics for the `riscv64` platform. diff --git a/crates/core_arch/src/powerpc/altivec.rs b/crates/core_arch/src/powerpc/altivec.rs index 274ec88dde..7d0081f03a 100644 --- a/crates/core_arch/src/powerpc/altivec.rs +++ b/crates/core_arch/src/powerpc/altivec.rs @@ -4720,7 +4720,9 @@ mod tests { let v: u8x16 = transmute(vec_ld(0, (pat.as_ptr() as *const u8).offset(off))); assert_eq!( v, - u8x16::new(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31) + u8x16::new( + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 + ) ); } } @@ -4778,7 +4780,9 @@ mod tests { let v: u8x16 = transmute(vec_ldl(0, (pat.as_ptr() as *const u8).offset(off))); assert_eq!( v, - u8x16::new(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31) + u8x16::new( + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 + ) ); } } diff --git a/crates/core_arch/src/wasm32/simd128.rs b/crates/core_arch/src/wasm32/simd128.rs index c54d996622..ce74323b68 100644 --- a/crates/core_arch/src/wasm32/simd128.rs +++ b/crates/core_arch/src/wasm32/simd128.rs @@ -4361,7 +4361,9 @@ mod tests { }; assert_eq!( bytes, - [-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] + [ + -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16 + ] ); } diff --git a/crates/core_arch/src/x86/avx2.rs b/crates/core_arch/src/x86/avx2.rs index 0bc89ba45c..10c1f2de8d 100644 --- a/crates/core_arch/src/x86/avx2.rs +++ b/crates/core_arch/src/x86/avx2.rs @@ -3785,7 +3785,7 @@ unsafe extern "C" { ) -> __m256d; #[link_name = "llvm.x86.avx2.gather.d.ps"] fn pgatherdps(src: __m128, slice: *const i8, offsets: i32x4, mask: __m128, scale: i8) - -> __m128; + -> __m128; #[link_name = "llvm.x86.avx2.gather.d.ps.256"] fn vpgatherdps( src: __m256, @@ -3796,7 +3796,7 @@ unsafe extern "C" { ) -> __m256; #[link_name = "llvm.x86.avx2.gather.q.ps"] fn pgatherqps(src: __m128, slice: *const i8, offsets: i64x2, mask: __m128, scale: i8) - -> __m128; + -> __m128; #[link_name = "llvm.x86.avx2.gather.q.ps.256"] fn vpgatherqps( src: __m128, diff --git a/crates/core_arch/src/x86/avx512bitalg.rs b/crates/core_arch/src/x86/avx512bitalg.rs index 819a52110d..5640ef8bf4 100644 --- a/crates/core_arch/src/x86/avx512bitalg.rs +++ b/crates/core_arch/src/x86/avx512bitalg.rs @@ -7,19 +7,19 @@ //! //! 
[intel64_ref]: http://www.intel.de/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf -use crate::core_arch::simd::i16x16; -use crate::core_arch::simd::i16x32; -use crate::core_arch::simd::i16x8; use crate::core_arch::simd::i8x16; use crate::core_arch::simd::i8x32; use crate::core_arch::simd::i8x64; +use crate::core_arch::simd::i16x8; +use crate::core_arch::simd::i16x16; +use crate::core_arch::simd::i16x32; use crate::core_arch::x86::__m128i; use crate::core_arch::x86::__m256i; use crate::core_arch::x86::__m512i; +use crate::core_arch::x86::__mmask8; use crate::core_arch::x86::__mmask16; use crate::core_arch::x86::__mmask32; use crate::core_arch::x86::__mmask64; -use crate::core_arch::x86::__mmask8; use crate::intrinsics::simd::{simd_ctpop, simd_select_bitmask}; use crate::mem::transmute; diff --git a/crates/core_arch/src/x86/avx512bw.rs b/crates/core_arch/src/x86/avx512bw.rs index 5e79142ae6..caac75b346 100644 --- a/crates/core_arch/src/x86/avx512bw.rs +++ b/crates/core_arch/src/x86/avx512bw.rs @@ -9572,7 +9572,7 @@ pub unsafe fn _mm512_movepi8_mask(a: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovmskb))] // should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes less cycles than - // using vpmovb2m plus converting the mask register to a standard register. +// using vpmovb2m plus converting the mask register to a standard register. pub unsafe fn _mm256_movepi8_mask(a: __m256i) -> __mmask32 { let filter = _mm256_set1_epi8(1 << 7); let a = _mm256_and_si256(a, filter); @@ -9586,7 +9586,7 @@ pub unsafe fn _mm256_movepi8_mask(a: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovmskb))] // should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes less cycles than - // using vpmovb2m plus converting the mask register to a standard register. +// using vpmovb2m plus converting the mask register to a standard register. 
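The `use` reorderings throughout this patch (the avx512bitalg.rs block above and the std_detect imports below) come from the 2024 style edition's version sorting, which puts ASCII uppercase before lowercase and compares embedded numbers by value, so `Feature` now sorts before `bit` and `i8x16` before `i16x8`. A small self-contained sketch with placeholder names (none of them real stdarch or std_detect items):

```rust
// Placeholder module standing in for the kind of API whose imports get
// re-sorted; none of these names are real stdarch or std_detect items.
mod detect {
    pub struct Feature;
    pub fn bit() -> u32 {
        1
    }
    pub fn cache() -> u32 {
        2
    }
}

// Earlier style editions left this group as `{bit, cache, Feature}`;
// the 2024 style edition's version sorting puts `Feature` first, exactly
// as in the diffs in this patch.
use detect::{Feature, bit, cache};

fn main() {
    let _marker = Feature;
    println!("{}", bit() + cache());
}
```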
pub unsafe fn _mm_movepi8_mask(a: __m128i) -> __mmask16 { let filter = _mm_set1_epi8(1 << 7); let a = _mm_and_si128(a, filter); diff --git a/crates/core_arch/src/x86/avx512dq.rs b/crates/core_arch/src/x86/avx512dq.rs index 9e5ac789af..66d0feebb6 100644 --- a/crates/core_arch/src/x86/avx512dq.rs +++ b/crates/core_arch/src/x86/avx512dq.rs @@ -6836,7 +6836,7 @@ unsafe extern "C" { fn vrangeps_256(a: f32x8, b: f32x8, imm8: i32, src: f32x8, k: __mmask8) -> f32x8; #[link_name = "llvm.x86.avx512.mask.range.ps.512"] fn vrangeps_512(a: f32x16, b: f32x16, imm8: i32, src: f32x16, k: __mmask16, sae: i32) - -> f32x16; + -> f32x16; #[link_name = "llvm.x86.avx512.mask.range.sd"] fn vrangesd(a: f64x2, b: f64x2, src: f64x2, k: __mmask8, imm8: i32, sae: i32) -> f64x2; diff --git a/crates/core_arch/src/x86/avx512f.rs b/crates/core_arch/src/x86/avx512f.rs index 5c45d815f2..45ba31ec38 100644 --- a/crates/core_arch/src/x86/avx512f.rs +++ b/crates/core_arch/src/x86/avx512f.rs @@ -26220,12 +26220,16 @@ pub unsafe fn _mm512_alignr_epi32(a: __m512i, b: __m512i) -> __ 0 => simd_shuffle!( a, b, - [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,], + [ + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + ], ), 1 => simd_shuffle!( a, b, - [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0,], + [ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, + ], ), 2 => simd_shuffle!( a, diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs index b9e269f652..20dace5e9c 100644 --- a/crates/core_arch/src/x86/avx512fp16.rs +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -10866,7 +10866,9 @@ pub unsafe fn _mm512_reduce_add_ph(a: __m512h) -> f16 { let q = simd_shuffle!( a, a, - [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + [ + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 + ] ); _mm256_reduce_add_ph(_mm256_add_ph(p, q)) } @@ -10911,7 +10913,9 @@ pub unsafe fn _mm512_reduce_mul_ph(a: __m512h) -> f16 { let q = simd_shuffle!( a, a, - [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + [ + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 + ] ); _mm256_reduce_mul_ph(_mm256_mul_ph(p, q)) } @@ -10957,7 +10961,9 @@ pub unsafe fn _mm512_reduce_min_ph(a: __m512h) -> f16 { let q = simd_shuffle!( a, a, - [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + [ + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 + ] ); _mm256_reduce_min_ph(_mm256_min_ph(p, q)) } @@ -11003,7 +11009,9 @@ pub unsafe fn _mm512_reduce_max_ph(a: __m512h) -> f16 { let q = simd_shuffle!( a, a, - [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + [ + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 + ] ); _mm256_reduce_max_ph(_mm256_max_ph(p, q)) } @@ -16059,10 +16067,10 @@ unsafe extern "C" { fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"] fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) - -> __m512; + -> __m512; #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"] fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) - -> __m512; + -> __m512; #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"] fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"] @@ -16179,7 +16187,7 @@ unsafe extern "C" { fn 
vreduceph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; #[link_name = "llvm.x86.avx512fp16.mask.reduce.sh"] fn vreducesh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, imm8: i32, sae: i32) - -> __m128h; + -> __m128h; #[link_name = "llvm.x86.avx512fp16.mask.fpclass.sh"] fn vfpclasssh(a: __m128h, imm8: i32, k: __mmask8) -> __mmask8; diff --git a/crates/core_arch/src/x86/avx512vpopcntdq.rs b/crates/core_arch/src/x86/avx512vpopcntdq.rs index d8f21e5ab0..0bc343acae 100644 --- a/crates/core_arch/src/x86/avx512vpopcntdq.rs +++ b/crates/core_arch/src/x86/avx512vpopcntdq.rs @@ -11,8 +11,8 @@ use crate::core_arch::simd::*; use crate::core_arch::x86::__m128i; use crate::core_arch::x86::__m256i; use crate::core_arch::x86::__m512i; -use crate::core_arch::x86::__mmask16; use crate::core_arch::x86::__mmask8; +use crate::core_arch::x86::__mmask16; use crate::intrinsics::simd::{simd_ctpop, simd_select_bitmask}; use crate::mem::transmute; diff --git a/crates/core_arch/src/x86/sse2.rs b/crates/core_arch/src/x86/sse2.rs index 2f5d234c74..08b3712ea8 100644 --- a/crates/core_arch/src/x86/sse2.rs +++ b/crates/core_arch/src/x86/sse2.rs @@ -449,11 +449,7 @@ pub unsafe fn _mm_slli_si128(a: __m128i) -> __m128i { unsafe fn _mm_slli_si128_impl(a: __m128i) -> __m128i { const fn mask(shift: i32, i: u32) -> u32 { let shift = shift as u32 & 0xff; - if shift > 15 { - i - } else { - 16 - shift + i - } + if shift > 15 { i } else { 16 - shift + i } } transmute::(simd_shuffle!( i8x16::ZERO, diff --git a/crates/core_arch/src/x86/test.rs b/crates/core_arch/src/x86/test.rs index dd78321135..050a51f9fa 100644 --- a/crates/core_arch/src/x86/test.rs +++ b/crates/core_arch/src/x86/test.rs @@ -133,7 +133,7 @@ mod x86_polyfill { #[cfg(target_arch = "x86_64")] mod x86_polyfill { - pub use crate::core_arch::x86_64::{_mm256_insert_epi64, _mm_insert_epi64}; + pub use crate::core_arch::x86_64::{_mm_insert_epi64, _mm256_insert_epi64}; } pub use self::x86_polyfill::*; diff --git a/crates/core_arch/src/x86_64/amx.rs b/crates/core_arch/src/x86_64/amx.rs index 7c437e9704..4fd086dce1 100644 --- a/crates/core_arch/src/x86_64/amx.rs +++ b/crates/core_arch/src/x86_64/amx.rs @@ -292,7 +292,7 @@ mod tests { use core::mem::transmute; use stdarch_test::simd_test; #[cfg(target_os = "linux")] - use syscalls::{syscall, Sysno}; + use syscalls::{Sysno, syscall}; #[allow(non_camel_case_types)] #[repr(packed)] diff --git a/crates/intrinsic-test/src/argument.rs b/crates/intrinsic-test/src/argument.rs index e80760ca3a..ebabe31927 100644 --- a/crates/intrinsic-test/src/argument.rs +++ b/crates/intrinsic-test/src/argument.rs @@ -1,9 +1,9 @@ use std::ops::Range; +use crate::Language; use crate::format::Indentation; use crate::json_parser::ArgPrep; use crate::types::{IntrinsicType, TypeKind}; -use crate::Language; /// An argument for the intrinsic. 
#[derive(Debug, PartialEq, Clone)] diff --git a/crates/intrinsic-test/src/types.rs b/crates/intrinsic-test/src/types.rs index 594fe4d2d2..1eb44896f7 100644 --- a/crates/intrinsic-test/src/types.rs +++ b/crates/intrinsic-test/src/types.rs @@ -3,9 +3,9 @@ use std::str::FromStr; use itertools::Itertools as _; +use crate::Language; use crate::format::Indentation; use crate::values::value_for_array; -use crate::Language; #[derive(Debug, PartialEq, Copy, Clone)] pub enum TypeKind { diff --git a/crates/std_detect/src/detect/os/aarch64.rs b/crates/std_detect/src/detect/os/aarch64.rs index acc616c0e8..1ff2a17e6e 100644 --- a/crates/std_detect/src/detect/os/aarch64.rs +++ b/crates/std_detect/src/detect/os/aarch64.rs @@ -17,7 +17,7 @@ //! - [Linux documentation](https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt) //! - [ARM documentation](https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers?lang=en) -use crate::detect::{cache, Feature}; +use crate::detect::{Feature, cache}; use core::arch::asm; /// Try to read the features from the system registers. diff --git a/crates/std_detect/src/detect/os/darwin/aarch64.rs b/crates/std_detect/src/detect/os/darwin/aarch64.rs index aca10e90b5..6699a66b1a 100644 --- a/crates/std_detect/src/detect/os/darwin/aarch64.rs +++ b/crates/std_detect/src/detect/os/darwin/aarch64.rs @@ -2,7 +2,7 @@ //! //! -use crate::detect::{cache, Feature}; +use crate::detect::{Feature, cache}; use core::ffi::CStr; #[inline] diff --git a/crates/std_detect/src/detect/os/freebsd/arm.rs b/crates/std_detect/src/detect/os/freebsd/arm.rs index d904eaebd1..0a15156e1b 100644 --- a/crates/std_detect/src/detect/os/freebsd/arm.rs +++ b/crates/std_detect/src/detect/os/freebsd/arm.rs @@ -1,7 +1,7 @@ //! Run-time feature detection for ARM on FreeBSD use super::auxvec; -use crate::detect::{cache, Feature}; +use crate::detect::{Feature, cache}; // Defined in machine/elf.h. // https://github.com/freebsd/freebsd-src/blob/deb63adf945d446ed91a9d84124c71f15ae571d1/sys/arm/include/elf.h diff --git a/crates/std_detect/src/detect/os/freebsd/powerpc.rs b/crates/std_detect/src/detect/os/freebsd/powerpc.rs index 6bfab631a9..d03af68cd0 100644 --- a/crates/std_detect/src/detect/os/freebsd/powerpc.rs +++ b/crates/std_detect/src/detect/os/freebsd/powerpc.rs @@ -1,7 +1,7 @@ //! Run-time feature detection for PowerPC on FreeBSD. use super::auxvec; -use crate::detect::{cache, Feature}; +use crate::detect::{Feature, cache}; pub(crate) fn detect_features() -> cache::Initializer { let mut value = cache::Initializer::default(); diff --git a/crates/std_detect/src/detect/os/linux/aarch64.rs b/crates/std_detect/src/detect/os/linux/aarch64.rs index 27816c09e6..d13aa0168c 100644 --- a/crates/std_detect/src/detect/os/linux/aarch64.rs +++ b/crates/std_detect/src/detect/os/linux/aarch64.rs @@ -1,7 +1,7 @@ //! Run-time feature detection for Aarch64 on Linux. use super::auxvec; -use crate::detect::{bit, cache, Feature}; +use crate::detect::{Feature, bit, cache}; /// Try to read the features from the auxiliary vector, and if that fails, try /// to read them from /proc/cpuinfo. diff --git a/crates/std_detect/src/detect/os/linux/arm.rs b/crates/std_detect/src/detect/os/linux/arm.rs index 4dc9590e18..7a68d2f87b 100644 --- a/crates/std_detect/src/detect/os/linux/arm.rs +++ b/crates/std_detect/src/detect/os/linux/arm.rs @@ -1,7 +1,7 @@ //! Run-time feature detection for ARM on Linux. 
use super::auxvec; -use crate::detect::{bit, cache, Feature}; +use crate::detect::{Feature, bit, cache}; /// Try to read the features from the auxiliary vector, and if that fails, try /// to read them from /proc/cpuinfo. diff --git a/crates/std_detect/src/detect/os/linux/loongarch.rs b/crates/std_detect/src/detect/os/linux/loongarch.rs index 27c800ec07..a46a4a9d08 100644 --- a/crates/std_detect/src/detect/os/linux/loongarch.rs +++ b/crates/std_detect/src/detect/os/linux/loongarch.rs @@ -1,7 +1,7 @@ //! Run-time feature detection for LoongArch on Linux. use super::auxvec; -use crate::detect::{bit, cache, Feature}; +use crate::detect::{Feature, bit, cache}; use core::arch::asm; /// Try to read the features from the auxiliary vector. diff --git a/crates/std_detect/src/detect/os/linux/mips.rs b/crates/std_detect/src/detect/os/linux/mips.rs index f1bea38252..159b053ba6 100644 --- a/crates/std_detect/src/detect/os/linux/mips.rs +++ b/crates/std_detect/src/detect/os/linux/mips.rs @@ -1,7 +1,7 @@ //! Run-time feature detection for MIPS on Linux. use super::auxvec; -use crate::detect::{bit, cache, Feature}; +use crate::detect::{Feature, bit, cache}; /// Try to read the features from the auxiliary vector, and if that fails, try /// to read them from `/proc/cpuinfo`. diff --git a/crates/std_detect/src/detect/os/linux/powerpc.rs b/crates/std_detect/src/detect/os/linux/powerpc.rs index c3308e8158..5ebae195f2 100644 --- a/crates/std_detect/src/detect/os/linux/powerpc.rs +++ b/crates/std_detect/src/detect/os/linux/powerpc.rs @@ -1,7 +1,7 @@ //! Run-time feature detection for PowerPC on Linux. use super::auxvec; -use crate::detect::{cache, Feature}; +use crate::detect::{Feature, cache}; /// Try to read the features from the auxiliary vector, and if that fails, try /// to read them from /proc/cpuinfo. diff --git a/crates/std_detect/src/detect/os/linux/riscv.rs b/crates/std_detect/src/detect/os/linux/riscv.rs index 91a85d58e7..d3865cb5e9 100644 --- a/crates/std_detect/src/detect/os/linux/riscv.rs +++ b/crates/std_detect/src/detect/os/linux/riscv.rs @@ -1,7 +1,7 @@ //! Run-time feature detection for RISC-V on Linux. use super::auxvec; -use crate::detect::{bit, cache, Feature}; +use crate::detect::{Feature, bit, cache}; /// Read list of supported features from the auxiliary vector. pub(crate) fn detect_features() -> cache::Initializer { diff --git a/crates/std_detect/src/detect/os/linux/s390x.rs b/crates/std_detect/src/detect/os/linux/s390x.rs index 91ce55e94e..e0c79d167e 100644 --- a/crates/std_detect/src/detect/os/linux/s390x.rs +++ b/crates/std_detect/src/detect/os/linux/s390x.rs @@ -1,7 +1,7 @@ //! Run-time feature detection for s390x on Linux. use super::auxvec; -use crate::detect::{bit, cache, Feature}; +use crate::detect::{Feature, bit, cache}; /// Try to read the features from the auxiliary vector pub(crate) fn detect_features() -> cache::Initializer { diff --git a/crates/std_detect/src/detect/os/windows/aarch64.rs b/crates/std_detect/src/detect/os/windows/aarch64.rs index ec31701aec..4bda06d673 100644 --- a/crates/std_detect/src/detect/os/windows/aarch64.rs +++ b/crates/std_detect/src/detect/os/windows/aarch64.rs @@ -1,6 +1,6 @@ //! Run-time feature detection for Aarch64 on Windows. -use crate::detect::{cache, Feature}; +use crate::detect::{Feature, cache}; /// Try to read the features using IsProcessorFeaturePresent. 
pub(crate) fn detect_features() -> cache::Initializer { diff --git a/crates/std_detect/src/detect/os/x86.rs b/crates/std_detect/src/detect/os/x86.rs index 979fd9ca1f..c28f20baab 100644 --- a/crates/std_detect/src/detect/os/x86.rs +++ b/crates/std_detect/src/detect/os/x86.rs @@ -7,7 +7,7 @@ use core::arch::x86_64::*; use core::mem; -use crate::detect::{bit, cache, Feature}; +use crate::detect::{Feature, bit, cache}; /// Run-time feature detection on x86 works by using the CPUID instruction. /// diff --git a/crates/stdarch-gen-arm/src/assert_instr.rs b/crates/stdarch-gen-arm/src/assert_instr.rs index ce1bbe8b55..799b3379a8 100644 --- a/crates/stdarch-gen-arm/src/assert_instr.rs +++ b/crates/stdarch-gen-arm/src/assert_instr.rs @@ -1,7 +1,7 @@ use proc_macro2::TokenStream; -use quote::{format_ident, quote, ToTokens, TokenStreamExt}; +use quote::{ToTokens, TokenStreamExt, format_ident, quote}; use serde::de::{self, MapAccess, Visitor}; -use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize}; +use serde::{Deserialize, Deserializer, Serialize, ser::SerializeSeq}; use std::fmt; use crate::{ diff --git a/crates/stdarch-gen-arm/src/context.rs b/crates/stdarch-gen-arm/src/context.rs index 8405428b7a..aa29eda820 100644 --- a/crates/stdarch-gen-arm/src/context.rs +++ b/crates/stdarch-gen-arm/src/context.rs @@ -240,7 +240,9 @@ impl LocalContext { format!("{variable}, 0, {max}"), )) } else { - Err(format!("can't make an assertion out of constraint {self:?}: no types are being used")) + Err(format!( + "can't make an assertion out of constraint {self:?}: no types are being used" + )) } } _ => unreachable!("constraints were not built successfully!"), diff --git a/crates/stdarch-gen-arm/src/expression.rs b/crates/stdarch-gen-arm/src/expression.rs index 8398467958..4a572db3e8 100644 --- a/crates/stdarch-gen-arm/src/expression.rs +++ b/crates/stdarch-gen-arm/src/expression.rs @@ -1,7 +1,7 @@ use itertools::Itertools; use lazy_static::lazy_static; use proc_macro2::{Literal, Punct, Spacing, TokenStream}; -use quote::{format_ident, quote, ToTokens, TokenStreamExt}; +use quote::{ToTokens, TokenStreamExt, format_ident, quote}; use regex::Regex; use serde::de::{self, MapAccess, Visitor}; use serde::{Deserialize, Deserializer, Serialize}; diff --git a/crates/stdarch-gen-arm/src/input.rs b/crates/stdarch-gen-arm/src/input.rs index ca104fa9c0..adefbf3215 100644 --- a/crates/stdarch-gen-arm/src/input.rs +++ b/crates/stdarch-gen-arm/src/input.rs @@ -1,5 +1,5 @@ use itertools::Itertools; -use serde::{de, Deserialize, Deserializer, Serialize}; +use serde::{Deserialize, Deserializer, Serialize, de}; use crate::{ context::{self, GlobalContext}, @@ -87,7 +87,7 @@ impl Ord for InputType { } mod many_or_one { - use serde::{de::Deserializer, ser::Serializer, Deserialize, Serialize}; + use serde::{Deserialize, Serialize, de::Deserializer, ser::Serializer}; pub fn serialize(vec: &Vec, serializer: S) -> Result where diff --git a/crates/stdarch-gen-arm/src/intrinsic.rs b/crates/stdarch-gen-arm/src/intrinsic.rs index 60470dd77e..cabe58f9d6 100644 --- a/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/crates/stdarch-gen-arm/src/intrinsic.rs @@ -1,6 +1,6 @@ use itertools::Itertools; use proc_macro2::{Delimiter, Group, Punct, Spacing, TokenStream}; -use quote::{format_ident, quote, ToTokens, TokenStreamExt}; +use quote::{ToTokens, TokenStreamExt, format_ident, quote}; use serde::{Deserialize, Serialize}; use serde_with::{DeserializeFromStr, SerializeDisplay}; use std::collections::{HashMap, HashSet}; @@ -17,7 +17,7 @@ use 
crate::{ assert_instr::InstructionAssertionMethod, context::{self, ArchitectureSettings, Context, LocalContext, VariableType}, expression::{Expression, FnCall, IdentifierType}, - fn_suffix::{type_to_size, SuffixKind}, + fn_suffix::{SuffixKind, type_to_size}, input::IntrinsicInput, matching::{KindMatchable, SizeMatchable}, typekinds::*, @@ -995,7 +995,10 @@ impl Intrinsic { }; if variant.attr.is_none() && variant.assert_instr.is_none() { - panic!("Error: {} is missing both 'attr' and 'assert_instr' fields. You must either manually declare the attributes using the 'attr' field or use 'assert_instr'!", variant.signature.name.to_string()); + panic!( + "Error: {} is missing both 'attr' and 'assert_instr' fields. You must either manually declare the attributes using the 'attr' field or use 'assert_instr'!", + variant.signature.name.to_string() + ); } if variant.attr.is_some() { @@ -1181,7 +1184,10 @@ impl Intrinsic { .any(|w| matches!(w, Wildcard::NVariant)); if !has_n_wildcard { - return Err(format!("cannot generate `_n` variant for {}, no wildcard {{_n}} was specified in the intrinsic's name", variant.signature.name)); + return Err(format!( + "cannot generate `_n` variant for {}, no wildcard {{_n}} was specified in the intrinsic's name", + variant.signature.name + )); } // Build signature @@ -1205,9 +1211,9 @@ impl Intrinsic { } _ => { return Err(format!( - "cannot generate `_n` variant for {}, the given operand is not a valid SVE type", - variant.signature.name - )) + "cannot generate `_n` variant for {}, the given operand is not a valid SVE type", + variant.signature.name + )); } }; @@ -1434,7 +1440,9 @@ impl Intrinsic { (Some(BaseTypeKind::Poly), Some(BaseTypeKind::Poly)) => ex, (None, None) => ex, - _ => unreachable!("unsupported conversion case from {from_base_type:?} to {to_base_type:?} hit"), + _ => unreachable!( + "unsupported conversion case from {from_base_type:?} to {to_base_type:?} hit" + ), } } else { ex diff --git a/crates/stdarch-gen-arm/src/load_store_tests.rs b/crates/stdarch-gen-arm/src/load_store_tests.rs index 83d0ac975c..a238c5fb33 100644 --- a/crates/stdarch-gen-arm/src/load_store_tests.rs +++ b/crates/stdarch-gen-arm/src/load_store_tests.rs @@ -76,7 +76,10 @@ pub fn generate_load_store_tests( }) .try_collect()?; - assert!(used_stores.into_iter().all(|b| b), "Not all store tests have been paired with a load. Consider generating specifc store-only tests"); + assert!( + used_stores.into_iter().all(|b| b), + "Not all store tests have been paired with a load. 
Consider generating specifc store-only tests" + ); let preamble = TokenStream::from_str(&PREAMBLE).map_err(|e| format!("Preamble is invalid: {e}"))?; diff --git a/crates/stdarch-gen-arm/src/typekinds.rs b/crates/stdarch-gen-arm/src/typekinds.rs index 7a4fed85ce..bf22501b1d 100644 --- a/crates/stdarch-gen-arm/src/typekinds.rs +++ b/crates/stdarch-gen-arm/src/typekinds.rs @@ -1,6 +1,6 @@ use lazy_static::lazy_static; use proc_macro2::TokenStream; -use quote::{quote, ToTokens, TokenStreamExt}; +use quote::{ToTokens, TokenStreamExt, quote}; use regex::Regex; use serde_with::{DeserializeFromStr, SerializeDisplay}; use std::fmt; diff --git a/crates/stdarch-gen-arm/src/wildstring.rs b/crates/stdarch-gen-arm/src/wildstring.rs index 095b18b846..616c1172d4 100644 --- a/crates/stdarch-gen-arm/src/wildstring.rs +++ b/crates/stdarch-gen-arm/src/wildstring.rs @@ -1,6 +1,6 @@ use itertools::Itertools; use proc_macro2::TokenStream; -use quote::{quote, ToTokens, TokenStreamExt}; +use quote::{ToTokens, TokenStreamExt, quote}; use serde_with::{DeserializeFromStr, SerializeDisplay}; use std::str::pattern::Pattern; use std::{fmt, str::FromStr}; diff --git a/crates/stdarch-gen-loongarch/src/main.rs b/crates/stdarch-gen-loongarch/src/main.rs index 160926293b..ea67682b56 100644 --- a/crates/stdarch-gen-loongarch/src/main.rs +++ b/crates/stdarch-gen-loongarch/src/main.rs @@ -495,7 +495,10 @@ fn gen_bind_body( }; } else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "SI" { call_params = if asm_fmts[2].starts_with("si") { - format!("static_assert_simm_bits!(IMM_S{0}, {0});\n __{current_name}(mem_addr, IMM_S{0})", asm_fmts[2].get(2..).unwrap()) + format!( + "static_assert_simm_bits!(IMM_S{0}, {0});\n __{current_name}(mem_addr, IMM_S{0})", + asm_fmts[2].get(2..).unwrap() + ) } else { panic!("unsupported assembly format: {}", asm_fmts[2]) } @@ -515,7 +518,9 @@ fn gen_bind_body( } } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "SI" { call_params = match asm_fmts[2].as_str() { - "si12" => format!("static_assert_simm_bits!(IMM_S12, 12);\n __{current_name}(a, mem_addr, IMM_S12)"), + "si12" => format!( + "static_assert_simm_bits!(IMM_S12, 12);\n __{current_name}(a, mem_addr, IMM_S12)" + ), _ => panic!("unsupported assembly format: {}", asm_fmts[2]), }; } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "DI" { @@ -525,8 +530,14 @@ fn gen_bind_body( }; } else if para_num == 4 { call_params = match (asm_fmts[2].as_str(), current_name.chars().last().unwrap()) { - ("si8", t) => format!("static_assert_simm_bits!(IMM_S8, 8);\n static_assert_uimm_bits!(IMM{0}, {0});\n __{current_name}(a, mem_addr, IMM_S8, IMM{0})", type_to_imm(t)), - (_, _) => panic!("unsupported assembly format: {} for {}", asm_fmts[2], current_name), + ("si8", t) => format!( + "static_assert_simm_bits!(IMM_S8, 8);\n static_assert_uimm_bits!(IMM{0}, {0});\n __{current_name}(a, mem_addr, IMM_S8, IMM{0})", + type_to_imm(t) + ), + (_, _) => panic!( + "unsupported assembly format: {} for {}", + asm_fmts[2], current_name + ), } } let function = if !rustc_legacy_const_generics.is_empty() { @@ -1186,7 +1197,10 @@ fn gen_test_body( "UQI" => " printf(\" let r: u32 = %u;\\n\", o);", "QI" => " printf(\" let r: i32 = %d;\\n\", o);", "HI" => " printf(\" let r: i32 = %d;\\n\", o);", - "V32QI" | "V16HI" | "V8SI" | "V4DI" | "UV32QI" | "UV16HI" | "UV8SI" | "UV4DI" | "V8SF" | "V4DF" => " printf(\" let r = i64x4::new(%ld, %ld, %ld, %ld);\\n\", o.i64[0], o.i64[1], o.i64[2], o.i64[3]);", + "V32QI" | "V16HI" | "V8SI" | "V4DI" | "UV32QI" | 
"UV16HI" | "UV8SI" | "UV4DI" + | "V8SF" | "V4DF" => { + " printf(\" let r = i64x4::new(%ld, %ld, %ld, %ld);\\n\", o.i64[0], o.i64[1], o.i64[2], o.i64[3]);" + } _ => " printf(\" let r = i64x2::new(%ld, %ld);\\n\", o.i64[0], o.i64[1]);", } }; @@ -1480,9 +1494,13 @@ fn gen_test_body( }; let fn_assert = { if out_t.to_lowercase() == "void" { - format!(" printf(\"\\n {current_name}{as_params};\\n assert_eq!(r, transmute(o));\\n\"{as_args});") + format!( + " printf(\"\\n {current_name}{as_params};\\n assert_eq!(r, transmute(o));\\n\"{as_args});" + ) } else { - format!(" printf(\"\\n assert_eq!(r, transmute({current_name}{as_params}));\\n\"{as_args});") + format!( + " printf(\"\\n assert_eq!(r, transmute({current_name}{as_params}));\\n\"{as_args});" + ) } }; format!( diff --git a/examples/connect5.rs b/examples/connect5.rs index 88bb18878c..0a357dd429 100644 --- a/examples/connect5.rs +++ b/examples/connect5.rs @@ -560,11 +560,7 @@ fn search(pos: &Pos, alpha: i32, beta: i32, depth: i32, _ply: i32) -> i32 { assert_ne!(bm, MOVE_NONE); assert!(bs >= -EVAL_INF && bs <= EVAL_INF); - if _ply == 0 { - bm - } else { - bs - } //best move at the root node, best score elsewhere + if _ply == 0 { bm } else { bs } //best move at the root node, best score elsewhere } /// Evaluation function: give different scores to different patterns after a fixed depth. From 818c71dae1c612bd7b4ffba69e02c8d7270a18f6 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sun, 9 Feb 2025 10:23:23 -0800 Subject: [PATCH 9/9] Add .git-blame-ignore-revs This will ignore certain commits in `git blame` --- .git-blame-ignore-revs | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..d6021c4f2a --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,4 @@ +# Use `git config blame.ignorerevsfile .git-blame-ignore-revs` to make `git blame` ignore the following commits. + +# format with style edition 2024 +fc87bd98d689590a0b6f5ee4110c5b9f962faa66