@@ -21988,7 +21988,6 @@ pub unsafe fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
     }
     vqrshrn_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
@@ -22027,7 +22026,6 @@ pub unsafe fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
    extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
     }
     vqrshrn_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
@@ -22066,7 +22064,6 @@ pub unsafe fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
     }
     vqrshrn_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
@@ -22105,7 +22102,6 @@ pub unsafe fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqrshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
     }
     vqrshrn_n_u16_(a, const { uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16) })
@@ -22144,7 +22140,6 @@ pub unsafe fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqrshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
     }
     vqrshrn_n_u32_(a, const { uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32) })
@@ -22183,7 +22178,6 @@ pub unsafe fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqrshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t;
     }
     vqrshrn_n_u64_(a, const { uint64x2_t(-N as u64, -N as u64) })
@@ -22222,7 +22216,6 @@ pub unsafe fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqrshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
     }
     vqrshrun_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
@@ -22261,7 +22254,6 @@ pub unsafe fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqrshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
     }
     vqrshrun_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
@@ -22300,7 +22292,6 @@ pub unsafe fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqrshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
     }
     vqrshrun_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
@@ -22915,7 +22906,6 @@ pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
     }
     vqshlu_n_s8_(a, const { int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8) })
@@ -22934,11 +22924,10 @@ pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
     static_assert_uimm_bits!(N, 3);
     #[allow(improper_ctypes)]
     extern "unadjusted" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
+        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i8")]
         fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
     }
-    vqshlu_n_s8_(a, const { int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8) })
+    vqshlu_n_s8_(a, const { int8x8_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]) })
 }
 
 /// Signed saturating shift left unsigned
@@ -22955,7 +22944,6 @@ pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshlu_n_s16_(a: int16x4_t, n: int16x4_t) -> uint16x4_t;
     }
     vqshlu_n_s16_(a, const { int16x4_t(N as i16, N as i16, N as i16, N as i16) })
@@ -22974,11 +22962,10 @@ pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
     static_assert_uimm_bits!(N, 4);
     #[allow(improper_ctypes)]
     extern "unadjusted" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
+        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i16")]
         fn vqshlu_n_s16_(a: int16x4_t, n: int16x4_t) -> uint16x4_t;
     }
-    vqshlu_n_s16_(a, const { int16x4_t(N as i16, N as i16, N as i16, N as i16) })
+    vqshlu_n_s16_(a, const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) })
 }
 
 /// Signed saturating shift left unsigned
@@ -22995,7 +22982,6 @@ pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshlu_n_s32_(a: int32x2_t, n: int32x2_t) -> uint32x2_t;
     }
     vqshlu_n_s32_(a, const { int32x2_t(N as i32, N as i32) })
@@ -23014,11 +23000,10 @@ pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
     static_assert_uimm_bits!(N, 5);
     #[allow(improper_ctypes)]
     extern "unadjusted" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
+        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i32")]
         fn vqshlu_n_s32_(a: int32x2_t, n: int32x2_t) -> uint32x2_t;
     }
-    vqshlu_n_s32_(a, const { int32x2_t(N as i32, N as i32) })
+    vqshlu_n_s32_(a, const { int32x2_t([N as i32, N as i32]) })
 }
 
 /// Signed saturating shift left unsigned
@@ -23035,7 +23020,6 @@ pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshlu_n_s64_(a: int64x1_t, n: int64x1_t) -> uint64x1_t;
     }
     vqshlu_n_s64_(a, const { int64x1_t(N as i64) })
@@ -23054,11 +23038,10 @@ pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
     static_assert_uimm_bits!(N, 6);
     #[allow(improper_ctypes)]
     extern "unadjusted" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v1i64")]
-        #[rustc_intrinsic_const_vector_arg(1)]
+        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v1i64")]
         fn vqshlu_n_s64_(a: int64x1_t, n: int64x1_t) -> uint64x1_t;
     }
-    vqshlu_n_s64_(a, const { int64x1_t(N as i64) })
+    vqshlu_n_s64_(a, const { int64x1_t([N as i64]) })
 }
 
 /// Signed saturating shift left unsigned
@@ -23075,7 +23058,6 @@ pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshluq_n_s8_(a: int8x16_t, n: int8x16_t) -> uint8x16_t;
     }
     vqshluq_n_s8_(a, const { int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8) })
@@ -23094,11 +23076,10 @@ pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
     static_assert_uimm_bits!(N, 3);
     #[allow(improper_ctypes)]
     extern "unadjusted" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v16i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
+        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v16i8")]
         fn vqshluq_n_s8_(a: int8x16_t, n: int8x16_t) -> uint8x16_t;
     }
-    vqshluq_n_s8_(a, const { int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8) })
+    vqshluq_n_s8_(a, const { int8x16_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]) })
 }
 
 /// Signed saturating shift left unsigned
@@ -23115,7 +23096,6 @@ pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshluq_n_s16_(a: int16x8_t, n: int16x8_t) -> uint16x8_t;
     }
     vqshluq_n_s16_(a, const { int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16) })
@@ -23134,11 +23114,10 @@ pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
     static_assert_uimm_bits!(N, 4);
     #[allow(improper_ctypes)]
     extern "unadjusted" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
+        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i16")]
         fn vqshluq_n_s16_(a: int16x8_t, n: int16x8_t) -> uint16x8_t;
     }
-    vqshluq_n_s16_(a, const { int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16) })
+    vqshluq_n_s16_(a, const { int16x8_t([N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16]) })
 }
 
 /// Signed saturating shift left unsigned
@@ -23155,7 +23134,6 @@ pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshluq_n_s32_(a: int32x4_t, n: int32x4_t) -> uint32x4_t;
     }
     vqshluq_n_s32_(a, const { int32x4_t(N as i32, N as i32, N as i32, N as i32) })
@@ -23174,11 +23152,10 @@ pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
     static_assert_uimm_bits!(N, 5);
     #[allow(improper_ctypes)]
     extern "unadjusted" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
+        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i32")]
         fn vqshluq_n_s32_(a: int32x4_t, n: int32x4_t) -> uint32x4_t;
     }
-    vqshluq_n_s32_(a, const { int32x4_t(N as i32, N as i32, N as i32, N as i32) })
+    vqshluq_n_s32_(a, const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) })
 }
 
 /// Signed saturating shift left unsigned
@@ -23195,7 +23172,6 @@ pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshluq_n_s64_(a: int64x2_t, n: int64x2_t) -> uint64x2_t;
     }
     vqshluq_n_s64_(a, const { int64x2_t(N as i64, N as i64) })
@@ -23214,11 +23190,10 @@ pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
     static_assert_uimm_bits!(N, 6);
     #[allow(improper_ctypes)]
     extern "unadjusted" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i64")]
-        #[rustc_intrinsic_const_vector_arg(1)]
+        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i64")]
         fn vqshluq_n_s64_(a: int64x2_t, n: int64x2_t) -> uint64x2_t;
     }
-    vqshluq_n_s64_(a, const { int64x2_t(N as i64, N as i64) })
+    vqshluq_n_s64_(a, const { int64x2_t([N as i64, N as i64]) })
 }
 
 /// Signed saturating shift right narrow
@@ -23235,7 +23210,6 @@ pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
     }
     vqshrn_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
@@ -23274,7 +23248,6 @@ pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
     }
     vqshrn_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
@@ -23313,7 +23286,6 @@ pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
     }
     vqshrn_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
@@ -23352,7 +23324,6 @@ pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
     }
     vqshrn_n_u16_(a, const { uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16) })
@@ -23391,7 +23362,6 @@ pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
     }
     vqshrn_n_u32_(a, const { uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32) })
@@ -23430,7 +23400,6 @@ pub unsafe fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t;
     }
     vqshrn_n_u64_(a, const { uint64x2_t(-N as u64, -N as u64) })
@@ -23469,7 +23438,6 @@ pub unsafe fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
     }
     vqshrun_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
@@ -23508,7 +23476,6 @@ pub unsafe fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
     }
     vqshrun_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
@@ -23547,7 +23514,6 @@ pub unsafe fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vqshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
     }
     vqshrun_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
@@ -28266,7 +28232,6 @@ pub unsafe fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
     }
     vrshrn_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
@@ -28305,7 +28270,6 @@ pub unsafe fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
     }
     vrshrn_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
@@ -28344,7 +28308,6 @@ pub unsafe fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")]
-        #[rustc_intrinsic_const_vector_arg(1)]
         fn vrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
     }
     vrshrn_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
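
Note: every hunk above applies the same change. The `#[rustc_intrinsic_const_vector_arg(1)]` attribute is removed from the `extern "unadjusted"` declarations, with the shift amount still passed as a `const { ... }` splat vector; the AArch64 variants additionally widen their `cfg_attr` from `target_arch = "aarch64"` to `any(target_arch = "aarch64", target_arch = "arm64ec")` and switch the splat to the array-based vector constructors. A minimal sketch of the resulting shape, for illustration only (not the exact stdarch source; `int8x8_t`, `uint8x8_t`, and `static_assert_uimm_bits!` are stdarch internals assumed in scope, and the `[N as i8; 8]` repeat expression is shorthand for the written-out splat in the diff):

    pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
        // The shift amount for 8-bit lanes must fit in 3 bits (0..=7).
        static_assert_uimm_bits!(N, 3);
        #[allow(improper_ctypes)]
        extern "unadjusted" {
            // Links against the LLVM intrinsic on both aarch64 and arm64ec;
            // no per-argument attribute on the declaration is needed.
            #[cfg_attr(
                any(target_arch = "aarch64", target_arch = "arm64ec"),
                link_name = "llvm.aarch64.neon.sqshlu.v8i8"
            )]
            fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
        }
        // `const { ... }` evaluates the splat at compile time, so the backend
        // still receives a constant vector for the shift amount.
        vqshlu_n_s8_(a, const { int8x8_t([N as i8; 8]) })
    }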