From 0646f0dda6332a1f7c185036f899b3350eb99a4a Mon Sep 17 00:00:00 2001 From: Josh Stone Date: Tue, 16 Aug 2022 15:46:17 -0700 Subject: [PATCH 001/112] Move the cast_float_to_int fallback code to GCC Now that we require at least LLVM 13, that codegen backend is always using its intrinsic `fptosi.sat` and `fptoui.sat` conversions, so it doesn't need the manual implementation. However, the GCC backend still needs it, so we can move all of that code down there. --- src/builder.rs | 174 +++++++++++++++++++++++++++++++++++++++++++++++-- src/lib.rs | 1 + 2 files changed, 170 insertions(+), 5 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 4d40dd0994d..6994eeb00c3 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -15,8 +15,11 @@ use gccjit::{ Type, UnaryOp, }; +use rustc_apfloat::{ieee, Float, Round, Status}; use rustc_codegen_ssa::MemFlags; -use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope}; +use rustc_codegen_ssa::common::{ + AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind, +}; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ @@ -31,6 +34,7 @@ use rustc_codegen_ssa::traits::{ StaticBuilderMethods, }; use rustc_data_structures::fx::FxHashSet; +use rustc_middle::bug; use rustc_middle::ty::{ParamEnv, Ty, TyCtxt}; use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout}; use rustc_span::Span; @@ -1271,12 +1275,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { val } - fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option> { - None + fn fptoui_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + self.fptoint_sat(false, val, dest_ty) } - fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option> { - None + fn fptosi_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + self.fptoint_sat(true, val, dest_ty) } fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) { @@ -1285,6 +1289,166 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { + fn fptoint_sat(&mut self, signed: bool, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + let src_ty = self.cx.val_ty(val); + let (float_ty, int_ty) = if self.cx.type_kind(src_ty) == TypeKind::Vector { + assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty)); + (self.cx.element_type(src_ty), self.cx.element_type(dest_ty)) + } else { + (src_ty, dest_ty) + }; + + // FIXME(jistone): the following was originally the fallback SSA implementation, before LLVM 13 + // added native `fptosi.sat` and `fptoui.sat` conversions, but it was used by GCC as well. + // Now that LLVM always relies on its own, the code has been moved to GCC, but the comments are + // still LLVM-specific. This should be updated, and use better GCC specifics if possible. + + let int_width = self.cx.int_width(int_ty); + let float_width = self.cx.float_width(float_ty); + // LLVM's fpto[su]i returns undef when the input val is infinite, NaN, or does not fit into the + // destination integer type after rounding towards zero. 
This `undef` value can cause UB in + // safe code (see issue #10184), so we implement a saturating conversion on top of it: + // Semantically, the mathematical value of the input is rounded towards zero to the next + // mathematical integer, and then the result is clamped into the range of the destination + // integer type. Positive and negative infinity are mapped to the maximum and minimum value of + // the destination integer type. NaN is mapped to 0. + // + // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to + // a value representable in int_ty. + // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits. + // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two. + // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly + // representable. Note that this only works if float_ty's exponent range is sufficiently large. + // f16 or 256 bit integers would break this property. Right now the smallest float type is f32 + // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127. + // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because + // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). + // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. + let int_max = |signed: bool, int_width: u64| -> u128 { + let shift_amount = 128 - int_width; + if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount } + }; + let int_min = |signed: bool, int_width: u64| -> i128 { + if signed { i128::MIN >> (128 - int_width) } else { 0 } + }; + + let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = + ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = + ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = + ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = + ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + // To implement saturation, we perform the following steps: + // + // 1. Cast val to an integer with fpto[su]i. This may result in undef. + // 2. Compare val to f_min and f_max, and use the comparison results to select: + // a) int_ty::MIN if val < f_min or val is NaN + // b) int_ty::MAX if val > f_max + // c) the result of fpto[su]i otherwise + // 3. If val is NaN, return 0.0, otherwise return the result of step 2. + // + // This avoids resulting undef because values in range [f_min, f_max] by definition fit into the + // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of + // undef does not introduce any non-determinism either. + // More importantly, the above procedure correctly implements saturating conversion. + // Proof (sketch): + // If val is NaN, 0 is returned by definition. + // Otherwise, val is finite or infinite and thus can be compared with f_min and f_max. 
+ // This yields three cases to consider: + // (1) if val in [f_min, f_max], the result of fpto[su]i is returned, which agrees with + // saturating conversion for inputs in that range. + // (2) if val > f_max, then val is larger than int_ty::MAX. This holds even if f_max is rounded + // (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger + // than int_ty::MAX. Because val is larger than int_ty::MAX, the return value of int_ty::MAX + // is correct. + // (3) if val < f_min, then val is smaller than int_ty::MIN. As shown earlier, f_min exactly equals + // int_ty::MIN and therefore the return value of int_ty::MIN is correct. + // QED. + + let float_bits_to_llval = |bx: &mut Self, bits| { + let bits_llval = match float_width { + 32 => bx.cx().const_u32(bits as u32), + 64 => bx.cx().const_u64(bits as u64), + n => bug!("unsupported float width {}", n), + }; + bx.bitcast(bits_llval, float_ty) + }; + let (f_min, f_max) = match float_width { + 32 => compute_clamp_bounds_single(signed, int_width), + 64 => compute_clamp_bounds_double(signed, int_width), + n => bug!("unsupported float width {}", n), + }; + let f_min = float_bits_to_llval(self, f_min); + let f_max = float_bits_to_llval(self, f_max); + let int_max = self.cx.const_uint_big(int_ty, int_max(signed, int_width)); + let int_min = self.cx.const_uint_big(int_ty, int_min(signed, int_width) as u128); + let zero = self.cx.const_uint(int_ty, 0); + + // If we're working with vectors, constants must be "splatted": the constant is duplicated + // into each lane of the vector. The algorithm stays the same, we are just using the + // same constant across all lanes. + let maybe_splat = |bx: &mut Self, val| { + if bx.cx().type_kind(dest_ty) == TypeKind::Vector { + bx.vector_splat(bx.vector_length(dest_ty), val) + } else { + val + } + }; + let f_min = maybe_splat(self, f_min); + let f_max = maybe_splat(self, f_max); + let int_max = maybe_splat(self, int_max); + let int_min = maybe_splat(self, int_min); + let zero = maybe_splat(self, zero); + + // Step 1 ... + let fptosui_result = if signed { self.fptosi(val, dest_ty) } else { self.fptoui(val, dest_ty) }; + let less_or_nan = self.fcmp(RealPredicate::RealULT, val, f_min); + let greater = self.fcmp(RealPredicate::RealOGT, val, f_max); + + // Step 2: We use two comparisons and two selects, with %s1 being the + // result: + // %less_or_nan = fcmp ult %val, %f_min + // %greater = fcmp olt %val, %f_max + // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result + // %s1 = select %greater, int_ty::MAX, %s0 + // Note that %less_or_nan uses an *unordered* comparison. This + // comparison is true if the operands are not comparable (i.e., if val is + // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if + // val is NaN. + // + // Performance note: Unordered comparison can be lowered to a "flipped" + // comparison and a negation, and the negation can be merged into the + // select. Therefore, it not necessarily any more expensive than an + // ordered ("normal") comparison. Whether these optimizations will be + // performed is ultimately up to the backend, but at least x86 does + // perform them. + let s0 = self.select(less_or_nan, int_min, fptosui_result); + let s1 = self.select(greater, int_max, s0); + + // Step 3: NaN replacement. + // For unsigned types, the above step already yielded int_ty::MIN == 0 if val is NaN. + // Therefore we only need to execute this step for signed integer types. 
+ if signed { + // LLVM has no isNaN predicate, so we use (val == val) instead + let cmp = self.fcmp(RealPredicate::RealOEQ, val, val); + self.select(cmp, s1, zero) + } else { + s1 + } + } + #[cfg(feature="master")] pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> { let struct_type = mask.get_type().is_struct().expect("mask of struct type"); diff --git a/src/lib.rs b/src/lib.rs index 8a206c0368f..223466fb9b5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,6 +19,7 @@ #![warn(rust_2018_idioms)] #![warn(unused_lifetimes)] +extern crate rustc_apfloat; extern crate rustc_ast; extern crate rustc_codegen_ssa; extern crate rustc_data_structures; From 34d33f2299b30103a7cc640013e4d75fcdd77d1b Mon Sep 17 00:00:00 2001 From: Maybe Waffle Date: Wed, 11 May 2022 22:01:53 +0400 Subject: [PATCH 002/112] Implement `ptr_mask` intrinsic in cg gcc --- src/intrinsic/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index 5fbdedac0c4..b0e7e5e1dd0 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -309,6 +309,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { return; } + sym::ptr_mask => self.and(args[0].immediate(), args[1].immediate()), + _ if name_str.starts_with("simd_") => { match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { Ok(llval) => llval, From 1acc3b45e3c83423e12154a6c893941b2bb4ef7a Mon Sep 17 00:00:00 2001 From: Maybe Waffle Date: Fri, 13 May 2022 17:08:44 +0400 Subject: [PATCH 003/112] Fix `ptr_mask` impl in cg gcc --- src/intrinsic/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index b0e7e5e1dd0..beb9a800144 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -309,8 +309,18 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { return; } - sym::ptr_mask => self.and(args[0].immediate(), args[1].immediate()), + sym::ptr_mask => { + let usize_type = self.context.new_type::(); + let void_ptr_type = self.context.new_type::<*const ()>(); + let ptr = args[0].immediate(); + let mask = args[1].immediate(); + + let addr = self.bitcast(ptr, usize_type); + let masked = self.and(addr, mask); + self.bitcast(masked, void_ptr_type) + }, + _ if name_str.starts_with("simd_") => { match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { Ok(llval) => llval, From 93191f66dc4f4b5d11cb695277be13d38f3248d3 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Thu, 25 Aug 2022 17:52:37 +1000 Subject: [PATCH 004/112] Box `CastTarget` within `PassMode`. Because `PassMode::Cast` is by far the largest variant, but is relatively rare. This requires making `PassMode` not impl `Copy`, and `Clone` is no longer necessary. This causes lots of sigil adjusting, but nothing very notable. --- src/abi.rs | 4 ++-- src/intrinsic/mod.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/abi.rs b/src/abi.rs index 0ed3e1fbe93..9b55db6a547 100644 --- a/src/abi.rs +++ b/src/abi.rs @@ -133,7 +133,7 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { match self.ret.mode { PassMode::Ignore => cx.type_void(), PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx), - PassMode::Cast(cast) => cast.gcc_type(cx), + PassMode::Cast(ref cast) => cast.gcc_type(cx), PassMode::Indirect { .. 
} => { argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); cx.type_void() @@ -157,7 +157,7 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Indirect { extra_attrs: Some(_), .. } => { unimplemented!(); } - PassMode::Cast(cast) => cast.gcc_type(cx), + PassMode::Cast(ref cast) => cast.gcc_type(cx), PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => { on_stack_param_indices.insert(argument_tys.len()); arg.memory_ty(cx) diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index 5fbdedac0c4..f4493776ea2 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -130,7 +130,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { sym::volatile_load | sym::unaligned_volatile_load => { let tp_ty = substs.type_at(0); let mut ptr = args[0].immediate(); - if let PassMode::Cast(ty) = fn_abi.ret.mode { + if let PassMode::Cast(ty) = &fn_abi.ret.mode { ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self))); } let load = self.volatile_load(ptr.get_type(), ptr); @@ -320,7 +320,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { }; if !fn_abi.ret.is_ignore() { - if let PassMode::Cast(ty) = fn_abi.ret.mode { + if let PassMode::Cast(ty) = &fn_abi.ret.mode { let ptr_llty = self.type_ptr_to(ty.gcc_type(self)); let ptr = self.pointercast(result.llval, ptr_llty); self.store(llval, ptr, result.align); @@ -416,7 +416,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { else if self.is_unsized_indirect() { bug!("unsized `ArgAbi` must be handled through `store_fn_arg`"); } - else if let PassMode::Cast(cast) = self.mode { + else if let PassMode::Cast(ref cast) = self.mode { // FIXME(eddyb): Figure out when the simpler Store is safe, clang // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; From f78592f1bcfba609cc83361dc39c795729292eb1 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Thu, 25 Aug 2022 19:08:04 +1000 Subject: [PATCH 005/112] Change `FnAbi::args` to a boxed slice. --- src/abi.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/abi.rs b/src/abi.rs index 9b55db6a547..7f313583c82 100644 --- a/src/abi.rs +++ b/src/abi.rs @@ -140,7 +140,7 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } }; - for arg in &self.args { + for arg in self.args.iter() { // add padding if let Some(ty) = arg.pad { argument_tys.push(ty.gcc_type(cx)); From 5a6992916e8957927260171f8802b0d8b8809ec5 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Fri, 26 Aug 2022 10:37:51 +1000 Subject: [PATCH 006/112] Simplify arg capacity calculations. Currently they try to be very precise. But they are wrong, i.e. they don't match what's happening in the loop below. This code isn't hot enough for it to matter that much. 
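A minimal standalone sketch (hypothetical function and element type, not the code in this patch) of the kind of reservation this switches to. `Vec::with_capacity` is only a lower-bound hint, so an undercount merely costs a later reallocation rather than breaking anything:

    fn reserve_argument_tys(num_args: usize, ret_is_indirect: bool) -> Vec<u32> {
        // Roughly one slot per argument plus one for an indirect return; pair
        // arguments and cast padding may exceed this, in which case the Vec
        // simply grows on push.
        Vec::with_capacity(num_args + usize::from(ret_is_indirect))
    }

    fn main() {
        let tys = reserve_argument_tys(3, true);
        assert!(tys.capacity() >= 4);
    }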
--- src/abi.rs | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/src/abi.rs b/src/abi.rs index 7f313583c82..87b730d29cd 100644 --- a/src/abi.rs +++ b/src/abi.rs @@ -107,26 +107,10 @@ pub trait FnAbiGccExt<'gcc, 'tcx> { impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec>, bool, FxHashSet) { let mut on_stack_param_indices = FxHashSet::default(); - let args_capacity: usize = self.args.iter().map(|arg| - if arg.pad.is_some() { - 1 - } - else { - 0 - } + - if let PassMode::Pair(_, _) = arg.mode { - 2 - } else { - 1 - } - ).sum(); + + // This capacity calculation is approximate. let mut argument_tys = Vec::with_capacity( - if let PassMode::Indirect { .. } = self.ret.mode { - 1 - } - else { - 0 - } + args_capacity, + self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } ); let return_ty = From d83636d3fac0c0c6b2498bd0699ad6848032f083 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Thu, 25 Aug 2022 19:18:01 +1000 Subject: [PATCH 007/112] Turn `ArgAbi::pad` into a `bool`. Because it's only ever set to `None` or `Some(Reg::i32())`. --- src/abi.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/abi.rs b/src/abi.rs index 87b730d29cd..3186b363e35 100644 --- a/src/abi.rs +++ b/src/abi.rs @@ -126,8 +126,8 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { for arg in self.args.iter() { // add padding - if let Some(ty) = arg.pad { - argument_tys.push(ty.gcc_type(cx)); + if arg.pad_i32 { + argument_tys.push(Reg::i32().gcc_type(cx)); } let arg_ty = match arg.mode { From 89003418b391d0f260b2354472a9b788b8128b7b Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Thu, 25 Aug 2022 22:19:38 +1000 Subject: [PATCH 008/112] Move `ArgAbi::pad_i32` into `PassMode::Cast`. Because it's only needed for that variant. This shrinks the types and clarifies the logic. --- src/abi.rs | 15 ++++++++------- src/intrinsic/mod.rs | 8 ++++---- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/abi.rs b/src/abi.rs index 3186b363e35..848c34211ff 100644 --- a/src/abi.rs +++ b/src/abi.rs @@ -117,7 +117,7 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { match self.ret.mode { PassMode::Ignore => cx.type_void(), PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx), - PassMode::Cast(ref cast) => cast.gcc_type(cx), + PassMode::Cast(ref cast, _) => cast.gcc_type(cx), PassMode::Indirect { .. } => { argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); cx.type_void() @@ -125,11 +125,6 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { }; for arg in self.args.iter() { - // add padding - if arg.pad_i32 { - argument_tys.push(Reg::i32().gcc_type(cx)); - } - let arg_ty = match arg.mode { PassMode::Ignore => continue, PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx), @@ -141,7 +136,13 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Indirect { extra_attrs: Some(_), .. } => { unimplemented!(); } - PassMode::Cast(ref cast) => cast.gcc_type(cx), + PassMode::Cast(ref cast, pad_i32) => { + // add padding + if pad_i32 { + argument_tys.push(Reg::i32().gcc_type(cx)); + } + cast.gcc_type(cx) + } PassMode::Indirect { extra_attrs: None, on_stack: true, .. 
} => { on_stack_param_indices.insert(argument_tys.len()); arg.memory_ty(cx) diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index f4493776ea2..352fe7568ef 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -130,7 +130,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { sym::volatile_load | sym::unaligned_volatile_load => { let tp_ty = substs.type_at(0); let mut ptr = args[0].immediate(); - if let PassMode::Cast(ty) = &fn_abi.ret.mode { + if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self))); } let load = self.volatile_load(ptr.get_type(), ptr); @@ -320,7 +320,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { }; if !fn_abi.ret.is_ignore() { - if let PassMode::Cast(ty) = &fn_abi.ret.mode { + if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { let ptr_llty = self.type_ptr_to(ty.gcc_type(self)); let ptr = self.pointercast(result.llval, ptr_llty); self.store(llval, ptr, result.align); @@ -416,7 +416,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { else if self.is_unsized_indirect() { bug!("unsized `ArgAbi` must be handled through `store_fn_arg`"); } - else if let PassMode::Cast(ref cast) = self.mode { + else if let PassMode::Cast(ref cast, _) = self.mode { // FIXME(eddyb): Figure out when the simpler Store is safe, clang // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; @@ -481,7 +481,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { PassMode::Indirect { extra_attrs: Some(_), .. } => { OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); }, - PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => { + PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(..) => { let next_arg = next(); self.store(bx, next_arg, dst); }, From c32ad5c229e3b3812e555cfe43caa579a4a4aa8c Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sat, 27 Aug 2022 14:11:19 -0400 Subject: [PATCH 009/112] =?UTF-8?q?interpret:=20rename=20relocation=20?= =?UTF-8?q?=E2=86=92=20provenance?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/consts.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/consts.rs b/src/consts.rs index c0b8d21818f..356c03ee3c1 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -127,7 +127,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { // // We could remove this hack whenever we decide to drop macOS 10.10 support. if self.tcx.sess.target.options.is_like_osx { - // The `inspect` method is okay here because we checked relocations, and + // The `inspect` method is okay here because we checked for provenance, and // because we are doing this access to inspect the final interpreter state // (not as part of the interpreter execution). 
// @@ -296,17 +296,17 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> { let alloc = alloc.inner(); - let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1); + let mut llvals = Vec::with_capacity(alloc.provenance().len() + 1); let dl = cx.data_layout(); let pointer_size = dl.pointer_size.bytes() as usize; let mut next_offset = 0; - for &(offset, alloc_id) in alloc.relocations().iter() { + for &(offset, alloc_id) in alloc.provenance().iter() { let offset = offset.bytes(); assert_eq!(offset as usize as u64, offset); let offset = offset as usize; if offset > next_offset { - // This `inspect` is okay since we have checked that it is not within a relocation, it + // This `inspect` is okay since we have checked that it is not within a pointer with provenance, it // is within the bounds of the allocation, and it doesn't affect interpreter execution // (we inspect the result after interpreter execution). Any undef byte is replaced with // some arbitrary byte value. @@ -319,7 +319,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl read_target_uint( dl.endian, // This `inspect` is okay since it is within the bounds of the allocation, it doesn't // affect interpreter execution (we inspect the result after interpreter execution), - // and we properly interpret the relocation as a relocation pointer offset. + // and we properly interpret the provenance as a relocation pointer offset. alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)), ) .expect("const_alloc_to_llvm: could not read relocation pointer") @@ -336,7 +336,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl } if alloc.len() >= next_offset { let range = next_offset..alloc.len(); - // This `inspect` is okay since we have check that it is after all relocations, it is + // This `inspect` is okay since we have check that it is after all provenance, it is // within the bounds of the allocation, and it doesn't affect interpreter execution (we // inspect the result after interpreter execution). Any undef byte is replaced with some // arbitrary byte value. 
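The loop above interleaves two kinds of chunks: plain byte runs and pointer-sized chunks whose offsets carry provenance. A simplified, self-contained model of that split (hypothetical helper, not code from this crate; offsets are assumed sorted and non-overlapping, as in the real provenance map):

    // Split an allocation of `len` bytes into (start, end, is_pointer) ranges:
    // pointer-sized chunks at each provenance offset, raw bytes in between.
    fn split_alloc(len: usize, provenance: &[usize], pointer_size: usize) -> Vec<(usize, usize, bool)> {
        let mut ranges = Vec::new();
        let mut next_offset = 0;
        for &offset in provenance {
            if offset > next_offset {
                ranges.push((next_offset, offset, false)); // plain bytes
            }
            ranges.push((offset, offset + pointer_size, true)); // pointer with provenance
            next_offset = offset + pointer_size;
        }
        if len > next_offset {
            ranges.push((next_offset, len, false)); // trailing plain bytes
        }
        ranges
    }

    fn main() {
        // A 16-byte allocation with one 8-byte pointer starting at offset 4.
        let ranges = split_alloc(16, &[4], 8);
        assert_eq!(ranges, vec![(0, 4, false), (4, 12, true), (12, 16, false)]);
    }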
From c3dce60ac783b997b7527c324b9c9ee30a6ccfd7 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Tue, 6 Sep 2022 14:09:49 +0000 Subject: [PATCH 010/112] Remove dead broken code from const zst handling in backends --- src/common.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/common.rs b/src/common.rs index ccb6cbbc2c8..aa1c271c31c 100644 --- a/src/common.rs +++ b/src/common.rs @@ -158,10 +158,6 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { None } - fn zst_to_backend(&self, _ty: Type<'gcc>) -> RValue<'gcc> { - self.const_undef(self.type_ix(0)) - } - fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> { let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() }; match cv { From 23eae52d45ceace95215e696da13c6f9f57f90f0 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Fri, 26 Aug 2022 19:32:04 -0700 Subject: [PATCH 011/112] Add RanlibFailure --- src/archive.rs | 4 +++- src/errors.rs | 7 +++++++ src/lib.rs | 2 ++ 3 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 src/errors.rs diff --git a/src/archive.rs b/src/archive.rs index f863abdcc97..14a69c194e4 100644 --- a/src/archive.rs +++ b/src/archive.rs @@ -1,6 +1,8 @@ use std::fs::File; use std::path::{Path, PathBuf}; +use crate::errors::RanlibFailure; + use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder}; use rustc_session::Session; @@ -181,7 +183,7 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { std::process::Command::new("ranlib").arg(output).status().expect("Couldn't run ranlib"); if !status.success() { - self.config.sess.fatal(&format!("Ranlib exited with code {:?}", status.code())); + self.config.sess.emit_fatal(RanlibFailure { exit_code: status.code() }); } any_members diff --git a/src/errors.rs b/src/errors.rs new file mode 100644 index 00000000000..1a0e38fc0bb --- /dev/null +++ b/src/errors.rs @@ -0,0 +1,7 @@ +use rustc_macros::SessionDiagnostic; + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::ranlib_failure)] +pub(crate) struct RanlibFailure { + pub exit_code: Option +} diff --git a/src/lib.rs b/src/lib.rs index 223466fb9b5..fc10112e55e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -25,6 +25,7 @@ extern crate rustc_codegen_ssa; extern crate rustc_data_structures; extern crate rustc_errors; extern crate rustc_hir; +extern crate rustc_macros; extern crate rustc_metadata; extern crate rustc_middle; extern crate rustc_session; @@ -50,6 +51,7 @@ mod context; mod coverageinfo; mod debuginfo; mod declare; +mod errors; mod int; mod intrinsic; mod mono_item; From 7277046d84d53a9b31abb4ef8d5b448afa4725f1 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Fri, 26 Aug 2022 20:23:50 -0700 Subject: [PATCH 012/112] Add LinkageConstOrMutType --- src/consts.rs | 6 ++---- src/errors.rs | 8 ++++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/consts.rs b/src/consts.rs index 356c03ee3c1..81f53328867 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -14,6 +14,7 @@ use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRan use crate::base; use crate::context::CodegenCx; +use crate::errors::LinkageConstOrMutType; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { @@ -368,10 +369,7 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg cx.layout_of(mt.ty).gcc_type(cx, true) } else { - cx.sess().span_fatal( - span, - "must have type `*const T` or `*mut T` due to `#[linkage]` attribute", - ) + 
cx.sess().emit_fatal(LinkageConstOrMutType { span: span }) }; // Declare a symbol `foo` with the desired linkage. let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage)); diff --git a/src/errors.rs b/src/errors.rs index 1a0e38fc0bb..456a60c6f90 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,7 +1,15 @@ use rustc_macros::SessionDiagnostic; +use rustc_span::Span; #[derive(SessionDiagnostic)] #[diag(codegen_gcc::ranlib_failure)] pub(crate) struct RanlibFailure { pub exit_code: Option } + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::linkage_const_or_mut_type)] +pub(crate) struct LinkageConstOrMutType { + #[primary_span] + pub span: Span +} From 7fc07caf6798dcb29f91a6a57fd63e3d3ffa34ee Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Fri, 26 Aug 2022 20:44:44 -0700 Subject: [PATCH 013/112] Add UnwindingInlineAsm --- src/asm.rs | 3 ++- src/errors.rs | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/asm.rs b/src/asm.rs index 52fd66af065..007b001f213 100644 --- a/src/asm.rs +++ b/src/asm.rs @@ -12,6 +12,7 @@ use std::borrow::Cow; use crate::builder::Builder; use crate::context::CodegenCx; +use crate::errors::UnwindingInlineAsm; use crate::type_of::LayoutGccExt; use crate::callee::get_fn; @@ -109,7 +110,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], _instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) { if options.contains(InlineAsmOptions::MAY_UNWIND) { self.sess() - .struct_span_err(span[0], "GCC backend does not support unwinding from inline asm") + .create_err(UnwindingInlineAsm { span: span[0] }) .emit(); return; } diff --git a/src/errors.rs b/src/errors.rs index 456a60c6f90..e0c7dca8e32 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -13,3 +13,10 @@ pub(crate) struct LinkageConstOrMutType { #[primary_span] pub span: Span } + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::unwinding_inline_asm)] +pub(crate) struct UnwindingInlineAsm { + #[primary_span] + pub span: Span +} From 33b58ebf22bed790dacf2ef82351a95883559b77 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Fri, 26 Aug 2022 20:50:37 -0700 Subject: [PATCH 014/112] Add LTONotSupported --- src/errors.rs | 4 ++++ src/lib.rs | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/errors.rs b/src/errors.rs index e0c7dca8e32..1b2953952ef 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -14,6 +14,10 @@ pub(crate) struct LinkageConstOrMutType { pub span: Span } +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::lto_not_supported)] +pub(crate) struct LTONotSupported {} + #[derive(SessionDiagnostic)] #[diag(codegen_gcc::unwinding_inline_asm)] pub(crate) struct UnwindingInlineAsm { diff --git a/src/lib.rs b/src/lib.rs index fc10112e55e..03f41d197b8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -61,6 +61,7 @@ mod type_of; use std::any::Any; use std::sync::{Arc, Mutex}; +use crate::errors::LTONotSupported; use gccjit::{Context, OptimizationLevel, CType}; use rustc_ast::expand::allocator::AllocatorKind; use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen}; @@ -99,7 +100,7 @@ pub struct GccCodegenBackend { impl CodegenBackend for GccCodegenBackend { fn init(&self, sess: &Session) { if sess.lto() != Lto::No { - sess.warn("LTO is not supported. 
You may get a linker error."); + sess.emit_warning(LTONotSupported {}); } let temp_dir = TempDir::new().expect("cannot create temporary directory"); From 534ce39aac4881bf3ff4b41a359723f3075bf47a Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sat, 27 Aug 2022 15:19:16 -0700 Subject: [PATCH 015/112] Add LayoutSizeOverflow --- src/context.rs | 5 +++-- src/errors.rs | 8 ++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/context.rs b/src/context.rs index 478f6d893dd..187e4c689ec 100644 --- a/src/context.rs +++ b/src/context.rs @@ -18,6 +18,7 @@ use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDat use rustc_target::spec::{HasTargetSpec, Target, TlsModel}; use crate::callee::get_fn; +use crate::errors::LayoutSizeOverflow; #[derive(Clone)] pub struct FuncSig<'gcc> { @@ -477,7 +478,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { if let LayoutError::SizeOverflow(_) = err { - self.sess().span_fatal(span, &err.to_string()) + self.sess().emit_fatal(LayoutSizeOverflow { span, error: err.to_string() }) } else { span_bug!(span, "failed to get layout for `{}`: {}", ty, err) } @@ -495,7 +496,7 @@ impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { fn_abi_request: FnAbiRequest<'tcx>, ) -> ! { if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err { - self.sess().span_fatal(span, &err.to_string()) + self.sess().emit_fatal(LayoutSizeOverflow { span, error: err.to_string() }) } else { match fn_abi_request { FnAbiRequest::OfFnPtr { sig, extra_args } => { diff --git a/src/errors.rs b/src/errors.rs index 1b2953952ef..490a209ead0 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -7,6 +7,14 @@ pub(crate) struct RanlibFailure { pub exit_code: Option } +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::layout_size_overflow)] +pub(crate) struct LayoutSizeOverflow { + #[primary_span] + pub span: Span, + pub error: String, +} + #[derive(SessionDiagnostic)] #[diag(codegen_gcc::linkage_const_or_mut_type)] pub(crate) struct LinkageConstOrMutType { From 1f3ae14c83aa6f4a7bb8c68cf2deab68b646cfe0 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sat, 27 Aug 2022 15:21:46 -0700 Subject: [PATCH 016/112] Lint against untranslatable diagnostics in rustc_codegen_gcc --- src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index 03f41d197b8..007d61ed51d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -18,6 +18,8 @@ #![recursion_limit="256"] #![warn(rust_2018_idioms)] #![warn(unused_lifetimes)] +#![deny(rustc::untranslatable_diagnostic)] +#![deny(rustc::diagnostic_outside_of_impl)] extern crate rustc_apfloat; extern crate rustc_ast; From ff9bf7b6b3e5050d78ac3931b993b9a0fcbbad18 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Mon, 29 Aug 2022 20:22:03 -0700 Subject: [PATCH 017/112] remove IntoDiagnosticArg impl for Option --- src/archive.rs | 2 +- src/errors.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/archive.rs b/src/archive.rs index 14a69c194e4..77fbb2c500e 100644 --- a/src/archive.rs +++ b/src/archive.rs @@ -183,7 +183,7 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { std::process::Command::new("ranlib").arg(output).status().expect("Couldn't run ranlib"); if !status.success() { - self.config.sess.emit_fatal(RanlibFailure { exit_code: status.code() }); + self.config.sess.emit_fatal(RanlibFailure { exit_code: format!("{:?}", status.code()) }); } any_members diff --git 
a/src/errors.rs b/src/errors.rs index 490a209ead0..01de75976a3 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -4,7 +4,7 @@ use rustc_span::Span; #[derive(SessionDiagnostic)] #[diag(codegen_gcc::ranlib_failure)] pub(crate) struct RanlibFailure { - pub exit_code: Option + pub exit_code: String, } #[derive(SessionDiagnostic)] From d3bb849fe9d663a1de780824bb8d08d01d611114 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Wed, 31 Aug 2022 22:02:35 -0700 Subject: [PATCH 018/112] Add wrapper type for ExitCode for use in RanlibFailure --- src/archive.rs | 2 +- src/errors.rs | 24 +++++++++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/src/archive.rs b/src/archive.rs index 77fbb2c500e..ac0342f6b80 100644 --- a/src/archive.rs +++ b/src/archive.rs @@ -183,7 +183,7 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { std::process::Command::new("ranlib").arg(output).status().expect("Couldn't run ranlib"); if !status.success() { - self.config.sess.emit_fatal(RanlibFailure { exit_code: format!("{:?}", status.code()) }); + self.config.sess.emit_fatal(RanlibFailure::new(status.code())); } any_members diff --git a/src/errors.rs b/src/errors.rs index 01de75976a3..b5fc789c279 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,10 +1,32 @@ +use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg}; use rustc_macros::SessionDiagnostic; use rustc_span::Span; +use std::borrow::Cow; + +struct ExitCode { + pub exit_code: Option, +} + +impl IntoDiagnosticArg for ExitCode { + fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> { + match self.exit_code { + Some(t) => t.into_diagnostic_arg(), + None => DiagnosticArgValue::Str(Cow::Borrowed("None")), + } + } +} #[derive(SessionDiagnostic)] #[diag(codegen_gcc::ranlib_failure)] pub(crate) struct RanlibFailure { - pub exit_code: String, + exit_code: ExitCode, +} + +impl RanlibFailure { + pub fn new(exit_code: Option) -> Self { + let exit_code = ExitCode{ exit_code }; + RanlibFailure { exit_code } + } } #[derive(SessionDiagnostic)] From 70ab0548a0afbef84583e8efe189466d4bcbf213 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Wed, 31 Aug 2022 22:03:05 -0700 Subject: [PATCH 019/112] lint type --- src/errors.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/errors.rs b/src/errors.rs index b5fc789c279..938c0a74af3 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -46,7 +46,7 @@ pub(crate) struct LinkageConstOrMutType { #[derive(SessionDiagnostic)] #[diag(codegen_gcc::lto_not_supported)] -pub(crate) struct LTONotSupported {} +pub(crate) struct LTONotSupported; #[derive(SessionDiagnostic)] #[diag(codegen_gcc::unwinding_inline_asm)] From 059326c4bd30e573b961d2267a8cd74d84c05220 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sun, 11 Sep 2022 16:43:18 -0700 Subject: [PATCH 020/112] Add monomorphization errors --- src/errors.rs | 198 +++++++++++++++++++++++++++++++++++++++- src/intrinsic/mod.rs | 13 +-- src/intrinsic/simd.rs | 203 +++++++++++++----------------------------- 3 files changed, 261 insertions(+), 153 deletions(-) diff --git a/src/errors.rs b/src/errors.rs index 938c0a74af3..a70ebf62da3 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,6 +1,7 @@ use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg}; use rustc_macros::SessionDiagnostic; -use rustc_span::Span; +use rustc_middle::ty::Ty; +use rustc_span::{Span, Symbol}; use std::borrow::Cow; struct ExitCode { @@ -29,6 +30,201 @@ impl RanlibFailure { } } +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_basic_integer, code = 
"E0511")] +pub(crate) struct InvalidMonomorphizationBasicInteger<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_invalid_float_vector, code = "E0511")] +pub(crate) struct InvalidMonomorphizationInvalidFloatVector<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub elem_ty: &'a str, + pub vec_ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_not_float, code = "E0511")] +pub(crate) struct InvalidMonomorphizationNotFloat<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_unrecognized, code = "E0511")] +pub(crate) struct InvalidMonomorphizationUnrecognized { + #[primary_span] + pub span: Span, + pub name: Symbol, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_expected_signed_unsigned, code = "E0511")] +pub(crate) struct InvalidMonomorphizationExpectedSignedUnsigned<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub elem_ty: Ty<'a>, + pub vec_ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_unsupported_element, code = "E0511")] +pub(crate) struct InvalidMonomorphizationUnsupportedElement<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_ty: Ty<'a>, + pub elem_ty: Ty<'a>, + pub ret_ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_invalid_bitmask, code = "E0511")] +pub(crate) struct InvalidMonomorphizationInvalidBitmask<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, + pub expected_int_bits: u64, + pub expected_bytes: u64, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_simd_shuffle, code = "E0511")] +pub(crate) struct InvalidMonomorphizationSimdShuffle<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_expected_simd, code = "E0511")] +pub(crate) struct InvalidMonomorphizationExpectedSimd<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub position: &'a str, + pub found_ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_mask_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationMaskType<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_return_length, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnLength<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_len: u64, + pub ret_ty: Ty<'a>, + pub out_len: u64, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_return_length_input_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnLengthInputType<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_len: u64, + pub in_ty: Ty<'a>, + pub ret_ty: Ty<'a>, + pub out_len: u64, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_return_element, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnElement<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_elem: Ty<'a>, + pub in_ty: Ty<'a>, + pub ret_ty: Ty<'a>, + pub out_ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] 
+#[diag(codegen_gcc::invalid_monomorphization_return_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnType<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_elem: Ty<'a>, + pub in_ty: Ty<'a>, + pub ret_ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_inserted_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationInsertedType<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_elem: Ty<'a>, + pub in_ty: Ty<'a>, + pub out_ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_return_integer_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnIntegerType<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ret_ty: Ty<'a>, + pub out_ty: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_mismatched_lengths, code = "E0511")] +pub(crate) struct InvalidMonomorphizationMismatchedLengths { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub m_len: u64, + pub v_len: u64, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_unsupported_cast, code = "E0511")] +pub(crate) struct InvalidMonomorphizationUnsupportedCast<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_ty: Ty<'a>, + pub in_elem: Ty<'a>, + pub ret_ty: Ty<'a>, + pub out_elem: Ty<'a>, +} + +#[derive(SessionDiagnostic)] +#[diag(codegen_gcc::invalid_monomorphization_unsupported_operation, code = "E0511")] +pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_ty: Ty<'a>, + pub in_elem: Ty<'a>, +} + #[derive(SessionDiagnostic)] #[diag(codegen_gcc::layout_size_overflow)] pub(crate) struct LayoutSizeOverflow { diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index 02cedd4646b..cc9c90c2427 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -4,7 +4,7 @@ mod simd; use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType}; use rustc_codegen_ssa::MemFlags; use rustc_codegen_ssa::base::wants_msvc_seh; -use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error}; +use rustc_codegen_ssa::common::IntPredicate; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; @@ -20,6 +20,7 @@ use crate::abi::GccType; use crate::builder::Builder; use crate::common::{SignType, TypeReflection}; use crate::context::CodegenCx; +use crate::errors::InvalidMonomorphizationBasicInteger; use crate::type_of::LayoutGccExt; use crate::intrinsic::simd::generic_simd_intrinsic; @@ -242,15 +243,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { _ => bug!(), }, None => { - span_invalid_monomorphization_error( - tcx.sess, - span, - &format!( - "invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", - name, ty - ), - ); + tcx.sess.emit_err(InvalidMonomorphizationBasicInteger { span, name, ty }); return; } } diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs index 2401f335018..22bd0a1de82 100644 --- a/src/intrinsic/simd.rs +++ b/src/intrinsic/simd.rs @@ -2,7 +2,7 @@ use std::cmp::Ordering; use gccjit::{BinaryOp, RValue, Type, ToRValue}; use rustc_codegen_ssa::base::compare_simd_types; -use 
rustc_codegen_ssa::common::{TypeKind, span_invalid_monomorphization_error}; +use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods}; @@ -14,43 +14,48 @@ use rustc_span::{Span, Symbol, sym}; use rustc_target::abi::Align; use crate::builder::Builder; +use crate::errors::{ + InvalidMonomorphizationInvalidFloatVector, + InvalidMonomorphizationNotFloat, + InvalidMonomorphizationUnrecognized, + InvalidMonomorphizationExpectedSignedUnsigned, + InvalidMonomorphizationUnsupportedElement, + InvalidMonomorphizationInvalidBitmask, + InvalidMonomorphizationSimdShuffle, + InvalidMonomorphizationExpectedSimd, + InvalidMonomorphizationMaskType, + InvalidMonomorphizationReturnLength, + InvalidMonomorphizationReturnLengthInputType, + InvalidMonomorphizationReturnElement, + InvalidMonomorphizationReturnType, + InvalidMonomorphizationInsertedType, + InvalidMonomorphizationReturnIntegerType, + InvalidMonomorphizationMismatchedLengths, + InvalidMonomorphizationUnsupportedCast, + InvalidMonomorphizationUnsupportedOperation +}; use crate::intrinsic; pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result, ()> { // macros for error handling: - #[allow(unused_macro_rules)] - macro_rules! emit_error { - ($msg: tt) => { - emit_error!($msg, ) - }; - ($msg: tt, $($fmt: tt)*) => { - span_invalid_monomorphization_error( - bx.sess(), span, - &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), - name, $($fmt)*)); - } - } - macro_rules! return_error { - ($($fmt: tt)*) => { + ($err:expr) => { { - emit_error!($($fmt)*); + bx.sess().emit_err($err); return Err(()); } } } - macro_rules! require { - ($cond: expr, $($fmt: tt)*) => { + ($cond:expr, $err:expr) => { if !$cond { - return_error!($($fmt)*); + return_error!($err); } - }; + } } - macro_rules! 
require_simd { ($ty: expr, $position: expr) => { - require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty) + require!($ty.is_simd(), InvalidMonomorphizationExpectedSimd { span, name, position: $position, found_ty: $ty }) }; } @@ -82,10 +87,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, bx.load(int_ty, ptr, Align::ONE) } _ => return_error!( - "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`", - mask_ty, - expected_int_bits, - expected_bytes + InvalidMonomorphizationInvalidBitmask { span, name, ty: mask_ty, expected_int_bits, expected_bytes } ), }; @@ -127,18 +129,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); require!( in_len == out_len, - "expected return type with length {} (same as input type `{}`), \ - found `{}` with length {}", - in_len, - in_ty, - ret_ty, - out_len + InvalidMonomorphizationReturnLengthInputType { span, name, in_len, in_ty, ret_ty, out_len } ); require!( bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, - "expected return type with integer elements, found `{}` with non-integer `{}`", - ret_ty, - out_ty + InvalidMonomorphizationReturnIntegerType {span, name, ret_ty, out_ty} ); return Ok(compare_simd_types( @@ -163,8 +158,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, }) } _ => return_error!( - "simd_shuffle index must be an array of `u32`, got `{}`", - args[2].layout.ty + InvalidMonomorphizationSimdShuffle { span, name, ty: args[2].layout.ty } ), } } @@ -179,19 +173,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); require!( out_len == n, - "expected return type of length {}, found `{}` with length {}", - n, - ret_ty, - out_len + InvalidMonomorphizationReturnLength { span, name, in_len: n, ret_ty, out_len } ); require!( in_elem == out_ty, - "expected return element type `{}` (element of input `{}`), \ - found `{}` with element type `{}`", - in_elem, - in_ty, - ret_ty, - out_ty + InvalidMonomorphizationReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty } ); let vector = args[2].immediate(); @@ -207,10 +193,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, if name == sym::simd_insert { require!( in_elem == arg_tys[2], - "expected inserted type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, - arg_tys[2] + InvalidMonomorphizationInsertedType { span, name, in_elem, in_ty, out_ty: arg_tys[2] } ); let vector = args[0].immediate(); let index = args[1].immediate(); @@ -263,10 +246,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, if name == sym::simd_extract { require!( ret_ty == in_elem, - "expected return type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, - ret_ty + InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } ); let vector = args[0].immediate(); return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue()); @@ -279,13 +259,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); require!( m_len == v_len, - "mismatched lengths: mask length `{}` != other vector length `{}`", - m_len, - v_len + InvalidMonomorphizationMismatchedLengths { span, name, m_len, v_len } ); match m_elem_ty.kind() { ty::Int(_) => {} - _ => 
return_error!("mask element type is `{}`, expected `i_`", m_elem_ty), + _ => return_error!(InvalidMonomorphizationMaskType { span, name, ty: m_elem_ty }), } return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate())); } @@ -295,12 +273,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); require!( in_len == out_len, - "expected return type with length {} (same as input type `{}`), \ - found `{}` with length {}", - in_len, - in_ty, - ret_ty, - out_len + InvalidMonomorphizationReturnLengthInputType { span, name, in_len, in_ty, ret_ty, out_len } ); // casting cares about nominal type, not just structural type if in_elem == out_elem { @@ -412,13 +385,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, } _ => { /* Unsupported. Fallthrough. */ } } - require!( - false, - "unsupported cast from `{}` with element `{}` to `{}` with element `{}`", - in_ty, - in_elem, - ret_ty, - out_elem + return_error!( + InvalidMonomorphizationUnsupportedCast { span, name, in_ty, in_elem, ret_ty, out_elem } ); } @@ -431,10 +399,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, })* _ => {}, } - require!(false, - "unsupported operation on `{}` with element `{}`", - in_ty, - in_elem) + return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem }) })* } } @@ -448,23 +413,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, span: Span, args: &[OperandRef<'tcx, RValue<'gcc>>], ) -> Result, ()> { - macro_rules! emit_error { - ($msg: tt, $($fmt: tt)*) => { - span_invalid_monomorphization_error( - bx.sess(), span, - &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), - name, $($fmt)*)); - } - } macro_rules! return_error { - ($($fmt: tt)*) => { + ($err:expr) => { { - emit_error!($($fmt)*); + bx.sess().emit_err($err); return Err(()); } } } - let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() { let elem_ty = bx.cx.type_float_from_ty(*f); @@ -472,16 +428,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, 32 => ("f32", elem_ty), 64 => ("f64", elem_ty), _ => { - return_error!( - "unsupported element type `{}` of floating-point vector `{}`", - f.name_str(), - in_ty - ); + // Can we pass elem_ty directly? 
+ return_error!(InvalidMonomorphizationInvalidFloatVector { span, name, elem_ty: f.name_str(), vec_ty: in_ty }); } } } else { - return_error!("`{}` is not a floating-point type", in_ty); + return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty }); }; let vec_ty = bx.cx.type_vector(elem_ty, in_len); @@ -504,7 +457,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)), - _ => return_error!("unrecognized intrinsic `{}`", name), + _ => return_error!(InvalidMonomorphizationUnrecognized { span, name }) }; let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx); @@ -557,10 +510,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, })* _ => {}, } - require!(false, - "unsupported operation on `{}` with element `{}`", - in_ty, - in_elem) + return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem }) })* } } @@ -579,12 +529,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)), ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)), _ => { - return_error!( - "expected element type `{}` of vector type `{}` \ - to be a signed or unsigned integer type", - arg_tys[0].simd_size_and_type(bx.tcx()).1, - arg_tys[0] - ); + return_error!(InvalidMonomorphizationExpectedSignedUnsigned { + span, + name, + elem_ty: arg_tys[0].simd_size_and_type(bx.tcx()).1, + vec_ty: arg_tys[0], + }); } }; let builtin_name = @@ -617,10 +567,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, if name == sym::$name { require!( ret_ty == in_elem, - "expected return type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, - ret_ty + InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } ); return match in_elem.kind() { ty::Int(_) | ty::Uint(_) => { @@ -644,13 +591,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op)) } } - _ => return_error!( - "unsupported {} from `{}` with element `{}` to `{}`", - sym::$name, - in_ty, - in_elem, - ret_ty - ), + _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), }; } }; @@ -676,20 +617,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, if name == sym::$name { require!( ret_ty == in_elem, - "expected return type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, - ret_ty + InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } ); return match in_elem.kind() { ty::Int(_) | ty::Uint(_) | ty::Float(_) => Ok(bx.$reduction(args[0].immediate())), - _ => return_error!( - "unsupported {} from `{}` with element `{}` to `{}`", - sym::$name, - in_ty, - in_elem, - ret_ty - ), + _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), }; } }; @@ -704,22 +636,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let input = if !$boolean { require!( ret_ty == in_elem, - "expected return type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, 
- ret_ty + InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } ); args[0].immediate() } else { match in_elem.kind() { ty::Int(_) | ty::Uint(_) => {} - _ => return_error!( - "unsupported {} from `{}` with element `{}` to `{}`", - sym::$name, - in_ty, - in_elem, - ret_ty - ), + _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), } // boolean reductions operate on vectors of i1s: @@ -733,11 +656,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) }) } _ => return_error!( - "unsupported {} from `{}` with element `{}` to `{}`", - sym::$name, - in_ty, - in_elem, - ret_ty + InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty } ), }; } From 5c839d995b09f5034dd73eaf0cbb276764d08e98 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sun, 11 Sep 2022 17:34:51 -0700 Subject: [PATCH 021/112] impl SessionDiagnostic for LayoutError and Spanned --- src/context.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/context.rs b/src/context.rs index 187e4c689ec..14de0cee28e 100644 --- a/src/context.rs +++ b/src/context.rs @@ -13,7 +13,7 @@ use rustc_middle::mir::mono::CodegenUnit; use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt}; use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers}; use rustc_session::Session; -use rustc_span::Span; +use rustc_span::{Span, source_map::respan}; use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx}; use rustc_target::spec::{HasTargetSpec, Target, TlsModel}; @@ -478,6 +478,23 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { if let LayoutError::SizeOverflow(_) = err { + let _ = respan(span, err); + // error: lifetime may not live long enough + // --> src/context.rs:483:13 + // | + // 475 | impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { + // | ---- ---- lifetime `'tcx` defined here + // | | + // | lifetime `'gcc` defined here + // ... 
+ // 483 | self.sess().emit_fatal(respan(span, err)) + // | ^^^^^^^^^^^ argument requires that `'gcc` must outlive `'tcx` + // | + // = help: consider adding the following bound: `'gcc: 'tcx` + // = note: requirement occurs because of the type `CodegenCx<'_, '_>`, which makes the generic argument `'_` invariant + // = note: the struct `CodegenCx<'gcc, 'tcx>` is invariant over the parameter `'gcc` + // = help: see for more information about variance + // self.sess().emit_fatal(respan(span, err)) self.sess().emit_fatal(LayoutSizeOverflow { span, error: err.to_string() }) } else { span_bug!(span, "failed to get layout for `{}`: {}", ty, err) From 43b320657141da7f447c782c9ea93a072cfd67b3 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sat, 24 Sep 2022 11:05:37 -0700 Subject: [PATCH 022/112] rebase and update trait names --- src/errors.rs | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/src/errors.rs b/src/errors.rs index a70ebf62da3..a1c95e7a7f4 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,5 +1,5 @@ use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg}; -use rustc_macros::SessionDiagnostic; +use rustc_macros::Diagnostic; use rustc_middle::ty::Ty; use rustc_span::{Span, Symbol}; use std::borrow::Cow; @@ -17,7 +17,7 @@ impl IntoDiagnosticArg for ExitCode { } } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::ranlib_failure)] pub(crate) struct RanlibFailure { exit_code: ExitCode, @@ -30,7 +30,7 @@ impl RanlibFailure { } } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_basic_integer, code = "E0511")] pub(crate) struct InvalidMonomorphizationBasicInteger<'a> { #[primary_span] @@ -39,7 +39,7 @@ pub(crate) struct InvalidMonomorphizationBasicInteger<'a> { pub ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_invalid_float_vector, code = "E0511")] pub(crate) struct InvalidMonomorphizationInvalidFloatVector<'a> { #[primary_span] @@ -49,7 +49,7 @@ pub(crate) struct InvalidMonomorphizationInvalidFloatVector<'a> { pub vec_ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_not_float, code = "E0511")] pub(crate) struct InvalidMonomorphizationNotFloat<'a> { #[primary_span] @@ -58,7 +58,7 @@ pub(crate) struct InvalidMonomorphizationNotFloat<'a> { pub ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_unrecognized, code = "E0511")] pub(crate) struct InvalidMonomorphizationUnrecognized { #[primary_span] @@ -66,7 +66,7 @@ pub(crate) struct InvalidMonomorphizationUnrecognized { pub name: Symbol, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_expected_signed_unsigned, code = "E0511")] pub(crate) struct InvalidMonomorphizationExpectedSignedUnsigned<'a> { #[primary_span] @@ -76,7 +76,7 @@ pub(crate) struct InvalidMonomorphizationExpectedSignedUnsigned<'a> { pub vec_ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_unsupported_element, code = "E0511")] pub(crate) struct InvalidMonomorphizationUnsupportedElement<'a> { #[primary_span] @@ -87,7 +87,7 @@ pub(crate) struct InvalidMonomorphizationUnsupportedElement<'a> { pub ret_ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_invalid_bitmask, code = "E0511")] 
pub(crate) struct InvalidMonomorphizationInvalidBitmask<'a> { #[primary_span] @@ -98,7 +98,7 @@ pub(crate) struct InvalidMonomorphizationInvalidBitmask<'a> { pub expected_bytes: u64, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_simd_shuffle, code = "E0511")] pub(crate) struct InvalidMonomorphizationSimdShuffle<'a> { #[primary_span] @@ -107,7 +107,7 @@ pub(crate) struct InvalidMonomorphizationSimdShuffle<'a> { pub ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_expected_simd, code = "E0511")] pub(crate) struct InvalidMonomorphizationExpectedSimd<'a> { #[primary_span] @@ -117,7 +117,7 @@ pub(crate) struct InvalidMonomorphizationExpectedSimd<'a> { pub found_ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_mask_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationMaskType<'a> { #[primary_span] @@ -126,7 +126,7 @@ pub(crate) struct InvalidMonomorphizationMaskType<'a> { pub ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_return_length, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnLength<'a> { #[primary_span] @@ -137,7 +137,7 @@ pub(crate) struct InvalidMonomorphizationReturnLength<'a> { pub out_len: u64, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_return_length_input_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnLengthInputType<'a> { #[primary_span] @@ -149,7 +149,7 @@ pub(crate) struct InvalidMonomorphizationReturnLengthInputType<'a> { pub out_len: u64, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_return_element, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnElement<'a> { #[primary_span] @@ -161,7 +161,7 @@ pub(crate) struct InvalidMonomorphizationReturnElement<'a> { pub out_ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_return_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnType<'a> { #[primary_span] @@ -172,7 +172,7 @@ pub(crate) struct InvalidMonomorphizationReturnType<'a> { pub ret_ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_inserted_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationInsertedType<'a> { #[primary_span] @@ -183,7 +183,7 @@ pub(crate) struct InvalidMonomorphizationInsertedType<'a> { pub out_ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_return_integer_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnIntegerType<'a> { #[primary_span] @@ -193,7 +193,7 @@ pub(crate) struct InvalidMonomorphizationReturnIntegerType<'a> { pub out_ty: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_mismatched_lengths, code = "E0511")] pub(crate) struct InvalidMonomorphizationMismatchedLengths { #[primary_span] @@ -203,7 +203,7 @@ pub(crate) struct InvalidMonomorphizationMismatchedLengths { pub v_len: u64, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_unsupported_cast, code = "E0511")] pub(crate) struct InvalidMonomorphizationUnsupportedCast<'a> { #[primary_span] @@ -215,7 +215,7 @@ pub(crate) struct 
InvalidMonomorphizationUnsupportedCast<'a> { pub out_elem: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::invalid_monomorphization_unsupported_operation, code = "E0511")] pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { #[primary_span] @@ -225,7 +225,7 @@ pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { pub in_elem: Ty<'a>, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::layout_size_overflow)] pub(crate) struct LayoutSizeOverflow { #[primary_span] @@ -233,18 +233,18 @@ pub(crate) struct LayoutSizeOverflow { pub error: String, } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::linkage_const_or_mut_type)] pub(crate) struct LinkageConstOrMutType { #[primary_span] pub span: Span } -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::lto_not_supported)] pub(crate) struct LTONotSupported; -#[derive(SessionDiagnostic)] +#[derive(Diagnostic)] #[diag(codegen_gcc::unwinding_inline_asm)] pub(crate) struct UnwindingInlineAsm { #[primary_span] From 0ae1e27b17cca88bdbdd5ffc84add51057d4cbf9 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sat, 24 Sep 2022 11:36:16 -0700 Subject: [PATCH 023/112] fix lifetime error --- src/context.rs | 24 +++--------------------- src/errors.rs | 8 -------- 2 files changed, 3 insertions(+), 29 deletions(-) diff --git a/src/context.rs b/src/context.rs index 14de0cee28e..46ade33eb01 100644 --- a/src/context.rs +++ b/src/context.rs @@ -18,7 +18,6 @@ use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDat use rustc_target::spec::{HasTargetSpec, Target, TlsModel}; use crate::callee::get_fn; -use crate::errors::LayoutSizeOverflow; #[derive(Clone)] pub struct FuncSig<'gcc> { @@ -294,7 +293,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ.is_compatible_with(self.bool_type) } - pub fn sess(&self) -> &Session { + pub fn sess(&self) -> &'tcx Session { &self.tcx.sess } @@ -478,24 +477,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { if let LayoutError::SizeOverflow(_) = err { - let _ = respan(span, err); - // error: lifetime may not live long enough - // --> src/context.rs:483:13 - // | - // 475 | impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { - // | ---- ---- lifetime `'tcx` defined here - // | | - // | lifetime `'gcc` defined here - // ... - // 483 | self.sess().emit_fatal(respan(span, err)) - // | ^^^^^^^^^^^ argument requires that `'gcc` must outlive `'tcx` - // | - // = help: consider adding the following bound: `'gcc: 'tcx` - // = note: requirement occurs because of the type `CodegenCx<'_, '_>`, which makes the generic argument `'_` invariant - // = note: the struct `CodegenCx<'gcc, 'tcx>` is invariant over the parameter `'gcc` - // = help: see for more information about variance - // self.sess().emit_fatal(respan(span, err)) - self.sess().emit_fatal(LayoutSizeOverflow { span, error: err.to_string() }) + self.sess().emit_fatal(respan(span, err)) } else { span_bug!(span, "failed to get layout for `{}`: {}", ty, err) } @@ -513,7 +495,7 @@ impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { fn_abi_request: FnAbiRequest<'tcx>, ) -> ! 
{ if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err { - self.sess().emit_fatal(LayoutSizeOverflow { span, error: err.to_string() }) + self.sess().emit_fatal(respan(span, err)) } else { match fn_abi_request { FnAbiRequest::OfFnPtr { sig, extra_args } => { diff --git a/src/errors.rs b/src/errors.rs index a1c95e7a7f4..eb8528104fa 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -225,14 +225,6 @@ pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { pub in_elem: Ty<'a>, } -#[derive(Diagnostic)] -#[diag(codegen_gcc::layout_size_overflow)] -pub(crate) struct LayoutSizeOverflow { - #[primary_span] - pub span: Span, - pub error: String, -} - #[derive(Diagnostic)] #[diag(codegen_gcc::linkage_const_or_mut_type)] pub(crate) struct LinkageConstOrMutType { From d1741f6d6225079ab1bc5f65bf19562069c48d5a Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sat, 24 Sep 2022 15:03:14 -0700 Subject: [PATCH 024/112] remove comment --- src/intrinsic/simd.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs index 22bd0a1de82..ffc85b773a2 100644 --- a/src/intrinsic/simd.rs +++ b/src/intrinsic/simd.rs @@ -428,7 +428,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, 32 => ("f32", elem_ty), 64 => ("f64", elem_ty), _ => { - // Can we pass elem_ty directly? return_error!(InvalidMonomorphizationInvalidFloatVector { span, name, elem_ty: f.name_str(), vec_ty: in_ty }); } } From 277b997adc88e197538827616778d0161ac7a714 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Mon, 26 Sep 2022 19:57:40 -0700 Subject: [PATCH 025/112] lint and remove unused diagnostic --- src/errors.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/errors.rs b/src/errors.rs index eb8528104fa..83f4af16612 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -4,13 +4,12 @@ use rustc_middle::ty::Ty; use rustc_span::{Span, Symbol}; use std::borrow::Cow; -struct ExitCode { - pub exit_code: Option, -} +struct ExitCode(Option); impl IntoDiagnosticArg for ExitCode { fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> { - match self.exit_code { + let ExitCode(exit_code) = self; + match exit_code { Some(t) => t.into_diagnostic_arg(), None => DiagnosticArgValue::Str(Cow::Borrowed("None")), } @@ -25,8 +24,7 @@ pub(crate) struct RanlibFailure { impl RanlibFailure { pub fn new(exit_code: Option) -> Self { - let exit_code = ExitCode{ exit_code }; - RanlibFailure { exit_code } + RanlibFailure { exit_code: ExitCode(exit_code) } } } From 4d398832a5ac5318d2b32de3cca1d39dc4040f9a Mon Sep 17 00:00:00 2001 From: Urgau Date: Sat, 24 Sep 2022 12:34:56 +0200 Subject: [PATCH 026/112] Stabilize bench_black_box --- tests/run/int.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run/int.rs b/tests/run/int.rs index 2b90e4ae8d8..75779622b54 100644 --- a/tests/run/int.rs +++ b/tests/run/int.rs @@ -3,7 +3,7 @@ // Run-time: // status: 0 -#![feature(bench_black_box, const_black_box, core_intrinsics, start)] +#![feature(const_black_box, core_intrinsics, start)] #![no_std] From 5ae3bf2ed3848b626cb301b2fd3034affe47b7f7 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Wed, 28 Sep 2022 19:02:38 -0700 Subject: [PATCH 027/112] print when ranlib failed without an exit code --- src/errors.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/errors.rs b/src/errors.rs index 83f4af16612..d7816e395c8 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -11,7 +11,7 @@ impl IntoDiagnosticArg for ExitCode { let 
ExitCode(exit_code) = self; match exit_code { Some(t) => t.into_diagnostic_arg(), - None => DiagnosticArgValue::Str(Cow::Borrowed("None")), + None => DiagnosticArgValue::Str(Cow::Borrowed("")), } } } From 2db7a873de20de1a67f953304213442ca93e3d0f Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sat, 1 Oct 2022 16:34:45 +0000 Subject: [PATCH 028/112] Remove unused Context assoc type from WriteBackendMethods --- src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 007d61ed51d..a70ed8084a9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -210,7 +210,6 @@ impl WriteBackendMethods for GccCodegenBackend { type Module = GccContext; type TargetMachine = (); type ModuleBuffer = ModuleBuffer; - type Context = (); type ThinData = (); type ThinBuffer = ThinBuffer; From 29edc888bdaa55b0362523a70279fbde50622d84 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sat, 1 Oct 2022 16:45:07 +0000 Subject: [PATCH 029/112] Remove several unused methods from MiscMethods --- src/context.rs | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/src/context.rs b/src/context.rs index 46ade33eb01..62a61eb8548 100644 --- a/src/context.rs +++ b/src/context.rs @@ -416,10 +416,6 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.codegen_unit } - fn used_statics(&self) -> &RefCell>> { - unimplemented!(); - } - fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) { // TODO(antoyo) } @@ -428,10 +424,6 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { // TODO(antoyo) } - fn create_used_variable(&self) { - unimplemented!(); - } - fn declare_c_main(&self, fn_type: Self::Type) -> Option { if self.get_declared_value("main").is_none() { Some(self.declare_cfn("main", fn_type)) @@ -443,14 +435,6 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { None } } - - fn compiler_used_statics(&self) -> &RefCell>> { - unimplemented!() - } - - fn create_compiler_used_variable(&self) { - unimplemented!() - } } impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> { From 69a065ef816acd1230a11bd2899e6f4e7e985e39 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sat, 1 Oct 2022 16:45:33 +0000 Subject: [PATCH 030/112] Remove unused target_cpu and tune_cpu methods from ExtraBackendMethods --- src/lib.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index a70ed8084a9..accd02ab002 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -171,15 +171,6 @@ impl ExtraBackendMethods for GccCodegenBackend { Ok(()) }) } - - fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str { - unimplemented!(); - } - - fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> { - None - // TODO(antoyo) - } } pub struct ModuleBuffer; From 3f43ee24075c50f8f1a1add793c037500a465d15 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sat, 1 Oct 2022 17:01:31 +0000 Subject: [PATCH 031/112] Merge apply_attrs_callsite into call and invoke Some codegen backends are not able to apply callsite attrs after the fact. 
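As a rough illustration of the new shape (a sketch, not code from this patch; the caller and variable names are hypothetical), a call site in a codegen backend changes from patching attributes onto the callsite afterwards to handing the `FnAbi` to `call`/`invoke` directly:

    // Before: build the call, then apply attributes to the finished callsite.
    // let callsite = bx.call(fn_ty, func, &args, None);
    // bx.apply_attrs_callsite(fn_abi, callsite);

    // After: the FnAbi travels with the call itself, so backends that cannot
    // attach callsite attributes after the fact can use it while emitting the call.
    let callsite = bx.call(fn_ty, Some(fn_abi), func, &args, None);
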
--- src/abi.rs | 4 ---- src/asm.rs | 2 +- src/builder.rs | 31 +++++++++++++++++++++++++++---- src/intrinsic/mod.rs | 6 +++--- src/intrinsic/simd.rs | 2 +- 5 files changed, 32 insertions(+), 13 deletions(-) diff --git a/src/abi.rs b/src/abi.rs index 848c34211ff..6fb1cbfad8c 100644 --- a/src/abi.rs +++ b/src/abi.rs @@ -11,10 +11,6 @@ use crate::intrinsic::ArgAbiExt; use crate::type_of::LayoutGccExt; impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { - fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) { - // TODO(antoyo) - } - fn get_param(&mut self, index: usize) -> Self::Value { let func = self.current_func(); let param = func.get_param(index as i32); diff --git a/src/asm.rs b/src/asm.rs index 007b001f213..c346dbd63cc 100644 --- a/src/asm.rs +++ b/src/asm.rs @@ -498,7 +498,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { if options.contains(InlineAsmOptions::NORETURN) { let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable"); let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) }; - self.call(self.type_void(), builtin_unreachable, &[], None); + self.call(self.type_void(), None, builtin_unreachable, &[], None); } // Write results to outputs. diff --git a/src/builder.rs b/src/builder.rs index 6994eeb00c3..e6fed9d35ca 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -444,11 +444,23 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.block.end_with_switch(None, value, default_block, &gcc_cases); } - fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { + fn invoke( + &mut self, + typ: Type<'gcc>, + fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, + func: RValue<'gcc>, + args: &[RValue<'gcc>], + then: Block<'gcc>, + catch: Block<'gcc>, + _funclet: Option<&Funclet>, + ) -> RValue<'gcc> { // TODO(bjorn3): Properly implement unwinding. - let call_site = self.call(typ, func, args, None); + let call_site = self.call(typ, None, func, args, None); let condition = self.context.new_rvalue_from_int(self.bool_type, 1); self.llbb().end_with_conditional(None, condition, then, catch); + if let Some(_fn_abi) = fn_abi { + // TODO(bjorn3): Apply function attributes + } call_site } @@ -1227,16 +1239,27 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { // TODO(antoyo) } - fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> { + fn call( + &mut self, + _typ: Type<'gcc>, + fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, + func: RValue<'gcc>, + args: &[RValue<'gcc>], + funclet: Option<&Funclet>, + ) -> RValue<'gcc> { // FIXME(antoyo): remove when having a proper API. let gcc_func = unsafe { std::mem::transmute(func) }; - if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() { + let call = if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() { self.function_call(func, args, funclet) } else { // If it's a not function that was defined, it's a function pointer. 
self.function_ptr_call(func, args, funclet) + }; + if let Some(_fn_abi) = fn_abi { + // TODO(bjorn3): Apply function attributes } + call } fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index cc9c90c2427..49be6c649e6 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -100,7 +100,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { _ if simple.is_some() => { // FIXME(antoyo): remove this cast when the API supports function. let func = unsafe { std::mem::transmute(simple.expect("simple")) }; - self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::>(), None) + self.call(self.type_void(), None, func, &args.iter().map(|arg| arg.immediate()).collect::>(), None) }, sym::likely => { self.expect(args[0].immediate(), true) @@ -341,7 +341,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { fn abort(&mut self) { let func = self.context.get_builtin_function("abort"); let func: RValue<'gcc> = unsafe { std::mem::transmute(func) }; - self.call(self.type_void(), func, &[], None); + self.call(self.type_void(), None, func, &[], None); } fn assume(&mut self, value: Self::Value) { @@ -1124,7 +1124,7 @@ fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue< // NOTE: the `|| true` here is to use the panic=abort strategy with panic=unwind too if bx.sess().panic_strategy() == PanicStrategy::Abort || true { // TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done. - bx.call(bx.type_void(), try_func, &[data], None); + bx.call(bx.type_void(), None, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. 
let ret_align = bx.tcx.data_layout.i32_align.abi; diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs index ffc85b773a2..12e416f62a4 100644 --- a/src/intrinsic/simd.rs +++ b/src/intrinsic/simd.rs @@ -461,7 +461,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx); let function: RValue<'gcc> = unsafe { std::mem::transmute(function) }; - let c = bx.call(fn_ty, function, &args.iter().map(|arg| arg.immediate()).collect::>(), None); + let c = bx.call(fn_ty, None, function, &args.iter().map(|arg| arg.immediate()).collect::>(), None); Ok(c) } From af0b18b454029cea852db105f15d443f4af1e2b1 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sat, 1 Oct 2022 17:34:21 +0000 Subject: [PATCH 032/112] Remove dynamic_alloca from BuilderMethods --- src/builder.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index e6fed9d35ca..15471ecdb03 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -655,10 +655,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None) } - fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> { - unimplemented!(); - } - fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> { unimplemented!(); } From 413edf67713195ebbf57ba50aac84e9387559bbe Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sat, 1 Oct 2022 18:22:46 +0000 Subject: [PATCH 033/112] Remove type argument of array_alloca and rename to byte_array_alloca --- src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/builder.rs b/src/builder.rs index 15471ecdb03..a314b7cc215 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -655,7 +655,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None) } - fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> { + fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> { unimplemented!(); } From 14e0e0fec386d42a8d980c63dd4ff3c11f6eeaf0 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Mon, 17 Oct 2022 22:38:37 +0100 Subject: [PATCH 034/112] Stabilize asm_sym --- tests/run/asm.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/run/asm.rs b/tests/run/asm.rs index 46abbb553bf..38c1eac7adf 100644 --- a/tests/run/asm.rs +++ b/tests/run/asm.rs @@ -3,11 +3,12 @@ // Run-time: // status: 0 -#![feature(asm_const, asm_sym)] +#![feature(asm_const)] use std::arch::{asm, global_asm}; -global_asm!(" +global_asm!( + " .global add_asm add_asm: mov rax, rdi @@ -132,7 +133,9 @@ fn main() { assert_eq!(x, 43); // check sym fn - extern "C" fn foo() -> u64 { 42 } + extern "C" fn foo() -> u64 { + 42 + } let x: u64; unsafe { asm!("call {}", sym foo, lateout("rax") x); From d3b02e31866dc3011702182b6e3226268acf110d Mon Sep 17 00:00:00 2001 From: Nilstrieb <48135649+Nilstrieb@users.noreply.github.com> Date: Sat, 22 Oct 2022 11:07:54 +0200 Subject: [PATCH 035/112] Migrate all diagnostics --- src/errors.rs | 46 
+++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/src/errors.rs b/src/errors.rs index d7816e395c8..15ad90f9043 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -17,7 +17,7 @@ impl IntoDiagnosticArg for ExitCode { } #[derive(Diagnostic)] -#[diag(codegen_gcc::ranlib_failure)] +#[diag(codegen_gcc_ranlib_failure)] pub(crate) struct RanlibFailure { exit_code: ExitCode, } @@ -29,7 +29,7 @@ impl RanlibFailure { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_basic_integer, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_basic_integer, code = "E0511")] pub(crate) struct InvalidMonomorphizationBasicInteger<'a> { #[primary_span] pub span: Span, @@ -38,7 +38,7 @@ pub(crate) struct InvalidMonomorphizationBasicInteger<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_invalid_float_vector, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_invalid_float_vector, code = "E0511")] pub(crate) struct InvalidMonomorphizationInvalidFloatVector<'a> { #[primary_span] pub span: Span, @@ -48,7 +48,7 @@ pub(crate) struct InvalidMonomorphizationInvalidFloatVector<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_not_float, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_not_float, code = "E0511")] pub(crate) struct InvalidMonomorphizationNotFloat<'a> { #[primary_span] pub span: Span, @@ -57,7 +57,7 @@ pub(crate) struct InvalidMonomorphizationNotFloat<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_unrecognized, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_unrecognized, code = "E0511")] pub(crate) struct InvalidMonomorphizationUnrecognized { #[primary_span] pub span: Span, @@ -65,7 +65,7 @@ pub(crate) struct InvalidMonomorphizationUnrecognized { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_expected_signed_unsigned, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_expected_signed_unsigned, code = "E0511")] pub(crate) struct InvalidMonomorphizationExpectedSignedUnsigned<'a> { #[primary_span] pub span: Span, @@ -75,7 +75,7 @@ pub(crate) struct InvalidMonomorphizationExpectedSignedUnsigned<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_unsupported_element, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_unsupported_element, code = "E0511")] pub(crate) struct InvalidMonomorphizationUnsupportedElement<'a> { #[primary_span] pub span: Span, @@ -86,7 +86,7 @@ pub(crate) struct InvalidMonomorphizationUnsupportedElement<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_invalid_bitmask, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_invalid_bitmask, code = "E0511")] pub(crate) struct InvalidMonomorphizationInvalidBitmask<'a> { #[primary_span] pub span: Span, @@ -97,7 +97,7 @@ pub(crate) struct InvalidMonomorphizationInvalidBitmask<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_simd_shuffle, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_simd_shuffle, code = "E0511")] pub(crate) struct InvalidMonomorphizationSimdShuffle<'a> { #[primary_span] pub span: Span, @@ -106,7 +106,7 @@ pub(crate) struct InvalidMonomorphizationSimdShuffle<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_expected_simd, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_expected_simd, code = "E0511")] pub(crate) struct 
InvalidMonomorphizationExpectedSimd<'a> { #[primary_span] pub span: Span, @@ -116,7 +116,7 @@ pub(crate) struct InvalidMonomorphizationExpectedSimd<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_mask_type, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_mask_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationMaskType<'a> { #[primary_span] pub span: Span, @@ -125,7 +125,7 @@ pub(crate) struct InvalidMonomorphizationMaskType<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_return_length, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_return_length, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnLength<'a> { #[primary_span] pub span: Span, @@ -136,7 +136,7 @@ pub(crate) struct InvalidMonomorphizationReturnLength<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_return_length_input_type, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_return_length_input_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnLengthInputType<'a> { #[primary_span] pub span: Span, @@ -148,7 +148,7 @@ pub(crate) struct InvalidMonomorphizationReturnLengthInputType<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_return_element, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_return_element, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnElement<'a> { #[primary_span] pub span: Span, @@ -160,7 +160,7 @@ pub(crate) struct InvalidMonomorphizationReturnElement<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_return_type, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_return_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnType<'a> { #[primary_span] pub span: Span, @@ -171,7 +171,7 @@ pub(crate) struct InvalidMonomorphizationReturnType<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_inserted_type, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_inserted_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationInsertedType<'a> { #[primary_span] pub span: Span, @@ -182,7 +182,7 @@ pub(crate) struct InvalidMonomorphizationInsertedType<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_return_integer_type, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_return_integer_type, code = "E0511")] pub(crate) struct InvalidMonomorphizationReturnIntegerType<'a> { #[primary_span] pub span: Span, @@ -192,7 +192,7 @@ pub(crate) struct InvalidMonomorphizationReturnIntegerType<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_mismatched_lengths, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_mismatched_lengths, code = "E0511")] pub(crate) struct InvalidMonomorphizationMismatchedLengths { #[primary_span] pub span: Span, @@ -202,7 +202,7 @@ pub(crate) struct InvalidMonomorphizationMismatchedLengths { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_unsupported_cast, code = "E0511")] +#[diag(codegen_gcc_invalid_monomorphization_unsupported_cast, code = "E0511")] pub(crate) struct InvalidMonomorphizationUnsupportedCast<'a> { #[primary_span] pub span: Span, @@ -214,7 +214,7 @@ pub(crate) struct InvalidMonomorphizationUnsupportedCast<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::invalid_monomorphization_unsupported_operation, code = "E0511")] 
+#[diag(codegen_gcc_invalid_monomorphization_unsupported_operation, code = "E0511")] pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { #[primary_span] pub span: Span, @@ -224,18 +224,18 @@ pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc::linkage_const_or_mut_type)] +#[diag(codegen_gcc_linkage_const_or_mut_type)] pub(crate) struct LinkageConstOrMutType { #[primary_span] pub span: Span } #[derive(Diagnostic)] -#[diag(codegen_gcc::lto_not_supported)] +#[diag(codegen_gcc_lto_not_supported)] pub(crate) struct LTONotSupported; #[derive(Diagnostic)] -#[diag(codegen_gcc::unwinding_inline_asm)] +#[diag(codegen_gcc_unwinding_inline_asm)] pub(crate) struct UnwindingInlineAsm { #[primary_span] pub span: Span From e30385bc06226bb823a1891e7f66bf069dee20a9 Mon Sep 17 00:00:00 2001 From: Daniel Paoliello Date: Wed, 12 Oct 2022 14:44:01 -0700 Subject: [PATCH 036/112] Support raw-dylib functions being used inside inlined functions --- src/archive.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/archive.rs b/src/archive.rs index ac0342f6b80..f18ae7ea5e9 100644 --- a/src/archive.rs +++ b/src/archive.rs @@ -47,6 +47,7 @@ impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder { _lib_name: &str, _dll_imports: &[DllImport], _tmpdir: &Path, + _is_direct_dependency: bool, ) -> PathBuf { unimplemented!(); } From 5a1c8e8e5c8b8a7e0a0d1a576be425226725d59b Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Fri, 14 Oct 2022 02:24:58 +0100 Subject: [PATCH 037/112] Rewrite implementation of `#[alloc_error_handler]` The new implementation doesn't use weak lang items and instead changes `#[alloc_error_handler]` to an attribute macro just like `#[global_allocator]`. The attribute will generate the `__rg_oom` function which is called by the compiler-generated `__rust_alloc_error_handler`. If no `__rg_oom` function is defined in any crate then the compiler shim will call `__rdl_oom` in the alloc crate which will simply panic. This also fixes link errors with `-C link-dead-code` with `default_alloc_error_handler`: `__rg_oom` was previously defined in the alloc crate and would attempt to reference the `oom` lang item, even if it didn't exist. This worked as long as `__rg_oom` was excluded from linking since it was not called. This is a prerequisite for the stabilization of `default_alloc_error_handler` (#102318). 
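For illustration, the handler chain described above can be sketched roughly as follows; the symbol names come from the message above, but the exact signatures and bodies are assumptions, not code from this patch:

    extern "Rust" {
        // Generated by `#[alloc_error_handler]` in whichever crate defines one;
        // if no crate defines it, the compiler instead routes to `__rdl_oom` in
        // alloc, which simply panics. Signature assumed for illustration.
        fn __rg_oom(size: usize, align: usize) -> !;
    }

    // Compiler-generated shim that allocation failure paths call into
    // (signature and body assumed for illustration).
    #[no_mangle]
    unsafe extern "Rust" fn __rust_alloc_error_handler(size: usize, align: usize) -> ! {
        __rg_oom(size, align)
    }
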
--- src/allocator.rs | 11 ++--------- src/lib.rs | 4 ++-- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/src/allocator.rs b/src/allocator.rs index 58efb81e800..e2c9ffe9c1c 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -7,7 +7,7 @@ use rustc_span::symbol::sym; use crate::GccContext; -pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) { +pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) { let context = &mods.context; let usize = match tcx.sess.target.pointer_width { @@ -90,14 +90,7 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam .collect(); let func = context.new_function(None, FunctionType::Exported, void, &args, name, false); - let kind = - if has_alloc_error_handler { - AllocatorKind::Global - } - else { - AllocatorKind::Default - }; - let callee = kind.fn_name(sym::oom); + let callee = alloc_error_handler_kind.fn_name(sym::oom); let args: Vec<_> = types.iter().enumerate() .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) .collect(); diff --git a/src/lib.rs b/src/lib.rs index accd02ab002..dd0daf2c38b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -153,11 +153,11 @@ impl CodegenBackend for GccCodegenBackend { } impl ExtraBackendMethods for GccCodegenBackend { - fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) -> Self::Module { + fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) -> Self::Module { let mut mods = GccContext { context: Context::default(), }; - unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, has_alloc_error_handler); } + unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind); } mods } From 87237cb69935730623917aa4674901270b938c26 Mon Sep 17 00:00:00 2001 From: Ayush Singh Date: Sun, 6 Nov 2022 14:01:46 +0530 Subject: [PATCH 038/112] Add type_array to BaseTypeMethods Moved type_array function to rustc_codegen_ssa::BaseTypeMethods trait. This allows using normal alloca function to create arrays as suggested in https://github.com/rust-lang/rust/pull/104022. Signed-off-by: Ayush Singh --- src/type_.rs | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/src/type_.rs b/src/type_.rs index 68bdb8d4e55..862ed62c68b 100644 --- a/src/type_.rs +++ b/src/type_.rs @@ -201,6 +201,27 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> { value.get_type() } + + fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> { + if let Some(struct_type) = ty.is_struct() { + if struct_type.get_field_count() == 0 { + // NOTE: since gccjit only supports i32 for the array size and libcore's tests uses a + // size of usize::MAX in test_binary_search, we workaround this by setting the size to + // zero for ZSTs. + // FIXME(antoyo): fix gccjit API. + len = 0; + } + } + + // NOTE: see note above. Some other test uses usize::MAX. 
+ if len == u64::MAX { + len = 0; + } + + let len: i32 = len.try_into().expect("array len"); + + self.context.new_array_type(None, ty, len) + } } impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { @@ -227,27 +248,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { self.context.new_opaque_struct_type(None, name) } - pub fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> { - if let Some(struct_type) = ty.is_struct() { - if struct_type.get_field_count() == 0 { - // NOTE: since gccjit only supports i32 for the array size and libcore's tests uses a - // size of usize::MAX in test_binary_search, we workaround this by setting the size to - // zero for ZSTs. - // FIXME(antoyo): fix gccjit API. - len = 0; - } - } - - // NOTE: see note above. Some other test uses usize::MAX. - if len == u64::MAX { - len = 0; - } - - let len: i32 = len.try_into().expect("array len"); - - self.context.new_array_type(None, ty, len) - } - pub fn type_bool(&self) -> Type<'gcc> { self.context.new_type::() } From e876f43599acf69a094ecbbbc04c10d7d8f58356 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 6 Nov 2022 14:15:20 +0100 Subject: [PATCH 039/112] fix cranelift and gcc --- src/consts.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/consts.rs b/src/consts.rs index 81f53328867..111bfeb1322 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -297,12 +297,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> { let alloc = alloc.inner(); - let mut llvals = Vec::with_capacity(alloc.provenance().len() + 1); + let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1); let dl = cx.data_layout(); let pointer_size = dl.pointer_size.bytes() as usize; let mut next_offset = 0; - for &(offset, alloc_id) in alloc.provenance().iter() { + for &(offset, alloc_id) in alloc.provenance().ptrs().iter() { let offset = offset.bytes(); assert_eq!(offset as usize as u64, offset); let offset = offset as usize; From 2596ab4d75abec169b96bae468ae752838602480 Mon Sep 17 00:00:00 2001 From: Caleb Zulawski Date: Sat, 29 Oct 2022 23:36:47 -0400 Subject: [PATCH 040/112] Allow actual AVX512-related feature names in the case of some misleading aliases --- src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index dd0daf2c38b..b9600da5c39 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -315,10 +315,10 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec { false } /* - adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512gfni, - avx512ifma, avx512pf, avx512vaes, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpclmulqdq, - avx512vpopcntdq, bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm, - sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, xsave, xsavec, xsaveopt, xsaves + adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512ifma, + avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq, + bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm, + sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves */ //false }) From 358419cc25a86cfc48bbf30041f29b955bd2c881 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 13 Nov 2022 
12:14:59 +0100 Subject: [PATCH 041/112] add is_sized method on Abi and Layout, and use it --- src/type_.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/type_.rs b/src/type_.rs index 862ed62c68b..bdf7318ce48 100644 --- a/src/type_.rs +++ b/src/type_.rs @@ -277,7 +277,7 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout offset = target_offset + field.size; prev_effective_align = effective_field_align; } - if !layout.is_unsized() && field_count > 0 { + if layout.is_sized() && field_count > 0 { if offset > layout.size { bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset); } From 695c76c45ac9819f2d3c0d3d93dd4f9eaf4041bd Mon Sep 17 00:00:00 2001 From: Ayush Singh Date: Tue, 15 Nov 2022 22:15:55 +0530 Subject: [PATCH 042/112] Use custom entry name in gcc This is a continuation of 9f0a8620bd7d325e6d42417b08daff3e55cb88f6 for gcc. Signed-off-by: Ayush Singh --- src/context.rs | 5 +++-- src/declare.rs | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/context.rs b/src/context.rs index 62a61eb8548..2e71c3665da 100644 --- a/src/context.rs +++ b/src/context.rs @@ -425,8 +425,9 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } fn declare_c_main(&self, fn_type: Self::Type) -> Option { - if self.get_declared_value("main").is_none() { - Some(self.declare_cfn("main", fn_type)) + let entry_name = self.sess().target.entry_name.as_ref(); + if self.get_declared_value(entry_name).is_none() { + Some(self.declare_entry_fn(entry_name, fn_type, ())) } else { // If the symbol already exists, it is an error: for example, the user wrote diff --git a/src/declare.rs b/src/declare.rs index a619e2f7712..eae77508c97 100644 --- a/src/declare.rs +++ b/src/declare.rs @@ -65,13 +65,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { global } - pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> { + pub fn declare_entry_fn(&self, name: &str, _fn_type: Type<'gcc>, callconv: () /*llvm::CCallConv*/) -> RValue<'gcc> { // TODO(antoyo): use the fn_type parameter. let const_string = self.context.new_type::().make_pointer().make_pointer(); let return_type = self.type_i32(); let variadic = false; self.linkage.set(FunctionType::Exported); - let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, &[self.type_i32(), const_string], variadic); + let func = declare_raw_fn(self, name, callconv, return_type, &[self.type_i32(), const_string], variadic); // NOTE: it is needed to set the current_func here as well, because get_fn() is not called // for the main function. *self.current_func.borrow_mut() = Some(func); From af8682b8b40b6c374912388315e7220a75dc16d9 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sat, 1 Oct 2022 23:10:36 +0200 Subject: [PATCH 043/112] Introduce composite debuginfo. 
--- src/debuginfo.rs | 57 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 8 deletions(-) diff --git a/src/debuginfo.rs b/src/debuginfo.rs index 266759ed6cf..a81585d4128 100644 --- a/src/debuginfo.rs +++ b/src/debuginfo.rs @@ -4,8 +4,9 @@ use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoMethods}; use rustc_middle::mir; use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty}; use rustc_span::{SourceFile, Span, Symbol}; -use rustc_target::abi::Size; use rustc_target::abi::call::FnAbi; +use rustc_target::abi::Size; +use std::ops::Range; use crate::builder::Builder; use crate::context::CodegenCx; @@ -13,7 +14,15 @@ use crate::context::CodegenCx; impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { // FIXME(eddyb) find a common convention for all of the debuginfo-related // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.). - fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) { + fn dbg_var_addr( + &mut self, + _dbg_var: Self::DIVariable, + _scope_metadata: Self::DIScope, + _variable_alloca: Self::Value, + _direct_offset: Size, + _indirect_offsets: &[Size], + _fragment: Option>, + ) { unimplemented!(); } @@ -31,16 +40,31 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { } impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { - fn create_vtable_debuginfo(&self, _ty: Ty<'tcx>, _trait_ref: Option>, _vtable: Self::Value) { + fn create_vtable_debuginfo( + &self, + _ty: Ty<'tcx>, + _trait_ref: Option>, + _vtable: Self::Value, + ) { // TODO(antoyo) } - fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option> { + fn create_function_debug_context( + &self, + _instance: Instance<'tcx>, + _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + _llfn: RValue<'gcc>, + _mir: &mir::Body<'tcx>, + ) -> Option> { // TODO(antoyo) None } - fn extend_scope_to_file(&self, _scope_metadata: Self::DIScope, _file: &SourceFile) -> Self::DIScope { + fn extend_scope_to_file( + &self, + _scope_metadata: Self::DIScope, + _file: &SourceFile, + ) -> Self::DIScope { unimplemented!(); } @@ -48,15 +72,32 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { // TODO(antoyo) } - fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable { + fn create_dbg_var( + &self, + _variable_name: Symbol, + _variable_type: Ty<'tcx>, + _scope_metadata: Self::DIScope, + _variable_kind: VariableKind, + _span: Span, + ) -> Self::DIVariable { unimplemented!(); } - fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option>) -> Self::DIScope { + fn dbg_scope_fn( + &self, + _instance: Instance<'tcx>, + _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + _maybe_definition_llfn: Option>, + ) -> Self::DIScope { unimplemented!(); } - fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option, _span: Span) -> Self::DILocation { + fn dbg_loc( + &self, + _scope: Self::DIScope, + _inlined_at: Option, + _span: Span, + ) -> Self::DILocation { unimplemented!(); } } From d01b63a09ab53ec37a41b51b6e30f0f44c44f43b Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 9 Nov 2022 11:04:10 +1100 Subject: [PATCH 044/112] Use `&mut Bx` more. 
For the next commit, `FunctionCx::codegen_*_terminator` need to take a `&mut Bx` instead of consuming a `Bx`. This triggers a cascade of similar changes across multiple functions. The resulting code is more concise and replaces many `&mut bx` expressions with `bx`. --- src/builder.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index a314b7cc215..782f6856654 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -755,11 +755,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { OperandRef { val, layout: place.layout } } - fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self { + fn write_operand_repeatedly(&mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) { let zero = self.const_usize(0); let count = self.const_usize(count); - let start = dest.project_index(&mut self, zero).llval; - let end = dest.project_index(&mut self, count).llval; + let start = dest.project_index(self, zero).llval; + let end = dest.project_index(self, count).llval; let header_bb = self.append_sibling_block("repeat_loop_header"); let body_bb = self.append_sibling_block("repeat_loop_body"); @@ -778,14 +778,13 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.switch_to_block(body_bb); let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); - cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align)); + cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align)); let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]); self.llbb().add_assignment(None, current, next); self.br(header_bb); self.switch_to_block(next_bb); - self } fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) { From 39511672e764f911df5e5ee9de79aa9b992a86c8 Mon Sep 17 00:00:00 2001 From: Chris Denton Date: Sat, 26 Nov 2022 09:54:54 +0000 Subject: [PATCH 045/112] Remove more redundant `all`s --- example/alloc_system.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/example/alloc_system.rs b/example/alloc_system.rs index 89661918d05..fd01fcf1fc8 100644 --- a/example/alloc_system.rs +++ b/example/alloc_system.rs @@ -13,17 +13,17 @@ // The minimum alignment guaranteed by the architecture. This value is used to // add fast paths for low alignment values. 
-#[cfg(all(any(target_arch = "x86", +#[cfg(any(target_arch = "x86", target_arch = "arm", target_arch = "mips", target_arch = "powerpc", - target_arch = "powerpc64")))] + target_arch = "powerpc64"))] const MIN_ALIGN: usize = 8; -#[cfg(all(any(target_arch = "x86_64", +#[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "mips64", target_arch = "s390x", - target_arch = "sparc64")))] + target_arch = "sparc64"))] const MIN_ALIGN: usize = 16; pub struct System; From 690085c2dab7e28851a0521e9513eafc5acb4e4e Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sat, 28 May 2022 10:43:51 +0000 Subject: [PATCH 046/112] Rewrite LLVM's archive writer in Rust This allows it to be used by other codegen backends --- Cargo.lock | 14 ---- Cargo.toml | 4 -- src/archive.rs | 177 ++----------------------------------------------- src/errors.rs | 16 +---- 4 files changed, 8 insertions(+), 203 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6df2102470f..1cb219e12e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11,12 +11,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "ar" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "450575f58f7bee32816abbff470cbc47797397c2a81e0eaced4b98436daf52e1" - [[package]] name = "bitflags" version = "1.3.2" @@ -212,10 +206,8 @@ dependencies = [ name = "rustc_codegen_gcc" version = "0.1.0" dependencies = [ - "ar", "gccjit", "lang_tester", - "target-lexicon", "tempfile", ] @@ -228,12 +220,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "target-lexicon" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab0e7238dcc7b40a7be719a25365910f6807bd864f4cce6b2e6b873658e2b19d" - [[package]] name = "tempfile" version = "3.2.0" diff --git a/Cargo.toml b/Cargo.toml index 211d19a8dc8..1f3da2f799b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,10 +27,6 @@ gccjit = { git = "https://github.com/antoyo/gccjit.rs" } # Local copy. 
#gccjit = { path = "../gccjit.rs" } -target-lexicon = "0.10.0" - -ar = "0.8.0" - [dev-dependencies] lang_tester = "0.3.9" tempfile = "3.1.0" diff --git a/src/archive.rs b/src/archive.rs index f18ae7ea5e9..11fa074f5ac 100644 --- a/src/archive.rs +++ b/src/archive.rs @@ -1,44 +1,17 @@ -use std::fs::File; use std::path::{Path, PathBuf}; -use crate::errors::RanlibFailure; - -use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder}; +use rustc_codegen_ssa::back::archive::{ + get_native_object_symbols, ArArchiveBuilder, ArchiveBuilder, ArchiveBuilderBuilder, +}; use rustc_session::Session; use rustc_session::cstore::DllImport; -struct ArchiveConfig<'a> { - sess: &'a Session, - use_native_ar: bool, - use_gnu_style_archive: bool, -} - -#[derive(Debug)] -enum ArchiveEntry { - FromArchive { - archive_index: usize, - entry_index: usize, - }, - File(PathBuf), -} - -pub struct ArArchiveBuilderBuilder; +pub(crate) struct ArArchiveBuilderBuilder; impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder { fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box + 'a> { - let config = ArchiveConfig { - sess, - use_native_ar: false, - // FIXME test for linux and System V derivatives instead - use_gnu_style_archive: sess.target.options.archive_format == "gnu", - }; - - Box::new(ArArchiveBuilder { - config, - src_archives: vec![], - entries: vec![], - }) + Box::new(ArArchiveBuilder::new(sess, get_native_object_symbols)) } fn create_dll_import_lib( @@ -49,144 +22,6 @@ impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder { _tmpdir: &Path, _is_direct_dependency: bool, ) -> PathBuf { - unimplemented!(); - } -} - -pub struct ArArchiveBuilder<'a> { - config: ArchiveConfig<'a>, - src_archives: Vec<(PathBuf, ar::Archive)>, - // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at - // the end of an archive for linkers to not get confused. 
- entries: Vec<(String, ArchiveEntry)>, -} - -impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { - fn add_file(&mut self, file: &Path) { - self.entries.push(( - file.file_name().unwrap().to_str().unwrap().to_string(), - ArchiveEntry::File(file.to_owned()), - )); - } - - fn add_archive( - &mut self, - archive_path: &Path, - mut skip: Box bool + 'static>, - ) -> std::io::Result<()> { - let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?); - let archive_index = self.src_archives.len(); - - let mut i = 0; - while let Some(entry) = archive.next_entry() { - let entry = entry?; - let file_name = String::from_utf8(entry.header().identifier().to_vec()) - .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?; - if !skip(&file_name) { - self.entries - .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i })); - } - i += 1; - } - - self.src_archives.push((archive_path.to_owned(), archive)); - Ok(()) - } - - fn build(mut self: Box, output: &Path) -> bool { - use std::process::Command; - - fn add_file_using_ar(archive: &Path, file: &Path) { - Command::new("ar") - .arg("r") // add or replace file - .arg("-c") // silence created file message - .arg(archive) - .arg(&file) - .status() - .unwrap(); - } - - enum BuilderKind<'a> { - Bsd(ar::Builder), - Gnu(ar::GnuBuilder), - NativeAr(&'a Path), - } - - let mut builder = if self.config.use_native_ar { - BuilderKind::NativeAr(output) - } else if self.config.use_gnu_style_archive { - BuilderKind::Gnu(ar::GnuBuilder::new( - File::create(output).unwrap(), - self.entries - .iter() - .map(|(name, _)| name.as_bytes().to_vec()) - .collect(), - )) - } else { - BuilderKind::Bsd(ar::Builder::new(File::create(output).unwrap())) - }; - - let any_members = !self.entries.is_empty(); - - // Add all files - for (entry_name, entry) in self.entries.into_iter() { - match entry { - ArchiveEntry::FromArchive { - archive_index, - entry_index, - } => { - let (ref src_archive_path, ref mut src_archive) = - self.src_archives[archive_index]; - let entry = src_archive.jump_to_entry(entry_index).unwrap(); - let header = entry.header().clone(); - - match builder { - BuilderKind::Bsd(ref mut builder) => { - builder.append(&header, entry).unwrap() - } - BuilderKind::Gnu(ref mut builder) => { - builder.append(&header, entry).unwrap() - } - BuilderKind::NativeAr(archive_file) => { - Command::new("ar") - .arg("x") - .arg(src_archive_path) - .arg(&entry_name) - .status() - .unwrap(); - add_file_using_ar(archive_file, Path::new(&entry_name)); - std::fs::remove_file(entry_name).unwrap(); - } - } - } - ArchiveEntry::File(file) => - match builder { - BuilderKind::Bsd(ref mut builder) => { - builder - .append_file(entry_name.as_bytes(), &mut File::open(file).expect("file for bsd builder")) - .unwrap() - }, - BuilderKind::Gnu(ref mut builder) => { - builder - .append_file(entry_name.as_bytes(), &mut File::open(&file).expect(&format!("file {:?} for gnu builder", file))) - .unwrap() - }, - BuilderKind::NativeAr(archive_file) => add_file_using_ar(archive_file, &file), - }, - } - } - - // Finalize archive - std::mem::drop(builder); - - // Run ranlib to be able to link the archive - let status = - std::process::Command::new("ranlib").arg(output).status().expect("Couldn't run ranlib"); - - if !status.success() { - self.config.sess.emit_fatal(RanlibFailure::new(status.code())); - } - - any_members + unimplemented!("creating dll imports is not yet supported"); } } diff --git a/src/errors.rs b/src/errors.rs index 15ad90f9043..89fed7be131 100644 
--- a/src/errors.rs +++ b/src/errors.rs @@ -16,18 +16,6 @@ impl IntoDiagnosticArg for ExitCode { } } -#[derive(Diagnostic)] -#[diag(codegen_gcc_ranlib_failure)] -pub(crate) struct RanlibFailure { - exit_code: ExitCode, -} - -impl RanlibFailure { - pub fn new(exit_code: Option) -> Self { - RanlibFailure { exit_code: ExitCode(exit_code) } - } -} - #[derive(Diagnostic)] #[diag(codegen_gcc_invalid_monomorphization_basic_integer, code = "E0511")] pub(crate) struct InvalidMonomorphizationBasicInteger<'a> { @@ -227,7 +215,7 @@ pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { #[diag(codegen_gcc_linkage_const_or_mut_type)] pub(crate) struct LinkageConstOrMutType { #[primary_span] - pub span: Span + pub span: Span, } #[derive(Diagnostic)] @@ -238,5 +226,5 @@ pub(crate) struct LTONotSupported; #[diag(codegen_gcc_unwinding_inline_asm)] pub(crate) struct UnwindingInlineAsm { #[primary_span] - pub span: Span + pub span: Span, } From 38dfdee36a19e58c6de61ba4792931596a233a6c Mon Sep 17 00:00:00 2001 From: Maybe Waffle Date: Sun, 27 Nov 2022 11:15:06 +0000 Subject: [PATCH 047/112] Prefer doc comments over `//`-comments in compiler --- src/context.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/context.rs b/src/context.rs index 2e71c3665da..837708aeb0e 100644 --- a/src/context.rs +++ b/src/context.rs @@ -88,9 +88,9 @@ pub struct CodegenCx<'gcc, 'tcx> { pub vtables: RefCell, Option>), RValue<'gcc>>>, // TODO(antoyo): improve the SSA API to not require those. - // Mapping from function pointer type to indexes of on stack parameters. + /// Mapping from function pointer type to indexes of on stack parameters. pub on_stack_params: RefCell, FxHashSet>>, - // Mapping from function to indexes of on stack parameters. + /// Mapping from function to indexes of on stack parameters. pub on_stack_function_params: RefCell, FxHashSet>>, /// Cache of emitted const globals (value -> global) From 213ced1c43afaec00bd7b532e7b0aa25b205336e Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sat, 3 Dec 2022 18:27:18 +0000 Subject: [PATCH 048/112] Destruct landing_pad return value before passing it to cg_ssa --- src/builder.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 782f6856654..effb2de4827 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1119,18 +1119,18 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { // TODO(antoyo) } - fn cleanup_landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>) -> RValue<'gcc> { - let field1 = self.context.new_field(None, self.u8_type.make_pointer(), "landing_pad_field_1"); - let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_1"); - let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]); - self.current_func().new_local(None, struct_type.as_type(), "landing_pad") - .to_rvalue() + fn cleanup_landing_pad(&mut self, _pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) { + ( + self.current_func().new_local(None, self.u8_type.make_pointer(), "landing_pad0") + .to_rvalue(), + self.current_func().new_local(None, self.i32_type, "landing_pad1").to_rvalue(), + ) // TODO(antoyo): Properly implement unwinding. // the above is just to make the compilation work as it seems // rustc_codegen_ssa now calls the unwinding builder methods even on panic=abort. 
} - fn resume(&mut self, _exn: RValue<'gcc>) { + fn resume(&mut self, _exn0: RValue<'gcc>, _exn1: RValue<'gcc>) { // TODO(bjorn3): Properly implement unwinding. self.unreachable(); } From 6181cb2929890000dc63a3b59e60ff62b98770f9 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Wed, 23 Nov 2022 18:13:30 -0800 Subject: [PATCH 049/112] Move linkage type check to HIR analysis and fix semantics issues. This ensures that the error is printed even for unused variables, as well as unifying the handling between the LLVM and GCC backends. This also fixes unusual behavior around exported Rust-defined variables with linkage attributes. With the previous behavior, it appears to be impossible to define such a variable such that it can actually be imported and used by another crate. This is because on the importing side, the variable is required to be a pointer, but on the exporting side, the type checker rejects static variables of pointer type because they do not implement `Sync`. Even if it were possible to import such a type, it appears that code generation on the importing side would add an unexpected additional level of pointer indirection, which would break type safety. This highlighted that the semantics of linkage on Rust-defined variables is different to linkage on foreign items. As such, we now model the difference with two different codegen attributes: linkage for Rust-defined variables, and import_linkage for foreign items. This change gives semantics to the test src/test/ui/linkage-attr/auxiliary/def_illtyped_external.rs which was previously expected to fail to compile. Therefore, convert it into a test that is expected to successfully compile. The update to the GCC backend is speculative and untested. --- src/consts.rs | 27 ++++++--------------------- src/errors.rs | 7 ------- 2 files changed, 6 insertions(+), 28 deletions(-) diff --git a/src/consts.rs b/src/consts.rs index 111bfeb1322..ea8ab761146 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -8,13 +8,11 @@ use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::ty::layout::LayoutOf; use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint}; -use rustc_span::Span; use rustc_span::def_id::DefId; use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange}; use crate::base; use crate::context::CodegenCx; -use crate::errors::LinkageConstOrMutType; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { @@ -239,12 +237,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } Node::ForeignItem(&hir::ForeignItem { - span, + span: _, kind: hir::ForeignItemKind::Static(..), .. 
}) => { let fn_attrs = self.tcx.codegen_fn_attrs(def_id); - check_and_apply_linkage(&self, &fn_attrs, ty, sym, span) + check_and_apply_linkage(&self, &fn_attrs, ty, sym) } item => bug!("get_static: expected static, found {:?}", item), @@ -257,8 +255,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id)); let attrs = self.tcx.codegen_fn_attrs(def_id); - let span = self.tcx.def_span(def_id); - let global = check_and_apply_linkage(&self, &attrs, ty, sym, span); + let global = check_and_apply_linkage(&self, &attrs, ty, sym); let needs_dll_storage_attr = false; // TODO(antoyo) @@ -355,24 +352,12 @@ pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id Ok((const_alloc_to_gcc(cx, alloc), alloc)) } -fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> LValue<'gcc> { +fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str) -> LValue<'gcc> { let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); let llty = cx.layout_of(ty).gcc_type(cx, true); - if let Some(linkage) = attrs.linkage { - // If this is a static with a linkage specified, then we need to handle - // it a little specially. The typesystem prevents things like &T and - // extern "C" fn() from being non-null, so we can't just declare a - // static and call it a day. Some linkages (like weak) will make it such - // that the static actually has a null value. - let llty2 = - if let ty::RawPtr(ref mt) = ty.kind() { - cx.layout_of(mt.ty).gcc_type(cx, true) - } - else { - cx.sess().emit_fatal(LinkageConstOrMutType { span: span }) - }; + if let Some(linkage) = attrs.import_linkage { // Declare a symbol `foo` with the desired linkage. - let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage)); + let global1 = cx.declare_global_with_linkage(&sym, cx.type_i8(), base::global_linkage_to_gcc(linkage)); // Declare an internal global `extern_with_linkage_foo` which // is initialized with the address of `foo`. If `foo` is diff --git a/src/errors.rs b/src/errors.rs index 89fed7be131..d0ba7e24791 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -211,13 +211,6 @@ pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { pub in_elem: Ty<'a>, } -#[derive(Diagnostic)] -#[diag(codegen_gcc_linkage_const_or_mut_type)] -pub(crate) struct LinkageConstOrMutType { - #[primary_span] - pub span: Span, -} - #[derive(Diagnostic)] #[diag(codegen_gcc_lto_not_supported)] pub(crate) struct LTONotSupported; From d5f7c20ce569481b7b74e0932dea8528fe84509a Mon Sep 17 00:00:00 2001 From: Ramon de C Valle Date: Mon, 21 Nov 2022 21:29:00 -0800 Subject: [PATCH 050/112] Add LLVM KCFI support to the Rust compiler This commit adds LLVM Kernel Control Flow Integrity (KCFI) support to the Rust compiler. It initially provides forward-edge control flow protection for operating systems kernels for Rust-compiled code only by aggregating function pointers in groups identified by their return and parameter types. (See llvm/llvm-project@cff5bef.) 
Forward-edge control flow protection for C or C++ and Rust -compiled code "mixed binaries" (i.e., for when C or C++ and Rust -compiled code share the same virtual address space) will be provided in later work as part of this project by identifying C char and integer type uses at the time types are encoded (see Type metadata in the design document in the tracking issue #89653). LLVM KCFI can be enabled with -Zsanitizer=kcfi. Co-authored-by: bjorn3 <17426603+bjorn3@users.noreply.github.com> --- src/type_.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/type_.rs b/src/type_.rs index bdf7318ce48..89a415cdb36 100644 --- a/src/type_.rs +++ b/src/type_.rs @@ -300,4 +300,8 @@ impl<'gcc, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'gcc, 'tcx> { // Unsupported. self.context.new_rvalue_from_int(self.int_type, 0) } + + fn set_kcfi_type_metadata(&self, _function: RValue<'gcc>, _kcfi_typeid: u32) { + // Unsupported. + } } From ac4e9caaf5c683b59d3367b42e1cf8c19dff2472 Mon Sep 17 00:00:00 2001 From: Jules Bertholet Date: Fri, 25 Mar 2022 02:44:16 -0400 Subject: [PATCH 051/112] Add `round_ties_even` to `f32` and `f64` --- src/intrinsic/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index 49be6c649e6..35e650c65a0 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -68,6 +68,8 @@ fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> sym::nearbyintf64 => "nearbyint", sym::roundf32 => "roundf", sym::roundf64 => "round", + sym::roundevenf32 => "roundevenf", + sym::roundevenf64 => "roundeven", sym::abort => "abort", _ => return None, }; From b4e9ba5cf3dc785ea1cbb8ff871194729ccb826b Mon Sep 17 00:00:00 2001 From: Jeremy Stucki Date: Tue, 20 Dec 2022 22:10:40 +0100 Subject: [PATCH 052/112] rustc: Remove needless lifetimes --- src/base.rs | 2 +- src/common.rs | 2 +- src/lib.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/base.rs b/src/base.rs index 8f9f6f98faf..d464bd3d12a 100644 --- a/src/base.rs +++ b/src/base.rs @@ -52,7 +52,7 @@ pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType { } } -pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen, u64) { +pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen, u64) { let prof_timer = tcx.prof.generic_activity("codegen_module"); let start_time = Instant::now(); diff --git a/src/common.rs b/src/common.rs index aa1c271c31c..d7df1347121 100644 --- a/src/common.rs +++ b/src/common.rs @@ -44,7 +44,7 @@ pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> context.new_array_constructor(None, typ, &elements) } -pub fn type_is_pointer<'gcc>(typ: Type<'gcc>) -> bool { +pub fn type_is_pointer(typ: Type) -> bool { typ.get_pointee().is_some() } diff --git a/src/lib.rs b/src/lib.rs index b9600da5c39..bf1da38312f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -161,7 +161,7 @@ impl ExtraBackendMethods for GccCodegenBackend { mods } - fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen, u64) { + fn compile_codegen_unit(&self, tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen, u64) { base::compile_codegen_unit(tcx, cgu_name, *self.supports_128bit_integers.lock().expect("lock")) } From 2d09c9bc948031e2ee8661915513a000bdc594ac Mon Sep 17 00:00:00 2001 From: Jeremy Stucki Date: Tue, 20 Dec 2022 22:34:42 +0100 Subject: [PATCH 053/112] Add missing anonymous 
lifetime --- src/common.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common.rs b/src/common.rs index d7df1347121..0afc56b4494 100644 --- a/src/common.rs +++ b/src/common.rs @@ -44,7 +44,7 @@ pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> context.new_array_constructor(None, typ, &elements) } -pub fn type_is_pointer(typ: Type) -> bool { +pub fn type_is_pointer(typ: Type<'_>) -> bool { typ.get_pointee().is_some() } From 26c010c941975b439ec366d0744061c2221d7c07 Mon Sep 17 00:00:00 2001 From: Michael Goulet Date: Sat, 10 Dec 2022 20:31:01 +0000 Subject: [PATCH 054/112] Simplify some iterator combinators --- src/builder.rs | 2 +- src/context.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index effb2de4827..a92242b2615 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1244,7 +1244,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { ) -> RValue<'gcc> { // FIXME(antoyo): remove when having a proper API. let gcc_func = unsafe { std::mem::transmute(func) }; - let call = if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() { + let call = if self.functions.borrow().values().any(|value| *value == gcc_func) { self.function_call(func, args, funclet) } else { diff --git a/src/context.rs b/src/context.rs index 837708aeb0e..4424b31c054 100644 --- a/src/context.rs +++ b/src/context.rs @@ -253,7 +253,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> { let function: Function<'gcc> = unsafe { std::mem::transmute(value) }; - debug_assert!(self.functions.borrow().values().find(|value| **value == function).is_some(), + debug_assert!(self.functions.borrow().values().any(|value| *value == function), "{:?} ({:?}) is not a function", value, value.get_type()); function } From 2671f378bcb846c4183abf783cd308ea4d7e4435 Mon Sep 17 00:00:00 2001 From: Albert Larsan <74931857+albertlarsan68@users.noreply.github.com> Date: Thu, 5 Jan 2023 09:45:44 +0100 Subject: [PATCH 055/112] Change `src/test` to `tests` in source files, fix tidy and tests --- test.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test.sh b/test.sh index 8b390f95a4b..c5ffb763673 100755 --- a/test.sh +++ b/test.sh @@ -253,25 +253,25 @@ rustc = "$HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE/bin/rustc" EOF rustc -V | cut -d' ' -f3 | tr -d '(' - git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') src/test + git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') tests - for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do + for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" tests/ui); do rm $test done - git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed + git checkout -- tests/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed - rm -r src/test/ui/{abi*,extern/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,borrowck/,test*,*lto*.rs} || true - for test in $(rg --files-with-matches "catch_unwind|should_panic|thread|lto" src/test/ui); do + rm -r tests/ui/{abi*,extern/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,borrowck/,test*,*lto*.rs} || true + for test in $(rg --files-with-matches 
"catch_unwind|should_panic|thread|lto" tests/ui); do rm $test done - git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs - git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs + git checkout tests/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs + git checkout tests/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs RUSTC_ARGS="-Zpanic-abort-tests -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort" echo "[TEST] rustc test suite" - COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS" + COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/ui/ --rustc-args "$RUSTC_ARGS" } function clean_ui_tests() { From 559b57e50a904e61ee43124f5032e1a03a5f2506 Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Sun, 22 Jan 2023 23:03:58 -0500 Subject: [PATCH 056/112] abi: add `AddressSpace` field to `Primitive::Pointer` ...and remove it from `PointeeInfo`, which isn't meant for this. There are still various places (marked with FIXMEs) that assume all pointers have the same size and alignment. Fixing this requires parsing non-default address spaces in the data layout string, which will be done in a followup. --- src/builder.rs | 2 +- src/common.rs | 2 +- src/consts.rs | 16 ++++++++++++---- src/type_of.rs | 4 ++-- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index a92242b2615..e88c12716ec 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -709,7 +709,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { bx.range_metadata(load, vr); } } - abi::Pointer if vr.start < vr.end && !vr.contains(0) => { + abi::Pointer(_) if vr.start < vr.end && !vr.contains(0) => { bx.nonnull_metadata(load); } _ => {} diff --git a/src/common.rs b/src/common.rs index 0afc56b4494..c939da9cec3 100644 --- a/src/common.rs +++ b/src/common.rs @@ -211,7 +211,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { let base_addr = self.const_bitcast(base_addr, self.usize_type); let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64); let ptr = self.const_bitcast(base_addr + offset, ptr_type); - if layout.primitive() != Pointer { + if !matches!(layout.primitive(), Pointer(_)) { self.const_bitcast(ptr.dereference(None).to_rvalue(), ty) } else { diff --git a/src/consts.rs b/src/consts.rs index ea8ab761146..ba64b1f6389 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -7,9 +7,9 @@ use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs} use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::ty::layout::LayoutOf; -use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint}; +use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, GlobalAlloc, Scalar as InterpScalar, read_target_uint}; use rustc_span::def_id::DefId; -use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange}; +use rustc_target::abi::{self, AddressSpace, Align, HasDataLayout, Primitive, Size, WrappingRange}; use crate::base; use crate::context::CodegenCx; @@ -322,13 +322,21 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl ) .expect("const_alloc_to_llvm: could not read relocation pointer") as u64; + + let address_space = match 
cx.tcx.global_alloc(alloc_id) { + GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space, + GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => { + AddressSpace::DATA + } + }; + llvals.push(cx.scalar_to_backend( InterpScalar::from_pointer( interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)), &cx.tcx, ), - abi::Scalar::Initialized { value: Primitive::Pointer, valid_range: WrappingRange::full(dl.pointer_size) }, - cx.type_i8p(), + abi::Scalar::Initialized { value: Primitive::Pointer(address_space), valid_range: WrappingRange::full(dl.pointer_size) }, + cx.type_i8p_ext(address_space), )); next_offset = offset + pointer_size; } diff --git a/src/type_of.rs b/src/type_of.rs index 524d10fb5e2..1326af670cd 100644 --- a/src/type_of.rs +++ b/src/type_of.rs @@ -253,7 +253,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { Int(i, false) => cx.type_from_unsigned_integer(i), F32 => cx.type_f32(), F64 => cx.type_f64(), - Pointer => { + Pointer(address_space) => { // If we know the alignment, pick something better than i8. let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { @@ -262,7 +262,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { else { cx.type_i8() }; - cx.type_ptr_to(pointee) + cx.type_ptr_to_ext(pointee, address_space) } } } From 01241f8888d9ca82446179efd8a4fef71bb7bd40 Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Wed, 25 Jan 2023 01:46:19 -0500 Subject: [PATCH 057/112] create and use GlobalAlloc::address_space --- src/consts.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/consts.rs b/src/consts.rs index ba64b1f6389..dc41cb761b5 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -7,9 +7,9 @@ use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs} use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::ty::layout::LayoutOf; -use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, GlobalAlloc, Scalar as InterpScalar, read_target_uint}; +use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint}; use rustc_span::def_id::DefId; -use rustc_target::abi::{self, AddressSpace, Align, HasDataLayout, Primitive, Size, WrappingRange}; +use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange}; use crate::base; use crate::context::CodegenCx; @@ -323,12 +323,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl .expect("const_alloc_to_llvm: could not read relocation pointer") as u64; - let address_space = match cx.tcx.global_alloc(alloc_id) { - GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space, - GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => { - AddressSpace::DATA - } - }; + let address_space = cx.tcx.global_alloc(alloc_id).address_space(cx); llvals.push(cx.scalar_to_backend( InterpScalar::from_pointer( From 9c6f7cb3c7fc81aba3fd22d8ad39faaee8168a9c Mon Sep 17 00:00:00 2001 From: David Wood Date: Fri, 19 Aug 2022 14:48:15 +0100 Subject: [PATCH 058/112] session: diagnostic migration lint on more fns Apply the diagnostic migration lint to more functions on `Session`. 
Signed-off-by: David Wood --- src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lib.rs b/src/lib.rs index bf1da38312f..5ab87feb98b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -200,6 +200,7 @@ unsafe impl Sync for GccContext {} impl WriteBackendMethods for GccCodegenBackend { type Module = GccContext; type TargetMachine = (); + type TargetMachineError = (); type ModuleBuffer = ModuleBuffer; type ThinData = (); type ThinBuffer = ThinBuffer; From 4238ce2195439ee080323cdd4e4906dfe3c3a6a6 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Tue, 14 Feb 2023 08:51:19 +0000 Subject: [PATCH 059/112] s/eval_usize/eval_target_usize/ for clarity --- src/intrinsic/simd.rs | 407 ++++++++++++++++++++++++------------------ 1 file changed, 233 insertions(+), 174 deletions(-) diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs index 12e416f62a4..cb8168b4071 100644 --- a/src/intrinsic/simd.rs +++ b/src/intrinsic/simd.rs @@ -1,6 +1,6 @@ use std::cmp::Ordering; -use gccjit::{BinaryOp, RValue, Type, ToRValue}; +use gccjit::{BinaryOp, RValue, ToRValue, Type}; use rustc_codegen_ssa::base::compare_simd_types; use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::mir::operand::OperandRef; @@ -10,52 +10,57 @@ use rustc_hir as hir; use rustc_middle::span_bug; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::{self, Ty}; -use rustc_span::{Span, Symbol, sym}; +use rustc_span::{sym, Span, Symbol}; use rustc_target::abi::Align; use crate::builder::Builder; use crate::errors::{ - InvalidMonomorphizationInvalidFloatVector, - InvalidMonomorphizationNotFloat, - InvalidMonomorphizationUnrecognized, - InvalidMonomorphizationExpectedSignedUnsigned, - InvalidMonomorphizationUnsupportedElement, - InvalidMonomorphizationInvalidBitmask, - InvalidMonomorphizationSimdShuffle, - InvalidMonomorphizationExpectedSimd, - InvalidMonomorphizationMaskType, - InvalidMonomorphizationReturnLength, - InvalidMonomorphizationReturnLengthInputType, - InvalidMonomorphizationReturnElement, - InvalidMonomorphizationReturnType, - InvalidMonomorphizationInsertedType, - InvalidMonomorphizationReturnIntegerType, - InvalidMonomorphizationMismatchedLengths, - InvalidMonomorphizationUnsupportedCast, - InvalidMonomorphizationUnsupportedOperation + InvalidMonomorphizationExpectedSignedUnsigned, InvalidMonomorphizationExpectedSimd, + InvalidMonomorphizationInsertedType, InvalidMonomorphizationInvalidBitmask, + InvalidMonomorphizationInvalidFloatVector, InvalidMonomorphizationMaskType, + InvalidMonomorphizationMismatchedLengths, InvalidMonomorphizationNotFloat, + InvalidMonomorphizationReturnElement, InvalidMonomorphizationReturnIntegerType, + InvalidMonomorphizationReturnLength, InvalidMonomorphizationReturnLengthInputType, + InvalidMonomorphizationReturnType, InvalidMonomorphizationSimdShuffle, + InvalidMonomorphizationUnrecognized, InvalidMonomorphizationUnsupportedCast, + InvalidMonomorphizationUnsupportedElement, InvalidMonomorphizationUnsupportedOperation, }; use crate::intrinsic; -pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result, ()> { +pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( + bx: &mut Builder<'a, 'gcc, 'tcx>, + name: Symbol, + callee_ty: Ty<'tcx>, + args: &[OperandRef<'tcx, RValue<'gcc>>], + ret_ty: Ty<'tcx>, + llret_ty: Type<'gcc>, + span: Span, +) -> Result, ()> { // macros for error handling: macro_rules! 
return_error { - ($err:expr) => { - { - bx.sess().emit_err($err); - return Err(()); - } - } + ($err:expr) => {{ + bx.sess().emit_err($err); + return Err(()); + }}; } macro_rules! require { ($cond:expr, $err:expr) => { if !$cond { return_error!($err); } - } + }; } macro_rules! require_simd { ($ty: expr, $position: expr) => { - require!($ty.is_simd(), InvalidMonomorphizationExpectedSimd { span, name, position: $position, found_ty: $ty }) + require!( + $ty.is_simd(), + InvalidMonomorphizationExpectedSimd { + span, + name, + position: $position, + found_ty: $ty + } + ) }; } @@ -77,7 +82,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(), ty::Array(elem, len) if matches!(elem.kind(), ty::Uint(ty::UintTy::U8)) - && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all()) + && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all()) == Some(expected_bytes) => { let place = PlaceRef::alloca(bx, args[0].layout); @@ -86,9 +91,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty)); bx.load(int_ty, ptr, Align::ONE) } - _ => return_error!( - InvalidMonomorphizationInvalidBitmask { span, name, ty: mask_ty, expected_int_bits, expected_bytes } - ), + _ => return_error!(InvalidMonomorphizationInvalidBitmask { + span, + name, + ty: mask_ty, + expected_int_bits, + expected_bytes + }), }; let arg1 = args[1].immediate(); @@ -129,11 +138,18 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); require!( in_len == out_len, - InvalidMonomorphizationReturnLengthInputType { span, name, in_len, in_ty, ret_ty, out_len } + InvalidMonomorphizationReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + } ); require!( bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, - InvalidMonomorphizationReturnIntegerType {span, name, ret_ty, out_ty} + InvalidMonomorphizationReturnIntegerType { span, name, ret_ty, out_ty } ); return Ok(compare_simd_types( @@ -147,26 +163,26 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, } if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") { - let n: u64 = - if stripped.is_empty() { - // Make sure this is actually an array, since typeck only checks the length-suffixed - // version of this intrinsic. - match args[2].layout.ty.kind() { - ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { - len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| { - span_bug!(span, "could not evaluate shuffle index array length") - }) - } - _ => return_error!( - InvalidMonomorphizationSimdShuffle { span, name, ty: args[2].layout.ty } - ), + let n: u64 = if stripped.is_empty() { + // Make sure this is actually an array, since typeck only checks the length-suffixed + // version of this intrinsic. 
+ match args[2].layout.ty.kind() { + ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { + len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else( + || span_bug!(span, "could not evaluate shuffle index array length"), + ) } + _ => return_error!(InvalidMonomorphizationSimdShuffle { + span, + name, + ty: args[2].layout.ty + }), } - else { - stripped.parse().unwrap_or_else(|_| { - span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?") - }) - }; + } else { + stripped.parse().unwrap_or_else(|_| { + span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?") + }) + }; require_simd!(ret_ty, "return"); @@ -182,14 +198,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let vector = args[2].immediate(); - return Ok(bx.shuffle_vector( - args[0].immediate(), - args[1].immediate(), - vector, - )); + return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), vector)); } - #[cfg(feature="master")] + #[cfg(feature = "master")] if name == sym::simd_insert { require!( in_elem == arg_tys[2], @@ -205,44 +217,44 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, // not be an l-value. So, call a builtin to set the element. // TODO(antoyo): perhaps we could create a new vector or maybe there's a GIMPLE instruction for that? // TODO(antoyo): don't use target specific builtins here. - let func_name = - match in_len { - 2 => { - if element_type == bx.i64_type { - "__builtin_ia32_vec_set_v2di" - } - else { - unimplemented!(); - } - }, - 4 => { - if element_type == bx.i32_type { - "__builtin_ia32_vec_set_v4si" - } - else { - unimplemented!(); - } - }, - 8 => { - if element_type == bx.i16_type { - "__builtin_ia32_vec_set_v8hi" - } - else { - unimplemented!(); - } - }, - _ => unimplemented!("Len: {}", in_len), - }; + let func_name = match in_len { + 2 => { + if element_type == bx.i64_type { + "__builtin_ia32_vec_set_v2di" + } else { + unimplemented!(); + } + } + 4 => { + if element_type == bx.i32_type { + "__builtin_ia32_vec_set_v4si" + } else { + unimplemented!(); + } + } + 8 => { + if element_type == bx.i16_type { + "__builtin_ia32_vec_set_v8hi" + } else { + unimplemented!(); + } + } + _ => unimplemented!("Len: {}", in_len), + }; let builtin = bx.context.get_target_builtin_function(func_name); let param1_type = builtin.get_param(0).to_rvalue().get_type(); // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. let vector = bx.cx.bitcast_if_needed(vector, param1_type); - let result = bx.context.new_call(None, builtin, &[vector, value, bx.context.new_cast(None, index, bx.int_type)]); + let result = bx.context.new_call( + None, + builtin, + &[vector, value, bx.context.new_cast(None, index, bx.int_type)], + ); // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. 
return Ok(bx.context.new_bitcast(None, result, vector.get_type())); } - #[cfg(feature="master")] + #[cfg(feature = "master")] if name == sym::simd_extract { require!( ret_ty == in_elem, @@ -273,7 +285,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); require!( in_len == out_len, - InvalidMonomorphizationReturnLengthInputType { span, name, in_len, in_ty, ret_ty, out_len } + InvalidMonomorphizationReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + } ); // casting cares about nominal type, not just structural type if in_elem == out_elem { @@ -322,19 +341,27 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let cast_vec_element = |index| { let index = bx.context.new_rvalue_from_int(bx.int_type, index); - bx.context.new_cast(None, bx.context.new_array_access(None, array, index).to_rvalue(), out_type) + bx.context.new_cast( + None, + bx.context.new_array_access(None, array, index).to_rvalue(), + out_type, + ) }; - bx.context.new_rvalue_from_vector(None, vector_type, &[ - cast_vec_element(0), - cast_vec_element(1), - cast_vec_element(2), - cast_vec_element(3), - cast_vec_element(4), - cast_vec_element(5), - cast_vec_element(6), - cast_vec_element(7), - ]) + bx.context.new_rvalue_from_vector( + None, + vector_type, + &[ + cast_vec_element(0), + cast_vec_element(1), + cast_vec_element(2), + cast_vec_element(3), + cast_vec_element(4), + cast_vec_element(5), + cast_vec_element(6), + cast_vec_element(7), + ], + ) }; match (in_style, out_style) { @@ -385,9 +412,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, } _ => { /* Unsupported. Fallthrough. */ } } - return_error!( - InvalidMonomorphizationUnsupportedCast { span, name, in_ty, in_elem, ret_ty, out_elem } - ); + return_error!(InvalidMonomorphizationUnsupportedCast { + span, + name, + in_ty, + in_elem, + ret_ty, + out_elem + }); } macro_rules! arith_binary { @@ -414,54 +446,60 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ) -> Result, ()> { macro_rules! 
return_error { - ($err:expr) => { - { - bx.sess().emit_err($err); - return Err(()); - } - } + ($err:expr) => {{ + bx.sess().emit_err($err); + return Err(()); + }}; } - let (elem_ty_str, elem_ty) = - if let ty::Float(f) = in_elem.kind() { - let elem_ty = bx.cx.type_float_from_ty(*f); - match f.bit_width() { - 32 => ("f32", elem_ty), - 64 => ("f64", elem_ty), - _ => { - return_error!(InvalidMonomorphizationInvalidFloatVector { span, name, elem_ty: f.name_str(), vec_ty: in_ty }); - } + let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() { + let elem_ty = bx.cx.type_float_from_ty(*f); + match f.bit_width() { + 32 => ("f32", elem_ty), + 64 => ("f64", elem_ty), + _ => { + return_error!(InvalidMonomorphizationInvalidFloatVector { + span, + name, + elem_ty: f.name_str(), + vec_ty: in_ty + }); } } - else { - return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty }); - }; + } else { + return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty }); + }; let vec_ty = bx.cx.type_vector(elem_ty, in_len); - let (intr_name, fn_ty) = - match name { - sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), // TODO(antoyo): pand with 170141183420855150465331762880109871103 - sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), - sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)), - sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)), - sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)), - _ => return_error!(InvalidMonomorphizationUnrecognized { span, name }) - }; + let (intr_name, fn_ty) = match name { + sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), // TODO(antoyo): pand with 170141183420855150465331762880109871103 + sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), + sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)), + sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)), + sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)), + _ => return_error!(InvalidMonomorphizationUnrecognized { span, name }), + }; let llvm_name = 
&format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx); let function: RValue<'gcc> = unsafe { std::mem::transmute(function) }; - let c = bx.call(fn_ty, None, function, &args.iter().map(|arg| arg.immediate()).collect::>(), None); + let c = bx.call( + fn_ty, + None, + function, + &args.iter().map(|arg| arg.immediate()).collect::>(), + None, + ); Ok(c) } @@ -518,7 +556,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, simd_neg: Int => neg, Float => fneg; } - #[cfg(feature="master")] + #[cfg(feature = "master")] if name == sym::simd_saturating_add || name == sym::simd_saturating_sub { let lhs = args[0].immediate(); let rhs = args[1].immediate(); @@ -536,18 +574,23 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, }); } }; - let builtin_name = - match (signed, is_add, in_len, elem_width) { - (true, true, 32, 8) => "__builtin_ia32_paddsb256", // TODO(antoyo): cast arguments to unsigned. - (false, true, 32, 8) => "__builtin_ia32_paddusb256", - (true, true, 16, 16) => "__builtin_ia32_paddsw256", - (false, true, 16, 16) => "__builtin_ia32_paddusw256", - (true, false, 16, 16) => "__builtin_ia32_psubsw256", - (false, false, 16, 16) => "__builtin_ia32_psubusw256", - (true, false, 32, 8) => "__builtin_ia32_psubsb256", - (false, false, 32, 8) => "__builtin_ia32_psubusb256", - _ => unimplemented!("signed: {}, is_add: {}, in_len: {}, elem_width: {}", signed, is_add, in_len, elem_width), - }; + let builtin_name = match (signed, is_add, in_len, elem_width) { + (true, true, 32, 8) => "__builtin_ia32_paddsb256", // TODO(antoyo): cast arguments to unsigned. + (false, true, 32, 8) => "__builtin_ia32_paddusb256", + (true, true, 16, 16) => "__builtin_ia32_paddsw256", + (false, true, 16, 16) => "__builtin_ia32_paddusw256", + (true, false, 16, 16) => "__builtin_ia32_psubsw256", + (false, false, 16, 16) => "__builtin_ia32_psubusw256", + (true, false, 32, 8) => "__builtin_ia32_psubsb256", + (false, false, 32, 8) => "__builtin_ia32_psubusb256", + _ => unimplemented!( + "signed: {}, is_add: {}, in_len: {}, elem_width: {}", + signed, + is_add, + in_len, + elem_width + ), + }; let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64); let func = bx.context.get_target_builtin_function(builtin_name); @@ -575,8 +618,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, // if overflow occurs, the result is the // mathematical result modulo 2^n: Ok(bx.$op(args[1].immediate(), r)) - } - else { + } else { Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op)) } } @@ -585,12 +627,17 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, // ordered arithmetic reductions take an accumulator let acc = args[1].immediate(); Ok(bx.$float_reduce(acc, args[0].immediate())) - } - else { + } else { Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op)) } } - _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), + _ => return_error!(InvalidMonomorphizationUnsupportedElement { + span, + name, + in_ty, + elem_ty: in_elem, + ret_ty + }), }; } }; @@ -603,13 +650,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, add, 0.0 // TODO: Use this argument. 
); - arith_red!( - simd_reduce_mul_unordered: BinaryOp::Mult, - vector_reduce_fmul_fast, - false, - mul, - 1.0 - ); + arith_red!(simd_reduce_mul_unordered: BinaryOp::Mult, vector_reduce_fmul_fast, false, mul, 1.0); macro_rules! minmax_red { ($name:ident: $reduction:ident) => { @@ -619,8 +660,16 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } ); return match in_elem.kind() { - ty::Int(_) | ty::Uint(_) | ty::Float(_) => Ok(bx.$reduction(args[0].immediate())), - _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), + ty::Int(_) | ty::Uint(_) | ty::Float(_) => { + Ok(bx.$reduction(args[0].immediate())) + } + _ => return_error!(InvalidMonomorphizationUnsupportedElement { + span, + name, + in_ty, + elem_ty: in_elem, + ret_ty + }), }; } }; @@ -641,7 +690,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, } else { match in_elem.kind() { ty::Int(_) | ty::Uint(_) => {} - _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), + _ => return_error!(InvalidMonomorphizationUnsupportedElement { + span, + name, + in_ty, + elem_ty: in_elem, + ret_ty + }), } // boolean reductions operate on vectors of i1s: @@ -654,9 +709,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let r = bx.vector_reduce_op(input, $op); Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) }) } - _ => return_error!( - InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty } - ), + _ => return_error!(InvalidMonomorphizationUnsupportedElement { + span, + name, + in_ty, + elem_ty: in_elem, + ret_ty + }), }; } }; From 3e14bba40c78dec57fc87ff07581b11daf530b0c Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 13 Oct 2022 10:13:02 +0100 Subject: [PATCH 060/112] errors: generate typed identifiers in each crate Instead of loading the Fluent resources for every crate in `rustc_error_messages`, each crate generates typed identifiers for its own diagnostics and creates a static which are pulled together in the `rustc_driver` crate and provided to the diagnostic emitter. Signed-off-by: David Wood --- locales/en-US.ftl | 62 +++++++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 5 +++- 2 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 locales/en-US.ftl diff --git a/locales/en-US.ftl b/locales/en-US.ftl new file mode 100644 index 00000000000..6101b28ab0c --- /dev/null +++ b/locales/en-US.ftl @@ -0,0 +1,62 @@ +codegen_gcc_unwinding_inline_asm = + GCC backend does not support unwinding from inline asm + +codegen_gcc_lto_not_supported = + LTO is not supported. You may get a linker error. 
+ +codegen_gcc_invalid_monomorphization_basic_integer = + invalid monomorphization of `{$name}` intrinsic: expected basic integer type, found `{$ty}` + +codegen_gcc_invalid_monomorphization_invalid_float_vector = + invalid monomorphization of `{$name}` intrinsic: unsupported element type `{$elem_ty}` of floating-point vector `{$vec_ty}` + +codegen_gcc_invalid_monomorphization_not_float = + invalid monomorphization of `{$name}` intrinsic: `{$ty}` is not a floating-point type + +codegen_gcc_invalid_monomorphization_unrecognized = + invalid monomorphization of `{$name}` intrinsic: unrecognized intrinsic `{$name}` + +codegen_gcc_invalid_monomorphization_expected_signed_unsigned = + invalid monomorphization of `{$name}` intrinsic: expected element type `{$elem_ty}` of vector type `{$vec_ty}` to be a signed or unsigned integer type + +codegen_gcc_invalid_monomorphization_unsupported_element = + invalid monomorphization of `{$name}` intrinsic: unsupported {$name} from `{$in_ty}` with element `{$elem_ty}` to `{$ret_ty}` + +codegen_gcc_invalid_monomorphization_invalid_bitmask = + invalid monomorphization of `{$name}` intrinsic: invalid bitmask `{$ty}`, expected `u{$expected_int_bits}` or `[u8; {$expected_bytes}]` + +codegen_gcc_invalid_monomorphization_simd_shuffle = + invalid monomorphization of `{$name}` intrinsic: simd_shuffle index must be an array of `u32`, got `{$ty}` + +codegen_gcc_invalid_monomorphization_expected_simd = + invalid monomorphization of `{$name}` intrinsic: expected SIMD {$expected_ty} type, found non-SIMD `{$found_ty}` + +codegen_gcc_invalid_monomorphization_mask_type = + invalid monomorphization of `{$name}` intrinsic: mask element type is `{$ty}`, expected `i_` + +codegen_gcc_invalid_monomorphization_return_length = + invalid monomorphization of `{$name}` intrinsic: expected return type of length {$in_len}, found `{$ret_ty}` with length {$out_len} + +codegen_gcc_invalid_monomorphization_return_length_input_type = + invalid monomorphization of `{$name}` intrinsic: expected return type with length {$in_len} (same as input type `{$in_ty}`), found `{$ret_ty}` with length {$out_len} + +codegen_gcc_invalid_monomorphization_return_element = + invalid monomorphization of `{$name}` intrinsic: expected return element type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}` with element type `{$out_ty}` + +codegen_gcc_invalid_monomorphization_return_type = + invalid monomorphization of `{$name}` intrinsic: expected return type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}` + +codegen_gcc_invalid_monomorphization_inserted_type = + invalid monomorphization of `{$name}` intrinsic: expected inserted type `{$in_elem}` (element of input `{$in_ty}`), found `{$out_ty}` + +codegen_gcc_invalid_monomorphization_return_integer_type = + invalid monomorphization of `{$name}` intrinsic: expected return type with integer elements, found `{$ret_ty}` with non-integer `{$out_ty}` + +codegen_gcc_invalid_monomorphization_mismatched_lengths = + invalid monomorphization of `{$name}` intrinsic: mismatched lengths: mask length `{$m_len}` != other vector length `{$v_len}` + +codegen_gcc_invalid_monomorphization_unsupported_cast = + invalid monomorphization of `{$name}` intrinsic: unsupported cast from `{$in_ty}` with element `{$in_elem}` to `{$ret_ty}` with element `{$out_elem}` + +codegen_gcc_invalid_monomorphization_unsupported_operation = + invalid monomorphization of `{$name}` intrinsic: unsupported operation on `{$in_ty}` with element `{$in_elem}` diff --git a/src/lib.rs 
b/src/lib.rs index 5ab87feb98b..546c892aae5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -73,7 +73,8 @@ use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModul use rustc_codegen_ssa::target_features::supported_target_features; use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; use rustc_data_structures::fx::FxHashMap; -use rustc_errors::{ErrorGuaranteed, Handler}; +use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMessage}; +use rustc_macros::fluent_messages; use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; @@ -84,6 +85,8 @@ use rustc_span::Symbol; use rustc_span::fatal_error::FatalError; use tempfile::TempDir; +fluent_messages! { "../locales/en-US.ftl" } + pub struct PrintOnPanic String>(pub F); impl String> Drop for PrintOnPanic { From 984945a1176fef84d45eb9ff0a59dbdfd010890f Mon Sep 17 00:00:00 2001 From: David Wood Date: Mon, 17 Oct 2022 14:11:26 +0100 Subject: [PATCH 061/112] various: translation resources from cg backend Extend `CodegenBackend` trait with a function returning the translation resources from the codegen backend, which can be added to the complete list of resources provided to the emitter. Signed-off-by: David Wood --- src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index 546c892aae5..44538b41528 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -103,6 +103,10 @@ pub struct GccCodegenBackend { } impl CodegenBackend for GccCodegenBackend { + fn locale_resource(&self) -> &'static str { + crate::DEFAULT_LOCALE_RESOURCE + } + fn init(&self, sess: &Session) { if sess.lto() != Lto::No { sess.emit_warning(LTONotSupported {}); From aefda5786e92a38c4bb1da0cc8abf0d2e0ee509b Mon Sep 17 00:00:00 2001 From: Alan Egerton Date: Wed, 22 Feb 2023 02:18:40 +0000 Subject: [PATCH 062/112] Remove type-traversal trait aliases --- src/callee.rs | 2 +- src/mono_item.rs | 2 +- src/type_of.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/callee.rs b/src/callee.rs index c1041125ecc..9e3a22ee05d 100644 --- a/src/callee.rs +++ b/src/callee.rs @@ -1,6 +1,6 @@ use gccjit::{FunctionType, RValue}; use rustc_codegen_ssa::traits::BaseTypeMethods; -use rustc_middle::ty::{self, Instance, TypeVisitable}; +use rustc_middle::ty::{self, Instance, TypeVisitableExt}; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use crate::abi::FnAbiGccExt; diff --git a/src/mono_item.rs b/src/mono_item.rs index 9468a1ef4bb..a7c868354fb 100644 --- a/src/mono_item.rs +++ b/src/mono_item.rs @@ -1,7 +1,7 @@ use rustc_codegen_ssa::traits::PreDefineMethods; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::mono::{Linkage, Visibility}; -use rustc_middle::ty::{self, Instance, TypeVisitable}; +use rustc_middle::ty::{self, Instance, TypeVisitableExt}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf}; use rustc_span::def_id::DefId; diff --git a/src/type_of.rs b/src/type_of.rs index 1326af670cd..ea2ce765053 100644 --- a/src/type_of.rs +++ b/src/type_of.rs @@ -3,7 +3,7 @@ use std::fmt::Write; use gccjit::{Struct, Type}; use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods}; use rustc_middle::bug; -use rustc_middle::ty::{self, Ty, TypeVisitable}; +use rustc_middle::ty::{self, Ty, TypeVisitableExt}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; use 
rustc_middle::ty::print::with_no_trimmed_paths; use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants}; From 5b0dbc47d7645b6ed12a4a81ab4daf85d1fa3a0f Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Fri, 17 Feb 2023 14:33:08 +1100 Subject: [PATCH 063/112] Rename many interner functions. (This is a large commit. The changes to `compiler/rustc_middle/src/ty/context.rs` are the most important ones.) The current naming scheme is a mess, with a mix of `_intern_`, `intern_` and `mk_` prefixes, with little consistency. In particular, in many cases it's easy to use an iterator interner when a (preferable) slice interner is available. The guiding principles of the new naming system: - No `_intern_` prefixes. - The `intern_` prefix is for internal operations. - The `mk_` prefix is for external operations. - For cases where there is a slice interner and an iterator interner, the former is `mk_foo` and the latter is `mk_foo_from_iter`. Also, `slice_interners!` and `direct_interners!` can now be `pub` or non-`pub`, which helps enforce the internal/external operations division. It's not perfect, but I think it's a clear improvement. The following lists show everything that was renamed. slice_interners - const_list - mk_const_list -> mk_const_list_from_iter - intern_const_list -> mk_const_list - substs - mk_substs -> mk_substs_from_iter - intern_substs -> mk_substs - check_substs -> check_and_mk_substs (this is a weird one) - canonical_var_infos - intern_canonical_var_infos -> mk_canonical_var_infos - poly_existential_predicates - mk_poly_existential_predicates -> mk_poly_existential_predicates_from_iter - intern_poly_existential_predicates -> mk_poly_existential_predicates - _intern_poly_existential_predicates -> intern_poly_existential_predicates - predicates - mk_predicates -> mk_predicates_from_iter - intern_predicates -> mk_predicates - _intern_predicates -> intern_predicates - projs - intern_projs -> mk_projs - place_elems - mk_place_elems -> mk_place_elems_from_iter - intern_place_elems -> mk_place_elems - bound_variable_kinds - mk_bound_variable_kinds -> mk_bound_variable_kinds_from_iter - intern_bound_variable_kinds -> mk_bound_variable_kinds direct_interners - region - intern_region (unchanged) - const - mk_const_internal -> intern_const - const_allocation - intern_const_alloc -> mk_const_alloc - layout - intern_layout -> mk_layout - adt_def - intern_adt_def -> mk_adt_def_from_data (unusual case, hard to avoid) - alloc_adt_def(!) -> mk_adt_def - external_constraints - intern_external_constraints -> mk_external_constraints Other - type_list - mk_type_list -> mk_type_list_from_iter - intern_type_list -> mk_type_list - tup - mk_tup -> mk_tup_from_iter - intern_tup -> mk_tup --- src/context.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context.rs b/src/context.rs index 4424b31c054..fc746fbd599 100644 --- a/src/context.rs +++ b/src/context.rs @@ -383,7 +383,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { tcx, ty::ParamEnv::reveal_all(), def_id, - tcx.intern_substs(&[]), + tcx.mk_substs(&[]), ) .unwrap().unwrap(), ), From ea83d6f4d83c53a7da67e995efbaba6ca394903a Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Mon, 20 Feb 2023 14:52:23 +1100 Subject: [PATCH 064/112] Use `List::empty()` instead of `mk_substs(&[])`. 
--- src/context.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context.rs b/src/context.rs index fc746fbd599..457006319af 100644 --- a/src/context.rs +++ b/src/context.rs @@ -383,7 +383,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { tcx, ty::ParamEnv::reveal_all(), def_id, - tcx.mk_substs(&[]), + ty::List::empty(), ) .unwrap().unwrap(), ), From d725cfb6abd04691a3e5c91e4f2c0e3860561deb Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 5 Mar 2023 12:03:19 -0500 Subject: [PATCH 065/112] Merge commit '08a6d6e16b5efe217123e780398969946266268f' into sync-cg_gcc-2023-03-04 --- .github/workflows/ci.yml | 111 +- .github/workflows/release.yml | 111 + .github/workflows/stdarch.yml | 116 + Cargo.lock | 7 + Cargo.toml | 2 + Readme.md | 92 +- build_sysroot/build_sysroot.sh | 2 +- config.sh | 2 +- example/alloc_example.rs | 20 +- example/mini_core.rs | 68 +- example/mini_core_hello_world.rs | 2 + example/mod_bench.rs | 6 +- example/std_example.rs | 1 + failing-ui-tests.txt | 68 + failing-ui-tests12.txt | 39 + locales/en-US.ftl | 3 + ...1-Add-stdarch-Cargo.toml-for-testing.patch | 39 + patches/0001-Disable-examples.patch | 25 + ...022-core-Disable-not-compiling-tests.patch | 47 +- ...0024-core-Disable-portable-simd-test.patch | 28 - rust-toolchain | 2 +- rustc_patches/compile_test.patch | 14 - src/allocator.rs | 16 +- src/asm.rs | 211 +- src/attributes.rs | 112 + src/back/write.rs | 1 + src/base.rs | 45 +- src/builder.rs | 413 ++- src/callee.rs | 113 +- src/common.rs | 26 +- src/consts.rs | 171 +- src/context.rs | 83 +- src/declare.rs | 15 +- src/errors.rs | 6 + src/int.rs | 16 +- src/intrinsic/archs.rs | 2443 ++++++++++++++++- src/intrinsic/llvm.rs | 797 +++++- src/intrinsic/mod.rs | 156 +- src/intrinsic/simd.rs | 839 ++++-- src/lib.rs | 9 +- src/mono_item.rs | 40 +- src/type_.rs | 22 +- src/type_of.rs | 48 +- test.sh | 240 +- tests/lang_tests_common.rs | 6 +- tests/run/abort1.rs | 1 + tests/run/abort2.rs | 1 + tests/run/array.rs | 3 +- tests/run/assign.rs | 3 +- tests/run/closure.rs | 10 +- tests/run/condition.rs | 3 +- tests/run/fun_ptr.rs | 3 +- tests/run/int.rs | 16 +- tests/run/int_overflow.rs | 3 +- tests/run/mut_ref.rs | 3 +- tests/run/operations.rs | 3 +- tests/run/ptr_cast.rs | 3 +- tests/run/slice.rs | 1 + tests/run/static.rs | 1 + tools/check_intrinsics_duplicates.py | 67 + tools/generate_intrinsics.py | 91 +- 61 files changed, 5726 insertions(+), 1119 deletions(-) create mode 100644 .github/workflows/release.yml create mode 100644 .github/workflows/stdarch.yml create mode 100644 failing-ui-tests.txt create mode 100644 failing-ui-tests12.txt create mode 100644 patches/0001-Add-stdarch-Cargo.toml-for-testing.patch create mode 100644 patches/0001-Disable-examples.patch delete mode 100644 patches/0024-core-Disable-portable-simd-test.patch delete mode 100644 rustc_patches/compile_test.patch create mode 100644 src/attributes.rs create mode 100644 tools/check_intrinsics_duplicates.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8ebdabe8261..d2b7724a221 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,36 +4,72 @@ on: - push - pull_request +permissions: + contents: read + +env: + # Enable backtraces for easier debugging + RUST_BACKTRACE: 1 + jobs: build: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: - libgccjit_version: ["libgccjit.so", "libgccjit_without_int128.so", "libgccjit12.so"] + libgccjit_version: + - { gcc: "libgccjit.so", extra: "", env_extra: "", 
artifacts_branch: "master" } + - { gcc: "libgccjit_without_int128.so", extra: "", env_extra: "", artifacts_branch: "master-without-128bit-integers" } + - { gcc: "libgccjit12.so", extra: "--no-default-features", env_extra: "TEST_FLAGS='-Cpanic=abort -Zpanic-abort-tests'", artifacts_branch: "gcc12" } + commands: [ + "--mini-tests", + "--std-tests", + # FIXME: re-enable asm tests when GCC can emit in the right syntax. + # "--asm-tests", + "--test-libcore", + "--extended-rand-tests", + "--extended-regex-example-tests", + "--extended-regex-tests", + "--test-successful-rustc --nb-parts 2 --current-part 0", + "--test-successful-rustc --nb-parts 2 --current-part 1", + "--test-failing-rustc", + ] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: llvm/llvm-project path: llvm - name: Install packages - run: sudo apt-get install ninja-build ripgrep + # `llvm-14-tools` is needed to install the `FileCheck` binary which is used for asm tests. + run: sudo apt-get install ninja-build ripgrep llvm-14-tools + + - name: Install libgccjit12 + if: matrix.libgccjit_version.gcc == 'libgccjit12.so' + run: sudo apt-get install libgccjit-12-dev - name: Download artifact + if: matrix.libgccjit_version.gcc != 'libgccjit12.so' uses: dawidd6/action-download-artifact@v2 with: workflow: main.yml - name: ${{ matrix.libgccjit_version }} + name: ${{ matrix.libgccjit_version.gcc }} path: gcc-build repo: antoyo/gcc + branch: ${{ matrix.libgccjit_version.artifacts_branch }} + event: push search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts. - name: Setup path to libgccjit + if: matrix.libgccjit_version.gcc == 'libgccjit12.so' + run: echo /usr/lib/gcc/x86_64-linux-gnu/12 > gcc_path + + - name: Setup path to libgccjit + if: matrix.libgccjit_version.gcc != 'libgccjit12.so' run: | echo $(readlink -f gcc-build) > gcc_path # NOTE: the filename is still libgccjit.so even when the artifact name is different. @@ -48,49 +84,44 @@ jobs: - name: Set RUST_COMPILER_RT_ROOT run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV - # https://github.com/actions/cache/issues/133 - - name: Fixup owner of ~/.cargo/ - # Don't remove the trailing /. It is necessary to follow the symlink. 
- run: sudo chown -R $(whoami):$(id -ng) ~/.cargo/ - - name: Cache cargo installed crates - uses: actions/cache@v1.1.2 + uses: actions/cache@v3 with: path: ~/.cargo/bin key: cargo-installed-crates2-ubuntu-latest - name: Cache cargo registry - uses: actions/cache@v1 + uses: actions/cache@v3 with: path: ~/.cargo/registry key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }} - name: Cache cargo index - uses: actions/cache@v1 + uses: actions/cache@v3 with: path: ~/.cargo/git key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} - name: Cache cargo target dir - uses: actions/cache@v1.1.2 + uses: actions/cache@v3 with: path: target key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }} - - name: Build - if: matrix.libgccjit_version != 'libgccjit12.so' - run: | - ./prepare_build.sh - ./build.sh - cargo test - ./clean_all.sh + #- name: Cache rust repository + ## We only clone the rust repository for rustc tests + #if: ${{ contains(matrix.commands, 'rustc') }} + #uses: actions/cache@v3 + #id: cache-rust-repository + #with: + #path: rust + #key: ${{ runner.os }}-packages-${{ hashFiles('rust/.git/HEAD') }} - name: Build - if: matrix.libgccjit_version == 'libgccjit12.so' run: | ./prepare_build.sh - ./build.sh --no-default-features - cargo test --no-default-features + ${{ matrix.libgccjit_version.env_extra }} ./build.sh ${{ matrix.libgccjit_version.extra }} + ${{ matrix.libgccjit_version.env_extra }} cargo test ${{ matrix.libgccjit_version.extra }} ./clean_all.sh - name: Prepare dependencies @@ -106,26 +137,16 @@ jobs: command: build args: --release - - name: Test - if: matrix.libgccjit_version != 'libgccjit12.so' - run: | - # Enable backtraces for easier debugging - export RUST_BACKTRACE=1 - - # Reduce amount of benchmark runs as they are slow - export COMPILE_RUNS=2 - export RUN_RUNS=2 + - name: Add more failing tests for GCC 12 + if: ${{ matrix.libgccjit_version.gcc == 'libgccjit12.so' }} + run: cat failing-ui-tests12.txt >> failing-ui-tests.txt - ./test.sh --release - - - name: Test - if: matrix.libgccjit_version == 'libgccjit12.so' + - name: Run tests run: | - # Enable backtraces for easier debugging - export RUST_BACKTRACE=1 - - # Reduce amount of benchmark runs as they are slow - export COMPILE_RUNS=2 - export RUN_RUNS=2 + ${{ matrix.libgccjit_version.env_extra }} ./test.sh --release --clean --build-sysroot ${{ matrix.commands }} ${{ matrix.libgccjit_version.extra }} - ./test.sh --release --no-default-features + duplicates: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: python tools/check_intrinsics_duplicates.py diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..c4e99469bc2 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,111 @@ +name: CI with sysroot compiled in release mode + +on: + - push + - pull_request + +permissions: + contents: read + +env: + # Enable backtraces for easier debugging + RUST_BACKTRACE: 1 + +jobs: + build: + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + libgccjit_version: + - { gcc: "libgccjit.so", artifacts_branch: "master" } + commands: [ + "--test-successful-rustc --nb-parts 2 --current-part 0", + "--test-successful-rustc --nb-parts 2 --current-part 1", + ] + + steps: + - uses: actions/checkout@v3 + + - uses: actions/checkout@v3 + with: + repository: llvm/llvm-project + path: llvm + + - name: Install packages + run: sudo apt-get install ninja-build ripgrep + + - name: Download artifact + uses: 
dawidd6/action-download-artifact@v2 + with: + workflow: main.yml + name: ${{ matrix.libgccjit_version.gcc }} + path: gcc-build + repo: antoyo/gcc + branch: ${{ matrix.libgccjit_version.artifacts_branch }} + event: push + search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts. + + - name: Setup path to libgccjit + run: | + echo $(readlink -f gcc-build) > gcc_path + # NOTE: the filename is still libgccjit.so even when the artifact name is different. + ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0 + + - name: Set env + run: | + echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV + + - name: Set RUST_COMPILER_RT_ROOT + run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV + + - name: Cache cargo installed crates + uses: actions/cache@v3 + with: + path: ~/.cargo/bin + key: cargo-installed-crates2-ubuntu-latest + + - name: Cache cargo registry + uses: actions/cache@v3 + with: + path: ~/.cargo/registry + key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo index + uses: actions/cache@v3 + with: + path: ~/.cargo/git + key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo target dir + uses: actions/cache@v3 + with: + path: target + key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }} + + - name: Build + run: | + ./prepare_build.sh + ./build.sh --release --release-sysroot + cargo test + ./clean_all.sh + + - name: Prepare dependencies + run: | + git config --global user.email "user@example.com" + git config --global user.name "User" + ./prepare.sh + + # Compile is a separate step, as the actions-rs/cargo action supports error annotations + - name: Compile + uses: actions-rs/cargo@v1.0.3 + with: + command: build + args: --release + + - name: Run tests + run: | + ./test.sh --release --clean --release-sysroot --build-sysroot ${{ matrix.commands }} diff --git a/.github/workflows/stdarch.yml b/.github/workflows/stdarch.yml new file mode 100644 index 00000000000..42fb35e738f --- /dev/null +++ b/.github/workflows/stdarch.yml @@ -0,0 +1,116 @@ +name: stdarch tests with sysroot compiled in release mode + +on: + - push + - pull_request + +permissions: + contents: read + +env: + # Enable backtraces for easier debugging + RUST_BACKTRACE: 1 + +jobs: + build: + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + libgccjit_version: + - { gcc: "libgccjit.so", artifacts_branch: "master" } + commands: [ + "--test-successful-rustc --nb-parts 2 --current-part 0", + "--test-successful-rustc --nb-parts 2 --current-part 1", + ] + + steps: + - uses: actions/checkout@v3 + + - uses: actions/checkout@v3 + with: + repository: llvm/llvm-project + path: llvm + + - name: Install packages + run: sudo apt-get install ninja-build ripgrep + + - name: Download artifact + uses: dawidd6/action-download-artifact@v2 + with: + workflow: main.yml + name: ${{ matrix.libgccjit_version.gcc }} + path: gcc-build + repo: antoyo/gcc + branch: ${{ matrix.libgccjit_version.artifacts_branch }} + event: push + search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts. 
+ + - name: Setup path to libgccjit + run: | + echo $(readlink -f gcc-build) > gcc_path + # NOTE: the filename is still libgccjit.so even when the artifact name is different. + ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0 + + - name: Set env + run: | + echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV + + - name: Set RUST_COMPILER_RT_ROOT + run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV + + - name: Cache cargo installed crates + uses: actions/cache@v3 + with: + path: ~/.cargo/bin + key: cargo-installed-crates2-ubuntu-latest + + - name: Cache cargo registry + uses: actions/cache@v3 + with: + path: ~/.cargo/registry + key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo index + uses: actions/cache@v3 + with: + path: ~/.cargo/git + key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo target dir + uses: actions/cache@v3 + with: + path: target + key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }} + + - name: Build + run: | + ./prepare_build.sh + ./build.sh --release --release-sysroot + cargo test + ./clean_all.sh + + - name: Prepare dependencies + run: | + git config --global user.email "user@example.com" + git config --global user.name "User" + ./prepare.sh + + # Compile is a separate step, as the actions-rs/cargo action supports error annotations + - name: Compile + uses: actions-rs/cargo@v1.0.3 + with: + command: build + args: --release + + - name: Run tests + run: | + ./test.sh --release --clean --release-sysroot --build-sysroot --mini-tests --std-tests --test-libcore + + - name: Run stdarch tests + run: | + cd build_sysroot/sysroot_src/library/stdarch/ + CHANNEL=release TARGET=x86_64-unknown-linux-gnu ../../../../cargo.sh test diff --git a/Cargo.lock b/Cargo.lock index 1cb219e12e0..65e76197559 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -208,6 +208,7 @@ version = "0.1.0" dependencies = [ "gccjit", "lang_tester", + "smallvec", "tempfile", ] @@ -220,6 +221,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "smallvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" + [[package]] name = "tempfile" version = "3.2.0" diff --git a/Cargo.toml b/Cargo.toml index 1f3da2f799b..81066d9ce1f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,6 +27,8 @@ gccjit = { git = "https://github.com/antoyo/gccjit.rs" } # Local copy. #gccjit = { path = "../gccjit.rs" } +smallvec = { version = "1.6.1", features = ["union", "may_dangle"] } + [dev-dependencies] lang_tester = "0.3.9" tempfile = "3.1.0" diff --git a/Readme.md b/Readme.md index fe23a267696..bb741943892 100644 --- a/Readme.md +++ b/Readme.md @@ -1,5 +1,7 @@ # WIP libgccjit codegen backend for rust +[![Chat on IRC](https://img.shields.io/badge/irc.libera.chat-%23rustc__codegen__gcc-blue.svg)](https://web.libera.chat/#rustc_codegen_gcc) + This is a GCC codegen for rustc, which means it can be loaded by the existing rustc frontend, but benefits from GCC: more architectures are supported and GCC's optimizations are used. 
**Despite its name, libgccjit can be used for ahead-of-time compilation, as is used here.** @@ -16,21 +18,61 @@ The patches in [this repository](https://github.com/antoyo/libgccjit-patches) ne (Those patches should work when applied on master, but in case it doesn't work, they are known to work when applied on 079c23cfe079f203d5df83fea8e92a60c7d7e878.) You can also use my [fork of gcc](https://github.com/antoyo/gcc) which already includes these patches.** +To build it (most of these instructions come from [here](https://gcc.gnu.org/onlinedocs/jit/internals/index.html), so don't hesitate to take a look there if you encounter an issue): + +```bash +$ git clone https://github.com/antoyo/gcc +$ sudo apt install flex libmpfr-dev libgmp-dev libmpc3 libmpc-dev +$ mkdir gcc-build gcc-install +$ cd gcc-build +$ ../gcc/configure \ + --enable-host-shared \ + --enable-languages=jit \ + --enable-checking=release \ # it enables extra checks which allow to find bugs + --disable-bootstrap \ + --disable-multilib \ + --prefix=$(pwd)/../gcc-install +$ make -j4 # You can replace `4` with another number depending on how many cores you have. +``` + +If you want to run libgccjit tests, you will need to also enable the C++ language in the `configure`: + +```bash +--enable-languages=jit,c++ +``` + +Then to run libgccjit tests: + +```bash +$ cd gcc # from the `gcc-build` folder +$ make check-jit +# To run one specific test: +$ make check-jit RUNTESTFLAGS="-v -v -v jit.exp=jit.dg/test-asm.cc" +``` + **Put the path to your custom build of libgccjit in the file `gcc_path`.** ```bash -$ git clone https://github.com/rust-lang/rustc_codegen_gcc.git -$ cd rustc_codegen_gcc +$ dirname $(readlink -f `find . -name libgccjit.so`) > gcc_path +``` + +You also need to set RUST_COMPILER_RT_ROOT: + +```bash $ git clone https://github.com/llvm/llvm-project llvm --depth 1 --single-branch $ export RUST_COMPILER_RT_ROOT="$PWD/llvm/compiler-rt" -$ ./prepare_build.sh # download and patch sysroot src -$ ./build.sh --release ``` -To run the tests: +Then you can run commands like this: ```bash $ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking +$ LIBRARY_PATH=$(cat gcc_path) LD_LIBRARY_PATH=$(cat gcc_path) ./build.sh --release +``` + +To run the tests: + +```bash $ ./test.sh --release ``` @@ -120,13 +162,52 @@ To print a debug representation of a tree: debug_tree(expr); ``` +(defined in print-tree.h) + +To print a debug reprensentation of a gimple struct: + +```c +debug_gimple_stmt(gimple_struct) +``` + To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo build`. +To have the correct file paths in `gdb` instead of `/usr/src/debug/gcc/libstdc++-v3/libsupc++/eh_personality.cc`: + +Maybe by calling the following at the beginning of gdb: + +``` +set substitute-path /usr/src/debug/gcc /path/to/gcc-repo/gcc +``` + +TODO(antoyo): but that's not what I remember I was doing. + ### How to use a custom-build rustc * Build the stage2 compiler (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2`). * Clean and rebuild the codegen with `debug-current` in the file `rust-toolchain`. +### How to install a forked git-subtree + +Using git-subtree with `rustc` requires a patched git to make it work. +The PR that is needed is [here](https://github.com/gitgitgadget/git/pull/493). 
+Use the following instructions to install it: + +``` +git clone git@github.com:tqc/git.git +cd git +git checkout tqc/subtree +make +make install +cd contrib/subtree +make +cp git-subtree ~/bin +``` + +### How to use [mem-trace](https://github.com/antoyo/mem-trace) + +`rustc` needs to be built without `jemalloc` so that `mem-trace` can overload `malloc` since `jemalloc` is linked statically, so a `LD_PRELOAD`-ed library won't a chance to intercept the calls to `malloc`. + ### How to build a cross-compiling libgccjit #### Building libgccjit @@ -142,6 +223,5 @@ To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo b * Since rustc doesn't support this architecture yet, set it back to `TARGET_TRIPLE="mips-unknown-linux-gnu"` (or another target having the same attributes). Alternatively, create a [target specification file](https://book.avr-rust.com/005.1-the-target-specification-json-file.html) (note that the `arch` specified in this file must be supported by the rust compiler). * Set `linker='-Clinker=m68k-linux-gcc'`. * Set the path to the cross-compiling libgccjit in `gcc_path`. - * Disable the 128-bit integer types if the target doesn't support them by using `let i128_type = context.new_type::();` in `context.rs` (same for u128_type). * Comment the line: `context.add_command_line_option("-masm=intel");` in src/base.rs. * (might not be necessary) Disable the compilation of libstd.so (and possibly libcore.so?). diff --git a/build_sysroot/build_sysroot.sh b/build_sysroot/build_sysroot.sh index f293192a099..9d692d599f6 100755 --- a/build_sysroot/build_sysroot.sh +++ b/build_sysroot/build_sysroot.sh @@ -16,7 +16,7 @@ rm Cargo.lock test_target/Cargo.lock 2>/dev/null || true rm -r sysroot/ 2>/dev/null || true # Build libs -export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked -Cpanic=abort" +export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked" if [[ "$1" == "--release" ]]; then sysroot_channel='release' RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release diff --git a/config.sh b/config.sh index b25e215fb9e..166e83901c4 100644 --- a/config.sh +++ b/config.sh @@ -38,7 +38,7 @@ if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then fi fi -export RUSTFLAGS="$CG_RUSTFLAGS $linker -Cpanic=abort -Csymbol-mangling-version=v0 -Cdebuginfo=2 -Clto=off -Zpanic-abort-tests -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot" +export RUSTFLAGS="$CG_RUSTFLAGS $linker -Csymbol-mangling-version=v0 -Cdebuginfo=2 -Clto=off -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot $TEST_FLAGS" # FIXME(antoyo): remove once the atomic shim is gone if [[ `uname` == 'Darwin' ]]; then diff --git a/example/alloc_example.rs b/example/alloc_example.rs index 74ea7ec4ede..c80348ca549 100644 --- a/example/alloc_example.rs +++ b/example/alloc_example.rs @@ -1,4 +1,4 @@ -#![feature(start, box_syntax, core_intrinsics, alloc_error_handler)] +#![feature(start, box_syntax, core_intrinsics, alloc_error_handler, lang_items)] #![no_std] extern crate alloc; @@ -18,16 +18,22 @@ extern "C" { #[panic_handler] fn panic_handler(_: &core::panic::PanicInfo) -> ! { - unsafe { - core::intrinsics::abort(); - } + core::intrinsics::abort(); } #[alloc_error_handler] fn alloc_error_handler(_: alloc::alloc::Layout) -> ! 
{ - unsafe { - core::intrinsics::abort(); - } + core::intrinsics::abort(); +} + +#[lang = "eh_personality"] +fn eh_personality() -> ! { + loop {} +} + +#[no_mangle] +unsafe extern "C" fn _Unwind_Resume() { + core::intrinsics::unreachable(); } #[start] diff --git a/example/mini_core.rs b/example/mini_core.rs index ddcbb0d9fc7..637b8dc53fe 100644 --- a/example/mini_core.rs +++ b/example/mini_core.rs @@ -1,6 +1,6 @@ #![feature( no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types, - untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits, + decl_macro, rustc_attrs, transparent_unions, auto_traits, thread_local )] #![no_core] @@ -17,6 +17,9 @@ pub trait Sized {} #[lang = "destruct"] pub trait Destruct {} +#[lang = "tuple_trait"] +pub trait Tuple {} + #[lang = "unsize"] pub trait Unsize {} @@ -39,14 +42,14 @@ impl<'a, T: ?Sized+Unsize, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut impl, U: ?Sized> DispatchFromDyn<*const U> for *const T {} // *mut T -> *mut U impl, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {} -impl, U: ?Sized> DispatchFromDyn> for Box {} +impl, U: ?Sized> DispatchFromDyn> for Box {} #[lang = "receiver"] pub trait Receiver {} impl Receiver for &T {} impl Receiver for &mut T {} -impl Receiver for Box {} +impl Receiver for Box {} #[lang = "copy"] pub unsafe trait Copy {} @@ -396,7 +399,7 @@ pub struct PhantomData; #[lang = "fn_once"] #[rustc_paren_sugar] -pub trait FnOnce { +pub trait FnOnce { #[lang = "fn_once_output"] type Output; @@ -405,13 +408,21 @@ pub trait FnOnce { #[lang = "fn_mut"] #[rustc_paren_sugar] -pub trait FnMut: FnOnce { +pub trait FnMut: FnOnce { extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output; } #[lang = "panic"] #[track_caller] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { + unsafe { + libc::puts("Panicking\n\0" as *const str as *const u8); + intrinsics::abort(); + } +} + +#[lang = "panic_cannot_unwind"] +fn panic_cannot_unwind() -> ! 
{ unsafe { libc::puts("Panicking\n\0" as *const str as *const u8); intrinsics::abort(); @@ -450,17 +461,32 @@ pub trait Deref { pub trait Allocator { } +impl Allocator for () {} + pub struct Global; impl Allocator for Global {} +#[repr(transparent)] +#[rustc_layout_scalar_valid_range_start(1)] +#[rustc_nonnull_optimization_guaranteed] +pub struct NonNull(pub *const T); + +impl CoerceUnsized> for NonNull where T: Unsize {} +impl DispatchFromDyn> for NonNull where T: Unsize {} + +pub struct Unique { + pub pointer: NonNull, + pub _marker: PhantomData, +} + +impl CoerceUnsized> for Unique where T: Unsize {} +impl DispatchFromDyn> for Unique where T: Unsize {} + #[lang = "owned_box"] -pub struct Box< - T: ?Sized, - A: Allocator = Global, ->(*mut T, A); +pub struct Box(Unique, A); -impl, U: ?Sized> CoerceUnsized> for Box {} +impl, U: ?Sized, A: Allocator> CoerceUnsized> for Box {} impl Drop for Box { fn drop(&mut self) { @@ -468,7 +494,7 @@ impl Drop for Box { } } -impl Deref for Box { +impl Deref for Box { type Target = T; fn deref(&self) -> &Self::Target { @@ -482,8 +508,8 @@ unsafe fn allocate(size: usize, _align: usize) -> *mut u8 { } #[lang = "box_free"] -unsafe fn box_free(ptr: *mut T, alloc: A) { - libc::free(ptr as *mut u8); +unsafe fn box_free(ptr: Unique, _alloc: ()) { + libc::free(ptr.pointer.0 as *mut u8); } #[lang = "drop"] @@ -505,17 +531,25 @@ pub union MaybeUninit { } pub mod intrinsics { + use crate::Sized; + extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; + #[rustc_safe_intrinsic] pub fn size_of() -> usize; - pub fn size_of_val(val: *const T) -> usize; + pub fn size_of_val(val: *const T) -> usize; + #[rustc_safe_intrinsic] pub fn min_align_of() -> usize; - pub fn min_align_of_val(val: *const T) -> usize; + pub fn min_align_of_val(val: *const T) -> usize; pub fn copy(src: *const T, dst: *mut T, count: usize); pub fn transmute(e: T) -> U; pub fn ctlz_nonzero(x: T) -> T; - pub fn needs_drop() -> bool; + #[rustc_safe_intrinsic] + pub fn needs_drop() -> bool; + #[rustc_safe_intrinsic] pub fn bitreverse(x: T) -> T; + #[rustc_safe_intrinsic] pub fn bswap(x: T) -> T; pub fn write_bytes(dst: *mut T, val: u8, count: usize); pub fn unreachable() -> !; diff --git a/example/mini_core_hello_world.rs b/example/mini_core_hello_world.rs index 14fd9eeffa6..993a31e68ea 100644 --- a/example/mini_core_hello_world.rs +++ b/example/mini_core_hello_world.rs @@ -85,6 +85,7 @@ fn start( main: fn() -> T, argc: isize, argv: *const *const u8, + _sigpipe: u8, ) -> isize { if argc == 3 { unsafe { puts(*argv); } @@ -228,6 +229,7 @@ fn main() { } as Box; const FUNC_REF: Option = Some(main); + #[allow(unreachable_code)] match FUNC_REF { Some(_) => {}, None => assert!(false), diff --git a/example/mod_bench.rs b/example/mod_bench.rs index 2e2b0052dee..95bcad2cd17 100644 --- a/example/mod_bench.rs +++ b/example/mod_bench.rs @@ -6,9 +6,7 @@ extern {} #[panic_handler] fn panic_handler(_: &core::panic::PanicInfo) -> ! 
{ - unsafe { - core::intrinsics::abort(); - } + core::intrinsics::abort(); } #[lang="eh_personality"] @@ -32,6 +30,6 @@ fn main(_argc: isize, _argv: *const *const u8) -> isize { #[inline(never)] fn black_box(i: u32) { if i != 1 { - unsafe { core::intrinsics::abort(); } + core::intrinsics::abort(); } } diff --git a/example/std_example.rs b/example/std_example.rs index 31069058aea..5c171c49fd1 100644 --- a/example/std_example.rs +++ b/example/std_example.rs @@ -1,5 +1,6 @@ #![feature(core_intrinsics, generators, generator_trait, is_sorted)] +#[cfg(feature="master")] use std::arch::x86_64::*; use std::io::Write; use std::ops::Generator; diff --git a/failing-ui-tests.txt b/failing-ui-tests.txt new file mode 100644 index 00000000000..8539e27ea6a --- /dev/null +++ b/failing-ui-tests.txt @@ -0,0 +1,68 @@ +tests/ui/allocator/custom-in-block.rs +tests/ui/allocator/custom-in-submodule.rs +tests/ui/allocator/custom.rs +tests/ui/allocator/hygiene.rs +tests/ui/allocator/no_std-alloc-error-handler-custom.rs +tests/ui/allocator/no_std-alloc-error-handler-default.rs +tests/ui/allocator/xcrate-use.rs +tests/ui/allocator/xcrate-use2.rs +tests/ui/asm/may_unwind.rs +tests/ui/asm/x86_64/multiple-clobber-abi.rs +tests/ui/debuginfo/debuginfo-emit-llvm-ir-and-split-debuginfo.rs +tests/ui/functions-closures/parallel-codegen-closures.rs +tests/ui/linkage-attr/linkage1.rs +tests/ui/lto/dylib-works.rs +tests/ui/numbers-arithmetic/saturating-float-casts.rs +tests/ui/polymorphization/promoted-function.rs +tests/ui/process/nofile-limit.rs +tests/ui/sepcomp/sepcomp-cci.rs +tests/ui/sepcomp/sepcomp-extern.rs +tests/ui/sepcomp/sepcomp-fns-backwards.rs +tests/ui/sepcomp/sepcomp-fns.rs +tests/ui/sepcomp/sepcomp-statics.rs +tests/ui/simd/intrinsic/generic-arithmetic-pass.rs +tests/ui/sse2.rs +tests/ui/target-feature/missing-plusminus.rs +tests/ui/asm/x86_64/may_unwind.rs +tests/ui/backtrace.rs +tests/ui/catch-unwind-bang.rs +tests/ui/cfg/cfg-panic-abort.rs +tests/ui/drop/dynamic-drop-async.rs +tests/ui/drop/repeat-drop.rs +tests/ui/fmt/format-args-capture.rs +tests/ui/generator/panic-drops-resume.rs +tests/ui/generator/panic-drops.rs +tests/ui/intrinsics/panic-uninitialized-zeroed.rs +tests/ui/iterators/iter-sum-overflow-debug.rs +tests/ui/iterators/iter-sum-overflow-overflow-checks.rs +tests/ui/mir/mir_calls_to_shims.rs +tests/ui/mir/mir_drop_order.rs +tests/ui/mir/mir_let_chains_drop_order.rs +tests/ui/oom_unwind.rs +tests/ui/panic-runtime/abort-link-to-unwinding-crates.rs +tests/ui/panic-runtime/abort.rs +tests/ui/panic-runtime/link-to-abort.rs +tests/ui/unwind-no-uwtable.rs +tests/ui/parser/unclosed-delimiter-in-dep.rs +tests/ui/runtime/rt-explody-panic-payloads.rs +tests/ui/simd/intrinsic/ptr-cast.rs +tests/ui/binding/fn-arg-incomplete-pattern-drop-order.rs +tests/ui/consts/missing_span_in_backtrace.rs +tests/ui/drop/dynamic-drop.rs +tests/ui/dyn-star/box.rs +tests/ui/issues/issue-40883.rs +tests/ui/issues/issue-43853.rs +tests/ui/issues/issue-47364.rs +tests/ui/macros/rfc-2011-nicer-assert-messages/assert-without-captures-does-not-create-unnecessary-code.rs +tests/ui/rfc-2091-track-caller/std-panic-locations.rs +tests/ui/rfcs/rfc1857-drop-order.rs +tests/ui/simd/issue-17170.rs +tests/ui/simd/issue-39720.rs +tests/ui/simd/issue-89193.rs +tests/ui/statics/issue-91050-1.rs +tests/ui/statics/issue-91050-2.rs +tests/ui/alloc-error/default-alloc-error-hook.rs +tests/ui/generator/panic-safe.rs +tests/ui/issues/issue-14875.rs +tests/ui/issues/issue-29948.rs +tests/ui/panic-while-printing.rs diff --git a/failing-ui-tests12.txt 
b/failing-ui-tests12.txt new file mode 100644 index 00000000000..8c27bd8b8ca --- /dev/null +++ b/failing-ui-tests12.txt @@ -0,0 +1,39 @@ +tests/ui/asm/x86_64/issue-96797.rs +tests/ui/intrinsics/const-eval-select-x86_64.rs +tests/ui/packed/packed-struct-drop-aligned.rs +tests/ui/packed/packed-struct-generic-layout.rs +tests/ui/packed/packed-struct-layout.rs +tests/ui/packed/packed-struct-optimized-enum.rs +tests/ui/packed/packed-struct-size.rs +tests/ui/packed/packed-struct-vec.rs +tests/ui/packed/packed-tuple-struct-layout.rs +tests/ui/simd/array-type.rs +tests/ui/simd/intrinsic/float-minmax-pass.rs +tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs +tests/ui/simd/intrinsic/generic-as.rs +tests/ui/simd/intrinsic/generic-cast-pass.rs +tests/ui/simd/intrinsic/generic-cast-pointer-width.rs +tests/ui/simd/intrinsic/generic-comparison-pass.rs +tests/ui/simd/intrinsic/generic-elements-pass.rs +tests/ui/simd/intrinsic/generic-reduction-pass.rs +tests/ui/simd/intrinsic/generic-select-pass.rs +tests/ui/simd/intrinsic/inlining-issue67557-ice.rs +tests/ui/simd/intrinsic/inlining-issue67557.rs +tests/ui/simd/monomorphize-shuffle-index.rs +tests/ui/simd/shuffle.rs +tests/ui/simd/simd-bitmask.rs +tests/ui/generator/resume-after-return.rs +tests/ui/iterators/iter-step-overflow-debug.rs +tests/ui/macros/rfc-2011-nicer-assert-messages/all-expr-kinds.rs +tests/ui/numbers-arithmetic/next-power-of-two-overflow-debug.rs +tests/ui/privacy/reachable-unnameable-items.rs +tests/ui/rfc-1937-termination-trait/termination-trait-in-test.rs +tests/ui/async-await/async-fn-size-moved-locals.rs +tests/ui/async-await/async-fn-size-uninit-locals.rs +tests/ui/cfg/cfg-panic.rs +tests/ui/generator/size-moved-locals.rs +tests/ui/macros/rfc-2011-nicer-assert-messages/all-not-available-cases.rs +tests/ui/simd/intrinsic/generic-gather-pass.rs +tests/ui/simd/issue-85915-simd-ptrs.rs +tests/ui/issues/issue-68010-large-zst-consts.rs +tests/ui/rust-2018/proc-macro-crate-in-paths.rs diff --git a/locales/en-US.ftl b/locales/en-US.ftl index 6101b28ab0c..2181d49eeef 100644 --- a/locales/en-US.ftl +++ b/locales/en-US.ftl @@ -60,3 +60,6 @@ codegen_gcc_invalid_monomorphization_unsupported_cast = codegen_gcc_invalid_monomorphization_unsupported_operation = invalid monomorphization of `{$name}` intrinsic: unsupported operation on `{$in_ty}` with element `{$in_elem}` + +codegen_gcc_invalid_minimum_alignment = + invalid minimum global alignment: {$err} diff --git a/patches/0001-Add-stdarch-Cargo.toml-for-testing.patch b/patches/0001-Add-stdarch-Cargo.toml-for-testing.patch new file mode 100644 index 00000000000..93c63b5dcac --- /dev/null +++ b/patches/0001-Add-stdarch-Cargo.toml-for-testing.patch @@ -0,0 +1,39 @@ +From c3821e02fbd6cb5ad6e06d759fccdc9073712375 Mon Sep 17 00:00:00 2001 +From: Antoni Boucher +Date: Tue, 7 Jun 2022 21:40:13 -0400 +Subject: [PATCH] Add stdarch Cargo.toml for testing + +--- + library/stdarch/Cargo.toml | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + create mode 100644 library/stdarch/Cargo.toml + +diff --git a/library/stdarch/Cargo.toml b/library/stdarch/Cargo.toml +new file mode 100644 +index 0000000..fbe0a95 +--- /dev/null ++++ b/library/stdarch/Cargo.toml +@@ -0,0 +1,20 @@ ++[workspace] ++members = [ ++ "crates/core_arch", ++ "crates/std_detect", ++ "crates/stdarch-gen", ++ "examples/" ++] ++exclude = [ ++ "crates/wasm-assert-instr-tests" ++] ++ ++[profile.release] ++debug = true ++opt-level = 3 ++incremental = true ++ ++[profile.bench] ++debug = 1 ++opt-level = 3 ++incremental = true +-- 
+2.26.2.7.g19db9cfb68.dirty + diff --git a/patches/0001-Disable-examples.patch b/patches/0001-Disable-examples.patch new file mode 100644 index 00000000000..1b71df1ca8d --- /dev/null +++ b/patches/0001-Disable-examples.patch @@ -0,0 +1,25 @@ +From a2d53a324a02c04b76c0e9d39dc15cd443a3b8b2 Mon Sep 17 00:00:00 2001 +From: Antoni Boucher +Date: Fri, 25 Nov 2022 11:18:11 -0500 +Subject: [PATCH] Disable examples + +--- + library/stdarch/Cargo.toml | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/library/stdarch/Cargo.toml b/library/stdarch/Cargo.toml +index fbe0a95..748d72d 100644 +--- a/library/stdarch/Cargo.toml ++++ b/library/stdarch/Cargo.toml +@@ -3,7 +3,7 @@ members = [ + "crates/core_arch", + "crates/std_detect", + "crates/stdarch-gen", +- "examples/" ++ #"examples/" + ] + exclude = [ + "crates/wasm-assert-instr-tests" +-- +2.26.2.7.g19db9cfb68.dirty + diff --git a/patches/0022-core-Disable-not-compiling-tests.patch b/patches/0022-core-Disable-not-compiling-tests.patch index 301b3f9bde4..4db56fa3bd2 100644 --- a/patches/0022-core-Disable-not-compiling-tests.patch +++ b/patches/0022-core-Disable-not-compiling-tests.patch @@ -18,7 +18,7 @@ new file mode 100644 index 0000000..46fd999 --- /dev/null +++ b/library/core/tests/Cargo.toml -@@ -0,0 +1,8 @@ +@@ -0,0 +1,12 @@ +[package] +name = "core" +version = "0.0.0" @@ -27,37 +27,18 @@ index 0000000..46fd999 +[lib] +name = "coretests" +path = "lib.rs" -diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs -index a35897e..f0bf645 100644 ---- a/library/core/tests/num/flt2dec/mod.rs -+++ b/library/core/tests/num/flt2dec/mod.rs -@@ -13,7 +13,6 @@ mod strategy { - mod dragon; - mod grisu; - } --mod random; - - pub fn decode_finite(v: T) -> Decoded { - match decode(v).1 { -diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs -index 6609bc3..241b497 100644 ---- a/library/core/tests/slice.rs -+++ b/library/core/tests/slice.rs -@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() { - } - } - -+/* - #[test] - #[cfg(not(target_arch = "wasm32"))] - fn sort_unstable() { -@@ -1394,6 +1395,7 @@ fn partition_at_index() { - v.select_nth_unstable(0); - assert!(v == [0xDEADBEEF]); - } -+*/ - - #[test] - #[should_panic(expected = "index 0 greater than length of slice")] ++ ++[dependencies] ++rand = { version = "0.8.5", default-features = false } ++rand_xorshift = { version = "0.3.0", default-features = false } +diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs +index 42a26ae..5ac1042 100644 +--- a/library/core/tests/lib.rs ++++ b/library/core/tests/lib.rs +@@ -1,3 +1,4 @@ ++#![cfg(test)] + #![feature(alloc_layout_extra)] + #![feature(array_chunks)] + #![feature(array_methods)] -- 2.21.0 (Apple Git-122) diff --git a/patches/0024-core-Disable-portable-simd-test.patch b/patches/0024-core-Disable-portable-simd-test.patch deleted file mode 100644 index c59a40df039..00000000000 --- a/patches/0024-core-Disable-portable-simd-test.patch +++ /dev/null @@ -1,28 +0,0 @@ -From b1ae000f6da1abd3b8e9b80c40bc11c89b8ae93c Mon Sep 17 00:00:00 2001 -From: bjorn3 -Date: Thu, 30 Dec 2021 16:54:40 +0100 -Subject: [PATCH] [core] Disable portable-simd test - ---- - library/core/tests/lib.rs | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs -index 06c7be0..359e2e7 100644 ---- a/library/core/tests/lib.rs -+++ b/library/core/tests/lib.rs -@@ -75,7 +75,6 @@ - #![feature(never_type)] - #![feature(unwrap_infallible)] --#![feature(portable_simd)] - 
#![feature(ptr_metadata)] - #![feature(once_cell)] - #![feature(option_result_contains)] -@@ -127,7 +126,6 @@ mod pin; - mod pin_macro; - mod ptr; - mod result; --mod simd; - mod slice; - mod str; - mod str_lossy; diff --git a/rust-toolchain b/rust-toolchain index b20aeb979ad..933ecd45baa 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2022-06-06" +channel = "nightly-2023-03-02" components = ["rust-src", "rustc-dev", "llvm-tools-preview"] diff --git a/rustc_patches/compile_test.patch b/rustc_patches/compile_test.patch deleted file mode 100644 index 59143eac37b..00000000000 --- a/rustc_patches/compile_test.patch +++ /dev/null @@ -1,14 +0,0 @@ -diff --git a/src/tools/compiletest/src/header.rs b/src/tools/compiletest/src/header.rs -index 887d27fd6dca4..2c2239f2b83d1 100644 ---- a/src/tools/compiletest/src/header.rs -+++ b/src/tools/compiletest/src/header.rs -@@ -806,8 +806,8 @@ pub fn make_test_description( - cfg: Option<&str>, - ) -> test::TestDesc { - let mut ignore = false; - #[cfg(not(bootstrap))] -- let ignore_message: Option = None; -+ let ignore_message: Option<&str> = None; - let mut should_fail = false; - - let rustc_has_profiler_support = env::var_os("RUSTC_PROFILER_SUPPORT").is_some(); diff --git a/src/allocator.rs b/src/allocator.rs index e2c9ffe9c1c..4bad33ee879 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -1,3 +1,5 @@ +#[cfg(feature="master")] +use gccjit::FnAttribute; use gccjit::{FunctionType, GlobalKind, ToRValue}; use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS}; use rustc_middle::bug; @@ -50,7 +52,8 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false); if tcx.sess.target.options.default_hidden_visibility { - // TODO(antoyo): set visibility. + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); } if tcx.sess.must_emit_unwind_tables() { // TODO(antoyo): emit unwind tables. @@ -61,7 +64,8 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) .collect(); let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false); - // TODO(antoyo): set visibility. 
+ #[cfg(feature="master")] + callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); let block = func.new_block("entry"); @@ -90,12 +94,18 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam .collect(); let func = context.new_function(None, FunctionType::Exported, void, &args, name, false); + if tcx.sess.target.default_hidden_visibility { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + } + let callee = alloc_error_handler_kind.fn_name(sym::oom); let args: Vec<_> = types.iter().enumerate() .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) .collect(); let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false); - //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden); + #[cfg(feature="master")] + callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); let block = func.new_block("entry"); diff --git a/src/asm.rs b/src/asm.rs index c346dbd63cc..41e9d61a10e 100644 --- a/src/asm.rs +++ b/src/asm.rs @@ -157,7 +157,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { use ConstraintOrRegister::*; let (constraint, ty) = match (reg_to_gcc(reg), place) { - (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx, false)), + (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx)), // When `reg` is a class and not an explicit register but the out place is not specified, // we need to create an unused output variable to assign the output to. This var // needs to be of a type that's "compatible" with the register class, but specific type @@ -226,7 +226,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { // This decision is also backed by the fact that LLVM needs in and out // values to be of *exactly the same type*, not just "compatible". // I'm not sure if GCC is so picky too, but better safe than sorry. - let ty = in_value.layout.gcc_type(self.cx, false); + let ty = in_value.layout.gcc_type(self.cx); let tmp_var = self.current_func().new_local(None, ty, "output_register"); // If the out_place is None (i.e `inout(reg) _` syntax was used), we translate @@ -286,7 +286,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { continue }; - let ty = out_place.layout.gcc_type(self.cx, false); + let ty = out_place.layout.gcc_type(self.cx); let tmp_var = self.current_func().new_local(None, ty, "output_register"); tmp_var.set_register_name(reg_name); @@ -306,7 +306,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { // `in("explicit register") var` InlineAsmOperandRef::In { reg, value } => { if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { - let ty = value.layout.gcc_type(self.cx, false); + let ty = value.layout.gcc_type(self.cx); let reg_var = self.current_func().new_local(None, ty, "input_register"); reg_var.set_register_name(reg_name); self.llbb().add_assignment(None, reg_var, value.immediate()); @@ -325,7 +325,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => { if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { // See explanation in the first pass. 
- let ty = in_value.layout.gcc_type(self.cx, false); + let ty = in_value.layout.gcc_type(self.cx); let tmp_var = self.current_func().new_local(None, ty, "output_register"); tmp_var.set_register_name(reg_name); @@ -353,8 +353,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { inputs.push(AsmInOperand { constraint: "X".into(), rust_idx, - val: self.cx.rvalue_as_function(get_fn(self.cx, instance)) - .get_address(None), + val: get_fn(self.cx, instance).get_address(None), }); } @@ -382,15 +381,19 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { for piece in template { match *piece { InlineAsmTemplatePiece::String(ref string) => { - // TODO(@Commeownist): switch to `Iterator::intersperse` once it's stable - let mut iter = string.split('%'); - if let Some(s) = iter.next() { - template_str.push_str(s); - } - - for s in iter { - template_str.push_str("%%"); - template_str.push_str(s); + for char in string.chars() { + // TODO(antoyo): might also need to escape | if rustc doesn't do it. + let escaped_char = + match char { + '%' => "%%", + '{' => "%{", + '}' => "%}", + _ => { + template_str.push(char); + continue; + }, + }; + template_str.push_str(escaped_char); } } InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => { @@ -565,39 +568,52 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { _ => unimplemented!(), } }, + // They can be retrieved from https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html InlineAsmRegOrRegClass::RegClass(reg) => match reg { - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(), - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => unimplemented!(), - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => unimplemented!(), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r", + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w", + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x", + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => { + unreachable!("clobber-only") + } + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r", InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => unimplemented!(), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => unimplemented!(), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => unimplemented!(), - InlineAsmRegClass::Avr(_) => unimplemented!(), - InlineAsmRegClass::Bpf(_) => unimplemented!(), - InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => unimplemented!(), - InlineAsmRegClass::Msp430(_) => unimplemented!(), - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => unimplemented!(), - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => unimplemented!(), - 
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => unimplemented!(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => unimplemented!(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => unimplemented!(), + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "t", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e", + InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w", + InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r" + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r", + // https://github.com/gcc-mirror/gcc/blob/master/gcc/config/nvptx/nvptx.md -> look for + // "define_constraint". + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h", + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r", + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l", + + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b", + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f", InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { unreachable!("clobber-only") }, - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(), - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { + unreachable!("clobber-only") + } InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r", InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q", InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q", @@ -605,16 +621,18 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x", InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v", InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk", - InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => unimplemented!(), - InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(), InlineAsmRegClass::X86( - X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg, + X86InlineAsmRegClass::kreg0 + | X86InlineAsmRegClass::x87_reg + | X86InlineAsmRegClass::mmx_reg + | X86InlineAsmRegClass::tmm_reg, ) => unreachable!("clobber-only"), InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("GCC backend does not support SPIR-V") } - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(), + 
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r", + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f", InlineAsmRegClass::Err => unreachable!(), } }; @@ -692,21 +710,23 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> { && options.contains(InlineAsmOptions::ATT_SYNTAX); // Build the template string - let mut template_str = String::new(); + let mut template_str = ".pushsection .text\n".to_owned(); + if att_dialect { + template_str.push_str(".att_syntax\n"); + } for piece in template { match *piece { InlineAsmTemplatePiece::String(ref string) => { - for line in string.lines() { + let mut index = 0; + while index < string.len() { // NOTE: gcc does not allow inline comment, so remove them. - let line = - if let Some(index) = line.rfind("//") { - &line[..index] - } - else { - line - }; - template_str.push_str(line); - template_str.push('\n'); + let comment_index = string[index..].find("//") + .map(|comment_index| comment_index + index) + .unwrap_or(string.len()); + template_str.push_str(&string[index..comment_index]); + index = string[comment_index..].find('\n') + .map(|index| index + comment_index) + .unwrap_or(string.len()); } }, InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => { @@ -719,6 +739,8 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } GlobalAsmOperandRef::SymFn { instance } => { + let function = get_fn(self, instance); + self.add_used_function(function); // TODO(@Amanieu): Additional mangling is needed on // some targets to add a leading underscore (Mach-O) // or byte count suffixes (x86 Windows). @@ -727,6 +749,7 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } GlobalAsmOperandRef::SymStatic { def_id } => { + // TODO(antoyo): set the global variable as used. // TODO(@Amanieu): Additional mangling is needed on // some targets to add a leading underscore (Mach-O). let instance = Instance::mono(self.tcx, def_id); @@ -738,48 +761,51 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } } - let template_str = - if att_dialect { - format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str) - } - else { - template_str - }; + if att_dialect { + template_str.push_str("\n\t.intel_syntax noprefix"); + } // NOTE: seems like gcc will put the asm in the wrong section, so set it to .text manually. 
- let template_str = format!(".pushsection .text\n{}\n.popsection", template_str); + template_str.push_str("\n.popsection"); self.context.add_top_level_asm(None, &template_str); } } fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option) -> Option { + // The modifiers can be retrieved from + // https://gcc.gnu.org/onlinedocs/gcc/Modifiers.html#Modifiers match reg { InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier, - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => modifier, InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => { - unimplemented!() + if modifier == Some('v') { None } else { modifier } + } + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => { + unreachable!("clobber-only") } - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None, InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => unimplemented!(), + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None, InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => unimplemented!(), + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'), InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => { - unimplemented!() + if modifier.is_none() { + Some('q') + } else { + modifier + } } - InlineAsmRegClass::Avr(_) => unimplemented!(), - InlineAsmRegClass::Bpf(_) => unimplemented!(), - InlineAsmRegClass::Hexagon(_) => unimplemented!(), - InlineAsmRegClass::Mips(_) => unimplemented!(), - InlineAsmRegClass::Msp430(_) => unimplemented!(), - InlineAsmRegClass::Nvptx(_) => unimplemented!(), - InlineAsmRegClass::PowerPC(_) => unimplemented!(), + InlineAsmRegClass::Hexagon(_) => None, + InlineAsmRegClass::Mips(_) => None, + InlineAsmRegClass::Nvptx(_) => None, + InlineAsmRegClass::PowerPC(_) => None, InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) - | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(), - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(), + | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None, + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { + unreachable!("clobber-only") + } InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier { None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') }, @@ -803,16 +829,29 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option _ => unreachable!(), }, InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None, - InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => None, - InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg) => { + InlineAsmRegClass::X86( + X86InlineAsmRegClass::x87_reg + | X86InlineAsmRegClass::mmx_reg + | X86InlineAsmRegClass::kreg0 + | X86InlineAsmRegClass::tmm_reg, + ) => { unreachable!("clobber-only") } - InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(), + InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None, + InlineAsmRegClass::Bpf(_) => None, + 
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) + | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) + | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier { + Some('h') => Some('B'), + Some('l') => Some('A'), + _ => None, + }, + InlineAsmRegClass::Avr(_) => None, + InlineAsmRegClass::S390x(_) => None, + InlineAsmRegClass::Msp430(_) => None, InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") - }, - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(), + } InlineAsmRegClass::Err => unreachable!(), } } diff --git a/src/attributes.rs b/src/attributes.rs new file mode 100644 index 00000000000..243a1a36dd0 --- /dev/null +++ b/src/attributes.rs @@ -0,0 +1,112 @@ +#[cfg(feature="master")] +use gccjit::FnAttribute; +use gccjit::Function; +use rustc_attr::InstructionSetAttr; +use rustc_codegen_ssa::target_features::tied_target_features; +use rustc_data_structures::fx::FxHashMap; +use rustc_middle::ty; +use rustc_session::Session; +use rustc_span::symbol::sym; +use smallvec::{smallvec, SmallVec}; + +use crate::context::CodegenCx; + +// Given a map from target_features to whether they are enabled or disabled, +// ensure only valid combinations are allowed. +pub fn check_tied_features(sess: &Session, features: &FxHashMap<&str, bool>) -> Option<&'static [&'static str]> { + for tied in tied_target_features(sess) { + // Tied features must be set to the same value, or not set at all + let mut tied_iter = tied.iter(); + let enabled = features.get(tied_iter.next().unwrap()); + if tied_iter.any(|feature| enabled != features.get(feature)) { + return Some(tied); + } + } + None +} + +// TODO(antoyo): maybe move to a new module gcc_util. +// To find a list of GCC's names, check https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html +fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> { + let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch }; + match (arch, s) { + ("x86", "sse4.2") => smallvec!["sse4.2", "crc32"], + ("x86", "pclmulqdq") => smallvec!["pclmul"], + ("x86", "rdrand") => smallvec!["rdrnd"], + ("x86", "bmi1") => smallvec!["bmi"], + ("x86", "cmpxchg16b") => smallvec!["cx16"], + ("x86", "avx512vaes") => smallvec!["vaes"], + ("x86", "avx512gfni") => smallvec!["gfni"], + ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"], + // NOTE: seems like GCC requires 'avx512bw' for 'avx512vbmi2'. + ("x86", "avx512vbmi2") => smallvec!["avx512vbmi2", "avx512bw"], + // NOTE: seems like GCC requires 'avx512bw' for 'avx512bitalg'. + ("x86", "avx512bitalg") => smallvec!["avx512bitalg", "avx512bw"], + ("aarch64", "rcpc2") => smallvec!["rcpc-immo"], + ("aarch64", "dpb") => smallvec!["ccpp"], + ("aarch64", "dpb2") => smallvec!["ccdp"], + ("aarch64", "frintts") => smallvec!["fptoint"], + ("aarch64", "fcma") => smallvec!["complxnum"], + ("aarch64", "pmuv3") => smallvec!["perfmon"], + ("aarch64", "paca") => smallvec!["pauth"], + ("aarch64", "pacg") => smallvec!["pauth"], + // Rust ties fp and neon together. 
In LLVM neon implicitly enables fp, + // but we manually enable neon when a feature only implicitly enables fp + ("aarch64", "f32mm") => smallvec!["f32mm", "neon"], + ("aarch64", "f64mm") => smallvec!["f64mm", "neon"], + ("aarch64", "fhm") => smallvec!["fp16fml", "neon"], + ("aarch64", "fp16") => smallvec!["fullfp16", "neon"], + ("aarch64", "jsconv") => smallvec!["jsconv", "neon"], + ("aarch64", "sve") => smallvec!["sve", "neon"], + ("aarch64", "sve2") => smallvec!["sve2", "neon"], + ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"], + ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"], + ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"], + ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"], + (_, s) => smallvec![s], + } +} + +/// Composite function which sets GCC attributes for function depending on its AST (`#[attribute]`) +/// attributes. +pub fn from_fn_attrs<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + #[cfg_attr(not(feature="master"), allow(unused_variables))] + func: Function<'gcc>, + instance: ty::Instance<'tcx>, +) { + let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); + + let function_features = + codegen_fn_attrs.target_features.iter().map(|features| features.as_str()).collect::>(); + + if let Some(features) = check_tied_features(cx.tcx.sess, &function_features.iter().map(|features| (*features, true)).collect()) { + let span = cx.tcx + .get_attr(instance.def_id(), sym::target_feature) + .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span); + let msg = format!("the target features {} must all be either enabled or disabled together", features.join(", ")); + let mut err = cx.tcx.sess.struct_span_err(span, &msg); + err.help("add the missing features in a `target_feature` attribute"); + err.emit(); + return; + } + + let mut function_features = function_features + .iter() + .flat_map(|feat| to_gcc_features(cx.tcx.sess, feat).into_iter()) + .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x { + InstructionSetAttr::ArmA32 => "-thumb-mode", // TODO(antoyo): support removing feature. + InstructionSetAttr::ArmT32 => "thumb-mode", + })) + .collect::>(); + + // TODO(antoyo): check if we really need global backend features. (Maybe they could be applied + // globally?) 
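The tied-feature rule enforced by `check_tied_features` above is easiest to see on a concrete group. The following is a minimal, self-contained sketch of the same check using std's `HashMap` in place of `FxHashMap`; the group `["paca", "pacg"]` is only an illustrative stand-in for whatever `tied_target_features` reports for the target:

    use std::collections::HashMap;

    // Returns the offending tied group when its members are not all set to the
    // same value (or all left unset), mirroring `check_tied_features`.
    fn check_tied(tied: &[&'static str], features: &HashMap<&'static str, bool>) -> Option<Vec<&'static str>> {
        let mut iter = tied.iter();
        let first = features.get(iter.next().copied().unwrap());
        if iter.any(|feature| features.get(*feature) != first) {
            Some(tied.to_vec())
        } else {
            None
        }
    }

    fn main() {
        let tied = ["paca", "pacg"];
        let mut features = HashMap::new();
        features.insert("paca", true); // "pacg" left unset: the group disagrees.
        assert_eq!(check_tied(&tied, &features), Some(vec!["paca", "pacg"]));
        features.insert("pacg", true); // Both enabled: the group is consistent.
        assert_eq!(check_tied(&tied, &features), None);
    }

Enabling only one member of such a group is exactly the situation that makes `from_fn_attrs` emit the "must all be either enabled or disabled together" error above.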
+ let mut global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str()); + function_features.extend(&mut global_features); + let target_features = function_features.join(","); + if !target_features.is_empty() { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Target(&target_features)); + } +} diff --git a/src/back/write.rs b/src/back/write.rs index efcf18d31eb..5f54ac4ebc6 100644 --- a/src/back/write.rs +++ b/src/back/write.rs @@ -57,6 +57,7 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext, _diag_han if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") { let _ = fs::create_dir("/tmp/gccjit_dumps"); let path = &format!("/tmp/gccjit_dumps/{}.c", module.name); + context.set_debug_info(true); context.dump_to_file(path, true); } context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str")); diff --git a/src/base.rs b/src/base.rs index d464bd3d12a..dcd560b3dcd 100644 --- a/src/base.rs +++ b/src/base.rs @@ -8,6 +8,8 @@ use gccjit::{ }; use rustc_middle::dep_graph; use rustc_middle::ty::TyCtxt; +#[cfg(feature="master")] +use rustc_middle::mir::mono::Visibility; use rustc_middle::mir::mono::Linkage; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use rustc_codegen_ssa::base::maybe_create_entry_wrapper; @@ -20,6 +22,15 @@ use crate::GccContext; use crate::builder::Builder; use crate::context::CodegenCx; +#[cfg(feature="master")] +pub fn visibility_to_gcc(linkage: Visibility) -> gccjit::Visibility { + match linkage { + Visibility::Default => gccjit::Visibility::Default, + Visibility::Hidden => gccjit::Visibility::Hidden, + Visibility::Protected => gccjit::Visibility::Protected, + } +} + pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind { match linkage { Linkage::External => GlobalKind::Imported, @@ -76,16 +87,34 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i // Instantiate monomorphizations without filling out definitions yet... //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str()); let context = Context::default(); + + context.add_command_line_option("-fexceptions"); + context.add_driver_option("-fexceptions"); + // TODO(antoyo): only set on x86 platforms. context.add_command_line_option("-masm=intel"); // TODO(antoyo): only add the following cli argument if the feature is supported. context.add_command_line_option("-msse2"); context.add_command_line_option("-mavx2"); - context.add_command_line_option("-msha"); - context.add_command_line_option("-mpclmul"); // FIXME(antoyo): the following causes an illegal instruction on vmovdqu64 in std_example on my CPU. // Only add if the CPU supports it. - //context.add_command_line_option("-mavx512f"); + context.add_command_line_option("-msha"); + context.add_command_line_option("-mpclmul"); + context.add_command_line_option("-mfma"); + context.add_command_line_option("-mfma4"); + context.add_command_line_option("-m64"); + context.add_command_line_option("-mbmi"); + context.add_command_line_option("-mgfni"); + //context.add_command_line_option("-mavxvnni"); // The CI doesn't support this option. 
+ context.add_command_line_option("-mf16c"); + context.add_command_line_option("-maes"); + context.add_command_line_option("-mxsavec"); + context.add_command_line_option("-mbmi2"); + context.add_command_line_option("-mrtm"); + context.add_command_line_option("-mvaes"); + context.add_command_line_option("-mvpclmulqdq"); + context.add_command_line_option("-mavx"); + for arg in &tcx.sess.opts.cg.llvm_args { context.add_command_line_option(arg); } @@ -95,12 +124,20 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i context.add_command_line_option("-fno-semantic-interposition"); // NOTE: Rust relies on LLVM not doing TBAA (https://github.com/rust-lang/unsafe-code-guidelines/issues/292). context.add_command_line_option("-fno-strict-aliasing"); + // NOTE: Rust relies on LLVM doing wrapping on overflow. + context.add_command_line_option("-fwrapv"); if tcx.sess.opts.unstable_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) { context.add_command_line_option("-ffunction-sections"); context.add_command_line_option("-fdata-sections"); } + if env::var("CG_GCCJIT_DUMP_RTL").as_deref() == Ok("1") { + context.add_command_line_option("-fdump-rtl-vregs"); + } + if env::var("CG_GCCJIT_DUMP_TREE_ALL").as_deref() == Ok("1") { + context.add_command_line_option("-fdump-tree-all"); + } if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") { context.set_dump_code_on_compile(true); } @@ -115,7 +152,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i context.set_keep_intermediates(true); } - // TODO(bjorn3): Remove once unwinding is properly implemented + // NOTE: The codegen generates unrechable blocks. context.set_allow_unreachable_blocks(true); { diff --git a/src/builder.rs b/src/builder.rs index e88c12716ec..a3c8142bea2 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -217,7 +217,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let actual_ty = actual_val.get_type(); if expected_ty != actual_ty { - if !actual_ty.is_vector() && !expected_ty.is_vector() && actual_ty.is_integral() && expected_ty.is_integral() && actual_ty.get_size() != expected_ty.get_size() { + if !actual_ty.is_vector() && !expected_ty.is_vector() && (actual_ty.is_integral() && expected_ty.is_integral()) || (actual_ty.get_pointee().is_some() && expected_ty.get_pointee().is_some()) { self.context.new_cast(None, actual_val, expected_ty) } else if on_stack_param_indices.contains(&index) { @@ -226,6 +226,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { else { assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index); // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. + // TODO: remove bitcast now that vector types can be compared? 
self.bitcast(actual_val, expected_ty) } } @@ -279,21 +280,30 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { - let args = self.check_ptr_call("call", func_ptr, args); + let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr"); + let func_name = format!("{:?}", func_ptr); + let previous_arg_count = args.len(); + let orig_args = args; + let args = { + let function_address_names = self.function_address_names.borrow(); + let original_function_name = function_address_names.get(&func_ptr); + llvm::adjust_intrinsic_arguments(&self, gcc_func, args.into(), &func_name, original_function_name) + }; + let args_adjusted = args.len() != previous_arg_count; + let args = self.check_ptr_call("call", func_ptr, &*args); // gccjit requires to use the result of functions, even when it's not used. // That's why we assign the result to a local or call add_eval(). - let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr"); let return_type = gcc_func.get_return_type(); let void_type = self.context.new_type::<()>(); let current_func = self.block.get_function(); if return_type != void_type { unsafe { RETURN_VALUE_COUNT += 1 }; - let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT })); - let func_name = format!("{:?}", func_ptr); - let args = llvm::adjust_intrinsic_arguments(&self, gcc_func, args, &func_name); - self.block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args)); + let return_value = self.cx.context.new_call_through_ptr(None, func_ptr, &args); + let return_value = llvm::adjust_intrinsic_return_value(&self, return_value, &func_name, &args, args_adjusted, orig_args); + let result = current_func.new_local(None, return_value.get_type(), &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT })); + self.block.add_assignment(None, result, return_value); result.to_rvalue() } else { @@ -366,10 +376,10 @@ impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> { } } -impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> { +impl<'a, 'gcc, 'tcx> Deref for Builder<'a, 'gcc, 'tcx> { type Target = CodegenCx<'gcc, 'tcx>; - fn deref(&self) -> &Self::Target { + fn deref<'b>(&'b self) -> &'a Self::Target { self.cx } } @@ -387,7 +397,7 @@ impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> { } impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { - fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self { + fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Builder<'a, 'gcc, 'tcx> { Builder::with_cx(cx, block) } @@ -444,17 +454,36 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.block.end_with_switch(None, value, default_block, &gcc_cases); } - fn invoke( - &mut self, - typ: Type<'gcc>, - fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, - func: RValue<'gcc>, - args: &[RValue<'gcc>], - then: Block<'gcc>, - catch: Block<'gcc>, - _funclet: Option<&Funclet>, - ) -> RValue<'gcc> { - // TODO(bjorn3): Properly implement unwinding. 
+ #[cfg(feature="master")] + fn invoke(&mut self, typ: Type<'gcc>, _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { + let try_block = self.current_func().new_block("try"); + + let current_block = self.block.clone(); + self.block = try_block; + let call = self.call(typ, None, func, args, None); // TODO(antoyo): use funclet here? + self.block = current_block; + + let return_value = self.current_func() + .new_local(None, call.get_type(), "invokeResult"); + + try_block.add_assignment(None, return_value, call); + + try_block.end_with_jump(None, then); + + if self.cleanup_blocks.borrow().contains(&catch) { + self.block.add_try_finally(None, try_block, catch); + } + else { + self.block.add_try_catch(None, try_block, catch); + } + + self.block.end_with_jump(None, then); + + return_value.to_rvalue() + } + + #[cfg(not(feature="master"))] + fn invoke(&mut self, typ: Type<'gcc>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { let call_site = self.call(typ, None, func, args, None); let condition = self.context.new_rvalue_from_int(self.bool_type, 1); self.llbb().end_with_conditional(None, condition, then, catch); @@ -542,6 +571,31 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO(antoyo): add check in libgccjit since using the binary operator % causes the following error: + // during RTL pass: expand + // libgccjit.so: error: in expmed_mode_index, at expmed.h:240 + // 0x7f0101d58dc6 expmed_mode_index + // ../../../gcc/gcc/expmed.h:240 + // 0x7f0101d58e35 expmed_op_cost_ptr + // ../../../gcc/gcc/expmed.h:262 + // 0x7f0101d594a1 sdiv_cost_ptr + // ../../../gcc/gcc/expmed.h:531 + // 0x7f0101d594f3 sdiv_cost + // ../../../gcc/gcc/expmed.h:549 + // 0x7f0101d6af7e expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int, optab_methods) + // ../../../gcc/gcc/expmed.cc:4356 + // 0x7f0101d94f9e expand_expr_divmod + // ../../../gcc/gcc/expr.cc:8929 + // 0x7f0101d97a26 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier) + // ../../../gcc/gcc/expr.cc:9566 + // 0x7f0101bef6ef expand_gimple_stmt_1 + // ../../../gcc/gcc/cfgexpand.cc:3967 + // 0x7f0101bef910 expand_gimple_stmt + // ../../../gcc/gcc/cfgexpand.cc:4028 + // 0x7f0101bf6ee7 expand_gimple_basic_block + // ../../../gcc/gcc/cfgexpand.cc:6069 + // 0x7f0101bf9194 execute + // ../../../gcc/gcc/cfgexpand.cc:6795 if a.get_type().is_compatible_with(self.cx.float_type) { let fmodf = self.context.get_builtin_function("fmodf"); // FIXME(antoyo): this seems to produce the wrong result. @@ -616,24 +670,29 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { a * b } - fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. + lhs + rhs } - fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn fsub_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. 
+ lhs - rhs } - fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn fmul_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. + lhs * rhs } - fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn fdiv_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. + lhs / rhs } - fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn frem_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. + self.frem(lhs, rhs) } fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) { @@ -722,7 +781,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } else if place.layout.is_gcc_immediate() { let load = self.load( - place.layout.gcc_type(self, false), + place.layout.gcc_type(self), place.llval, place.align, ); @@ -733,7 +792,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { let b_offset = a.size(self).align_to(b.align(self).abi); - let pair_type = place.layout.gcc_type(self, false); + let pair_type = place.layout.gcc_type(self); let mut load = |i, scalar: &abi::Scalar, align| { let llptr = self.struct_gep(pair_type, place.llval, i as u64); @@ -833,26 +892,31 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { - let mut result = ptr; + let ptr_type = ptr.get_type(); + let mut pointee_type = ptr.get_type(); + // NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is + // always considered in bounds in GCC (TODO(antoyo): to be verified). + // So, we have to cast to a number. + let mut result = self.context.new_bitcast(None, ptr, self.sizet_type); + // FIXME(antoyo): if there were more than 1 index, this code is probably wrong and would + // require dereferencing the pointer. for index in indices { - result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue(); + pointee_type = pointee_type.get_pointee().expect("pointee type"); + let pointee_size = self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32); + result = result + self.gcc_int_cast(*index * pointee_size, self.sizet_type); } - result + self.context.new_bitcast(None, result, ptr_type) } fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { - // FIXME(antoyo): would be safer if doing the same thing (loop) as gep. - // TODO(antoyo): specify inbounds somehow. - match indices.len() { - 1 => { - self.context.new_array_access(None, ptr, indices[0]).get_address(None) - }, - 2 => { - let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0? - self.context.new_array_access(None, array, indices[1]).get_address(None) - }, - _ => unimplemented!(), + // NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified). 
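Because GCC considers array indexing to always be in bounds, the `gep` override above avoids `new_array_access` and instead performs the offset arithmetic on the pointer's integer value. A rough equivalent of that lowering in ordinary Rust, assuming a `Sized` pointee and a single index, looks like this:

    // Sketch of the cast-to-integer, add `index * size_of(pointee)`,
    // cast-back sequence that `gep` spells with bitcasts to `sizet_type`.
    fn manual_gep<T>(ptr: *const T, index: usize) -> *const T {
        let addr = ptr as usize;
        let offset = index * std::mem::size_of::<T>();
        (addr + offset) as *const T
    }

    fn main() {
        let data = [10u32, 20, 30, 40];
        let base = data.as_ptr();
        let third = manual_gep(base, 2);
        // Same address as `base.wrapping_add(2)`, i.e. `&data[2]`.
        assert_eq!(third, base.wrapping_add(2));
        assert_eq!(unsafe { *third }, 30);
    }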
+ let mut indices = indices.into_iter(); + let index = indices.next().expect("first index in inbounds_gep"); + let mut result = self.context.new_array_access(None, ptr, *index); + for index in indices { + result = self.context.new_array_access(None, result, *index); } + result.get_address(None) } fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> { @@ -1034,8 +1098,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { unimplemented!(); } - fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + #[cfg(feature="master")] + fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> { + self.context.new_vector_access(None, vec, idx).to_rvalue() + } + + #[cfg(not(feature="master"))] + fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = vec.get_type().unqualified().dyncast_vector().expect("Called extract_element on a non-vector type"); + let element_type = vector_type.get_element_type(); + let vec_num_units = vector_type.get_num_units(); + let array_type = self.context.new_array_type(None, element_type, vec_num_units as u64); + let array = self.context.new_bitcast(None, vec, array_type).to_rvalue(); + self.context.new_array_access(None, array, idx).to_rvalue() } fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> { @@ -1116,22 +1191,52 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn set_personality_fn(&mut self, _personality: RValue<'gcc>) { - // TODO(antoyo) + #[cfg(feature="master")] + { + let personality = self.rvalue_as_function(_personality); + self.current_func().set_personality_function(personality); + } + } + + #[cfg(feature="master")] + fn cleanup_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) { + self.set_personality_fn(pers_fn); + + // NOTE: insert the current block in a variable so that a later call to invoke knows to + // generate a try/finally instead of a try/catch for this block. + self.cleanup_blocks.borrow_mut().insert(self.block); + + let eh_pointer_builtin = self.cx.context.get_target_builtin_function("__builtin_eh_pointer"); + let zero = self.cx.context.new_rvalue_zero(self.int_type); + let ptr = self.cx.context.new_call(None, eh_pointer_builtin, &[zero]); + + let value1_type = self.u8_type.make_pointer(); + let ptr = self.cx.context.new_cast(None, ptr, value1_type); + let value1 = ptr; + let value2 = zero; // TODO(antoyo): set the proper value here (the type of exception?). + + (value1, value2) } + #[cfg(not(feature="master"))] fn cleanup_landing_pad(&mut self, _pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) { - ( - self.current_func().new_local(None, self.u8_type.make_pointer(), "landing_pad0") - .to_rvalue(), - self.current_func().new_local(None, self.i32_type, "landing_pad1").to_rvalue(), - ) - // TODO(antoyo): Properly implement unwinding. - // the above is just to make the compilation work as it seems - // rustc_codegen_ssa now calls the unwinding builder methods even on panic=abort. 
+ let value1 = self.current_func().new_local(None, self.u8_type.make_pointer(), "landing_pad0") + .to_rvalue(); + let value2 = self.current_func().new_local(None, self.i32_type, "landing_pad1").to_rvalue(); + (value1, value2) + } + + #[cfg(feature="master")] + fn resume(&mut self, exn0: RValue<'gcc>, _exn1: RValue<'gcc>) { + let exn_type = exn0.get_type(); + let exn = self.context.new_cast(None, exn0, exn_type); + let unwind_resume = self.context.get_target_builtin_function("__builtin_unwind_resume"); + self.llbb().add_eval(None, self.context.new_call(None, unwind_resume, &[exn])); + self.unreachable(); } + #[cfg(not(feature="master"))] fn resume(&mut self, _exn0: RValue<'gcc>, _exn1: RValue<'gcc>) { - // TODO(bjorn3): Properly implement unwinding. self.unreachable(); } @@ -1160,6 +1265,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> { let expected = self.current_func().new_local(None, cmp.get_type(), "expected"); self.llbb().add_assignment(None, expected, cmp); + // NOTE: gcc doesn't support a failure memory model that is stronger than the success + // memory model. + let order = + if failure_order as i32 > order as i32 { + failure_order + } + else { + order + }; let success = self.compare_exchange(dst, expected, src, order, failure_order, weak); let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false); @@ -1469,7 +1583,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { #[cfg(feature="master")] pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> { - let struct_type = mask.get_type().is_struct().expect("mask of struct type"); + let struct_type = mask.get_type().is_struct().expect("mask should be of struct type"); // TODO(antoyo): use a recursive unqualified() here. let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type"); @@ -1501,22 +1615,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { vector_elements.push(self.context.new_rvalue_zero(mask_element_type)); } - let array_type = self.context.new_array_type(None, element_type, vec_num_units as i32); let result_type = self.context.new_vector_type(element_type, mask_num_units as u64); let (v1, v2) = if vec_num_units < mask_num_units { // NOTE: the mask needs to be the same length as the input vectors, so join the 2 // vectors and create a dummy second vector. - // TODO(antoyo): switch to using new_vector_access. - let array = self.context.new_bitcast(None, v1, array_type); let mut elements = vec![]; for i in 0..vec_num_units { - elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + elements.push(self.context.new_vector_access(None, v1, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); } - // TODO(antoyo): switch to using new_vector_access. 
- let array = self.context.new_bitcast(None, v2, array_type); for i in 0..(mask_num_units - vec_num_units) { - elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + elements.push(self.context.new_vector_access(None, v2, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); } let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements); let zero = self.context.new_rvalue_zero(element_type); @@ -1536,10 +1645,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // NOTE: if padding was added, only select the number of elements of the masks to // remove that padding in the result. let mut elements = vec![]; - // TODO(antoyo): switch to using new_vector_access. - let array = self.context.new_bitcast(None, result, array_type); for i in 0..mask_num_units { - elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + elements.push(self.context.new_vector_access(None, result, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); } self.context.new_rvalue_from_vector(None, result_type, &elements) } @@ -1558,18 +1665,20 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc> { let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_type = vector_type.get_element_type(); + let mask_element_type = self.type_ix(element_type.get_size() as u64 * 8); let element_count = vector_type.get_num_units(); let mut vector_elements = vec![]; for i in 0..element_count { vector_elements.push(i); } - let mask_type = self.context.new_vector_type(self.int_type, element_count as u64); + let mask_type = self.context.new_vector_type(mask_element_type, element_count as u64); let mut shift = 1; let mut res = src; while shift < element_count { let vector_elements: Vec<_> = vector_elements.iter() - .map(|i| self.context.new_rvalue_from_int(self.int_type, ((i + shift) % element_count) as i32)) + .map(|i| self.context.new_rvalue_from_int(mask_element_type, ((i + shift) % element_count) as i32)) .collect(); let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements); let shifted = self.context.new_rvalue_vector_perm(None, res, res, mask); @@ -1581,7 +1690,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } #[cfg(not(feature="master"))] - pub fn vector_reduce(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc> + pub fn vector_reduce(&mut self, _src: RValue<'gcc>, _op: F) -> RValue<'gcc> where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc> { unimplemented!(); @@ -1595,15 +1704,47 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { unimplemented!(); } + #[cfg(feature="master")] + pub fn vector_reduce_fadd(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + (0..element_count).into_iter() + .map(|i| self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue()) + .fold(acc, |x, i| x + i) + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fadd(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + pub fn vector_reduce_fmul_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); } + 
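The `vector_reduce` helper above implements a log-step reduction: each round builds a permutation mask that rotates the vector by `shift` lanes, combines the rotated copy with the accumulator, and doubles `shift`. A minimal sketch of the same idea on a plain slice, assuming a power-of-two lane count and a commutative, associative `op`, is:

    // Rotate-and-combine reduction: after log2(len) rounds every lane holds
    // the reduction of all lanes, so lane 0 can be read as the result.
    fn reduce<T: Copy, F: Fn(T, T) -> T>(src: &[T], op: F) -> T {
        let len = src.len();
        assert!(len.is_power_of_two());
        let mut res = src.to_vec();
        let mut shift = 1;
        while shift < len {
            // Combine each lane with the lane `shift` positions away (wrapping),
            // mirroring the rotating permutation mask built above.
            let rotated: Vec<T> = (0..len).map(|i| res[(i + shift) % len]).collect();
            res = res.iter().zip(rotated.iter()).map(|(&a, &b)| op(a, b)).collect();
            shift *= 2;
        }
        res[0]
    }

    fn main() {
        let v = [1u32, 2, 3, 4];
        assert_eq!(reduce(&v, |a, b| a + b), 10);
        assert_eq!(reduce(&v, |a, b| a.max(b)), 4);
    }

The `vector_reduce_fadd` variant above (and `vector_reduce_fmul` below) instead folds the lanes into `acc` one at a time, left to right.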
#[cfg(feature="master")] + pub fn vector_reduce_fmul(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + (0..element_count).into_iter() + .map(|i| self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue()) + .fold(acc, |x, i| x * i) + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fmul(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!() + } + // Inspired by Hacker's Delight min implementation. pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { self.vector_reduce(src, |a, b, context| { let differences_or_zeros = difference_or_zero(a, b, context); - context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros) + context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros) }) } @@ -1611,38 +1752,148 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { self.vector_reduce(src, |a, b, context| { let differences_or_zeros = difference_or_zero(a, b, context); - context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros) + context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros) }) } + fn vector_extremum(&mut self, a: RValue<'gcc>, b: RValue<'gcc>, direction: ExtremumOperation) -> RValue<'gcc> { + let vector_type = a.get_type(); + + // mask out the NaNs in b and replace them with the corresponding lane in a, so when a and + // b get compared & spliced together, we get the numeric values instead of NaNs. + let b_nan_mask = self.context.new_comparison(None, ComparisonOp::NotEquals, b, b); + let mask_type = b_nan_mask.get_type(); + let b_nan_mask_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, mask_type, b_nan_mask); + let a_cast = self.context.new_bitcast(None, a, mask_type); + let b_cast = self.context.new_bitcast(None, b, mask_type); + let res = (b_nan_mask & a_cast) | (b_nan_mask_inverted & b_cast); + let b = self.context.new_bitcast(None, res, vector_type); + + // now do the actual comparison + let comparison_op = match direction { + ExtremumOperation::Min => ComparisonOp::LessThan, + ExtremumOperation::Max => ComparisonOp::GreaterThan, + }; + let cmp = self.context.new_comparison(None, comparison_op, a, b); + let cmp_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, cmp.get_type(), cmp); + let res = (cmp & a_cast) | (cmp_inverted & res); + self.context.new_bitcast(None, res, vector_type) + } + + pub fn vector_fmin(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.vector_extremum(a, b, ExtremumOperation::Min) + } + + #[cfg(feature="master")] + pub fn vector_reduce_fmin(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue(); + for i in 1..element_count { + let elem = self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue(); + let cmp = self.context.new_comparison(None, ComparisonOp::LessThan, acc, elem); + acc = self.select(cmp, acc, elem); + } + acc + } + + #[cfg(not(feature="master"))] + pub fn 
vector_reduce_fmin(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + + pub fn vector_fmax(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.vector_extremum(a, b, ExtremumOperation::Max) + } + + #[cfg(feature="master")] + pub fn vector_reduce_fmax(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue(); + for i in 1..element_count { + let elem = self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue(); + let cmp = self.context.new_comparison(None, ComparisonOp::GreaterThan, acc, elem); + acc = self.select(cmp, acc, elem); + } + acc + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fmax(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> { // cond is a vector of integers, not of bools. - let cond_type = cond.get_type(); - let vector_type = cond_type.unqualified().dyncast_vector().expect("vector type"); + let vector_type = cond.get_type().unqualified().dyncast_vector().expect("vector type"); let num_units = vector_type.get_num_units(); let element_type = vector_type.get_element_type(); + + #[cfg(feature="master")] + let (cond, element_type) = { + let then_val_vector_type = then_val.get_type().dyncast_vector().expect("vector type"); + let then_val_element_type = then_val_vector_type.get_element_type(); + let then_val_element_size = then_val_element_type.get_size(); + + // NOTE: the mask needs to be of the same size as the other arguments in order for the & + // operation to work. + if then_val_element_size != element_type.get_size() { + let new_element_type = self.type_ix(then_val_element_size as u64 * 8); + let new_vector_type = self.context.new_vector_type(new_element_type, num_units as u64); + let cond = self.context.convert_vector(None, cond, new_vector_type); + (cond, new_element_type) + } + else { + (cond, element_type) + } + }; + + let cond_type = cond.get_type(); + let zeros = vec![self.context.new_rvalue_zero(element_type); num_units]; let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros); + let result_type = then_val.get_type(); + let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros); + // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make + // the & operation work. + let then_val = self.bitcast_if_needed(then_val, masks.get_type()); let then_vals = masks & then_val; - let ones = vec![self.context.new_rvalue_one(element_type); num_units]; - let ones = self.context.new_rvalue_from_vector(None, cond_type, &ones); - let inverted_masks = masks + ones; + let minus_ones = vec![self.context.new_rvalue_from_int(element_type, -1); num_units]; + let minus_ones = self.context.new_rvalue_from_vector(None, cond_type, &minus_ones); + let inverted_masks = masks ^ minus_ones; // NOTE: sometimes, the type of else_val can be different than the type of then_val in // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND // operation to work. + // TODO: remove bitcast now that vector types can be compared? 
let else_val = self.context.new_bitcast(None, else_val, then_val.get_type()); let else_vals = inverted_masks & else_val; - then_vals | else_vals + let res = then_vals | else_vals; + self.bitcast_if_needed(res, result_type) } } fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> { let difference = a - b; let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, b, a); + // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make + // the & operation work. + let a_type = a.get_type(); + let masks = + if masks.get_type() != a_type { + context.new_bitcast(None, masks, a_type) + } + else { + masks + }; difference & masks } diff --git a/src/callee.rs b/src/callee.rs index 9e3a22ee05d..ba1e8656208 100644 --- a/src/callee.rs +++ b/src/callee.rs @@ -1,9 +1,10 @@ -use gccjit::{FunctionType, RValue}; -use rustc_codegen_ssa::traits::BaseTypeMethods; +#[cfg(feature="master")] +use gccjit::{FnAttribute, Visibility}; +use gccjit::{FunctionType, Function}; use rustc_middle::ty::{self, Instance, TypeVisitableExt}; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; -use crate::abi::FnAbiGccExt; +use crate::attributes; use crate::context::CodegenCx; /// Codegens a reference to a fn/method item, monomorphizing and @@ -13,22 +14,26 @@ use crate::context::CodegenCx; /// /// - `cx`: the crate context /// - `instance`: the instance to be instantiated -pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> { +pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> Function<'gcc> { let tcx = cx.tcx(); assert!(!instance.substs.needs_infer()); assert!(!instance.substs.has_escaping_bound_vars()); + let sym = tcx.symbol_name(instance).name; + if let Some(&func) = cx.function_instances.borrow().get(&instance) { return func; } - let sym = tcx.symbol_name(instance).name; - let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); let func = - if let Some(func) = cx.get_declared_value(&sym) { + if let Some(_func) = cx.get_declared_value(&sym) { + // FIXME(antoyo): we never reach this because get_declared_value only returns global variables + // and here we try to get a function. + unreachable!(); + /* // Create a fn pointer with the new signature. let ptrty = fn_abi.ptr_to_gcc_type(cx); @@ -61,13 +66,105 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) } else { func - } + }*/ } else { cx.linkage.set(FunctionType::Extern); let func = cx.declare_fn(&sym, &fn_abi); + attributes::from_fn_attrs(cx, func, instance); + + let instance_def_id = instance.def_id(); + // TODO(antoyo): set linkage and attributes. + + // Apply an appropriate linkage/visibility value to our item that we + // just declared. + // + // This is sort of subtle. Inside our codegen unit we started off + // compilation by predefining all our own `MonoItem` instances. That + // is, everything we're codegenning ourselves is already defined. That + // means that anything we're actually codegenning in this codegen unit + // will have hit the above branch in `get_declared_value`. As a result, + // we're guaranteed here that we're declaring a symbol that won't get + // defined, or in other words we're referencing a value from another + // codegen unit or even another crate. + // + // So because this is a foreign value we blanket apply an external + // linkage directive because it's coming from a different object file. 
+ // The visibility here is where it gets tricky. This symbol could be + // referencing some foreign crate or foreign library (an `extern` + // block) in which case we want to leave the default visibility. We may + // also, though, have multiple codegen units. It could be a + // monomorphization, in which case its expected visibility depends on + // whether we are sharing generics or not. The important thing here is + // that the visibility we apply to the declaration is the same one that + // has been applied to the definition (wherever that definition may be). + let is_generic = instance.substs.non_erasable_generics().next().is_some(); + + if is_generic { + // This is a monomorphization. Its expected visibility depends + // on whether we are in share-generics mode. + + if cx.tcx.sess.opts.share_generics() { + // We are in share_generics mode. + + if let Some(instance_def_id) = instance_def_id.as_local() { + // This is a definition from the current crate. If the + // definition is unreachable for downstream crates or + // the current crate does not re-export generics, the + // definition of the instance will have been declared + // as `hidden`. + if cx.tcx.is_unreachable_local_definition(instance_def_id) + || !cx.tcx.local_crate_exports_generics() + { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } else { + // This is a monomorphization of a generic function + // defined in an upstream crate. + if instance.upstream_monomorphization(tcx).is_some() { + // This is instantiated in another crate. It cannot + // be `hidden`. + } else { + // This is a local instantiation of an upstream definition. + // If the current crate does not re-export it + // (because it is a C library or an executable), it + // will have been declared `hidden`. + if !cx.tcx.local_crate_exports_generics() { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } + } + } else { + // When not sharing generics, all instances are in the same + // crate and have hidden visibility + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } else { + // This is a non-generic function + if cx.tcx.is_codegened_item(instance_def_id) { + // This is a function that is instantiated in the local crate + + if instance_def_id.is_local() { + // This is function that is defined in the local crate. + // If it is not reachable, it is hidden. + if !cx.tcx.is_reachable_non_generic(instance_def_id) { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } else { + // This is a function from an upstream crate that has + // been instantiated here. These are always hidden. 
+ #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } + } + func }; diff --git a/src/common.rs b/src/common.rs index c939da9cec3..76fc7bd222e 100644 --- a/src/common.rs +++ b/src/common.rs @@ -36,7 +36,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> { let context = &cx.context; let byte_type = context.new_type::(); - let typ = context.new_array_type(None, byte_type, bytes.len() as i32); + let typ = context.new_array_type(None, byte_type, bytes.len() as u64); let elements: Vec<_> = bytes.iter() .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)) @@ -115,8 +115,8 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.const_uint(self.usize_type, i) } - fn const_u8(&self, _i: u8) -> RValue<'gcc> { - unimplemented!(); + fn const_u8(&self, i: u8) -> RValue<'gcc> { + self.const_uint(self.type_u8(), i as u64) } fn const_real(&self, typ: Type<'gcc>, val: f64) -> RValue<'gcc> { @@ -133,7 +133,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { .1; let len = s.len(); let cs = self.const_ptrcast(str_global.get_address(None), - self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)), + self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self)), ); (cs, self.const_usize(len as u64)) } @@ -174,8 +174,18 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } let value = self.const_uint_big(self.type_ix(bitsize), data); - // TODO(bjorn3): assert size is correct - self.const_bitcast(value, ty) + let bytesize = layout.size(self).bytes(); + if bitsize > 1 && ty.is_integral() && bytesize as u32 == ty.get_size() { + // NOTE: since the intrinsic _xabort is called with a bitcast, which + // is non-const, but expects a constant, do a normal cast instead of a bitcast. + // FIXME(antoyo): fix bitcast to work in constant contexts. + // TODO(antoyo): perhaps only use bitcast for pointers? 
+ self.context.new_cast(None, value, ty) + } + else { + // TODO(bjorn3): assert size is correct + self.const_bitcast(value, ty) + } } Scalar::Ptr(ptr, _size) => { let (alloc_id, offset) = ptr.into_parts(); @@ -227,11 +237,11 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: ConstAllocation<'tcx>, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> { assert_eq!(alloc.inner().align, layout.align.abi); - let ty = self.type_ptr_to(layout.gcc_type(self, true)); + let ty = self.type_ptr_to(layout.gcc_type(self)); let value = if layout.size == Size::ZERO { let value = self.const_usize(alloc.inner().align.bytes()); - self.context.new_cast(None, value, ty) + self.const_bitcast(value, ty) } else { let init = const_alloc_to_gcc(self, alloc); diff --git a/src/consts.rs b/src/consts.rs index dc41cb761b5..792ab8f890d 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -1,8 +1,8 @@ -use gccjit::{GlobalKind, LValue, RValue, ToRValue, Type}; +#[cfg(feature = "master")] +use gccjit::FnAttribute; +use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue, Type}; use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods}; -use rustc_hir as hir; -use rustc_hir::Node; -use rustc_middle::{bug, span_bug}; +use rustc_middle::span_bug; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::{self, Instance, Ty}; @@ -13,6 +13,7 @@ use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRan use crate::base; use crate::context::CodegenCx; +use crate::errors::InvalidMinimumAlignment; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { @@ -30,6 +31,21 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } +fn set_global_alignment<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, gv: LValue<'gcc>, mut align: Align) { + // The target may require greater alignment for globals than the type does. + // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, + // which can force it to be smaller. Rust doesn't support this yet. + if let Some(min) = cx.sess().target.min_global_align { + match Align::from_bits(min) { + Ok(min) => align = align.max(min), + Err(err) => { + cx.sess().emit_err(InvalidMinimumAlignment { err }); + } + } + } + gv.set_alignment(align.bytes() as i32); +} + impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { // TODO(antoyo): implement a proper rvalue comparison in libgccjit instead of doing the @@ -79,9 +95,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { let instance = Instance::mono(self.tcx, def_id); let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); - let gcc_type = self.layout_of(ty).gcc_type(self, true); + let gcc_type = self.layout_of(ty).gcc_type(self); - // TODO(antoyo): set alignment. + set_global_alignment(self, global, self.align_of(ty)); let value = self.bitcast_if_needed(value, gcc_type); global.global_set_initializer_rvalue(value); @@ -158,12 +174,19 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { // TODO(antoyo) } - fn add_compiler_used_global(&self, _global: RValue<'gcc>) { - // TODO(antoyo) + fn add_compiler_used_global(&self, global: RValue<'gcc>) { + // NOTE: seems like GCC does not make the distinction between compiler.used and used. 
+ self.add_used_global(global); } } impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + #[cfg_attr(not(feature="master"), allow(unused_variables))] + pub fn add_used_function(&self, function: Function<'gcc>) { + #[cfg(feature = "master")] + function.add_attribute(FnAttribute::Used); + } + pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { let global = match kind { @@ -208,82 +231,59 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let sym = self.tcx.symbol_name(instance).name; let global = - if let Some(def_id) = def_id.as_local() { - let id = self.tcx.hir().local_def_id_to_hir_id(def_id); - let llty = self.layout_of(ty).gcc_type(self, true); - // FIXME: refactor this to work without accessing the HIR - let global = match self.tcx.hir().get(id) { - Node::Item(&hir::Item { span, kind: hir::ItemKind::Static(..), .. }) => { - if let Some(global) = self.get_declared_value(&sym) { - if self.val_ty(global) != self.type_ptr_to(llty) { - span_bug!(span, "Conflicting types for static"); - } - } - - let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); - let global = self.declare_global( - &sym, - llty, - GlobalKind::Exported, - is_tls, - fn_attrs.link_section, - ); - - if !self.tcx.is_reachable_non_generic(def_id) { - // TODO(antoyo): set visibility. - } - - global - } - - Node::ForeignItem(&hir::ForeignItem { - span: _, - kind: hir::ForeignItemKind::Static(..), - .. - }) => { - let fn_attrs = self.tcx.codegen_fn_attrs(def_id); - check_and_apply_linkage(&self, &fn_attrs, ty, sym) - } - - item => bug!("get_static: expected static, found {:?}", item), - }; + if def_id.is_local() && !self.tcx.is_foreign_item(def_id) { + let llty = self.layout_of(ty).gcc_type(self); + if let Some(global) = self.get_declared_value(sym) { + if self.val_ty(global) != self.type_ptr_to(llty) { + span_bug!(self.tcx.def_span(def_id), "Conflicting types for static"); + } + } - global + let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); + let global = self.declare_global( + &sym, + llty, + GlobalKind::Exported, + is_tls, + fn_attrs.link_section, + ); + + if !self.tcx.is_reachable_non_generic(def_id) { + // TODO(antoyo): set visibility. } - else { - // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? - //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id)); - - let attrs = self.tcx.codegen_fn_attrs(def_id); - let global = check_and_apply_linkage(&self, &attrs, ty, sym); - - let needs_dll_storage_attr = false; // TODO(antoyo) - - // If this assertion triggers, there's something wrong with commandline - // argument validation. - debug_assert!( - !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() - && self.tcx.sess.target.options.is_like_msvc - && self.tcx.sess.opts.cg.prefer_dynamic) - ); - - if needs_dll_storage_attr { - // This item is external but not foreign, i.e., it originates from an external Rust - // crate. Since we don't know whether this crate will be linked dynamically or - // statically in the final application, we always mark such symbols as 'dllimport'. - // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs - // to make things work. - // - // However, in some scenarios we defer emission of statics to downstream - // crates, so there are cases where a static with an upstream DefId - // is actually present in the current crate. We can find out via the - // is_codegened_item query. 
- if !self.tcx.is_codegened_item(def_id) { - unimplemented!(); - } + + global + } else { + check_and_apply_linkage(&self, &fn_attrs, ty, sym) + }; + + if !def_id.is_local() { + let needs_dll_storage_attr = false; // TODO(antoyo) + + // If this assertion triggers, there's something wrong with commandline + // argument validation. + debug_assert!( + !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() + && self.tcx.sess.target.options.is_like_msvc + && self.tcx.sess.opts.cg.prefer_dynamic) + ); + + if needs_dll_storage_attr { + // This item is external but not foreign, i.e., it originates from an external Rust + // crate. Since we don't know whether this crate will be linked dynamically or + // statically in the final application, we always mark such symbols as 'dllimport'. + // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs + // to make things work. + // + // However, in some scenarios we defer emission of statics to downstream + // crates, so there are cases where a static with an upstream DefId + // is actually present in the current crate. We can find out via the + // is_codegened_item query. + if !self.tcx.is_codegened_item(def_id) { + unimplemented!(); } - global - }; + } + } // TODO(antoyo): set dll storage class. @@ -357,7 +357,7 @@ pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str) -> LValue<'gcc> { let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); - let llty = cx.layout_of(ty).gcc_type(cx, true); + let gcc_type = cx.layout_of(ty).gcc_type(cx); if let Some(linkage) = attrs.import_linkage { // Declare a symbol `foo` with the desired linkage. let global1 = cx.declare_global_with_linkage(&sym, cx.type_i8(), base::global_linkage_to_gcc(linkage)); @@ -370,9 +370,10 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg // zero. let mut real_name = "_rust_extern_with_linkage_".to_string(); real_name.push_str(&sym); - let global2 = cx.define_global(&real_name, llty, is_tls, attrs.link_section); + let global2 = cx.define_global(&real_name, gcc_type, is_tls, attrs.link_section); // TODO(antoyo): set linkage. - global2.global_set_initializer_rvalue(global1.get_address(None)); + let value = cx.const_ptrcast(global1.get_address(None), gcc_type); + global2.global_set_initializer_rvalue(value); // TODO(antoyo): use global_set_initializer() when it will work. global2 } @@ -386,6 +387,6 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg // don't do this then linker errors can be generated where the linker // complains that one object files has a thread local version of the // symbol and another one doesn't. 
- cx.declare_global(&sym, llty, GlobalKind::Imported, is_tls, attrs.link_section) + cx.declare_global(&sym, gcc_type, GlobalKind::Imported, is_tls, attrs.link_section) } } diff --git a/src/context.rs b/src/context.rs index 457006319af..661681bdb50 100644 --- a/src/context.rs +++ b/src/context.rs @@ -1,9 +1,10 @@ use std::cell::{Cell, RefCell}; -use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Struct, Type}; +use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Type}; use rustc_codegen_ssa::base::wants_msvc_seh; use rustc_codegen_ssa::traits::{ BackendTypes, + BaseTypeMethods, MiscMethods, }; use rustc_data_structures::base_n; @@ -11,7 +12,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_middle::span_bug; use rustc_middle::mir::mono::CodegenUnit; use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt}; -use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers}; +use rustc_middle::ty::layout::{FnAbiError, FnAbiOf, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers}; use rustc_session::Session; use rustc_span::{Span, source_map::respan}; use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx}; @@ -33,6 +34,7 @@ pub struct CodegenCx<'gcc, 'tcx> { // TODO(bjorn3): Can this field be removed? pub current_func: RefCell>>, pub normal_function_addresses: RefCell>>, + pub function_address_names: RefCell, String>>, pub functions: RefCell>>, pub intrinsics: RefCell>>, @@ -78,12 +80,10 @@ pub struct CodegenCx<'gcc, 'tcx> { pub struct_types: RefCell>, Type<'gcc>>>, - pub types_with_fields_to_set: RefCell, (Struct<'gcc>, TyAndLayout<'tcx>)>>, - /// Cache instances of monomorphic and polymorphic items pub instances: RefCell, LValue<'gcc>>>, /// Cache function instances of monomorphic and polymorphic items - pub function_instances: RefCell, RValue<'gcc>>>, + pub function_instances: RefCell, Function<'gcc>>>, /// Cache generated vtables pub vtables: RefCell, Option>), RValue<'gcc>>>, @@ -110,6 +110,7 @@ pub struct CodegenCx<'gcc, 'tcx> { local_gen_sym_counter: Cell, eh_personality: Cell>>, + pub rust_try_fn: Cell, Function<'gcc>)>>, pub pointee_infos: RefCell, Size), Option>>, @@ -119,6 +120,8 @@ pub struct CodegenCx<'gcc, 'tcx> { /// they can be dereferenced later. /// FIXME(antoyo): fix the rustc API to avoid having this hack. 
pub structs_as_pointer: RefCell>>, + + pub cleanup_blocks: RefCell>>, } impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { @@ -194,6 +197,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { context, current_func: RefCell::new(None), normal_function_addresses: Default::default(), + function_address_names: Default::default(), functions: RefCell::new(functions), intrinsics: RefCell::new(FxHashMap::default()), @@ -243,11 +247,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { types: Default::default(), tcx, struct_types: Default::default(), - types_with_fields_to_set: Default::default(), local_gen_sym_counter: Cell::new(0), eh_personality: Cell::new(None), + rust_try_fn: Cell::new(None), pointee_infos: Default::default(), structs_as_pointer: Default::default(), + cleanup_blocks: Default::default(), } } @@ -327,8 +332,9 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> { let func = get_fn(self, instance); - *self.current_func.borrow_mut() = Some(self.rvalue_as_function(func)); - func + *self.current_func.borrow_mut() = Some(func); + // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API. + unsafe { std::mem::transmute(func) } } fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> { @@ -339,8 +345,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.intrinsics.borrow()[func_name].clone() } else { - let func = get_fn(self, instance); - self.rvalue_as_function(func) + get_fn(self, instance) }; let ptr = func.get_address(None); @@ -348,6 +353,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI). self.normal_function_addresses.borrow_mut().insert(ptr); + self.function_address_names.borrow_mut().insert(ptr, func_name.to_string()); ptr } @@ -377,31 +383,40 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { return llpersonality; } let tcx = self.tcx; - let llfn = match tcx.lang_items().eh_personality() { - Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr( - ty::Instance::resolve( - tcx, - ty::ParamEnv::reveal_all(), - def_id, - ty::List::empty(), - ) - .unwrap().unwrap(), - ), - _ => { - let _name = if wants_msvc_seh(self.sess()) { - "__CxxFrameHandler3" - } else { - "rust_eh_personality" - }; - //let func = self.declare_func(name, self.type_i32(), &[], true); - // FIXME(antoyo): this hack should not be needed. That will probably be removed when - // unwinding support is added. - self.context.new_rvalue_from_int(self.int_type, 0) - } - }; + let func = + match tcx.lang_items().eh_personality() { + Some(def_id) if !wants_msvc_seh(self.sess()) => { + let instance = + ty::Instance::resolve( + tcx, + ty::ParamEnv::reveal_all(), + def_id, + ty::List::empty(), + ) + .unwrap().unwrap(); + + let symbol_name = tcx.symbol_name(instance).name; + let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); + self.linkage.set(FunctionType::Extern); + let func = self.declare_fn(symbol_name, &fn_abi); + let func: RValue<'gcc> = unsafe { std::mem::transmute(func) }; + func + }, + _ => { + let name = + if wants_msvc_seh(self.sess()) { + "__CxxFrameHandler3" + } + else { + "rust_eh_personality" + }; + let func = self.declare_func(name, self.type_i32(), &[], true); + unsafe { std::mem::transmute(func) } + } + }; // TODO(antoyo): apply target cpu attributes. 
- self.eh_personality.set(Some(llfn)); - llfn + self.eh_personality.set(Some(func)); + func } fn sess(&self) -> &Session { diff --git a/src/declare.rs b/src/declare.rs index eae77508c97..4748e7e4be2 100644 --- a/src/declare.rs +++ b/src/declare.rs @@ -38,12 +38,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { global } - /*pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> { - self.linkage.set(FunctionType::Exported); - let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic); - // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API. - unsafe { std::mem::transmute(func) } - }*/ + pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> Function<'gcc> { + self.linkage.set(FunctionType::Extern); + declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic) + } pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option) -> LValue<'gcc> { let global = self.context.new_global(None, global_kind, ty, name); @@ -79,12 +77,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { unsafe { std::mem::transmute(func) } } - pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> { + pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Function<'gcc> { let (return_type, params, variadic, on_stack_param_indices) = fn_abi.gcc_type(self); let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, ¶ms, variadic); self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices); - // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API. - unsafe { std::mem::transmute(func) } + func } pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> LValue<'gcc> { diff --git a/src/errors.rs b/src/errors.rs index d0ba7e24791..5ea39606c08 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -221,3 +221,9 @@ pub(crate) struct UnwindingInlineAsm { #[primary_span] pub span: Span, } + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_minimum_alignment)] +pub(crate) struct InvalidMinimumAlignment { + pub err: String, +} diff --git a/src/int.rs b/src/int.rs index 0c5dab00466..0cf1204791d 100644 --- a/src/int.rs +++ b/src/int.rs @@ -389,18 +389,22 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { }; self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit)) } + else if a_type.get_pointee().is_some() && b_type.get_pointee().is_some() { + // NOTE: gcc cannot compare pointers to different objects, but rustc does that, so cast them to usize. + lhs = self.context.new_bitcast(None, lhs, self.usize_type); + rhs = self.context.new_bitcast(None, rhs, self.usize_type); + self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) + } else { - let left_type = lhs.get_type(); - let right_type = rhs.get_type(); - if left_type != right_type { + if a_type != b_type { // NOTE: because libgccjit cannot compare function pointers. 
- if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() { + if a_type.dyncast_function_ptr_type().is_some() && b_type.dyncast_function_ptr_type().is_some() { lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer()); rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer()); } // NOTE: hack because we try to cast a vector type to the same vector type. - else if format!("{:?}", left_type) != format!("{:?}", right_type) { - rhs = self.context.new_cast(None, rhs, left_type); + else if format!("{:?}", a_type) != format!("{:?}", b_type) { + rhs = self.context.new_cast(None, rhs, a_type); } } self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) diff --git a/src/intrinsic/archs.rs b/src/intrinsic/archs.rs index fb6c38fa072..8a4559355ea 100644 --- a/src/intrinsic/archs.rs +++ b/src/intrinsic/archs.rs @@ -34,6 +34,7 @@ match name { "llvm.aarch64.dmb" => "__builtin_arm_dmb", "llvm.aarch64.dsb" => "__builtin_arm_dsb", "llvm.aarch64.isb" => "__builtin_arm_isb", + "llvm.aarch64.prefetch" => "__builtin_arm_prefetch", "llvm.aarch64.sve.aesd" => "__builtin_sve_svaesd_u8", "llvm.aarch64.sve.aese" => "__builtin_sve_svaese_u8", "llvm.aarch64.sve.aesimc" => "__builtin_sve_svaesimc_u8", @@ -58,13 +59,22 @@ match name { "llvm.amdgcn.cubema" => "__builtin_amdgcn_cubema", "llvm.amdgcn.cubesc" => "__builtin_amdgcn_cubesc", "llvm.amdgcn.cubetc" => "__builtin_amdgcn_cubetc", + "llvm.amdgcn.cvt.f32.bf8" => "__builtin_amdgcn_cvt_f32_bf8", + "llvm.amdgcn.cvt.f32.fp8" => "__builtin_amdgcn_cvt_f32_fp8", + "llvm.amdgcn.cvt.pk.bf8.f32" => "__builtin_amdgcn_cvt_pk_bf8_f32", + "llvm.amdgcn.cvt.pk.f32.bf8" => "__builtin_amdgcn_cvt_pk_f32_bf8", + "llvm.amdgcn.cvt.pk.f32.fp8" => "__builtin_amdgcn_cvt_pk_f32_fp8", + "llvm.amdgcn.cvt.pk.fp8.f32" => "__builtin_amdgcn_cvt_pk_fp8_f32", "llvm.amdgcn.cvt.pk.i16" => "__builtin_amdgcn_cvt_pk_i16", "llvm.amdgcn.cvt.pk.u16" => "__builtin_amdgcn_cvt_pk_u16", "llvm.amdgcn.cvt.pk.u8.f32" => "__builtin_amdgcn_cvt_pk_u8_f32", "llvm.amdgcn.cvt.pknorm.i16" => "__builtin_amdgcn_cvt_pknorm_i16", "llvm.amdgcn.cvt.pknorm.u16" => "__builtin_amdgcn_cvt_pknorm_u16", "llvm.amdgcn.cvt.pkrtz" => "__builtin_amdgcn_cvt_pkrtz", + "llvm.amdgcn.cvt.sr.bf8.f32" => "__builtin_amdgcn_cvt_sr_bf8_f32", + "llvm.amdgcn.cvt.sr.fp8.f32" => "__builtin_amdgcn_cvt_sr_fp8_f32", "llvm.amdgcn.dispatch.id" => "__builtin_amdgcn_dispatch_id", + "llvm.amdgcn.ds.add.gs.reg.rtn" => "__builtin_amdgcn_ds_add_gs_reg_rtn", "llvm.amdgcn.ds.bpermute" => "__builtin_amdgcn_ds_bpermute", "llvm.amdgcn.ds.fadd.v2bf16" => "__builtin_amdgcn_ds_atomic_fadd_v2bf16", "llvm.amdgcn.ds.gws.barrier" => "__builtin_amdgcn_ds_gws_barrier", @@ -74,12 +84,16 @@ match name { "llvm.amdgcn.ds.gws.sema.release.all" => "__builtin_amdgcn_ds_gws_sema_release_all", "llvm.amdgcn.ds.gws.sema.v" => "__builtin_amdgcn_ds_gws_sema_v", "llvm.amdgcn.ds.permute" => "__builtin_amdgcn_ds_permute", + "llvm.amdgcn.ds.sub.gs.reg.rtn" => "__builtin_amdgcn_ds_sub_gs_reg_rtn", "llvm.amdgcn.ds.swizzle" => "__builtin_amdgcn_ds_swizzle", "llvm.amdgcn.endpgm" => "__builtin_amdgcn_endpgm", "llvm.amdgcn.fdot2" => "__builtin_amdgcn_fdot2", - "llvm.amdgcn.fmed3" => "__builtin_amdgcn_fmed3", + "llvm.amdgcn.fdot2.bf16.bf16" => "__builtin_amdgcn_fdot2_bf16_bf16", + "llvm.amdgcn.fdot2.f16.f16" => "__builtin_amdgcn_fdot2_f16_f16", + "llvm.amdgcn.fdot2.f32.bf16" => "__builtin_amdgcn_fdot2_f32_bf16", "llvm.amdgcn.fmul.legacy" => "__builtin_amdgcn_fmul_legacy", "llvm.amdgcn.groupstaticsize" => 
"__builtin_amdgcn_groupstaticsize", + "llvm.amdgcn.iglp.opt" => "__builtin_amdgcn_iglp_opt", "llvm.amdgcn.implicit.buffer.ptr" => "__builtin_amdgcn_implicit_buffer_ptr", "llvm.amdgcn.implicitarg.ptr" => "__builtin_amdgcn_implicitarg_ptr", "llvm.amdgcn.interp.mov" => "__builtin_amdgcn_interp_mov", @@ -93,11 +107,51 @@ match name { "llvm.amdgcn.lerp" => "__builtin_amdgcn_lerp", "llvm.amdgcn.mbcnt.hi" => "__builtin_amdgcn_mbcnt_hi", "llvm.amdgcn.mbcnt.lo" => "__builtin_amdgcn_mbcnt_lo", + "llvm.amdgcn.mfma.f32.16x16x16bf16.1k" => "__builtin_amdgcn_mfma_f32_16x16x16bf16_1k", + "llvm.amdgcn.mfma.f32.16x16x16f16" => "__builtin_amdgcn_mfma_f32_16x16x16f16", + "llvm.amdgcn.mfma.f32.16x16x1f32" => "__builtin_amdgcn_mfma_f32_16x16x1f32", + "llvm.amdgcn.mfma.f32.16x16x2bf16" => "__builtin_amdgcn_mfma_f32_16x16x2bf16", + "llvm.amdgcn.mfma.f32.16x16x32.bf8.bf8" => "__builtin_amdgcn_mfma_f32_16x16x32_bf8_bf8", + "llvm.amdgcn.mfma.f32.16x16x32.bf8.fp8" => "__builtin_amdgcn_mfma_f32_16x16x32_bf8_fp8", + "llvm.amdgcn.mfma.f32.16x16x32.fp8.bf8" => "__builtin_amdgcn_mfma_f32_16x16x32_fp8_bf8", + "llvm.amdgcn.mfma.f32.16x16x32.fp8.fp8" => "__builtin_amdgcn_mfma_f32_16x16x32_fp8_fp8", + "llvm.amdgcn.mfma.f32.16x16x4bf16.1k" => "__builtin_amdgcn_mfma_f32_16x16x4bf16_1k", + "llvm.amdgcn.mfma.f32.16x16x4f16" => "__builtin_amdgcn_mfma_f32_16x16x4f16", + "llvm.amdgcn.mfma.f32.16x16x4f32" => "__builtin_amdgcn_mfma_f32_16x16x4f32", + "llvm.amdgcn.mfma.f32.16x16x8.xf32" => "__builtin_amdgcn_mfma_f32_16x16x8_xf32", + "llvm.amdgcn.mfma.f32.16x16x8bf16" => "__builtin_amdgcn_mfma_f32_16x16x8bf16", + "llvm.amdgcn.mfma.f32.32x32x16.bf8.bf8" => "__builtin_amdgcn_mfma_f32_32x32x16_bf8_bf8", + "llvm.amdgcn.mfma.f32.32x32x16.bf8.fp8" => "__builtin_amdgcn_mfma_f32_32x32x16_bf8_fp8", + "llvm.amdgcn.mfma.f32.32x32x16.fp8.bf8" => "__builtin_amdgcn_mfma_f32_32x32x16_fp8_bf8", + "llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8" => "__builtin_amdgcn_mfma_f32_32x32x16_fp8_fp8", + "llvm.amdgcn.mfma.f32.32x32x1f32" => "__builtin_amdgcn_mfma_f32_32x32x1f32", + "llvm.amdgcn.mfma.f32.32x32x2bf16" => "__builtin_amdgcn_mfma_f32_32x32x2bf16", + "llvm.amdgcn.mfma.f32.32x32x2f32" => "__builtin_amdgcn_mfma_f32_32x32x2f32", + "llvm.amdgcn.mfma.f32.32x32x4.xf32" => "__builtin_amdgcn_mfma_f32_32x32x4_xf32", + "llvm.amdgcn.mfma.f32.32x32x4bf16" => "__builtin_amdgcn_mfma_f32_32x32x4bf16", + "llvm.amdgcn.mfma.f32.32x32x4bf16.1k" => "__builtin_amdgcn_mfma_f32_32x32x4bf16_1k", + "llvm.amdgcn.mfma.f32.32x32x4f16" => "__builtin_amdgcn_mfma_f32_32x32x4f16", + "llvm.amdgcn.mfma.f32.32x32x8bf16.1k" => "__builtin_amdgcn_mfma_f32_32x32x8bf16_1k", + "llvm.amdgcn.mfma.f32.32x32x8f16" => "__builtin_amdgcn_mfma_f32_32x32x8f16", + "llvm.amdgcn.mfma.f32.4x4x1f32" => "__builtin_amdgcn_mfma_f32_4x4x1f32", + "llvm.amdgcn.mfma.f32.4x4x2bf16" => "__builtin_amdgcn_mfma_f32_4x4x2bf16", + "llvm.amdgcn.mfma.f32.4x4x4bf16.1k" => "__builtin_amdgcn_mfma_f32_4x4x4bf16_1k", + "llvm.amdgcn.mfma.f32.4x4x4f16" => "__builtin_amdgcn_mfma_f32_4x4x4f16", + "llvm.amdgcn.mfma.f64.16x16x4f64" => "__builtin_amdgcn_mfma_f64_16x16x4f64", + "llvm.amdgcn.mfma.f64.4x4x4f64" => "__builtin_amdgcn_mfma_f64_4x4x4f64", + "llvm.amdgcn.mfma.i32.16x16x16i8" => "__builtin_amdgcn_mfma_i32_16x16x16i8", + "llvm.amdgcn.mfma.i32.16x16x32.i8" => "__builtin_amdgcn_mfma_i32_16x16x32_i8", + "llvm.amdgcn.mfma.i32.16x16x4i8" => "__builtin_amdgcn_mfma_i32_16x16x4i8", + "llvm.amdgcn.mfma.i32.32x32x16.i8" => "__builtin_amdgcn_mfma_i32_32x32x16_i8", + "llvm.amdgcn.mfma.i32.32x32x4i8" => "__builtin_amdgcn_mfma_i32_32x32x4i8", + 
"llvm.amdgcn.mfma.i32.32x32x8i8" => "__builtin_amdgcn_mfma_i32_32x32x8i8", + "llvm.amdgcn.mfma.i32.4x4x4i8" => "__builtin_amdgcn_mfma_i32_4x4x4i8", "llvm.amdgcn.mqsad.pk.u16.u8" => "__builtin_amdgcn_mqsad_pk_u16_u8", "llvm.amdgcn.mqsad.u32.u8" => "__builtin_amdgcn_mqsad_u32_u8", "llvm.amdgcn.msad.u8" => "__builtin_amdgcn_msad_u8", "llvm.amdgcn.perm" => "__builtin_amdgcn_perm", "llvm.amdgcn.permlane16" => "__builtin_amdgcn_permlane16", + "llvm.amdgcn.permlane64" => "__builtin_amdgcn_permlane64", "llvm.amdgcn.permlanex16" => "__builtin_amdgcn_permlanex16", "llvm.amdgcn.qsad.pk.u16.u8" => "__builtin_amdgcn_qsad_pk_u16_u8", "llvm.amdgcn.queue.ptr" => "__builtin_amdgcn_queue_ptr", @@ -122,19 +176,40 @@ match name { "llvm.amdgcn.s.setprio" => "__builtin_amdgcn_s_setprio", "llvm.amdgcn.s.setreg" => "__builtin_amdgcn_s_setreg", "llvm.amdgcn.s.sleep" => "__builtin_amdgcn_s_sleep", + "llvm.amdgcn.s.wait.event.export.ready" => "__builtin_amdgcn_s_wait_event_export_ready", "llvm.amdgcn.s.waitcnt" => "__builtin_amdgcn_s_waitcnt", "llvm.amdgcn.sad.hi.u8" => "__builtin_amdgcn_sad_hi_u8", "llvm.amdgcn.sad.u16" => "__builtin_amdgcn_sad_u16", "llvm.amdgcn.sad.u8" => "__builtin_amdgcn_sad_u8", "llvm.amdgcn.sched.barrier" => "__builtin_amdgcn_sched_barrier", + "llvm.amdgcn.sched.group.barrier" => "__builtin_amdgcn_sched_group_barrier", "llvm.amdgcn.sdot2" => "__builtin_amdgcn_sdot2", "llvm.amdgcn.sdot4" => "__builtin_amdgcn_sdot4", "llvm.amdgcn.sdot8" => "__builtin_amdgcn_sdot8", + "llvm.amdgcn.smfmac.f32.16x16x32.bf16" => "__builtin_amdgcn_smfmac_f32_16x16x32_bf16", + "llvm.amdgcn.smfmac.f32.16x16x32.f16" => "__builtin_amdgcn_smfmac_f32_16x16x32_f16", + "llvm.amdgcn.smfmac.f32.16x16x64.bf8.bf8" => "__builtin_amdgcn_smfmac_f32_16x16x64_bf8_bf8", + "llvm.amdgcn.smfmac.f32.16x16x64.bf8.fp8" => "__builtin_amdgcn_smfmac_f32_16x16x64_bf8_fp8", + "llvm.amdgcn.smfmac.f32.16x16x64.fp8.bf8" => "__builtin_amdgcn_smfmac_f32_16x16x64_fp8_bf8", + "llvm.amdgcn.smfmac.f32.16x16x64.fp8.fp8" => "__builtin_amdgcn_smfmac_f32_16x16x64_fp8_fp8", + "llvm.amdgcn.smfmac.f32.32x32x16.bf16" => "__builtin_amdgcn_smfmac_f32_32x32x16_bf16", + "llvm.amdgcn.smfmac.f32.32x32x16.f16" => "__builtin_amdgcn_smfmac_f32_32x32x16_f16", + "llvm.amdgcn.smfmac.f32.32x32x32.bf8.bf8" => "__builtin_amdgcn_smfmac_f32_32x32x32_bf8_bf8", + "llvm.amdgcn.smfmac.f32.32x32x32.bf8.fp8" => "__builtin_amdgcn_smfmac_f32_32x32x32_bf8_fp8", + "llvm.amdgcn.smfmac.f32.32x32x32.fp8.bf8" => "__builtin_amdgcn_smfmac_f32_32x32x32_fp8_bf8", + "llvm.amdgcn.smfmac.f32.32x32x32.fp8.fp8" => "__builtin_amdgcn_smfmac_f32_32x32x32_fp8_fp8", + "llvm.amdgcn.smfmac.i32.16x16x64.i8" => "__builtin_amdgcn_smfmac_i32_16x16x64_i8", + "llvm.amdgcn.smfmac.i32.32x32x32.i8" => "__builtin_amdgcn_smfmac_i32_32x32x32_i8", + "llvm.amdgcn.sudot4" => "__builtin_amdgcn_sudot4", + "llvm.amdgcn.sudot8" => "__builtin_amdgcn_sudot8", "llvm.amdgcn.udot2" => "__builtin_amdgcn_udot2", "llvm.amdgcn.udot4" => "__builtin_amdgcn_udot4", "llvm.amdgcn.udot8" => "__builtin_amdgcn_udot8", "llvm.amdgcn.wave.barrier" => "__builtin_amdgcn_wave_barrier", "llvm.amdgcn.wavefrontsize" => "__builtin_amdgcn_wavefrontsize", + "llvm.amdgcn.workgroup.id.x" => "__builtin_amdgcn_workgroup_id_x", + "llvm.amdgcn.workgroup.id.y" => "__builtin_amdgcn_workgroup_id_y", + "llvm.amdgcn.workgroup.id.z" => "__builtin_amdgcn_workgroup_id_z", "llvm.amdgcn.writelane" => "__builtin_amdgcn_writelane", // arm "llvm.arm.cdp" => "__builtin_arm_cdp", @@ -249,6 +324,8 @@ match name { "llvm.bpf.pseudo" => "__builtin_bpf_pseudo", // cuda 
"llvm.cuda.syncthreads" => "__syncthreads", + // dx + "llvm.dx.create.handle" => "__builtin_hlsl_create_handle", // hexagon "llvm.hexagon.A2.abs" => "__builtin_HEXAGON_A2_abs", "llvm.hexagon.A2.absp" => "__builtin_HEXAGON_A2_absp", @@ -459,6 +536,11 @@ match name { "llvm.hexagon.A4.vrminuw" => "__builtin_HEXAGON_A4_vrminuw", "llvm.hexagon.A4.vrminw" => "__builtin_HEXAGON_A4_vrminw", "llvm.hexagon.A5.vaddhubs" => "__builtin_HEXAGON_A5_vaddhubs", + "llvm.hexagon.A6.vcmpbeq.notany" => "__builtin_HEXAGON_A6_vcmpbeq_notany", + "llvm.hexagon.A7.clip" => "__builtin_HEXAGON_A7_clip", + "llvm.hexagon.A7.croundd.ri" => "__builtin_HEXAGON_A7_croundd_ri", + "llvm.hexagon.A7.croundd.rr" => "__builtin_HEXAGON_A7_croundd_rr", + "llvm.hexagon.A7.vclip" => "__builtin_HEXAGON_A7_vclip", "llvm.hexagon.C2.all8" => "__builtin_HEXAGON_C2_all8", "llvm.hexagon.C2.and" => "__builtin_HEXAGON_C2_and", "llvm.hexagon.C2.andn" => "__builtin_HEXAGON_C2_andn", @@ -557,6 +639,10 @@ match name { "llvm.hexagon.F2.dfmax" => "__builtin_HEXAGON_F2_dfmax", "llvm.hexagon.F2.dfmin" => "__builtin_HEXAGON_F2_dfmin", "llvm.hexagon.F2.dfmpy" => "__builtin_HEXAGON_F2_dfmpy", + "llvm.hexagon.F2.dfmpyfix" => "__builtin_HEXAGON_F2_dfmpyfix", + "llvm.hexagon.F2.dfmpyhh" => "__builtin_HEXAGON_F2_dfmpyhh", + "llvm.hexagon.F2.dfmpylh" => "__builtin_HEXAGON_F2_dfmpylh", + "llvm.hexagon.F2.dfmpyll" => "__builtin_HEXAGON_F2_dfmpyll", "llvm.hexagon.F2.dfsub" => "__builtin_HEXAGON_F2_dfsub", "llvm.hexagon.F2.sfadd" => "__builtin_HEXAGON_F2_sfadd", "llvm.hexagon.F2.sfclass" => "__builtin_HEXAGON_F2_sfclass", @@ -578,6 +664,8 @@ match name { "llvm.hexagon.F2.sfmin" => "__builtin_HEXAGON_F2_sfmin", "llvm.hexagon.F2.sfmpy" => "__builtin_HEXAGON_F2_sfmpy", "llvm.hexagon.F2.sfsub" => "__builtin_HEXAGON_F2_sfsub", + "llvm.hexagon.L2.loadw.locked" => "__builtin_HEXAGON_L2_loadw_locked", + "llvm.hexagon.L4.loadd.locked" => "__builtin__HEXAGON_L4_loadd_locked", "llvm.hexagon.M2.acci" => "__builtin_HEXAGON_M2_acci", "llvm.hexagon.M2.accii" => "__builtin_HEXAGON_M2_accii", "llvm.hexagon.M2.cmaci.s0" => "__builtin_HEXAGON_M2_cmaci_s0", @@ -646,6 +734,7 @@ match name { "llvm.hexagon.M2.mmpyul.rs1" => "__builtin_HEXAGON_M2_mmpyul_rs1", "llvm.hexagon.M2.mmpyul.s0" => "__builtin_HEXAGON_M2_mmpyul_s0", "llvm.hexagon.M2.mmpyul.s1" => "__builtin_HEXAGON_M2_mmpyul_s1", + "llvm.hexagon.M2.mnaci" => "__builtin_HEXAGON_M2_mnaci", "llvm.hexagon.M2.mpy.acc.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_hh_s0", "llvm.hexagon.M2.mpy.acc.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_hh_s1", "llvm.hexagon.M2.mpy.acc.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_hl_s0", @@ -894,6 +983,24 @@ match name { "llvm.hexagon.M5.vrmpybuu" => "__builtin_HEXAGON_M5_vrmpybuu", "llvm.hexagon.M6.vabsdiffb" => "__builtin_HEXAGON_M6_vabsdiffb", "llvm.hexagon.M6.vabsdiffub" => "__builtin_HEXAGON_M6_vabsdiffub", + "llvm.hexagon.M7.dcmpyiw" => "__builtin_HEXAGON_M7_dcmpyiw", + "llvm.hexagon.M7.dcmpyiw.acc" => "__builtin_HEXAGON_M7_dcmpyiw_acc", + "llvm.hexagon.M7.dcmpyiwc" => "__builtin_HEXAGON_M7_dcmpyiwc", + "llvm.hexagon.M7.dcmpyiwc.acc" => "__builtin_HEXAGON_M7_dcmpyiwc_acc", + "llvm.hexagon.M7.dcmpyrw" => "__builtin_HEXAGON_M7_dcmpyrw", + "llvm.hexagon.M7.dcmpyrw.acc" => "__builtin_HEXAGON_M7_dcmpyrw_acc", + "llvm.hexagon.M7.dcmpyrwc" => "__builtin_HEXAGON_M7_dcmpyrwc", + "llvm.hexagon.M7.dcmpyrwc.acc" => "__builtin_HEXAGON_M7_dcmpyrwc_acc", + "llvm.hexagon.M7.vdmpy" => "__builtin_HEXAGON_M7_vdmpy", + "llvm.hexagon.M7.vdmpy.acc" => "__builtin_HEXAGON_M7_vdmpy_acc", + "llvm.hexagon.M7.wcmpyiw" => 
"__builtin_HEXAGON_M7_wcmpyiw", + "llvm.hexagon.M7.wcmpyiw.rnd" => "__builtin_HEXAGON_M7_wcmpyiw_rnd", + "llvm.hexagon.M7.wcmpyiwc" => "__builtin_HEXAGON_M7_wcmpyiwc", + "llvm.hexagon.M7.wcmpyiwc.rnd" => "__builtin_HEXAGON_M7_wcmpyiwc_rnd", + "llvm.hexagon.M7.wcmpyrw" => "__builtin_HEXAGON_M7_wcmpyrw", + "llvm.hexagon.M7.wcmpyrw.rnd" => "__builtin_HEXAGON_M7_wcmpyrw_rnd", + "llvm.hexagon.M7.wcmpyrwc" => "__builtin_HEXAGON_M7_wcmpyrwc", + "llvm.hexagon.M7.wcmpyrwc.rnd" => "__builtin_HEXAGON_M7_wcmpyrwc_rnd", "llvm.hexagon.S2.addasl.rrri" => "__builtin_HEXAGON_S2_addasl_rrri", "llvm.hexagon.S2.asl.i.p" => "__builtin_HEXAGON_S2_asl_i_p", "llvm.hexagon.S2.asl.i.p.acc" => "__builtin_HEXAGON_S2_asl_i_p_acc", @@ -1023,6 +1130,7 @@ match name { "llvm.hexagon.S2.lsr.r.r.or" => "__builtin_HEXAGON_S2_lsr_r_r_or", "llvm.hexagon.S2.lsr.r.vh" => "__builtin_HEXAGON_S2_lsr_r_vh", "llvm.hexagon.S2.lsr.r.vw" => "__builtin_HEXAGON_S2_lsr_r_vw", + "llvm.hexagon.S2.mask" => "__builtin_HEXAGON_S2_mask", "llvm.hexagon.S2.packhl" => "__builtin_HEXAGON_S2_packhl", "llvm.hexagon.S2.parityp" => "__builtin_HEXAGON_S2_parityp", "llvm.hexagon.S2.setbit.i" => "__builtin_HEXAGON_S2_setbit_i", @@ -1031,6 +1139,12 @@ match name { "llvm.hexagon.S2.shuffeh" => "__builtin_HEXAGON_S2_shuffeh", "llvm.hexagon.S2.shuffob" => "__builtin_HEXAGON_S2_shuffob", "llvm.hexagon.S2.shuffoh" => "__builtin_HEXAGON_S2_shuffoh", + "llvm.hexagon.S2.storerb.pbr" => "__builtin_brev_stb", + "llvm.hexagon.S2.storerd.pbr" => "__builtin_brev_std", + "llvm.hexagon.S2.storerf.pbr" => "__builtin_brev_sthhi", + "llvm.hexagon.S2.storerh.pbr" => "__builtin_brev_sth", + "llvm.hexagon.S2.storeri.pbr" => "__builtin_brev_stw", + "llvm.hexagon.S2.storew.locked" => "__builtin_HEXAGON_S2_storew_locked", "llvm.hexagon.S2.svsathb" => "__builtin_HEXAGON_S2_svsathb", "llvm.hexagon.S2.svsathub" => "__builtin_HEXAGON_S2_svsathub", "llvm.hexagon.S2.tableidxb.goodsyntax" => "__builtin_HEXAGON_S2_tableidxb_goodsyntax", @@ -1089,6 +1203,7 @@ match name { "llvm.hexagon.S4.ori.asl.ri" => "__builtin_HEXAGON_S4_ori_asl_ri", "llvm.hexagon.S4.ori.lsr.ri" => "__builtin_HEXAGON_S4_ori_lsr_ri", "llvm.hexagon.S4.parity" => "__builtin_HEXAGON_S4_parity", + "llvm.hexagon.S4.stored.locked" => "__builtin_HEXAGON_S4_stored_locked", "llvm.hexagon.S4.subaddi" => "__builtin_HEXAGON_S4_subaddi", "llvm.hexagon.S4.subi.asl.ri" => "__builtin_HEXAGON_S4_subi_asl_ri", "llvm.hexagon.S4.subi.lsr.ri" => "__builtin_HEXAGON_S4_subi_lsr_ri", @@ -1126,8 +1241,56 @@ match name { "llvm.hexagon.V6.hi.128B" => "__builtin_HEXAGON_V6_hi_128B", "llvm.hexagon.V6.lo" => "__builtin_HEXAGON_V6_lo", "llvm.hexagon.V6.lo.128B" => "__builtin_HEXAGON_V6_lo_128B", + "llvm.hexagon.V6.lvsplatb" => "__builtin_HEXAGON_V6_lvsplatb", + "llvm.hexagon.V6.lvsplatb.128B" => "__builtin_HEXAGON_V6_lvsplatb_128B", + "llvm.hexagon.V6.lvsplath" => "__builtin_HEXAGON_V6_lvsplath", + "llvm.hexagon.V6.lvsplath.128B" => "__builtin_HEXAGON_V6_lvsplath_128B", "llvm.hexagon.V6.lvsplatw" => "__builtin_HEXAGON_V6_lvsplatw", "llvm.hexagon.V6.lvsplatw.128B" => "__builtin_HEXAGON_V6_lvsplatw_128B", + "llvm.hexagon.V6.pred.and" => "__builtin_HEXAGON_V6_pred_and", + "llvm.hexagon.V6.pred.and.128B" => "__builtin_HEXAGON_V6_pred_and_128B", + "llvm.hexagon.V6.pred.and.n" => "__builtin_HEXAGON_V6_pred_and_n", + "llvm.hexagon.V6.pred.and.n.128B" => "__builtin_HEXAGON_V6_pred_and_n_128B", + "llvm.hexagon.V6.pred.not" => "__builtin_HEXAGON_V6_pred_not", + "llvm.hexagon.V6.pred.not.128B" => "__builtin_HEXAGON_V6_pred_not_128B", + "llvm.hexagon.V6.pred.or" 
=> "__builtin_HEXAGON_V6_pred_or", + "llvm.hexagon.V6.pred.or.128B" => "__builtin_HEXAGON_V6_pred_or_128B", + "llvm.hexagon.V6.pred.or.n" => "__builtin_HEXAGON_V6_pred_or_n", + "llvm.hexagon.V6.pred.or.n.128B" => "__builtin_HEXAGON_V6_pred_or_n_128B", + "llvm.hexagon.V6.pred.scalar2" => "__builtin_HEXAGON_V6_pred_scalar2", + "llvm.hexagon.V6.pred.scalar2.128B" => "__builtin_HEXAGON_V6_pred_scalar2_128B", + "llvm.hexagon.V6.pred.scalar2v2" => "__builtin_HEXAGON_V6_pred_scalar2v2", + "llvm.hexagon.V6.pred.scalar2v2.128B" => "__builtin_HEXAGON_V6_pred_scalar2v2_128B", + "llvm.hexagon.V6.pred.xor" => "__builtin_HEXAGON_V6_pred_xor", + "llvm.hexagon.V6.pred.xor.128B" => "__builtin_HEXAGON_V6_pred_xor_128B", + "llvm.hexagon.V6.shuffeqh" => "__builtin_HEXAGON_V6_shuffeqh", + "llvm.hexagon.V6.shuffeqh.128B" => "__builtin_HEXAGON_V6_shuffeqh_128B", + "llvm.hexagon.V6.shuffeqw" => "__builtin_HEXAGON_V6_shuffeqw", + "llvm.hexagon.V6.shuffeqw.128B" => "__builtin_HEXAGON_V6_shuffeqw_128B", + "llvm.hexagon.V6.v6mpyhubs10" => "__builtin_HEXAGON_V6_v6mpyhubs10", + "llvm.hexagon.V6.v6mpyhubs10.128B" => "__builtin_HEXAGON_V6_v6mpyhubs10_128B", + "llvm.hexagon.V6.v6mpyhubs10.vxx" => "__builtin_HEXAGON_V6_v6mpyhubs10_vxx", + "llvm.hexagon.V6.v6mpyhubs10.vxx.128B" => "__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B", + "llvm.hexagon.V6.v6mpyvubs10" => "__builtin_HEXAGON_V6_v6mpyvubs10", + "llvm.hexagon.V6.v6mpyvubs10.128B" => "__builtin_HEXAGON_V6_v6mpyvubs10_128B", + "llvm.hexagon.V6.v6mpyvubs10.vxx" => "__builtin_HEXAGON_V6_v6mpyvubs10_vxx", + "llvm.hexagon.V6.v6mpyvubs10.vxx.128B" => "__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B", + "llvm.hexagon.V6.vS32b.nqpred.ai" => "__builtin_HEXAGON_V6_vS32b_nqpred_ai", + "llvm.hexagon.V6.vS32b.nqpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nqpred_ai_128B", + "llvm.hexagon.V6.vS32b.nt.nqpred.ai" => "__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai", + "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B", + "llvm.hexagon.V6.vS32b.nt.qpred.ai" => "__builtin_HEXAGON_V6_vS32b_nt_qpred_ai", + "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B", + "llvm.hexagon.V6.vS32b.qpred.ai" => "__builtin_HEXAGON_V6_vS32b_qpred_ai", + "llvm.hexagon.V6.vS32b.qpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_qpred_ai_128B", + "llvm.hexagon.V6.vabs.hf" => "__builtin_HEXAGON_V6_vabs_hf", + "llvm.hexagon.V6.vabs.hf.128B" => "__builtin_HEXAGON_V6_vabs_hf_128B", + "llvm.hexagon.V6.vabs.sf" => "__builtin_HEXAGON_V6_vabs_sf", + "llvm.hexagon.V6.vabs.sf.128B" => "__builtin_HEXAGON_V6_vabs_sf_128B", + "llvm.hexagon.V6.vabsb" => "__builtin_HEXAGON_V6_vabsb", + "llvm.hexagon.V6.vabsb.128B" => "__builtin_HEXAGON_V6_vabsb_128B", + "llvm.hexagon.V6.vabsb.sat" => "__builtin_HEXAGON_V6_vabsb_sat", + "llvm.hexagon.V6.vabsb.sat.128B" => "__builtin_HEXAGON_V6_vabsb_sat_128B", "llvm.hexagon.V6.vabsdiffh" => "__builtin_HEXAGON_V6_vabsdiffh", "llvm.hexagon.V6.vabsdiffh.128B" => "__builtin_HEXAGON_V6_vabsdiffh_128B", "llvm.hexagon.V6.vabsdiffub" => "__builtin_HEXAGON_V6_vabsdiffub", @@ -1144,36 +1307,90 @@ match name { "llvm.hexagon.V6.vabsw.128B" => "__builtin_HEXAGON_V6_vabsw_128B", "llvm.hexagon.V6.vabsw.sat" => "__builtin_HEXAGON_V6_vabsw_sat", "llvm.hexagon.V6.vabsw.sat.128B" => "__builtin_HEXAGON_V6_vabsw_sat_128B", + "llvm.hexagon.V6.vadd.hf" => "__builtin_HEXAGON_V6_vadd_hf", + "llvm.hexagon.V6.vadd.hf.128B" => "__builtin_HEXAGON_V6_vadd_hf_128B", + "llvm.hexagon.V6.vadd.hf.hf" => "__builtin_HEXAGON_V6_vadd_hf_hf", + 
"llvm.hexagon.V6.vadd.hf.hf.128B" => "__builtin_HEXAGON_V6_vadd_hf_hf_128B", + "llvm.hexagon.V6.vadd.qf16" => "__builtin_HEXAGON_V6_vadd_qf16", + "llvm.hexagon.V6.vadd.qf16.128B" => "__builtin_HEXAGON_V6_vadd_qf16_128B", + "llvm.hexagon.V6.vadd.qf16.mix" => "__builtin_HEXAGON_V6_vadd_qf16_mix", + "llvm.hexagon.V6.vadd.qf16.mix.128B" => "__builtin_HEXAGON_V6_vadd_qf16_mix_128B", + "llvm.hexagon.V6.vadd.qf32" => "__builtin_HEXAGON_V6_vadd_qf32", + "llvm.hexagon.V6.vadd.qf32.128B" => "__builtin_HEXAGON_V6_vadd_qf32_128B", + "llvm.hexagon.V6.vadd.qf32.mix" => "__builtin_HEXAGON_V6_vadd_qf32_mix", + "llvm.hexagon.V6.vadd.qf32.mix.128B" => "__builtin_HEXAGON_V6_vadd_qf32_mix_128B", + "llvm.hexagon.V6.vadd.sf" => "__builtin_HEXAGON_V6_vadd_sf", + "llvm.hexagon.V6.vadd.sf.128B" => "__builtin_HEXAGON_V6_vadd_sf_128B", + "llvm.hexagon.V6.vadd.sf.bf" => "__builtin_HEXAGON_V6_vadd_sf_bf", + "llvm.hexagon.V6.vadd.sf.bf.128B" => "__builtin_HEXAGON_V6_vadd_sf_bf_128B", + "llvm.hexagon.V6.vadd.sf.hf" => "__builtin_HEXAGON_V6_vadd_sf_hf", + "llvm.hexagon.V6.vadd.sf.hf.128B" => "__builtin_HEXAGON_V6_vadd_sf_hf_128B", + "llvm.hexagon.V6.vadd.sf.sf" => "__builtin_HEXAGON_V6_vadd_sf_sf", + "llvm.hexagon.V6.vadd.sf.sf.128B" => "__builtin_HEXAGON_V6_vadd_sf_sf_128B", "llvm.hexagon.V6.vaddb" => "__builtin_HEXAGON_V6_vaddb", "llvm.hexagon.V6.vaddb.128B" => "__builtin_HEXAGON_V6_vaddb_128B", "llvm.hexagon.V6.vaddb.dv" => "__builtin_HEXAGON_V6_vaddb_dv", "llvm.hexagon.V6.vaddb.dv.128B" => "__builtin_HEXAGON_V6_vaddb_dv_128B", + "llvm.hexagon.V6.vaddbnq" => "__builtin_HEXAGON_V6_vaddbnq", + "llvm.hexagon.V6.vaddbnq.128B" => "__builtin_HEXAGON_V6_vaddbnq_128B", + "llvm.hexagon.V6.vaddbq" => "__builtin_HEXAGON_V6_vaddbq", + "llvm.hexagon.V6.vaddbq.128B" => "__builtin_HEXAGON_V6_vaddbq_128B", + "llvm.hexagon.V6.vaddbsat" => "__builtin_HEXAGON_V6_vaddbsat", + "llvm.hexagon.V6.vaddbsat.128B" => "__builtin_HEXAGON_V6_vaddbsat_128B", + "llvm.hexagon.V6.vaddbsat.dv" => "__builtin_HEXAGON_V6_vaddbsat_dv", + "llvm.hexagon.V6.vaddbsat.dv.128B" => "__builtin_HEXAGON_V6_vaddbsat_dv_128B", + "llvm.hexagon.V6.vaddcarrysat" => "__builtin_HEXAGON_V6_vaddcarrysat", + "llvm.hexagon.V6.vaddcarrysat.128B" => "__builtin_HEXAGON_V6_vaddcarrysat_128B", + "llvm.hexagon.V6.vaddclbh" => "__builtin_HEXAGON_V6_vaddclbh", + "llvm.hexagon.V6.vaddclbh.128B" => "__builtin_HEXAGON_V6_vaddclbh_128B", + "llvm.hexagon.V6.vaddclbw" => "__builtin_HEXAGON_V6_vaddclbw", + "llvm.hexagon.V6.vaddclbw.128B" => "__builtin_HEXAGON_V6_vaddclbw_128B", "llvm.hexagon.V6.vaddh" => "__builtin_HEXAGON_V6_vaddh", "llvm.hexagon.V6.vaddh.128B" => "__builtin_HEXAGON_V6_vaddh_128B", "llvm.hexagon.V6.vaddh.dv" => "__builtin_HEXAGON_V6_vaddh_dv", "llvm.hexagon.V6.vaddh.dv.128B" => "__builtin_HEXAGON_V6_vaddh_dv_128B", + "llvm.hexagon.V6.vaddhnq" => "__builtin_HEXAGON_V6_vaddhnq", + "llvm.hexagon.V6.vaddhnq.128B" => "__builtin_HEXAGON_V6_vaddhnq_128B", + "llvm.hexagon.V6.vaddhq" => "__builtin_HEXAGON_V6_vaddhq", + "llvm.hexagon.V6.vaddhq.128B" => "__builtin_HEXAGON_V6_vaddhq_128B", "llvm.hexagon.V6.vaddhsat" => "__builtin_HEXAGON_V6_vaddhsat", "llvm.hexagon.V6.vaddhsat.128B" => "__builtin_HEXAGON_V6_vaddhsat_128B", "llvm.hexagon.V6.vaddhsat.dv" => "__builtin_HEXAGON_V6_vaddhsat_dv", "llvm.hexagon.V6.vaddhsat.dv.128B" => "__builtin_HEXAGON_V6_vaddhsat_dv_128B", "llvm.hexagon.V6.vaddhw" => "__builtin_HEXAGON_V6_vaddhw", "llvm.hexagon.V6.vaddhw.128B" => "__builtin_HEXAGON_V6_vaddhw_128B", + "llvm.hexagon.V6.vaddhw.acc" => "__builtin_HEXAGON_V6_vaddhw_acc", + 
"llvm.hexagon.V6.vaddhw.acc.128B" => "__builtin_HEXAGON_V6_vaddhw_acc_128B", "llvm.hexagon.V6.vaddubh" => "__builtin_HEXAGON_V6_vaddubh", "llvm.hexagon.V6.vaddubh.128B" => "__builtin_HEXAGON_V6_vaddubh_128B", + "llvm.hexagon.V6.vaddubh.acc" => "__builtin_HEXAGON_V6_vaddubh_acc", + "llvm.hexagon.V6.vaddubh.acc.128B" => "__builtin_HEXAGON_V6_vaddubh_acc_128B", "llvm.hexagon.V6.vaddubsat" => "__builtin_HEXAGON_V6_vaddubsat", "llvm.hexagon.V6.vaddubsat.128B" => "__builtin_HEXAGON_V6_vaddubsat_128B", "llvm.hexagon.V6.vaddubsat.dv" => "__builtin_HEXAGON_V6_vaddubsat_dv", "llvm.hexagon.V6.vaddubsat.dv.128B" => "__builtin_HEXAGON_V6_vaddubsat_dv_128B", + "llvm.hexagon.V6.vaddububb.sat" => "__builtin_HEXAGON_V6_vaddububb_sat", + "llvm.hexagon.V6.vaddububb.sat.128B" => "__builtin_HEXAGON_V6_vaddububb_sat_128B", "llvm.hexagon.V6.vadduhsat" => "__builtin_HEXAGON_V6_vadduhsat", "llvm.hexagon.V6.vadduhsat.128B" => "__builtin_HEXAGON_V6_vadduhsat_128B", "llvm.hexagon.V6.vadduhsat.dv" => "__builtin_HEXAGON_V6_vadduhsat_dv", "llvm.hexagon.V6.vadduhsat.dv.128B" => "__builtin_HEXAGON_V6_vadduhsat_dv_128B", "llvm.hexagon.V6.vadduhw" => "__builtin_HEXAGON_V6_vadduhw", "llvm.hexagon.V6.vadduhw.128B" => "__builtin_HEXAGON_V6_vadduhw_128B", + "llvm.hexagon.V6.vadduhw.acc" => "__builtin_HEXAGON_V6_vadduhw_acc", + "llvm.hexagon.V6.vadduhw.acc.128B" => "__builtin_HEXAGON_V6_vadduhw_acc_128B", + "llvm.hexagon.V6.vadduwsat" => "__builtin_HEXAGON_V6_vadduwsat", + "llvm.hexagon.V6.vadduwsat.128B" => "__builtin_HEXAGON_V6_vadduwsat_128B", + "llvm.hexagon.V6.vadduwsat.dv" => "__builtin_HEXAGON_V6_vadduwsat_dv", + "llvm.hexagon.V6.vadduwsat.dv.128B" => "__builtin_HEXAGON_V6_vadduwsat_dv_128B", "llvm.hexagon.V6.vaddw" => "__builtin_HEXAGON_V6_vaddw", "llvm.hexagon.V6.vaddw.128B" => "__builtin_HEXAGON_V6_vaddw_128B", "llvm.hexagon.V6.vaddw.dv" => "__builtin_HEXAGON_V6_vaddw_dv", "llvm.hexagon.V6.vaddw.dv.128B" => "__builtin_HEXAGON_V6_vaddw_dv_128B", + "llvm.hexagon.V6.vaddwnq" => "__builtin_HEXAGON_V6_vaddwnq", + "llvm.hexagon.V6.vaddwnq.128B" => "__builtin_HEXAGON_V6_vaddwnq_128B", + "llvm.hexagon.V6.vaddwq" => "__builtin_HEXAGON_V6_vaddwq", + "llvm.hexagon.V6.vaddwq.128B" => "__builtin_HEXAGON_V6_vaddwq_128B", "llvm.hexagon.V6.vaddwsat" => "__builtin_HEXAGON_V6_vaddwsat", "llvm.hexagon.V6.vaddwsat.128B" => "__builtin_HEXAGON_V6_vaddwsat_128B", "llvm.hexagon.V6.vaddwsat.dv" => "__builtin_HEXAGON_V6_vaddwsat_dv", @@ -1184,8 +1401,26 @@ match name { "llvm.hexagon.V6.valignbi.128B" => "__builtin_HEXAGON_V6_valignbi_128B", "llvm.hexagon.V6.vand" => "__builtin_HEXAGON_V6_vand", "llvm.hexagon.V6.vand.128B" => "__builtin_HEXAGON_V6_vand_128B", + "llvm.hexagon.V6.vandnqrt" => "__builtin_HEXAGON_V6_vandnqrt", + "llvm.hexagon.V6.vandnqrt.128B" => "__builtin_HEXAGON_V6_vandnqrt_128B", + "llvm.hexagon.V6.vandnqrt.acc" => "__builtin_HEXAGON_V6_vandnqrt_acc", + "llvm.hexagon.V6.vandnqrt.acc.128B" => "__builtin_HEXAGON_V6_vandnqrt_acc_128B", + "llvm.hexagon.V6.vandqrt" => "__builtin_HEXAGON_V6_vandqrt", + "llvm.hexagon.V6.vandqrt.128B" => "__builtin_HEXAGON_V6_vandqrt_128B", + "llvm.hexagon.V6.vandqrt.acc" => "__builtin_HEXAGON_V6_vandqrt_acc", + "llvm.hexagon.V6.vandqrt.acc.128B" => "__builtin_HEXAGON_V6_vandqrt_acc_128B", + "llvm.hexagon.V6.vandvnqv" => "__builtin_HEXAGON_V6_vandvnqv", + "llvm.hexagon.V6.vandvnqv.128B" => "__builtin_HEXAGON_V6_vandvnqv_128B", + "llvm.hexagon.V6.vandvqv" => "__builtin_HEXAGON_V6_vandvqv", + "llvm.hexagon.V6.vandvqv.128B" => "__builtin_HEXAGON_V6_vandvqv_128B", + "llvm.hexagon.V6.vandvrt" => 
"__builtin_HEXAGON_V6_vandvrt", + "llvm.hexagon.V6.vandvrt.128B" => "__builtin_HEXAGON_V6_vandvrt_128B", + "llvm.hexagon.V6.vandvrt.acc" => "__builtin_HEXAGON_V6_vandvrt_acc", + "llvm.hexagon.V6.vandvrt.acc.128B" => "__builtin_HEXAGON_V6_vandvrt_acc_128B", "llvm.hexagon.V6.vaslh" => "__builtin_HEXAGON_V6_vaslh", "llvm.hexagon.V6.vaslh.128B" => "__builtin_HEXAGON_V6_vaslh_128B", + "llvm.hexagon.V6.vaslh.acc" => "__builtin_HEXAGON_V6_vaslh_acc", + "llvm.hexagon.V6.vaslh.acc.128B" => "__builtin_HEXAGON_V6_vaslh_acc_128B", "llvm.hexagon.V6.vaslhv" => "__builtin_HEXAGON_V6_vaslhv", "llvm.hexagon.V6.vaslhv.128B" => "__builtin_HEXAGON_V6_vaslhv_128B", "llvm.hexagon.V6.vaslw" => "__builtin_HEXAGON_V6_vaslw", @@ -1194,16 +1429,38 @@ match name { "llvm.hexagon.V6.vaslw.acc.128B" => "__builtin_HEXAGON_V6_vaslw_acc_128B", "llvm.hexagon.V6.vaslwv" => "__builtin_HEXAGON_V6_vaslwv", "llvm.hexagon.V6.vaslwv.128B" => "__builtin_HEXAGON_V6_vaslwv_128B", + "llvm.hexagon.V6.vasr.into" => "__builtin_HEXAGON_V6_vasr_into", + "llvm.hexagon.V6.vasr.into.128B" => "__builtin_HEXAGON_V6_vasr_into_128B", "llvm.hexagon.V6.vasrh" => "__builtin_HEXAGON_V6_vasrh", "llvm.hexagon.V6.vasrh.128B" => "__builtin_HEXAGON_V6_vasrh_128B", + "llvm.hexagon.V6.vasrh.acc" => "__builtin_HEXAGON_V6_vasrh_acc", + "llvm.hexagon.V6.vasrh.acc.128B" => "__builtin_HEXAGON_V6_vasrh_acc_128B", "llvm.hexagon.V6.vasrhbrndsat" => "__builtin_HEXAGON_V6_vasrhbrndsat", "llvm.hexagon.V6.vasrhbrndsat.128B" => "__builtin_HEXAGON_V6_vasrhbrndsat_128B", + "llvm.hexagon.V6.vasrhbsat" => "__builtin_HEXAGON_V6_vasrhbsat", + "llvm.hexagon.V6.vasrhbsat.128B" => "__builtin_HEXAGON_V6_vasrhbsat_128B", "llvm.hexagon.V6.vasrhubrndsat" => "__builtin_HEXAGON_V6_vasrhubrndsat", "llvm.hexagon.V6.vasrhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrhubrndsat_128B", "llvm.hexagon.V6.vasrhubsat" => "__builtin_HEXAGON_V6_vasrhubsat", "llvm.hexagon.V6.vasrhubsat.128B" => "__builtin_HEXAGON_V6_vasrhubsat_128B", "llvm.hexagon.V6.vasrhv" => "__builtin_HEXAGON_V6_vasrhv", "llvm.hexagon.V6.vasrhv.128B" => "__builtin_HEXAGON_V6_vasrhv_128B", + "llvm.hexagon.V6.vasruhubrndsat" => "__builtin_HEXAGON_V6_vasruhubrndsat", + "llvm.hexagon.V6.vasruhubrndsat.128B" => "__builtin_HEXAGON_V6_vasruhubrndsat_128B", + "llvm.hexagon.V6.vasruhubsat" => "__builtin_HEXAGON_V6_vasruhubsat", + "llvm.hexagon.V6.vasruhubsat.128B" => "__builtin_HEXAGON_V6_vasruhubsat_128B", + "llvm.hexagon.V6.vasruwuhrndsat" => "__builtin_HEXAGON_V6_vasruwuhrndsat", + "llvm.hexagon.V6.vasruwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasruwuhrndsat_128B", + "llvm.hexagon.V6.vasruwuhsat" => "__builtin_HEXAGON_V6_vasruwuhsat", + "llvm.hexagon.V6.vasruwuhsat.128B" => "__builtin_HEXAGON_V6_vasruwuhsat_128B", + "llvm.hexagon.V6.vasrvuhubrndsat" => "__builtin_HEXAGON_V6_vasrvuhubrndsat", + "llvm.hexagon.V6.vasrvuhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrvuhubrndsat_128B", + "llvm.hexagon.V6.vasrvuhubsat" => "__builtin_HEXAGON_V6_vasrvuhubsat", + "llvm.hexagon.V6.vasrvuhubsat.128B" => "__builtin_HEXAGON_V6_vasrvuhubsat_128B", + "llvm.hexagon.V6.vasrvwuhrndsat" => "__builtin_HEXAGON_V6_vasrvwuhrndsat", + "llvm.hexagon.V6.vasrvwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasrvwuhrndsat_128B", + "llvm.hexagon.V6.vasrvwuhsat" => "__builtin_HEXAGON_V6_vasrvwuhsat", + "llvm.hexagon.V6.vasrvwuhsat.128B" => "__builtin_HEXAGON_V6_vasrvwuhsat_128B", "llvm.hexagon.V6.vasrw" => "__builtin_HEXAGON_V6_vasrw", "llvm.hexagon.V6.vasrw.128B" => "__builtin_HEXAGON_V6_vasrw_128B", "llvm.hexagon.V6.vasrw.acc" => "__builtin_HEXAGON_V6_vasrw_acc", @@ 
-1214,14 +1471,22 @@ match name { "llvm.hexagon.V6.vasrwhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwhrndsat_128B", "llvm.hexagon.V6.vasrwhsat" => "__builtin_HEXAGON_V6_vasrwhsat", "llvm.hexagon.V6.vasrwhsat.128B" => "__builtin_HEXAGON_V6_vasrwhsat_128B", + "llvm.hexagon.V6.vasrwuhrndsat" => "__builtin_HEXAGON_V6_vasrwuhrndsat", + "llvm.hexagon.V6.vasrwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwuhrndsat_128B", "llvm.hexagon.V6.vasrwuhsat" => "__builtin_HEXAGON_V6_vasrwuhsat", "llvm.hexagon.V6.vasrwuhsat.128B" => "__builtin_HEXAGON_V6_vasrwuhsat_128B", "llvm.hexagon.V6.vasrwv" => "__builtin_HEXAGON_V6_vasrwv", "llvm.hexagon.V6.vasrwv.128B" => "__builtin_HEXAGON_V6_vasrwv_128B", "llvm.hexagon.V6.vassign" => "__builtin_HEXAGON_V6_vassign", "llvm.hexagon.V6.vassign.128B" => "__builtin_HEXAGON_V6_vassign_128B", + "llvm.hexagon.V6.vassign.fp" => "__builtin_HEXAGON_V6_vassign_fp", + "llvm.hexagon.V6.vassign.fp.128B" => "__builtin_HEXAGON_V6_vassign_fp_128B", "llvm.hexagon.V6.vassignp" => "__builtin_HEXAGON_V6_vassignp", "llvm.hexagon.V6.vassignp.128B" => "__builtin_HEXAGON_V6_vassignp_128B", + "llvm.hexagon.V6.vavgb" => "__builtin_HEXAGON_V6_vavgb", + "llvm.hexagon.V6.vavgb.128B" => "__builtin_HEXAGON_V6_vavgb_128B", + "llvm.hexagon.V6.vavgbrnd" => "__builtin_HEXAGON_V6_vavgbrnd", + "llvm.hexagon.V6.vavgbrnd.128B" => "__builtin_HEXAGON_V6_vavgbrnd_128B", "llvm.hexagon.V6.vavgh" => "__builtin_HEXAGON_V6_vavgh", "llvm.hexagon.V6.vavgh.128B" => "__builtin_HEXAGON_V6_vavgh_128B", "llvm.hexagon.V6.vavghrnd" => "__builtin_HEXAGON_V6_vavghrnd", @@ -1234,6 +1499,10 @@ match name { "llvm.hexagon.V6.vavguh.128B" => "__builtin_HEXAGON_V6_vavguh_128B", "llvm.hexagon.V6.vavguhrnd" => "__builtin_HEXAGON_V6_vavguhrnd", "llvm.hexagon.V6.vavguhrnd.128B" => "__builtin_HEXAGON_V6_vavguhrnd_128B", + "llvm.hexagon.V6.vavguw" => "__builtin_HEXAGON_V6_vavguw", + "llvm.hexagon.V6.vavguw.128B" => "__builtin_HEXAGON_V6_vavguw_128B", + "llvm.hexagon.V6.vavguwrnd" => "__builtin_HEXAGON_V6_vavguwrnd", + "llvm.hexagon.V6.vavguwrnd.128B" => "__builtin_HEXAGON_V6_vavguwrnd_128B", "llvm.hexagon.V6.vavgw" => "__builtin_HEXAGON_V6_vavgw", "llvm.hexagon.V6.vavgw.128B" => "__builtin_HEXAGON_V6_vavgw_128B", "llvm.hexagon.V6.vavgwrnd" => "__builtin_HEXAGON_V6_vavgwrnd", @@ -1244,8 +1513,46 @@ match name { "llvm.hexagon.V6.vcl0w.128B" => "__builtin_HEXAGON_V6_vcl0w_128B", "llvm.hexagon.V6.vcombine" => "__builtin_HEXAGON_V6_vcombine", "llvm.hexagon.V6.vcombine.128B" => "__builtin_HEXAGON_V6_vcombine_128B", + "llvm.hexagon.V6.vconv.h.hf" => "__builtin_HEXAGON_V6_vconv_h_hf", + "llvm.hexagon.V6.vconv.h.hf.128B" => "__builtin_HEXAGON_V6_vconv_h_hf_128B", + "llvm.hexagon.V6.vconv.hf.h" => "__builtin_HEXAGON_V6_vconv_hf_h", + "llvm.hexagon.V6.vconv.hf.h.128B" => "__builtin_HEXAGON_V6_vconv_hf_h_128B", + "llvm.hexagon.V6.vconv.hf.qf16" => "__builtin_HEXAGON_V6_vconv_hf_qf16", + "llvm.hexagon.V6.vconv.hf.qf16.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf16_128B", + "llvm.hexagon.V6.vconv.hf.qf32" => "__builtin_HEXAGON_V6_vconv_hf_qf32", + "llvm.hexagon.V6.vconv.hf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf32_128B", + "llvm.hexagon.V6.vconv.sf.qf32" => "__builtin_HEXAGON_V6_vconv_sf_qf32", + "llvm.hexagon.V6.vconv.sf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_sf_qf32_128B", + "llvm.hexagon.V6.vconv.sf.w" => "__builtin_HEXAGON_V6_vconv_sf_w", + "llvm.hexagon.V6.vconv.sf.w.128B" => "__builtin_HEXAGON_V6_vconv_sf_w_128B", + "llvm.hexagon.V6.vconv.w.sf" => "__builtin_HEXAGON_V6_vconv_w_sf", + "llvm.hexagon.V6.vconv.w.sf.128B" => 
"__builtin_HEXAGON_V6_vconv_w_sf_128B", + "llvm.hexagon.V6.vcvt.b.hf" => "__builtin_HEXAGON_V6_vcvt_b_hf", + "llvm.hexagon.V6.vcvt.b.hf.128B" => "__builtin_HEXAGON_V6_vcvt_b_hf_128B", + "llvm.hexagon.V6.vcvt.bf.sf" => "__builtin_HEXAGON_V6_vcvt_bf_sf", + "llvm.hexagon.V6.vcvt.bf.sf.128B" => "__builtin_HEXAGON_V6_vcvt_bf_sf_128B", + "llvm.hexagon.V6.vcvt.h.hf" => "__builtin_HEXAGON_V6_vcvt_h_hf", + "llvm.hexagon.V6.vcvt.h.hf.128B" => "__builtin_HEXAGON_V6_vcvt_h_hf_128B", + "llvm.hexagon.V6.vcvt.hf.b" => "__builtin_HEXAGON_V6_vcvt_hf_b", + "llvm.hexagon.V6.vcvt.hf.b.128B" => "__builtin_HEXAGON_V6_vcvt_hf_b_128B", + "llvm.hexagon.V6.vcvt.hf.h" => "__builtin_HEXAGON_V6_vcvt_hf_h", + "llvm.hexagon.V6.vcvt.hf.h.128B" => "__builtin_HEXAGON_V6_vcvt_hf_h_128B", + "llvm.hexagon.V6.vcvt.hf.sf" => "__builtin_HEXAGON_V6_vcvt_hf_sf", + "llvm.hexagon.V6.vcvt.hf.sf.128B" => "__builtin_HEXAGON_V6_vcvt_hf_sf_128B", + "llvm.hexagon.V6.vcvt.hf.ub" => "__builtin_HEXAGON_V6_vcvt_hf_ub", + "llvm.hexagon.V6.vcvt.hf.ub.128B" => "__builtin_HEXAGON_V6_vcvt_hf_ub_128B", + "llvm.hexagon.V6.vcvt.hf.uh" => "__builtin_HEXAGON_V6_vcvt_hf_uh", + "llvm.hexagon.V6.vcvt.hf.uh.128B" => "__builtin_HEXAGON_V6_vcvt_hf_uh_128B", + "llvm.hexagon.V6.vcvt.sf.hf" => "__builtin_HEXAGON_V6_vcvt_sf_hf", + "llvm.hexagon.V6.vcvt.sf.hf.128B" => "__builtin_HEXAGON_V6_vcvt_sf_hf_128B", + "llvm.hexagon.V6.vcvt.ub.hf" => "__builtin_HEXAGON_V6_vcvt_ub_hf", + "llvm.hexagon.V6.vcvt.ub.hf.128B" => "__builtin_HEXAGON_V6_vcvt_ub_hf_128B", + "llvm.hexagon.V6.vcvt.uh.hf" => "__builtin_HEXAGON_V6_vcvt_uh_hf", + "llvm.hexagon.V6.vcvt.uh.hf.128B" => "__builtin_HEXAGON_V6_vcvt_uh_hf_128B", "llvm.hexagon.V6.vd0" => "__builtin_HEXAGON_V6_vd0", "llvm.hexagon.V6.vd0.128B" => "__builtin_HEXAGON_V6_vd0_128B", + "llvm.hexagon.V6.vdd0" => "__builtin_HEXAGON_V6_vdd0", + "llvm.hexagon.V6.vdd0.128B" => "__builtin_HEXAGON_V6_vdd0_128B", "llvm.hexagon.V6.vdealb" => "__builtin_HEXAGON_V6_vdealb", "llvm.hexagon.V6.vdealb.128B" => "__builtin_HEXAGON_V6_vdealb_128B", "llvm.hexagon.V6.vdealb4w" => "__builtin_HEXAGON_V6_vdealb4w", @@ -1256,6 +1563,10 @@ match name { "llvm.hexagon.V6.vdealvdd.128B" => "__builtin_HEXAGON_V6_vdealvdd_128B", "llvm.hexagon.V6.vdelta" => "__builtin_HEXAGON_V6_vdelta", "llvm.hexagon.V6.vdelta.128B" => "__builtin_HEXAGON_V6_vdelta_128B", + "llvm.hexagon.V6.vdmpy.sf.hf" => "__builtin_HEXAGON_V6_vdmpy_sf_hf", + "llvm.hexagon.V6.vdmpy.sf.hf.128B" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_128B", + "llvm.hexagon.V6.vdmpy.sf.hf.acc" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_acc", + "llvm.hexagon.V6.vdmpy.sf.hf.acc.128B" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_acc_128B", "llvm.hexagon.V6.vdmpybus" => "__builtin_HEXAGON_V6_vdmpybus", "llvm.hexagon.V6.vdmpybus.128B" => "__builtin_HEXAGON_V6_vdmpybus_128B", "llvm.hexagon.V6.vdmpybus.acc" => "__builtin_HEXAGON_V6_vdmpybus_acc", @@ -1296,12 +1607,134 @@ match name { "llvm.hexagon.V6.vdsaduh.128B" => "__builtin_HEXAGON_V6_vdsaduh_128B", "llvm.hexagon.V6.vdsaduh.acc" => "__builtin_HEXAGON_V6_vdsaduh_acc", "llvm.hexagon.V6.vdsaduh.acc.128B" => "__builtin_HEXAGON_V6_vdsaduh_acc_128B", + "llvm.hexagon.V6.veqb" => "__builtin_HEXAGON_V6_veqb", + "llvm.hexagon.V6.veqb.128B" => "__builtin_HEXAGON_V6_veqb_128B", + "llvm.hexagon.V6.veqb.and" => "__builtin_HEXAGON_V6_veqb_and", + "llvm.hexagon.V6.veqb.and.128B" => "__builtin_HEXAGON_V6_veqb_and_128B", + "llvm.hexagon.V6.veqb.or" => "__builtin_HEXAGON_V6_veqb_or", + "llvm.hexagon.V6.veqb.or.128B" => "__builtin_HEXAGON_V6_veqb_or_128B", + "llvm.hexagon.V6.veqb.xor" => 
"__builtin_HEXAGON_V6_veqb_xor", + "llvm.hexagon.V6.veqb.xor.128B" => "__builtin_HEXAGON_V6_veqb_xor_128B", + "llvm.hexagon.V6.veqh" => "__builtin_HEXAGON_V6_veqh", + "llvm.hexagon.V6.veqh.128B" => "__builtin_HEXAGON_V6_veqh_128B", + "llvm.hexagon.V6.veqh.and" => "__builtin_HEXAGON_V6_veqh_and", + "llvm.hexagon.V6.veqh.and.128B" => "__builtin_HEXAGON_V6_veqh_and_128B", + "llvm.hexagon.V6.veqh.or" => "__builtin_HEXAGON_V6_veqh_or", + "llvm.hexagon.V6.veqh.or.128B" => "__builtin_HEXAGON_V6_veqh_or_128B", + "llvm.hexagon.V6.veqh.xor" => "__builtin_HEXAGON_V6_veqh_xor", + "llvm.hexagon.V6.veqh.xor.128B" => "__builtin_HEXAGON_V6_veqh_xor_128B", + "llvm.hexagon.V6.veqw" => "__builtin_HEXAGON_V6_veqw", + "llvm.hexagon.V6.veqw.128B" => "__builtin_HEXAGON_V6_veqw_128B", + "llvm.hexagon.V6.veqw.and" => "__builtin_HEXAGON_V6_veqw_and", + "llvm.hexagon.V6.veqw.and.128B" => "__builtin_HEXAGON_V6_veqw_and_128B", + "llvm.hexagon.V6.veqw.or" => "__builtin_HEXAGON_V6_veqw_or", + "llvm.hexagon.V6.veqw.or.128B" => "__builtin_HEXAGON_V6_veqw_or_128B", + "llvm.hexagon.V6.veqw.xor" => "__builtin_HEXAGON_V6_veqw_xor", + "llvm.hexagon.V6.veqw.xor.128B" => "__builtin_HEXAGON_V6_veqw_xor_128B", + "llvm.hexagon.V6.vfmax.hf" => "__builtin_HEXAGON_V6_vfmax_hf", + "llvm.hexagon.V6.vfmax.hf.128B" => "__builtin_HEXAGON_V6_vfmax_hf_128B", + "llvm.hexagon.V6.vfmax.sf" => "__builtin_HEXAGON_V6_vfmax_sf", + "llvm.hexagon.V6.vfmax.sf.128B" => "__builtin_HEXAGON_V6_vfmax_sf_128B", + "llvm.hexagon.V6.vfmin.hf" => "__builtin_HEXAGON_V6_vfmin_hf", + "llvm.hexagon.V6.vfmin.hf.128B" => "__builtin_HEXAGON_V6_vfmin_hf_128B", + "llvm.hexagon.V6.vfmin.sf" => "__builtin_HEXAGON_V6_vfmin_sf", + "llvm.hexagon.V6.vfmin.sf.128B" => "__builtin_HEXAGON_V6_vfmin_sf_128B", + "llvm.hexagon.V6.vfneg.hf" => "__builtin_HEXAGON_V6_vfneg_hf", + "llvm.hexagon.V6.vfneg.hf.128B" => "__builtin_HEXAGON_V6_vfneg_hf_128B", + "llvm.hexagon.V6.vfneg.sf" => "__builtin_HEXAGON_V6_vfneg_sf", + "llvm.hexagon.V6.vfneg.sf.128B" => "__builtin_HEXAGON_V6_vfneg_sf_128B", + "llvm.hexagon.V6.vgathermh" => "__builtin_HEXAGON_V6_vgathermh", + "llvm.hexagon.V6.vgathermh.128B" => "__builtin_HEXAGON_V6_vgathermh_128B", + "llvm.hexagon.V6.vgathermhq" => "__builtin_HEXAGON_V6_vgathermhq", + "llvm.hexagon.V6.vgathermhq.128B" => "__builtin_HEXAGON_V6_vgathermhq_128B", + "llvm.hexagon.V6.vgathermhw" => "__builtin_HEXAGON_V6_vgathermhw", + "llvm.hexagon.V6.vgathermhw.128B" => "__builtin_HEXAGON_V6_vgathermhw_128B", + "llvm.hexagon.V6.vgathermhwq" => "__builtin_HEXAGON_V6_vgathermhwq", + "llvm.hexagon.V6.vgathermhwq.128B" => "__builtin_HEXAGON_V6_vgathermhwq_128B", + "llvm.hexagon.V6.vgathermw" => "__builtin_HEXAGON_V6_vgathermw", + "llvm.hexagon.V6.vgathermw.128B" => "__builtin_HEXAGON_V6_vgathermw_128B", + "llvm.hexagon.V6.vgathermwq" => "__builtin_HEXAGON_V6_vgathermwq", + "llvm.hexagon.V6.vgathermwq.128B" => "__builtin_HEXAGON_V6_vgathermwq_128B", + "llvm.hexagon.V6.vgtb" => "__builtin_HEXAGON_V6_vgtb", + "llvm.hexagon.V6.vgtb.128B" => "__builtin_HEXAGON_V6_vgtb_128B", + "llvm.hexagon.V6.vgtb.and" => "__builtin_HEXAGON_V6_vgtb_and", + "llvm.hexagon.V6.vgtb.and.128B" => "__builtin_HEXAGON_V6_vgtb_and_128B", + "llvm.hexagon.V6.vgtb.or" => "__builtin_HEXAGON_V6_vgtb_or", + "llvm.hexagon.V6.vgtb.or.128B" => "__builtin_HEXAGON_V6_vgtb_or_128B", + "llvm.hexagon.V6.vgtb.xor" => "__builtin_HEXAGON_V6_vgtb_xor", + "llvm.hexagon.V6.vgtb.xor.128B" => "__builtin_HEXAGON_V6_vgtb_xor_128B", + "llvm.hexagon.V6.vgtbf" => "__builtin_HEXAGON_V6_vgtbf", + "llvm.hexagon.V6.vgtbf.128B" => 
"__builtin_HEXAGON_V6_vgtbf_128B", + "llvm.hexagon.V6.vgtbf.and" => "__builtin_HEXAGON_V6_vgtbf_and", + "llvm.hexagon.V6.vgtbf.and.128B" => "__builtin_HEXAGON_V6_vgtbf_and_128B", + "llvm.hexagon.V6.vgtbf.or" => "__builtin_HEXAGON_V6_vgtbf_or", + "llvm.hexagon.V6.vgtbf.or.128B" => "__builtin_HEXAGON_V6_vgtbf_or_128B", + "llvm.hexagon.V6.vgtbf.xor" => "__builtin_HEXAGON_V6_vgtbf_xor", + "llvm.hexagon.V6.vgtbf.xor.128B" => "__builtin_HEXAGON_V6_vgtbf_xor_128B", + "llvm.hexagon.V6.vgth" => "__builtin_HEXAGON_V6_vgth", + "llvm.hexagon.V6.vgth.128B" => "__builtin_HEXAGON_V6_vgth_128B", + "llvm.hexagon.V6.vgth.and" => "__builtin_HEXAGON_V6_vgth_and", + "llvm.hexagon.V6.vgth.and.128B" => "__builtin_HEXAGON_V6_vgth_and_128B", + "llvm.hexagon.V6.vgth.or" => "__builtin_HEXAGON_V6_vgth_or", + "llvm.hexagon.V6.vgth.or.128B" => "__builtin_HEXAGON_V6_vgth_or_128B", + "llvm.hexagon.V6.vgth.xor" => "__builtin_HEXAGON_V6_vgth_xor", + "llvm.hexagon.V6.vgth.xor.128B" => "__builtin_HEXAGON_V6_vgth_xor_128B", + "llvm.hexagon.V6.vgthf" => "__builtin_HEXAGON_V6_vgthf", + "llvm.hexagon.V6.vgthf.128B" => "__builtin_HEXAGON_V6_vgthf_128B", + "llvm.hexagon.V6.vgthf.and" => "__builtin_HEXAGON_V6_vgthf_and", + "llvm.hexagon.V6.vgthf.and.128B" => "__builtin_HEXAGON_V6_vgthf_and_128B", + "llvm.hexagon.V6.vgthf.or" => "__builtin_HEXAGON_V6_vgthf_or", + "llvm.hexagon.V6.vgthf.or.128B" => "__builtin_HEXAGON_V6_vgthf_or_128B", + "llvm.hexagon.V6.vgthf.xor" => "__builtin_HEXAGON_V6_vgthf_xor", + "llvm.hexagon.V6.vgthf.xor.128B" => "__builtin_HEXAGON_V6_vgthf_xor_128B", + "llvm.hexagon.V6.vgtsf" => "__builtin_HEXAGON_V6_vgtsf", + "llvm.hexagon.V6.vgtsf.128B" => "__builtin_HEXAGON_V6_vgtsf_128B", + "llvm.hexagon.V6.vgtsf.and" => "__builtin_HEXAGON_V6_vgtsf_and", + "llvm.hexagon.V6.vgtsf.and.128B" => "__builtin_HEXAGON_V6_vgtsf_and_128B", + "llvm.hexagon.V6.vgtsf.or" => "__builtin_HEXAGON_V6_vgtsf_or", + "llvm.hexagon.V6.vgtsf.or.128B" => "__builtin_HEXAGON_V6_vgtsf_or_128B", + "llvm.hexagon.V6.vgtsf.xor" => "__builtin_HEXAGON_V6_vgtsf_xor", + "llvm.hexagon.V6.vgtsf.xor.128B" => "__builtin_HEXAGON_V6_vgtsf_xor_128B", + "llvm.hexagon.V6.vgtub" => "__builtin_HEXAGON_V6_vgtub", + "llvm.hexagon.V6.vgtub.128B" => "__builtin_HEXAGON_V6_vgtub_128B", + "llvm.hexagon.V6.vgtub.and" => "__builtin_HEXAGON_V6_vgtub_and", + "llvm.hexagon.V6.vgtub.and.128B" => "__builtin_HEXAGON_V6_vgtub_and_128B", + "llvm.hexagon.V6.vgtub.or" => "__builtin_HEXAGON_V6_vgtub_or", + "llvm.hexagon.V6.vgtub.or.128B" => "__builtin_HEXAGON_V6_vgtub_or_128B", + "llvm.hexagon.V6.vgtub.xor" => "__builtin_HEXAGON_V6_vgtub_xor", + "llvm.hexagon.V6.vgtub.xor.128B" => "__builtin_HEXAGON_V6_vgtub_xor_128B", + "llvm.hexagon.V6.vgtuh" => "__builtin_HEXAGON_V6_vgtuh", + "llvm.hexagon.V6.vgtuh.128B" => "__builtin_HEXAGON_V6_vgtuh_128B", + "llvm.hexagon.V6.vgtuh.and" => "__builtin_HEXAGON_V6_vgtuh_and", + "llvm.hexagon.V6.vgtuh.and.128B" => "__builtin_HEXAGON_V6_vgtuh_and_128B", + "llvm.hexagon.V6.vgtuh.or" => "__builtin_HEXAGON_V6_vgtuh_or", + "llvm.hexagon.V6.vgtuh.or.128B" => "__builtin_HEXAGON_V6_vgtuh_or_128B", + "llvm.hexagon.V6.vgtuh.xor" => "__builtin_HEXAGON_V6_vgtuh_xor", + "llvm.hexagon.V6.vgtuh.xor.128B" => "__builtin_HEXAGON_V6_vgtuh_xor_128B", + "llvm.hexagon.V6.vgtuw" => "__builtin_HEXAGON_V6_vgtuw", + "llvm.hexagon.V6.vgtuw.128B" => "__builtin_HEXAGON_V6_vgtuw_128B", + "llvm.hexagon.V6.vgtuw.and" => "__builtin_HEXAGON_V6_vgtuw_and", + "llvm.hexagon.V6.vgtuw.and.128B" => "__builtin_HEXAGON_V6_vgtuw_and_128B", + "llvm.hexagon.V6.vgtuw.or" => 
"__builtin_HEXAGON_V6_vgtuw_or", + "llvm.hexagon.V6.vgtuw.or.128B" => "__builtin_HEXAGON_V6_vgtuw_or_128B", + "llvm.hexagon.V6.vgtuw.xor" => "__builtin_HEXAGON_V6_vgtuw_xor", + "llvm.hexagon.V6.vgtuw.xor.128B" => "__builtin_HEXAGON_V6_vgtuw_xor_128B", + "llvm.hexagon.V6.vgtw" => "__builtin_HEXAGON_V6_vgtw", + "llvm.hexagon.V6.vgtw.128B" => "__builtin_HEXAGON_V6_vgtw_128B", + "llvm.hexagon.V6.vgtw.and" => "__builtin_HEXAGON_V6_vgtw_and", + "llvm.hexagon.V6.vgtw.and.128B" => "__builtin_HEXAGON_V6_vgtw_and_128B", + "llvm.hexagon.V6.vgtw.or" => "__builtin_HEXAGON_V6_vgtw_or", + "llvm.hexagon.V6.vgtw.or.128B" => "__builtin_HEXAGON_V6_vgtw_or_128B", + "llvm.hexagon.V6.vgtw.xor" => "__builtin_HEXAGON_V6_vgtw_xor", + "llvm.hexagon.V6.vgtw.xor.128B" => "__builtin_HEXAGON_V6_vgtw_xor_128B", "llvm.hexagon.V6.vinsertwr" => "__builtin_HEXAGON_V6_vinsertwr", "llvm.hexagon.V6.vinsertwr.128B" => "__builtin_HEXAGON_V6_vinsertwr_128B", "llvm.hexagon.V6.vlalignb" => "__builtin_HEXAGON_V6_vlalignb", "llvm.hexagon.V6.vlalignb.128B" => "__builtin_HEXAGON_V6_vlalignb_128B", "llvm.hexagon.V6.vlalignbi" => "__builtin_HEXAGON_V6_vlalignbi", "llvm.hexagon.V6.vlalignbi.128B" => "__builtin_HEXAGON_V6_vlalignbi_128B", + "llvm.hexagon.V6.vlsrb" => "__builtin_HEXAGON_V6_vlsrb", + "llvm.hexagon.V6.vlsrb.128B" => "__builtin_HEXAGON_V6_vlsrb_128B", "llvm.hexagon.V6.vlsrh" => "__builtin_HEXAGON_V6_vlsrh", "llvm.hexagon.V6.vlsrh.128B" => "__builtin_HEXAGON_V6_vlsrh_128B", "llvm.hexagon.V6.vlsrhv" => "__builtin_HEXAGON_V6_vlsrhv", @@ -1310,6 +1743,8 @@ match name { "llvm.hexagon.V6.vlsrw.128B" => "__builtin_HEXAGON_V6_vlsrw_128B", "llvm.hexagon.V6.vlsrwv" => "__builtin_HEXAGON_V6_vlsrwv", "llvm.hexagon.V6.vlsrwv.128B" => "__builtin_HEXAGON_V6_vlsrwv_128B", + "llvm.hexagon.V6.vlut4" => "__builtin_HEXAGON_V6_vlut4", + "llvm.hexagon.V6.vlut4.128B" => "__builtin_HEXAGON_V6_vlut4_128B", "llvm.hexagon.V6.vlutb" => "__builtin_HEXAGON_V6_vlutb", "llvm.hexagon.V6.vlutb.128B" => "__builtin_HEXAGON_V6_vlutb_128B", "llvm.hexagon.V6.vlutb.acc" => "__builtin_HEXAGON_V6_vlutb_acc", @@ -1320,12 +1755,32 @@ match name { "llvm.hexagon.V6.vlutb.dv.acc.128B" => "__builtin_HEXAGON_V6_vlutb_dv_acc_128B", "llvm.hexagon.V6.vlutvvb" => "__builtin_HEXAGON_V6_vlutvvb", "llvm.hexagon.V6.vlutvvb.128B" => "__builtin_HEXAGON_V6_vlutvvb_128B", + "llvm.hexagon.V6.vlutvvb.nm" => "__builtin_HEXAGON_V6_vlutvvb_nm", + "llvm.hexagon.V6.vlutvvb.nm.128B" => "__builtin_HEXAGON_V6_vlutvvb_nm_128B", "llvm.hexagon.V6.vlutvvb.oracc" => "__builtin_HEXAGON_V6_vlutvvb_oracc", "llvm.hexagon.V6.vlutvvb.oracc.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracc_128B", + "llvm.hexagon.V6.vlutvvb.oracci" => "__builtin_HEXAGON_V6_vlutvvb_oracci", + "llvm.hexagon.V6.vlutvvb.oracci.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracci_128B", + "llvm.hexagon.V6.vlutvvbi" => "__builtin_HEXAGON_V6_vlutvvbi", + "llvm.hexagon.V6.vlutvvbi.128B" => "__builtin_HEXAGON_V6_vlutvvbi_128B", "llvm.hexagon.V6.vlutvwh" => "__builtin_HEXAGON_V6_vlutvwh", "llvm.hexagon.V6.vlutvwh.128B" => "__builtin_HEXAGON_V6_vlutvwh_128B", + "llvm.hexagon.V6.vlutvwh.nm" => "__builtin_HEXAGON_V6_vlutvwh_nm", + "llvm.hexagon.V6.vlutvwh.nm.128B" => "__builtin_HEXAGON_V6_vlutvwh_nm_128B", "llvm.hexagon.V6.vlutvwh.oracc" => "__builtin_HEXAGON_V6_vlutvwh_oracc", "llvm.hexagon.V6.vlutvwh.oracc.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracc_128B", + "llvm.hexagon.V6.vlutvwh.oracci" => "__builtin_HEXAGON_V6_vlutvwh_oracci", + "llvm.hexagon.V6.vlutvwh.oracci.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracci_128B", + "llvm.hexagon.V6.vlutvwhi" 
=> "__builtin_HEXAGON_V6_vlutvwhi", + "llvm.hexagon.V6.vlutvwhi.128B" => "__builtin_HEXAGON_V6_vlutvwhi_128B", + "llvm.hexagon.V6.vmax.bf" => "__builtin_HEXAGON_V6_vmax_bf", + "llvm.hexagon.V6.vmax.bf.128B" => "__builtin_HEXAGON_V6_vmax_bf_128B", + "llvm.hexagon.V6.vmax.hf" => "__builtin_HEXAGON_V6_vmax_hf", + "llvm.hexagon.V6.vmax.hf.128B" => "__builtin_HEXAGON_V6_vmax_hf_128B", + "llvm.hexagon.V6.vmax.sf" => "__builtin_HEXAGON_V6_vmax_sf", + "llvm.hexagon.V6.vmax.sf.128B" => "__builtin_HEXAGON_V6_vmax_sf_128B", + "llvm.hexagon.V6.vmaxb" => "__builtin_HEXAGON_V6_vmaxb", + "llvm.hexagon.V6.vmaxb.128B" => "__builtin_HEXAGON_V6_vmaxb_128B", "llvm.hexagon.V6.vmaxh" => "__builtin_HEXAGON_V6_vmaxh", "llvm.hexagon.V6.vmaxh.128B" => "__builtin_HEXAGON_V6_vmaxh_128B", "llvm.hexagon.V6.vmaxub" => "__builtin_HEXAGON_V6_vmaxub", @@ -1334,6 +1789,14 @@ match name { "llvm.hexagon.V6.vmaxuh.128B" => "__builtin_HEXAGON_V6_vmaxuh_128B", "llvm.hexagon.V6.vmaxw" => "__builtin_HEXAGON_V6_vmaxw", "llvm.hexagon.V6.vmaxw.128B" => "__builtin_HEXAGON_V6_vmaxw_128B", + "llvm.hexagon.V6.vmin.bf" => "__builtin_HEXAGON_V6_vmin_bf", + "llvm.hexagon.V6.vmin.bf.128B" => "__builtin_HEXAGON_V6_vmin_bf_128B", + "llvm.hexagon.V6.vmin.hf" => "__builtin_HEXAGON_V6_vmin_hf", + "llvm.hexagon.V6.vmin.hf.128B" => "__builtin_HEXAGON_V6_vmin_hf_128B", + "llvm.hexagon.V6.vmin.sf" => "__builtin_HEXAGON_V6_vmin_sf", + "llvm.hexagon.V6.vmin.sf.128B" => "__builtin_HEXAGON_V6_vmin_sf_128B", + "llvm.hexagon.V6.vminb" => "__builtin_HEXAGON_V6_vminb", + "llvm.hexagon.V6.vminb.128B" => "__builtin_HEXAGON_V6_vminb_128B", "llvm.hexagon.V6.vminh" => "__builtin_HEXAGON_V6_vminh", "llvm.hexagon.V6.vminh.128B" => "__builtin_HEXAGON_V6_vminh_128B", "llvm.hexagon.V6.vminub" => "__builtin_HEXAGON_V6_vminub", @@ -1348,12 +1811,56 @@ match name { "llvm.hexagon.V6.vmpabus.acc.128B" => "__builtin_HEXAGON_V6_vmpabus_acc_128B", "llvm.hexagon.V6.vmpabusv" => "__builtin_HEXAGON_V6_vmpabusv", "llvm.hexagon.V6.vmpabusv.128B" => "__builtin_HEXAGON_V6_vmpabusv_128B", + "llvm.hexagon.V6.vmpabuu" => "__builtin_HEXAGON_V6_vmpabuu", + "llvm.hexagon.V6.vmpabuu.128B" => "__builtin_HEXAGON_V6_vmpabuu_128B", + "llvm.hexagon.V6.vmpabuu.acc" => "__builtin_HEXAGON_V6_vmpabuu_acc", + "llvm.hexagon.V6.vmpabuu.acc.128B" => "__builtin_HEXAGON_V6_vmpabuu_acc_128B", "llvm.hexagon.V6.vmpabuuv" => "__builtin_HEXAGON_V6_vmpabuuv", "llvm.hexagon.V6.vmpabuuv.128B" => "__builtin_HEXAGON_V6_vmpabuuv_128B", "llvm.hexagon.V6.vmpahb" => "__builtin_HEXAGON_V6_vmpahb", "llvm.hexagon.V6.vmpahb.128B" => "__builtin_HEXAGON_V6_vmpahb_128B", "llvm.hexagon.V6.vmpahb.acc" => "__builtin_HEXAGON_V6_vmpahb_acc", "llvm.hexagon.V6.vmpahb.acc.128B" => "__builtin_HEXAGON_V6_vmpahb_acc_128B", + "llvm.hexagon.V6.vmpahhsat" => "__builtin_HEXAGON_V6_vmpahhsat", + "llvm.hexagon.V6.vmpahhsat.128B" => "__builtin_HEXAGON_V6_vmpahhsat_128B", + "llvm.hexagon.V6.vmpauhb" => "__builtin_HEXAGON_V6_vmpauhb", + "llvm.hexagon.V6.vmpauhb.128B" => "__builtin_HEXAGON_V6_vmpauhb_128B", + "llvm.hexagon.V6.vmpauhb.acc" => "__builtin_HEXAGON_V6_vmpauhb_acc", + "llvm.hexagon.V6.vmpauhb.acc.128B" => "__builtin_HEXAGON_V6_vmpauhb_acc_128B", + "llvm.hexagon.V6.vmpauhuhsat" => "__builtin_HEXAGON_V6_vmpauhuhsat", + "llvm.hexagon.V6.vmpauhuhsat.128B" => "__builtin_HEXAGON_V6_vmpauhuhsat_128B", + "llvm.hexagon.V6.vmpsuhuhsat" => "__builtin_HEXAGON_V6_vmpsuhuhsat", + "llvm.hexagon.V6.vmpsuhuhsat.128B" => "__builtin_HEXAGON_V6_vmpsuhuhsat_128B", + "llvm.hexagon.V6.vmpy.hf.hf" => "__builtin_HEXAGON_V6_vmpy_hf_hf", + 
"llvm.hexagon.V6.vmpy.hf.hf.128B" => "__builtin_HEXAGON_V6_vmpy_hf_hf_128B", + "llvm.hexagon.V6.vmpy.hf.hf.acc" => "__builtin_HEXAGON_V6_vmpy_hf_hf_acc", + "llvm.hexagon.V6.vmpy.hf.hf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_hf_hf_acc_128B", + "llvm.hexagon.V6.vmpy.qf16" => "__builtin_HEXAGON_V6_vmpy_qf16", + "llvm.hexagon.V6.vmpy.qf16.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_128B", + "llvm.hexagon.V6.vmpy.qf16.hf" => "__builtin_HEXAGON_V6_vmpy_qf16_hf", + "llvm.hexagon.V6.vmpy.qf16.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_hf_128B", + "llvm.hexagon.V6.vmpy.qf16.mix.hf" => "__builtin_HEXAGON_V6_vmpy_qf16_mix_hf", + "llvm.hexagon.V6.vmpy.qf16.mix.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_mix_hf_128B", + "llvm.hexagon.V6.vmpy.qf32" => "__builtin_HEXAGON_V6_vmpy_qf32", + "llvm.hexagon.V6.vmpy.qf32.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_128B", + "llvm.hexagon.V6.vmpy.qf32.hf" => "__builtin_HEXAGON_V6_vmpy_qf32_hf", + "llvm.hexagon.V6.vmpy.qf32.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_hf_128B", + "llvm.hexagon.V6.vmpy.qf32.mix.hf" => "__builtin_HEXAGON_V6_vmpy_qf32_mix_hf", + "llvm.hexagon.V6.vmpy.qf32.mix.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_mix_hf_128B", + "llvm.hexagon.V6.vmpy.qf32.qf16" => "__builtin_HEXAGON_V6_vmpy_qf32_qf16", + "llvm.hexagon.V6.vmpy.qf32.qf16.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_qf16_128B", + "llvm.hexagon.V6.vmpy.qf32.sf" => "__builtin_HEXAGON_V6_vmpy_qf32_sf", + "llvm.hexagon.V6.vmpy.qf32.sf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_sf_128B", + "llvm.hexagon.V6.vmpy.sf.bf" => "__builtin_HEXAGON_V6_vmpy_sf_bf", + "llvm.hexagon.V6.vmpy.sf.bf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_bf_128B", + "llvm.hexagon.V6.vmpy.sf.bf.acc" => "__builtin_HEXAGON_V6_vmpy_sf_bf_acc", + "llvm.hexagon.V6.vmpy.sf.bf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_sf_bf_acc_128B", + "llvm.hexagon.V6.vmpy.sf.hf" => "__builtin_HEXAGON_V6_vmpy_sf_hf", + "llvm.hexagon.V6.vmpy.sf.hf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_hf_128B", + "llvm.hexagon.V6.vmpy.sf.hf.acc" => "__builtin_HEXAGON_V6_vmpy_sf_hf_acc", + "llvm.hexagon.V6.vmpy.sf.hf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_sf_hf_acc_128B", + "llvm.hexagon.V6.vmpy.sf.sf" => "__builtin_HEXAGON_V6_vmpy_sf_sf", + "llvm.hexagon.V6.vmpy.sf.sf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_sf_128B", "llvm.hexagon.V6.vmpybus" => "__builtin_HEXAGON_V6_vmpybus", "llvm.hexagon.V6.vmpybus.128B" => "__builtin_HEXAGON_V6_vmpybus_128B", "llvm.hexagon.V6.vmpybus.acc" => "__builtin_HEXAGON_V6_vmpybus_acc", @@ -1368,8 +1875,12 @@ match name { "llvm.hexagon.V6.vmpybv.acc.128B" => "__builtin_HEXAGON_V6_vmpybv_acc_128B", "llvm.hexagon.V6.vmpyewuh" => "__builtin_HEXAGON_V6_vmpyewuh", "llvm.hexagon.V6.vmpyewuh.128B" => "__builtin_HEXAGON_V6_vmpyewuh_128B", + "llvm.hexagon.V6.vmpyewuh.64" => "__builtin_HEXAGON_V6_vmpyewuh_64", + "llvm.hexagon.V6.vmpyewuh.64.128B" => "__builtin_HEXAGON_V6_vmpyewuh_64_128B", "llvm.hexagon.V6.vmpyh" => "__builtin_HEXAGON_V6_vmpyh", "llvm.hexagon.V6.vmpyh.128B" => "__builtin_HEXAGON_V6_vmpyh_128B", + "llvm.hexagon.V6.vmpyh.acc" => "__builtin_HEXAGON_V6_vmpyh_acc", + "llvm.hexagon.V6.vmpyh.acc.128B" => "__builtin_HEXAGON_V6_vmpyh_acc_128B", "llvm.hexagon.V6.vmpyhsat.acc" => "__builtin_HEXAGON_V6_vmpyhsat_acc", "llvm.hexagon.V6.vmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vmpyhsat_acc_128B", "llvm.hexagon.V6.vmpyhsrs" => "__builtin_HEXAGON_V6_vmpyhsrs", @@ -1412,8 +1923,14 @@ match name { "llvm.hexagon.V6.vmpyiwh.128B" => "__builtin_HEXAGON_V6_vmpyiwh_128B", "llvm.hexagon.V6.vmpyiwh.acc" => "__builtin_HEXAGON_V6_vmpyiwh_acc", 
"llvm.hexagon.V6.vmpyiwh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwh_acc_128B", + "llvm.hexagon.V6.vmpyiwub" => "__builtin_HEXAGON_V6_vmpyiwub", + "llvm.hexagon.V6.vmpyiwub.128B" => "__builtin_HEXAGON_V6_vmpyiwub_128B", + "llvm.hexagon.V6.vmpyiwub.acc" => "__builtin_HEXAGON_V6_vmpyiwub_acc", + "llvm.hexagon.V6.vmpyiwub.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwub_acc_128B", "llvm.hexagon.V6.vmpyowh" => "__builtin_HEXAGON_V6_vmpyowh", "llvm.hexagon.V6.vmpyowh.128B" => "__builtin_HEXAGON_V6_vmpyowh_128B", + "llvm.hexagon.V6.vmpyowh.64.acc" => "__builtin_HEXAGON_V6_vmpyowh_64_acc", + "llvm.hexagon.V6.vmpyowh.64.acc.128B" => "__builtin_HEXAGON_V6_vmpyowh_64_acc_128B", "llvm.hexagon.V6.vmpyowh.rnd" => "__builtin_HEXAGON_V6_vmpyowh_rnd", "llvm.hexagon.V6.vmpyowh.rnd.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_128B", "llvm.hexagon.V6.vmpyowh.rnd.sacc" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc", @@ -1432,10 +1949,20 @@ match name { "llvm.hexagon.V6.vmpyuh.128B" => "__builtin_HEXAGON_V6_vmpyuh_128B", "llvm.hexagon.V6.vmpyuh.acc" => "__builtin_HEXAGON_V6_vmpyuh_acc", "llvm.hexagon.V6.vmpyuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyuh_acc_128B", + "llvm.hexagon.V6.vmpyuhe" => "__builtin_HEXAGON_V6_vmpyuhe", + "llvm.hexagon.V6.vmpyuhe.128B" => "__builtin_HEXAGON_V6_vmpyuhe_128B", + "llvm.hexagon.V6.vmpyuhe.acc" => "__builtin_HEXAGON_V6_vmpyuhe_acc", + "llvm.hexagon.V6.vmpyuhe.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhe_acc_128B", "llvm.hexagon.V6.vmpyuhv" => "__builtin_HEXAGON_V6_vmpyuhv", "llvm.hexagon.V6.vmpyuhv.128B" => "__builtin_HEXAGON_V6_vmpyuhv_128B", "llvm.hexagon.V6.vmpyuhv.acc" => "__builtin_HEXAGON_V6_vmpyuhv_acc", "llvm.hexagon.V6.vmpyuhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhv_acc_128B", + "llvm.hexagon.V6.vmpyuhvs" => "__builtin_HEXAGON_V6_vmpyuhvs", + "llvm.hexagon.V6.vmpyuhvs.128B" => "__builtin_HEXAGON_V6_vmpyuhvs_128B", + "llvm.hexagon.V6.vmux" => "__builtin_HEXAGON_V6_vmux", + "llvm.hexagon.V6.vmux.128B" => "__builtin_HEXAGON_V6_vmux_128B", + "llvm.hexagon.V6.vnavgb" => "__builtin_HEXAGON_V6_vnavgb", + "llvm.hexagon.V6.vnavgb.128B" => "__builtin_HEXAGON_V6_vnavgb_128B", "llvm.hexagon.V6.vnavgh" => "__builtin_HEXAGON_V6_vnavgh", "llvm.hexagon.V6.vnavgh.128B" => "__builtin_HEXAGON_V6_vnavgh_128B", "llvm.hexagon.V6.vnavgub" => "__builtin_HEXAGON_V6_vnavgub", @@ -1468,8 +1995,18 @@ match name { "llvm.hexagon.V6.vpackwuh.sat.128B" => "__builtin_HEXAGON_V6_vpackwuh_sat_128B", "llvm.hexagon.V6.vpopcounth" => "__builtin_HEXAGON_V6_vpopcounth", "llvm.hexagon.V6.vpopcounth.128B" => "__builtin_HEXAGON_V6_vpopcounth_128B", + "llvm.hexagon.V6.vprefixqb" => "__builtin_HEXAGON_V6_vprefixqb", + "llvm.hexagon.V6.vprefixqb.128B" => "__builtin_HEXAGON_V6_vprefixqb_128B", + "llvm.hexagon.V6.vprefixqh" => "__builtin_HEXAGON_V6_vprefixqh", + "llvm.hexagon.V6.vprefixqh.128B" => "__builtin_HEXAGON_V6_vprefixqh_128B", + "llvm.hexagon.V6.vprefixqw" => "__builtin_HEXAGON_V6_vprefixqw", + "llvm.hexagon.V6.vprefixqw.128B" => "__builtin_HEXAGON_V6_vprefixqw_128B", "llvm.hexagon.V6.vrdelta" => "__builtin_HEXAGON_V6_vrdelta", "llvm.hexagon.V6.vrdelta.128B" => "__builtin_HEXAGON_V6_vrdelta_128B", + "llvm.hexagon.V6.vrmpybub.rtt" => "__builtin_HEXAGON_V6_vrmpybub_rtt", + "llvm.hexagon.V6.vrmpybub.rtt.128B" => "__builtin_HEXAGON_V6_vrmpybub_rtt_128B", + "llvm.hexagon.V6.vrmpybub.rtt.acc" => "__builtin_HEXAGON_V6_vrmpybub_rtt_acc", + "llvm.hexagon.V6.vrmpybub.rtt.acc.128B" => "__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B", "llvm.hexagon.V6.vrmpybus" => "__builtin_HEXAGON_V6_vrmpybus", "llvm.hexagon.V6.vrmpybus.128B" => 
"__builtin_HEXAGON_V6_vrmpybus_128B", "llvm.hexagon.V6.vrmpybus.acc" => "__builtin_HEXAGON_V6_vrmpybus_acc", @@ -1490,6 +2027,10 @@ match name { "llvm.hexagon.V6.vrmpyub.128B" => "__builtin_HEXAGON_V6_vrmpyub_128B", "llvm.hexagon.V6.vrmpyub.acc" => "__builtin_HEXAGON_V6_vrmpyub_acc", "llvm.hexagon.V6.vrmpyub.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_acc_128B", + "llvm.hexagon.V6.vrmpyub.rtt" => "__builtin_HEXAGON_V6_vrmpyub_rtt", + "llvm.hexagon.V6.vrmpyub.rtt.128B" => "__builtin_HEXAGON_V6_vrmpyub_rtt_128B", + "llvm.hexagon.V6.vrmpyub.rtt.acc" => "__builtin_HEXAGON_V6_vrmpyub_rtt_acc", + "llvm.hexagon.V6.vrmpyub.rtt.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B", "llvm.hexagon.V6.vrmpyubi" => "__builtin_HEXAGON_V6_vrmpyubi", "llvm.hexagon.V6.vrmpyubi.128B" => "__builtin_HEXAGON_V6_vrmpyubi_128B", "llvm.hexagon.V6.vrmpyubi.acc" => "__builtin_HEXAGON_V6_vrmpyubi_acc", @@ -1500,10 +2041,16 @@ match name { "llvm.hexagon.V6.vrmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubv_acc_128B", "llvm.hexagon.V6.vror" => "__builtin_HEXAGON_V6_vror", "llvm.hexagon.V6.vror.128B" => "__builtin_HEXAGON_V6_vror_128B", + "llvm.hexagon.V6.vrotr" => "__builtin_HEXAGON_V6_vrotr", + "llvm.hexagon.V6.vrotr.128B" => "__builtin_HEXAGON_V6_vrotr_128B", "llvm.hexagon.V6.vroundhb" => "__builtin_HEXAGON_V6_vroundhb", "llvm.hexagon.V6.vroundhb.128B" => "__builtin_HEXAGON_V6_vroundhb_128B", "llvm.hexagon.V6.vroundhub" => "__builtin_HEXAGON_V6_vroundhub", "llvm.hexagon.V6.vroundhub.128B" => "__builtin_HEXAGON_V6_vroundhub_128B", + "llvm.hexagon.V6.vrounduhub" => "__builtin_HEXAGON_V6_vrounduhub", + "llvm.hexagon.V6.vrounduhub.128B" => "__builtin_HEXAGON_V6_vrounduhub_128B", + "llvm.hexagon.V6.vrounduwuh" => "__builtin_HEXAGON_V6_vrounduwuh", + "llvm.hexagon.V6.vrounduwuh.128B" => "__builtin_HEXAGON_V6_vrounduwuh_128B", "llvm.hexagon.V6.vroundwh" => "__builtin_HEXAGON_V6_vroundwh", "llvm.hexagon.V6.vroundwh.128B" => "__builtin_HEXAGON_V6_vroundwh_128B", "llvm.hexagon.V6.vroundwuh" => "__builtin_HEXAGON_V6_vroundwuh", @@ -1512,12 +2059,34 @@ match name { "llvm.hexagon.V6.vrsadubi.128B" => "__builtin_HEXAGON_V6_vrsadubi_128B", "llvm.hexagon.V6.vrsadubi.acc" => "__builtin_HEXAGON_V6_vrsadubi_acc", "llvm.hexagon.V6.vrsadubi.acc.128B" => "__builtin_HEXAGON_V6_vrsadubi_acc_128B", + "llvm.hexagon.V6.vsatdw" => "__builtin_HEXAGON_V6_vsatdw", + "llvm.hexagon.V6.vsatdw.128B" => "__builtin_HEXAGON_V6_vsatdw_128B", "llvm.hexagon.V6.vsathub" => "__builtin_HEXAGON_V6_vsathub", "llvm.hexagon.V6.vsathub.128B" => "__builtin_HEXAGON_V6_vsathub_128B", + "llvm.hexagon.V6.vsatuwuh" => "__builtin_HEXAGON_V6_vsatuwuh", + "llvm.hexagon.V6.vsatuwuh.128B" => "__builtin_HEXAGON_V6_vsatuwuh_128B", "llvm.hexagon.V6.vsatwh" => "__builtin_HEXAGON_V6_vsatwh", "llvm.hexagon.V6.vsatwh.128B" => "__builtin_HEXAGON_V6_vsatwh_128B", "llvm.hexagon.V6.vsb" => "__builtin_HEXAGON_V6_vsb", "llvm.hexagon.V6.vsb.128B" => "__builtin_HEXAGON_V6_vsb_128B", + "llvm.hexagon.V6.vscattermh" => "__builtin_HEXAGON_V6_vscattermh", + "llvm.hexagon.V6.vscattermh.128B" => "__builtin_HEXAGON_V6_vscattermh_128B", + "llvm.hexagon.V6.vscattermh.add" => "__builtin_HEXAGON_V6_vscattermh_add", + "llvm.hexagon.V6.vscattermh.add.128B" => "__builtin_HEXAGON_V6_vscattermh_add_128B", + "llvm.hexagon.V6.vscattermhq" => "__builtin_HEXAGON_V6_vscattermhq", + "llvm.hexagon.V6.vscattermhq.128B" => "__builtin_HEXAGON_V6_vscattermhq_128B", + "llvm.hexagon.V6.vscattermhw" => "__builtin_HEXAGON_V6_vscattermhw", + "llvm.hexagon.V6.vscattermhw.128B" => 
"__builtin_HEXAGON_V6_vscattermhw_128B", + "llvm.hexagon.V6.vscattermhw.add" => "__builtin_HEXAGON_V6_vscattermhw_add", + "llvm.hexagon.V6.vscattermhw.add.128B" => "__builtin_HEXAGON_V6_vscattermhw_add_128B", + "llvm.hexagon.V6.vscattermhwq" => "__builtin_HEXAGON_V6_vscattermhwq", + "llvm.hexagon.V6.vscattermhwq.128B" => "__builtin_HEXAGON_V6_vscattermhwq_128B", + "llvm.hexagon.V6.vscattermw" => "__builtin_HEXAGON_V6_vscattermw", + "llvm.hexagon.V6.vscattermw.128B" => "__builtin_HEXAGON_V6_vscattermw_128B", + "llvm.hexagon.V6.vscattermw.add" => "__builtin_HEXAGON_V6_vscattermw_add", + "llvm.hexagon.V6.vscattermw.add.128B" => "__builtin_HEXAGON_V6_vscattermw_add_128B", + "llvm.hexagon.V6.vscattermwq" => "__builtin_HEXAGON_V6_vscattermwq", + "llvm.hexagon.V6.vscattermwq.128B" => "__builtin_HEXAGON_V6_vscattermwq_128B", "llvm.hexagon.V6.vsh" => "__builtin_HEXAGON_V6_vsh", "llvm.hexagon.V6.vsh.128B" => "__builtin_HEXAGON_V6_vsh_128B", "llvm.hexagon.V6.vshufeh" => "__builtin_HEXAGON_V6_vshufeh", @@ -1538,14 +2107,46 @@ match name { "llvm.hexagon.V6.vshufoeh.128B" => "__builtin_HEXAGON_V6_vshufoeh_128B", "llvm.hexagon.V6.vshufoh" => "__builtin_HEXAGON_V6_vshufoh", "llvm.hexagon.V6.vshufoh.128B" => "__builtin_HEXAGON_V6_vshufoh_128B", + "llvm.hexagon.V6.vsub.hf" => "__builtin_HEXAGON_V6_vsub_hf", + "llvm.hexagon.V6.vsub.hf.128B" => "__builtin_HEXAGON_V6_vsub_hf_128B", + "llvm.hexagon.V6.vsub.hf.hf" => "__builtin_HEXAGON_V6_vsub_hf_hf", + "llvm.hexagon.V6.vsub.hf.hf.128B" => "__builtin_HEXAGON_V6_vsub_hf_hf_128B", + "llvm.hexagon.V6.vsub.qf16" => "__builtin_HEXAGON_V6_vsub_qf16", + "llvm.hexagon.V6.vsub.qf16.128B" => "__builtin_HEXAGON_V6_vsub_qf16_128B", + "llvm.hexagon.V6.vsub.qf16.mix" => "__builtin_HEXAGON_V6_vsub_qf16_mix", + "llvm.hexagon.V6.vsub.qf16.mix.128B" => "__builtin_HEXAGON_V6_vsub_qf16_mix_128B", + "llvm.hexagon.V6.vsub.qf32" => "__builtin_HEXAGON_V6_vsub_qf32", + "llvm.hexagon.V6.vsub.qf32.128B" => "__builtin_HEXAGON_V6_vsub_qf32_128B", + "llvm.hexagon.V6.vsub.qf32.mix" => "__builtin_HEXAGON_V6_vsub_qf32_mix", + "llvm.hexagon.V6.vsub.qf32.mix.128B" => "__builtin_HEXAGON_V6_vsub_qf32_mix_128B", + "llvm.hexagon.V6.vsub.sf" => "__builtin_HEXAGON_V6_vsub_sf", + "llvm.hexagon.V6.vsub.sf.128B" => "__builtin_HEXAGON_V6_vsub_sf_128B", + "llvm.hexagon.V6.vsub.sf.bf" => "__builtin_HEXAGON_V6_vsub_sf_bf", + "llvm.hexagon.V6.vsub.sf.bf.128B" => "__builtin_HEXAGON_V6_vsub_sf_bf_128B", + "llvm.hexagon.V6.vsub.sf.hf" => "__builtin_HEXAGON_V6_vsub_sf_hf", + "llvm.hexagon.V6.vsub.sf.hf.128B" => "__builtin_HEXAGON_V6_vsub_sf_hf_128B", + "llvm.hexagon.V6.vsub.sf.sf" => "__builtin_HEXAGON_V6_vsub_sf_sf", + "llvm.hexagon.V6.vsub.sf.sf.128B" => "__builtin_HEXAGON_V6_vsub_sf_sf_128B", "llvm.hexagon.V6.vsubb" => "__builtin_HEXAGON_V6_vsubb", "llvm.hexagon.V6.vsubb.128B" => "__builtin_HEXAGON_V6_vsubb_128B", "llvm.hexagon.V6.vsubb.dv" => "__builtin_HEXAGON_V6_vsubb_dv", "llvm.hexagon.V6.vsubb.dv.128B" => "__builtin_HEXAGON_V6_vsubb_dv_128B", + "llvm.hexagon.V6.vsubbnq" => "__builtin_HEXAGON_V6_vsubbnq", + "llvm.hexagon.V6.vsubbnq.128B" => "__builtin_HEXAGON_V6_vsubbnq_128B", + "llvm.hexagon.V6.vsubbq" => "__builtin_HEXAGON_V6_vsubbq", + "llvm.hexagon.V6.vsubbq.128B" => "__builtin_HEXAGON_V6_vsubbq_128B", + "llvm.hexagon.V6.vsubbsat" => "__builtin_HEXAGON_V6_vsubbsat", + "llvm.hexagon.V6.vsubbsat.128B" => "__builtin_HEXAGON_V6_vsubbsat_128B", + "llvm.hexagon.V6.vsubbsat.dv" => "__builtin_HEXAGON_V6_vsubbsat_dv", + "llvm.hexagon.V6.vsubbsat.dv.128B" => "__builtin_HEXAGON_V6_vsubbsat_dv_128B", 
"llvm.hexagon.V6.vsubh" => "__builtin_HEXAGON_V6_vsubh", "llvm.hexagon.V6.vsubh.128B" => "__builtin_HEXAGON_V6_vsubh_128B", "llvm.hexagon.V6.vsubh.dv" => "__builtin_HEXAGON_V6_vsubh_dv", "llvm.hexagon.V6.vsubh.dv.128B" => "__builtin_HEXAGON_V6_vsubh_dv_128B", + "llvm.hexagon.V6.vsubhnq" => "__builtin_HEXAGON_V6_vsubhnq", + "llvm.hexagon.V6.vsubhnq.128B" => "__builtin_HEXAGON_V6_vsubhnq_128B", + "llvm.hexagon.V6.vsubhq" => "__builtin_HEXAGON_V6_vsubhq", + "llvm.hexagon.V6.vsubhq.128B" => "__builtin_HEXAGON_V6_vsubhq_128B", "llvm.hexagon.V6.vsubhsat" => "__builtin_HEXAGON_V6_vsubhsat", "llvm.hexagon.V6.vsubhsat.128B" => "__builtin_HEXAGON_V6_vsubhsat_128B", "llvm.hexagon.V6.vsubhsat.dv" => "__builtin_HEXAGON_V6_vsubhsat_dv", @@ -1558,20 +2159,32 @@ match name { "llvm.hexagon.V6.vsububsat.128B" => "__builtin_HEXAGON_V6_vsububsat_128B", "llvm.hexagon.V6.vsububsat.dv" => "__builtin_HEXAGON_V6_vsububsat_dv", "llvm.hexagon.V6.vsububsat.dv.128B" => "__builtin_HEXAGON_V6_vsububsat_dv_128B", + "llvm.hexagon.V6.vsubububb.sat" => "__builtin_HEXAGON_V6_vsubububb_sat", + "llvm.hexagon.V6.vsubububb.sat.128B" => "__builtin_HEXAGON_V6_vsubububb_sat_128B", "llvm.hexagon.V6.vsubuhsat" => "__builtin_HEXAGON_V6_vsubuhsat", "llvm.hexagon.V6.vsubuhsat.128B" => "__builtin_HEXAGON_V6_vsubuhsat_128B", "llvm.hexagon.V6.vsubuhsat.dv" => "__builtin_HEXAGON_V6_vsubuhsat_dv", "llvm.hexagon.V6.vsubuhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuhsat_dv_128B", "llvm.hexagon.V6.vsubuhw" => "__builtin_HEXAGON_V6_vsubuhw", "llvm.hexagon.V6.vsubuhw.128B" => "__builtin_HEXAGON_V6_vsubuhw_128B", + "llvm.hexagon.V6.vsubuwsat" => "__builtin_HEXAGON_V6_vsubuwsat", + "llvm.hexagon.V6.vsubuwsat.128B" => "__builtin_HEXAGON_V6_vsubuwsat_128B", + "llvm.hexagon.V6.vsubuwsat.dv" => "__builtin_HEXAGON_V6_vsubuwsat_dv", + "llvm.hexagon.V6.vsubuwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuwsat_dv_128B", "llvm.hexagon.V6.vsubw" => "__builtin_HEXAGON_V6_vsubw", "llvm.hexagon.V6.vsubw.128B" => "__builtin_HEXAGON_V6_vsubw_128B", "llvm.hexagon.V6.vsubw.dv" => "__builtin_HEXAGON_V6_vsubw_dv", "llvm.hexagon.V6.vsubw.dv.128B" => "__builtin_HEXAGON_V6_vsubw_dv_128B", + "llvm.hexagon.V6.vsubwnq" => "__builtin_HEXAGON_V6_vsubwnq", + "llvm.hexagon.V6.vsubwnq.128B" => "__builtin_HEXAGON_V6_vsubwnq_128B", + "llvm.hexagon.V6.vsubwq" => "__builtin_HEXAGON_V6_vsubwq", + "llvm.hexagon.V6.vsubwq.128B" => "__builtin_HEXAGON_V6_vsubwq_128B", "llvm.hexagon.V6.vsubwsat" => "__builtin_HEXAGON_V6_vsubwsat", "llvm.hexagon.V6.vsubwsat.128B" => "__builtin_HEXAGON_V6_vsubwsat_128B", "llvm.hexagon.V6.vsubwsat.dv" => "__builtin_HEXAGON_V6_vsubwsat_dv", "llvm.hexagon.V6.vsubwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubwsat_dv_128B", + "llvm.hexagon.V6.vswap" => "__builtin_HEXAGON_V6_vswap", + "llvm.hexagon.V6.vswap.128B" => "__builtin_HEXAGON_V6_vswap_128B", "llvm.hexagon.V6.vtmpyb" => "__builtin_HEXAGON_V6_vtmpyb", "llvm.hexagon.V6.vtmpyb.128B" => "__builtin_HEXAGON_V6_vtmpyb_128B", "llvm.hexagon.V6.vtmpyb.acc" => "__builtin_HEXAGON_V6_vtmpyb_acc", @@ -1602,6 +2215,19 @@ match name { "llvm.hexagon.V6.vzb.128B" => "__builtin_HEXAGON_V6_vzb_128B", "llvm.hexagon.V6.vzh" => "__builtin_HEXAGON_V6_vzh", "llvm.hexagon.V6.vzh.128B" => "__builtin_HEXAGON_V6_vzh_128B", + "llvm.hexagon.Y2.dccleana" => "__builtin_HEXAGON_Y2_dccleana", + "llvm.hexagon.Y2.dccleaninva" => "__builtin_HEXAGON_Y2_dccleaninva", + "llvm.hexagon.Y2.dcfetch" => "__builtin_HEXAGON_Y2_dcfetch", + "llvm.hexagon.Y2.dcinva" => "__builtin_HEXAGON_Y2_dcinva", + "llvm.hexagon.Y2.dczeroa" => "__builtin_HEXAGON_Y2_dczeroa", 
+ "llvm.hexagon.Y4.l2fetch" => "__builtin_HEXAGON_Y4_l2fetch", + "llvm.hexagon.Y5.l2fetch" => "__builtin_HEXAGON_Y5_l2fetch", + "llvm.hexagon.Y6.dmlink" => "__builtin_HEXAGON_Y6_dmlink", + "llvm.hexagon.Y6.dmpause" => "__builtin_HEXAGON_Y6_dmpause", + "llvm.hexagon.Y6.dmpoll" => "__builtin_HEXAGON_Y6_dmpoll", + "llvm.hexagon.Y6.dmresume" => "__builtin_HEXAGON_Y6_dmresume", + "llvm.hexagon.Y6.dmstart" => "__builtin_HEXAGON_Y6_dmstart", + "llvm.hexagon.Y6.dmwait" => "__builtin_HEXAGON_Y6_dmwait", "llvm.hexagon.brev.ldb" => "__builtin_brev_ldb", "llvm.hexagon.brev.ldd" => "__builtin_brev_ldd", "llvm.hexagon.brev.ldh" => "__builtin_brev_ldh", @@ -1626,6 +2252,8 @@ match name { "llvm.hexagon.circ.stw" => "__builtin_circ_stw", "llvm.hexagon.mm256i.vaddw" => "__builtin__mm256i_vaddw", "llvm.hexagon.prefetch" => "__builtin_HEXAGON_prefetch", + "llvm.hexagon.vmemcpy" => "__builtin_hexagon_vmemcpy", + "llvm.hexagon.vmemset" => "__builtin_hexagon_vmemset", // mips "llvm.mips.absq.s.ph" => "__builtin_mips_absq_s_ph", "llvm.mips.absq.s.qb" => "__builtin_mips_absq_s_qb", @@ -2299,6 +2927,8 @@ match name { "llvm.mips.xor.v" => "__builtin_msa_xor_v", "llvm.mips.xori.b" => "__builtin_msa_xori_b", // nvvm + "llvm.nvvm.abs.bf16" => "__nvvm_abs_bf16", + "llvm.nvvm.abs.bf16x2" => "__nvvm_abs_bf16x2", "llvm.nvvm.abs.i" => "__nvvm_abs_i", "llvm.nvvm.abs.ll" => "__nvvm_abs_ll", "llvm.nvvm.add.rm.d" => "__nvvm_add_rm_d", @@ -2314,8 +2944,13 @@ match name { "llvm.nvvm.add.rz.f" => "__nvvm_add_rz_f", "llvm.nvvm.add.rz.ftz.f" => "__nvvm_add_rz_ftz_f", "llvm.nvvm.bar.sync" => "__nvvm_bar_sync", - "llvm.nvvm.barrier0" => "__nvvm_bar0", - // [DUPLICATE]: "llvm.nvvm.barrier0" => "__syncthreads", + "llvm.nvvm.bar.warp.sync" => "__nvvm_bar_warp_sync", + "llvm.nvvm.barrier" => "__nvvm_bar", + "llvm.nvvm.barrier.n" => "__nvvm_bar_n", + "llvm.nvvm.barrier.sync" => "__nvvm_barrier_sync", + "llvm.nvvm.barrier.sync.cnt" => "__nvvm_barrier_sync_cnt", + "llvm.nvvm.barrier0" => "__syncthreads", + // [DUPLICATE]: "llvm.nvvm.barrier0" => "__nvvm_bar0", "llvm.nvvm.barrier0.and" => "__nvvm_bar0_and", "llvm.nvvm.barrier0.or" => "__nvvm_bar0_or", "llvm.nvvm.barrier0.popc" => "__nvvm_bar0_popc", @@ -2332,6 +2967,17 @@ match name { "llvm.nvvm.clz.ll" => "__nvvm_clz_ll", "llvm.nvvm.cos.approx.f" => "__nvvm_cos_approx_f", "llvm.nvvm.cos.approx.ftz.f" => "__nvvm_cos_approx_ftz_f", + "llvm.nvvm.cp.async.ca.shared.global.16" => "__nvvm_cp_async_ca_shared_global_16", + "llvm.nvvm.cp.async.ca.shared.global.4" => "__nvvm_cp_async_ca_shared_global_4", + "llvm.nvvm.cp.async.ca.shared.global.8" => "__nvvm_cp_async_ca_shared_global_8", + "llvm.nvvm.cp.async.cg.shared.global.16" => "__nvvm_cp_async_cg_shared_global_16", + "llvm.nvvm.cp.async.commit.group" => "__nvvm_cp_async_commit_group", + "llvm.nvvm.cp.async.mbarrier.arrive" => "__nvvm_cp_async_mbarrier_arrive", + "llvm.nvvm.cp.async.mbarrier.arrive.noinc" => "__nvvm_cp_async_mbarrier_arrive_noinc", + "llvm.nvvm.cp.async.mbarrier.arrive.noinc.shared" => "__nvvm_cp_async_mbarrier_arrive_noinc_shared", + "llvm.nvvm.cp.async.mbarrier.arrive.shared" => "__nvvm_cp_async_mbarrier_arrive_shared", + "llvm.nvvm.cp.async.wait.all" => "__nvvm_cp_async_wait_all", + "llvm.nvvm.cp.async.wait.group" => "__nvvm_cp_async_wait_group", "llvm.nvvm.d2f.rm" => "__nvvm_d2f_rm", "llvm.nvvm.d2f.rm.ftz" => "__nvvm_d2f_rm_ftz", "llvm.nvvm.d2f.rn" => "__nvvm_d2f_rn", @@ -2374,7 +3020,13 @@ match name { "llvm.nvvm.div.rz.ftz.f" => "__nvvm_div_rz_ftz_f", "llvm.nvvm.ex2.approx.d" => "__nvvm_ex2_approx_d", "llvm.nvvm.ex2.approx.f" 
=> "__nvvm_ex2_approx_f", + "llvm.nvvm.ex2.approx.f16" => "__nvvm_ex2_approx_f16", + "llvm.nvvm.ex2.approx.f16x2" => "__nvvm_ex2_approx_f16x2", "llvm.nvvm.ex2.approx.ftz.f" => "__nvvm_ex2_approx_ftz_f", + "llvm.nvvm.f2bf16.rn" => "__nvvm_f2bf16_rn", + "llvm.nvvm.f2bf16.rn.relu" => "__nvvm_f2bf16_rn_relu", + "llvm.nvvm.f2bf16.rz" => "__nvvm_f2bf16_rz", + "llvm.nvvm.f2bf16.rz.relu" => "__nvvm_f2bf16_rz_relu", "llvm.nvvm.f2h.rn" => "__nvvm_f2h_rn", "llvm.nvvm.f2h.rn.ftz" => "__nvvm_f2h_rn_ftz", "llvm.nvvm.f2i.rm" => "__nvvm_f2i_rm", @@ -2393,6 +3045,7 @@ match name { "llvm.nvvm.f2ll.rp.ftz" => "__nvvm_f2ll_rp_ftz", "llvm.nvvm.f2ll.rz" => "__nvvm_f2ll_rz", "llvm.nvvm.f2ll.rz.ftz" => "__nvvm_f2ll_rz_ftz", + "llvm.nvvm.f2tf32.rna" => "__nvvm_f2tf32_rna", "llvm.nvvm.f2ui.rm" => "__nvvm_f2ui_rm", "llvm.nvvm.f2ui.rm.ftz" => "__nvvm_f2ui_rm_ftz", "llvm.nvvm.f2ui.rn" => "__nvvm_f2ui_rn", @@ -2412,27 +3065,112 @@ match name { "llvm.nvvm.fabs.d" => "__nvvm_fabs_d", "llvm.nvvm.fabs.f" => "__nvvm_fabs_f", "llvm.nvvm.fabs.ftz.f" => "__nvvm_fabs_ftz_f", + "llvm.nvvm.ff2bf16x2.rn" => "__nvvm_ff2bf16x2_rn", + "llvm.nvvm.ff2bf16x2.rn.relu" => "__nvvm_ff2bf16x2_rn_relu", + "llvm.nvvm.ff2bf16x2.rz" => "__nvvm_ff2bf16x2_rz", + "llvm.nvvm.ff2bf16x2.rz.relu" => "__nvvm_ff2bf16x2_rz_relu", + "llvm.nvvm.ff2f16x2.rn" => "__nvvm_ff2f16x2_rn", + "llvm.nvvm.ff2f16x2.rn.relu" => "__nvvm_ff2f16x2_rn_relu", + "llvm.nvvm.ff2f16x2.rz" => "__nvvm_ff2f16x2_rz", + "llvm.nvvm.ff2f16x2.rz.relu" => "__nvvm_ff2f16x2_rz_relu", "llvm.nvvm.floor.d" => "__nvvm_floor_d", "llvm.nvvm.floor.f" => "__nvvm_floor_f", "llvm.nvvm.floor.ftz.f" => "__nvvm_floor_ftz_f", "llvm.nvvm.fma.rm.d" => "__nvvm_fma_rm_d", "llvm.nvvm.fma.rm.f" => "__nvvm_fma_rm_f", "llvm.nvvm.fma.rm.ftz.f" => "__nvvm_fma_rm_ftz_f", + "llvm.nvvm.fma.rn.bf16" => "__nvvm_fma_rn_bf16", + "llvm.nvvm.fma.rn.bf16x2" => "__nvvm_fma_rn_bf16x2", "llvm.nvvm.fma.rn.d" => "__nvvm_fma_rn_d", "llvm.nvvm.fma.rn.f" => "__nvvm_fma_rn_f", + "llvm.nvvm.fma.rn.f16" => "__nvvm_fma_rn_f16", + "llvm.nvvm.fma.rn.f16x2" => "__nvvm_fma_rn_f16x2", "llvm.nvvm.fma.rn.ftz.f" => "__nvvm_fma_rn_ftz_f", + "llvm.nvvm.fma.rn.ftz.f16" => "__nvvm_fma_rn_ftz_f16", + "llvm.nvvm.fma.rn.ftz.f16x2" => "__nvvm_fma_rn_ftz_f16x2", + "llvm.nvvm.fma.rn.ftz.relu.f16" => "__nvvm_fma_rn_ftz_relu_f16", + "llvm.nvvm.fma.rn.ftz.relu.f16x2" => "__nvvm_fma_rn_ftz_relu_f16x2", + "llvm.nvvm.fma.rn.ftz.sat.f16" => "__nvvm_fma_rn_ftz_sat_f16", + "llvm.nvvm.fma.rn.ftz.sat.f16x2" => "__nvvm_fma_rn_ftz_sat_f16x2", + "llvm.nvvm.fma.rn.relu.bf16" => "__nvvm_fma_rn_relu_bf16", + "llvm.nvvm.fma.rn.relu.bf16x2" => "__nvvm_fma_rn_relu_bf16x2", + "llvm.nvvm.fma.rn.relu.f16" => "__nvvm_fma_rn_relu_f16", + "llvm.nvvm.fma.rn.relu.f16x2" => "__nvvm_fma_rn_relu_f16x2", + "llvm.nvvm.fma.rn.sat.f16" => "__nvvm_fma_rn_sat_f16", + "llvm.nvvm.fma.rn.sat.f16x2" => "__nvvm_fma_rn_sat_f16x2", "llvm.nvvm.fma.rp.d" => "__nvvm_fma_rp_d", "llvm.nvvm.fma.rp.f" => "__nvvm_fma_rp_f", "llvm.nvvm.fma.rp.ftz.f" => "__nvvm_fma_rp_ftz_f", "llvm.nvvm.fma.rz.d" => "__nvvm_fma_rz_d", "llvm.nvvm.fma.rz.f" => "__nvvm_fma_rz_f", "llvm.nvvm.fma.rz.ftz.f" => "__nvvm_fma_rz_ftz_f", + "llvm.nvvm.fmax.bf16" => "__nvvm_fmax_bf16", + "llvm.nvvm.fmax.bf16x2" => "__nvvm_fmax_bf16x2", "llvm.nvvm.fmax.d" => "__nvvm_fmax_d", "llvm.nvvm.fmax.f" => "__nvvm_fmax_f", + "llvm.nvvm.fmax.f16" => "__nvvm_fmax_f16", + "llvm.nvvm.fmax.f16x2" => "__nvvm_fmax_f16x2", "llvm.nvvm.fmax.ftz.f" => "__nvvm_fmax_ftz_f", + "llvm.nvvm.fmax.ftz.f16" => "__nvvm_fmax_ftz_f16", + "llvm.nvvm.fmax.ftz.f16x2" => 
"__nvvm_fmax_ftz_f16x2", + "llvm.nvvm.fmax.ftz.nan.f" => "__nvvm_fmax_ftz_nan_f", + "llvm.nvvm.fmax.ftz.nan.f16" => "__nvvm_fmax_ftz_nan_f16", + "llvm.nvvm.fmax.ftz.nan.f16x2" => "__nvvm_fmax_ftz_nan_f16x2", + "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f" => "__nvvm_fmax_ftz_nan_xorsign_abs_f", + "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16", + "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmax.ftz.xorsign.abs.f" => "__nvvm_fmax_ftz_xorsign_abs_f", + "llvm.nvvm.fmax.ftz.xorsign.abs.f16" => "__nvvm_fmax_ftz_xorsign_abs_f16", + "llvm.nvvm.fmax.ftz.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_xorsign_abs_f16x2", + "llvm.nvvm.fmax.nan.bf16" => "__nvvm_fmax_nan_bf16", + "llvm.nvvm.fmax.nan.bf16x2" => "__nvvm_fmax_nan_bf16x2", + "llvm.nvvm.fmax.nan.f" => "__nvvm_fmax_nan_f", + "llvm.nvvm.fmax.nan.f16" => "__nvvm_fmax_nan_f16", + "llvm.nvvm.fmax.nan.f16x2" => "__nvvm_fmax_nan_f16x2", + "llvm.nvvm.fmax.nan.xorsign.abs.bf16" => "__nvvm_fmax_nan_xorsign_abs_bf16", + "llvm.nvvm.fmax.nan.xorsign.abs.bf16x2" => "__nvvm_fmax_nan_xorsign_abs_bf16x2", + "llvm.nvvm.fmax.nan.xorsign.abs.f" => "__nvvm_fmax_nan_xorsign_abs_f", + "llvm.nvvm.fmax.nan.xorsign.abs.f16" => "__nvvm_fmax_nan_xorsign_abs_f16", + "llvm.nvvm.fmax.nan.xorsign.abs.f16x2" => "__nvvm_fmax_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmax.xorsign.abs.bf16" => "__nvvm_fmax_xorsign_abs_bf16", + "llvm.nvvm.fmax.xorsign.abs.bf16x2" => "__nvvm_fmax_xorsign_abs_bf16x2", + "llvm.nvvm.fmax.xorsign.abs.f" => "__nvvm_fmax_xorsign_abs_f", + "llvm.nvvm.fmax.xorsign.abs.f16" => "__nvvm_fmax_xorsign_abs_f16", + "llvm.nvvm.fmax.xorsign.abs.f16x2" => "__nvvm_fmax_xorsign_abs_f16x2", + "llvm.nvvm.fmin.bf16" => "__nvvm_fmin_bf16", + "llvm.nvvm.fmin.bf16x2" => "__nvvm_fmin_bf16x2", "llvm.nvvm.fmin.d" => "__nvvm_fmin_d", "llvm.nvvm.fmin.f" => "__nvvm_fmin_f", + "llvm.nvvm.fmin.f16" => "__nvvm_fmin_f16", + "llvm.nvvm.fmin.f16x2" => "__nvvm_fmin_f16x2", "llvm.nvvm.fmin.ftz.f" => "__nvvm_fmin_ftz_f", + "llvm.nvvm.fmin.ftz.f16" => "__nvvm_fmin_ftz_f16", + "llvm.nvvm.fmin.ftz.f16x2" => "__nvvm_fmin_ftz_f16x2", + "llvm.nvvm.fmin.ftz.nan.f" => "__nvvm_fmin_ftz_nan_f", + "llvm.nvvm.fmin.ftz.nan.f16" => "__nvvm_fmin_ftz_nan_f16", + "llvm.nvvm.fmin.ftz.nan.f16x2" => "__nvvm_fmin_ftz_nan_f16x2", + "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f" => "__nvvm_fmin_ftz_nan_xorsign_abs_f", + "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16", + "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmin.ftz.xorsign.abs.f" => "__nvvm_fmin_ftz_xorsign_abs_f", + "llvm.nvvm.fmin.ftz.xorsign.abs.f16" => "__nvvm_fmin_ftz_xorsign_abs_f16", + "llvm.nvvm.fmin.ftz.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_xorsign_abs_f16x2", + "llvm.nvvm.fmin.nan.bf16" => "__nvvm_fmin_nan_bf16", + "llvm.nvvm.fmin.nan.bf16x2" => "__nvvm_fmin_nan_bf16x2", + "llvm.nvvm.fmin.nan.f" => "__nvvm_fmin_nan_f", + "llvm.nvvm.fmin.nan.f16" => "__nvvm_fmin_nan_f16", + "llvm.nvvm.fmin.nan.f16x2" => "__nvvm_fmin_nan_f16x2", + "llvm.nvvm.fmin.nan.xorsign.abs.bf16" => "__nvvm_fmin_nan_xorsign_abs_bf16", + "llvm.nvvm.fmin.nan.xorsign.abs.bf16x2" => "__nvvm_fmin_nan_xorsign_abs_bf16x2", + "llvm.nvvm.fmin.nan.xorsign.abs.f" => "__nvvm_fmin_nan_xorsign_abs_f", + "llvm.nvvm.fmin.nan.xorsign.abs.f16" => "__nvvm_fmin_nan_xorsign_abs_f16", + "llvm.nvvm.fmin.nan.xorsign.abs.f16x2" => "__nvvm_fmin_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmin.xorsign.abs.bf16" => "__nvvm_fmin_xorsign_abs_bf16", + 
"llvm.nvvm.fmin.xorsign.abs.bf16x2" => "__nvvm_fmin_xorsign_abs_bf16x2", + "llvm.nvvm.fmin.xorsign.abs.f" => "__nvvm_fmin_xorsign_abs_f", + "llvm.nvvm.fmin.xorsign.abs.f16" => "__nvvm_fmin_xorsign_abs_f16", + "llvm.nvvm.fmin.xorsign.abs.f16x2" => "__nvvm_fmin_xorsign_abs_f16x2", + "llvm.nvvm.fns" => "__nvvm_fns", "llvm.nvvm.h2f" => "__nvvm_h2f", "llvm.nvvm.i2d.rm" => "__nvvm_i2d_rm", "llvm.nvvm.i2d.rn" => "__nvvm_i2d_rn", @@ -2461,10 +3199,27 @@ match name { "llvm.nvvm.ll2f.rp" => "__nvvm_ll2f_rp", "llvm.nvvm.ll2f.rz" => "__nvvm_ll2f_rz", "llvm.nvvm.lohi.i2d" => "__nvvm_lohi_i2d", + "llvm.nvvm.match.any.sync.i32" => "__nvvm_match_any_sync_i32", + "llvm.nvvm.match.any.sync.i64" => "__nvvm_match_any_sync_i64", "llvm.nvvm.max.i" => "__nvvm_max_i", "llvm.nvvm.max.ll" => "__nvvm_max_ll", "llvm.nvvm.max.ui" => "__nvvm_max_ui", "llvm.nvvm.max.ull" => "__nvvm_max_ull", + "llvm.nvvm.mbarrier.arrive" => "__nvvm_mbarrier_arrive", + "llvm.nvvm.mbarrier.arrive.drop" => "__nvvm_mbarrier_arrive_drop", + "llvm.nvvm.mbarrier.arrive.drop.noComplete" => "__nvvm_mbarrier_arrive_drop_noComplete", + "llvm.nvvm.mbarrier.arrive.drop.noComplete.shared" => "__nvvm_mbarrier_arrive_drop_noComplete_shared", + "llvm.nvvm.mbarrier.arrive.drop.shared" => "__nvvm_mbarrier_arrive_drop_shared", + "llvm.nvvm.mbarrier.arrive.noComplete" => "__nvvm_mbarrier_arrive_noComplete", + "llvm.nvvm.mbarrier.arrive.noComplete.shared" => "__nvvm_mbarrier_arrive_noComplete_shared", + "llvm.nvvm.mbarrier.arrive.shared" => "__nvvm_mbarrier_arrive_shared", + "llvm.nvvm.mbarrier.init" => "__nvvm_mbarrier_init", + "llvm.nvvm.mbarrier.init.shared" => "__nvvm_mbarrier_init_shared", + "llvm.nvvm.mbarrier.inval" => "__nvvm_mbarrier_inval", + "llvm.nvvm.mbarrier.inval.shared" => "__nvvm_mbarrier_inval_shared", + "llvm.nvvm.mbarrier.pending.count" => "__nvvm_mbarrier_pending_count", + "llvm.nvvm.mbarrier.test.wait" => "__nvvm_mbarrier_test_wait", + "llvm.nvvm.mbarrier.test.wait.shared" => "__nvvm_mbarrier_test_wait_shared", "llvm.nvvm.membar.cta" => "__nvvm_membar_cta", "llvm.nvvm.membar.gl" => "__nvvm_membar_gl", "llvm.nvvm.membar.sys" => "__nvvm_membar_sys", @@ -2490,10 +3245,13 @@ match name { "llvm.nvvm.mulhi.ll" => "__nvvm_mulhi_ll", "llvm.nvvm.mulhi.ui" => "__nvvm_mulhi_ui", "llvm.nvvm.mulhi.ull" => "__nvvm_mulhi_ull", + "llvm.nvvm.neg.bf16" => "__nvvm_neg_bf16", + "llvm.nvvm.neg.bf16x2" => "__nvvm_neg_bf16x2", "llvm.nvvm.popc.i" => "__nvvm_popc_i", "llvm.nvvm.popc.ll" => "__nvvm_popc_ll", "llvm.nvvm.prmt" => "__nvvm_prmt", "llvm.nvvm.rcp.approx.ftz.d" => "__nvvm_rcp_approx_ftz_d", + "llvm.nvvm.rcp.approx.ftz.f" => "__nvvm_rcp_approx_ftz_f", "llvm.nvvm.rcp.rm.d" => "__nvvm_rcp_rm_d", "llvm.nvvm.rcp.rm.f" => "__nvvm_rcp_rm_f", "llvm.nvvm.rcp.rm.ftz.f" => "__nvvm_rcp_rm_ftz_f", @@ -2506,8 +3264,11 @@ match name { "llvm.nvvm.rcp.rz.d" => "__nvvm_rcp_rz_d", "llvm.nvvm.rcp.rz.f" => "__nvvm_rcp_rz_f", "llvm.nvvm.rcp.rz.ftz.f" => "__nvvm_rcp_rz_ftz_f", - "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_clock", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_clock64", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.ctaid.w" => "__nvvm_read_ptx_sreg_ctaid_w", "llvm.nvvm.read.ptx.sreg.ctaid.x" => "__nvvm_read_ptx_sreg_ctaid_x", "llvm.nvvm.read.ptx.sreg.ctaid.y" => 
"__nvvm_read_ptx_sreg_ctaid_y", "llvm.nvvm.read.ptx.sreg.ctaid.z" => "__nvvm_read_ptx_sreg_ctaid_z", @@ -2543,32 +3304,58 @@ match name { "llvm.nvvm.read.ptx.sreg.envreg7" => "__nvvm_read_ptx_sreg_envreg7", "llvm.nvvm.read.ptx.sreg.envreg8" => "__nvvm_read_ptx_sreg_envreg8", "llvm.nvvm.read.ptx.sreg.envreg9" => "__nvvm_read_ptx_sreg_envreg9", - "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_gridid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_laneid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_lanemask_eq", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_lanemask_ge", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_lanemask_gt", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_lanemask_le", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_lanemask_lt", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.nctaid.w" => "__nvvm_read_ptx_sreg_nctaid_w", "llvm.nvvm.read.ptx.sreg.nctaid.x" => "__nvvm_read_ptx_sreg_nctaid_x", "llvm.nvvm.read.ptx.sreg.nctaid.y" => "__nvvm_read_ptx_sreg_nctaid_y", "llvm.nvvm.read.ptx.sreg.nctaid.z" => "__nvvm_read_ptx_sreg_nctaid_z", - "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_nsmid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.ntid.w" => "__nvvm_read_ptx_sreg_ntid_w", "llvm.nvvm.read.ptx.sreg.ntid.x" => "__nvvm_read_ptx_sreg_ntid_x", "llvm.nvvm.read.ptx.sreg.ntid.y" => "__nvvm_read_ptx_sreg_ntid_y", "llvm.nvvm.read.ptx.sreg.ntid.z" => "__nvvm_read_ptx_sreg_ntid_z", - "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_nwarpid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_pm0", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_pm1", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_pm2", + // 
[DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_pm3", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_smid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.tid.w" => "__nvvm_read_ptx_sreg_tid_w", "llvm.nvvm.read.ptx.sreg.tid.x" => "__nvvm_read_ptx_sreg_tid_x", "llvm.nvvm.read.ptx.sreg.tid.y" => "__nvvm_read_ptx_sreg_tid_y", "llvm.nvvm.read.ptx.sreg.tid.z" => "__nvvm_read_ptx_sreg_tid_z", - "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_warpid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_", "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_warpsize", // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.redux.sync.add" => "__nvvm_redux_sync_add", + "llvm.nvvm.redux.sync.and" => "__nvvm_redux_sync_and", + "llvm.nvvm.redux.sync.max" => "__nvvm_redux_sync_max", + "llvm.nvvm.redux.sync.min" => "__nvvm_redux_sync_min", + "llvm.nvvm.redux.sync.or" => "__nvvm_redux_sync_or", + "llvm.nvvm.redux.sync.umax" => "__nvvm_redux_sync_umax", + "llvm.nvvm.redux.sync.umin" => "__nvvm_redux_sync_umin", + "llvm.nvvm.redux.sync.xor" => "__nvvm_redux_sync_xor", "llvm.nvvm.rotate.b32" => "__nvvm_rotate_b32", "llvm.nvvm.rotate.b64" => "__nvvm_rotate_b64", "llvm.nvvm.rotate.right.b64" => "__nvvm_rotate_right_b64", @@ -2589,6 +3376,14 @@ match name { "llvm.nvvm.shfl.down.i32" => "__nvvm_shfl_down_i32", "llvm.nvvm.shfl.idx.f32" => "__nvvm_shfl_idx_f32", "llvm.nvvm.shfl.idx.i32" => "__nvvm_shfl_idx_i32", + "llvm.nvvm.shfl.sync.bfly.f32" => "__nvvm_shfl_sync_bfly_f32", + "llvm.nvvm.shfl.sync.bfly.i32" => "__nvvm_shfl_sync_bfly_i32", + "llvm.nvvm.shfl.sync.down.f32" => "__nvvm_shfl_sync_down_f32", + "llvm.nvvm.shfl.sync.down.i32" => "__nvvm_shfl_sync_down_i32", + "llvm.nvvm.shfl.sync.idx.f32" => "__nvvm_shfl_sync_idx_f32", + "llvm.nvvm.shfl.sync.idx.i32" => "__nvvm_shfl_sync_idx_i32", + "llvm.nvvm.shfl.sync.up.f32" => "__nvvm_shfl_sync_up_f32", + "llvm.nvvm.shfl.sync.up.i32" => "__nvvm_shfl_sync_up_i32", "llvm.nvvm.shfl.up.f32" => "__nvvm_shfl_up_f32", "llvm.nvvm.shfl.up.i32" => "__nvvm_shfl_up_i32", "llvm.nvvm.sin.approx.f" => "__nvvm_sin_approx_f", @@ -2852,6 +3647,14 @@ match name { "llvm.nvvm.ull2f.rn" => "__nvvm_ull2f_rn", "llvm.nvvm.ull2f.rp" => "__nvvm_ull2f_rp", "llvm.nvvm.ull2f.rz" => "__nvvm_ull2f_rz", + "llvm.nvvm.vote.all" => "__nvvm_vote_all", + "llvm.nvvm.vote.all.sync" => "__nvvm_vote_all_sync", + "llvm.nvvm.vote.any" => "__nvvm_vote_any", + "llvm.nvvm.vote.any.sync" => "__nvvm_vote_any_sync", + "llvm.nvvm.vote.ballot" => "__nvvm_vote_ballot", + "llvm.nvvm.vote.ballot.sync" => "__nvvm_vote_ballot_sync", + "llvm.nvvm.vote.uni" => "__nvvm_vote_uni", + "llvm.nvvm.vote.uni.sync" => "__nvvm_vote_uni_sync", // ppc "llvm.ppc.addex" => "__builtin_ppc_addex", "llvm.ppc.addf128.round.to.odd" => "__builtin_addf128_round_to_odd", @@ -2881,6 +3684,10 @@ match name { "llvm.ppc.altivec.mtvsrhm" => "__builtin_altivec_mtvsrhm", "llvm.ppc.altivec.mtvsrqm" => "__builtin_altivec_mtvsrqm", "llvm.ppc.altivec.mtvsrwm" => "__builtin_altivec_mtvsrwm", + "llvm.ppc.altivec.vabsdub" => "__builtin_altivec_vabsdub", + "llvm.ppc.altivec.vabsduh" => "__builtin_altivec_vabsduh", + "llvm.ppc.altivec.vabsduw" => "__builtin_altivec_vabsduw", + "llvm.ppc.altivec.vaddcuq" 
=> "__builtin_altivec_vaddcuq", "llvm.ppc.altivec.vaddcuw" => "__builtin_altivec_vaddcuw", "llvm.ppc.altivec.vaddecuq" => "__builtin_altivec_vaddecuq", "llvm.ppc.altivec.vaddeuqm" => "__builtin_altivec_vaddeuqm", @@ -2963,6 +3770,12 @@ match name { "llvm.ppc.altivec.vctuxs" => "__builtin_altivec_vctuxs", "llvm.ppc.altivec.vctzdm" => "__builtin_altivec_vctzdm", "llvm.ppc.altivec.vctzlsbb" => "__builtin_altivec_vctzlsbb", + "llvm.ppc.altivec.vdivesd" => "__builtin_altivec_vdivesd", + "llvm.ppc.altivec.vdivesq" => "__builtin_altivec_vdivesq", + "llvm.ppc.altivec.vdivesw" => "__builtin_altivec_vdivesw", + "llvm.ppc.altivec.vdiveud" => "__builtin_altivec_vdiveud", + "llvm.ppc.altivec.vdiveuq" => "__builtin_altivec_vdiveuq", + "llvm.ppc.altivec.vdiveuw" => "__builtin_altivec_vdiveuw", "llvm.ppc.altivec.vexpandbm" => "__builtin_altivec_vexpandbm", "llvm.ppc.altivec.vexpanddm" => "__builtin_altivec_vexpanddm", "llvm.ppc.altivec.vexpandhm" => "__builtin_altivec_vexpandhm", @@ -3036,15 +3849,23 @@ match name { "llvm.ppc.altivec.vmsumuhm" => "__builtin_altivec_vmsumuhm", "llvm.ppc.altivec.vmsumuhs" => "__builtin_altivec_vmsumuhs", "llvm.ppc.altivec.vmulesb" => "__builtin_altivec_vmulesb", + "llvm.ppc.altivec.vmulesd" => "__builtin_altivec_vmulesd", "llvm.ppc.altivec.vmulesh" => "__builtin_altivec_vmulesh", "llvm.ppc.altivec.vmulesw" => "__builtin_altivec_vmulesw", "llvm.ppc.altivec.vmuleub" => "__builtin_altivec_vmuleub", + "llvm.ppc.altivec.vmuleud" => "__builtin_altivec_vmuleud", "llvm.ppc.altivec.vmuleuh" => "__builtin_altivec_vmuleuh", "llvm.ppc.altivec.vmuleuw" => "__builtin_altivec_vmuleuw", + "llvm.ppc.altivec.vmulhsd" => "__builtin_altivec_vmulhsd", + "llvm.ppc.altivec.vmulhsw" => "__builtin_altivec_vmulhsw", + "llvm.ppc.altivec.vmulhud" => "__builtin_altivec_vmulhud", + "llvm.ppc.altivec.vmulhuw" => "__builtin_altivec_vmulhuw", "llvm.ppc.altivec.vmulosb" => "__builtin_altivec_vmulosb", + "llvm.ppc.altivec.vmulosd" => "__builtin_altivec_vmulosd", "llvm.ppc.altivec.vmulosh" => "__builtin_altivec_vmulosh", "llvm.ppc.altivec.vmulosw" => "__builtin_altivec_vmulosw", "llvm.ppc.altivec.vmuloub" => "__builtin_altivec_vmuloub", + "llvm.ppc.altivec.vmuloud" => "__builtin_altivec_vmuloud", "llvm.ppc.altivec.vmulouh" => "__builtin_altivec_vmulouh", "llvm.ppc.altivec.vmulouw" => "__builtin_altivec_vmulouw", "llvm.ppc.altivec.vnmsubfp" => "__builtin_altivec_vnmsubfp", @@ -3071,8 +3892,14 @@ match name { "llvm.ppc.altivec.vrfiz" => "__builtin_altivec_vrfiz", "llvm.ppc.altivec.vrlb" => "__builtin_altivec_vrlb", "llvm.ppc.altivec.vrld" => "__builtin_altivec_vrld", + "llvm.ppc.altivec.vrldmi" => "__builtin_altivec_vrldmi", + "llvm.ppc.altivec.vrldnm" => "__builtin_altivec_vrldnm", "llvm.ppc.altivec.vrlh" => "__builtin_altivec_vrlh", + "llvm.ppc.altivec.vrlqmi" => "__builtin_altivec_vrlqmi", + "llvm.ppc.altivec.vrlqnm" => "__builtin_altivec_vrlqnm", "llvm.ppc.altivec.vrlw" => "__builtin_altivec_vrlw", + "llvm.ppc.altivec.vrlwmi" => "__builtin_altivec_vrlwmi", + "llvm.ppc.altivec.vrlwnm" => "__builtin_altivec_vrlwnm", "llvm.ppc.altivec.vrsqrtefp" => "__builtin_altivec_vrsqrtefp", "llvm.ppc.altivec.vsel" => "__builtin_altivec_vsel_4si", "llvm.ppc.altivec.vsl" => "__builtin_altivec_vsl", @@ -3080,6 +3907,7 @@ match name { "llvm.ppc.altivec.vsldbi" => "__builtin_altivec_vsldbi", "llvm.ppc.altivec.vslh" => "__builtin_altivec_vslh", "llvm.ppc.altivec.vslo" => "__builtin_altivec_vslo", + "llvm.ppc.altivec.vslv" => "__builtin_altivec_vslv", "llvm.ppc.altivec.vslw" => "__builtin_altivec_vslw", "llvm.ppc.altivec.vsr" => 
"__builtin_altivec_vsr", "llvm.ppc.altivec.vsrab" => "__builtin_altivec_vsrab", @@ -3089,6 +3917,7 @@ match name { "llvm.ppc.altivec.vsrdbi" => "__builtin_altivec_vsrdbi", "llvm.ppc.altivec.vsrh" => "__builtin_altivec_vsrh", "llvm.ppc.altivec.vsro" => "__builtin_altivec_vsro", + "llvm.ppc.altivec.vsrv" => "__builtin_altivec_vsrv", "llvm.ppc.altivec.vsrw" => "__builtin_altivec_vsrw", "llvm.ppc.altivec.vstribl" => "__builtin_altivec_vstribl", "llvm.ppc.altivec.vstribl.p" => "__builtin_altivec_vstribl_p", @@ -3098,6 +3927,7 @@ match name { "llvm.ppc.altivec.vstrihl.p" => "__builtin_altivec_vstrihl_p", "llvm.ppc.altivec.vstrihr" => "__builtin_altivec_vstrihr", "llvm.ppc.altivec.vstrihr.p" => "__builtin_altivec_vstrihr_p", + "llvm.ppc.altivec.vsubcuq" => "__builtin_altivec_vsubcuq", "llvm.ppc.altivec.vsubcuw" => "__builtin_altivec_vsubcuw", "llvm.ppc.altivec.vsubecuq" => "__builtin_altivec_vsubecuq", "llvm.ppc.altivec.vsubeuqm" => "__builtin_altivec_vsubeuqm", @@ -3165,6 +3995,8 @@ match name { "llvm.ppc.fmaf128.round.to.odd" => "__builtin_fmaf128_round_to_odd", "llvm.ppc.fmsub" => "__builtin_ppc_fmsub", "llvm.ppc.fmsubs" => "__builtin_ppc_fmsubs", + "llvm.ppc.fnabs" => "__builtin_ppc_fnabs", + "llvm.ppc.fnabss" => "__builtin_ppc_fnabss", "llvm.ppc.fnmadd" => "__builtin_ppc_fnmadd", "llvm.ppc.fnmadds" => "__builtin_ppc_fnmadds", "llvm.ppc.fre" => "__builtin_ppc_fre", @@ -3341,8 +4173,24 @@ match name { "llvm.ppc.vsx.xvcmpgtdp.p" => "__builtin_vsx_xvcmpgtdp_p", "llvm.ppc.vsx.xvcmpgtsp" => "__builtin_vsx_xvcmpgtsp", "llvm.ppc.vsx.xvcmpgtsp.p" => "__builtin_vsx_xvcmpgtsp_p", + "llvm.ppc.vsx.xvcvbf16spn" => "__builtin_vsx_xvcvbf16spn", + "llvm.ppc.vsx.xvcvdpsp" => "__builtin_vsx_xvcvdpsp", + "llvm.ppc.vsx.xvcvdpsxws" => "__builtin_vsx_xvcvdpsxws", + "llvm.ppc.vsx.xvcvdpuxws" => "__builtin_vsx_xvcvdpuxws", + "llvm.ppc.vsx.xvcvhpsp" => "__builtin_vsx_xvcvhpsp", + "llvm.ppc.vsx.xvcvspbf16" => "__builtin_vsx_xvcvspbf16", + "llvm.ppc.vsx.xvcvspdp" => "__builtin_vsx_xvcvspdp", + "llvm.ppc.vsx.xvcvsphp" => "__builtin_vsx_xvcvsphp", + "llvm.ppc.vsx.xvcvspsxds" => "__builtin_vsx_xvcvspsxds", + "llvm.ppc.vsx.xvcvspuxds" => "__builtin_vsx_xvcvspuxds", + "llvm.ppc.vsx.xvcvsxdsp" => "__builtin_vsx_xvcvsxdsp", + "llvm.ppc.vsx.xvcvsxwdp" => "__builtin_vsx_xvcvsxwdp", + "llvm.ppc.vsx.xvcvuxdsp" => "__builtin_vsx_xvcvuxdsp", + "llvm.ppc.vsx.xvcvuxwdp" => "__builtin_vsx_xvcvuxwdp", "llvm.ppc.vsx.xvdivdp" => "__builtin_vsx_xvdivdp", "llvm.ppc.vsx.xvdivsp" => "__builtin_vsx_xvdivsp", + "llvm.ppc.vsx.xviexpdp" => "__builtin_vsx_xviexpdp", + "llvm.ppc.vsx.xviexpsp" => "__builtin_vsx_xviexpsp", "llvm.ppc.vsx.xvmaxdp" => "__builtin_vsx_xvmaxdp", "llvm.ppc.vsx.xvmaxsp" => "__builtin_vsx_xvmaxsp", "llvm.ppc.vsx.xvmindp" => "__builtin_vsx_xvmindp", @@ -3351,10 +4199,28 @@ match name { "llvm.ppc.vsx.xvresp" => "__builtin_vsx_xvresp", "llvm.ppc.vsx.xvrsqrtedp" => "__builtin_vsx_xvrsqrtedp", "llvm.ppc.vsx.xvrsqrtesp" => "__builtin_vsx_xvrsqrtesp", + "llvm.ppc.vsx.xvtdivdp" => "__builtin_vsx_xvtdivdp", + "llvm.ppc.vsx.xvtdivsp" => "__builtin_vsx_xvtdivsp", + "llvm.ppc.vsx.xvtlsbb" => "__builtin_vsx_xvtlsbb", + "llvm.ppc.vsx.xvtsqrtdp" => "__builtin_vsx_xvtsqrtdp", + "llvm.ppc.vsx.xvtsqrtsp" => "__builtin_vsx_xvtsqrtsp", + "llvm.ppc.vsx.xvtstdcdp" => "__builtin_vsx_xvtstdcdp", + "llvm.ppc.vsx.xvtstdcsp" => "__builtin_vsx_xvtstdcsp", + "llvm.ppc.vsx.xvxexpdp" => "__builtin_vsx_xvxexpdp", + "llvm.ppc.vsx.xvxexpsp" => "__builtin_vsx_xvxexpsp", + "llvm.ppc.vsx.xvxsigdp" => "__builtin_vsx_xvxsigdp", + "llvm.ppc.vsx.xvxsigsp" => 
"__builtin_vsx_xvxsigsp", "llvm.ppc.vsx.xxblendvb" => "__builtin_vsx_xxblendvb", "llvm.ppc.vsx.xxblendvd" => "__builtin_vsx_xxblendvd", "llvm.ppc.vsx.xxblendvh" => "__builtin_vsx_xxblendvh", "llvm.ppc.vsx.xxblendvw" => "__builtin_vsx_xxblendvw", + "llvm.ppc.vsx.xxeval" => "__builtin_vsx_xxeval", + "llvm.ppc.vsx.xxextractuw" => "__builtin_vsx_xxextractuw", + "llvm.ppc.vsx.xxgenpcvbm" => "__builtin_vsx_xxgenpcvbm", + "llvm.ppc.vsx.xxgenpcvdm" => "__builtin_vsx_xxgenpcvdm", + "llvm.ppc.vsx.xxgenpcvhm" => "__builtin_vsx_xxgenpcvhm", + "llvm.ppc.vsx.xxgenpcvwm" => "__builtin_vsx_xxgenpcvwm", + "llvm.ppc.vsx.xxinsertw" => "__builtin_vsx_xxinsertw", "llvm.ppc.vsx.xxleqv" => "__builtin_vsx_xxleqv", "llvm.ppc.vsx.xxpermx" => "__builtin_vsx_xxpermx", // ptx @@ -3376,6 +4242,19 @@ match name { "llvm.ptx.read.pm3" => "__builtin_ptx_read_pm3", "llvm.ptx.read.smid" => "__builtin_ptx_read_smid", "llvm.ptx.read.warpid" => "__builtin_ptx_read_warpid", + // r600 + "llvm.r600.group.barrier" => "__builtin_r600_group_barrier", + "llvm.r600.implicitarg.ptr" => "__builtin_r600_implicitarg_ptr", + "llvm.r600.rat.store.typed" => "__builtin_r600_rat_store_typed", + "llvm.r600.read.global.size.x" => "__builtin_r600_read_global_size_x", + "llvm.r600.read.global.size.y" => "__builtin_r600_read_global_size_y", + "llvm.r600.read.global.size.z" => "__builtin_r600_read_global_size_z", + "llvm.r600.read.ngroups.x" => "__builtin_r600_read_ngroups_x", + "llvm.r600.read.ngroups.y" => "__builtin_r600_read_ngroups_y", + "llvm.r600.read.ngroups.z" => "__builtin_r600_read_ngroups_z", + "llvm.r600.read.tgid.x" => "__builtin_r600_read_tgid_x", + "llvm.r600.read.tgid.y" => "__builtin_r600_read_tgid_y", + "llvm.r600.read.tgid.z" => "__builtin_r600_read_tgid_z", // s390 "llvm.s390.efpc" => "__builtin_s390_efpc", "llvm.s390.etnd" => "__builtin_tx_nesting_depth", @@ -3383,29 +4262,1426 @@ match name { "llvm.s390.ppa.txassist" => "__builtin_tx_assist", "llvm.s390.sfpc" => "__builtin_s390_sfpc", "llvm.s390.tend" => "__builtin_tend", + "llvm.s390.vaccb" => "__builtin_s390_vaccb", + "llvm.s390.vacccq" => "__builtin_s390_vacccq", + "llvm.s390.vaccf" => "__builtin_s390_vaccf", + "llvm.s390.vaccg" => "__builtin_s390_vaccg", + "llvm.s390.vacch" => "__builtin_s390_vacch", + "llvm.s390.vaccq" => "__builtin_s390_vaccq", + "llvm.s390.vacq" => "__builtin_s390_vacq", + "llvm.s390.vaq" => "__builtin_s390_vaq", + "llvm.s390.vavgb" => "__builtin_s390_vavgb", + "llvm.s390.vavgf" => "__builtin_s390_vavgf", + "llvm.s390.vavgg" => "__builtin_s390_vavgg", + "llvm.s390.vavgh" => "__builtin_s390_vavgh", + "llvm.s390.vavglb" => "__builtin_s390_vavglb", + "llvm.s390.vavglf" => "__builtin_s390_vavglf", + "llvm.s390.vavglg" => "__builtin_s390_vavglg", + "llvm.s390.vavglh" => "__builtin_s390_vavglh", + "llvm.s390.vbperm" => "__builtin_s390_vbperm", "llvm.s390.vcfn" => "__builtin_s390_vcfn", + "llvm.s390.vcksm" => "__builtin_s390_vcksm", "llvm.s390.vclfnhs" => "__builtin_s390_vclfnhs", "llvm.s390.vclfnls" => "__builtin_s390_vclfnls", "llvm.s390.vcnf" => "__builtin_s390_vcnf", "llvm.s390.vcrnfs" => "__builtin_s390_vcrnfs", + "llvm.s390.verimb" => "__builtin_s390_verimb", + "llvm.s390.verimf" => "__builtin_s390_verimf", + "llvm.s390.verimg" => "__builtin_s390_verimg", + "llvm.s390.verimh" => "__builtin_s390_verimh", + "llvm.s390.verllb" => "__builtin_s390_verllb", + "llvm.s390.verllf" => "__builtin_s390_verllf", + "llvm.s390.verllg" => "__builtin_s390_verllg", + "llvm.s390.verllh" => "__builtin_s390_verllh", + "llvm.s390.verllvb" => "__builtin_s390_verllvb", + 
"llvm.s390.verllvf" => "__builtin_s390_verllvf", + "llvm.s390.verllvg" => "__builtin_s390_verllvg", + "llvm.s390.verllvh" => "__builtin_s390_verllvh", + "llvm.s390.vfaeb" => "__builtin_s390_vfaeb", + "llvm.s390.vfaef" => "__builtin_s390_vfaef", + "llvm.s390.vfaeh" => "__builtin_s390_vfaeh", + "llvm.s390.vfaezb" => "__builtin_s390_vfaezb", + "llvm.s390.vfaezf" => "__builtin_s390_vfaezf", + "llvm.s390.vfaezh" => "__builtin_s390_vfaezh", + "llvm.s390.vfeeb" => "__builtin_s390_vfeeb", + "llvm.s390.vfeef" => "__builtin_s390_vfeef", + "llvm.s390.vfeeh" => "__builtin_s390_vfeeh", + "llvm.s390.vfeezb" => "__builtin_s390_vfeezb", + "llvm.s390.vfeezf" => "__builtin_s390_vfeezf", + "llvm.s390.vfeezh" => "__builtin_s390_vfeezh", + "llvm.s390.vfeneb" => "__builtin_s390_vfeneb", + "llvm.s390.vfenef" => "__builtin_s390_vfenef", + "llvm.s390.vfeneh" => "__builtin_s390_vfeneh", + "llvm.s390.vfenezb" => "__builtin_s390_vfenezb", + "llvm.s390.vfenezf" => "__builtin_s390_vfenezf", + "llvm.s390.vfenezh" => "__builtin_s390_vfenezh", + "llvm.s390.vgfmab" => "__builtin_s390_vgfmab", + "llvm.s390.vgfmaf" => "__builtin_s390_vgfmaf", + "llvm.s390.vgfmag" => "__builtin_s390_vgfmag", + "llvm.s390.vgfmah" => "__builtin_s390_vgfmah", + "llvm.s390.vgfmb" => "__builtin_s390_vgfmb", + "llvm.s390.vgfmf" => "__builtin_s390_vgfmf", + "llvm.s390.vgfmg" => "__builtin_s390_vgfmg", + "llvm.s390.vgfmh" => "__builtin_s390_vgfmh", + "llvm.s390.vistrb" => "__builtin_s390_vistrb", + "llvm.s390.vistrf" => "__builtin_s390_vistrf", + "llvm.s390.vistrh" => "__builtin_s390_vistrh", "llvm.s390.vlbb" => "__builtin_s390_vlbb", "llvm.s390.vll" => "__builtin_s390_vll", "llvm.s390.vlrl" => "__builtin_s390_vlrl", + "llvm.s390.vmaeb" => "__builtin_s390_vmaeb", + "llvm.s390.vmaef" => "__builtin_s390_vmaef", + "llvm.s390.vmaeh" => "__builtin_s390_vmaeh", + "llvm.s390.vmahb" => "__builtin_s390_vmahb", + "llvm.s390.vmahf" => "__builtin_s390_vmahf", + "llvm.s390.vmahh" => "__builtin_s390_vmahh", + "llvm.s390.vmaleb" => "__builtin_s390_vmaleb", + "llvm.s390.vmalef" => "__builtin_s390_vmalef", + "llvm.s390.vmaleh" => "__builtin_s390_vmaleh", + "llvm.s390.vmalhb" => "__builtin_s390_vmalhb", + "llvm.s390.vmalhf" => "__builtin_s390_vmalhf", + "llvm.s390.vmalhh" => "__builtin_s390_vmalhh", + "llvm.s390.vmalob" => "__builtin_s390_vmalob", + "llvm.s390.vmalof" => "__builtin_s390_vmalof", + "llvm.s390.vmaloh" => "__builtin_s390_vmaloh", + "llvm.s390.vmaob" => "__builtin_s390_vmaob", + "llvm.s390.vmaof" => "__builtin_s390_vmaof", + "llvm.s390.vmaoh" => "__builtin_s390_vmaoh", + "llvm.s390.vmeb" => "__builtin_s390_vmeb", + "llvm.s390.vmef" => "__builtin_s390_vmef", + "llvm.s390.vmeh" => "__builtin_s390_vmeh", + "llvm.s390.vmhb" => "__builtin_s390_vmhb", + "llvm.s390.vmhf" => "__builtin_s390_vmhf", + "llvm.s390.vmhh" => "__builtin_s390_vmhh", + "llvm.s390.vmleb" => "__builtin_s390_vmleb", + "llvm.s390.vmlef" => "__builtin_s390_vmlef", + "llvm.s390.vmleh" => "__builtin_s390_vmleh", + "llvm.s390.vmlhb" => "__builtin_s390_vmlhb", + "llvm.s390.vmlhf" => "__builtin_s390_vmlhf", + "llvm.s390.vmlhh" => "__builtin_s390_vmlhh", + "llvm.s390.vmlob" => "__builtin_s390_vmlob", + "llvm.s390.vmlof" => "__builtin_s390_vmlof", + "llvm.s390.vmloh" => "__builtin_s390_vmloh", + "llvm.s390.vmob" => "__builtin_s390_vmob", + "llvm.s390.vmof" => "__builtin_s390_vmof", + "llvm.s390.vmoh" => "__builtin_s390_vmoh", "llvm.s390.vmslg" => "__builtin_s390_vmslg", "llvm.s390.vpdi" => "__builtin_s390_vpdi", "llvm.s390.vperm" => "__builtin_s390_vperm", + "llvm.s390.vpklsf" => 
"__builtin_s390_vpklsf", + "llvm.s390.vpklsg" => "__builtin_s390_vpklsg", + "llvm.s390.vpklsh" => "__builtin_s390_vpklsh", + "llvm.s390.vpksf" => "__builtin_s390_vpksf", + "llvm.s390.vpksg" => "__builtin_s390_vpksg", + "llvm.s390.vpksh" => "__builtin_s390_vpksh", + "llvm.s390.vsbcbiq" => "__builtin_s390_vsbcbiq", + "llvm.s390.vsbiq" => "__builtin_s390_vsbiq", + "llvm.s390.vscbib" => "__builtin_s390_vscbib", + "llvm.s390.vscbif" => "__builtin_s390_vscbif", + "llvm.s390.vscbig" => "__builtin_s390_vscbig", + "llvm.s390.vscbih" => "__builtin_s390_vscbih", + "llvm.s390.vscbiq" => "__builtin_s390_vscbiq", + "llvm.s390.vsl" => "__builtin_s390_vsl", + "llvm.s390.vslb" => "__builtin_s390_vslb", "llvm.s390.vsld" => "__builtin_s390_vsld", "llvm.s390.vsldb" => "__builtin_s390_vsldb", + "llvm.s390.vsq" => "__builtin_s390_vsq", + "llvm.s390.vsra" => "__builtin_s390_vsra", + "llvm.s390.vsrab" => "__builtin_s390_vsrab", "llvm.s390.vsrd" => "__builtin_s390_vsrd", + "llvm.s390.vsrl" => "__builtin_s390_vsrl", + "llvm.s390.vsrlb" => "__builtin_s390_vsrlb", "llvm.s390.vstl" => "__builtin_s390_vstl", + "llvm.s390.vstrcb" => "__builtin_s390_vstrcb", + "llvm.s390.vstrcf" => "__builtin_s390_vstrcf", + "llvm.s390.vstrch" => "__builtin_s390_vstrch", + "llvm.s390.vstrczb" => "__builtin_s390_vstrczb", + "llvm.s390.vstrczf" => "__builtin_s390_vstrczf", + "llvm.s390.vstrczh" => "__builtin_s390_vstrczh", "llvm.s390.vstrl" => "__builtin_s390_vstrl", + "llvm.s390.vsumb" => "__builtin_s390_vsumb", + "llvm.s390.vsumgf" => "__builtin_s390_vsumgf", + "llvm.s390.vsumgh" => "__builtin_s390_vsumgh", + "llvm.s390.vsumh" => "__builtin_s390_vsumh", + "llvm.s390.vsumqf" => "__builtin_s390_vsumqf", + "llvm.s390.vsumqg" => "__builtin_s390_vsumqg", + "llvm.s390.vtm" => "__builtin_s390_vtm", + "llvm.s390.vuphb" => "__builtin_s390_vuphb", + "llvm.s390.vuphf" => "__builtin_s390_vuphf", + "llvm.s390.vuphh" => "__builtin_s390_vuphh", + "llvm.s390.vuplb" => "__builtin_s390_vuplb", + "llvm.s390.vuplf" => "__builtin_s390_vuplf", + "llvm.s390.vuplhb" => "__builtin_s390_vuplhb", + "llvm.s390.vuplhf" => "__builtin_s390_vuplhf", + "llvm.s390.vuplhh" => "__builtin_s390_vuplhh", + "llvm.s390.vuplhw" => "__builtin_s390_vuplhw", + "llvm.s390.vupllb" => "__builtin_s390_vupllb", + "llvm.s390.vupllf" => "__builtin_s390_vupllf", + "llvm.s390.vupllh" => "__builtin_s390_vupllh", // ve + "llvm.ve.vl.andm.MMM" => "__builtin_ve_vl_andm_MMM", + "llvm.ve.vl.andm.mmm" => "__builtin_ve_vl_andm_mmm", + "llvm.ve.vl.eqvm.MMM" => "__builtin_ve_vl_eqvm_MMM", + "llvm.ve.vl.eqvm.mmm" => "__builtin_ve_vl_eqvm_mmm", "llvm.ve.vl.extract.vm512l" => "__builtin_ve_vl_extract_vm512l", "llvm.ve.vl.extract.vm512u" => "__builtin_ve_vl_extract_vm512u", + "llvm.ve.vl.fencec.s" => "__builtin_ve_vl_fencec_s", + "llvm.ve.vl.fencei" => "__builtin_ve_vl_fencei", + "llvm.ve.vl.fencem.s" => "__builtin_ve_vl_fencem_s", + "llvm.ve.vl.fidcr.sss" => "__builtin_ve_vl_fidcr_sss", "llvm.ve.vl.insert.vm512l" => "__builtin_ve_vl_insert_vm512l", "llvm.ve.vl.insert.vm512u" => "__builtin_ve_vl_insert_vm512u", + "llvm.ve.vl.lcr.sss" => "__builtin_ve_vl_lcr_sss", + "llvm.ve.vl.lsv.vvss" => "__builtin_ve_vl_lsv_vvss", + "llvm.ve.vl.lvm.MMss" => "__builtin_ve_vl_lvm_MMss", + "llvm.ve.vl.lvm.mmss" => "__builtin_ve_vl_lvm_mmss", + "llvm.ve.vl.lvsd.svs" => "__builtin_ve_vl_lvsd_svs", + "llvm.ve.vl.lvsl.svs" => "__builtin_ve_vl_lvsl_svs", + "llvm.ve.vl.lvss.svs" => "__builtin_ve_vl_lvss_svs", + "llvm.ve.vl.lzvm.sml" => "__builtin_ve_vl_lzvm_sml", + "llvm.ve.vl.negm.MM" => "__builtin_ve_vl_negm_MM", + 
"llvm.ve.vl.negm.mm" => "__builtin_ve_vl_negm_mm", + "llvm.ve.vl.nndm.MMM" => "__builtin_ve_vl_nndm_MMM", + "llvm.ve.vl.nndm.mmm" => "__builtin_ve_vl_nndm_mmm", + "llvm.ve.vl.orm.MMM" => "__builtin_ve_vl_orm_MMM", + "llvm.ve.vl.orm.mmm" => "__builtin_ve_vl_orm_mmm", "llvm.ve.vl.pack.f32a" => "__builtin_ve_vl_pack_f32a", "llvm.ve.vl.pack.f32p" => "__builtin_ve_vl_pack_f32p", + "llvm.ve.vl.pcvm.sml" => "__builtin_ve_vl_pcvm_sml", + "llvm.ve.vl.pfchv.ssl" => "__builtin_ve_vl_pfchv_ssl", + "llvm.ve.vl.pfchvnc.ssl" => "__builtin_ve_vl_pfchvnc_ssl", + "llvm.ve.vl.pvadds.vsvMvl" => "__builtin_ve_vl_pvadds_vsvMvl", + "llvm.ve.vl.pvadds.vsvl" => "__builtin_ve_vl_pvadds_vsvl", + "llvm.ve.vl.pvadds.vsvvl" => "__builtin_ve_vl_pvadds_vsvvl", + "llvm.ve.vl.pvadds.vvvMvl" => "__builtin_ve_vl_pvadds_vvvMvl", + "llvm.ve.vl.pvadds.vvvl" => "__builtin_ve_vl_pvadds_vvvl", + "llvm.ve.vl.pvadds.vvvvl" => "__builtin_ve_vl_pvadds_vvvvl", + "llvm.ve.vl.pvaddu.vsvMvl" => "__builtin_ve_vl_pvaddu_vsvMvl", + "llvm.ve.vl.pvaddu.vsvl" => "__builtin_ve_vl_pvaddu_vsvl", + "llvm.ve.vl.pvaddu.vsvvl" => "__builtin_ve_vl_pvaddu_vsvvl", + "llvm.ve.vl.pvaddu.vvvMvl" => "__builtin_ve_vl_pvaddu_vvvMvl", + "llvm.ve.vl.pvaddu.vvvl" => "__builtin_ve_vl_pvaddu_vvvl", + "llvm.ve.vl.pvaddu.vvvvl" => "__builtin_ve_vl_pvaddu_vvvvl", + "llvm.ve.vl.pvand.vsvMvl" => "__builtin_ve_vl_pvand_vsvMvl", + "llvm.ve.vl.pvand.vsvl" => "__builtin_ve_vl_pvand_vsvl", + "llvm.ve.vl.pvand.vsvvl" => "__builtin_ve_vl_pvand_vsvvl", + "llvm.ve.vl.pvand.vvvMvl" => "__builtin_ve_vl_pvand_vvvMvl", + "llvm.ve.vl.pvand.vvvl" => "__builtin_ve_vl_pvand_vvvl", + "llvm.ve.vl.pvand.vvvvl" => "__builtin_ve_vl_pvand_vvvvl", + "llvm.ve.vl.pvbrd.vsMvl" => "__builtin_ve_vl_pvbrd_vsMvl", + "llvm.ve.vl.pvbrd.vsl" => "__builtin_ve_vl_pvbrd_vsl", + "llvm.ve.vl.pvbrd.vsvl" => "__builtin_ve_vl_pvbrd_vsvl", + "llvm.ve.vl.pvbrv.vvMvl" => "__builtin_ve_vl_pvbrv_vvMvl", + "llvm.ve.vl.pvbrv.vvl" => "__builtin_ve_vl_pvbrv_vvl", + "llvm.ve.vl.pvbrv.vvvl" => "__builtin_ve_vl_pvbrv_vvvl", + "llvm.ve.vl.pvbrvlo.vvl" => "__builtin_ve_vl_pvbrvlo_vvl", + "llvm.ve.vl.pvbrvlo.vvmvl" => "__builtin_ve_vl_pvbrvlo_vvmvl", + "llvm.ve.vl.pvbrvlo.vvvl" => "__builtin_ve_vl_pvbrvlo_vvvl", + "llvm.ve.vl.pvbrvup.vvl" => "__builtin_ve_vl_pvbrvup_vvl", + "llvm.ve.vl.pvbrvup.vvmvl" => "__builtin_ve_vl_pvbrvup_vvmvl", + "llvm.ve.vl.pvbrvup.vvvl" => "__builtin_ve_vl_pvbrvup_vvvl", + "llvm.ve.vl.pvcmps.vsvMvl" => "__builtin_ve_vl_pvcmps_vsvMvl", + "llvm.ve.vl.pvcmps.vsvl" => "__builtin_ve_vl_pvcmps_vsvl", + "llvm.ve.vl.pvcmps.vsvvl" => "__builtin_ve_vl_pvcmps_vsvvl", + "llvm.ve.vl.pvcmps.vvvMvl" => "__builtin_ve_vl_pvcmps_vvvMvl", + "llvm.ve.vl.pvcmps.vvvl" => "__builtin_ve_vl_pvcmps_vvvl", + "llvm.ve.vl.pvcmps.vvvvl" => "__builtin_ve_vl_pvcmps_vvvvl", + "llvm.ve.vl.pvcmpu.vsvMvl" => "__builtin_ve_vl_pvcmpu_vsvMvl", + "llvm.ve.vl.pvcmpu.vsvl" => "__builtin_ve_vl_pvcmpu_vsvl", + "llvm.ve.vl.pvcmpu.vsvvl" => "__builtin_ve_vl_pvcmpu_vsvvl", + "llvm.ve.vl.pvcmpu.vvvMvl" => "__builtin_ve_vl_pvcmpu_vvvMvl", + "llvm.ve.vl.pvcmpu.vvvl" => "__builtin_ve_vl_pvcmpu_vvvl", + "llvm.ve.vl.pvcmpu.vvvvl" => "__builtin_ve_vl_pvcmpu_vvvvl", + "llvm.ve.vl.pvcvtsw.vvl" => "__builtin_ve_vl_pvcvtsw_vvl", + "llvm.ve.vl.pvcvtsw.vvvl" => "__builtin_ve_vl_pvcvtsw_vvvl", + "llvm.ve.vl.pvcvtws.vvMvl" => "__builtin_ve_vl_pvcvtws_vvMvl", + "llvm.ve.vl.pvcvtws.vvl" => "__builtin_ve_vl_pvcvtws_vvl", + "llvm.ve.vl.pvcvtws.vvvl" => "__builtin_ve_vl_pvcvtws_vvvl", + "llvm.ve.vl.pvcvtwsrz.vvMvl" => "__builtin_ve_vl_pvcvtwsrz_vvMvl", + 
"llvm.ve.vl.pvcvtwsrz.vvl" => "__builtin_ve_vl_pvcvtwsrz_vvl", + "llvm.ve.vl.pvcvtwsrz.vvvl" => "__builtin_ve_vl_pvcvtwsrz_vvvl", + "llvm.ve.vl.pveqv.vsvMvl" => "__builtin_ve_vl_pveqv_vsvMvl", + "llvm.ve.vl.pveqv.vsvl" => "__builtin_ve_vl_pveqv_vsvl", + "llvm.ve.vl.pveqv.vsvvl" => "__builtin_ve_vl_pveqv_vsvvl", + "llvm.ve.vl.pveqv.vvvMvl" => "__builtin_ve_vl_pveqv_vvvMvl", + "llvm.ve.vl.pveqv.vvvl" => "__builtin_ve_vl_pveqv_vvvl", + "llvm.ve.vl.pveqv.vvvvl" => "__builtin_ve_vl_pveqv_vvvvl", + "llvm.ve.vl.pvfadd.vsvMvl" => "__builtin_ve_vl_pvfadd_vsvMvl", + "llvm.ve.vl.pvfadd.vsvl" => "__builtin_ve_vl_pvfadd_vsvl", + "llvm.ve.vl.pvfadd.vsvvl" => "__builtin_ve_vl_pvfadd_vsvvl", + "llvm.ve.vl.pvfadd.vvvMvl" => "__builtin_ve_vl_pvfadd_vvvMvl", + "llvm.ve.vl.pvfadd.vvvl" => "__builtin_ve_vl_pvfadd_vvvl", + "llvm.ve.vl.pvfadd.vvvvl" => "__builtin_ve_vl_pvfadd_vvvvl", + "llvm.ve.vl.pvfcmp.vsvMvl" => "__builtin_ve_vl_pvfcmp_vsvMvl", + "llvm.ve.vl.pvfcmp.vsvl" => "__builtin_ve_vl_pvfcmp_vsvl", + "llvm.ve.vl.pvfcmp.vsvvl" => "__builtin_ve_vl_pvfcmp_vsvvl", + "llvm.ve.vl.pvfcmp.vvvMvl" => "__builtin_ve_vl_pvfcmp_vvvMvl", + "llvm.ve.vl.pvfcmp.vvvl" => "__builtin_ve_vl_pvfcmp_vvvl", + "llvm.ve.vl.pvfcmp.vvvvl" => "__builtin_ve_vl_pvfcmp_vvvvl", + "llvm.ve.vl.pvfmad.vsvvMvl" => "__builtin_ve_vl_pvfmad_vsvvMvl", + "llvm.ve.vl.pvfmad.vsvvl" => "__builtin_ve_vl_pvfmad_vsvvl", + "llvm.ve.vl.pvfmad.vsvvvl" => "__builtin_ve_vl_pvfmad_vsvvvl", + "llvm.ve.vl.pvfmad.vvsvMvl" => "__builtin_ve_vl_pvfmad_vvsvMvl", + "llvm.ve.vl.pvfmad.vvsvl" => "__builtin_ve_vl_pvfmad_vvsvl", + "llvm.ve.vl.pvfmad.vvsvvl" => "__builtin_ve_vl_pvfmad_vvsvvl", + "llvm.ve.vl.pvfmad.vvvvMvl" => "__builtin_ve_vl_pvfmad_vvvvMvl", + "llvm.ve.vl.pvfmad.vvvvl" => "__builtin_ve_vl_pvfmad_vvvvl", + "llvm.ve.vl.pvfmad.vvvvvl" => "__builtin_ve_vl_pvfmad_vvvvvl", + "llvm.ve.vl.pvfmax.vsvMvl" => "__builtin_ve_vl_pvfmax_vsvMvl", + "llvm.ve.vl.pvfmax.vsvl" => "__builtin_ve_vl_pvfmax_vsvl", + "llvm.ve.vl.pvfmax.vsvvl" => "__builtin_ve_vl_pvfmax_vsvvl", + "llvm.ve.vl.pvfmax.vvvMvl" => "__builtin_ve_vl_pvfmax_vvvMvl", + "llvm.ve.vl.pvfmax.vvvl" => "__builtin_ve_vl_pvfmax_vvvl", + "llvm.ve.vl.pvfmax.vvvvl" => "__builtin_ve_vl_pvfmax_vvvvl", + "llvm.ve.vl.pvfmin.vsvMvl" => "__builtin_ve_vl_pvfmin_vsvMvl", + "llvm.ve.vl.pvfmin.vsvl" => "__builtin_ve_vl_pvfmin_vsvl", + "llvm.ve.vl.pvfmin.vsvvl" => "__builtin_ve_vl_pvfmin_vsvvl", + "llvm.ve.vl.pvfmin.vvvMvl" => "__builtin_ve_vl_pvfmin_vvvMvl", + "llvm.ve.vl.pvfmin.vvvl" => "__builtin_ve_vl_pvfmin_vvvl", + "llvm.ve.vl.pvfmin.vvvvl" => "__builtin_ve_vl_pvfmin_vvvvl", + "llvm.ve.vl.pvfmkaf.Ml" => "__builtin_ve_vl_pvfmkaf_Ml", + "llvm.ve.vl.pvfmkat.Ml" => "__builtin_ve_vl_pvfmkat_Ml", + "llvm.ve.vl.pvfmkseq.MvMl" => "__builtin_ve_vl_pvfmkseq_MvMl", + "llvm.ve.vl.pvfmkseq.Mvl" => "__builtin_ve_vl_pvfmkseq_Mvl", + "llvm.ve.vl.pvfmkseqnan.MvMl" => "__builtin_ve_vl_pvfmkseqnan_MvMl", + "llvm.ve.vl.pvfmkseqnan.Mvl" => "__builtin_ve_vl_pvfmkseqnan_Mvl", + "llvm.ve.vl.pvfmksge.MvMl" => "__builtin_ve_vl_pvfmksge_MvMl", + "llvm.ve.vl.pvfmksge.Mvl" => "__builtin_ve_vl_pvfmksge_Mvl", + "llvm.ve.vl.pvfmksgenan.MvMl" => "__builtin_ve_vl_pvfmksgenan_MvMl", + "llvm.ve.vl.pvfmksgenan.Mvl" => "__builtin_ve_vl_pvfmksgenan_Mvl", + "llvm.ve.vl.pvfmksgt.MvMl" => "__builtin_ve_vl_pvfmksgt_MvMl", + "llvm.ve.vl.pvfmksgt.Mvl" => "__builtin_ve_vl_pvfmksgt_Mvl", + "llvm.ve.vl.pvfmksgtnan.MvMl" => "__builtin_ve_vl_pvfmksgtnan_MvMl", + "llvm.ve.vl.pvfmksgtnan.Mvl" => "__builtin_ve_vl_pvfmksgtnan_Mvl", + "llvm.ve.vl.pvfmksle.MvMl" => 
"__builtin_ve_vl_pvfmksle_MvMl", + "llvm.ve.vl.pvfmksle.Mvl" => "__builtin_ve_vl_pvfmksle_Mvl", + "llvm.ve.vl.pvfmkslenan.MvMl" => "__builtin_ve_vl_pvfmkslenan_MvMl", + "llvm.ve.vl.pvfmkslenan.Mvl" => "__builtin_ve_vl_pvfmkslenan_Mvl", + "llvm.ve.vl.pvfmksloeq.mvl" => "__builtin_ve_vl_pvfmksloeq_mvl", + "llvm.ve.vl.pvfmksloeq.mvml" => "__builtin_ve_vl_pvfmksloeq_mvml", + "llvm.ve.vl.pvfmksloeqnan.mvl" => "__builtin_ve_vl_pvfmksloeqnan_mvl", + "llvm.ve.vl.pvfmksloeqnan.mvml" => "__builtin_ve_vl_pvfmksloeqnan_mvml", + "llvm.ve.vl.pvfmksloge.mvl" => "__builtin_ve_vl_pvfmksloge_mvl", + "llvm.ve.vl.pvfmksloge.mvml" => "__builtin_ve_vl_pvfmksloge_mvml", + "llvm.ve.vl.pvfmkslogenan.mvl" => "__builtin_ve_vl_pvfmkslogenan_mvl", + "llvm.ve.vl.pvfmkslogenan.mvml" => "__builtin_ve_vl_pvfmkslogenan_mvml", + "llvm.ve.vl.pvfmkslogt.mvl" => "__builtin_ve_vl_pvfmkslogt_mvl", + "llvm.ve.vl.pvfmkslogt.mvml" => "__builtin_ve_vl_pvfmkslogt_mvml", + "llvm.ve.vl.pvfmkslogtnan.mvl" => "__builtin_ve_vl_pvfmkslogtnan_mvl", + "llvm.ve.vl.pvfmkslogtnan.mvml" => "__builtin_ve_vl_pvfmkslogtnan_mvml", + "llvm.ve.vl.pvfmkslole.mvl" => "__builtin_ve_vl_pvfmkslole_mvl", + "llvm.ve.vl.pvfmkslole.mvml" => "__builtin_ve_vl_pvfmkslole_mvml", + "llvm.ve.vl.pvfmkslolenan.mvl" => "__builtin_ve_vl_pvfmkslolenan_mvl", + "llvm.ve.vl.pvfmkslolenan.mvml" => "__builtin_ve_vl_pvfmkslolenan_mvml", + "llvm.ve.vl.pvfmkslolt.mvl" => "__builtin_ve_vl_pvfmkslolt_mvl", + "llvm.ve.vl.pvfmkslolt.mvml" => "__builtin_ve_vl_pvfmkslolt_mvml", + "llvm.ve.vl.pvfmksloltnan.mvl" => "__builtin_ve_vl_pvfmksloltnan_mvl", + "llvm.ve.vl.pvfmksloltnan.mvml" => "__builtin_ve_vl_pvfmksloltnan_mvml", + "llvm.ve.vl.pvfmkslonan.mvl" => "__builtin_ve_vl_pvfmkslonan_mvl", + "llvm.ve.vl.pvfmkslonan.mvml" => "__builtin_ve_vl_pvfmkslonan_mvml", + "llvm.ve.vl.pvfmkslone.mvl" => "__builtin_ve_vl_pvfmkslone_mvl", + "llvm.ve.vl.pvfmkslone.mvml" => "__builtin_ve_vl_pvfmkslone_mvml", + "llvm.ve.vl.pvfmkslonenan.mvl" => "__builtin_ve_vl_pvfmkslonenan_mvl", + "llvm.ve.vl.pvfmkslonenan.mvml" => "__builtin_ve_vl_pvfmkslonenan_mvml", + "llvm.ve.vl.pvfmkslonum.mvl" => "__builtin_ve_vl_pvfmkslonum_mvl", + "llvm.ve.vl.pvfmkslonum.mvml" => "__builtin_ve_vl_pvfmkslonum_mvml", + "llvm.ve.vl.pvfmkslt.MvMl" => "__builtin_ve_vl_pvfmkslt_MvMl", + "llvm.ve.vl.pvfmkslt.Mvl" => "__builtin_ve_vl_pvfmkslt_Mvl", + "llvm.ve.vl.pvfmksltnan.MvMl" => "__builtin_ve_vl_pvfmksltnan_MvMl", + "llvm.ve.vl.pvfmksltnan.Mvl" => "__builtin_ve_vl_pvfmksltnan_Mvl", + "llvm.ve.vl.pvfmksnan.MvMl" => "__builtin_ve_vl_pvfmksnan_MvMl", + "llvm.ve.vl.pvfmksnan.Mvl" => "__builtin_ve_vl_pvfmksnan_Mvl", + "llvm.ve.vl.pvfmksne.MvMl" => "__builtin_ve_vl_pvfmksne_MvMl", + "llvm.ve.vl.pvfmksne.Mvl" => "__builtin_ve_vl_pvfmksne_Mvl", + "llvm.ve.vl.pvfmksnenan.MvMl" => "__builtin_ve_vl_pvfmksnenan_MvMl", + "llvm.ve.vl.pvfmksnenan.Mvl" => "__builtin_ve_vl_pvfmksnenan_Mvl", + "llvm.ve.vl.pvfmksnum.MvMl" => "__builtin_ve_vl_pvfmksnum_MvMl", + "llvm.ve.vl.pvfmksnum.Mvl" => "__builtin_ve_vl_pvfmksnum_Mvl", + "llvm.ve.vl.pvfmksupeq.mvl" => "__builtin_ve_vl_pvfmksupeq_mvl", + "llvm.ve.vl.pvfmksupeq.mvml" => "__builtin_ve_vl_pvfmksupeq_mvml", + "llvm.ve.vl.pvfmksupeqnan.mvl" => "__builtin_ve_vl_pvfmksupeqnan_mvl", + "llvm.ve.vl.pvfmksupeqnan.mvml" => "__builtin_ve_vl_pvfmksupeqnan_mvml", + "llvm.ve.vl.pvfmksupge.mvl" => "__builtin_ve_vl_pvfmksupge_mvl", + "llvm.ve.vl.pvfmksupge.mvml" => "__builtin_ve_vl_pvfmksupge_mvml", + "llvm.ve.vl.pvfmksupgenan.mvl" => "__builtin_ve_vl_pvfmksupgenan_mvl", + "llvm.ve.vl.pvfmksupgenan.mvml" => 
"__builtin_ve_vl_pvfmksupgenan_mvml", + "llvm.ve.vl.pvfmksupgt.mvl" => "__builtin_ve_vl_pvfmksupgt_mvl", + "llvm.ve.vl.pvfmksupgt.mvml" => "__builtin_ve_vl_pvfmksupgt_mvml", + "llvm.ve.vl.pvfmksupgtnan.mvl" => "__builtin_ve_vl_pvfmksupgtnan_mvl", + "llvm.ve.vl.pvfmksupgtnan.mvml" => "__builtin_ve_vl_pvfmksupgtnan_mvml", + "llvm.ve.vl.pvfmksuple.mvl" => "__builtin_ve_vl_pvfmksuple_mvl", + "llvm.ve.vl.pvfmksuple.mvml" => "__builtin_ve_vl_pvfmksuple_mvml", + "llvm.ve.vl.pvfmksuplenan.mvl" => "__builtin_ve_vl_pvfmksuplenan_mvl", + "llvm.ve.vl.pvfmksuplenan.mvml" => "__builtin_ve_vl_pvfmksuplenan_mvml", + "llvm.ve.vl.pvfmksuplt.mvl" => "__builtin_ve_vl_pvfmksuplt_mvl", + "llvm.ve.vl.pvfmksuplt.mvml" => "__builtin_ve_vl_pvfmksuplt_mvml", + "llvm.ve.vl.pvfmksupltnan.mvl" => "__builtin_ve_vl_pvfmksupltnan_mvl", + "llvm.ve.vl.pvfmksupltnan.mvml" => "__builtin_ve_vl_pvfmksupltnan_mvml", + "llvm.ve.vl.pvfmksupnan.mvl" => "__builtin_ve_vl_pvfmksupnan_mvl", + "llvm.ve.vl.pvfmksupnan.mvml" => "__builtin_ve_vl_pvfmksupnan_mvml", + "llvm.ve.vl.pvfmksupne.mvl" => "__builtin_ve_vl_pvfmksupne_mvl", + "llvm.ve.vl.pvfmksupne.mvml" => "__builtin_ve_vl_pvfmksupne_mvml", + "llvm.ve.vl.pvfmksupnenan.mvl" => "__builtin_ve_vl_pvfmksupnenan_mvl", + "llvm.ve.vl.pvfmksupnenan.mvml" => "__builtin_ve_vl_pvfmksupnenan_mvml", + "llvm.ve.vl.pvfmksupnum.mvl" => "__builtin_ve_vl_pvfmksupnum_mvl", + "llvm.ve.vl.pvfmksupnum.mvml" => "__builtin_ve_vl_pvfmksupnum_mvml", + "llvm.ve.vl.pvfmkweq.MvMl" => "__builtin_ve_vl_pvfmkweq_MvMl", + "llvm.ve.vl.pvfmkweq.Mvl" => "__builtin_ve_vl_pvfmkweq_Mvl", + "llvm.ve.vl.pvfmkweqnan.MvMl" => "__builtin_ve_vl_pvfmkweqnan_MvMl", + "llvm.ve.vl.pvfmkweqnan.Mvl" => "__builtin_ve_vl_pvfmkweqnan_Mvl", + "llvm.ve.vl.pvfmkwge.MvMl" => "__builtin_ve_vl_pvfmkwge_MvMl", + "llvm.ve.vl.pvfmkwge.Mvl" => "__builtin_ve_vl_pvfmkwge_Mvl", + "llvm.ve.vl.pvfmkwgenan.MvMl" => "__builtin_ve_vl_pvfmkwgenan_MvMl", + "llvm.ve.vl.pvfmkwgenan.Mvl" => "__builtin_ve_vl_pvfmkwgenan_Mvl", + "llvm.ve.vl.pvfmkwgt.MvMl" => "__builtin_ve_vl_pvfmkwgt_MvMl", + "llvm.ve.vl.pvfmkwgt.Mvl" => "__builtin_ve_vl_pvfmkwgt_Mvl", + "llvm.ve.vl.pvfmkwgtnan.MvMl" => "__builtin_ve_vl_pvfmkwgtnan_MvMl", + "llvm.ve.vl.pvfmkwgtnan.Mvl" => "__builtin_ve_vl_pvfmkwgtnan_Mvl", + "llvm.ve.vl.pvfmkwle.MvMl" => "__builtin_ve_vl_pvfmkwle_MvMl", + "llvm.ve.vl.pvfmkwle.Mvl" => "__builtin_ve_vl_pvfmkwle_Mvl", + "llvm.ve.vl.pvfmkwlenan.MvMl" => "__builtin_ve_vl_pvfmkwlenan_MvMl", + "llvm.ve.vl.pvfmkwlenan.Mvl" => "__builtin_ve_vl_pvfmkwlenan_Mvl", + "llvm.ve.vl.pvfmkwloeq.mvl" => "__builtin_ve_vl_pvfmkwloeq_mvl", + "llvm.ve.vl.pvfmkwloeq.mvml" => "__builtin_ve_vl_pvfmkwloeq_mvml", + "llvm.ve.vl.pvfmkwloeqnan.mvl" => "__builtin_ve_vl_pvfmkwloeqnan_mvl", + "llvm.ve.vl.pvfmkwloeqnan.mvml" => "__builtin_ve_vl_pvfmkwloeqnan_mvml", + "llvm.ve.vl.pvfmkwloge.mvl" => "__builtin_ve_vl_pvfmkwloge_mvl", + "llvm.ve.vl.pvfmkwloge.mvml" => "__builtin_ve_vl_pvfmkwloge_mvml", + "llvm.ve.vl.pvfmkwlogenan.mvl" => "__builtin_ve_vl_pvfmkwlogenan_mvl", + "llvm.ve.vl.pvfmkwlogenan.mvml" => "__builtin_ve_vl_pvfmkwlogenan_mvml", + "llvm.ve.vl.pvfmkwlogt.mvl" => "__builtin_ve_vl_pvfmkwlogt_mvl", + "llvm.ve.vl.pvfmkwlogt.mvml" => "__builtin_ve_vl_pvfmkwlogt_mvml", + "llvm.ve.vl.pvfmkwlogtnan.mvl" => "__builtin_ve_vl_pvfmkwlogtnan_mvl", + "llvm.ve.vl.pvfmkwlogtnan.mvml" => "__builtin_ve_vl_pvfmkwlogtnan_mvml", + "llvm.ve.vl.pvfmkwlole.mvl" => "__builtin_ve_vl_pvfmkwlole_mvl", + "llvm.ve.vl.pvfmkwlole.mvml" => "__builtin_ve_vl_pvfmkwlole_mvml", + "llvm.ve.vl.pvfmkwlolenan.mvl" => 
"__builtin_ve_vl_pvfmkwlolenan_mvl", + "llvm.ve.vl.pvfmkwlolenan.mvml" => "__builtin_ve_vl_pvfmkwlolenan_mvml", + "llvm.ve.vl.pvfmkwlolt.mvl" => "__builtin_ve_vl_pvfmkwlolt_mvl", + "llvm.ve.vl.pvfmkwlolt.mvml" => "__builtin_ve_vl_pvfmkwlolt_mvml", + "llvm.ve.vl.pvfmkwloltnan.mvl" => "__builtin_ve_vl_pvfmkwloltnan_mvl", + "llvm.ve.vl.pvfmkwloltnan.mvml" => "__builtin_ve_vl_pvfmkwloltnan_mvml", + "llvm.ve.vl.pvfmkwlonan.mvl" => "__builtin_ve_vl_pvfmkwlonan_mvl", + "llvm.ve.vl.pvfmkwlonan.mvml" => "__builtin_ve_vl_pvfmkwlonan_mvml", + "llvm.ve.vl.pvfmkwlone.mvl" => "__builtin_ve_vl_pvfmkwlone_mvl", + "llvm.ve.vl.pvfmkwlone.mvml" => "__builtin_ve_vl_pvfmkwlone_mvml", + "llvm.ve.vl.pvfmkwlonenan.mvl" => "__builtin_ve_vl_pvfmkwlonenan_mvl", + "llvm.ve.vl.pvfmkwlonenan.mvml" => "__builtin_ve_vl_pvfmkwlonenan_mvml", + "llvm.ve.vl.pvfmkwlonum.mvl" => "__builtin_ve_vl_pvfmkwlonum_mvl", + "llvm.ve.vl.pvfmkwlonum.mvml" => "__builtin_ve_vl_pvfmkwlonum_mvml", + "llvm.ve.vl.pvfmkwlt.MvMl" => "__builtin_ve_vl_pvfmkwlt_MvMl", + "llvm.ve.vl.pvfmkwlt.Mvl" => "__builtin_ve_vl_pvfmkwlt_Mvl", + "llvm.ve.vl.pvfmkwltnan.MvMl" => "__builtin_ve_vl_pvfmkwltnan_MvMl", + "llvm.ve.vl.pvfmkwltnan.Mvl" => "__builtin_ve_vl_pvfmkwltnan_Mvl", + "llvm.ve.vl.pvfmkwnan.MvMl" => "__builtin_ve_vl_pvfmkwnan_MvMl", + "llvm.ve.vl.pvfmkwnan.Mvl" => "__builtin_ve_vl_pvfmkwnan_Mvl", + "llvm.ve.vl.pvfmkwne.MvMl" => "__builtin_ve_vl_pvfmkwne_MvMl", + "llvm.ve.vl.pvfmkwne.Mvl" => "__builtin_ve_vl_pvfmkwne_Mvl", + "llvm.ve.vl.pvfmkwnenan.MvMl" => "__builtin_ve_vl_pvfmkwnenan_MvMl", + "llvm.ve.vl.pvfmkwnenan.Mvl" => "__builtin_ve_vl_pvfmkwnenan_Mvl", + "llvm.ve.vl.pvfmkwnum.MvMl" => "__builtin_ve_vl_pvfmkwnum_MvMl", + "llvm.ve.vl.pvfmkwnum.Mvl" => "__builtin_ve_vl_pvfmkwnum_Mvl", + "llvm.ve.vl.pvfmkwupeq.mvl" => "__builtin_ve_vl_pvfmkwupeq_mvl", + "llvm.ve.vl.pvfmkwupeq.mvml" => "__builtin_ve_vl_pvfmkwupeq_mvml", + "llvm.ve.vl.pvfmkwupeqnan.mvl" => "__builtin_ve_vl_pvfmkwupeqnan_mvl", + "llvm.ve.vl.pvfmkwupeqnan.mvml" => "__builtin_ve_vl_pvfmkwupeqnan_mvml", + "llvm.ve.vl.pvfmkwupge.mvl" => "__builtin_ve_vl_pvfmkwupge_mvl", + "llvm.ve.vl.pvfmkwupge.mvml" => "__builtin_ve_vl_pvfmkwupge_mvml", + "llvm.ve.vl.pvfmkwupgenan.mvl" => "__builtin_ve_vl_pvfmkwupgenan_mvl", + "llvm.ve.vl.pvfmkwupgenan.mvml" => "__builtin_ve_vl_pvfmkwupgenan_mvml", + "llvm.ve.vl.pvfmkwupgt.mvl" => "__builtin_ve_vl_pvfmkwupgt_mvl", + "llvm.ve.vl.pvfmkwupgt.mvml" => "__builtin_ve_vl_pvfmkwupgt_mvml", + "llvm.ve.vl.pvfmkwupgtnan.mvl" => "__builtin_ve_vl_pvfmkwupgtnan_mvl", + "llvm.ve.vl.pvfmkwupgtnan.mvml" => "__builtin_ve_vl_pvfmkwupgtnan_mvml", + "llvm.ve.vl.pvfmkwuple.mvl" => "__builtin_ve_vl_pvfmkwuple_mvl", + "llvm.ve.vl.pvfmkwuple.mvml" => "__builtin_ve_vl_pvfmkwuple_mvml", + "llvm.ve.vl.pvfmkwuplenan.mvl" => "__builtin_ve_vl_pvfmkwuplenan_mvl", + "llvm.ve.vl.pvfmkwuplenan.mvml" => "__builtin_ve_vl_pvfmkwuplenan_mvml", + "llvm.ve.vl.pvfmkwuplt.mvl" => "__builtin_ve_vl_pvfmkwuplt_mvl", + "llvm.ve.vl.pvfmkwuplt.mvml" => "__builtin_ve_vl_pvfmkwuplt_mvml", + "llvm.ve.vl.pvfmkwupltnan.mvl" => "__builtin_ve_vl_pvfmkwupltnan_mvl", + "llvm.ve.vl.pvfmkwupltnan.mvml" => "__builtin_ve_vl_pvfmkwupltnan_mvml", + "llvm.ve.vl.pvfmkwupnan.mvl" => "__builtin_ve_vl_pvfmkwupnan_mvl", + "llvm.ve.vl.pvfmkwupnan.mvml" => "__builtin_ve_vl_pvfmkwupnan_mvml", + "llvm.ve.vl.pvfmkwupne.mvl" => "__builtin_ve_vl_pvfmkwupne_mvl", + "llvm.ve.vl.pvfmkwupne.mvml" => "__builtin_ve_vl_pvfmkwupne_mvml", + "llvm.ve.vl.pvfmkwupnenan.mvl" => "__builtin_ve_vl_pvfmkwupnenan_mvl", + 
"llvm.ve.vl.pvfmkwupnenan.mvml" => "__builtin_ve_vl_pvfmkwupnenan_mvml", + "llvm.ve.vl.pvfmkwupnum.mvl" => "__builtin_ve_vl_pvfmkwupnum_mvl", + "llvm.ve.vl.pvfmkwupnum.mvml" => "__builtin_ve_vl_pvfmkwupnum_mvml", + "llvm.ve.vl.pvfmsb.vsvvMvl" => "__builtin_ve_vl_pvfmsb_vsvvMvl", + "llvm.ve.vl.pvfmsb.vsvvl" => "__builtin_ve_vl_pvfmsb_vsvvl", + "llvm.ve.vl.pvfmsb.vsvvvl" => "__builtin_ve_vl_pvfmsb_vsvvvl", + "llvm.ve.vl.pvfmsb.vvsvMvl" => "__builtin_ve_vl_pvfmsb_vvsvMvl", + "llvm.ve.vl.pvfmsb.vvsvl" => "__builtin_ve_vl_pvfmsb_vvsvl", + "llvm.ve.vl.pvfmsb.vvsvvl" => "__builtin_ve_vl_pvfmsb_vvsvvl", + "llvm.ve.vl.pvfmsb.vvvvMvl" => "__builtin_ve_vl_pvfmsb_vvvvMvl", + "llvm.ve.vl.pvfmsb.vvvvl" => "__builtin_ve_vl_pvfmsb_vvvvl", + "llvm.ve.vl.pvfmsb.vvvvvl" => "__builtin_ve_vl_pvfmsb_vvvvvl", + "llvm.ve.vl.pvfmul.vsvMvl" => "__builtin_ve_vl_pvfmul_vsvMvl", + "llvm.ve.vl.pvfmul.vsvl" => "__builtin_ve_vl_pvfmul_vsvl", + "llvm.ve.vl.pvfmul.vsvvl" => "__builtin_ve_vl_pvfmul_vsvvl", + "llvm.ve.vl.pvfmul.vvvMvl" => "__builtin_ve_vl_pvfmul_vvvMvl", + "llvm.ve.vl.pvfmul.vvvl" => "__builtin_ve_vl_pvfmul_vvvl", + "llvm.ve.vl.pvfmul.vvvvl" => "__builtin_ve_vl_pvfmul_vvvvl", + "llvm.ve.vl.pvfnmad.vsvvMvl" => "__builtin_ve_vl_pvfnmad_vsvvMvl", + "llvm.ve.vl.pvfnmad.vsvvl" => "__builtin_ve_vl_pvfnmad_vsvvl", + "llvm.ve.vl.pvfnmad.vsvvvl" => "__builtin_ve_vl_pvfnmad_vsvvvl", + "llvm.ve.vl.pvfnmad.vvsvMvl" => "__builtin_ve_vl_pvfnmad_vvsvMvl", + "llvm.ve.vl.pvfnmad.vvsvl" => "__builtin_ve_vl_pvfnmad_vvsvl", + "llvm.ve.vl.pvfnmad.vvsvvl" => "__builtin_ve_vl_pvfnmad_vvsvvl", + "llvm.ve.vl.pvfnmad.vvvvMvl" => "__builtin_ve_vl_pvfnmad_vvvvMvl", + "llvm.ve.vl.pvfnmad.vvvvl" => "__builtin_ve_vl_pvfnmad_vvvvl", + "llvm.ve.vl.pvfnmad.vvvvvl" => "__builtin_ve_vl_pvfnmad_vvvvvl", + "llvm.ve.vl.pvfnmsb.vsvvMvl" => "__builtin_ve_vl_pvfnmsb_vsvvMvl", + "llvm.ve.vl.pvfnmsb.vsvvl" => "__builtin_ve_vl_pvfnmsb_vsvvl", + "llvm.ve.vl.pvfnmsb.vsvvvl" => "__builtin_ve_vl_pvfnmsb_vsvvvl", + "llvm.ve.vl.pvfnmsb.vvsvMvl" => "__builtin_ve_vl_pvfnmsb_vvsvMvl", + "llvm.ve.vl.pvfnmsb.vvsvl" => "__builtin_ve_vl_pvfnmsb_vvsvl", + "llvm.ve.vl.pvfnmsb.vvsvvl" => "__builtin_ve_vl_pvfnmsb_vvsvvl", + "llvm.ve.vl.pvfnmsb.vvvvMvl" => "__builtin_ve_vl_pvfnmsb_vvvvMvl", + "llvm.ve.vl.pvfnmsb.vvvvl" => "__builtin_ve_vl_pvfnmsb_vvvvl", + "llvm.ve.vl.pvfnmsb.vvvvvl" => "__builtin_ve_vl_pvfnmsb_vvvvvl", + "llvm.ve.vl.pvfsub.vsvMvl" => "__builtin_ve_vl_pvfsub_vsvMvl", + "llvm.ve.vl.pvfsub.vsvl" => "__builtin_ve_vl_pvfsub_vsvl", + "llvm.ve.vl.pvfsub.vsvvl" => "__builtin_ve_vl_pvfsub_vsvvl", + "llvm.ve.vl.pvfsub.vvvMvl" => "__builtin_ve_vl_pvfsub_vvvMvl", + "llvm.ve.vl.pvfsub.vvvl" => "__builtin_ve_vl_pvfsub_vvvl", + "llvm.ve.vl.pvfsub.vvvvl" => "__builtin_ve_vl_pvfsub_vvvvl", + "llvm.ve.vl.pvldz.vvMvl" => "__builtin_ve_vl_pvldz_vvMvl", + "llvm.ve.vl.pvldz.vvl" => "__builtin_ve_vl_pvldz_vvl", + "llvm.ve.vl.pvldz.vvvl" => "__builtin_ve_vl_pvldz_vvvl", + "llvm.ve.vl.pvldzlo.vvl" => "__builtin_ve_vl_pvldzlo_vvl", + "llvm.ve.vl.pvldzlo.vvmvl" => "__builtin_ve_vl_pvldzlo_vvmvl", + "llvm.ve.vl.pvldzlo.vvvl" => "__builtin_ve_vl_pvldzlo_vvvl", + "llvm.ve.vl.pvldzup.vvl" => "__builtin_ve_vl_pvldzup_vvl", + "llvm.ve.vl.pvldzup.vvmvl" => "__builtin_ve_vl_pvldzup_vvmvl", + "llvm.ve.vl.pvldzup.vvvl" => "__builtin_ve_vl_pvldzup_vvvl", + "llvm.ve.vl.pvmaxs.vsvMvl" => "__builtin_ve_vl_pvmaxs_vsvMvl", + "llvm.ve.vl.pvmaxs.vsvl" => "__builtin_ve_vl_pvmaxs_vsvl", + "llvm.ve.vl.pvmaxs.vsvvl" => "__builtin_ve_vl_pvmaxs_vsvvl", + "llvm.ve.vl.pvmaxs.vvvMvl" => 
"__builtin_ve_vl_pvmaxs_vvvMvl", + "llvm.ve.vl.pvmaxs.vvvl" => "__builtin_ve_vl_pvmaxs_vvvl", + "llvm.ve.vl.pvmaxs.vvvvl" => "__builtin_ve_vl_pvmaxs_vvvvl", + "llvm.ve.vl.pvmins.vsvMvl" => "__builtin_ve_vl_pvmins_vsvMvl", + "llvm.ve.vl.pvmins.vsvl" => "__builtin_ve_vl_pvmins_vsvl", + "llvm.ve.vl.pvmins.vsvvl" => "__builtin_ve_vl_pvmins_vsvvl", + "llvm.ve.vl.pvmins.vvvMvl" => "__builtin_ve_vl_pvmins_vvvMvl", + "llvm.ve.vl.pvmins.vvvl" => "__builtin_ve_vl_pvmins_vvvl", + "llvm.ve.vl.pvmins.vvvvl" => "__builtin_ve_vl_pvmins_vvvvl", + "llvm.ve.vl.pvor.vsvMvl" => "__builtin_ve_vl_pvor_vsvMvl", + "llvm.ve.vl.pvor.vsvl" => "__builtin_ve_vl_pvor_vsvl", + "llvm.ve.vl.pvor.vsvvl" => "__builtin_ve_vl_pvor_vsvvl", + "llvm.ve.vl.pvor.vvvMvl" => "__builtin_ve_vl_pvor_vvvMvl", + "llvm.ve.vl.pvor.vvvl" => "__builtin_ve_vl_pvor_vvvl", + "llvm.ve.vl.pvor.vvvvl" => "__builtin_ve_vl_pvor_vvvvl", + "llvm.ve.vl.pvpcnt.vvMvl" => "__builtin_ve_vl_pvpcnt_vvMvl", + "llvm.ve.vl.pvpcnt.vvl" => "__builtin_ve_vl_pvpcnt_vvl", + "llvm.ve.vl.pvpcnt.vvvl" => "__builtin_ve_vl_pvpcnt_vvvl", + "llvm.ve.vl.pvpcntlo.vvl" => "__builtin_ve_vl_pvpcntlo_vvl", + "llvm.ve.vl.pvpcntlo.vvmvl" => "__builtin_ve_vl_pvpcntlo_vvmvl", + "llvm.ve.vl.pvpcntlo.vvvl" => "__builtin_ve_vl_pvpcntlo_vvvl", + "llvm.ve.vl.pvpcntup.vvl" => "__builtin_ve_vl_pvpcntup_vvl", + "llvm.ve.vl.pvpcntup.vvmvl" => "__builtin_ve_vl_pvpcntup_vvmvl", + "llvm.ve.vl.pvpcntup.vvvl" => "__builtin_ve_vl_pvpcntup_vvvl", + "llvm.ve.vl.pvrcp.vvl" => "__builtin_ve_vl_pvrcp_vvl", + "llvm.ve.vl.pvrcp.vvvl" => "__builtin_ve_vl_pvrcp_vvvl", + "llvm.ve.vl.pvrsqrt.vvl" => "__builtin_ve_vl_pvrsqrt_vvl", + "llvm.ve.vl.pvrsqrt.vvvl" => "__builtin_ve_vl_pvrsqrt_vvvl", + "llvm.ve.vl.pvrsqrtnex.vvl" => "__builtin_ve_vl_pvrsqrtnex_vvl", + "llvm.ve.vl.pvrsqrtnex.vvvl" => "__builtin_ve_vl_pvrsqrtnex_vvvl", + "llvm.ve.vl.pvseq.vl" => "__builtin_ve_vl_pvseq_vl", + "llvm.ve.vl.pvseq.vvl" => "__builtin_ve_vl_pvseq_vvl", + "llvm.ve.vl.pvseqlo.vl" => "__builtin_ve_vl_pvseqlo_vl", + "llvm.ve.vl.pvseqlo.vvl" => "__builtin_ve_vl_pvseqlo_vvl", + "llvm.ve.vl.pvsequp.vl" => "__builtin_ve_vl_pvsequp_vl", + "llvm.ve.vl.pvsequp.vvl" => "__builtin_ve_vl_pvsequp_vvl", + "llvm.ve.vl.pvsla.vvsMvl" => "__builtin_ve_vl_pvsla_vvsMvl", + "llvm.ve.vl.pvsla.vvsl" => "__builtin_ve_vl_pvsla_vvsl", + "llvm.ve.vl.pvsla.vvsvl" => "__builtin_ve_vl_pvsla_vvsvl", + "llvm.ve.vl.pvsla.vvvMvl" => "__builtin_ve_vl_pvsla_vvvMvl", + "llvm.ve.vl.pvsla.vvvl" => "__builtin_ve_vl_pvsla_vvvl", + "llvm.ve.vl.pvsla.vvvvl" => "__builtin_ve_vl_pvsla_vvvvl", + "llvm.ve.vl.pvsll.vvsMvl" => "__builtin_ve_vl_pvsll_vvsMvl", + "llvm.ve.vl.pvsll.vvsl" => "__builtin_ve_vl_pvsll_vvsl", + "llvm.ve.vl.pvsll.vvsvl" => "__builtin_ve_vl_pvsll_vvsvl", + "llvm.ve.vl.pvsll.vvvMvl" => "__builtin_ve_vl_pvsll_vvvMvl", + "llvm.ve.vl.pvsll.vvvl" => "__builtin_ve_vl_pvsll_vvvl", + "llvm.ve.vl.pvsll.vvvvl" => "__builtin_ve_vl_pvsll_vvvvl", + "llvm.ve.vl.pvsra.vvsMvl" => "__builtin_ve_vl_pvsra_vvsMvl", + "llvm.ve.vl.pvsra.vvsl" => "__builtin_ve_vl_pvsra_vvsl", + "llvm.ve.vl.pvsra.vvsvl" => "__builtin_ve_vl_pvsra_vvsvl", + "llvm.ve.vl.pvsra.vvvMvl" => "__builtin_ve_vl_pvsra_vvvMvl", + "llvm.ve.vl.pvsra.vvvl" => "__builtin_ve_vl_pvsra_vvvl", + "llvm.ve.vl.pvsra.vvvvl" => "__builtin_ve_vl_pvsra_vvvvl", + "llvm.ve.vl.pvsrl.vvsMvl" => "__builtin_ve_vl_pvsrl_vvsMvl", + "llvm.ve.vl.pvsrl.vvsl" => "__builtin_ve_vl_pvsrl_vvsl", + "llvm.ve.vl.pvsrl.vvsvl" => "__builtin_ve_vl_pvsrl_vvsvl", + "llvm.ve.vl.pvsrl.vvvMvl" => "__builtin_ve_vl_pvsrl_vvvMvl", + 
"llvm.ve.vl.pvsrl.vvvl" => "__builtin_ve_vl_pvsrl_vvvl", + "llvm.ve.vl.pvsrl.vvvvl" => "__builtin_ve_vl_pvsrl_vvvvl", + "llvm.ve.vl.pvsubs.vsvMvl" => "__builtin_ve_vl_pvsubs_vsvMvl", + "llvm.ve.vl.pvsubs.vsvl" => "__builtin_ve_vl_pvsubs_vsvl", + "llvm.ve.vl.pvsubs.vsvvl" => "__builtin_ve_vl_pvsubs_vsvvl", + "llvm.ve.vl.pvsubs.vvvMvl" => "__builtin_ve_vl_pvsubs_vvvMvl", + "llvm.ve.vl.pvsubs.vvvl" => "__builtin_ve_vl_pvsubs_vvvl", + "llvm.ve.vl.pvsubs.vvvvl" => "__builtin_ve_vl_pvsubs_vvvvl", + "llvm.ve.vl.pvsubu.vsvMvl" => "__builtin_ve_vl_pvsubu_vsvMvl", + "llvm.ve.vl.pvsubu.vsvl" => "__builtin_ve_vl_pvsubu_vsvl", + "llvm.ve.vl.pvsubu.vsvvl" => "__builtin_ve_vl_pvsubu_vsvvl", + "llvm.ve.vl.pvsubu.vvvMvl" => "__builtin_ve_vl_pvsubu_vvvMvl", + "llvm.ve.vl.pvsubu.vvvl" => "__builtin_ve_vl_pvsubu_vvvl", + "llvm.ve.vl.pvsubu.vvvvl" => "__builtin_ve_vl_pvsubu_vvvvl", + "llvm.ve.vl.pvxor.vsvMvl" => "__builtin_ve_vl_pvxor_vsvMvl", + "llvm.ve.vl.pvxor.vsvl" => "__builtin_ve_vl_pvxor_vsvl", + "llvm.ve.vl.pvxor.vsvvl" => "__builtin_ve_vl_pvxor_vsvvl", + "llvm.ve.vl.pvxor.vvvMvl" => "__builtin_ve_vl_pvxor_vvvMvl", + "llvm.ve.vl.pvxor.vvvl" => "__builtin_ve_vl_pvxor_vvvl", + "llvm.ve.vl.pvxor.vvvvl" => "__builtin_ve_vl_pvxor_vvvvl", + "llvm.ve.vl.scr.sss" => "__builtin_ve_vl_scr_sss", + "llvm.ve.vl.svm.sMs" => "__builtin_ve_vl_svm_sMs", + "llvm.ve.vl.svm.sms" => "__builtin_ve_vl_svm_sms", + "llvm.ve.vl.svob" => "__builtin_ve_vl_svob", + "llvm.ve.vl.tovm.sml" => "__builtin_ve_vl_tovm_sml", + "llvm.ve.vl.tscr.ssss" => "__builtin_ve_vl_tscr_ssss", + "llvm.ve.vl.vaddsl.vsvl" => "__builtin_ve_vl_vaddsl_vsvl", + "llvm.ve.vl.vaddsl.vsvmvl" => "__builtin_ve_vl_vaddsl_vsvmvl", + "llvm.ve.vl.vaddsl.vsvvl" => "__builtin_ve_vl_vaddsl_vsvvl", + "llvm.ve.vl.vaddsl.vvvl" => "__builtin_ve_vl_vaddsl_vvvl", + "llvm.ve.vl.vaddsl.vvvmvl" => "__builtin_ve_vl_vaddsl_vvvmvl", + "llvm.ve.vl.vaddsl.vvvvl" => "__builtin_ve_vl_vaddsl_vvvvl", + "llvm.ve.vl.vaddswsx.vsvl" => "__builtin_ve_vl_vaddswsx_vsvl", + "llvm.ve.vl.vaddswsx.vsvmvl" => "__builtin_ve_vl_vaddswsx_vsvmvl", + "llvm.ve.vl.vaddswsx.vsvvl" => "__builtin_ve_vl_vaddswsx_vsvvl", + "llvm.ve.vl.vaddswsx.vvvl" => "__builtin_ve_vl_vaddswsx_vvvl", + "llvm.ve.vl.vaddswsx.vvvmvl" => "__builtin_ve_vl_vaddswsx_vvvmvl", + "llvm.ve.vl.vaddswsx.vvvvl" => "__builtin_ve_vl_vaddswsx_vvvvl", + "llvm.ve.vl.vaddswzx.vsvl" => "__builtin_ve_vl_vaddswzx_vsvl", + "llvm.ve.vl.vaddswzx.vsvmvl" => "__builtin_ve_vl_vaddswzx_vsvmvl", + "llvm.ve.vl.vaddswzx.vsvvl" => "__builtin_ve_vl_vaddswzx_vsvvl", + "llvm.ve.vl.vaddswzx.vvvl" => "__builtin_ve_vl_vaddswzx_vvvl", + "llvm.ve.vl.vaddswzx.vvvmvl" => "__builtin_ve_vl_vaddswzx_vvvmvl", + "llvm.ve.vl.vaddswzx.vvvvl" => "__builtin_ve_vl_vaddswzx_vvvvl", + "llvm.ve.vl.vaddul.vsvl" => "__builtin_ve_vl_vaddul_vsvl", + "llvm.ve.vl.vaddul.vsvmvl" => "__builtin_ve_vl_vaddul_vsvmvl", + "llvm.ve.vl.vaddul.vsvvl" => "__builtin_ve_vl_vaddul_vsvvl", + "llvm.ve.vl.vaddul.vvvl" => "__builtin_ve_vl_vaddul_vvvl", + "llvm.ve.vl.vaddul.vvvmvl" => "__builtin_ve_vl_vaddul_vvvmvl", + "llvm.ve.vl.vaddul.vvvvl" => "__builtin_ve_vl_vaddul_vvvvl", + "llvm.ve.vl.vadduw.vsvl" => "__builtin_ve_vl_vadduw_vsvl", + "llvm.ve.vl.vadduw.vsvmvl" => "__builtin_ve_vl_vadduw_vsvmvl", + "llvm.ve.vl.vadduw.vsvvl" => "__builtin_ve_vl_vadduw_vsvvl", + "llvm.ve.vl.vadduw.vvvl" => "__builtin_ve_vl_vadduw_vvvl", + "llvm.ve.vl.vadduw.vvvmvl" => "__builtin_ve_vl_vadduw_vvvmvl", + "llvm.ve.vl.vadduw.vvvvl" => "__builtin_ve_vl_vadduw_vvvvl", + "llvm.ve.vl.vand.vsvl" => "__builtin_ve_vl_vand_vsvl", + 
"llvm.ve.vl.vand.vsvmvl" => "__builtin_ve_vl_vand_vsvmvl", + "llvm.ve.vl.vand.vsvvl" => "__builtin_ve_vl_vand_vsvvl", + "llvm.ve.vl.vand.vvvl" => "__builtin_ve_vl_vand_vvvl", + "llvm.ve.vl.vand.vvvmvl" => "__builtin_ve_vl_vand_vvvmvl", + "llvm.ve.vl.vand.vvvvl" => "__builtin_ve_vl_vand_vvvvl", + "llvm.ve.vl.vbrdd.vsl" => "__builtin_ve_vl_vbrdd_vsl", + "llvm.ve.vl.vbrdd.vsmvl" => "__builtin_ve_vl_vbrdd_vsmvl", + "llvm.ve.vl.vbrdd.vsvl" => "__builtin_ve_vl_vbrdd_vsvl", + "llvm.ve.vl.vbrdl.vsl" => "__builtin_ve_vl_vbrdl_vsl", + "llvm.ve.vl.vbrdl.vsmvl" => "__builtin_ve_vl_vbrdl_vsmvl", + "llvm.ve.vl.vbrdl.vsvl" => "__builtin_ve_vl_vbrdl_vsvl", + "llvm.ve.vl.vbrds.vsl" => "__builtin_ve_vl_vbrds_vsl", + "llvm.ve.vl.vbrds.vsmvl" => "__builtin_ve_vl_vbrds_vsmvl", + "llvm.ve.vl.vbrds.vsvl" => "__builtin_ve_vl_vbrds_vsvl", + "llvm.ve.vl.vbrdw.vsl" => "__builtin_ve_vl_vbrdw_vsl", + "llvm.ve.vl.vbrdw.vsmvl" => "__builtin_ve_vl_vbrdw_vsmvl", + "llvm.ve.vl.vbrdw.vsvl" => "__builtin_ve_vl_vbrdw_vsvl", + "llvm.ve.vl.vbrv.vvl" => "__builtin_ve_vl_vbrv_vvl", + "llvm.ve.vl.vbrv.vvmvl" => "__builtin_ve_vl_vbrv_vvmvl", + "llvm.ve.vl.vbrv.vvvl" => "__builtin_ve_vl_vbrv_vvvl", + "llvm.ve.vl.vcmpsl.vsvl" => "__builtin_ve_vl_vcmpsl_vsvl", + "llvm.ve.vl.vcmpsl.vsvmvl" => "__builtin_ve_vl_vcmpsl_vsvmvl", + "llvm.ve.vl.vcmpsl.vsvvl" => "__builtin_ve_vl_vcmpsl_vsvvl", + "llvm.ve.vl.vcmpsl.vvvl" => "__builtin_ve_vl_vcmpsl_vvvl", + "llvm.ve.vl.vcmpsl.vvvmvl" => "__builtin_ve_vl_vcmpsl_vvvmvl", + "llvm.ve.vl.vcmpsl.vvvvl" => "__builtin_ve_vl_vcmpsl_vvvvl", + "llvm.ve.vl.vcmpswsx.vsvl" => "__builtin_ve_vl_vcmpswsx_vsvl", + "llvm.ve.vl.vcmpswsx.vsvmvl" => "__builtin_ve_vl_vcmpswsx_vsvmvl", + "llvm.ve.vl.vcmpswsx.vsvvl" => "__builtin_ve_vl_vcmpswsx_vsvvl", + "llvm.ve.vl.vcmpswsx.vvvl" => "__builtin_ve_vl_vcmpswsx_vvvl", + "llvm.ve.vl.vcmpswsx.vvvmvl" => "__builtin_ve_vl_vcmpswsx_vvvmvl", + "llvm.ve.vl.vcmpswsx.vvvvl" => "__builtin_ve_vl_vcmpswsx_vvvvl", + "llvm.ve.vl.vcmpswzx.vsvl" => "__builtin_ve_vl_vcmpswzx_vsvl", + "llvm.ve.vl.vcmpswzx.vsvmvl" => "__builtin_ve_vl_vcmpswzx_vsvmvl", + "llvm.ve.vl.vcmpswzx.vsvvl" => "__builtin_ve_vl_vcmpswzx_vsvvl", + "llvm.ve.vl.vcmpswzx.vvvl" => "__builtin_ve_vl_vcmpswzx_vvvl", + "llvm.ve.vl.vcmpswzx.vvvmvl" => "__builtin_ve_vl_vcmpswzx_vvvmvl", + "llvm.ve.vl.vcmpswzx.vvvvl" => "__builtin_ve_vl_vcmpswzx_vvvvl", + "llvm.ve.vl.vcmpul.vsvl" => "__builtin_ve_vl_vcmpul_vsvl", + "llvm.ve.vl.vcmpul.vsvmvl" => "__builtin_ve_vl_vcmpul_vsvmvl", + "llvm.ve.vl.vcmpul.vsvvl" => "__builtin_ve_vl_vcmpul_vsvvl", + "llvm.ve.vl.vcmpul.vvvl" => "__builtin_ve_vl_vcmpul_vvvl", + "llvm.ve.vl.vcmpul.vvvmvl" => "__builtin_ve_vl_vcmpul_vvvmvl", + "llvm.ve.vl.vcmpul.vvvvl" => "__builtin_ve_vl_vcmpul_vvvvl", + "llvm.ve.vl.vcmpuw.vsvl" => "__builtin_ve_vl_vcmpuw_vsvl", + "llvm.ve.vl.vcmpuw.vsvmvl" => "__builtin_ve_vl_vcmpuw_vsvmvl", + "llvm.ve.vl.vcmpuw.vsvvl" => "__builtin_ve_vl_vcmpuw_vsvvl", + "llvm.ve.vl.vcmpuw.vvvl" => "__builtin_ve_vl_vcmpuw_vvvl", + "llvm.ve.vl.vcmpuw.vvvmvl" => "__builtin_ve_vl_vcmpuw_vvvmvl", + "llvm.ve.vl.vcmpuw.vvvvl" => "__builtin_ve_vl_vcmpuw_vvvvl", + "llvm.ve.vl.vcp.vvmvl" => "__builtin_ve_vl_vcp_vvmvl", + "llvm.ve.vl.vcvtdl.vvl" => "__builtin_ve_vl_vcvtdl_vvl", + "llvm.ve.vl.vcvtdl.vvvl" => "__builtin_ve_vl_vcvtdl_vvvl", + "llvm.ve.vl.vcvtds.vvl" => "__builtin_ve_vl_vcvtds_vvl", + "llvm.ve.vl.vcvtds.vvvl" => "__builtin_ve_vl_vcvtds_vvvl", + "llvm.ve.vl.vcvtdw.vvl" => "__builtin_ve_vl_vcvtdw_vvl", + "llvm.ve.vl.vcvtdw.vvvl" => "__builtin_ve_vl_vcvtdw_vvvl", + "llvm.ve.vl.vcvtld.vvl" 
=> "__builtin_ve_vl_vcvtld_vvl", + "llvm.ve.vl.vcvtld.vvmvl" => "__builtin_ve_vl_vcvtld_vvmvl", + "llvm.ve.vl.vcvtld.vvvl" => "__builtin_ve_vl_vcvtld_vvvl", + "llvm.ve.vl.vcvtldrz.vvl" => "__builtin_ve_vl_vcvtldrz_vvl", + "llvm.ve.vl.vcvtldrz.vvmvl" => "__builtin_ve_vl_vcvtldrz_vvmvl", + "llvm.ve.vl.vcvtldrz.vvvl" => "__builtin_ve_vl_vcvtldrz_vvvl", + "llvm.ve.vl.vcvtsd.vvl" => "__builtin_ve_vl_vcvtsd_vvl", + "llvm.ve.vl.vcvtsd.vvvl" => "__builtin_ve_vl_vcvtsd_vvvl", + "llvm.ve.vl.vcvtsw.vvl" => "__builtin_ve_vl_vcvtsw_vvl", + "llvm.ve.vl.vcvtsw.vvvl" => "__builtin_ve_vl_vcvtsw_vvvl", + "llvm.ve.vl.vcvtwdsx.vvl" => "__builtin_ve_vl_vcvtwdsx_vvl", + "llvm.ve.vl.vcvtwdsx.vvmvl" => "__builtin_ve_vl_vcvtwdsx_vvmvl", + "llvm.ve.vl.vcvtwdsx.vvvl" => "__builtin_ve_vl_vcvtwdsx_vvvl", + "llvm.ve.vl.vcvtwdsxrz.vvl" => "__builtin_ve_vl_vcvtwdsxrz_vvl", + "llvm.ve.vl.vcvtwdsxrz.vvmvl" => "__builtin_ve_vl_vcvtwdsxrz_vvmvl", + "llvm.ve.vl.vcvtwdsxrz.vvvl" => "__builtin_ve_vl_vcvtwdsxrz_vvvl", + "llvm.ve.vl.vcvtwdzx.vvl" => "__builtin_ve_vl_vcvtwdzx_vvl", + "llvm.ve.vl.vcvtwdzx.vvmvl" => "__builtin_ve_vl_vcvtwdzx_vvmvl", + "llvm.ve.vl.vcvtwdzx.vvvl" => "__builtin_ve_vl_vcvtwdzx_vvvl", + "llvm.ve.vl.vcvtwdzxrz.vvl" => "__builtin_ve_vl_vcvtwdzxrz_vvl", + "llvm.ve.vl.vcvtwdzxrz.vvmvl" => "__builtin_ve_vl_vcvtwdzxrz_vvmvl", + "llvm.ve.vl.vcvtwdzxrz.vvvl" => "__builtin_ve_vl_vcvtwdzxrz_vvvl", + "llvm.ve.vl.vcvtwssx.vvl" => "__builtin_ve_vl_vcvtwssx_vvl", + "llvm.ve.vl.vcvtwssx.vvmvl" => "__builtin_ve_vl_vcvtwssx_vvmvl", + "llvm.ve.vl.vcvtwssx.vvvl" => "__builtin_ve_vl_vcvtwssx_vvvl", + "llvm.ve.vl.vcvtwssxrz.vvl" => "__builtin_ve_vl_vcvtwssxrz_vvl", + "llvm.ve.vl.vcvtwssxrz.vvmvl" => "__builtin_ve_vl_vcvtwssxrz_vvmvl", + "llvm.ve.vl.vcvtwssxrz.vvvl" => "__builtin_ve_vl_vcvtwssxrz_vvvl", + "llvm.ve.vl.vcvtwszx.vvl" => "__builtin_ve_vl_vcvtwszx_vvl", + "llvm.ve.vl.vcvtwszx.vvmvl" => "__builtin_ve_vl_vcvtwszx_vvmvl", + "llvm.ve.vl.vcvtwszx.vvvl" => "__builtin_ve_vl_vcvtwszx_vvvl", + "llvm.ve.vl.vcvtwszxrz.vvl" => "__builtin_ve_vl_vcvtwszxrz_vvl", + "llvm.ve.vl.vcvtwszxrz.vvmvl" => "__builtin_ve_vl_vcvtwszxrz_vvmvl", + "llvm.ve.vl.vcvtwszxrz.vvvl" => "__builtin_ve_vl_vcvtwszxrz_vvvl", + "llvm.ve.vl.vdivsl.vsvl" => "__builtin_ve_vl_vdivsl_vsvl", + "llvm.ve.vl.vdivsl.vsvmvl" => "__builtin_ve_vl_vdivsl_vsvmvl", + "llvm.ve.vl.vdivsl.vsvvl" => "__builtin_ve_vl_vdivsl_vsvvl", + "llvm.ve.vl.vdivsl.vvsl" => "__builtin_ve_vl_vdivsl_vvsl", + "llvm.ve.vl.vdivsl.vvsmvl" => "__builtin_ve_vl_vdivsl_vvsmvl", + "llvm.ve.vl.vdivsl.vvsvl" => "__builtin_ve_vl_vdivsl_vvsvl", + "llvm.ve.vl.vdivsl.vvvl" => "__builtin_ve_vl_vdivsl_vvvl", + "llvm.ve.vl.vdivsl.vvvmvl" => "__builtin_ve_vl_vdivsl_vvvmvl", + "llvm.ve.vl.vdivsl.vvvvl" => "__builtin_ve_vl_vdivsl_vvvvl", + "llvm.ve.vl.vdivswsx.vsvl" => "__builtin_ve_vl_vdivswsx_vsvl", + "llvm.ve.vl.vdivswsx.vsvmvl" => "__builtin_ve_vl_vdivswsx_vsvmvl", + "llvm.ve.vl.vdivswsx.vsvvl" => "__builtin_ve_vl_vdivswsx_vsvvl", + "llvm.ve.vl.vdivswsx.vvsl" => "__builtin_ve_vl_vdivswsx_vvsl", + "llvm.ve.vl.vdivswsx.vvsmvl" => "__builtin_ve_vl_vdivswsx_vvsmvl", + "llvm.ve.vl.vdivswsx.vvsvl" => "__builtin_ve_vl_vdivswsx_vvsvl", + "llvm.ve.vl.vdivswsx.vvvl" => "__builtin_ve_vl_vdivswsx_vvvl", + "llvm.ve.vl.vdivswsx.vvvmvl" => "__builtin_ve_vl_vdivswsx_vvvmvl", + "llvm.ve.vl.vdivswsx.vvvvl" => "__builtin_ve_vl_vdivswsx_vvvvl", + "llvm.ve.vl.vdivswzx.vsvl" => "__builtin_ve_vl_vdivswzx_vsvl", + "llvm.ve.vl.vdivswzx.vsvmvl" => "__builtin_ve_vl_vdivswzx_vsvmvl", + "llvm.ve.vl.vdivswzx.vsvvl" => 
"__builtin_ve_vl_vdivswzx_vsvvl", + "llvm.ve.vl.vdivswzx.vvsl" => "__builtin_ve_vl_vdivswzx_vvsl", + "llvm.ve.vl.vdivswzx.vvsmvl" => "__builtin_ve_vl_vdivswzx_vvsmvl", + "llvm.ve.vl.vdivswzx.vvsvl" => "__builtin_ve_vl_vdivswzx_vvsvl", + "llvm.ve.vl.vdivswzx.vvvl" => "__builtin_ve_vl_vdivswzx_vvvl", + "llvm.ve.vl.vdivswzx.vvvmvl" => "__builtin_ve_vl_vdivswzx_vvvmvl", + "llvm.ve.vl.vdivswzx.vvvvl" => "__builtin_ve_vl_vdivswzx_vvvvl", + "llvm.ve.vl.vdivul.vsvl" => "__builtin_ve_vl_vdivul_vsvl", + "llvm.ve.vl.vdivul.vsvmvl" => "__builtin_ve_vl_vdivul_vsvmvl", + "llvm.ve.vl.vdivul.vsvvl" => "__builtin_ve_vl_vdivul_vsvvl", + "llvm.ve.vl.vdivul.vvsl" => "__builtin_ve_vl_vdivul_vvsl", + "llvm.ve.vl.vdivul.vvsmvl" => "__builtin_ve_vl_vdivul_vvsmvl", + "llvm.ve.vl.vdivul.vvsvl" => "__builtin_ve_vl_vdivul_vvsvl", + "llvm.ve.vl.vdivul.vvvl" => "__builtin_ve_vl_vdivul_vvvl", + "llvm.ve.vl.vdivul.vvvmvl" => "__builtin_ve_vl_vdivul_vvvmvl", + "llvm.ve.vl.vdivul.vvvvl" => "__builtin_ve_vl_vdivul_vvvvl", + "llvm.ve.vl.vdivuw.vsvl" => "__builtin_ve_vl_vdivuw_vsvl", + "llvm.ve.vl.vdivuw.vsvmvl" => "__builtin_ve_vl_vdivuw_vsvmvl", + "llvm.ve.vl.vdivuw.vsvvl" => "__builtin_ve_vl_vdivuw_vsvvl", + "llvm.ve.vl.vdivuw.vvsl" => "__builtin_ve_vl_vdivuw_vvsl", + "llvm.ve.vl.vdivuw.vvsmvl" => "__builtin_ve_vl_vdivuw_vvsmvl", + "llvm.ve.vl.vdivuw.vvsvl" => "__builtin_ve_vl_vdivuw_vvsvl", + "llvm.ve.vl.vdivuw.vvvl" => "__builtin_ve_vl_vdivuw_vvvl", + "llvm.ve.vl.vdivuw.vvvmvl" => "__builtin_ve_vl_vdivuw_vvvmvl", + "llvm.ve.vl.vdivuw.vvvvl" => "__builtin_ve_vl_vdivuw_vvvvl", + "llvm.ve.vl.veqv.vsvl" => "__builtin_ve_vl_veqv_vsvl", + "llvm.ve.vl.veqv.vsvmvl" => "__builtin_ve_vl_veqv_vsvmvl", + "llvm.ve.vl.veqv.vsvvl" => "__builtin_ve_vl_veqv_vsvvl", + "llvm.ve.vl.veqv.vvvl" => "__builtin_ve_vl_veqv_vvvl", + "llvm.ve.vl.veqv.vvvmvl" => "__builtin_ve_vl_veqv_vvvmvl", + "llvm.ve.vl.veqv.vvvvl" => "__builtin_ve_vl_veqv_vvvvl", + "llvm.ve.vl.vex.vvmvl" => "__builtin_ve_vl_vex_vvmvl", + "llvm.ve.vl.vfaddd.vsvl" => "__builtin_ve_vl_vfaddd_vsvl", + "llvm.ve.vl.vfaddd.vsvmvl" => "__builtin_ve_vl_vfaddd_vsvmvl", + "llvm.ve.vl.vfaddd.vsvvl" => "__builtin_ve_vl_vfaddd_vsvvl", + "llvm.ve.vl.vfaddd.vvvl" => "__builtin_ve_vl_vfaddd_vvvl", + "llvm.ve.vl.vfaddd.vvvmvl" => "__builtin_ve_vl_vfaddd_vvvmvl", + "llvm.ve.vl.vfaddd.vvvvl" => "__builtin_ve_vl_vfaddd_vvvvl", + "llvm.ve.vl.vfadds.vsvl" => "__builtin_ve_vl_vfadds_vsvl", + "llvm.ve.vl.vfadds.vsvmvl" => "__builtin_ve_vl_vfadds_vsvmvl", + "llvm.ve.vl.vfadds.vsvvl" => "__builtin_ve_vl_vfadds_vsvvl", + "llvm.ve.vl.vfadds.vvvl" => "__builtin_ve_vl_vfadds_vvvl", + "llvm.ve.vl.vfadds.vvvmvl" => "__builtin_ve_vl_vfadds_vvvmvl", + "llvm.ve.vl.vfadds.vvvvl" => "__builtin_ve_vl_vfadds_vvvvl", + "llvm.ve.vl.vfcmpd.vsvl" => "__builtin_ve_vl_vfcmpd_vsvl", + "llvm.ve.vl.vfcmpd.vsvmvl" => "__builtin_ve_vl_vfcmpd_vsvmvl", + "llvm.ve.vl.vfcmpd.vsvvl" => "__builtin_ve_vl_vfcmpd_vsvvl", + "llvm.ve.vl.vfcmpd.vvvl" => "__builtin_ve_vl_vfcmpd_vvvl", + "llvm.ve.vl.vfcmpd.vvvmvl" => "__builtin_ve_vl_vfcmpd_vvvmvl", + "llvm.ve.vl.vfcmpd.vvvvl" => "__builtin_ve_vl_vfcmpd_vvvvl", + "llvm.ve.vl.vfcmps.vsvl" => "__builtin_ve_vl_vfcmps_vsvl", + "llvm.ve.vl.vfcmps.vsvmvl" => "__builtin_ve_vl_vfcmps_vsvmvl", + "llvm.ve.vl.vfcmps.vsvvl" => "__builtin_ve_vl_vfcmps_vsvvl", + "llvm.ve.vl.vfcmps.vvvl" => "__builtin_ve_vl_vfcmps_vvvl", + "llvm.ve.vl.vfcmps.vvvmvl" => "__builtin_ve_vl_vfcmps_vvvmvl", + "llvm.ve.vl.vfcmps.vvvvl" => "__builtin_ve_vl_vfcmps_vvvvl", + "llvm.ve.vl.vfdivd.vsvl" => "__builtin_ve_vl_vfdivd_vsvl", 
+ "llvm.ve.vl.vfdivd.vsvmvl" => "__builtin_ve_vl_vfdivd_vsvmvl", + "llvm.ve.vl.vfdivd.vsvvl" => "__builtin_ve_vl_vfdivd_vsvvl", + "llvm.ve.vl.vfdivd.vvvl" => "__builtin_ve_vl_vfdivd_vvvl", + "llvm.ve.vl.vfdivd.vvvmvl" => "__builtin_ve_vl_vfdivd_vvvmvl", + "llvm.ve.vl.vfdivd.vvvvl" => "__builtin_ve_vl_vfdivd_vvvvl", + "llvm.ve.vl.vfdivs.vsvl" => "__builtin_ve_vl_vfdivs_vsvl", + "llvm.ve.vl.vfdivs.vsvmvl" => "__builtin_ve_vl_vfdivs_vsvmvl", + "llvm.ve.vl.vfdivs.vsvvl" => "__builtin_ve_vl_vfdivs_vsvvl", + "llvm.ve.vl.vfdivs.vvvl" => "__builtin_ve_vl_vfdivs_vvvl", + "llvm.ve.vl.vfdivs.vvvmvl" => "__builtin_ve_vl_vfdivs_vvvmvl", + "llvm.ve.vl.vfdivs.vvvvl" => "__builtin_ve_vl_vfdivs_vvvvl", + "llvm.ve.vl.vfmadd.vsvvl" => "__builtin_ve_vl_vfmadd_vsvvl", + "llvm.ve.vl.vfmadd.vsvvmvl" => "__builtin_ve_vl_vfmadd_vsvvmvl", + "llvm.ve.vl.vfmadd.vsvvvl" => "__builtin_ve_vl_vfmadd_vsvvvl", + "llvm.ve.vl.vfmadd.vvsvl" => "__builtin_ve_vl_vfmadd_vvsvl", + "llvm.ve.vl.vfmadd.vvsvmvl" => "__builtin_ve_vl_vfmadd_vvsvmvl", + "llvm.ve.vl.vfmadd.vvsvvl" => "__builtin_ve_vl_vfmadd_vvsvvl", + "llvm.ve.vl.vfmadd.vvvvl" => "__builtin_ve_vl_vfmadd_vvvvl", + "llvm.ve.vl.vfmadd.vvvvmvl" => "__builtin_ve_vl_vfmadd_vvvvmvl", + "llvm.ve.vl.vfmadd.vvvvvl" => "__builtin_ve_vl_vfmadd_vvvvvl", + "llvm.ve.vl.vfmads.vsvvl" => "__builtin_ve_vl_vfmads_vsvvl", + "llvm.ve.vl.vfmads.vsvvmvl" => "__builtin_ve_vl_vfmads_vsvvmvl", + "llvm.ve.vl.vfmads.vsvvvl" => "__builtin_ve_vl_vfmads_vsvvvl", + "llvm.ve.vl.vfmads.vvsvl" => "__builtin_ve_vl_vfmads_vvsvl", + "llvm.ve.vl.vfmads.vvsvmvl" => "__builtin_ve_vl_vfmads_vvsvmvl", + "llvm.ve.vl.vfmads.vvsvvl" => "__builtin_ve_vl_vfmads_vvsvvl", + "llvm.ve.vl.vfmads.vvvvl" => "__builtin_ve_vl_vfmads_vvvvl", + "llvm.ve.vl.vfmads.vvvvmvl" => "__builtin_ve_vl_vfmads_vvvvmvl", + "llvm.ve.vl.vfmads.vvvvvl" => "__builtin_ve_vl_vfmads_vvvvvl", + "llvm.ve.vl.vfmaxd.vsvl" => "__builtin_ve_vl_vfmaxd_vsvl", + "llvm.ve.vl.vfmaxd.vsvmvl" => "__builtin_ve_vl_vfmaxd_vsvmvl", + "llvm.ve.vl.vfmaxd.vsvvl" => "__builtin_ve_vl_vfmaxd_vsvvl", + "llvm.ve.vl.vfmaxd.vvvl" => "__builtin_ve_vl_vfmaxd_vvvl", + "llvm.ve.vl.vfmaxd.vvvmvl" => "__builtin_ve_vl_vfmaxd_vvvmvl", + "llvm.ve.vl.vfmaxd.vvvvl" => "__builtin_ve_vl_vfmaxd_vvvvl", + "llvm.ve.vl.vfmaxs.vsvl" => "__builtin_ve_vl_vfmaxs_vsvl", + "llvm.ve.vl.vfmaxs.vsvmvl" => "__builtin_ve_vl_vfmaxs_vsvmvl", + "llvm.ve.vl.vfmaxs.vsvvl" => "__builtin_ve_vl_vfmaxs_vsvvl", + "llvm.ve.vl.vfmaxs.vvvl" => "__builtin_ve_vl_vfmaxs_vvvl", + "llvm.ve.vl.vfmaxs.vvvmvl" => "__builtin_ve_vl_vfmaxs_vvvmvl", + "llvm.ve.vl.vfmaxs.vvvvl" => "__builtin_ve_vl_vfmaxs_vvvvl", + "llvm.ve.vl.vfmind.vsvl" => "__builtin_ve_vl_vfmind_vsvl", + "llvm.ve.vl.vfmind.vsvmvl" => "__builtin_ve_vl_vfmind_vsvmvl", + "llvm.ve.vl.vfmind.vsvvl" => "__builtin_ve_vl_vfmind_vsvvl", + "llvm.ve.vl.vfmind.vvvl" => "__builtin_ve_vl_vfmind_vvvl", + "llvm.ve.vl.vfmind.vvvmvl" => "__builtin_ve_vl_vfmind_vvvmvl", + "llvm.ve.vl.vfmind.vvvvl" => "__builtin_ve_vl_vfmind_vvvvl", + "llvm.ve.vl.vfmins.vsvl" => "__builtin_ve_vl_vfmins_vsvl", + "llvm.ve.vl.vfmins.vsvmvl" => "__builtin_ve_vl_vfmins_vsvmvl", + "llvm.ve.vl.vfmins.vsvvl" => "__builtin_ve_vl_vfmins_vsvvl", + "llvm.ve.vl.vfmins.vvvl" => "__builtin_ve_vl_vfmins_vvvl", + "llvm.ve.vl.vfmins.vvvmvl" => "__builtin_ve_vl_vfmins_vvvmvl", + "llvm.ve.vl.vfmins.vvvvl" => "__builtin_ve_vl_vfmins_vvvvl", + "llvm.ve.vl.vfmkdeq.mvl" => "__builtin_ve_vl_vfmkdeq_mvl", + "llvm.ve.vl.vfmkdeq.mvml" => "__builtin_ve_vl_vfmkdeq_mvml", + "llvm.ve.vl.vfmkdeqnan.mvl" => 
"__builtin_ve_vl_vfmkdeqnan_mvl", + "llvm.ve.vl.vfmkdeqnan.mvml" => "__builtin_ve_vl_vfmkdeqnan_mvml", + "llvm.ve.vl.vfmkdge.mvl" => "__builtin_ve_vl_vfmkdge_mvl", + "llvm.ve.vl.vfmkdge.mvml" => "__builtin_ve_vl_vfmkdge_mvml", + "llvm.ve.vl.vfmkdgenan.mvl" => "__builtin_ve_vl_vfmkdgenan_mvl", + "llvm.ve.vl.vfmkdgenan.mvml" => "__builtin_ve_vl_vfmkdgenan_mvml", + "llvm.ve.vl.vfmkdgt.mvl" => "__builtin_ve_vl_vfmkdgt_mvl", + "llvm.ve.vl.vfmkdgt.mvml" => "__builtin_ve_vl_vfmkdgt_mvml", + "llvm.ve.vl.vfmkdgtnan.mvl" => "__builtin_ve_vl_vfmkdgtnan_mvl", + "llvm.ve.vl.vfmkdgtnan.mvml" => "__builtin_ve_vl_vfmkdgtnan_mvml", + "llvm.ve.vl.vfmkdle.mvl" => "__builtin_ve_vl_vfmkdle_mvl", + "llvm.ve.vl.vfmkdle.mvml" => "__builtin_ve_vl_vfmkdle_mvml", + "llvm.ve.vl.vfmkdlenan.mvl" => "__builtin_ve_vl_vfmkdlenan_mvl", + "llvm.ve.vl.vfmkdlenan.mvml" => "__builtin_ve_vl_vfmkdlenan_mvml", + "llvm.ve.vl.vfmkdlt.mvl" => "__builtin_ve_vl_vfmkdlt_mvl", + "llvm.ve.vl.vfmkdlt.mvml" => "__builtin_ve_vl_vfmkdlt_mvml", + "llvm.ve.vl.vfmkdltnan.mvl" => "__builtin_ve_vl_vfmkdltnan_mvl", + "llvm.ve.vl.vfmkdltnan.mvml" => "__builtin_ve_vl_vfmkdltnan_mvml", + "llvm.ve.vl.vfmkdnan.mvl" => "__builtin_ve_vl_vfmkdnan_mvl", + "llvm.ve.vl.vfmkdnan.mvml" => "__builtin_ve_vl_vfmkdnan_mvml", + "llvm.ve.vl.vfmkdne.mvl" => "__builtin_ve_vl_vfmkdne_mvl", + "llvm.ve.vl.vfmkdne.mvml" => "__builtin_ve_vl_vfmkdne_mvml", + "llvm.ve.vl.vfmkdnenan.mvl" => "__builtin_ve_vl_vfmkdnenan_mvl", + "llvm.ve.vl.vfmkdnenan.mvml" => "__builtin_ve_vl_vfmkdnenan_mvml", + "llvm.ve.vl.vfmkdnum.mvl" => "__builtin_ve_vl_vfmkdnum_mvl", + "llvm.ve.vl.vfmkdnum.mvml" => "__builtin_ve_vl_vfmkdnum_mvml", + "llvm.ve.vl.vfmklaf.ml" => "__builtin_ve_vl_vfmklaf_ml", + "llvm.ve.vl.vfmklat.ml" => "__builtin_ve_vl_vfmklat_ml", + "llvm.ve.vl.vfmkleq.mvl" => "__builtin_ve_vl_vfmkleq_mvl", + "llvm.ve.vl.vfmkleq.mvml" => "__builtin_ve_vl_vfmkleq_mvml", + "llvm.ve.vl.vfmkleqnan.mvl" => "__builtin_ve_vl_vfmkleqnan_mvl", + "llvm.ve.vl.vfmkleqnan.mvml" => "__builtin_ve_vl_vfmkleqnan_mvml", + "llvm.ve.vl.vfmklge.mvl" => "__builtin_ve_vl_vfmklge_mvl", + "llvm.ve.vl.vfmklge.mvml" => "__builtin_ve_vl_vfmklge_mvml", + "llvm.ve.vl.vfmklgenan.mvl" => "__builtin_ve_vl_vfmklgenan_mvl", + "llvm.ve.vl.vfmklgenan.mvml" => "__builtin_ve_vl_vfmklgenan_mvml", + "llvm.ve.vl.vfmklgt.mvl" => "__builtin_ve_vl_vfmklgt_mvl", + "llvm.ve.vl.vfmklgt.mvml" => "__builtin_ve_vl_vfmklgt_mvml", + "llvm.ve.vl.vfmklgtnan.mvl" => "__builtin_ve_vl_vfmklgtnan_mvl", + "llvm.ve.vl.vfmklgtnan.mvml" => "__builtin_ve_vl_vfmklgtnan_mvml", + "llvm.ve.vl.vfmklle.mvl" => "__builtin_ve_vl_vfmklle_mvl", + "llvm.ve.vl.vfmklle.mvml" => "__builtin_ve_vl_vfmklle_mvml", + "llvm.ve.vl.vfmkllenan.mvl" => "__builtin_ve_vl_vfmkllenan_mvl", + "llvm.ve.vl.vfmkllenan.mvml" => "__builtin_ve_vl_vfmkllenan_mvml", + "llvm.ve.vl.vfmkllt.mvl" => "__builtin_ve_vl_vfmkllt_mvl", + "llvm.ve.vl.vfmkllt.mvml" => "__builtin_ve_vl_vfmkllt_mvml", + "llvm.ve.vl.vfmklltnan.mvl" => "__builtin_ve_vl_vfmklltnan_mvl", + "llvm.ve.vl.vfmklltnan.mvml" => "__builtin_ve_vl_vfmklltnan_mvml", + "llvm.ve.vl.vfmklnan.mvl" => "__builtin_ve_vl_vfmklnan_mvl", + "llvm.ve.vl.vfmklnan.mvml" => "__builtin_ve_vl_vfmklnan_mvml", + "llvm.ve.vl.vfmklne.mvl" => "__builtin_ve_vl_vfmklne_mvl", + "llvm.ve.vl.vfmklne.mvml" => "__builtin_ve_vl_vfmklne_mvml", + "llvm.ve.vl.vfmklnenan.mvl" => "__builtin_ve_vl_vfmklnenan_mvl", + "llvm.ve.vl.vfmklnenan.mvml" => "__builtin_ve_vl_vfmklnenan_mvml", + "llvm.ve.vl.vfmklnum.mvl" => "__builtin_ve_vl_vfmklnum_mvl", + 
"llvm.ve.vl.vfmklnum.mvml" => "__builtin_ve_vl_vfmklnum_mvml", + "llvm.ve.vl.vfmkseq.mvl" => "__builtin_ve_vl_vfmkseq_mvl", + "llvm.ve.vl.vfmkseq.mvml" => "__builtin_ve_vl_vfmkseq_mvml", + "llvm.ve.vl.vfmkseqnan.mvl" => "__builtin_ve_vl_vfmkseqnan_mvl", + "llvm.ve.vl.vfmkseqnan.mvml" => "__builtin_ve_vl_vfmkseqnan_mvml", + "llvm.ve.vl.vfmksge.mvl" => "__builtin_ve_vl_vfmksge_mvl", + "llvm.ve.vl.vfmksge.mvml" => "__builtin_ve_vl_vfmksge_mvml", + "llvm.ve.vl.vfmksgenan.mvl" => "__builtin_ve_vl_vfmksgenan_mvl", + "llvm.ve.vl.vfmksgenan.mvml" => "__builtin_ve_vl_vfmksgenan_mvml", + "llvm.ve.vl.vfmksgt.mvl" => "__builtin_ve_vl_vfmksgt_mvl", + "llvm.ve.vl.vfmksgt.mvml" => "__builtin_ve_vl_vfmksgt_mvml", + "llvm.ve.vl.vfmksgtnan.mvl" => "__builtin_ve_vl_vfmksgtnan_mvl", + "llvm.ve.vl.vfmksgtnan.mvml" => "__builtin_ve_vl_vfmksgtnan_mvml", + "llvm.ve.vl.vfmksle.mvl" => "__builtin_ve_vl_vfmksle_mvl", + "llvm.ve.vl.vfmksle.mvml" => "__builtin_ve_vl_vfmksle_mvml", + "llvm.ve.vl.vfmkslenan.mvl" => "__builtin_ve_vl_vfmkslenan_mvl", + "llvm.ve.vl.vfmkslenan.mvml" => "__builtin_ve_vl_vfmkslenan_mvml", + "llvm.ve.vl.vfmkslt.mvl" => "__builtin_ve_vl_vfmkslt_mvl", + "llvm.ve.vl.vfmkslt.mvml" => "__builtin_ve_vl_vfmkslt_mvml", + "llvm.ve.vl.vfmksltnan.mvl" => "__builtin_ve_vl_vfmksltnan_mvl", + "llvm.ve.vl.vfmksltnan.mvml" => "__builtin_ve_vl_vfmksltnan_mvml", + "llvm.ve.vl.vfmksnan.mvl" => "__builtin_ve_vl_vfmksnan_mvl", + "llvm.ve.vl.vfmksnan.mvml" => "__builtin_ve_vl_vfmksnan_mvml", + "llvm.ve.vl.vfmksne.mvl" => "__builtin_ve_vl_vfmksne_mvl", + "llvm.ve.vl.vfmksne.mvml" => "__builtin_ve_vl_vfmksne_mvml", + "llvm.ve.vl.vfmksnenan.mvl" => "__builtin_ve_vl_vfmksnenan_mvl", + "llvm.ve.vl.vfmksnenan.mvml" => "__builtin_ve_vl_vfmksnenan_mvml", + "llvm.ve.vl.vfmksnum.mvl" => "__builtin_ve_vl_vfmksnum_mvl", + "llvm.ve.vl.vfmksnum.mvml" => "__builtin_ve_vl_vfmksnum_mvml", + "llvm.ve.vl.vfmkweq.mvl" => "__builtin_ve_vl_vfmkweq_mvl", + "llvm.ve.vl.vfmkweq.mvml" => "__builtin_ve_vl_vfmkweq_mvml", + "llvm.ve.vl.vfmkweqnan.mvl" => "__builtin_ve_vl_vfmkweqnan_mvl", + "llvm.ve.vl.vfmkweqnan.mvml" => "__builtin_ve_vl_vfmkweqnan_mvml", + "llvm.ve.vl.vfmkwge.mvl" => "__builtin_ve_vl_vfmkwge_mvl", + "llvm.ve.vl.vfmkwge.mvml" => "__builtin_ve_vl_vfmkwge_mvml", + "llvm.ve.vl.vfmkwgenan.mvl" => "__builtin_ve_vl_vfmkwgenan_mvl", + "llvm.ve.vl.vfmkwgenan.mvml" => "__builtin_ve_vl_vfmkwgenan_mvml", + "llvm.ve.vl.vfmkwgt.mvl" => "__builtin_ve_vl_vfmkwgt_mvl", + "llvm.ve.vl.vfmkwgt.mvml" => "__builtin_ve_vl_vfmkwgt_mvml", + "llvm.ve.vl.vfmkwgtnan.mvl" => "__builtin_ve_vl_vfmkwgtnan_mvl", + "llvm.ve.vl.vfmkwgtnan.mvml" => "__builtin_ve_vl_vfmkwgtnan_mvml", + "llvm.ve.vl.vfmkwle.mvl" => "__builtin_ve_vl_vfmkwle_mvl", + "llvm.ve.vl.vfmkwle.mvml" => "__builtin_ve_vl_vfmkwle_mvml", + "llvm.ve.vl.vfmkwlenan.mvl" => "__builtin_ve_vl_vfmkwlenan_mvl", + "llvm.ve.vl.vfmkwlenan.mvml" => "__builtin_ve_vl_vfmkwlenan_mvml", + "llvm.ve.vl.vfmkwlt.mvl" => "__builtin_ve_vl_vfmkwlt_mvl", + "llvm.ve.vl.vfmkwlt.mvml" => "__builtin_ve_vl_vfmkwlt_mvml", + "llvm.ve.vl.vfmkwltnan.mvl" => "__builtin_ve_vl_vfmkwltnan_mvl", + "llvm.ve.vl.vfmkwltnan.mvml" => "__builtin_ve_vl_vfmkwltnan_mvml", + "llvm.ve.vl.vfmkwnan.mvl" => "__builtin_ve_vl_vfmkwnan_mvl", + "llvm.ve.vl.vfmkwnan.mvml" => "__builtin_ve_vl_vfmkwnan_mvml", + "llvm.ve.vl.vfmkwne.mvl" => "__builtin_ve_vl_vfmkwne_mvl", + "llvm.ve.vl.vfmkwne.mvml" => "__builtin_ve_vl_vfmkwne_mvml", + "llvm.ve.vl.vfmkwnenan.mvl" => "__builtin_ve_vl_vfmkwnenan_mvl", + "llvm.ve.vl.vfmkwnenan.mvml" => 
"__builtin_ve_vl_vfmkwnenan_mvml", + "llvm.ve.vl.vfmkwnum.mvl" => "__builtin_ve_vl_vfmkwnum_mvl", + "llvm.ve.vl.vfmkwnum.mvml" => "__builtin_ve_vl_vfmkwnum_mvml", + "llvm.ve.vl.vfmsbd.vsvvl" => "__builtin_ve_vl_vfmsbd_vsvvl", + "llvm.ve.vl.vfmsbd.vsvvmvl" => "__builtin_ve_vl_vfmsbd_vsvvmvl", + "llvm.ve.vl.vfmsbd.vsvvvl" => "__builtin_ve_vl_vfmsbd_vsvvvl", + "llvm.ve.vl.vfmsbd.vvsvl" => "__builtin_ve_vl_vfmsbd_vvsvl", + "llvm.ve.vl.vfmsbd.vvsvmvl" => "__builtin_ve_vl_vfmsbd_vvsvmvl", + "llvm.ve.vl.vfmsbd.vvsvvl" => "__builtin_ve_vl_vfmsbd_vvsvvl", + "llvm.ve.vl.vfmsbd.vvvvl" => "__builtin_ve_vl_vfmsbd_vvvvl", + "llvm.ve.vl.vfmsbd.vvvvmvl" => "__builtin_ve_vl_vfmsbd_vvvvmvl", + "llvm.ve.vl.vfmsbd.vvvvvl" => "__builtin_ve_vl_vfmsbd_vvvvvl", + "llvm.ve.vl.vfmsbs.vsvvl" => "__builtin_ve_vl_vfmsbs_vsvvl", + "llvm.ve.vl.vfmsbs.vsvvmvl" => "__builtin_ve_vl_vfmsbs_vsvvmvl", + "llvm.ve.vl.vfmsbs.vsvvvl" => "__builtin_ve_vl_vfmsbs_vsvvvl", + "llvm.ve.vl.vfmsbs.vvsvl" => "__builtin_ve_vl_vfmsbs_vvsvl", + "llvm.ve.vl.vfmsbs.vvsvmvl" => "__builtin_ve_vl_vfmsbs_vvsvmvl", + "llvm.ve.vl.vfmsbs.vvsvvl" => "__builtin_ve_vl_vfmsbs_vvsvvl", + "llvm.ve.vl.vfmsbs.vvvvl" => "__builtin_ve_vl_vfmsbs_vvvvl", + "llvm.ve.vl.vfmsbs.vvvvmvl" => "__builtin_ve_vl_vfmsbs_vvvvmvl", + "llvm.ve.vl.vfmsbs.vvvvvl" => "__builtin_ve_vl_vfmsbs_vvvvvl", + "llvm.ve.vl.vfmuld.vsvl" => "__builtin_ve_vl_vfmuld_vsvl", + "llvm.ve.vl.vfmuld.vsvmvl" => "__builtin_ve_vl_vfmuld_vsvmvl", + "llvm.ve.vl.vfmuld.vsvvl" => "__builtin_ve_vl_vfmuld_vsvvl", + "llvm.ve.vl.vfmuld.vvvl" => "__builtin_ve_vl_vfmuld_vvvl", + "llvm.ve.vl.vfmuld.vvvmvl" => "__builtin_ve_vl_vfmuld_vvvmvl", + "llvm.ve.vl.vfmuld.vvvvl" => "__builtin_ve_vl_vfmuld_vvvvl", + "llvm.ve.vl.vfmuls.vsvl" => "__builtin_ve_vl_vfmuls_vsvl", + "llvm.ve.vl.vfmuls.vsvmvl" => "__builtin_ve_vl_vfmuls_vsvmvl", + "llvm.ve.vl.vfmuls.vsvvl" => "__builtin_ve_vl_vfmuls_vsvvl", + "llvm.ve.vl.vfmuls.vvvl" => "__builtin_ve_vl_vfmuls_vvvl", + "llvm.ve.vl.vfmuls.vvvmvl" => "__builtin_ve_vl_vfmuls_vvvmvl", + "llvm.ve.vl.vfmuls.vvvvl" => "__builtin_ve_vl_vfmuls_vvvvl", + "llvm.ve.vl.vfnmadd.vsvvl" => "__builtin_ve_vl_vfnmadd_vsvvl", + "llvm.ve.vl.vfnmadd.vsvvmvl" => "__builtin_ve_vl_vfnmadd_vsvvmvl", + "llvm.ve.vl.vfnmadd.vsvvvl" => "__builtin_ve_vl_vfnmadd_vsvvvl", + "llvm.ve.vl.vfnmadd.vvsvl" => "__builtin_ve_vl_vfnmadd_vvsvl", + "llvm.ve.vl.vfnmadd.vvsvmvl" => "__builtin_ve_vl_vfnmadd_vvsvmvl", + "llvm.ve.vl.vfnmadd.vvsvvl" => "__builtin_ve_vl_vfnmadd_vvsvvl", + "llvm.ve.vl.vfnmadd.vvvvl" => "__builtin_ve_vl_vfnmadd_vvvvl", + "llvm.ve.vl.vfnmadd.vvvvmvl" => "__builtin_ve_vl_vfnmadd_vvvvmvl", + "llvm.ve.vl.vfnmadd.vvvvvl" => "__builtin_ve_vl_vfnmadd_vvvvvl", + "llvm.ve.vl.vfnmads.vsvvl" => "__builtin_ve_vl_vfnmads_vsvvl", + "llvm.ve.vl.vfnmads.vsvvmvl" => "__builtin_ve_vl_vfnmads_vsvvmvl", + "llvm.ve.vl.vfnmads.vsvvvl" => "__builtin_ve_vl_vfnmads_vsvvvl", + "llvm.ve.vl.vfnmads.vvsvl" => "__builtin_ve_vl_vfnmads_vvsvl", + "llvm.ve.vl.vfnmads.vvsvmvl" => "__builtin_ve_vl_vfnmads_vvsvmvl", + "llvm.ve.vl.vfnmads.vvsvvl" => "__builtin_ve_vl_vfnmads_vvsvvl", + "llvm.ve.vl.vfnmads.vvvvl" => "__builtin_ve_vl_vfnmads_vvvvl", + "llvm.ve.vl.vfnmads.vvvvmvl" => "__builtin_ve_vl_vfnmads_vvvvmvl", + "llvm.ve.vl.vfnmads.vvvvvl" => "__builtin_ve_vl_vfnmads_vvvvvl", + "llvm.ve.vl.vfnmsbd.vsvvl" => "__builtin_ve_vl_vfnmsbd_vsvvl", + "llvm.ve.vl.vfnmsbd.vsvvmvl" => "__builtin_ve_vl_vfnmsbd_vsvvmvl", + "llvm.ve.vl.vfnmsbd.vsvvvl" => "__builtin_ve_vl_vfnmsbd_vsvvvl", + "llvm.ve.vl.vfnmsbd.vvsvl" => 
"__builtin_ve_vl_vfnmsbd_vvsvl", + "llvm.ve.vl.vfnmsbd.vvsvmvl" => "__builtin_ve_vl_vfnmsbd_vvsvmvl", + "llvm.ve.vl.vfnmsbd.vvsvvl" => "__builtin_ve_vl_vfnmsbd_vvsvvl", + "llvm.ve.vl.vfnmsbd.vvvvl" => "__builtin_ve_vl_vfnmsbd_vvvvl", + "llvm.ve.vl.vfnmsbd.vvvvmvl" => "__builtin_ve_vl_vfnmsbd_vvvvmvl", + "llvm.ve.vl.vfnmsbd.vvvvvl" => "__builtin_ve_vl_vfnmsbd_vvvvvl", + "llvm.ve.vl.vfnmsbs.vsvvl" => "__builtin_ve_vl_vfnmsbs_vsvvl", + "llvm.ve.vl.vfnmsbs.vsvvmvl" => "__builtin_ve_vl_vfnmsbs_vsvvmvl", + "llvm.ve.vl.vfnmsbs.vsvvvl" => "__builtin_ve_vl_vfnmsbs_vsvvvl", + "llvm.ve.vl.vfnmsbs.vvsvl" => "__builtin_ve_vl_vfnmsbs_vvsvl", + "llvm.ve.vl.vfnmsbs.vvsvmvl" => "__builtin_ve_vl_vfnmsbs_vvsvmvl", + "llvm.ve.vl.vfnmsbs.vvsvvl" => "__builtin_ve_vl_vfnmsbs_vvsvvl", + "llvm.ve.vl.vfnmsbs.vvvvl" => "__builtin_ve_vl_vfnmsbs_vvvvl", + "llvm.ve.vl.vfnmsbs.vvvvmvl" => "__builtin_ve_vl_vfnmsbs_vvvvmvl", + "llvm.ve.vl.vfnmsbs.vvvvvl" => "__builtin_ve_vl_vfnmsbs_vvvvvl", + "llvm.ve.vl.vfrmaxdfst.vvl" => "__builtin_ve_vl_vfrmaxdfst_vvl", + "llvm.ve.vl.vfrmaxdfst.vvvl" => "__builtin_ve_vl_vfrmaxdfst_vvvl", + "llvm.ve.vl.vfrmaxdlst.vvl" => "__builtin_ve_vl_vfrmaxdlst_vvl", + "llvm.ve.vl.vfrmaxdlst.vvvl" => "__builtin_ve_vl_vfrmaxdlst_vvvl", + "llvm.ve.vl.vfrmaxsfst.vvl" => "__builtin_ve_vl_vfrmaxsfst_vvl", + "llvm.ve.vl.vfrmaxsfst.vvvl" => "__builtin_ve_vl_vfrmaxsfst_vvvl", + "llvm.ve.vl.vfrmaxslst.vvl" => "__builtin_ve_vl_vfrmaxslst_vvl", + "llvm.ve.vl.vfrmaxslst.vvvl" => "__builtin_ve_vl_vfrmaxslst_vvvl", + "llvm.ve.vl.vfrmindfst.vvl" => "__builtin_ve_vl_vfrmindfst_vvl", + "llvm.ve.vl.vfrmindfst.vvvl" => "__builtin_ve_vl_vfrmindfst_vvvl", + "llvm.ve.vl.vfrmindlst.vvl" => "__builtin_ve_vl_vfrmindlst_vvl", + "llvm.ve.vl.vfrmindlst.vvvl" => "__builtin_ve_vl_vfrmindlst_vvvl", + "llvm.ve.vl.vfrminsfst.vvl" => "__builtin_ve_vl_vfrminsfst_vvl", + "llvm.ve.vl.vfrminsfst.vvvl" => "__builtin_ve_vl_vfrminsfst_vvvl", + "llvm.ve.vl.vfrminslst.vvl" => "__builtin_ve_vl_vfrminslst_vvl", + "llvm.ve.vl.vfrminslst.vvvl" => "__builtin_ve_vl_vfrminslst_vvvl", + "llvm.ve.vl.vfsqrtd.vvl" => "__builtin_ve_vl_vfsqrtd_vvl", + "llvm.ve.vl.vfsqrtd.vvvl" => "__builtin_ve_vl_vfsqrtd_vvvl", + "llvm.ve.vl.vfsqrts.vvl" => "__builtin_ve_vl_vfsqrts_vvl", + "llvm.ve.vl.vfsqrts.vvvl" => "__builtin_ve_vl_vfsqrts_vvvl", + "llvm.ve.vl.vfsubd.vsvl" => "__builtin_ve_vl_vfsubd_vsvl", + "llvm.ve.vl.vfsubd.vsvmvl" => "__builtin_ve_vl_vfsubd_vsvmvl", + "llvm.ve.vl.vfsubd.vsvvl" => "__builtin_ve_vl_vfsubd_vsvvl", + "llvm.ve.vl.vfsubd.vvvl" => "__builtin_ve_vl_vfsubd_vvvl", + "llvm.ve.vl.vfsubd.vvvmvl" => "__builtin_ve_vl_vfsubd_vvvmvl", + "llvm.ve.vl.vfsubd.vvvvl" => "__builtin_ve_vl_vfsubd_vvvvl", + "llvm.ve.vl.vfsubs.vsvl" => "__builtin_ve_vl_vfsubs_vsvl", + "llvm.ve.vl.vfsubs.vsvmvl" => "__builtin_ve_vl_vfsubs_vsvmvl", + "llvm.ve.vl.vfsubs.vsvvl" => "__builtin_ve_vl_vfsubs_vsvvl", + "llvm.ve.vl.vfsubs.vvvl" => "__builtin_ve_vl_vfsubs_vvvl", + "llvm.ve.vl.vfsubs.vvvmvl" => "__builtin_ve_vl_vfsubs_vvvmvl", + "llvm.ve.vl.vfsubs.vvvvl" => "__builtin_ve_vl_vfsubs_vvvvl", + "llvm.ve.vl.vfsumd.vvl" => "__builtin_ve_vl_vfsumd_vvl", + "llvm.ve.vl.vfsumd.vvml" => "__builtin_ve_vl_vfsumd_vvml", + "llvm.ve.vl.vfsums.vvl" => "__builtin_ve_vl_vfsums_vvl", + "llvm.ve.vl.vfsums.vvml" => "__builtin_ve_vl_vfsums_vvml", + "llvm.ve.vl.vgt.vvssl" => "__builtin_ve_vl_vgt_vvssl", + "llvm.ve.vl.vgt.vvssml" => "__builtin_ve_vl_vgt_vvssml", + "llvm.ve.vl.vgt.vvssmvl" => "__builtin_ve_vl_vgt_vvssmvl", + "llvm.ve.vl.vgt.vvssvl" => "__builtin_ve_vl_vgt_vvssvl", + 
"llvm.ve.vl.vgtlsx.vvssl" => "__builtin_ve_vl_vgtlsx_vvssl", + "llvm.ve.vl.vgtlsx.vvssml" => "__builtin_ve_vl_vgtlsx_vvssml", + "llvm.ve.vl.vgtlsx.vvssmvl" => "__builtin_ve_vl_vgtlsx_vvssmvl", + "llvm.ve.vl.vgtlsx.vvssvl" => "__builtin_ve_vl_vgtlsx_vvssvl", + "llvm.ve.vl.vgtlsxnc.vvssl" => "__builtin_ve_vl_vgtlsxnc_vvssl", + "llvm.ve.vl.vgtlsxnc.vvssml" => "__builtin_ve_vl_vgtlsxnc_vvssml", + "llvm.ve.vl.vgtlsxnc.vvssmvl" => "__builtin_ve_vl_vgtlsxnc_vvssmvl", + "llvm.ve.vl.vgtlsxnc.vvssvl" => "__builtin_ve_vl_vgtlsxnc_vvssvl", + "llvm.ve.vl.vgtlzx.vvssl" => "__builtin_ve_vl_vgtlzx_vvssl", + "llvm.ve.vl.vgtlzx.vvssml" => "__builtin_ve_vl_vgtlzx_vvssml", + "llvm.ve.vl.vgtlzx.vvssmvl" => "__builtin_ve_vl_vgtlzx_vvssmvl", + "llvm.ve.vl.vgtlzx.vvssvl" => "__builtin_ve_vl_vgtlzx_vvssvl", + "llvm.ve.vl.vgtlzxnc.vvssl" => "__builtin_ve_vl_vgtlzxnc_vvssl", + "llvm.ve.vl.vgtlzxnc.vvssml" => "__builtin_ve_vl_vgtlzxnc_vvssml", + "llvm.ve.vl.vgtlzxnc.vvssmvl" => "__builtin_ve_vl_vgtlzxnc_vvssmvl", + "llvm.ve.vl.vgtlzxnc.vvssvl" => "__builtin_ve_vl_vgtlzxnc_vvssvl", + "llvm.ve.vl.vgtnc.vvssl" => "__builtin_ve_vl_vgtnc_vvssl", + "llvm.ve.vl.vgtnc.vvssml" => "__builtin_ve_vl_vgtnc_vvssml", + "llvm.ve.vl.vgtnc.vvssmvl" => "__builtin_ve_vl_vgtnc_vvssmvl", + "llvm.ve.vl.vgtnc.vvssvl" => "__builtin_ve_vl_vgtnc_vvssvl", + "llvm.ve.vl.vgtu.vvssl" => "__builtin_ve_vl_vgtu_vvssl", + "llvm.ve.vl.vgtu.vvssml" => "__builtin_ve_vl_vgtu_vvssml", + "llvm.ve.vl.vgtu.vvssmvl" => "__builtin_ve_vl_vgtu_vvssmvl", + "llvm.ve.vl.vgtu.vvssvl" => "__builtin_ve_vl_vgtu_vvssvl", + "llvm.ve.vl.vgtunc.vvssl" => "__builtin_ve_vl_vgtunc_vvssl", + "llvm.ve.vl.vgtunc.vvssml" => "__builtin_ve_vl_vgtunc_vvssml", + "llvm.ve.vl.vgtunc.vvssmvl" => "__builtin_ve_vl_vgtunc_vvssmvl", + "llvm.ve.vl.vgtunc.vvssvl" => "__builtin_ve_vl_vgtunc_vvssvl", + "llvm.ve.vl.vld.vssl" => "__builtin_ve_vl_vld_vssl", + "llvm.ve.vl.vld.vssvl" => "__builtin_ve_vl_vld_vssvl", + "llvm.ve.vl.vld2d.vssl" => "__builtin_ve_vl_vld2d_vssl", + "llvm.ve.vl.vld2d.vssvl" => "__builtin_ve_vl_vld2d_vssvl", + "llvm.ve.vl.vld2dnc.vssl" => "__builtin_ve_vl_vld2dnc_vssl", + "llvm.ve.vl.vld2dnc.vssvl" => "__builtin_ve_vl_vld2dnc_vssvl", + "llvm.ve.vl.vldl2dsx.vssl" => "__builtin_ve_vl_vldl2dsx_vssl", + "llvm.ve.vl.vldl2dsx.vssvl" => "__builtin_ve_vl_vldl2dsx_vssvl", + "llvm.ve.vl.vldl2dsxnc.vssl" => "__builtin_ve_vl_vldl2dsxnc_vssl", + "llvm.ve.vl.vldl2dsxnc.vssvl" => "__builtin_ve_vl_vldl2dsxnc_vssvl", + "llvm.ve.vl.vldl2dzx.vssl" => "__builtin_ve_vl_vldl2dzx_vssl", + "llvm.ve.vl.vldl2dzx.vssvl" => "__builtin_ve_vl_vldl2dzx_vssvl", + "llvm.ve.vl.vldl2dzxnc.vssl" => "__builtin_ve_vl_vldl2dzxnc_vssl", + "llvm.ve.vl.vldl2dzxnc.vssvl" => "__builtin_ve_vl_vldl2dzxnc_vssvl", + "llvm.ve.vl.vldlsx.vssl" => "__builtin_ve_vl_vldlsx_vssl", + "llvm.ve.vl.vldlsx.vssvl" => "__builtin_ve_vl_vldlsx_vssvl", + "llvm.ve.vl.vldlsxnc.vssl" => "__builtin_ve_vl_vldlsxnc_vssl", + "llvm.ve.vl.vldlsxnc.vssvl" => "__builtin_ve_vl_vldlsxnc_vssvl", + "llvm.ve.vl.vldlzx.vssl" => "__builtin_ve_vl_vldlzx_vssl", + "llvm.ve.vl.vldlzx.vssvl" => "__builtin_ve_vl_vldlzx_vssvl", + "llvm.ve.vl.vldlzxnc.vssl" => "__builtin_ve_vl_vldlzxnc_vssl", + "llvm.ve.vl.vldlzxnc.vssvl" => "__builtin_ve_vl_vldlzxnc_vssvl", + "llvm.ve.vl.vldnc.vssl" => "__builtin_ve_vl_vldnc_vssl", + "llvm.ve.vl.vldnc.vssvl" => "__builtin_ve_vl_vldnc_vssvl", + "llvm.ve.vl.vldu.vssl" => "__builtin_ve_vl_vldu_vssl", + "llvm.ve.vl.vldu.vssvl" => "__builtin_ve_vl_vldu_vssvl", + "llvm.ve.vl.vldu2d.vssl" => "__builtin_ve_vl_vldu2d_vssl", + 
"llvm.ve.vl.vldu2d.vssvl" => "__builtin_ve_vl_vldu2d_vssvl", + "llvm.ve.vl.vldu2dnc.vssl" => "__builtin_ve_vl_vldu2dnc_vssl", + "llvm.ve.vl.vldu2dnc.vssvl" => "__builtin_ve_vl_vldu2dnc_vssvl", + "llvm.ve.vl.vldunc.vssl" => "__builtin_ve_vl_vldunc_vssl", + "llvm.ve.vl.vldunc.vssvl" => "__builtin_ve_vl_vldunc_vssvl", + "llvm.ve.vl.vldz.vvl" => "__builtin_ve_vl_vldz_vvl", + "llvm.ve.vl.vldz.vvmvl" => "__builtin_ve_vl_vldz_vvmvl", + "llvm.ve.vl.vldz.vvvl" => "__builtin_ve_vl_vldz_vvvl", + "llvm.ve.vl.vmaxsl.vsvl" => "__builtin_ve_vl_vmaxsl_vsvl", + "llvm.ve.vl.vmaxsl.vsvmvl" => "__builtin_ve_vl_vmaxsl_vsvmvl", + "llvm.ve.vl.vmaxsl.vsvvl" => "__builtin_ve_vl_vmaxsl_vsvvl", + "llvm.ve.vl.vmaxsl.vvvl" => "__builtin_ve_vl_vmaxsl_vvvl", + "llvm.ve.vl.vmaxsl.vvvmvl" => "__builtin_ve_vl_vmaxsl_vvvmvl", + "llvm.ve.vl.vmaxsl.vvvvl" => "__builtin_ve_vl_vmaxsl_vvvvl", + "llvm.ve.vl.vmaxswsx.vsvl" => "__builtin_ve_vl_vmaxswsx_vsvl", + "llvm.ve.vl.vmaxswsx.vsvmvl" => "__builtin_ve_vl_vmaxswsx_vsvmvl", + "llvm.ve.vl.vmaxswsx.vsvvl" => "__builtin_ve_vl_vmaxswsx_vsvvl", + "llvm.ve.vl.vmaxswsx.vvvl" => "__builtin_ve_vl_vmaxswsx_vvvl", + "llvm.ve.vl.vmaxswsx.vvvmvl" => "__builtin_ve_vl_vmaxswsx_vvvmvl", + "llvm.ve.vl.vmaxswsx.vvvvl" => "__builtin_ve_vl_vmaxswsx_vvvvl", + "llvm.ve.vl.vmaxswzx.vsvl" => "__builtin_ve_vl_vmaxswzx_vsvl", + "llvm.ve.vl.vmaxswzx.vsvmvl" => "__builtin_ve_vl_vmaxswzx_vsvmvl", + "llvm.ve.vl.vmaxswzx.vsvvl" => "__builtin_ve_vl_vmaxswzx_vsvvl", + "llvm.ve.vl.vmaxswzx.vvvl" => "__builtin_ve_vl_vmaxswzx_vvvl", + "llvm.ve.vl.vmaxswzx.vvvmvl" => "__builtin_ve_vl_vmaxswzx_vvvmvl", + "llvm.ve.vl.vmaxswzx.vvvvl" => "__builtin_ve_vl_vmaxswzx_vvvvl", + "llvm.ve.vl.vminsl.vsvl" => "__builtin_ve_vl_vminsl_vsvl", + "llvm.ve.vl.vminsl.vsvmvl" => "__builtin_ve_vl_vminsl_vsvmvl", + "llvm.ve.vl.vminsl.vsvvl" => "__builtin_ve_vl_vminsl_vsvvl", + "llvm.ve.vl.vminsl.vvvl" => "__builtin_ve_vl_vminsl_vvvl", + "llvm.ve.vl.vminsl.vvvmvl" => "__builtin_ve_vl_vminsl_vvvmvl", + "llvm.ve.vl.vminsl.vvvvl" => "__builtin_ve_vl_vminsl_vvvvl", + "llvm.ve.vl.vminswsx.vsvl" => "__builtin_ve_vl_vminswsx_vsvl", + "llvm.ve.vl.vminswsx.vsvmvl" => "__builtin_ve_vl_vminswsx_vsvmvl", + "llvm.ve.vl.vminswsx.vsvvl" => "__builtin_ve_vl_vminswsx_vsvvl", + "llvm.ve.vl.vminswsx.vvvl" => "__builtin_ve_vl_vminswsx_vvvl", + "llvm.ve.vl.vminswsx.vvvmvl" => "__builtin_ve_vl_vminswsx_vvvmvl", + "llvm.ve.vl.vminswsx.vvvvl" => "__builtin_ve_vl_vminswsx_vvvvl", + "llvm.ve.vl.vminswzx.vsvl" => "__builtin_ve_vl_vminswzx_vsvl", + "llvm.ve.vl.vminswzx.vsvmvl" => "__builtin_ve_vl_vminswzx_vsvmvl", + "llvm.ve.vl.vminswzx.vsvvl" => "__builtin_ve_vl_vminswzx_vsvvl", + "llvm.ve.vl.vminswzx.vvvl" => "__builtin_ve_vl_vminswzx_vvvl", + "llvm.ve.vl.vminswzx.vvvmvl" => "__builtin_ve_vl_vminswzx_vvvmvl", + "llvm.ve.vl.vminswzx.vvvvl" => "__builtin_ve_vl_vminswzx_vvvvl", + "llvm.ve.vl.vmrg.vsvml" => "__builtin_ve_vl_vmrg_vsvml", + "llvm.ve.vl.vmrg.vsvmvl" => "__builtin_ve_vl_vmrg_vsvmvl", + "llvm.ve.vl.vmrg.vvvml" => "__builtin_ve_vl_vmrg_vvvml", + "llvm.ve.vl.vmrg.vvvmvl" => "__builtin_ve_vl_vmrg_vvvmvl", + "llvm.ve.vl.vmrgw.vsvMl" => "__builtin_ve_vl_vmrgw_vsvMl", + "llvm.ve.vl.vmrgw.vsvMvl" => "__builtin_ve_vl_vmrgw_vsvMvl", + "llvm.ve.vl.vmrgw.vvvMl" => "__builtin_ve_vl_vmrgw_vvvMl", + "llvm.ve.vl.vmrgw.vvvMvl" => "__builtin_ve_vl_vmrgw_vvvMvl", + "llvm.ve.vl.vmulsl.vsvl" => "__builtin_ve_vl_vmulsl_vsvl", + "llvm.ve.vl.vmulsl.vsvmvl" => "__builtin_ve_vl_vmulsl_vsvmvl", + "llvm.ve.vl.vmulsl.vsvvl" => "__builtin_ve_vl_vmulsl_vsvvl", + 
"llvm.ve.vl.vmulsl.vvvl" => "__builtin_ve_vl_vmulsl_vvvl", + "llvm.ve.vl.vmulsl.vvvmvl" => "__builtin_ve_vl_vmulsl_vvvmvl", + "llvm.ve.vl.vmulsl.vvvvl" => "__builtin_ve_vl_vmulsl_vvvvl", + "llvm.ve.vl.vmulslw.vsvl" => "__builtin_ve_vl_vmulslw_vsvl", + "llvm.ve.vl.vmulslw.vsvvl" => "__builtin_ve_vl_vmulslw_vsvvl", + "llvm.ve.vl.vmulslw.vvvl" => "__builtin_ve_vl_vmulslw_vvvl", + "llvm.ve.vl.vmulslw.vvvvl" => "__builtin_ve_vl_vmulslw_vvvvl", + "llvm.ve.vl.vmulswsx.vsvl" => "__builtin_ve_vl_vmulswsx_vsvl", + "llvm.ve.vl.vmulswsx.vsvmvl" => "__builtin_ve_vl_vmulswsx_vsvmvl", + "llvm.ve.vl.vmulswsx.vsvvl" => "__builtin_ve_vl_vmulswsx_vsvvl", + "llvm.ve.vl.vmulswsx.vvvl" => "__builtin_ve_vl_vmulswsx_vvvl", + "llvm.ve.vl.vmulswsx.vvvmvl" => "__builtin_ve_vl_vmulswsx_vvvmvl", + "llvm.ve.vl.vmulswsx.vvvvl" => "__builtin_ve_vl_vmulswsx_vvvvl", + "llvm.ve.vl.vmulswzx.vsvl" => "__builtin_ve_vl_vmulswzx_vsvl", + "llvm.ve.vl.vmulswzx.vsvmvl" => "__builtin_ve_vl_vmulswzx_vsvmvl", + "llvm.ve.vl.vmulswzx.vsvvl" => "__builtin_ve_vl_vmulswzx_vsvvl", + "llvm.ve.vl.vmulswzx.vvvl" => "__builtin_ve_vl_vmulswzx_vvvl", + "llvm.ve.vl.vmulswzx.vvvmvl" => "__builtin_ve_vl_vmulswzx_vvvmvl", + "llvm.ve.vl.vmulswzx.vvvvl" => "__builtin_ve_vl_vmulswzx_vvvvl", + "llvm.ve.vl.vmulul.vsvl" => "__builtin_ve_vl_vmulul_vsvl", + "llvm.ve.vl.vmulul.vsvmvl" => "__builtin_ve_vl_vmulul_vsvmvl", + "llvm.ve.vl.vmulul.vsvvl" => "__builtin_ve_vl_vmulul_vsvvl", + "llvm.ve.vl.vmulul.vvvl" => "__builtin_ve_vl_vmulul_vvvl", + "llvm.ve.vl.vmulul.vvvmvl" => "__builtin_ve_vl_vmulul_vvvmvl", + "llvm.ve.vl.vmulul.vvvvl" => "__builtin_ve_vl_vmulul_vvvvl", + "llvm.ve.vl.vmuluw.vsvl" => "__builtin_ve_vl_vmuluw_vsvl", + "llvm.ve.vl.vmuluw.vsvmvl" => "__builtin_ve_vl_vmuluw_vsvmvl", + "llvm.ve.vl.vmuluw.vsvvl" => "__builtin_ve_vl_vmuluw_vsvvl", + "llvm.ve.vl.vmuluw.vvvl" => "__builtin_ve_vl_vmuluw_vvvl", + "llvm.ve.vl.vmuluw.vvvmvl" => "__builtin_ve_vl_vmuluw_vvvmvl", + "llvm.ve.vl.vmuluw.vvvvl" => "__builtin_ve_vl_vmuluw_vvvvl", + "llvm.ve.vl.vmv.vsvl" => "__builtin_ve_vl_vmv_vsvl", + "llvm.ve.vl.vmv.vsvmvl" => "__builtin_ve_vl_vmv_vsvmvl", + "llvm.ve.vl.vmv.vsvvl" => "__builtin_ve_vl_vmv_vsvvl", + "llvm.ve.vl.vor.vsvl" => "__builtin_ve_vl_vor_vsvl", + "llvm.ve.vl.vor.vsvmvl" => "__builtin_ve_vl_vor_vsvmvl", + "llvm.ve.vl.vor.vsvvl" => "__builtin_ve_vl_vor_vsvvl", + "llvm.ve.vl.vor.vvvl" => "__builtin_ve_vl_vor_vvvl", + "llvm.ve.vl.vor.vvvmvl" => "__builtin_ve_vl_vor_vvvmvl", + "llvm.ve.vl.vor.vvvvl" => "__builtin_ve_vl_vor_vvvvl", + "llvm.ve.vl.vpcnt.vvl" => "__builtin_ve_vl_vpcnt_vvl", + "llvm.ve.vl.vpcnt.vvmvl" => "__builtin_ve_vl_vpcnt_vvmvl", + "llvm.ve.vl.vpcnt.vvvl" => "__builtin_ve_vl_vpcnt_vvvl", + "llvm.ve.vl.vrand.vvl" => "__builtin_ve_vl_vrand_vvl", + "llvm.ve.vl.vrand.vvml" => "__builtin_ve_vl_vrand_vvml", + "llvm.ve.vl.vrcpd.vvl" => "__builtin_ve_vl_vrcpd_vvl", + "llvm.ve.vl.vrcpd.vvvl" => "__builtin_ve_vl_vrcpd_vvvl", + "llvm.ve.vl.vrcps.vvl" => "__builtin_ve_vl_vrcps_vvl", + "llvm.ve.vl.vrcps.vvvl" => "__builtin_ve_vl_vrcps_vvvl", + "llvm.ve.vl.vrmaxslfst.vvl" => "__builtin_ve_vl_vrmaxslfst_vvl", + "llvm.ve.vl.vrmaxslfst.vvvl" => "__builtin_ve_vl_vrmaxslfst_vvvl", + "llvm.ve.vl.vrmaxsllst.vvl" => "__builtin_ve_vl_vrmaxsllst_vvl", + "llvm.ve.vl.vrmaxsllst.vvvl" => "__builtin_ve_vl_vrmaxsllst_vvvl", + "llvm.ve.vl.vrmaxswfstsx.vvl" => "__builtin_ve_vl_vrmaxswfstsx_vvl", + "llvm.ve.vl.vrmaxswfstsx.vvvl" => "__builtin_ve_vl_vrmaxswfstsx_vvvl", + "llvm.ve.vl.vrmaxswfstzx.vvl" => "__builtin_ve_vl_vrmaxswfstzx_vvl", + 
"llvm.ve.vl.vrmaxswfstzx.vvvl" => "__builtin_ve_vl_vrmaxswfstzx_vvvl", + "llvm.ve.vl.vrmaxswlstsx.vvl" => "__builtin_ve_vl_vrmaxswlstsx_vvl", + "llvm.ve.vl.vrmaxswlstsx.vvvl" => "__builtin_ve_vl_vrmaxswlstsx_vvvl", + "llvm.ve.vl.vrmaxswlstzx.vvl" => "__builtin_ve_vl_vrmaxswlstzx_vvl", + "llvm.ve.vl.vrmaxswlstzx.vvvl" => "__builtin_ve_vl_vrmaxswlstzx_vvvl", + "llvm.ve.vl.vrminslfst.vvl" => "__builtin_ve_vl_vrminslfst_vvl", + "llvm.ve.vl.vrminslfst.vvvl" => "__builtin_ve_vl_vrminslfst_vvvl", + "llvm.ve.vl.vrminsllst.vvl" => "__builtin_ve_vl_vrminsllst_vvl", + "llvm.ve.vl.vrminsllst.vvvl" => "__builtin_ve_vl_vrminsllst_vvvl", + "llvm.ve.vl.vrminswfstsx.vvl" => "__builtin_ve_vl_vrminswfstsx_vvl", + "llvm.ve.vl.vrminswfstsx.vvvl" => "__builtin_ve_vl_vrminswfstsx_vvvl", + "llvm.ve.vl.vrminswfstzx.vvl" => "__builtin_ve_vl_vrminswfstzx_vvl", + "llvm.ve.vl.vrminswfstzx.vvvl" => "__builtin_ve_vl_vrminswfstzx_vvvl", + "llvm.ve.vl.vrminswlstsx.vvl" => "__builtin_ve_vl_vrminswlstsx_vvl", + "llvm.ve.vl.vrminswlstsx.vvvl" => "__builtin_ve_vl_vrminswlstsx_vvvl", + "llvm.ve.vl.vrminswlstzx.vvl" => "__builtin_ve_vl_vrminswlstzx_vvl", + "llvm.ve.vl.vrminswlstzx.vvvl" => "__builtin_ve_vl_vrminswlstzx_vvvl", + "llvm.ve.vl.vror.vvl" => "__builtin_ve_vl_vror_vvl", + "llvm.ve.vl.vror.vvml" => "__builtin_ve_vl_vror_vvml", + "llvm.ve.vl.vrsqrtd.vvl" => "__builtin_ve_vl_vrsqrtd_vvl", + "llvm.ve.vl.vrsqrtd.vvvl" => "__builtin_ve_vl_vrsqrtd_vvvl", + "llvm.ve.vl.vrsqrtdnex.vvl" => "__builtin_ve_vl_vrsqrtdnex_vvl", + "llvm.ve.vl.vrsqrtdnex.vvvl" => "__builtin_ve_vl_vrsqrtdnex_vvvl", + "llvm.ve.vl.vrsqrts.vvl" => "__builtin_ve_vl_vrsqrts_vvl", + "llvm.ve.vl.vrsqrts.vvvl" => "__builtin_ve_vl_vrsqrts_vvvl", + "llvm.ve.vl.vrsqrtsnex.vvl" => "__builtin_ve_vl_vrsqrtsnex_vvl", + "llvm.ve.vl.vrsqrtsnex.vvvl" => "__builtin_ve_vl_vrsqrtsnex_vvvl", + "llvm.ve.vl.vrxor.vvl" => "__builtin_ve_vl_vrxor_vvl", + "llvm.ve.vl.vrxor.vvml" => "__builtin_ve_vl_vrxor_vvml", + "llvm.ve.vl.vsc.vvssl" => "__builtin_ve_vl_vsc_vvssl", + "llvm.ve.vl.vsc.vvssml" => "__builtin_ve_vl_vsc_vvssml", + "llvm.ve.vl.vscl.vvssl" => "__builtin_ve_vl_vscl_vvssl", + "llvm.ve.vl.vscl.vvssml" => "__builtin_ve_vl_vscl_vvssml", + "llvm.ve.vl.vsclnc.vvssl" => "__builtin_ve_vl_vsclnc_vvssl", + "llvm.ve.vl.vsclnc.vvssml" => "__builtin_ve_vl_vsclnc_vvssml", + "llvm.ve.vl.vsclncot.vvssl" => "__builtin_ve_vl_vsclncot_vvssl", + "llvm.ve.vl.vsclncot.vvssml" => "__builtin_ve_vl_vsclncot_vvssml", + "llvm.ve.vl.vsclot.vvssl" => "__builtin_ve_vl_vsclot_vvssl", + "llvm.ve.vl.vsclot.vvssml" => "__builtin_ve_vl_vsclot_vvssml", + "llvm.ve.vl.vscnc.vvssl" => "__builtin_ve_vl_vscnc_vvssl", + "llvm.ve.vl.vscnc.vvssml" => "__builtin_ve_vl_vscnc_vvssml", + "llvm.ve.vl.vscncot.vvssl" => "__builtin_ve_vl_vscncot_vvssl", + "llvm.ve.vl.vscncot.vvssml" => "__builtin_ve_vl_vscncot_vvssml", + "llvm.ve.vl.vscot.vvssl" => "__builtin_ve_vl_vscot_vvssl", + "llvm.ve.vl.vscot.vvssml" => "__builtin_ve_vl_vscot_vvssml", + "llvm.ve.vl.vscu.vvssl" => "__builtin_ve_vl_vscu_vvssl", + "llvm.ve.vl.vscu.vvssml" => "__builtin_ve_vl_vscu_vvssml", + "llvm.ve.vl.vscunc.vvssl" => "__builtin_ve_vl_vscunc_vvssl", + "llvm.ve.vl.vscunc.vvssml" => "__builtin_ve_vl_vscunc_vvssml", + "llvm.ve.vl.vscuncot.vvssl" => "__builtin_ve_vl_vscuncot_vvssl", + "llvm.ve.vl.vscuncot.vvssml" => "__builtin_ve_vl_vscuncot_vvssml", + "llvm.ve.vl.vscuot.vvssl" => "__builtin_ve_vl_vscuot_vvssl", + "llvm.ve.vl.vscuot.vvssml" => "__builtin_ve_vl_vscuot_vvssml", + "llvm.ve.vl.vseq.vl" => "__builtin_ve_vl_vseq_vl", + "llvm.ve.vl.vseq.vvl" => 
"__builtin_ve_vl_vseq_vvl", + "llvm.ve.vl.vsfa.vvssl" => "__builtin_ve_vl_vsfa_vvssl", + "llvm.ve.vl.vsfa.vvssmvl" => "__builtin_ve_vl_vsfa_vvssmvl", + "llvm.ve.vl.vsfa.vvssvl" => "__builtin_ve_vl_vsfa_vvssvl", + "llvm.ve.vl.vshf.vvvsl" => "__builtin_ve_vl_vshf_vvvsl", + "llvm.ve.vl.vshf.vvvsvl" => "__builtin_ve_vl_vshf_vvvsvl", + "llvm.ve.vl.vslal.vvsl" => "__builtin_ve_vl_vslal_vvsl", + "llvm.ve.vl.vslal.vvsmvl" => "__builtin_ve_vl_vslal_vvsmvl", + "llvm.ve.vl.vslal.vvsvl" => "__builtin_ve_vl_vslal_vvsvl", + "llvm.ve.vl.vslal.vvvl" => "__builtin_ve_vl_vslal_vvvl", + "llvm.ve.vl.vslal.vvvmvl" => "__builtin_ve_vl_vslal_vvvmvl", + "llvm.ve.vl.vslal.vvvvl" => "__builtin_ve_vl_vslal_vvvvl", + "llvm.ve.vl.vslawsx.vvsl" => "__builtin_ve_vl_vslawsx_vvsl", + "llvm.ve.vl.vslawsx.vvsmvl" => "__builtin_ve_vl_vslawsx_vvsmvl", + "llvm.ve.vl.vslawsx.vvsvl" => "__builtin_ve_vl_vslawsx_vvsvl", + "llvm.ve.vl.vslawsx.vvvl" => "__builtin_ve_vl_vslawsx_vvvl", + "llvm.ve.vl.vslawsx.vvvmvl" => "__builtin_ve_vl_vslawsx_vvvmvl", + "llvm.ve.vl.vslawsx.vvvvl" => "__builtin_ve_vl_vslawsx_vvvvl", + "llvm.ve.vl.vslawzx.vvsl" => "__builtin_ve_vl_vslawzx_vvsl", + "llvm.ve.vl.vslawzx.vvsmvl" => "__builtin_ve_vl_vslawzx_vvsmvl", + "llvm.ve.vl.vslawzx.vvsvl" => "__builtin_ve_vl_vslawzx_vvsvl", + "llvm.ve.vl.vslawzx.vvvl" => "__builtin_ve_vl_vslawzx_vvvl", + "llvm.ve.vl.vslawzx.vvvmvl" => "__builtin_ve_vl_vslawzx_vvvmvl", + "llvm.ve.vl.vslawzx.vvvvl" => "__builtin_ve_vl_vslawzx_vvvvl", + "llvm.ve.vl.vsll.vvsl" => "__builtin_ve_vl_vsll_vvsl", + "llvm.ve.vl.vsll.vvsmvl" => "__builtin_ve_vl_vsll_vvsmvl", + "llvm.ve.vl.vsll.vvsvl" => "__builtin_ve_vl_vsll_vvsvl", + "llvm.ve.vl.vsll.vvvl" => "__builtin_ve_vl_vsll_vvvl", + "llvm.ve.vl.vsll.vvvmvl" => "__builtin_ve_vl_vsll_vvvmvl", + "llvm.ve.vl.vsll.vvvvl" => "__builtin_ve_vl_vsll_vvvvl", + "llvm.ve.vl.vsral.vvsl" => "__builtin_ve_vl_vsral_vvsl", + "llvm.ve.vl.vsral.vvsmvl" => "__builtin_ve_vl_vsral_vvsmvl", + "llvm.ve.vl.vsral.vvsvl" => "__builtin_ve_vl_vsral_vvsvl", + "llvm.ve.vl.vsral.vvvl" => "__builtin_ve_vl_vsral_vvvl", + "llvm.ve.vl.vsral.vvvmvl" => "__builtin_ve_vl_vsral_vvvmvl", + "llvm.ve.vl.vsral.vvvvl" => "__builtin_ve_vl_vsral_vvvvl", + "llvm.ve.vl.vsrawsx.vvsl" => "__builtin_ve_vl_vsrawsx_vvsl", + "llvm.ve.vl.vsrawsx.vvsmvl" => "__builtin_ve_vl_vsrawsx_vvsmvl", + "llvm.ve.vl.vsrawsx.vvsvl" => "__builtin_ve_vl_vsrawsx_vvsvl", + "llvm.ve.vl.vsrawsx.vvvl" => "__builtin_ve_vl_vsrawsx_vvvl", + "llvm.ve.vl.vsrawsx.vvvmvl" => "__builtin_ve_vl_vsrawsx_vvvmvl", + "llvm.ve.vl.vsrawsx.vvvvl" => "__builtin_ve_vl_vsrawsx_vvvvl", + "llvm.ve.vl.vsrawzx.vvsl" => "__builtin_ve_vl_vsrawzx_vvsl", + "llvm.ve.vl.vsrawzx.vvsmvl" => "__builtin_ve_vl_vsrawzx_vvsmvl", + "llvm.ve.vl.vsrawzx.vvsvl" => "__builtin_ve_vl_vsrawzx_vvsvl", + "llvm.ve.vl.vsrawzx.vvvl" => "__builtin_ve_vl_vsrawzx_vvvl", + "llvm.ve.vl.vsrawzx.vvvmvl" => "__builtin_ve_vl_vsrawzx_vvvmvl", + "llvm.ve.vl.vsrawzx.vvvvl" => "__builtin_ve_vl_vsrawzx_vvvvl", + "llvm.ve.vl.vsrl.vvsl" => "__builtin_ve_vl_vsrl_vvsl", + "llvm.ve.vl.vsrl.vvsmvl" => "__builtin_ve_vl_vsrl_vvsmvl", + "llvm.ve.vl.vsrl.vvsvl" => "__builtin_ve_vl_vsrl_vvsvl", + "llvm.ve.vl.vsrl.vvvl" => "__builtin_ve_vl_vsrl_vvvl", + "llvm.ve.vl.vsrl.vvvmvl" => "__builtin_ve_vl_vsrl_vvvmvl", + "llvm.ve.vl.vsrl.vvvvl" => "__builtin_ve_vl_vsrl_vvvvl", + "llvm.ve.vl.vst.vssl" => "__builtin_ve_vl_vst_vssl", + "llvm.ve.vl.vst.vssml" => "__builtin_ve_vl_vst_vssml", + "llvm.ve.vl.vst2d.vssl" => "__builtin_ve_vl_vst2d_vssl", + "llvm.ve.vl.vst2d.vssml" => 
"__builtin_ve_vl_vst2d_vssml", + "llvm.ve.vl.vst2dnc.vssl" => "__builtin_ve_vl_vst2dnc_vssl", + "llvm.ve.vl.vst2dnc.vssml" => "__builtin_ve_vl_vst2dnc_vssml", + "llvm.ve.vl.vst2dncot.vssl" => "__builtin_ve_vl_vst2dncot_vssl", + "llvm.ve.vl.vst2dncot.vssml" => "__builtin_ve_vl_vst2dncot_vssml", + "llvm.ve.vl.vst2dot.vssl" => "__builtin_ve_vl_vst2dot_vssl", + "llvm.ve.vl.vst2dot.vssml" => "__builtin_ve_vl_vst2dot_vssml", + "llvm.ve.vl.vstl.vssl" => "__builtin_ve_vl_vstl_vssl", + "llvm.ve.vl.vstl.vssml" => "__builtin_ve_vl_vstl_vssml", + "llvm.ve.vl.vstl2d.vssl" => "__builtin_ve_vl_vstl2d_vssl", + "llvm.ve.vl.vstl2d.vssml" => "__builtin_ve_vl_vstl2d_vssml", + "llvm.ve.vl.vstl2dnc.vssl" => "__builtin_ve_vl_vstl2dnc_vssl", + "llvm.ve.vl.vstl2dnc.vssml" => "__builtin_ve_vl_vstl2dnc_vssml", + "llvm.ve.vl.vstl2dncot.vssl" => "__builtin_ve_vl_vstl2dncot_vssl", + "llvm.ve.vl.vstl2dncot.vssml" => "__builtin_ve_vl_vstl2dncot_vssml", + "llvm.ve.vl.vstl2dot.vssl" => "__builtin_ve_vl_vstl2dot_vssl", + "llvm.ve.vl.vstl2dot.vssml" => "__builtin_ve_vl_vstl2dot_vssml", + "llvm.ve.vl.vstlnc.vssl" => "__builtin_ve_vl_vstlnc_vssl", + "llvm.ve.vl.vstlnc.vssml" => "__builtin_ve_vl_vstlnc_vssml", + "llvm.ve.vl.vstlncot.vssl" => "__builtin_ve_vl_vstlncot_vssl", + "llvm.ve.vl.vstlncot.vssml" => "__builtin_ve_vl_vstlncot_vssml", + "llvm.ve.vl.vstlot.vssl" => "__builtin_ve_vl_vstlot_vssl", + "llvm.ve.vl.vstlot.vssml" => "__builtin_ve_vl_vstlot_vssml", + "llvm.ve.vl.vstnc.vssl" => "__builtin_ve_vl_vstnc_vssl", + "llvm.ve.vl.vstnc.vssml" => "__builtin_ve_vl_vstnc_vssml", + "llvm.ve.vl.vstncot.vssl" => "__builtin_ve_vl_vstncot_vssl", + "llvm.ve.vl.vstncot.vssml" => "__builtin_ve_vl_vstncot_vssml", + "llvm.ve.vl.vstot.vssl" => "__builtin_ve_vl_vstot_vssl", + "llvm.ve.vl.vstot.vssml" => "__builtin_ve_vl_vstot_vssml", + "llvm.ve.vl.vstu.vssl" => "__builtin_ve_vl_vstu_vssl", + "llvm.ve.vl.vstu.vssml" => "__builtin_ve_vl_vstu_vssml", + "llvm.ve.vl.vstu2d.vssl" => "__builtin_ve_vl_vstu2d_vssl", + "llvm.ve.vl.vstu2d.vssml" => "__builtin_ve_vl_vstu2d_vssml", + "llvm.ve.vl.vstu2dnc.vssl" => "__builtin_ve_vl_vstu2dnc_vssl", + "llvm.ve.vl.vstu2dnc.vssml" => "__builtin_ve_vl_vstu2dnc_vssml", + "llvm.ve.vl.vstu2dncot.vssl" => "__builtin_ve_vl_vstu2dncot_vssl", + "llvm.ve.vl.vstu2dncot.vssml" => "__builtin_ve_vl_vstu2dncot_vssml", + "llvm.ve.vl.vstu2dot.vssl" => "__builtin_ve_vl_vstu2dot_vssl", + "llvm.ve.vl.vstu2dot.vssml" => "__builtin_ve_vl_vstu2dot_vssml", + "llvm.ve.vl.vstunc.vssl" => "__builtin_ve_vl_vstunc_vssl", + "llvm.ve.vl.vstunc.vssml" => "__builtin_ve_vl_vstunc_vssml", + "llvm.ve.vl.vstuncot.vssl" => "__builtin_ve_vl_vstuncot_vssl", + "llvm.ve.vl.vstuncot.vssml" => "__builtin_ve_vl_vstuncot_vssml", + "llvm.ve.vl.vstuot.vssl" => "__builtin_ve_vl_vstuot_vssl", + "llvm.ve.vl.vstuot.vssml" => "__builtin_ve_vl_vstuot_vssml", + "llvm.ve.vl.vsubsl.vsvl" => "__builtin_ve_vl_vsubsl_vsvl", + "llvm.ve.vl.vsubsl.vsvmvl" => "__builtin_ve_vl_vsubsl_vsvmvl", + "llvm.ve.vl.vsubsl.vsvvl" => "__builtin_ve_vl_vsubsl_vsvvl", + "llvm.ve.vl.vsubsl.vvvl" => "__builtin_ve_vl_vsubsl_vvvl", + "llvm.ve.vl.vsubsl.vvvmvl" => "__builtin_ve_vl_vsubsl_vvvmvl", + "llvm.ve.vl.vsubsl.vvvvl" => "__builtin_ve_vl_vsubsl_vvvvl", + "llvm.ve.vl.vsubswsx.vsvl" => "__builtin_ve_vl_vsubswsx_vsvl", + "llvm.ve.vl.vsubswsx.vsvmvl" => "__builtin_ve_vl_vsubswsx_vsvmvl", + "llvm.ve.vl.vsubswsx.vsvvl" => "__builtin_ve_vl_vsubswsx_vsvvl", + "llvm.ve.vl.vsubswsx.vvvl" => "__builtin_ve_vl_vsubswsx_vvvl", + "llvm.ve.vl.vsubswsx.vvvmvl" => "__builtin_ve_vl_vsubswsx_vvvmvl", + 
"llvm.ve.vl.vsubswsx.vvvvl" => "__builtin_ve_vl_vsubswsx_vvvvl", + "llvm.ve.vl.vsubswzx.vsvl" => "__builtin_ve_vl_vsubswzx_vsvl", + "llvm.ve.vl.vsubswzx.vsvmvl" => "__builtin_ve_vl_vsubswzx_vsvmvl", + "llvm.ve.vl.vsubswzx.vsvvl" => "__builtin_ve_vl_vsubswzx_vsvvl", + "llvm.ve.vl.vsubswzx.vvvl" => "__builtin_ve_vl_vsubswzx_vvvl", + "llvm.ve.vl.vsubswzx.vvvmvl" => "__builtin_ve_vl_vsubswzx_vvvmvl", + "llvm.ve.vl.vsubswzx.vvvvl" => "__builtin_ve_vl_vsubswzx_vvvvl", + "llvm.ve.vl.vsubul.vsvl" => "__builtin_ve_vl_vsubul_vsvl", + "llvm.ve.vl.vsubul.vsvmvl" => "__builtin_ve_vl_vsubul_vsvmvl", + "llvm.ve.vl.vsubul.vsvvl" => "__builtin_ve_vl_vsubul_vsvvl", + "llvm.ve.vl.vsubul.vvvl" => "__builtin_ve_vl_vsubul_vvvl", + "llvm.ve.vl.vsubul.vvvmvl" => "__builtin_ve_vl_vsubul_vvvmvl", + "llvm.ve.vl.vsubul.vvvvl" => "__builtin_ve_vl_vsubul_vvvvl", + "llvm.ve.vl.vsubuw.vsvl" => "__builtin_ve_vl_vsubuw_vsvl", + "llvm.ve.vl.vsubuw.vsvmvl" => "__builtin_ve_vl_vsubuw_vsvmvl", + "llvm.ve.vl.vsubuw.vsvvl" => "__builtin_ve_vl_vsubuw_vsvvl", + "llvm.ve.vl.vsubuw.vvvl" => "__builtin_ve_vl_vsubuw_vvvl", + "llvm.ve.vl.vsubuw.vvvmvl" => "__builtin_ve_vl_vsubuw_vvvmvl", + "llvm.ve.vl.vsubuw.vvvvl" => "__builtin_ve_vl_vsubuw_vvvvl", + "llvm.ve.vl.vsuml.vvl" => "__builtin_ve_vl_vsuml_vvl", + "llvm.ve.vl.vsuml.vvml" => "__builtin_ve_vl_vsuml_vvml", + "llvm.ve.vl.vsumwsx.vvl" => "__builtin_ve_vl_vsumwsx_vvl", + "llvm.ve.vl.vsumwsx.vvml" => "__builtin_ve_vl_vsumwsx_vvml", + "llvm.ve.vl.vsumwzx.vvl" => "__builtin_ve_vl_vsumwzx_vvl", + "llvm.ve.vl.vsumwzx.vvml" => "__builtin_ve_vl_vsumwzx_vvml", + "llvm.ve.vl.vxor.vsvl" => "__builtin_ve_vl_vxor_vsvl", + "llvm.ve.vl.vxor.vsvmvl" => "__builtin_ve_vl_vxor_vsvmvl", + "llvm.ve.vl.vxor.vsvvl" => "__builtin_ve_vl_vxor_vsvvl", + "llvm.ve.vl.vxor.vvvl" => "__builtin_ve_vl_vxor_vvvl", + "llvm.ve.vl.vxor.vvvmvl" => "__builtin_ve_vl_vxor_vvvmvl", + "llvm.ve.vl.vxor.vvvvl" => "__builtin_ve_vl_vxor_vvvvl", + "llvm.ve.vl.xorm.MMM" => "__builtin_ve_vl_xorm_MMM", + "llvm.ve.vl.xorm.mmm" => "__builtin_ve_vl_xorm_mmm", // x86 "llvm.x86.3dnow.pavgusb" => "__builtin_ia32_pavgusb", "llvm.x86.3dnow.pf2id" => "__builtin_ia32_pf2id", @@ -3430,6 +5706,10 @@ match name { "llvm.x86.3dnowa.pfnacc" => "__builtin_ia32_pfnacc", "llvm.x86.3dnowa.pfpnacc" => "__builtin_ia32_pfpnacc", "llvm.x86.3dnowa.pi2fw" => "__builtin_ia32_pi2fw", + "llvm.x86.aadd32" => "__builtin_ia32_aadd32", + "llvm.x86.aadd64" => "__builtin_ia32_aadd64", + "llvm.x86.aand32" => "__builtin_ia32_aand32", + "llvm.x86.aand64" => "__builtin_ia32_aand64", "llvm.x86.addcarry.u32" => "__builtin_ia32_addcarry_u32", "llvm.x86.addcarry.u64" => "__builtin_ia32_addcarry_u64", "llvm.x86.addcarryx.u32" => "__builtin_ia32_addcarryx_u32", @@ -3448,6 +5728,8 @@ match name { "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_aesenclast512", "llvm.x86.aesni.aesimc" => "__builtin_ia32_aesimc128", "llvm.x86.aesni.aeskeygenassist" => "__builtin_ia32_aeskeygenassist128", + "llvm.x86.aor32" => "__builtin_ia32_aor32", + "llvm.x86.aor64" => "__builtin_ia32_aor64", "llvm.x86.avx.addsub.pd.256" => "__builtin_ia32_addsubpd256", "llvm.x86.avx.addsub.ps.256" => "__builtin_ia32_addsubps256", "llvm.x86.avx.blend.pd.256" => "__builtin_ia32_blendpd256", @@ -3660,6 +5942,18 @@ match name { "llvm.x86.avx2.vbroadcast.ss.ps.256" => "__builtin_ia32_vbroadcastss_ps256", "llvm.x86.avx2.vextracti128" => "__builtin_ia32_extract128i256", "llvm.x86.avx2.vinserti128" => "__builtin_ia32_insert128i256", + "llvm.x86.avx2.vpdpbssd.128" => "__builtin_ia32_vpdpbssd128", + 
"llvm.x86.avx2.vpdpbssd.256" => "__builtin_ia32_vpdpbssd256", + "llvm.x86.avx2.vpdpbssds.128" => "__builtin_ia32_vpdpbssds128", + "llvm.x86.avx2.vpdpbssds.256" => "__builtin_ia32_vpdpbssds256", + "llvm.x86.avx2.vpdpbsud.128" => "__builtin_ia32_vpdpbsud128", + "llvm.x86.avx2.vpdpbsud.256" => "__builtin_ia32_vpdpbsud256", + "llvm.x86.avx2.vpdpbsuds.128" => "__builtin_ia32_vpdpbsuds128", + "llvm.x86.avx2.vpdpbsuds.256" => "__builtin_ia32_vpdpbsuds256", + "llvm.x86.avx2.vpdpbuud.128" => "__builtin_ia32_vpdpbuud128", + "llvm.x86.avx2.vpdpbuud.256" => "__builtin_ia32_vpdpbuud256", + "llvm.x86.avx2.vpdpbuuds.128" => "__builtin_ia32_vpdpbuuds128", + "llvm.x86.avx2.vpdpbuuds.256" => "__builtin_ia32_vpdpbuuds256", "llvm.x86.avx2.vperm2i128" => "__builtin_ia32_permti256", "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512", "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512", @@ -3779,8 +6073,8 @@ match name { "llvm.x86.avx512.mask.add.ps.128" => "__builtin_ia32_addps128_mask", "llvm.x86.avx512.mask.add.ps.256" => "__builtin_ia32_addps256_mask", "llvm.x86.avx512.mask.add.ps.512" => "__builtin_ia32_addps512_mask", - "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_round_mask", - "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_round_mask", "llvm.x86.avx512.mask.and.pd.128" => "__builtin_ia32_andpd128_mask", "llvm.x86.avx512.mask.and.pd.256" => "__builtin_ia32_andpd256_mask", "llvm.x86.avx512.mask.and.pd.512" => "__builtin_ia32_andpd512_mask", @@ -3894,8 +6188,8 @@ match name { "llvm.x86.avx512.mask.cvtqq2ps.128" => "__builtin_ia32_cvtqq2ps128_mask", "llvm.x86.avx512.mask.cvtqq2ps.256" => "__builtin_ia32_cvtqq2ps256_mask", "llvm.x86.avx512.mask.cvtqq2ps.512" => "__builtin_ia32_cvtqq2ps512_mask", - "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_round_mask", - "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_round_mask", "llvm.x86.avx512.mask.cvttpd2dq.128" => "__builtin_ia32_cvttpd2dq128_mask", "llvm.x86.avx512.mask.cvttpd2dq.256" => "__builtin_ia32_cvttpd2dq256_mask", "llvm.x86.avx512.mask.cvttpd2dq.512" => "__builtin_ia32_cvttpd2dq512_mask", @@ -3941,8 +6235,8 @@ match name { "llvm.x86.avx512.mask.div.ps.128" => "__builtin_ia32_divps_mask", "llvm.x86.avx512.mask.div.ps.256" => "__builtin_ia32_divps256_mask", "llvm.x86.avx512.mask.div.ps.512" => "__builtin_ia32_divps512_mask", - "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_round_mask", - "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_round_mask", "llvm.x86.avx512.mask.expand.d.128" => "__builtin_ia32_expandsi128_mask", "llvm.x86.avx512.mask.expand.d.256" => "__builtin_ia32_expandsi256_mask", "llvm.x86.avx512.mask.expand.d.512" => "__builtin_ia32_expandsi512_mask", @@ -3989,16 +6283,16 @@ match name { "llvm.x86.avx512.mask.getexp.ps.128" => "__builtin_ia32_getexpps128_mask", "llvm.x86.avx512.mask.getexp.ps.256" => "__builtin_ia32_getexpps256_mask", 
"llvm.x86.avx512.mask.getexp.ps.512" => "__builtin_ia32_getexpps512_mask", - "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd128_round_mask", - "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss128_round_mask", "llvm.x86.avx512.mask.getmant.pd.128" => "__builtin_ia32_getmantpd128_mask", "llvm.x86.avx512.mask.getmant.pd.256" => "__builtin_ia32_getmantpd256_mask", "llvm.x86.avx512.mask.getmant.pd.512" => "__builtin_ia32_getmantpd512_mask", "llvm.x86.avx512.mask.getmant.ps.128" => "__builtin_ia32_getmantps128_mask", "llvm.x86.avx512.mask.getmant.ps.256" => "__builtin_ia32_getmantps256_mask", "llvm.x86.avx512.mask.getmant.ps.512" => "__builtin_ia32_getmantps512_mask", - "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_round_mask", - "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_round_mask", "llvm.x86.avx512.mask.insertf32x4.256" => "__builtin_ia32_insertf32x4_256_mask", "llvm.x86.avx512.mask.insertf32x4.512" => "__builtin_ia32_insertf32x4_mask", "llvm.x86.avx512.mask.insertf32x8.512" => "__builtin_ia32_insertf32x8_mask", @@ -4023,16 +6317,16 @@ match name { "llvm.x86.avx512.mask.max.ps.128" => "__builtin_ia32_maxps_mask", "llvm.x86.avx512.mask.max.ps.256" => "__builtin_ia32_maxps256_mask", "llvm.x86.avx512.mask.max.ps.512" => "__builtin_ia32_maxps512_mask", - "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_round_mask", - "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_round_mask", "llvm.x86.avx512.mask.min.pd.128" => "__builtin_ia32_minpd_mask", "llvm.x86.avx512.mask.min.pd.256" => "__builtin_ia32_minpd256_mask", "llvm.x86.avx512.mask.min.pd.512" => "__builtin_ia32_minpd512_mask", "llvm.x86.avx512.mask.min.ps.128" => "__builtin_ia32_minps_mask", "llvm.x86.avx512.mask.min.ps.256" => "__builtin_ia32_minps256_mask", "llvm.x86.avx512.mask.min.ps.512" => "__builtin_ia32_minps512_mask", - "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_round_mask", - "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_round_mask", "llvm.x86.avx512.mask.move.sd" => "__builtin_ia32_movsd_mask", "llvm.x86.avx512.mask.move.ss" => "__builtin_ia32_movss_mask", "llvm.x86.avx512.mask.mul.pd.128" => "__builtin_ia32_mulpd_mask", @@ -4041,8 +6335,8 @@ match name { "llvm.x86.avx512.mask.mul.ps.128" => "__builtin_ia32_mulps_mask", "llvm.x86.avx512.mask.mul.ps.256" => "__builtin_ia32_mulps256_mask", "llvm.x86.avx512.mask.mul.ps.512" => "__builtin_ia32_mulps512_mask", - "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_round_mask", - "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_round_mask", + // [INVALID 
CONVERSION]: "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_round_mask", "llvm.x86.avx512.mask.or.pd.128" => "__builtin_ia32_orpd128_mask", "llvm.x86.avx512.mask.or.pd.256" => "__builtin_ia32_orpd256_mask", "llvm.x86.avx512.mask.or.pd.512" => "__builtin_ia32_orpd512_mask", @@ -4527,8 +6821,8 @@ match name { "llvm.x86.avx512.mask.range.ps.128" => "__builtin_ia32_rangeps128_mask", "llvm.x86.avx512.mask.range.ps.256" => "__builtin_ia32_rangeps256_mask", "llvm.x86.avx512.mask.range.ps.512" => "__builtin_ia32_rangeps512_mask", - "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_round_mask", - "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_round_mask", "llvm.x86.avx512.mask.reduce.pd.128" => "__builtin_ia32_reducepd128_mask", "llvm.x86.avx512.mask.reduce.pd.256" => "__builtin_ia32_reducepd256_mask", "llvm.x86.avx512.mask.reduce.pd.512" => "__builtin_ia32_reducepd512_mask", @@ -4543,16 +6837,16 @@ match name { "llvm.x86.avx512.mask.rndscale.ps.128" => "__builtin_ia32_rndscaleps_128_mask", "llvm.x86.avx512.mask.rndscale.ps.256" => "__builtin_ia32_rndscaleps_256_mask", "llvm.x86.avx512.mask.rndscale.ps.512" => "__builtin_ia32_rndscaleps_mask", - "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_round_mask", - "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_round_mask", "llvm.x86.avx512.mask.scalef.pd.128" => "__builtin_ia32_scalefpd128_mask", "llvm.x86.avx512.mask.scalef.pd.256" => "__builtin_ia32_scalefpd256_mask", "llvm.x86.avx512.mask.scalef.pd.512" => "__builtin_ia32_scalefpd512_mask", "llvm.x86.avx512.mask.scalef.ps.128" => "__builtin_ia32_scalefps128_mask", "llvm.x86.avx512.mask.scalef.ps.256" => "__builtin_ia32_scalefps256_mask", "llvm.x86.avx512.mask.scalef.ps.512" => "__builtin_ia32_scalefps512_mask", - "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_round_mask", - "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_round_mask", "llvm.x86.avx512.mask.shuf.f32x4" => "__builtin_ia32_shuf_f32x4_mask", "llvm.x86.avx512.mask.shuf.f32x4.256" => "__builtin_ia32_shuf_f32x4_256_mask", "llvm.x86.avx512.mask.shuf.f64x2" => "__builtin_ia32_shuf_f64x2_mask", @@ -4573,8 +6867,8 @@ match name { "llvm.x86.avx512.mask.sqrt.ps.128" => "__builtin_ia32_sqrtps128_mask", "llvm.x86.avx512.mask.sqrt.ps.256" => "__builtin_ia32_sqrtps256_mask", "llvm.x86.avx512.mask.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask", - "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_round_mask", - "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_round_mask", "llvm.x86.avx512.mask.store.ss" => "__builtin_ia32_storess_mask", "llvm.x86.avx512.mask.storeu.d.512" => "__builtin_ia32_storedqusi512_mask", 
"llvm.x86.avx512.mask.storeu.pd.512" => "__builtin_ia32_storeupd512_mask", @@ -4586,8 +6880,8 @@ match name { "llvm.x86.avx512.mask.sub.ps.128" => "__builtin_ia32_subps128_mask", "llvm.x86.avx512.mask.sub.ps.256" => "__builtin_ia32_subps256_mask", "llvm.x86.avx512.mask.sub.ps.512" => "__builtin_ia32_subps512_mask", - "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_round_mask", - "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_round_mask", "llvm.x86.avx512.mask.valign.d.128" => "__builtin_ia32_alignd128_mask", "llvm.x86.avx512.mask.valign.d.256" => "__builtin_ia32_alignd256_mask", "llvm.x86.avx512.mask.valign.d.512" => "__builtin_ia32_alignd512_mask", @@ -4905,9 +7199,9 @@ match name { "llvm.x86.avx512.rcp14.ss" => "__builtin_ia32_rcp14ss_mask", "llvm.x86.avx512.rcp28.pd" => "__builtin_ia32_rcp28pd_mask", "llvm.x86.avx512.rcp28.ps" => "__builtin_ia32_rcp28ps_mask", - "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask", // [DUPLICATE]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask", - "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask", // [DUPLICATE]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask", "llvm.x86.avx512.rndscale.sd" => "__builtin_ia32_rndscalesd", "llvm.x86.avx512.rndscale.ss" => "__builtin_ia32_rndscaless", @@ -4921,9 +7215,9 @@ match name { "llvm.x86.avx512.rsqrt14.ss" => "__builtin_ia32_rsqrt14ss_mask", "llvm.x86.avx512.rsqrt28.pd" => "__builtin_ia32_rsqrt28pd_mask", "llvm.x86.avx512.rsqrt28.ps" => "__builtin_ia32_rsqrt28ps_mask", - "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask", // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask", - "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask", // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask", "llvm.x86.avx512.scatter.dpd.512" => "__builtin_ia32_scattersiv8df", "llvm.x86.avx512.scatter.dpi.512" => "__builtin_ia32_scattersiv16si", @@ -5021,21 +7315,21 @@ match name { "llvm.x86.avx512bf16.dpbf16ps.512" => "__builtin_ia32_dpbf16ps_512", "llvm.x86.avx512fp16.add.ph.512" => "__builtin_ia32_addph512", "llvm.x86.avx512fp16.div.ph.512" => "__builtin_ia32_divph512", - "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_round_mask", "llvm.x86.avx512fp16.mask.cmp.sh" => "__builtin_ia32_cmpsh_mask", - "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_round_mask", "llvm.x86.avx512fp16.mask.fpclass.sh" => "__builtin_ia32_fpclasssh_mask", "llvm.x86.avx512fp16.mask.getexp.ph.128" => "__builtin_ia32_getexpph128_mask", "llvm.x86.avx512fp16.mask.getexp.ph.256" => "__builtin_ia32_getexpph256_mask", "llvm.x86.avx512fp16.mask.getexp.ph.512" => 
"__builtin_ia32_getexpph512_mask", - "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh128_round_mask", "llvm.x86.avx512fp16.mask.getmant.ph.128" => "__builtin_ia32_getmantph128_mask", "llvm.x86.avx512fp16.mask.getmant.ph.256" => "__builtin_ia32_getmantph256_mask", "llvm.x86.avx512fp16.mask.getmant.ph.512" => "__builtin_ia32_getmantph512_mask", - "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_round_mask", - "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_round_mask", - "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_round_mask", - "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_round_mask", "llvm.x86.avx512fp16.mask.rcp.ph.128" => "__builtin_ia32_rcpph128_mask", "llvm.x86.avx512fp16.mask.rcp.ph.256" => "__builtin_ia32_rcpph256_mask", "llvm.x86.avx512fp16.mask.rcp.ph.512" => "__builtin_ia32_rcpph512_mask", @@ -5047,7 +7341,7 @@ match name { "llvm.x86.avx512fp16.mask.rndscale.ph.128" => "__builtin_ia32_rndscaleph_128_mask", "llvm.x86.avx512fp16.mask.rndscale.ph.256" => "__builtin_ia32_rndscaleph_256_mask", "llvm.x86.avx512fp16.mask.rndscale.ph.512" => "__builtin_ia32_rndscaleph_mask", - "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_round_mask", "llvm.x86.avx512fp16.mask.rsqrt.ph.128" => "__builtin_ia32_rsqrtph128_mask", "llvm.x86.avx512fp16.mask.rsqrt.ph.256" => "__builtin_ia32_rsqrtph256_mask", "llvm.x86.avx512fp16.mask.rsqrt.ph.512" => "__builtin_ia32_rsqrtph512_mask", @@ -5055,8 +7349,8 @@ match name { "llvm.x86.avx512fp16.mask.scalef.ph.128" => "__builtin_ia32_scalefph128_mask", "llvm.x86.avx512fp16.mask.scalef.ph.256" => "__builtin_ia32_scalefph256_mask", "llvm.x86.avx512fp16.mask.scalef.ph.512" => "__builtin_ia32_scalefph512_mask", - "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_round_mask", - "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_round_mask", "llvm.x86.avx512fp16.mask.vcvtdq2ph.128" => "__builtin_ia32_vcvtdq2ph128_mask", "llvm.x86.avx512fp16.mask.vcvtpd2ph.128" => "__builtin_ia32_vcvtpd2ph128_mask", "llvm.x86.avx512fp16.mask.vcvtpd2ph.256" => "__builtin_ia32_vcvtpd2ph256_mask", @@ -5090,10 +7384,10 @@ match name { "llvm.x86.avx512fp16.mask.vcvtps2phx.512" => "__builtin_ia32_vcvtps2phx512_mask", "llvm.x86.avx512fp16.mask.vcvtqq2ph.128" => "__builtin_ia32_vcvtqq2ph128_mask", "llvm.x86.avx512fp16.mask.vcvtqq2ph.256" => "__builtin_ia32_vcvtqq2ph256_mask", - "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_round_mask", - "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_round_mask", - "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => 
"__builtin_ia32_vcvtsh2ss_round_mask", - "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_round_mask", "llvm.x86.avx512fp16.mask.vcvttph2dq.128" => "__builtin_ia32_vcvttph2dq128_mask", "llvm.x86.avx512fp16.mask.vcvttph2dq.256" => "__builtin_ia32_vcvttph2dq256_mask", "llvm.x86.avx512fp16.mask.vcvttph2dq.512" => "__builtin_ia32_vcvttph2dq512_mask", @@ -5162,6 +7456,8 @@ match name { "llvm.x86.avx512fp16.vcvtusi642sh" => "__builtin_ia32_vcvtusi642sh", "llvm.x86.avx512fp16.vfmaddsub.ph.128" => "__builtin_ia32_vfmaddsubph", "llvm.x86.avx512fp16.vfmaddsub.ph.256" => "__builtin_ia32_vfmaddsubph256", + "llvm.x86.axor32" => "__builtin_ia32_axor32", + "llvm.x86.axor64" => "__builtin_ia32_axor64", "llvm.x86.bmi.bextr.32" => "__builtin_ia32_bextr_u32", "llvm.x86.bmi.bextr.64" => "__builtin_ia32_bextr_u64", "llvm.x86.bmi.bzhi.32" => "__builtin_ia32_bzhi_si", @@ -5176,6 +7472,8 @@ match name { "llvm.x86.clui" => "__builtin_ia32_clui", "llvm.x86.clwb" => "__builtin_ia32_clwb", "llvm.x86.clzero" => "__builtin_ia32_clzero", + "llvm.x86.cmpccxadd32" => "__builtin_ia32_cmpccxadd32", + "llvm.x86.cmpccxadd64" => "__builtin_ia32_cmpccxadd64", "llvm.x86.directstore32" => "__builtin_ia32_directstore_u32", "llvm.x86.directstore64" => "__builtin_ia32_directstore_u64", "llvm.x86.enqcmd" => "__builtin_ia32_enqcmd", @@ -5329,6 +7627,7 @@ match name { "llvm.x86.rdpid" => "__builtin_ia32_rdpid", "llvm.x86.rdpkru" => "__builtin_ia32_rdpkru", "llvm.x86.rdpmc" => "__builtin_ia32_rdpmc", + "llvm.x86.rdpru" => "__builtin_ia32_rdpru", "llvm.x86.rdsspd" => "__builtin_ia32_rdsspd", "llvm.x86.rdsspq" => "__builtin_ia32_rdsspq", "llvm.x86.rdtsc" => "__builtin_ia32_rdtsc", @@ -5606,6 +7905,8 @@ match name { "llvm.x86.tdpbusd.internal" => "__builtin_ia32_tdpbusd_internal", "llvm.x86.tdpbuud" => "__builtin_ia32_tdpbuud", "llvm.x86.tdpbuud.internal" => "__builtin_ia32_tdpbuud_internal", + "llvm.x86.tdpfp16ps" => "__builtin_ia32_tdpfp16ps", + "llvm.x86.tdpfp16ps.internal" => "__builtin_ia32_tdpfp16ps_internal", "llvm.x86.testui" => "__builtin_ia32_testui", "llvm.x86.tileloadd64" => "__builtin_ia32_tileloadd64", "llvm.x86.tileloadd64.internal" => "__builtin_ia32_tileloadd64_internal", @@ -5619,6 +7920,20 @@ match name { "llvm.x86.tpause" => "__builtin_ia32_tpause", "llvm.x86.umonitor" => "__builtin_ia32_umonitor", "llvm.x86.umwait" => "__builtin_ia32_umwait", + "llvm.x86.vbcstnebf162ps128" => "__builtin_ia32_vbcstnebf162ps128", + "llvm.x86.vbcstnebf162ps256" => "__builtin_ia32_vbcstnebf162ps256", + "llvm.x86.vbcstnesh2ps128" => "__builtin_ia32_vbcstnesh2ps128", + "llvm.x86.vbcstnesh2ps256" => "__builtin_ia32_vbcstnesh2ps256", + "llvm.x86.vcvtneebf162ps128" => "__builtin_ia32_vcvtneebf162ps128", + "llvm.x86.vcvtneebf162ps256" => "__builtin_ia32_vcvtneebf162ps256", + "llvm.x86.vcvtneeph2ps128" => "__builtin_ia32_vcvtneeph2ps128", + "llvm.x86.vcvtneeph2ps256" => "__builtin_ia32_vcvtneeph2ps256", + "llvm.x86.vcvtneobf162ps128" => "__builtin_ia32_vcvtneobf162ps128", + "llvm.x86.vcvtneobf162ps256" => "__builtin_ia32_vcvtneobf162ps256", + "llvm.x86.vcvtneoph2ps128" => 
"__builtin_ia32_vcvtneoph2ps128", + "llvm.x86.vcvtneoph2ps256" => "__builtin_ia32_vcvtneoph2ps256", + "llvm.x86.vcvtneps2bf16128" => "__builtin_ia32_vcvtneps2bf16128", + "llvm.x86.vcvtneps2bf16256" => "__builtin_ia32_vcvtneps2bf16256", "llvm.x86.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps", "llvm.x86.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256", "llvm.x86.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph", diff --git a/src/intrinsic/llvm.rs b/src/intrinsic/llvm.rs index 1b089f08f76..0edec566be3 100644 --- a/src/intrinsic/llvm.rs +++ b/src/intrinsic/llvm.rs @@ -1,159 +1,387 @@ use std::borrow::Cow; -use gccjit::{Function, FunctionPtrType, RValue, ToRValue}; +use gccjit::{Function, FunctionPtrType, RValue, ToRValue, UnaryOp}; +use rustc_codegen_ssa::traits::BuilderMethods; use crate::{context::CodegenCx, builder::Builder}; -pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str) -> Cow<'b, [RValue<'gcc>]> { +pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str, original_function_name: Option<&String>) -> Cow<'b, [RValue<'gcc>]> { // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing // arguments here. if gcc_func.get_param_count() != args.len() { match &*func_name { - "__builtin_ia32_pmuldq512_mask" | "__builtin_ia32_pmuludq512_mask" - // FIXME(antoyo): the following intrinsics has 4 (or 5) arguments according to the doc, but is defined with 2 (or 3) arguments in library/stdarch/crates/core_arch/src/x86/avx512f.rs. + // NOTE: the following intrinsics have a different number of parameters in LLVM and GCC. 
+ "__builtin_ia32_prold512_mask" | "__builtin_ia32_pmuldq512_mask" | "__builtin_ia32_pmuludq512_mask" | "__builtin_ia32_pmaxsd512_mask" | "__builtin_ia32_pmaxsq512_mask" | "__builtin_ia32_pmaxsq256_mask" - | "__builtin_ia32_pmaxsq128_mask" | "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" - | "__builtin_ia32_pmaxud512_mask" | "__builtin_ia32_pmaxuq512_mask" | "__builtin_ia32_pmaxuq256_mask" - | "__builtin_ia32_pmaxuq128_mask" + | "__builtin_ia32_pmaxsq128_mask" | "__builtin_ia32_pmaxud512_mask" | "__builtin_ia32_pmaxuq512_mask" | "__builtin_ia32_pminsd512_mask" | "__builtin_ia32_pminsq512_mask" | "__builtin_ia32_pminsq256_mask" - | "__builtin_ia32_pminsq128_mask" | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" - | "__builtin_ia32_pminud512_mask" | "__builtin_ia32_pminuq512_mask" | "__builtin_ia32_pminuq256_mask" - | "__builtin_ia32_pminuq128_mask" | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" + | "__builtin_ia32_pminsq128_mask" | "__builtin_ia32_pminud512_mask" | "__builtin_ia32_pminuq512_mask" + | "__builtin_ia32_prolq512_mask" | "__builtin_ia32_prorq512_mask" | "__builtin_ia32_pslldi512_mask" + | "__builtin_ia32_psrldi512_mask" | "__builtin_ia32_psllqi512_mask" | "__builtin_ia32_psrlqi512_mask" + | "__builtin_ia32_pslld512_mask" | "__builtin_ia32_psrld512_mask" | "__builtin_ia32_psllq512_mask" + | "__builtin_ia32_psrlq512_mask" | "__builtin_ia32_psrad512_mask" | "__builtin_ia32_psraq512_mask" + | "__builtin_ia32_psradi512_mask" | "__builtin_ia32_psraqi512_mask" | "__builtin_ia32_psrav16si_mask" + | "__builtin_ia32_psrav8di_mask" | "__builtin_ia32_prolvd512_mask" | "__builtin_ia32_prorvd512_mask" + | "__builtin_ia32_prolvq512_mask" | "__builtin_ia32_prorvq512_mask" | "__builtin_ia32_psllv16si_mask" + | "__builtin_ia32_psrlv16si_mask" | "__builtin_ia32_psllv8di_mask" | "__builtin_ia32_psrlv8di_mask" + | "__builtin_ia32_permvarsi512_mask" | "__builtin_ia32_vpermilvarps512_mask" + | "__builtin_ia32_vpermilvarpd512_mask" | "__builtin_ia32_permvardi512_mask" + | "__builtin_ia32_permvarsf512_mask" | "__builtin_ia32_permvarqi512_mask" + | "__builtin_ia32_permvarqi256_mask" | "__builtin_ia32_permvarqi128_mask" + | "__builtin_ia32_vpmultishiftqb512_mask" | "__builtin_ia32_vpmultishiftqb256_mask" + | "__builtin_ia32_vpmultishiftqb128_mask" => { - // TODO: refactor by separating those intrinsics outside of this branch. 
- let add_before_last_arg = - match &*func_name { - "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" - | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" - | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => true, - _ => false, - }; - let new_first_arg_is_zero = - match &*func_name { - "__builtin_ia32_pmaxuq256_mask" | "__builtin_ia32_pmaxuq128_mask" - | "__builtin_ia32_pminuq256_mask" | "__builtin_ia32_pminuq128_mask" => true, - _ => false - }; - let arg3_index = - match &*func_name { - "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => 1, - _ => 2, - }; - let mut new_args = args.to_vec(); - let arg3_type = gcc_func.get_param_type(arg3_index); - let first_arg = - if new_first_arg_is_zero { - let vector_type = arg3_type.dyncast_vector().expect("vector type"); - let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); - let num_units = vector_type.get_num_units(); - builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]) - } - else { - builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue() - }; - if add_before_last_arg { - new_args.insert(new_args.len() - 1, first_arg); - } - else { - new_args.push(first_arg); - } - let arg4_index = - match &*func_name { - "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => 2, - _ => 3, - }; - let arg4_type = gcc_func.get_param_type(arg4_index); - let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); - if add_before_last_arg { - new_args.insert(new_args.len() - 1, minus_one); - } - else { - new_args.push(minus_one); + let mut new_args = args.to_vec(); + let arg3_type = gcc_func.get_param_type(2); + let first_arg = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue(); + new_args.push(first_arg); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_pmaxuq256_mask" | "__builtin_ia32_pmaxuq128_mask" | "__builtin_ia32_pminuq256_mask" + | "__builtin_ia32_pminuq128_mask" | "__builtin_ia32_prold256_mask" | "__builtin_ia32_prold128_mask" + | "__builtin_ia32_prord512_mask" | "__builtin_ia32_prord256_mask" | "__builtin_ia32_prord128_mask" + | "__builtin_ia32_prolq256_mask" | "__builtin_ia32_prolq128_mask" | "__builtin_ia32_prorq256_mask" + | "__builtin_ia32_prorq128_mask" | "__builtin_ia32_psraq256_mask" | "__builtin_ia32_psraq128_mask" + | "__builtin_ia32_psraqi256_mask" | "__builtin_ia32_psraqi128_mask" | "__builtin_ia32_psravq256_mask" + | "__builtin_ia32_psravq128_mask" | "__builtin_ia32_prolvd256_mask" | "__builtin_ia32_prolvd128_mask" + | "__builtin_ia32_prorvd256_mask" | "__builtin_ia32_prorvd128_mask" | "__builtin_ia32_prolvq256_mask" + | "__builtin_ia32_prolvq128_mask" | "__builtin_ia32_prorvq256_mask" | "__builtin_ia32_prorvq128_mask" + | "__builtin_ia32_permvardi256_mask" | "__builtin_ia32_permvardf512_mask" | "__builtin_ia32_permvardf256_mask" + | "__builtin_ia32_pmulhuw512_mask" | "__builtin_ia32_pmulhw512_mask" | "__builtin_ia32_pmulhrsw512_mask" + | "__builtin_ia32_pmaxuw512_mask" | "__builtin_ia32_pmaxub512_mask" | "__builtin_ia32_pmaxsw512_mask" + | "__builtin_ia32_pmaxsb512_mask" | "__builtin_ia32_pminuw512_mask" | "__builtin_ia32_pminub512_mask" + | "__builtin_ia32_pminsw512_mask" | "__builtin_ia32_pminsb512_mask" + | "__builtin_ia32_pmaddwd512_mask" | "__builtin_ia32_pmaddubsw512_mask" | 
"__builtin_ia32_packssdw512_mask" + | "__builtin_ia32_packsswb512_mask" | "__builtin_ia32_packusdw512_mask" | "__builtin_ia32_packuswb512_mask" + | "__builtin_ia32_pavgw512_mask" | "__builtin_ia32_pavgb512_mask" | "__builtin_ia32_psllw512_mask" + | "__builtin_ia32_psllwi512_mask" | "__builtin_ia32_psllv32hi_mask" | "__builtin_ia32_psrlw512_mask" + | "__builtin_ia32_psrlwi512_mask" | "__builtin_ia32_psllv16hi_mask" | "__builtin_ia32_psllv8hi_mask" + | "__builtin_ia32_psrlv32hi_mask" | "__builtin_ia32_psraw512_mask" | "__builtin_ia32_psrawi512_mask" + | "__builtin_ia32_psrlv16hi_mask" | "__builtin_ia32_psrlv8hi_mask" | "__builtin_ia32_psrav32hi_mask" + | "__builtin_ia32_permvarhi512_mask" | "__builtin_ia32_pshufb512_mask" | "__builtin_ia32_psrav16hi_mask" + | "__builtin_ia32_psrav8hi_mask" | "__builtin_ia32_permvarhi256_mask" | "__builtin_ia32_permvarhi128_mask" + => { + let mut new_args = args.to_vec(); + let arg3_type = gcc_func.get_param_type(2); + let vector_type = arg3_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_dbpsadbw512_mask" | "__builtin_ia32_dbpsadbw256_mask" | "__builtin_ia32_dbpsadbw128_mask" => { + let mut new_args = args.to_vec(); + let arg4_type = gcc_func.get_param_type(3); + let vector_type = arg4_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg4_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg5_type = gcc_func.get_param_type(4); + let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask" + | "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => { + let mut new_args = args.to_vec(); + // Remove last arg as it doesn't seem to be used in GCC and is always false. 
+ new_args.pop(); + let arg2_type = gcc_func.get_param_type(1); + let vector_type = arg2_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_vpconflictsi_512_mask" | "__builtin_ia32_vpconflictsi_256_mask" + | "__builtin_ia32_vpconflictsi_128_mask" | "__builtin_ia32_vpconflictdi_512_mask" + | "__builtin_ia32_vpconflictdi_256_mask" | "__builtin_ia32_vpconflictdi_128_mask" => { + let mut new_args = args.to_vec(); + let arg2_type = gcc_func.get_param_type(1); + let vector_type = arg2_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_pternlogd512_mask" | "__builtin_ia32_pternlogd256_mask" + | "__builtin_ia32_pternlogd128_mask" | "__builtin_ia32_pternlogq512_mask" + | "__builtin_ia32_pternlogq256_mask" | "__builtin_ia32_pternlogq128_mask" => { + let mut new_args = args.to_vec(); + let arg5_type = gcc_func.get_param_type(4); + let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => { + let mut new_args = args.to_vec(); + + let mut last_arg = None; + if args.len() == 4 { + last_arg = new_args.pop(); + } + + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + + if args.len() == 3 { + // Both llvm.fma.v16f32 and llvm.x86.avx512.vfmadd.ps.512 maps to + // the same GCC intrinsic, but the former has 3 parameters and the + // latter has 4 so it doesn't require this additional argument. 
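+ // The hard-coded 4 pushed just below is presumably the rounding-mode operand that the
+ // 4-argument LLVM form already carries: in the x86 intrinsics headers, 4 is
+ // _MM_FROUND_CUR_DIRECTION, i.e. "use the current MXCSR rounding mode".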
+ let arg5_type = gcc_func.get_param_type(4); + new_args.push(builder.context.new_rvalue_from_int(arg5_type, 4)); + } + + if let Some(last_arg) = last_arg { + new_args.push(last_arg); + } + + args = new_args.into(); + }, + "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask" + | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask" + | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask" + | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" + | "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" + | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + let arg3_type = gcc_func.get_param_type(2); + let undefined = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue(); + new_args.push(undefined); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + }, + "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + }, + "__builtin_ia32_vpermi2vard512_mask" | "__builtin_ia32_vpermi2vard256_mask" + | "__builtin_ia32_vpermi2vard128_mask" | "__builtin_ia32_vpermi2varq512_mask" + | "__builtin_ia32_vpermi2varq256_mask" | "__builtin_ia32_vpermi2varq128_mask" + | "__builtin_ia32_vpermi2varps512_mask" | "__builtin_ia32_vpermi2varps256_mask" + | "__builtin_ia32_vpermi2varps128_mask" | "__builtin_ia32_vpermi2varpd512_mask" + | "__builtin_ia32_vpermi2varpd256_mask" | "__builtin_ia32_vpermi2varpd128_mask" | "__builtin_ia32_vpmadd52huq512_mask" + | "__builtin_ia32_vpmadd52luq512_mask" | "__builtin_ia32_vpmadd52huq256_mask" | "__builtin_ia32_vpmadd52luq256_mask" + | "__builtin_ia32_vpmadd52huq128_mask" + => { + let mut new_args = args.to_vec(); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask" + | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + let arg2_type = gcc_func.get_param_type(1); + let undefined = builder.current_func().new_local(None, arg2_type, "undefined_for_intrinsic").to_rvalue(); + new_args.push(undefined); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + }, + "__builtin_ia32_stmxcsr" => { + args = vec![].into(); + }, + "__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => { + let mut new_args = args.to_vec(); + let arg2_type = gcc_func.get_param_type(1); + let variable = builder.current_func().new_local(None, arg2_type, "addcarryResult"); + new_args.push(variable.get_address(None)); + args = new_args.into(); + }, + "__builtin_ia32_vpermt2varqi512_mask" | "__builtin_ia32_vpermt2varqi256_mask" + | "__builtin_ia32_vpermt2varqi128_mask" | 
"__builtin_ia32_vpermt2varhi512_mask" + | "__builtin_ia32_vpermt2varhi256_mask" | "__builtin_ia32_vpermt2varhi128_mask" + => { + let new_args = args.to_vec(); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + args = vec![new_args[1], new_args[0], new_args[2], minus_one].into(); + }, + "__builtin_ia32_xrstor" | "__builtin_ia32_xsavec" => { + let new_args = args.to_vec(); + let thirty_two = builder.context.new_rvalue_from_int(new_args[1].get_type(), 32); + let arg2 = new_args[1] << thirty_two | new_args[2]; + let arg2_type = gcc_func.get_param_type(1); + let arg2 = builder.context.new_cast(None, arg2, arg2_type); + args = vec![new_args[0], arg2].into(); + }, + "__builtin_prefetch" => { + let mut new_args = args.to_vec(); + new_args.pop(); + args = new_args.into(); + }, + _ => (), + } + } + else { + match &*func_name { + "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => { + let new_args = args.to_vec(); + let arg3_type = gcc_func.get_param_type(2); + let arg3 = builder.context.new_cast(None, new_args[4], arg3_type); + let arg4_type = gcc_func.get_param_type(3); + let arg4 = builder.context.new_bitcast(None, new_args[2], arg4_type); + args = vec![new_args[0], new_args[1], arg3, arg4, new_args[3], new_args[5]].into(); + }, + // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors. + // FIXME: the intrinsics like _mm_mask_fmadd_sd should probably directly call the GCC + // instrinsic to avoid this. + "__builtin_ia32_vfmaddss3_round" => { + let new_args = args.to_vec(); + let arg1_type = gcc_func.get_param_type(0); + let arg2_type = gcc_func.get_param_type(1); + let arg3_type = gcc_func.get_param_type(2); + let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 4]); + let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 4]); + let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 4]); + args = vec![a, b, c, new_args[3]].into(); + }, + "__builtin_ia32_vfmaddsd3_round" => { + let new_args = args.to_vec(); + let arg1_type = gcc_func.get_param_type(0); + let arg2_type = gcc_func.get_param_type(1); + let arg3_type = gcc_func.get_param_type(2); + let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 2]); + let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 2]); + let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 2]); + args = vec![a, b, c, new_args[3]].into(); + }, + "__builtin_ia32_vfmaddsubpd256" | "__builtin_ia32_vfmaddsubps" | "__builtin_ia32_vfmaddsubps256" + | "__builtin_ia32_vfmaddsubpd" => { + if let Some(original_function_name) = original_function_name { + match &**original_function_name { + "llvm.x86.fma.vfmsubadd.pd.256" | "llvm.x86.fma.vfmsubadd.ps" | "llvm.x86.fma.vfmsubadd.ps.256" + | "llvm.x86.fma.vfmsubadd.pd" => { + // NOTE: since both llvm.x86.fma.vfmsubadd.ps and llvm.x86.fma.vfmaddsub.ps maps to + // __builtin_ia32_vfmaddsubps, only add minus if this comes from a + // subadd LLVM intrinsic, e.g. _mm256_fmsubadd_pd. 
+ let mut new_args = args.to_vec(); + let arg3 = &mut new_args[2]; + *arg3 = builder.context.new_unary_op(None, UnaryOp::Minus, arg3.get_type(), *arg3); + args = new_args.into(); + }, + _ => (), } - args = new_args.into(); - }, - "__builtin_ia32_pternlogd512_mask" | "__builtin_ia32_pternlogd256_mask" - | "__builtin_ia32_pternlogd128_mask" | "__builtin_ia32_pternlogq512_mask" - | "__builtin_ia32_pternlogq256_mask" | "__builtin_ia32_pternlogq128_mask" => { - let mut new_args = args.to_vec(); - let arg5_type = gcc_func.get_param_type(4); - let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1); - new_args.push(minus_one); - args = new_args.into(); - }, - "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => { - let mut new_args = args.to_vec(); - - let mut last_arg = None; - if args.len() == 4 { - last_arg = new_args.pop(); - } - - let arg4_type = gcc_func.get_param_type(3); - let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); - new_args.push(minus_one); - - if args.len() == 3 { - // Both llvm.fma.v16f32 and llvm.x86.avx512.vfmadd.ps.512 maps to - // the same GCC intrinsic, but the former has 3 parameters and the - // latter has 4 so it doesn't require this additional argument. - let arg5_type = gcc_func.get_param_type(4); - new_args.push(builder.context.new_rvalue_from_int(arg5_type, 4)); - } - - if let Some(last_arg) = last_arg { - new_args.push(last_arg); - } - - args = new_args.into(); - }, - "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask" - | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask" - | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask" - | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" => { - let mut new_args = args.to_vec(); - let last_arg = new_args.pop().expect("last arg"); - let arg3_type = gcc_func.get_param_type(2); - let undefined = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue(); - new_args.push(undefined); - let arg4_type = gcc_func.get_param_type(3); - let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); - new_args.push(minus_one); - new_args.push(last_arg); - args = new_args.into(); - }, - "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => { - let mut new_args = args.to_vec(); - let last_arg = new_args.pop().expect("last arg"); - let arg4_type = gcc_func.get_param_type(3); - let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); - new_args.push(minus_one); - new_args.push(last_arg); - args = new_args.into(); - }, - _ => (), + } + }, + "__builtin_ia32_ldmxcsr" => { + // The builtin __builtin_ia32_ldmxcsr takes an integer value while llvm.x86.sse.ldmxcsr takes a pointer, + // so dereference the pointer. 
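// A minimal sketch of the mismatch described above, using a hypothetical helper: the LLVM
// intrinsic passes the MXCSR word by pointer, while the GCC builtin takes it by value, so
// the adjustment loads through the pointer once and forwards the loaded value.
unsafe fn forward_mxcsr_by_value(mxcsr: *const u32, gcc_builtin: unsafe fn(u32)) {
    gcc_builtin(*mxcsr); // dereference the pointer argument, pass the value through
}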
+ let mut new_args = args.to_vec(); + let uint_ptr_type = builder.uint_type.make_pointer(); + let arg1 = builder.context.new_cast(None, args[0], uint_ptr_type); + new_args[0] = arg1.dereference(None).to_rvalue(); + args = new_args.into(); + }, + "__builtin_ia32_rcp14sd_mask" | "__builtin_ia32_rcp14ss_mask" | "__builtin_ia32_rsqrt14sd_mask" + | "__builtin_ia32_rsqrt14ss_mask" => { + let new_args = args.to_vec(); + args = vec![new_args[1], new_args[0], new_args[2], new_args[3]].into(); + }, + "__builtin_ia32_sqrtsd_mask_round" | "__builtin_ia32_sqrtss_mask_round" => { + let new_args = args.to_vec(); + args = vec![new_args[1], new_args[0], new_args[2], new_args[3], new_args[4]].into(); + }, + _ => (), } } args } +pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, mut return_value: RValue<'gcc>, func_name: &str, args: &[RValue<'gcc>], args_adjusted: bool, orig_args: &[RValue<'gcc>]) -> RValue<'gcc> { + match func_name { + "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => { + #[cfg(feature="master")] + { + let zero = builder.context.new_rvalue_zero(builder.int_type); + return_value = builder.context.new_vector_access(None, return_value, zero).to_rvalue(); + } + }, + "__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => { + // Both llvm.x86.addcarry.32 and llvm.x86.addcarryx.u32 points to the same GCC builtin, + // but only the former requires adjusting the return value. + // Those 2 LLVM intrinsics differ by their argument count, that's why we check if the + // arguments were adjusted. + if args_adjusted { + let last_arg = args.last().expect("last arg"); + let field1 = builder.context.new_field(None, builder.u8_type, "carryFlag"); + let field2 = builder.context.new_field(None, args[1].get_type(), "carryResult"); + let struct_type = builder.context.new_struct_type(None, "addcarryResult", &[field1, field2]); + return_value = builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[return_value, last_arg.dereference(None).to_rvalue()]); + } + }, + "__builtin_ia32_stmxcsr" => { + // The builtin __builtin_ia32_stmxcsr returns a value while llvm.x86.sse.stmxcsr writes + // the result in its pointer argument. + // We removed the argument since __builtin_ia32_stmxcsr takes no arguments, so we need + // to get back the original argument to get the pointer we need to write the result to. + let uint_ptr_type = builder.uint_type.make_pointer(); + let ptr = builder.context.new_cast(None, orig_args[0], uint_ptr_type); + builder.llbb().add_assignment(None, ptr.dereference(None), return_value); + // The return value was assigned to the result pointer above. In order to not call the + // builtin twice, we overwrite the return value with a dummy value. + return_value = builder.context.new_rvalue_zero(builder.int_type); + }, + _ => (), + } + + return_value +} + pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool { - // NOTE: these intrinsics have missing parameters before the last one, so ignore the - // last argument type check. // FIXME(antoyo): find a way to refactor in order to avoid this hack. match func_name { + // NOTE: these intrinsics have missing parameters before the last one, so ignore the + // last argument type check. 
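// Rough model of the addcarry/subborrow adjustment above: llvm.x86.addcarry.32 returns a
// (carry, sum) pair, while the GCC builtin returns only the carry flag and stores the sum
// through the extra out-pointer pushed earlier, so the struct constructor just reassembles
// the pair. In plain Rust, the add-with-carry case looks like this:
fn addcarry_u32_model(carry_in: u8, a: u32, b: u32) -> (u8, u32) {
    let wide = u64::from(a) + u64::from(b) + u64::from(carry_in);
    ((wide >> 32) as u8, wide as u32) // (carryFlag, carryResult), matching the struct fields
}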
"__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" | "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask" | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask" | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask" | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" - | "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => { + | "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" + | "__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask" => { if index == args_len - 1 { return true; } }, + "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => { + if index == 2 || index == 3 { + return true; + } + }, "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => { // Since there are two LLVM intrinsics that map to each of these GCC builtins and only // one of them has a missing parameter before the last one, we check the number of @@ -162,6 +390,14 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool { return true; } }, + // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors. + "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => return true, + "__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask" + | "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => { + if index == args_len - 1 { + return true; + } + }, _ => (), } @@ -171,7 +407,7 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool { #[cfg(not(feature="master"))] pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> { match name { - "llvm.x86.xgetbv" => { + "llvm.x86.xgetbv" | "llvm.x86.sse2.pause" => { let gcc_name = "__builtin_trap"; let func = cx.context.get_builtin_function(gcc_name); cx.functions.borrow_mut().insert(gcc_name.to_string(), func); @@ -183,24 +419,26 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function #[cfg(feature="master")] pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> { + match name { + "llvm.prefetch" => { + let gcc_name = "__builtin_prefetch"; + let func = cx.context.get_builtin_function(gcc_name); + cx.functions.borrow_mut().insert(gcc_name.to_string(), func); + return func + }, + _ => (), + } + let gcc_name = match name { "llvm.x86.xgetbv" => "__builtin_ia32_xgetbv", // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html "llvm.sqrt.v2f64" => "__builtin_ia32_sqrtpd", "llvm.x86.avx512.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask", "llvm.x86.avx512.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask", - "llvm.x86.avx512.mask.pmaxs.q.256" => "__builtin_ia32_pmaxsq256_mask", - "llvm.x86.avx512.mask.pmaxs.q.128" => "__builtin_ia32_pmaxsq128_mask", "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512_mask", "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512_mask", - "llvm.x86.avx512.mask.pmaxu.q.256" => "__builtin_ia32_pmaxuq256_mask", - "llvm.x86.avx512.mask.pmaxu.q.128" => "__builtin_ia32_pmaxuq128_mask", - "llvm.x86.avx512.mask.pmins.q.256" => "__builtin_ia32_pminsq256_mask", - "llvm.x86.avx512.mask.pmins.q.128" => 
"__builtin_ia32_pminsq128_mask", "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512_mask", "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512_mask", - "llvm.x86.avx512.mask.pminu.q.256" => "__builtin_ia32_pminuq256_mask", - "llvm.x86.avx512.mask.pminu.q.128" => "__builtin_ia32_pminuq128_mask", "llvm.fma.v16f32" => "__builtin_ia32_vfmaddps512_mask", "llvm.fma.v8f64" => "__builtin_ia32_vfmaddpd512_mask", "llvm.x86.avx512.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask", @@ -221,6 +459,153 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512_mask", "llvm.x86.avx512.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask", "llvm.x86.avx512.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask", + "llvm.x86.avx512.sitofp.round.v16f32.v16i32" => "__builtin_ia32_cvtdq2ps512_mask", + "llvm.x86.avx512.uitofp.round.v16f32.v16i32" => "__builtin_ia32_cvtudq2ps512_mask", + "llvm.x86.avx512.mask.ucmp.d.512" => "__builtin_ia32_ucmpd512_mask", + "llvm.x86.avx512.mask.ucmp.d.256" => "__builtin_ia32_ucmpd256_mask", + "llvm.x86.avx512.mask.ucmp.d.128" => "__builtin_ia32_ucmpd128_mask", + "llvm.x86.avx512.mask.cmp.d.512" => "__builtin_ia32_cmpd512_mask", + "llvm.x86.avx512.mask.cmp.d.256" => "__builtin_ia32_cmpd256_mask", + "llvm.x86.avx512.mask.cmp.d.128" => "__builtin_ia32_cmpd128_mask", + "llvm.x86.avx512.mask.ucmp.q.512" => "__builtin_ia32_ucmpq512_mask", + "llvm.x86.avx512.mask.ucmp.q.256" => "__builtin_ia32_ucmpq256_mask", + "llvm.x86.avx512.mask.ucmp.q.128" => "__builtin_ia32_ucmpq128_mask", + "llvm.x86.avx512.mask.cmp.q.512" => "__builtin_ia32_cmpq512_mask", + "llvm.x86.avx512.mask.cmp.q.256" => "__builtin_ia32_cmpq256_mask", + "llvm.x86.avx512.mask.cmp.q.128" => "__builtin_ia32_cmpq128_mask", + "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_mask_round", + "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_mask_round", + "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_mask_round", + "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_mask_round", + "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_mask_round", + "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_mask_round", + "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss_mask_round", + "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd_mask_round", + "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_mask_round", + "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_mask_round", + "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_mask_round", + "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_mask_round", + "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_mask_round", + "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_mask_round", + "llvm.x86.avx512.vfmadd.f32" => "__builtin_ia32_vfmaddss3_round", + "llvm.x86.avx512.vfmadd.f64" => "__builtin_ia32_vfmaddsd3_round", + "llvm.ceil.v4f64" => "__builtin_ia32_ceilpd256", + "llvm.ceil.v8f32" => "__builtin_ia32_ceilps256", + "llvm.floor.v4f64" => "__builtin_ia32_floorpd256", + "llvm.floor.v8f32" => "__builtin_ia32_floorps256", + "llvm.sqrt.v4f64" => "__builtin_ia32_sqrtpd256", + "llvm.x86.sse.stmxcsr" => "__builtin_ia32_stmxcsr", + "llvm.x86.sse.ldmxcsr" => "__builtin_ia32_ldmxcsr", + "llvm.ctpop.v16i32" => "__builtin_ia32_vpopcountd_v16si", + "llvm.ctpop.v8i32" => "__builtin_ia32_vpopcountd_v8si", + "llvm.ctpop.v4i32" => 
"__builtin_ia32_vpopcountd_v4si", + "llvm.ctpop.v8i64" => "__builtin_ia32_vpopcountq_v8di", + "llvm.ctpop.v4i64" => "__builtin_ia32_vpopcountq_v4di", + "llvm.ctpop.v2i64" => "__builtin_ia32_vpopcountq_v2di", + "llvm.x86.addcarry.64" => "__builtin_ia32_addcarryx_u64", + "llvm.x86.subborrow.64" => "__builtin_ia32_sbb_u64", + "llvm.floor.v2f64" => "__builtin_ia32_floorpd", + "llvm.floor.v4f32" => "__builtin_ia32_floorps", + "llvm.ceil.v2f64" => "__builtin_ia32_ceilpd", + "llvm.ceil.v4f32" => "__builtin_ia32_ceilps", + "llvm.fma.v2f64" => "__builtin_ia32_vfmaddpd", + "llvm.fma.v4f64" => "__builtin_ia32_vfmaddpd256", + "llvm.fma.v4f32" => "__builtin_ia32_vfmaddps", + "llvm.fma.v8f32" => "__builtin_ia32_vfmaddps256", + "llvm.ctlz.v16i32" => "__builtin_ia32_vplzcntd_512_mask", + "llvm.ctlz.v8i32" => "__builtin_ia32_vplzcntd_256_mask", + "llvm.ctlz.v4i32" => "__builtin_ia32_vplzcntd_128_mask", + "llvm.ctlz.v8i64" => "__builtin_ia32_vplzcntq_512_mask", + "llvm.ctlz.v4i64" => "__builtin_ia32_vplzcntq_256_mask", + "llvm.ctlz.v2i64" => "__builtin_ia32_vplzcntq_128_mask", + "llvm.ctpop.v32i16" => "__builtin_ia32_vpopcountw_v32hi", + "llvm.x86.fma.vfmsub.sd" => "__builtin_ia32_vfmsubsd3", + "llvm.x86.fma.vfmsub.ss" => "__builtin_ia32_vfmsubss3", + "llvm.x86.fma.vfmsubadd.pd" => "__builtin_ia32_vfmaddsubpd", + "llvm.x86.fma.vfmsubadd.pd.256" => "__builtin_ia32_vfmaddsubpd256", + "llvm.x86.fma.vfmsubadd.ps" => "__builtin_ia32_vfmaddsubps", + "llvm.x86.fma.vfmsubadd.ps.256" => "__builtin_ia32_vfmaddsubps256", + "llvm.x86.fma.vfnmadd.sd" => "__builtin_ia32_vfnmaddsd3", + "llvm.x86.fma.vfnmadd.ss" => "__builtin_ia32_vfnmaddss3", + "llvm.x86.fma.vfnmsub.sd" => "__builtin_ia32_vfnmsubsd3", + "llvm.x86.fma.vfnmsub.ss" => "__builtin_ia32_vfnmsubss3", + "llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask", + "llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask", + "llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask", + "llvm.x86.avx512.conflict.q.512" => "__builtin_ia32_vpconflictdi_512_mask", + "llvm.x86.avx512.conflict.q.256" => "__builtin_ia32_vpconflictdi_256_mask", + "llvm.x86.avx512.conflict.q.128" => "__builtin_ia32_vpconflictdi_128_mask", + "llvm.x86.avx512.vpermi2var.qi.512" => "__builtin_ia32_vpermt2varqi512_mask", + "llvm.x86.avx512.vpermi2var.qi.256" => "__builtin_ia32_vpermt2varqi256_mask", + "llvm.x86.avx512.vpermi2var.qi.128" => "__builtin_ia32_vpermt2varqi128_mask", + "llvm.x86.avx512.permvar.qi.512" => "__builtin_ia32_permvarqi512_mask", + "llvm.x86.avx512.permvar.qi.256" => "__builtin_ia32_permvarqi256_mask", + "llvm.x86.avx512.permvar.qi.128" => "__builtin_ia32_permvarqi128_mask", + "llvm.x86.avx512.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512_mask", + "llvm.x86.avx512.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256_mask", + "llvm.x86.avx512.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128_mask", + "llvm.ctpop.v16i16" => "__builtin_ia32_vpopcountw_v16hi", + "llvm.ctpop.v8i16" => "__builtin_ia32_vpopcountw_v8hi", + "llvm.ctpop.v64i8" => "__builtin_ia32_vpopcountb_v64qi", + "llvm.ctpop.v32i8" => "__builtin_ia32_vpopcountb_v32qi", + "llvm.ctpop.v16i8" => "__builtin_ia32_vpopcountb_v16qi", + "llvm.x86.avx512.mask.vpshufbitqmb.512" => "__builtin_ia32_vpshufbitqmb512_mask", + "llvm.x86.avx512.mask.vpshufbitqmb.256" => "__builtin_ia32_vpshufbitqmb256_mask", + "llvm.x86.avx512.mask.vpshufbitqmb.128" => "__builtin_ia32_vpshufbitqmb128_mask", + "llvm.x86.avx512.mask.ucmp.w.512" => "__builtin_ia32_ucmpw512_mask", 
+ "llvm.x86.avx512.mask.ucmp.w.256" => "__builtin_ia32_ucmpw256_mask", + "llvm.x86.avx512.mask.ucmp.w.128" => "__builtin_ia32_ucmpw128_mask", + "llvm.x86.avx512.mask.ucmp.b.512" => "__builtin_ia32_ucmpb512_mask", + "llvm.x86.avx512.mask.ucmp.b.256" => "__builtin_ia32_ucmpb256_mask", + "llvm.x86.avx512.mask.ucmp.b.128" => "__builtin_ia32_ucmpb128_mask", + "llvm.x86.avx512.mask.cmp.w.512" => "__builtin_ia32_cmpw512_mask", + "llvm.x86.avx512.mask.cmp.w.256" => "__builtin_ia32_cmpw256_mask", + "llvm.x86.avx512.mask.cmp.w.128" => "__builtin_ia32_cmpw128_mask", + "llvm.x86.avx512.mask.cmp.b.512" => "__builtin_ia32_cmpb512_mask", + "llvm.x86.avx512.mask.cmp.b.256" => "__builtin_ia32_cmpb256_mask", + "llvm.x86.avx512.mask.cmp.b.128" => "__builtin_ia32_cmpb128_mask", + "llvm.x86.xrstor" => "__builtin_ia32_xrstor", + "llvm.x86.xsavec" => "__builtin_ia32_xsavec", + "llvm.x86.addcarry.32" => "__builtin_ia32_addcarryx_u32", + "llvm.x86.subborrow.32" => "__builtin_ia32_sbb_u32", + "llvm.x86.avx512.mask.compress.store.w.512" => "__builtin_ia32_compressstoreuhi512_mask", + "llvm.x86.avx512.mask.compress.store.w.256" => "__builtin_ia32_compressstoreuhi256_mask", + "llvm.x86.avx512.mask.compress.store.w.128" => "__builtin_ia32_compressstoreuhi128_mask", + "llvm.x86.avx512.mask.compress.store.b.512" => "__builtin_ia32_compressstoreuqi512_mask", + "llvm.x86.avx512.mask.compress.store.b.256" => "__builtin_ia32_compressstoreuqi256_mask", + "llvm.x86.avx512.mask.compress.store.b.128" => "__builtin_ia32_compressstoreuqi128_mask", + "llvm.x86.avx512.mask.compress.w.512" => "__builtin_ia32_compresshi512_mask", + "llvm.x86.avx512.mask.compress.w.256" => "__builtin_ia32_compresshi256_mask", + "llvm.x86.avx512.mask.compress.w.128" => "__builtin_ia32_compresshi128_mask", + "llvm.x86.avx512.mask.compress.b.512" => "__builtin_ia32_compressqi512_mask", + "llvm.x86.avx512.mask.compress.b.256" => "__builtin_ia32_compressqi256_mask", + "llvm.x86.avx512.mask.compress.b.128" => "__builtin_ia32_compressqi128_mask", + "llvm.x86.avx512.mask.expand.w.512" => "__builtin_ia32_expandhi512_mask", + "llvm.x86.avx512.mask.expand.w.256" => "__builtin_ia32_expandhi256_mask", + "llvm.x86.avx512.mask.expand.w.128" => "__builtin_ia32_expandhi128_mask", + "llvm.x86.avx512.mask.expand.b.512" => "__builtin_ia32_expandqi512_mask", + "llvm.x86.avx512.mask.expand.b.256" => "__builtin_ia32_expandqi256_mask", + "llvm.x86.avx512.mask.expand.b.128" => "__builtin_ia32_expandqi128_mask", + "llvm.fshl.v8i64" => "__builtin_ia32_vpshldv_v8di", + "llvm.fshl.v4i64" => "__builtin_ia32_vpshldv_v4di", + "llvm.fshl.v2i64" => "__builtin_ia32_vpshldv_v2di", + "llvm.fshl.v16i32" => "__builtin_ia32_vpshldv_v16si", + "llvm.fshl.v8i32" => "__builtin_ia32_vpshldv_v8si", + "llvm.fshl.v4i32" => "__builtin_ia32_vpshldv_v4si", + "llvm.fshl.v32i16" => "__builtin_ia32_vpshldv_v32hi", + "llvm.fshl.v16i16" => "__builtin_ia32_vpshldv_v16hi", + "llvm.fshl.v8i16" => "__builtin_ia32_vpshldv_v8hi", + "llvm.fshr.v8i64" => "__builtin_ia32_vpshrdv_v8di", + "llvm.fshr.v4i64" => "__builtin_ia32_vpshrdv_v4di", + "llvm.fshr.v2i64" => "__builtin_ia32_vpshrdv_v2di", + "llvm.fshr.v16i32" => "__builtin_ia32_vpshrdv_v16si", + "llvm.fshr.v8i32" => "__builtin_ia32_vpshrdv_v8si", + "llvm.fshr.v4i32" => "__builtin_ia32_vpshrdv_v4si", + "llvm.fshr.v32i16" => "__builtin_ia32_vpshrdv_v32hi", + "llvm.fshr.v16i16" => "__builtin_ia32_vpshrdv_v16hi", + "llvm.fshr.v8i16" => "__builtin_ia32_vpshrdv_v8hi", + "llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd3", + "llvm.x86.fma.vfmadd.ss" => 
"__builtin_ia32_vfmaddss3", // The above doc points to unknown builtins for the following, so override them: "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gathersiv4si", @@ -239,7 +624,151 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherdiv4di", "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherdiv2df", "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherdiv4df", - "" => "", + "llvm.x86.avx512.pslli.d.512" => "__builtin_ia32_pslldi512_mask", + "llvm.x86.avx512.psrli.d.512" => "__builtin_ia32_psrldi512_mask", + "llvm.x86.avx512.pslli.q.512" => "__builtin_ia32_psllqi512_mask", + "llvm.x86.avx512.psrli.q.512" => "__builtin_ia32_psrlqi512_mask", + "llvm.x86.avx512.psll.d.512" => "__builtin_ia32_pslld512_mask", + "llvm.x86.avx512.psrl.d.512" => "__builtin_ia32_psrld512_mask", + "llvm.x86.avx512.psll.q.512" => "__builtin_ia32_psllq512_mask", + "llvm.x86.avx512.psrl.q.512" => "__builtin_ia32_psrlq512_mask", + "llvm.x86.avx512.psra.d.512" => "__builtin_ia32_psrad512_mask", + "llvm.x86.avx512.psra.q.512" => "__builtin_ia32_psraq512_mask", + "llvm.x86.avx512.psra.q.256" => "__builtin_ia32_psraq256_mask", + "llvm.x86.avx512.psra.q.128" => "__builtin_ia32_psraq128_mask", + "llvm.x86.avx512.psrai.d.512" => "__builtin_ia32_psradi512_mask", + "llvm.x86.avx512.psrai.q.512" => "__builtin_ia32_psraqi512_mask", + "llvm.x86.avx512.psrai.q.256" => "__builtin_ia32_psraqi256_mask", + "llvm.x86.avx512.psrai.q.128" => "__builtin_ia32_psraqi128_mask", + "llvm.x86.avx512.psrav.d.512" => "__builtin_ia32_psrav16si_mask", + "llvm.x86.avx512.psrav.q.512" => "__builtin_ia32_psrav8di_mask", + "llvm.x86.avx512.psrav.q.256" => "__builtin_ia32_psravq256_mask", + "llvm.x86.avx512.psrav.q.128" => "__builtin_ia32_psravq128_mask", + "llvm.x86.avx512.psllv.d.512" => "__builtin_ia32_psllv16si_mask", + "llvm.x86.avx512.psrlv.d.512" => "__builtin_ia32_psrlv16si_mask", + "llvm.x86.avx512.psllv.q.512" => "__builtin_ia32_psllv8di_mask", + "llvm.x86.avx512.psrlv.q.512" => "__builtin_ia32_psrlv8di_mask", + "llvm.x86.avx512.permvar.si.512" => "__builtin_ia32_permvarsi512_mask", + "llvm.x86.avx512.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512_mask", + "llvm.x86.avx512.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512_mask", + "llvm.x86.avx512.permvar.di.512" => "__builtin_ia32_permvardi512_mask", + "llvm.x86.avx512.permvar.di.256" => "__builtin_ia32_permvardi256_mask", + "llvm.x86.avx512.permvar.sf.512" => "__builtin_ia32_permvarsf512_mask", + "llvm.x86.avx512.permvar.df.512" => "__builtin_ia32_permvardf512_mask", + "llvm.x86.avx512.permvar.df.256" => "__builtin_ia32_permvardf256_mask", + "llvm.x86.avx512.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512_mask", + "llvm.x86.avx512.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256_mask", + "llvm.x86.avx512.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128_mask", + "llvm.x86.avx512.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512_mask", + "llvm.x86.avx512.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256_mask", + "llvm.x86.avx512.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128_mask", + "llvm.x86.avx512.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512_mask", + "llvm.x86.avx512.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256_mask", + "llvm.x86.avx512.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128_mask", + "llvm.x86.avx512.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512_mask", + "llvm.x86.avx512.vpermi2var.pd.256" => 
"__builtin_ia32_vpermi2varpd256_mask", + "llvm.x86.avx512.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128_mask", + "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_mask_round", + "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_mask_round", + "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_mask_round", + "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_mask_round", + "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_mask_round", + "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_mask_round", + "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_mask_round", + "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_mask_round", + "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_mask_round", + "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_mask_round", + "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_mask_round", + "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_mask_round", + "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask_round", + "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask_round", + "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask_round", + "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask_round", + "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_mask_round", + "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_mask_round", + "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_mask_round", + "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_mask_round", + "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_mask_round", + "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_mask_round", + "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_mask_round", + "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_mask_round", + "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_mask_round", + "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_mask_round", + "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_mask_round", + "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_mask_round", + "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_mask_round", + "llvm.x86.aesni.aesenc.256" => "__builtin_ia32_vaesenc_v32qi", + "llvm.x86.aesni.aesenclast.256" => "__builtin_ia32_vaesenclast_v32qi", + "llvm.x86.aesni.aesdec.256" => "__builtin_ia32_vaesdec_v32qi", + "llvm.x86.aesni.aesdeclast.256" => "__builtin_ia32_vaesdeclast_v32qi", + "llvm.x86.aesni.aesenc.512" => "__builtin_ia32_vaesenc_v64qi", + "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_vaesenclast_v64qi", + "llvm.x86.aesni.aesdec.512" => "__builtin_ia32_vaesdec_v64qi", + "llvm.x86.aesni.aesdeclast.512" => "__builtin_ia32_vaesdeclast_v64qi", + "llvm.x86.avx512bf16.cvtne2ps2bf16.128" => "__builtin_ia32_cvtne2ps2bf16_v8bf", + "llvm.x86.avx512bf16.cvtne2ps2bf16.256" => "__builtin_ia32_cvtne2ps2bf16_v16bf", + "llvm.x86.avx512bf16.cvtne2ps2bf16.512" => "__builtin_ia32_cvtne2ps2bf16_v32bf", + "llvm.x86.avx512bf16.cvtneps2bf16.256" => "__builtin_ia32_cvtneps2bf16_v8sf", + "llvm.x86.avx512bf16.cvtneps2bf16.512" => "__builtin_ia32_cvtneps2bf16_v16sf", + "llvm.x86.avx512bf16.dpbf16ps.128" => "__builtin_ia32_dpbf16ps_v4sf", + "llvm.x86.avx512bf16.dpbf16ps.256" => "__builtin_ia32_dpbf16ps_v8sf", + "llvm.x86.avx512bf16.dpbf16ps.512" => 
"__builtin_ia32_dpbf16ps_v16sf", + "llvm.x86.pclmulqdq.512" => "__builtin_ia32_vpclmulqdq_v8di", + "llvm.x86.pclmulqdq.256" => "__builtin_ia32_vpclmulqdq_v4di", + "llvm.x86.avx512.pmulhu.w.512" => "__builtin_ia32_pmulhuw512_mask", + "llvm.x86.avx512.pmulh.w.512" => "__builtin_ia32_pmulhw512_mask", + "llvm.x86.avx512.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512_mask", + "llvm.x86.avx512.pmaddw.d.512" => "__builtin_ia32_pmaddwd512_mask", + "llvm.x86.avx512.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512_mask", + "llvm.x86.avx512.packssdw.512" => "__builtin_ia32_packssdw512_mask", + "llvm.x86.avx512.packsswb.512" => "__builtin_ia32_packsswb512_mask", + "llvm.x86.avx512.packusdw.512" => "__builtin_ia32_packusdw512_mask", + "llvm.x86.avx512.packuswb.512" => "__builtin_ia32_packuswb512_mask", + "llvm.x86.avx512.pavg.w.512" => "__builtin_ia32_pavgw512_mask", + "llvm.x86.avx512.pavg.b.512" => "__builtin_ia32_pavgb512_mask", + "llvm.x86.avx512.psll.w.512" => "__builtin_ia32_psllw512_mask", + "llvm.x86.avx512.pslli.w.512" => "__builtin_ia32_psllwi512_mask", + "llvm.x86.avx512.psllv.w.512" => "__builtin_ia32_psllv32hi_mask", + "llvm.x86.avx512.psllv.w.256" => "__builtin_ia32_psllv16hi_mask", + "llvm.x86.avx512.psllv.w.128" => "__builtin_ia32_psllv8hi_mask", + "llvm.x86.avx512.psrl.w.512" => "__builtin_ia32_psrlw512_mask", + "llvm.x86.avx512.psrli.w.512" => "__builtin_ia32_psrlwi512_mask", + "llvm.x86.avx512.psrlv.w.512" => "__builtin_ia32_psrlv32hi_mask", + "llvm.x86.avx512.psrlv.w.256" => "__builtin_ia32_psrlv16hi_mask", + "llvm.x86.avx512.psrlv.w.128" => "__builtin_ia32_psrlv8hi_mask", + "llvm.x86.avx512.psra.w.512" => "__builtin_ia32_psraw512_mask", + "llvm.x86.avx512.psrai.w.512" => "__builtin_ia32_psrawi512_mask", + "llvm.x86.avx512.psrav.w.512" => "__builtin_ia32_psrav32hi_mask", + "llvm.x86.avx512.psrav.w.256" => "__builtin_ia32_psrav16hi_mask", + "llvm.x86.avx512.psrav.w.128" => "__builtin_ia32_psrav8hi_mask", + "llvm.x86.avx512.vpermi2var.hi.512" => "__builtin_ia32_vpermt2varhi512_mask", + "llvm.x86.avx512.vpermi2var.hi.256" => "__builtin_ia32_vpermt2varhi256_mask", + "llvm.x86.avx512.vpermi2var.hi.128" => "__builtin_ia32_vpermt2varhi128_mask", + "llvm.x86.avx512.permvar.hi.512" => "__builtin_ia32_permvarhi512_mask", + "llvm.x86.avx512.permvar.hi.256" => "__builtin_ia32_permvarhi256_mask", + "llvm.x86.avx512.permvar.hi.128" => "__builtin_ia32_permvarhi128_mask", + "llvm.x86.avx512.pshuf.b.512" => "__builtin_ia32_pshufb512_mask", + "llvm.x86.avx512.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512_mask", + "llvm.x86.avx512.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256_mask", + "llvm.x86.avx512.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128_mask", + "llvm.x86.avx512.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_mask", + "llvm.x86.avx512.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_mask", + "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_mask", + "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_mask", + "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_mask", + "llvm.x86.avx512.vpdpwssd.512" => "__builtin_ia32_vpdpwssd_v16si", + "llvm.x86.avx512.vpdpwssd.256" => "__builtin_ia32_vpdpwssd_v8si", + "llvm.x86.avx512.vpdpwssd.128" => "__builtin_ia32_vpdpwssd_v4si", + "llvm.x86.avx512.vpdpwssds.512" => "__builtin_ia32_vpdpwssds_v16si", + "llvm.x86.avx512.vpdpwssds.256" => "__builtin_ia32_vpdpwssds_v8si", + "llvm.x86.avx512.vpdpwssds.128" => "__builtin_ia32_vpdpwssds_v4si", + "llvm.x86.avx512.vpdpbusd.512" => 
"__builtin_ia32_vpdpbusd_v16si", + "llvm.x86.avx512.vpdpbusd.256" => "__builtin_ia32_vpdpbusd_v8si", + "llvm.x86.avx512.vpdpbusd.128" => "__builtin_ia32_vpdpbusd_v4si", + "llvm.x86.avx512.vpdpbusds.512" => "__builtin_ia32_vpdpbusds_v16si", + "llvm.x86.avx512.vpdpbusds.256" => "__builtin_ia32_vpdpbusds_v8si", + "llvm.x86.avx512.vpdpbusds.128" => "__builtin_ia32_vpdpbusds_v4si", + // NOTE: this file is generated by https://github.com/GuillaumeGomez/llvmint/blob/master/generate_list.py _ => include!("archs.rs"), }; diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index 49be6c649e6..2590e0e3af4 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -1,6 +1,9 @@ pub mod llvm; mod simd; +#[cfg(feature="master")] +use std::iter; + use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType}; use rustc_codegen_ssa::MemFlags; use rustc_codegen_ssa::base::wants_msvc_seh; @@ -8,15 +11,23 @@ use rustc_codegen_ssa::common::IntPredicate; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; +#[cfg(feature="master")] +use rustc_codegen_ssa::traits::{DerivedTypeMethods, MiscMethods}; use rustc_middle::bug; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::ty::layout::LayoutOf; +#[cfg(feature="master")] +use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use rustc_span::{Span, Symbol, symbol::kw, sym}; use rustc_target::abi::HasDataLayout; use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode}; use rustc_target::spec::PanicStrategy; +#[cfg(feature="master")] +use rustc_target::spec::abi::Abi; use crate::abi::GccType; +#[cfg(feature="master")] +use crate::abi::FnAbiGccExt; use crate::builder::Builder; use crate::common::{SignType, TypeReflection}; use crate::context::CodegenCx; @@ -91,7 +102,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { let name = tcx.item_name(def_id); let name_str = name.as_str(); - let llret_ty = self.layout_of(ret_ty).gcc_type(self, true); + let llret_ty = self.layout_of(ret_ty).gcc_type(self); let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout); let simple = get_simple_intrinsic(self, name); @@ -404,7 +415,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { /// Gets the LLVM type for a place of the original Rust type of /// this argument/return, i.e., the result of `type_of::type_of`. fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { - self.layout.gcc_type(cx, true) + self.layout.gcc_type(cx) } /// Stores a direct/indirect value described by this ArgAbi into a @@ -1120,10 +1131,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } } -fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) { - // NOTE: the `|| true` here is to use the panic=abort strategy with panic=unwind too - if bx.sess().panic_strategy() == PanicStrategy::Abort || true { - // TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done. 
+fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(bx: &'b mut Builder<'a, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) { + if bx.sess().panic_strategy() == PanicStrategy::Abort { bx.call(bx.type_void(), None, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. @@ -1134,6 +1143,141 @@ fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue< unimplemented!(); } else { + #[cfg(feature="master")] + codegen_gnu_try(bx, try_func, data, _catch_func, dest); + #[cfg(not(feature="master"))] unimplemented!(); } } + +// Definition of the standard `try` function for Rust using the GNU-like model +// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke` +// instructions). +// +// This codegen is a little surprising because we always call a shim +// function instead of inlining the call to `invoke` manually here. This is done +// because in LLVM we're only allowed to have one personality per function +// definition. The call to the `try` intrinsic is being inlined into the +// function calling it, and that function may already have other personality +// functions in play. By calling a shim we're guaranteed that our shim will have +// the right personality function. +#[cfg(feature="master")] +fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>, data: RValue<'gcc>, catch_func: RValue<'gcc>, dest: RValue<'gcc>) { + let cx: &CodegenCx<'gcc, '_> = bx.cx; + let (llty, func) = get_rust_try_fn(cx, &mut |mut bx| { + // Codegens the shims described above: + // + // bx: + // invoke %try_func(%data) normal %normal unwind %catch + // + // normal: + // ret 0 + // + // catch: + // (%ptr, _) = landingpad + // call %catch_func(%data, %ptr) + // ret 1 + let then = bx.append_sibling_block("then"); + let catch = bx.append_sibling_block("catch"); + + let func = bx.current_func(); + let try_func = func.get_param(0).to_rvalue(); + let data = func.get_param(1).to_rvalue(); + let catch_func = func.get_param(2).to_rvalue(); + let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + + let current_block = bx.block.clone(); + + bx.switch_to_block(then); + bx.ret(bx.const_i32(0)); + + // Type indicator for the exception being thrown. + // + // The value is a pointer to the exception object + // being thrown. + bx.switch_to_block(catch); + bx.set_personality_fn(bx.eh_personality()); + + let eh_pointer_builtin = bx.cx.context.get_target_builtin_function("__builtin_eh_pointer"); + let zero = bx.cx.context.new_rvalue_zero(bx.int_type); + let ptr = bx.cx.context.new_call(None, eh_pointer_builtin, &[zero]); + let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + bx.call(catch_ty, None, catch_func, &[data, ptr], None); + bx.ret(bx.const_i32(1)); + + // NOTE: the blocks must be filled before adding the try/catch, otherwise gcc will not + // generate a try/catch. + // FIXME(antoyo): add a check in the libgccjit API to prevent this. + bx.switch_to_block(current_block); + bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None); + }); + + let func = unsafe { std::mem::transmute(func) }; + + // Note that no invoke is used here because by definition this function + // can't panic (that's what it's catching). 
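// Minimal sketch of the contract implemented by the generated shim (this is a model, not
// the codegen path): run the "try" function, return 0 when it completes normally, run the
// "catch" handler and return 1 when it unwinds.
fn rust_try_model<T, C>(try_func: T, catch_func: C) -> i32
where
    T: FnOnce() + std::panic::UnwindSafe,
    C: FnOnce(),
{
    match std::panic::catch_unwind(try_func) {
        Ok(()) => 0,                   // the "then" block: no unwinding happened
        Err(_) => { catch_func(); 1 }  // the "catch" block: the handler ran
    }
}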
+ let ret = bx.call(llty, None, func, &[try_func, data, catch_func], None); + let i32_align = bx.tcx().data_layout.i32_align.abi; + bx.store(ret, dest, i32_align); +} + + +// Helper function used to get a handle to the `__rust_try` function used to +// catch exceptions. +// +// This function is only generated once and is then cached. +#[cfg(feature="master")] +fn get_rust_try_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) { + if let Some(llfn) = cx.rust_try_fn.get() { + return llfn; + } + + // Define the type up front for the signature of the rust_try function. + let tcx = cx.tcx; + let i8p = tcx.mk_mut_ptr(tcx.types.i8); + // `unsafe fn(*mut i8) -> ()` + let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig( + iter::once(i8p), + tcx.mk_unit(), + false, + rustc_hir::Unsafety::Unsafe, + Abi::Rust, + ))); + // `unsafe fn(*mut i8, *mut i8) -> ()` + let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig( + [i8p, i8p].iter().cloned(), + tcx.mk_unit(), + false, + rustc_hir::Unsafety::Unsafe, + Abi::Rust, + ))); + // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32` + let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig( + [try_fn_ty, i8p, catch_fn_ty], + tcx.types.i32, + false, + rustc_hir::Unsafety::Unsafe, + Abi::Rust, + )); + let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen); + cx.rust_try_fn.set(Some(rust_try)); + rust_try +} + +// Helper function to give a Block to a closure to codegen a shim function. +// This is currently primarily used for the `try` intrinsic functions above. +#[cfg(feature="master")] +fn gen_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, name: &str, rust_fn_sig: ty::PolyFnSig<'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) { + let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty()); + let (typ, _, _, _) = fn_abi.gcc_type(cx); + // FIXME(eddyb) find a nicer way to do this. 
+ cx.linkage.set(FunctionType::Internal); + let func = cx.declare_fn(name, fn_abi); + let func_val = unsafe { std::mem::transmute(func) }; + cx.set_frame_pointer_type(func_val); + cx.apply_target_cpu_attr(func_val); + let block = Builder::append_block(cx, func_val, "entry-block"); + let bx = Builder::build(cx, block); + codegen(bx); + (typ, func) +} diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs index cb8168b4071..b59c3a64f57 100644 --- a/src/intrinsic/simd.rs +++ b/src/intrinsic/simd.rs @@ -1,8 +1,13 @@ -use std::cmp::Ordering; +#[cfg(feature="master")] +use gccjit::{ComparisonOp, UnaryOp}; +use gccjit::ToRValue; +use gccjit::{BinaryOp, RValue, Type}; -use gccjit::{BinaryOp, RValue, ToRValue, Type}; use rustc_codegen_ssa::base::compare_simd_types; -use rustc_codegen_ssa::common::TypeKind; +use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; +#[cfg(feature="master")] +use rustc_codegen_ssa::errors::ExpectedPointerMutability; +use rustc_codegen_ssa::errors::InvalidMonomorphization; use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods}; @@ -14,18 +19,21 @@ use rustc_span::{sym, Span, Symbol}; use rustc_target::abi::Align; use crate::builder::Builder; +#[cfg(feature="master")] +use crate::context::CodegenCx; +#[cfg(feature="master")] +use crate::errors::{InvalidMonomorphizationExpectedSignedUnsigned, InvalidMonomorphizationInsertedType}; use crate::errors::{ - InvalidMonomorphizationExpectedSignedUnsigned, InvalidMonomorphizationExpectedSimd, - InvalidMonomorphizationInsertedType, InvalidMonomorphizationInvalidBitmask, + InvalidMonomorphizationExpectedSimd, + InvalidMonomorphizationInvalidBitmask, InvalidMonomorphizationInvalidFloatVector, InvalidMonomorphizationMaskType, InvalidMonomorphizationMismatchedLengths, InvalidMonomorphizationNotFloat, InvalidMonomorphizationReturnElement, InvalidMonomorphizationReturnIntegerType, InvalidMonomorphizationReturnLength, InvalidMonomorphizationReturnLengthInputType, InvalidMonomorphizationReturnType, InvalidMonomorphizationSimdShuffle, - InvalidMonomorphizationUnrecognized, InvalidMonomorphizationUnsupportedCast, - InvalidMonomorphizationUnsupportedElement, InvalidMonomorphizationUnsupportedOperation, + InvalidMonomorphizationUnrecognized, InvalidMonomorphizationUnsupportedElement, + InvalidMonomorphizationUnsupportedOperation, }; -use crate::intrinsic; pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( bx: &mut Builder<'a, 'gcc, 'tcx>, @@ -105,14 +113,19 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let arg1_vector_type = arg1_type.unqualified().dyncast_vector().expect("vector type"); let arg1_element_type = arg1_vector_type.get_element_type(); + // NOTE: since the arguments can be vectors of floats, make sure the mask is a vector of + // integer. 
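// Illustrative scalar model, assuming vector_select treats non-zero mask lanes as true: the
// integer mask vector built below drives a per-lane select equivalent to this.
fn masked_select_f32(mask: u64, if_true: &[f32], if_false: &[f32]) -> Vec<f32> {
    if_true.iter().zip(if_false).enumerate()
        .map(|(i, (&t, &f))| if (mask >> i) & 1 == 1 { t } else { f })
        .collect()
}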
+ let mask_element_type = bx.type_ix(arg1_element_type.get_size() as u64 * 8); + let vector_mask_type = bx.context.new_vector_type(mask_element_type, arg1_vector_type.get_num_units() as u64); + let mut elements = vec![]; let one = bx.context.new_rvalue_one(mask.get_type()); for _ in 0..len { - let element = bx.context.new_cast(None, mask & one, arg1_element_type); + let element = bx.context.new_cast(None, mask & one, mask_element_type); elements.push(element); mask = mask >> one; } - let vector_mask = bx.context.new_rvalue_from_vector(None, arg1_type, &elements); + let vector_mask = bx.context.new_rvalue_from_vector(None, vector_mask_type, &elements); return Ok(bx.vector_select(vector_mask, arg1, args[2].immediate())); } @@ -210,48 +223,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let vector = args[0].immediate(); let index = args[1].immediate(); let value = args[2].immediate(); - // TODO(antoyo): use a recursive unqualified() here. - let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type"); - let element_type = vector_type.get_element_type(); - // NOTE: we cannot cast to an array and assign to its element here because the value might - // not be an l-value. So, call a builtin to set the element. - // TODO(antoyo): perhaps we could create a new vector or maybe there's a GIMPLE instruction for that? - // TODO(antoyo): don't use target specific builtins here. - let func_name = match in_len { - 2 => { - if element_type == bx.i64_type { - "__builtin_ia32_vec_set_v2di" - } else { - unimplemented!(); - } - } - 4 => { - if element_type == bx.i32_type { - "__builtin_ia32_vec_set_v4si" - } else { - unimplemented!(); - } - } - 8 => { - if element_type == bx.i16_type { - "__builtin_ia32_vec_set_v8hi" - } else { - unimplemented!(); - } - } - _ => unimplemented!("Len: {}", in_len), - }; - let builtin = bx.context.get_target_builtin_function(func_name); - let param1_type = builtin.get_param(0).to_rvalue().get_type(); - // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. - let vector = bx.cx.bitcast_if_needed(vector, param1_type); - let result = bx.context.new_call( - None, - builtin, - &[vector, value, bx.context.new_cast(None, index, bx.int_type)], - ); - // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. - return Ok(bx.context.new_bitcast(None, result, vector.get_type())); + let variable = bx.current_func().new_local(None, vector.get_type(), "new_vector"); + bx.llbb().add_assignment(None, variable, vector); + let lvalue = bx.context.new_vector_access(None, variable.to_rvalue(), index); + // TODO(antoyo): if simd_insert is constant, use BIT_REF. + bx.llbb().add_assignment(None, lvalue, value); + return Ok(variable.to_rvalue()); } #[cfg(feature = "master")] @@ -280,7 +257,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate())); } - if name == sym::simd_cast { + #[cfg(feature="master")] + if name == sym::simd_cast || name == sym::simd_as { require_simd!(ret_ty, "return"); let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); require!( @@ -301,125 +279,40 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( enum Style { Float, - Int(/* is signed? */ bool), + Int, Unsupported, } - let (in_style, in_width) = match in_elem.kind() { - // vectors of pointer-sized integers should've been - // disallowed before here, so this unwrap is safe. 
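// Plain-Rust model of the simd_insert rewrite earlier in this hunk: rather than calling
// target-specific vec_set builtins, the vector is copied into a local and one lane is
// overwritten through an lvalue access, roughly like this:
fn simd_insert_model<const N: usize>(vector: [f32; N], index: usize, value: f32) -> [f32; N] {
    let mut new_vector = vector; // the "new_vector" local
    new_vector[index] = value;   // single lane overwritten in place
    new_vector
}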
- ty::Int(i) => ( - Style::Int(true), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Uint(u) => ( - Style::Int(false), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - let (out_style, out_width) = match out_elem.kind() { - ty::Int(i) => ( - Style::Int(true), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Uint(u) => ( - Style::Int(false), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - - let extend = |in_type, out_type| { - let vector_type = bx.context.new_vector_type(out_type, 8); - let vector = args[0].immediate(); - let array_type = bx.context.new_array_type(None, in_type, 8); - // TODO(antoyo): switch to using new_vector_access or __builtin_convertvector for vector casting. - let array = bx.context.new_bitcast(None, vector, array_type); - - let cast_vec_element = |index| { - let index = bx.context.new_rvalue_from_int(bx.int_type, index); - bx.context.new_cast( - None, - bx.context.new_array_access(None, array, index).to_rvalue(), - out_type, - ) + let in_style = + match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => Style::Int, + ty::Float(_) => Style::Float, + _ => Style::Unsupported, }; - bx.context.new_rvalue_from_vector( - None, - vector_type, - &[ - cast_vec_element(0), - cast_vec_element(1), - cast_vec_element(2), - cast_vec_element(3), - cast_vec_element(4), - cast_vec_element(5), - cast_vec_element(6), - cast_vec_element(7), - ], - ) - }; + let out_style = + match out_elem.kind() { + ty::Int(_) | ty::Uint(_) => Style::Int, + ty::Float(_) => Style::Float, + _ => Style::Unsupported, + }; match (in_style, out_style) { - (Style::Int(in_is_signed), Style::Int(_)) => { - return Ok(match in_width.cmp(&out_width) { - Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty), - Ordering::Equal => args[0].immediate(), - Ordering::Less => { - if in_is_signed { - match (in_width, out_width) { - // FIXME(antoyo): the function _mm_cvtepi8_epi16 should directly - // call an intrinsic equivalent to __builtin_ia32_pmovsxbw128 so that - // we can generate a call to it. - (8, 16) => extend(bx.i8_type, bx.i16_type), - (8, 32) => extend(bx.i8_type, bx.i32_type), - (8, 64) => extend(bx.i8_type, bx.i64_type), - (16, 32) => extend(bx.i16_type, bx.i32_type), - (32, 64) => extend(bx.i32_type, bx.i64_type), - (16, 64) => extend(bx.i16_type, bx.i64_type), - _ => unimplemented!("in: {}, out: {}", in_width, out_width), - } - } else { - match (in_width, out_width) { - (8, 16) => extend(bx.u8_type, bx.u16_type), - (8, 32) => extend(bx.u8_type, bx.u32_type), - (8, 64) => extend(bx.u8_type, bx.u64_type), - (16, 32) => extend(bx.u16_type, bx.u32_type), - (16, 64) => extend(bx.u16_type, bx.u64_type), - (32, 64) => extend(bx.u32_type, bx.u64_type), - _ => unimplemented!("in: {}, out: {}", in_width, out_width), - } - } + (Style::Unsupported, Style::Unsupported) => { + require!( + false, + InvalidMonomorphization::UnsupportedCast { + span, + name, + in_ty, + in_elem, + ret_ty, + out_elem } - }); - } - (Style::Int(_), Style::Float) => { - // TODO: add support for internal functions in libgccjit to get access to IFN_VEC_CONVERT which is - // doing like __builtin_convertvector? - // Or maybe provide convert_vector as an API since it might not easy to get the - // types of internal functions. 
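// Illustrative model of the simd_bitmask lowering added further below, shown for i32 lanes
// and at most 8 of them, in the LSB-first (little-endian) ordering its comment describes:
fn simd_bitmask_model(lanes: &[i32]) -> u8 {
    let mut result = 0u8;
    for (i, lane) in lanes.iter().enumerate() {
        let msb = ((lane >> 31) & 1) as u8; // most significant (sign) bit of the lane
        result |= msb << i;
    }
    result
}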
- unimplemented!(); - } - (Style::Float, Style::Int(_)) => { - unimplemented!(); - } - (Style::Float, Style::Float) => { - unimplemented!(); - } - _ => { /* Unsupported. Fallthrough. */ } + ); + }, + _ => return Ok(bx.context.convert_vector(None, args[0].immediate(), llret_ty)), } - return_error!(InvalidMonomorphizationUnsupportedCast { - span, - name, - in_ty, - in_elem, - ret_ty, - out_elem - }); } macro_rules! arith_binary { @@ -436,6 +329,71 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( } } + if name == sym::simd_bitmask { + // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a + // vector mask and returns the most significant bit (MSB) of each lane in the form + // of either: + // * an unsigned integer + // * an array of `u8` + // If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits. + // + // The bit order of the result depends on the byte endianness, LSB-first for little + // endian and MSB-first for big endian. + + let vector = args[0].immediate(); + let vector_type = vector.get_type().dyncast_vector().expect("vector type"); + let elem_type = vector_type.get_element_type(); + + let expected_int_bits = in_len.max(8); + let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64); + + // FIXME(antoyo): that's not going to work for masks bigger than 128 bits. + let result_type = bx.type_ix(expected_int_bits); + let mut result = bx.context.new_rvalue_zero(result_type); + + let elem_size = elem_type.get_size() * 8; + let sign_shift = bx.context.new_rvalue_from_int(elem_type, elem_size as i32 - 1); + let one = bx.context.new_rvalue_one(elem_type); + + let mut shift = 0; + for i in 0..in_len { + let elem = bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32)); + let shifted = elem >> sign_shift; + let masked = shifted & one; + result = result | (bx.context.new_cast(None, masked, result_type) << bx.context.new_rvalue_from_int(result_type, shift)); + shift += 1; + } + + match ret_ty.kind() { + ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => { + // Zero-extend iN to the bitmask type: + return Ok(result); + } + ty::Array(elem, len) + if matches!(elem.kind(), ty::Uint(ty::UintTy::U8)) + && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all()) + == Some(expected_bytes) => + { + // Zero-extend iN to the array length: + let ze = bx.zext(result, bx.type_ix(expected_bytes * 8)); + + // Convert the integer to a byte array + let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); + bx.store(ze, ptr, Align::ONE); + let array_ty = bx.type_array(bx.type_i8(), expected_bytes); + let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty)); + return Ok(bx.load(array_ty, ptr, Align::ONE)); + } + _ => return_error!(InvalidMonomorphization::CannotReturn { + span, + name, + ret_ty, + expected_int_bits, + expected_bytes + }), + } + } + fn simd_simple_float_intrinsic<'gcc, 'tcx>( name: Symbol, in_elem: Ty<'_>, @@ -451,55 +409,66 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return Err(()); }}; } - let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() { - let elem_ty = bx.cx.type_float_from_ty(*f); - match f.bit_width() { - 32 => ("f32", elem_ty), - 64 => ("f64", elem_ty), - _ => { - return_error!(InvalidMonomorphizationInvalidFloatVector { - span, - name, - elem_ty: f.name_str(), - vec_ty: in_ty - }); + let (elem_ty_str, elem_ty) = + if let ty::Float(f) = in_elem.kind() { + let elem_ty = bx.cx.type_float_from_ty(*f); + match f.bit_width() { + 32 => ("f", elem_ty), + 64 => 
("", elem_ty), + _ => { + return_error!(InvalidMonomorphizationInvalidFloatVector { span, name, elem_ty: f.name_str(), vec_ty: in_ty }); + } } } - } else { - return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty }); - }; + else { + return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty }); + }; let vec_ty = bx.cx.type_vector(elem_ty, in_len); - let (intr_name, fn_ty) = match name { - sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), // TODO(antoyo): pand with 170141183420855150465331762880109871103 - sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), - sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)), - sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)), - sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)), - _ => return_error!(InvalidMonomorphizationUnrecognized { span, name }), - }; - let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); - let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx); - let function: RValue<'gcc> = unsafe { std::mem::transmute(function) }; - let c = bx.call( - fn_ty, - None, - function, - &args.iter().map(|arg| arg.immediate()).collect::>(), - None, - ); + let intr_name = + match name { + sym::simd_ceil => "ceil", + sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103 + sym::simd_fcos => "cos", + sym::simd_fexp2 => "exp2", + sym::simd_fexp => "exp", + sym::simd_flog10 => "log10", + sym::simd_flog2 => "log2", + sym::simd_flog => "log", + sym::simd_floor => "floor", + sym::simd_fma => "fma", + sym::simd_fpowi => "__builtin_powi", + sym::simd_fpow => "pow", + sym::simd_fsin => "sin", + sym::simd_fsqrt => "sqrt", + sym::simd_round => "round", + sym::simd_trunc => "trunc", + _ => return_error!(InvalidMonomorphizationUnrecognized { span, name }) + }; + let builtin_name = format!("{}{}", intr_name, elem_ty_str); + let funcs = bx.cx.functions.borrow(); + let function = funcs.get(&builtin_name).unwrap_or_else(|| panic!("unable to find builtin function {}", builtin_name)); + + // TODO(antoyo): add platform-specific behavior here for architectures that have these + // intrinsics as instructions (for instance, gpus) + let mut vector_elements = vec![]; + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64); + // we have to treat fpowi specially, since fpowi's second argument is always an i32 + let arguments = if name == sym::simd_fpowi { + vec![ + bx.extract_element(args[0].immediate(), index).to_rvalue(), + args[1].immediate(), + ] + } else { + args.iter() + .map(|arg| bx.extract_element(arg.immediate(), index).to_rvalue()) + .collect() + }; + vector_elements.push(bx.context.new_call(None, *function, &arguments)); + } + let c = 
bx.context.new_rvalue_from_vector(None, vec_ty, &vector_elements); Ok(c) } @@ -525,6 +494,297 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args); } + #[cfg(feature="master")] + fn vector_ty<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, elem_ty: Ty<'tcx>, vec_len: u64) -> Type<'gcc> { + // FIXME: use cx.layout_of(ty).llvm_type() ? + let elem_ty = match *elem_ty.kind() { + ty::Int(v) => cx.type_int_from_ty(v), + ty::Uint(v) => cx.type_uint_from_ty(v), + ty::Float(v) => cx.type_float_from_ty(v), + _ => unreachable!(), + }; + cx.type_vector(elem_ty, vec_len) + } + + #[cfg(feature="master")] + fn gather<'a, 'gcc, 'tcx>(default: RValue<'gcc>, pointers: RValue<'gcc>, mask: RValue<'gcc>, pointer_count: usize, bx: &mut Builder<'a, 'gcc, 'tcx>, in_len: u64, underlying_ty: Ty<'tcx>, invert: bool) -> RValue<'gcc> { + let vector_type = + if pointer_count > 1 { + bx.context.new_vector_type(bx.usize_type, in_len) + } + else { + vector_ty(bx, underlying_ty, in_len) + }; + let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type(); + + let mut values = vec![]; + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64); + let int = bx.context.new_vector_access(None, pointers, index).to_rvalue(); + + let ptr_type = elem_type.make_pointer(); + let ptr = bx.context.new_bitcast(None, int, ptr_type); + let value = ptr.dereference(None).to_rvalue(); + values.push(value); + } + + let vector = bx.context.new_rvalue_from_vector(None, vector_type, &values); + + let mut mask_types = vec![]; + let mut mask_values = vec![]; + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64); + mask_types.push(bx.context.new_field(None, bx.i32_type, "m")); + let mask_value = bx.context.new_vector_access(None, mask, index).to_rvalue(); + let masked = bx.context.new_rvalue_from_int(bx.i32_type, in_len as i32) & mask_value; + let value = index + masked; + mask_values.push(value); + } + let mask_type = bx.context.new_struct_type(None, "mask_type", &mask_types); + let mask = bx.context.new_struct_constructor(None, mask_type.as_type(), None, &mask_values); + + if invert { + bx.shuffle_vector(vector, default, mask) + } + else { + bx.shuffle_vector(default, vector, mask) + } + } + + #[cfg(feature="master")] + if name == sym::simd_gather { + // simd_gather(values: , pointers: , + // mask: ) -> + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + require_simd!(ret_ty, "return"); + + // Of the same length: + let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + require!( + in_len == out_len, + InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len + } + ); + require!( + in_len == out_len2, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: out_len2 + } + ); + + // The return type must match the first argument type + require!( + ret_ty == in_ty, + InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty } + ); + + // This counts how many pointers + fn ptr_count(t: Ty<'_>) -> usize { + match t.kind() { + ty::RawPtr(p) 
=> 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: Ty<'_>) -> Ty<'_> { + match t.kind() { + ty::RawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); + let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (pointer_count, underlying_ty) = match element_ty1.kind() { + ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)), + _ => { + require!( + false, + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Not, + } + ); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert_eq!(pointer_count - 1, ptr_count(element_ty0)); + assert_eq!(underlying_ty, non_ptr(element_ty0)); + + // The element type of the third argument must be a signed integer type of any width: + let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + match element_ty2.kind() { + ty::Int(_) => (), + _ => { + require!( + false, + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: element_ty2, + third_arg: arg_tys[2] + } + ); + } + } + + return Ok(gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, underlying_ty, false)); + } + + #[cfg(feature="master")] + if name == sym::simd_scatter { + // simd_scatter(values: , pointers: , + // mask: ) -> () + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + + // Of the same length: + let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + require!( + in_len == element_len1, + InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len: element_len1 + } + ); + require!( + in_len == element_len2, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: element_len2 + } + ); + + // This counts how many pointers + fn ptr_count(t: Ty<'_>) -> usize { + match t.kind() { + ty::RawPtr(p) => 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: Ty<'_>) -> Ty<'_> { + match t.kind() { + ty::RawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); + let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + let (pointer_count, underlying_ty) = match element_ty1.kind() { + ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => { + (ptr_count(element_ty1), non_ptr(element_ty1)) + } + _ => { + require!( + false, + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Mut, + } + ); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert_eq!(pointer_count 
- 1, ptr_count(element_ty0)); + assert_eq!(underlying_ty, non_ptr(element_ty0)); + + // The element type of the third argument must be a signed integer type of any width: + match element_ty2.kind() { + ty::Int(_) => (), + _ => { + require!( + false, + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: element_ty2, + third_arg: arg_tys[2] + } + ); + } + } + + let result = gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, underlying_ty, true); + + let pointers = args[1].immediate(); + + let vector_type = + if pointer_count > 1 { + bx.context.new_vector_type(bx.usize_type, in_len) + } + else { + vector_ty(bx, underlying_ty, in_len) + }; + let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type(); + + for i in 0..in_len { + let index = bx.context.new_rvalue_from_int(bx.int_type, i as i32); + let value = bx.context.new_vector_access(None, result, index); + + let int = bx.context.new_vector_access(None, pointers, index).to_rvalue(); + let ptr_type = elem_type.make_pointer(); + let ptr = bx.context.new_bitcast(None, int, ptr_type); + bx.llbb().add_assignment(None, ptr.dereference(None), value); + } + + return Ok(bx.context.new_rvalue_zero(bx.i32_type)); + } + arith_binary! { simd_add: Uint, Int => add, Float => fadd; simd_sub: Uint, Int => sub, Float => fsub; @@ -536,6 +796,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( simd_and: Uint, Int => and; simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors. simd_xor: Uint, Int => xor; + simd_fmin: Float => vector_fmin; + simd_fmax: Float => vector_fmax; } macro_rules! arith_unary { @@ -562,10 +824,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let rhs = args[1].immediate(); let is_add = name == sym::simd_saturating_add; let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _; - let (signed, elem_width, elem_ty) = match *in_elem.kind() { - ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)), - ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)), - _ => { + let (signed, elem_width, elem_ty) = + match *in_elem.kind() { + ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)), + ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_uint_from_ty(i)), + _ => { return_error!(InvalidMonomorphizationExpectedSignedUnsigned { span, name, @@ -574,33 +837,78 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( }); } }; - let builtin_name = match (signed, is_add, in_len, elem_width) { - (true, true, 32, 8) => "__builtin_ia32_paddsb256", // TODO(antoyo): cast arguments to unsigned. 
- (false, true, 32, 8) => "__builtin_ia32_paddusb256", - (true, true, 16, 16) => "__builtin_ia32_paddsw256", - (false, true, 16, 16) => "__builtin_ia32_paddusw256", - (true, false, 16, 16) => "__builtin_ia32_psubsw256", - (false, false, 16, 16) => "__builtin_ia32_psubusw256", - (true, false, 32, 8) => "__builtin_ia32_psubsb256", - (false, false, 32, 8) => "__builtin_ia32_psubusb256", - _ => unimplemented!( - "signed: {}, is_add: {}, in_len: {}, elem_width: {}", - signed, - is_add, - in_len, - elem_width - ), - }; - let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64); - - let func = bx.context.get_target_builtin_function(builtin_name); - let param1_type = func.get_param(0).to_rvalue().get_type(); - let param2_type = func.get_param(1).to_rvalue().get_type(); - let lhs = bx.cx.bitcast_if_needed(lhs, param1_type); - let rhs = bx.cx.bitcast_if_needed(rhs, param2_type); - let result = bx.context.new_call(None, func, &[lhs, rhs]); - // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. - return Ok(bx.context.new_bitcast(None, result, vec_ty)); + + let result = + match (signed, is_add) { + (false, true) => { + let res = lhs + rhs; + let cmp = bx.context.new_comparison(None, ComparisonOp::LessThan, res, lhs); + res | cmp + }, + (true, true) => { + // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition + // TODO(antoyo): improve using conditional operators if possible. + let arg_type = lhs.get_type(); + // TODO(antoyo): convert lhs and rhs to unsigned. + let sum = lhs + rhs; + let vector_type = arg_type.dyncast_vector().expect("vector type"); + let unit = vector_type.get_num_units(); + let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1); + let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]); + + let xor1 = lhs ^ rhs; + let xor2 = lhs ^ sum; + let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2; + let mask = and >> width; + + let one = bx.context.new_rvalue_one(elem_ty); + let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]); + let shift1 = ones << width; + let shift2 = sum >> width; + let mask_min = shift1 ^ shift2; + + let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum; + let and2 = mask & mask_min; + + and1 + and2 + }, + (false, false) => { + let res = lhs - rhs; + let cmp = bx.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs); + res & cmp + }, + (true, false) => { + let arg_type = lhs.get_type(); + // TODO(antoyo): this uses the same algorithm from saturating add, but add the + // negative of the right operand. Find a proper subtraction algorithm. + let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs); + + // TODO(antoyo): convert lhs and rhs to unsigned. 
+ let sum = lhs + rhs; + let vector_type = arg_type.dyncast_vector().expect("vector type"); + let unit = vector_type.get_num_units(); + let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1); + let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]); + + let xor1 = lhs ^ rhs; + let xor2 = lhs ^ sum; + let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2; + let mask = and >> width; + + let one = bx.context.new_rvalue_one(elem_ty); + let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]); + let shift1 = ones << width; + let shift2 = sum >> width; + let mask_min = shift1 ^ shift2; + + let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum; + let and2 = mask & mask_min; + + and1 + and2 + } + }; + + return Ok(result); } macro_rules! arith_red { @@ -650,33 +958,50 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( add, 0.0 // TODO: Use this argument. ); - arith_red!(simd_reduce_mul_unordered: BinaryOp::Mult, vector_reduce_fmul_fast, false, mul, 1.0); + arith_red!( + simd_reduce_mul_unordered: BinaryOp::Mult, + vector_reduce_fmul_fast, + false, + mul, + 1.0 + ); + arith_red!( + simd_reduce_add_ordered: BinaryOp::Plus, + vector_reduce_fadd, + true, + add, + 0.0 + ); + arith_red!( + simd_reduce_mul_ordered: BinaryOp::Mult, + vector_reduce_fmul, + true, + mul, + 1.0 + ); + macro_rules! minmax_red { - ($name:ident: $reduction:ident) => { + ($name:ident: $int_red:ident, $float_red:ident) => { if name == sym::$name { require!( ret_ty == in_elem, InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } ); return match in_elem.kind() { - ty::Int(_) | ty::Uint(_) | ty::Float(_) => { - Ok(bx.$reduction(args[0].immediate())) - } - _ => return_error!(InvalidMonomorphizationUnsupportedElement { - span, - name, - in_ty, - elem_ty: in_elem, - ret_ty - }), + ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())), + ty::Float(_) => Ok(bx.$float_red(args[0].immediate())), + _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), }; } }; } - minmax_red!(simd_reduce_min: vector_reduce_min); - minmax_red!(simd_reduce_max: vector_reduce_max); + minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin); + minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax); + // TODO(sadlerap): revisit these intrinsics to generate more optimal reductions + minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin); + minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax); macro_rules! 
bitwise_red { ($name:ident : $op:expr, $boolean:expr) => { @@ -699,15 +1024,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( }), } - // boolean reductions operate on vectors of i1s: - let i1 = bx.type_i1(); - let i1xn = bx.type_vector(i1, in_len as u64); - bx.trunc(args[0].immediate(), i1xn) + args[0].immediate() }; return match in_elem.kind() { ty::Int(_) | ty::Uint(_) => { let r = bx.vector_reduce_op(input, $op); - Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) }) + Ok(if !$boolean { r } else { bx.icmp(IntPredicate::IntNE, r, bx.context.new_rvalue_zero(r.get_type())) }) } _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, @@ -723,6 +1045,9 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( bitwise_red!(simd_reduce_and: BinaryOp::BitwiseAnd, false); bitwise_red!(simd_reduce_or: BinaryOp::BitwiseOr, false); + bitwise_red!(simd_reduce_xor: BinaryOp::BitwiseXor, false); + bitwise_red!(simd_reduce_all: BinaryOp::BitwiseAnd, true); + bitwise_red!(simd_reduce_any: BinaryOp::BitwiseOr, true); unimplemented!("simd {}", name); } diff --git a/src/lib.rs b/src/lib.rs index 44538b41528..1b7feb5f8a1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,7 @@ /* * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?) * TODO(antoyo): support #[inline] attributes. - * TODO(antoyo): support LTO (gcc's equivalent to Thin LTO is enabled by -fwhopr: https://stackoverflow.com/questions/64954525/does-gcc-have-thin-lto). + * TODO(antoyo): support LTO (gcc's equivalent to Full LTO is -flto -flto-partition=one — https://documentation.suse.com/sbp/all/html/SBP-GCC-10/index.html). * * TODO(antoyo): remove the patches. */ @@ -23,6 +23,7 @@ extern crate rustc_apfloat; extern crate rustc_ast; +extern crate rustc_attr; extern crate rustc_codegen_ssa; extern crate rustc_data_structures; extern crate rustc_errors; @@ -43,6 +44,7 @@ mod abi; mod allocator; mod archive; mod asm; +mod attributes; mod back; mod base; mod builder; @@ -314,9 +316,12 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec { .filter(|_feature| { // TODO(antoyo): implement a way to get enabled feature in libgccjit. // Probably using the equivalent of __builtin_cpu_supports. + // TODO(antoyo): maybe use whatever outputs the following command: + // gcc -march=native -Q --help=target #[cfg(feature="master")] { - _feature.contains("sse") || _feature.contains("avx") + // NOTE: the CPU in the CI doesn't support sse4a, so disable it to make the stdarch tests pass in the CI. 
+ (_feature.contains("sse") || _feature.contains("avx")) && !_feature.contains("avx512") && !_feature.contains("sse4a") } #[cfg(not(feature="master"))] { diff --git a/src/mono_item.rs b/src/mono_item.rs index a7c868354fb..c1f6340866c 100644 --- a/src/mono_item.rs +++ b/src/mono_item.rs @@ -1,38 +1,66 @@ +#[cfg(feature="master")] +use gccjit::{VarAttribute, FnAttribute}; use rustc_codegen_ssa::traits::PreDefineMethods; +use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::mono::{Linkage, Visibility}; use rustc_middle::ty::{self, Instance, TypeVisitableExt}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf}; -use rustc_span::def_id::DefId; +use crate::attributes; use crate::base; use crate::context::CodegenCx; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { - fn predefine_static(&self, def_id: DefId, _linkage: Linkage, _visibility: Visibility, symbol_name: &str) { + #[cfg_attr(not(feature="master"), allow(unused_variables))] + fn predefine_static(&self, def_id: DefId, _linkage: Linkage, visibility: Visibility, symbol_name: &str) { let attrs = self.tcx.codegen_fn_attrs(def_id); let instance = Instance::mono(self.tcx, def_id); let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); - let gcc_type = self.layout_of(ty).gcc_type(self, true); + let gcc_type = self.layout_of(ty).gcc_type(self); let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section); + #[cfg(feature="master")] + global.add_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility))); - // TODO(antoyo): set linkage and visibility. + // TODO(antoyo): set linkage. self.instances.borrow_mut().insert(instance, global); } - fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) { + #[cfg_attr(not(feature="master"), allow(unused_variables))] + fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, visibility: Visibility, symbol_name: &str) { assert!(!instance.substs.needs_infer()); let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); self.linkage.set(base::linkage_to_gcc(linkage)); - let _decl = self.declare_fn(symbol_name, &fn_abi); + let decl = self.declare_fn(symbol_name, &fn_abi); //let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); + attributes::from_fn_attrs(self, decl, instance); + + // If we're compiling the compiler-builtins crate, e.g., the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. + if linkage != Linkage::Internal + && linkage != Linkage::Private + && self.tcx.is_compiler_builtins(LOCAL_CRATE) + { + #[cfg(feature="master")] + decl.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + } + else { + #[cfg(feature="master")] + decl.add_attribute(FnAttribute::Visibility(base::visibility_to_gcc(visibility))); + } + // TODO(antoyo): call set_link_section() to allow initializing argc/argv. // TODO(antoyo): set unique comdat. // TODO(antoyo): use inline attribute from there in linkage.set() above. 
+ + self.functions.borrow_mut().insert(symbol_name.to_string(), decl); + self.function_instances.borrow_mut().insert(instance, unsafe { std::mem::transmute(decl) }); } } diff --git a/src/type_.rs b/src/type_.rs index 89a415cdb36..daa661f35c4 100644 --- a/src/type_.rs +++ b/src/type_.rs @@ -1,5 +1,3 @@ -use std::convert::TryInto; - use gccjit::{RValue, Struct, Type}; use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods}; use rustc_codegen_ssa::common::TypeKind; @@ -202,8 +200,9 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { value.get_type() } - fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> { - if let Some(struct_type) = ty.is_struct() { + fn type_array(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> { + // TODO: remove this as well? + /*if let Some(struct_type) = ty.is_struct() { if struct_type.get_field_count() == 0 { // NOTE: since gccjit only supports i32 for the array size and libcore's tests uses a // size of usize::MAX in test_binary_search, we workaround this by setting the size to @@ -211,14 +210,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { // FIXME(antoyo): fix gccjit API. len = 0; } - } - - // NOTE: see note above. Some other test uses usize::MAX. - if len == u64::MAX { - len = 0; - } - - let len: i32 = len.try_into().expect("array len"); + }*/ self.context.new_array_type(None, ty, len) } @@ -247,10 +239,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> { self.context.new_opaque_struct_type(None, name) } - - pub fn type_bool(&self) -> Type<'gcc> { - self.context.new_type::() - } } pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec>, bool) { @@ -273,7 +261,7 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout assert_eq!(offset.align_to(padding_align) + padding, target_offset); result.push(cx.type_padding_filler(padding, padding_align)); - result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME(antoyo): might need to check if the type is inside another, like Box. 
+ result.push(field.gcc_type(cx)); offset = target_offset + field.size; prev_effective_align = effective_field_align; } diff --git a/src/type_of.rs b/src/type_of.rs index ea2ce765053..5df8c1a209d 100644 --- a/src/type_of.rs +++ b/src/type_of.rs @@ -6,7 +6,7 @@ use rustc_middle::bug; use rustc_middle::ty::{self, Ty, TypeVisitableExt}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; use rustc_middle::ty::print::with_no_trimmed_paths; -use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants}; +use rustc_target::abi::{self, Abi, Align, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants}; use rustc_target::abi::call::{CastTarget, FnAbi, Reg}; use crate::abi::{FnAbiGccExt, GccType}; @@ -50,11 +50,25 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } -pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> { +impl<'a, 'tcx> CodegenCx<'a, 'tcx> { + pub fn align_of(&self, ty: Ty<'tcx>) -> Align { + self.layout_of(ty).align.abi + } +} + +fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> { match layout.abi { Abi::Scalar(_) => bug!("handled elsewhere"), Abi::Vector { ref element, count } => { let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO); + let element = + // NOTE: gcc doesn't allow pointer types in vectors. + if element.get_pointee().is_some() { + cx.usize_type + } + else { + element + }; return cx.context.new_vector_type(element, count); }, Abi::ScalarPair(..) => { @@ -114,7 +128,7 @@ pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLa }, } } - FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count), + FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx), count), FieldsShape::Arbitrary { .. } => match name { None => { @@ -133,7 +147,7 @@ pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLa pub trait LayoutGccExt<'tcx> { fn is_gcc_immediate(&self) -> bool; fn is_gcc_scalar_pair(&self) -> bool; - fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>; + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>; fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>; @@ -168,8 +182,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { /// with the inner-most trailing unsized field using the "minimal unit" /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. - //TODO(antoyo): do we still need the set_fields parameter? - fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> { + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { if let Abi::Scalar(ref scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs // can be either fat or thin (data pointers of fat pointers). 
@@ -179,10 +192,10 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { let ty = match *self.ty.kind() { ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields)) + cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx)) } ty::Adt(def, _) if def.is_box() => { - cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true)) + cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx)) } ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())), _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO), @@ -199,13 +212,6 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { }; let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned(); if let Some(ty) = cached_type { - let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty); - if let Some((struct_type, layout)) = type_to_set_fields { - // Since we might be trying to generate a type containing another type which is not - // completely generated yet, we deferred setting the fields until now. - let (fields, packed) = struct_fields(cx, layout); - cx.set_struct_body(struct_type, &fields, packed); - } return ty; } @@ -222,7 +228,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { if let Some(v) = variant_index { layout = layout.for_variant(cx, v); } - layout.gcc_type(cx, true) + layout.gcc_type(cx) } else { uncached_gcc_type(cx, *self, &mut defer) @@ -230,9 +236,9 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { cx.types.borrow_mut().insert((self.ty, variant_index), ty); - if let Some((ty, layout)) = defer { + if let Some((deferred_ty, layout)) = defer { let (fields, packed) = struct_fields(cx, layout); - cx.set_struct_body(ty, &fields, packed); + cx.set_struct_body(deferred_ty, &fields, packed); } ty @@ -244,7 +250,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { return cx.type_i1(); } } - self.gcc_type(cx, true) + self.gcc_type(cx) } fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> { @@ -273,7 +279,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // pointee types, to avoid bitcasting every `OperandRef::deref`. match self.ty.kind() { ty::Ref(..) 
| ty::RawPtr(_) => { - return self.field(cx, index).gcc_type(cx, true); + return self.field(cx, index).gcc_type(cx); } // only wide pointer boxes are handled as pointers // thin pointer boxes with scalar allocators are handled by the general logic below @@ -343,7 +349,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> { - layout.gcc_type(self, true) + layout.gcc_type(self) } fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> { diff --git a/test.sh b/test.sh index c5ffb763673..6139892aefc 100755 --- a/test.sh +++ b/test.sh @@ -17,17 +17,20 @@ export LIBRARY_PATH="$GCC_PATH" flags= gcc_master_branch=1 channel="debug" -func=all +funcs=() build_only=0 +nb_parts=0 +current_part=0 while [[ $# -gt 0 ]]; do case $1 in --release) codegen_channel=release + channel="release" shift ;; --release-sysroot) - sysroot_channel=release + sysroot_channel="--release" shift ;; --no-default-features) @@ -40,43 +43,83 @@ while [[ $# -gt 0 ]]; do flags="$flags --features $1" shift ;; - --release) - channel="release" + "--test-rustc") + funcs+=(test_rustc) shift ;; - "--test-rustc") - func=test_rustc + "--test-successful-rustc") + funcs+=(test_successful_rustc) + shift + ;; + "--test-failing-rustc") + funcs+=(test_failing_rustc) shift ;; "--test-libcore") - func=test_libcore + funcs+=(test_libcore) shift ;; "--clean-ui-tests") - func=clean_ui_tests + funcs+=(clean_ui_tests) + shift + ;; + "--clean") + funcs+=(clean) shift ;; "--std-tests") - func=std_tests + funcs+=(std_tests) + shift + ;; + + "--asm-tests") + funcs+=(asm_tests) shift ;; "--extended-tests") - func=extended_sysroot_tests + funcs+=(extended_sysroot_tests) + shift + ;; + "--extended-rand-tests") + funcs+=(extended_rand_tests) + shift + ;; + "--extended-regex-example-tests") + funcs+=(extended_regex_example_tests) + shift + ;; + "--extended-regex-tests") + funcs+=(extended_regex_tests) + shift + ;; + + "--mini-tests") + funcs+=(mini_tests) shift ;; "--build-sysroot") - func=build_sysroot + funcs+=(build_sysroot) shift ;; "--build") build_only=1 shift ;; + "--nb-parts") + shift + nb_parts=$1 + shift + ;; + "--current-part") + shift + current_part=$1 + shift + ;; *) echo "Unknown option $1" exit 1 @@ -87,7 +130,6 @@ done if [[ $channel == "release" ]]; then export CHANNEL='release' CARGO_INCREMENTAL=1 cargo rustc --release $flags - shift else echo $LD_LIBRARY_PATH export CHANNEL='debug' @@ -95,6 +137,7 @@ else fi if (( $build_only == 1 )); then + echo "Since it's 'build-only', exiting..." 
exit fi @@ -119,7 +162,7 @@ function mini_tests() { function build_sysroot() { echo "[BUILD] sysroot" - time ./build_sysroot/build_sysroot.sh + time ./build_sysroot/build_sysroot.sh $sysroot_channel } function std_tests() { @@ -148,17 +191,57 @@ function std_tests() { $RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE echo "[AOT] subslice-patterns-const-eval" - $RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE + $RUSTC example/subslice-patterns-const-eval.rs --crate-type bin $TEST_FLAGS --target $TARGET_TRIPLE $RUN_WRAPPER ./target/out/subslice-patterns-const-eval echo "[AOT] track-caller-attribute" - $RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE + $RUSTC example/track-caller-attribute.rs --crate-type bin $TEST_FLAGS --target $TARGET_TRIPLE $RUN_WRAPPER ./target/out/track-caller-attribute echo "[BUILD] mod_bench" $RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE } +function setup_rustc() { + rust_toolchain=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') + + git clone https://github.com/rust-lang/rust.git || true + cd rust + git fetch + git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') + export RUSTFLAGS= + + rm config.toml || true + + cat > config.toml < res.txt diff -u res.txt examples/regexdna-output.txt + popd +} + +function extended_regex_tests() { + if (( $gcc_master_branch == 0 )); then + return + fi + pushd regex echo "[TEST] rust-lang/regex tests" + export CG_RUSTFLAGS="--cap-lints warn" # newer aho_corasick versions throw a deprecation warning ../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q popd } -function test_rustc() { - echo - echo "[TEST] rust-lang/rust" - - rust_toolchain=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') - - git clone https://github.com/rust-lang/rust.git || true - cd rust - git fetch - git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') - export RUSTFLAGS= - - git apply ../rustc_patches/compile_test.patch || true +function extended_sysroot_tests() { + #pushd simple-raytracer + #echo "[BENCH COMPILE] ebobby/simple-raytracer" + #hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "cargo clean" \ + #"RUSTC=rustc RUSTFLAGS='' cargo build" \ + #"../cargo.sh build" - rm config.toml || true + #echo "[BENCH RUN] ebobby/simple-raytracer" + #cp ./target/debug/main ./raytracer_cg_gcc + #hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_gcc + #popd - cat > config.toml < ui_tests + # To ensure it'll be always the same sub files, we sort the content. + sort ui_tests -o ui_tests + count=$((`wc -l < ui_tests` / $nb_parts)) + # We increment the number of tests by one because if this is an odd number, we would skip + # one test. + count=$((count + 1)) + split -d -l $count -a 1 ui_tests ui_tests.split + # Removing all tests. + find tests/ui -type f -name '*.rs' -not -path "*/auxiliary/*" -delete + # Putting back only the ones we want to test. 
+ xargs -a "ui_tests.split$current_part" -d'\n' git checkout -- + fi echo "[TEST] rustc test suite" COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/ui/ --rustc-args "$RUSTC_ARGS" } +function test_failing_rustc() { + test_rustc "1" +} + +function test_successful_rustc() { + test_rustc "0" +} + function clean_ui_tests() { - find rust/build/x86_64-unknown-linux-gnu/test/ui/ -name stamp -exec rm -rf {} \; + find rust/build/x86_64-unknown-linux-gnu/test/ui/ -name stamp -delete } function all() { @@ -283,9 +403,17 @@ function all() { mini_tests build_sysroot std_tests + #asm_tests test_libcore extended_sysroot_tests test_rustc } -$func +if [ ${#funcs[@]} -eq 0 ]; then + echo "No command passed, running '--all'..." + all +else + for t in ${funcs[@]}; do + $t + done +fi diff --git a/tests/lang_tests_common.rs b/tests/lang_tests_common.rs index 8e378177e24..06de26f7efc 100644 --- a/tests/lang_tests_common.rs +++ b/tests/lang_tests_common.rs @@ -46,11 +46,15 @@ pub fn main_inner(profile: Profile) { &format!("-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", current_dir), "--sysroot", &format!("{}/build_sysroot/sysroot/", current_dir), "-Zno-parallel-llvm", - "-C", "panic=abort", "-C", "link-arg=-lc", "-o", exe.to_str().expect("to_str"), path.to_str().expect("to_str"), ]); + if let Some(flags) = option_env!("TEST_FLAGS") { + for flag in flags.split_whitespace() { + compiler.arg(&flag); + } + } match profile { Profile::Debug => {} Profile::Release => { diff --git a/tests/run/abort1.rs b/tests/run/abort1.rs index 291af5993aa..25041d93e74 100644 --- a/tests/run/abort1.rs +++ b/tests/run/abort1.rs @@ -33,6 +33,7 @@ mod intrinsics { use super::Sized; extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/tests/run/abort2.rs b/tests/run/abort2.rs index 3c87c567892..e7443c8dbe5 100644 --- a/tests/run/abort2.rs +++ b/tests/run/abort2.rs @@ -33,6 +33,7 @@ mod intrinsics { use super::Sized; extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/tests/run/array.rs b/tests/run/array.rs index 8b621d8a353..49b28d98f2f 100644 --- a/tests/run/array.rs +++ b/tests/run/array.rs @@ -79,7 +79,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); @@ -105,6 +105,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/tests/run/assign.rs b/tests/run/assign.rs index eb38a8a3835..427c1a25033 100644 --- a/tests/run/assign.rs +++ b/tests/run/assign.rs @@ -57,6 +57,7 @@ mod libc { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } @@ -64,7 +65,7 @@ mod intrinsics { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); libc::fflush(libc::stdout); diff --git a/tests/run/closure.rs b/tests/run/closure.rs index 7121a5f0d52..8daa681abf7 100644 --- a/tests/run/closure.rs +++ b/tests/run/closure.rs @@ -97,10 +97,14 @@ fn panic_bounds_check(index: usize, len: usize) -> ! 
{ mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } +#[lang = "tuple_trait"] +pub trait Tuple {} + #[lang = "unsize"] pub trait Unsize {} @@ -114,7 +118,7 @@ impl, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} #[lang = "fn_once"] #[rustc_paren_sugar] -pub trait FnOnce { +pub trait FnOnce { #[lang = "fn_once_output"] type Output; @@ -123,7 +127,7 @@ pub trait FnOnce { #[lang = "fn_mut"] #[rustc_paren_sugar] -pub trait FnMut: FnOnce { +pub trait FnMut: FnOnce { extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output; } @@ -177,7 +181,7 @@ impl Add for isize { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); diff --git a/tests/run/condition.rs b/tests/run/condition.rs index 6a2e2d5bb11..b7a13081dea 100644 --- a/tests/run/condition.rs +++ b/tests/run/condition.rs @@ -82,7 +82,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); @@ -108,6 +108,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/tests/run/fun_ptr.rs b/tests/run/fun_ptr.rs index a226fff79e5..8a196f774c8 100644 --- a/tests/run/fun_ptr.rs +++ b/tests/run/fun_ptr.rs @@ -76,7 +76,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); @@ -102,6 +102,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/tests/run/int.rs b/tests/run/int.rs index 75779622b54..bfe73c38435 100644 --- a/tests/run/int.rs +++ b/tests/run/int.rs @@ -3,22 +3,14 @@ // Run-time: // status: 0 -#![feature(const_black_box, core_intrinsics, start)] - -#![no_std] - -#[panic_handler] -fn panic_handler(_: &core::panic::PanicInfo) -> ! { - core::intrinsics::abort(); -} +#![feature(const_black_box)] /* * Code */ -#[start] -fn main(_argc: isize, _argv: *const *const u8) -> isize { - use core::hint::black_box; +fn main() { + use std::hint::black_box; macro_rules! check { ($ty:ty, $expr:expr) => { @@ -335,6 +327,4 @@ fn main(_argc: isize, _argv: *const *const u8) -> isize { const VAL5: T = 73236519889708027473620326106273939584_i128; check_ops128!(); } - - 0 } diff --git a/tests/run/int_overflow.rs b/tests/run/int_overflow.rs index ea2c5add962..c3fcb3c0a2a 100644 --- a/tests/run/int_overflow.rs +++ b/tests/run/int_overflow.rs @@ -55,6 +55,7 @@ mod libc { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } @@ -62,7 +63,7 @@ mod intrinsics { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { // Panicking is expected iff overflow checking is enabled. 
#[cfg(debug_assertions)] diff --git a/tests/run/mut_ref.rs b/tests/run/mut_ref.rs index 52de20021f3..2a2ea8b8bf0 100644 --- a/tests/run/mut_ref.rs +++ b/tests/run/mut_ref.rs @@ -59,6 +59,7 @@ mod libc { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } @@ -66,7 +67,7 @@ mod intrinsics { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); libc::fflush(libc::stdout); diff --git a/tests/run/operations.rs b/tests/run/operations.rs index e078b37b4ab..67b9f241dbb 100644 --- a/tests/run/operations.rs +++ b/tests/run/operations.rs @@ -65,6 +65,7 @@ mod libc { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } @@ -72,7 +73,7 @@ mod intrinsics { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); libc::fflush(libc::stdout); diff --git a/tests/run/ptr_cast.rs b/tests/run/ptr_cast.rs index 6ac099ea145..da8a8295d56 100644 --- a/tests/run/ptr_cast.rs +++ b/tests/run/ptr_cast.rs @@ -76,7 +76,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); @@ -102,6 +102,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/tests/run/slice.rs b/tests/run/slice.rs index ad9258ed0bd..96f1c4792e5 100644 --- a/tests/run/slice.rs +++ b/tests/run/slice.rs @@ -102,6 +102,7 @@ mod intrinsics { use super::Sized; extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/tests/run/static.rs b/tests/run/static.rs index 294add96844..19201f1df26 100644 --- a/tests/run/static.rs +++ b/tests/run/static.rs @@ -45,6 +45,7 @@ mod intrinsics { use super::Sized; extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/tools/check_intrinsics_duplicates.py b/tools/check_intrinsics_duplicates.py new file mode 100644 index 00000000000..c09fb3c759f --- /dev/null +++ b/tools/check_intrinsics_duplicates.py @@ -0,0 +1,67 @@ +import sys + + +def check_duplicates(): + auto_content = "" + manual_content = "" + + with open("src/intrinsic/llvm.rs", "r", encoding="utf8") as f: + manual_content = f.read() + with open("src/intrinsic/archs.rs", "r", encoding="utf8") as f: + auto_content = f.read() + + intrinsics_map = {} + for line in auto_content.splitlines(): + line = line.strip() + if not line.startswith('"'): + continue + parts = line.split('"') + if len(parts) != 5: + continue + intrinsics_map[parts[1]] = parts[3] + + if len(intrinsics_map) == 0: + print("No intrinsics found in auto code... Aborting.") + return 1 + print("Found {} intrinsics in auto code".format(len(intrinsics_map))) + errors = [] + lines = manual_content.splitlines() + pos = 0 + found = 0 + while pos < len(lines): + line = lines[pos].strip() + # This is our marker. + if line == "let gcc_name = match name {": + while pos < len(lines): + line = lines[pos].strip() + pos += 1 + if line == "};": + # We're done! + if found == 0: + print("No intrinsics found in manual code even though we found the " + "marker... 
Aborting...") + return 1 + for error in errors: + print("ERROR => {}".format(error)) + return 1 if len(errors) != 0 else 0 + parts = line.split('"') + if len(parts) != 5: + continue + found += 1 + if parts[1] in intrinsics_map: + if parts[3] != intrinsics_map[parts[1]]: + print("Same intrinsics (`{}` at line {}) but different GCC " + "translations: `{}` != `{}`".format( + parts[1], pos, intrinsics_map[parts[1]], parts[3])) + else: + errors.append("Duplicated intrinsics: `{}` at line {}. Please remove it " + " from manual code".format(parts[1], pos)) + # Weird but whatever... + return 1 if len(errors) != 0 else 0 + pos += 1 + print("No intrinsics found in manual code... Aborting") + return 1 + + +if __name__ == "__main__": + sys.exit(check_duplicates()) diff --git a/tools/generate_intrinsics.py b/tools/generate_intrinsics.py index 849c6e9c981..6188924b0d5 100644 --- a/tools/generate_intrinsics.py +++ b/tools/generate_intrinsics.py @@ -13,7 +13,7 @@ def run_command(command, cwd=None): sys.exit(1) -def clone_repository(repo_name, path, repo_url, sub_path=None): +def clone_repository(repo_name, path, repo_url, sub_paths=None): if os.path.exists(path): while True: choice = input("There is already a `{}` folder, do you want to update it? [y/N]".format(path)) @@ -27,12 +27,12 @@ def clone_repository(repo_name, path, repo_url, sub_path=None): else: print("Didn't understand answer...") print("Cloning {} repository...".format(repo_name)) - if sub_path is None: + if sub_paths is None: run_command(["git", "clone", repo_url, "--depth", "1", path]) else: run_command(["git", "clone", repo_url, "--filter=tree:0", "--no-checkout", path]) run_command(["git", "sparse-checkout", "init"], cwd=path) - run_command(["git", "sparse-checkout", "set", "add", sub_path], cwd=path) + run_command(["git", "sparse-checkout", "set", *sub_paths], cwd=path) run_command(["git", "checkout"], cwd=path) @@ -40,56 +40,45 @@ def append_intrinsic(array, intrinsic_name, translation): array.append((intrinsic_name, translation)) -def extract_instrinsics(intrinsics, file): - print("Extracting intrinsics from `{}`...".format(file)) - with open(file, "r", encoding="utf8") as f: - content = f.read() +def convert_to_string(content): + if content.__class__.__name__ == 'bytes': + return content.decode('utf-8') + return content - lines = content.splitlines() + +def extract_instrinsics_from_llvm(llvm_path, intrinsics): + p = subprocess.Popen( + ["llvm-tblgen", "llvm/IR/Intrinsics.td"], + cwd=os.path.join(llvm_path, "llvm/include"), + stdout=subprocess.PIPE) + output, err = p.communicate() + lines = convert_to_string(output).splitlines() pos = 0 - current_arch = None while pos < len(lines): - line = lines[pos].strip() - if line.startswith("let TargetPrefix ="): - current_arch = line.split('"')[1].strip() - if len(current_arch) == 0: - current_arch = None - elif current_arch is None: - pass - elif line == "}": - current_arch = None - elif line.startswith("def "): - content = "" - while not content.endswith(";") and not content.endswith("}") and pos < len(lines): - line = lines[pos].split(" // ")[0].strip() - content += line - pos += 1 - entries = re.findall('GCCBuiltin<"(\\w+)">', content) - if len(entries) > 0: - intrinsic = content.split("def ")[1].strip().split(":")[0].strip() - intrinsic = intrinsic.split("_") - if len(intrinsic) < 2 or intrinsic[0] != "int": - continue - intrinsic[0] = "llvm" - intrinsic = ".".join(intrinsic) - if current_arch not in intrinsics: - intrinsics[current_arch] = [] - for entry in entries: - 
append_intrinsic(intrinsics[current_arch], intrinsic, entry) + line = lines[pos] + if not line.startswith("def "): + pos += 1 continue - pos += 1 - continue - print("Done!") - - -def extract_instrinsics_from_llvm(llvm_path, intrinsics): - files = [] - intrinsics_path = os.path.join(llvm_path, "llvm/include/llvm/IR") - for (dirpath, dirnames, filenames) in walk(intrinsics_path): - files.extend([os.path.join(intrinsics_path, f) for f in filenames if f.endswith(".td")]) - - for file in files: - extract_instrinsics(intrinsics, file) + intrinsic = line.split(" ")[1].strip() + content = line + while pos < len(lines): + line = lines[pos].split(" // ")[0].strip() + content += line + pos += 1 + if line == "}": + break + entries = re.findall('string ClangBuiltinName = "(\\w+)";', content) + current_arch = re.findall('string TargetPrefix = "(\\w+)";', content) + if len(entries) == 1 and len(current_arch) == 1: + current_arch = current_arch[0] + intrinsic = intrinsic.split("_") + if len(intrinsic) < 2 or intrinsic[0] != "int": + continue + intrinsic[0] = "llvm" + intrinsic = ".".join(intrinsic) + if current_arch not in intrinsics: + intrinsics[current_arch] = [] + append_intrinsic(intrinsics[current_arch], intrinsic, entries[0]) def append_translation(json_data, p, array): @@ -193,6 +182,8 @@ def update_intrinsics(llvm_path, llvmint, llvmint2): for entry in intrinsics[arch]: if entry[2] == True: # if it is a duplicate out.write(' // [DUPLICATE]: "{}" => "{}",\n'.format(entry[0], entry[1])) + elif "_round_mask" in entry[1]: + out.write(' // [INVALID CONVERSION]: "{}" => "{}",\n'.format(entry[0], entry[1])) else: out.write(' "{}" => "{}",\n'.format(entry[0], entry[1])) out.write(' _ => unimplemented!("***** unsupported LLVM intrinsic {}", name),\n') @@ -219,7 +210,7 @@ def main(): "llvm-project", llvm_path, "https://github.com/llvm/llvm-project", - sub_path="llvm/include/llvm/IR", + sub_paths=["llvm/include/llvm/IR", "llvm/include/llvm/CodeGen/"], ) clone_repository( "llvmint", From 94538736a80df08c8344a4e6fe5b91de6a1b7843 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 5 Mar 2023 12:16:33 -0500 Subject: [PATCH 066/112] Update gccjit --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 65e76197559..80e57418940 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,7 +35,7 @@ dependencies = [ [[package]] name = "gccjit" version = "1.0.0" -source = "git+https://github.com/antoyo/gccjit.rs#bdb86fb5092895ff5589726b33250010c64d93f6" +source = "git+https://github.com/antoyo/gccjit.rs#eefb8c662d61477f34b7c32d26bcda5f1ef08432" dependencies = [ "gccjit_sys", ] @@ -43,7 +43,7 @@ dependencies = [ [[package]] name = "gccjit_sys" version = "0.0.1" -source = "git+https://github.com/antoyo/gccjit.rs#bdb86fb5092895ff5589726b33250010c64d93f6" +source = "git+https://github.com/antoyo/gccjit.rs#eefb8c662d61477f34b7c32d26bcda5f1ef08432" dependencies = [ "libc 0.1.12", ] From cf9c2f840f56eda73c7abd79fe3b94117f0926fb Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 5 Mar 2023 12:31:16 -0500 Subject: [PATCH 067/112] Fix for diagnostics --- locales/en-US.ftl | 3 +++ src/attributes.rs | 11 ++++++----- src/errors.rs | 9 +++++++++ 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/locales/en-US.ftl b/locales/en-US.ftl index 2181d49eeef..0a94a08f8dc 100644 --- a/locales/en-US.ftl +++ b/locales/en-US.ftl @@ -63,3 +63,6 @@ codegen_gcc_invalid_monomorphization_unsupported_operation = 
codegen_gcc_invalid_minimum_alignment = invalid minimum global alignment: {$err} + +codegen_gcc_tied_target_features = the target features {$features} must all be either enabled or disabled together + .help = add the missing features in a `target_feature` attribute diff --git a/src/attributes.rs b/src/attributes.rs index 243a1a36dd0..db841b1b524 100644 --- a/src/attributes.rs +++ b/src/attributes.rs @@ -9,7 +9,7 @@ use rustc_session::Session; use rustc_span::symbol::sym; use smallvec::{smallvec, SmallVec}; -use crate::context::CodegenCx; +use crate::{context::CodegenCx, errors::TiedTargetFeatures}; // Given a map from target_features to whether they are enabled or disabled, // ensure only valid combinations are allowed. @@ -84,10 +84,11 @@ pub fn from_fn_attrs<'gcc, 'tcx>( let span = cx.tcx .get_attr(instance.def_id(), sym::target_feature) .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span); - let msg = format!("the target features {} must all be either enabled or disabled together", features.join(", ")); - let mut err = cx.tcx.sess.struct_span_err(span, &msg); - err.help("add the missing features in a `target_feature` attribute"); - err.emit(); + cx.tcx.sess.create_err(TiedTargetFeatures { + features: features.join(", "), + span, + }) + .emit(); return; } diff --git a/src/errors.rs b/src/errors.rs index 5ea39606c08..9305bd1e043 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -227,3 +227,12 @@ pub(crate) struct UnwindingInlineAsm { pub(crate) struct InvalidMinimumAlignment { pub err: String, } + +#[derive(Diagnostic)] +#[diag(codegen_gcc_tied_target_features)] +#[help] +pub(crate) struct TiedTargetFeatures { + #[primary_span] + pub span: Span, + pub features: String, +} From 8652bbb0728fb60137c96caa9af7e5bfb1617923 Mon Sep 17 00:00:00 2001 From: Pietro Albini Date: Thu, 17 Nov 2022 10:33:10 +0100 Subject: [PATCH 068/112] replace legacy copyright annotations in submodules --- example/alloc_system.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/example/alloc_system.rs b/example/alloc_system.rs index fd01fcf1fc8..9ec18da90d8 100644 --- a/example/alloc_system.rs +++ b/example/alloc_system.rs @@ -1,12 +1,6 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +// SPDX-License-Identifier: MIT OR Apache-2.0 +// SPDX-FileCopyrightText: The Rust Project Developers (see https://thanks.rust-lang.org) + #![no_std] #![feature(allocator_api, rustc_private)] #![cfg_attr(any(unix, target_os = "redox"), feature(libc))] From fe93911ebcdc2f9c95e0d40e138fa2f7a3932093 Mon Sep 17 00:00:00 2001 From: est31 Date: Fri, 3 Mar 2023 00:18:38 +0100 Subject: [PATCH 069/112] Simplify message paths This makes it easier to open the messages file while developing on features. 
The commit was the result of automatted changes: for p in compiler/rustc_*; do mv $p/locales/en-US.ftl $p/messages.ftl; rmdir $p/locales; done for p in compiler/rustc_*; do sed -i "s#\.\./locales/en-US.ftl#../messages.ftl#" $p/src/lib.rs; done --- locales/en-US.ftl => messages.ftl | 0 src/lib.rs | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename locales/en-US.ftl => messages.ftl (100%) diff --git a/locales/en-US.ftl b/messages.ftl similarity index 100% rename from locales/en-US.ftl rename to messages.ftl diff --git a/src/lib.rs b/src/lib.rs index 1b7feb5f8a1..0b661505acc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -87,7 +87,7 @@ use rustc_span::Symbol; use rustc_span::fatal_error::FatalError; use tempfile::TempDir; -fluent_messages! { "../locales/en-US.ftl" } +fluent_messages! { "../messages.ftl" } pub struct PrintOnPanic String>(pub F); From ee38de5155b1526fa0b0b3cf118376259ac9d5b7 Mon Sep 17 00:00:00 2001 From: clubby789 Date: Mon, 27 Feb 2023 13:07:44 +0000 Subject: [PATCH 070/112] Remove uses of `box_syntax` in rustc and tools --- example/alloc_example.rs | 4 ++-- example/mini_core_hello_world.rs | 8 ++++---- example/mod_bench.rs | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/example/alloc_example.rs b/example/alloc_example.rs index c80348ca549..754e7931412 100644 --- a/example/alloc_example.rs +++ b/example/alloc_example.rs @@ -1,4 +1,4 @@ -#![feature(start, box_syntax, core_intrinsics, alloc_error_handler, lang_items)] +#![feature(start, core_intrinsics, alloc_error_handler, lang_items)] #![no_std] extern crate alloc; @@ -38,7 +38,7 @@ unsafe extern "C" fn _Unwind_Resume() { #[start] fn main(_argc: isize, _argv: *const *const u8) -> isize { - let world: Box<&str> = box "Hello World!\0"; + let world: Box<&str> = Box::new("Hello World!\0"); unsafe { puts(*world as *const str as *const u8); } diff --git a/example/mini_core_hello_world.rs b/example/mini_core_hello_world.rs index 993a31e68ea..cff26077740 100644 --- a/example/mini_core_hello_world.rs +++ b/example/mini_core_hello_world.rs @@ -1,7 +1,7 @@ // Adapted from https://github.com/sunfishcode/mir2cranelift/blob/master/rust-examples/nocore-hello-world.rs #![feature( - no_core, unboxed_closures, start, lang_items, box_syntax, never_type, linkage, + no_core, unboxed_closures, start, lang_items, never_type, linkage, extern_types, thread_local )] #![no_core] @@ -163,7 +163,7 @@ fn main() { let ptr: *const u8 = hello as *const [u8] as *const u8; puts(ptr); - let world: Box<&str> = box "World!\0"; + let world: Box<&str> = Box::new("World!\0"); puts(*world as *const str as *const u8); world as Box; @@ -223,10 +223,10 @@ fn main() { } } - let _ = box NoisyDrop { + let _ = Box::new(NoisyDrop { text: "Boxed outer got dropped!\0", inner: NoisyDropInner, - } as Box; + }) as Box; const FUNC_REF: Option = Some(main); #[allow(unreachable_code)] diff --git a/example/mod_bench.rs b/example/mod_bench.rs index 95bcad2cd17..5e2e7f25a2c 100644 --- a/example/mod_bench.rs +++ b/example/mod_bench.rs @@ -1,4 +1,4 @@ -#![feature(start, box_syntax, core_intrinsics, lang_items)] +#![feature(start, core_intrinsics, lang_items)] #![no_std] #[link(name = "c")] From 0c115bf8b8a6973711cb4e8b3c0f62e2551c20a5 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Thu, 16 Mar 2023 14:56:02 +0100 Subject: [PATCH 071/112] Use poison instead of undef In cases where it is legal, we should prefer poison values over undef values. This replaces undef with poison for aggregate construction and for uninhabited types. 
There are more places where we can likely use poison, but I wanted to stay conservative to start with. In particular the aggregate case is important for newer LLVM versions, which are not able to handle an undef base value during early optimization due to poison-propagation concerns. --- src/common.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/common.rs b/src/common.rs index 76fc7bd222e..ac04b61a306 100644 --- a/src/common.rs +++ b/src/common.rs @@ -73,6 +73,11 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } } + fn const_poison(&self, typ: Type<'gcc>) -> RValue<'gcc> { + // No distinction between undef and poison. + self.const_undef(typ) + } + fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> { self.gcc_int(typ, int) } From 2bfd89d91b11bba295ad727f27d417eb40616968 Mon Sep 17 00:00:00 2001 From: Ulrich Weigand Date: Thu, 30 Mar 2023 18:30:56 +0200 Subject: [PATCH 072/112] Update gccjit and remove libc 0.1 dependency --- Cargo.lock | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 80e57418940..0f2e152f8ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,7 +35,7 @@ dependencies = [ [[package]] name = "gccjit" version = "1.0.0" -source = "git+https://github.com/antoyo/gccjit.rs#eefb8c662d61477f34b7c32d26bcda5f1ef08432" +source = "git+https://github.com/antoyo/gccjit.rs#fe242b7eb26980e6c78859d51c8d4cc1e43381a3" dependencies = [ "gccjit_sys", ] @@ -43,9 +43,9 @@ dependencies = [ [[package]] name = "gccjit_sys" version = "0.0.1" -source = "git+https://github.com/antoyo/gccjit.rs#eefb8c662d61477f34b7c32d26bcda5f1ef08432" +source = "git+https://github.com/antoyo/gccjit.rs#fe242b7eb26980e6c78859d51c8d4cc1e43381a3" dependencies = [ - "libc 0.1.12", + "libc", ] [[package]] @@ -64,7 +64,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if", - "libc 0.2.112", + "libc", "wasi", ] @@ -74,7 +74,7 @@ version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ - "libc 0.2.112", + "libc", ] [[package]] @@ -85,7 +85,7 @@ checksum = "96bd995a092cac79868250589869b5a5d656b02a02bd74c8ebdc566dc7203090" dependencies = [ "fm", "getopts", - "libc 0.2.112", + "libc", "num_cpus", "termcolor", "threadpool", @@ -93,12 +93,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "libc" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122" - [[package]] name = "libc" version = "0.2.112" @@ -118,7 +112,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" dependencies = [ "hermit-abi", - "libc 0.2.112", + "libc", ] [[package]] @@ -133,7 +127,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ - "libc 0.2.112", + "libc", "rand_chacha", "rand_core", "rand_hc", @@ -234,7 +228,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ 
"cfg-if", - "libc 0.2.112", + "libc", "rand", "redox_syscall", "remove_dir_all", @@ -271,7 +265,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" dependencies = [ - "libc 0.2.112", + "libc", ] [[package]] From 556ab2628384398dedbc6b7700b1fd7899336c3d Mon Sep 17 00:00:00 2001 From: zhaixiaojuan Date: Tue, 28 Mar 2023 16:18:12 +0800 Subject: [PATCH 073/112] Define MIN_ALIGN for loongarch64 --- example/alloc_system.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/example/alloc_system.rs b/example/alloc_system.rs index 9ec18da90d8..046903fe5ac 100644 --- a/example/alloc_system.rs +++ b/example/alloc_system.rs @@ -15,6 +15,7 @@ const MIN_ALIGN: usize = 8; #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", + target_arch = "loongarch64", target_arch = "mips64", target_arch = "s390x", target_arch = "sparc64"))] From 387718fedcd4a235c9f2a439f2d655a43e950f21 Mon Sep 17 00:00:00 2001 From: Ian Douglas Scott Date: Wed, 5 Apr 2023 18:42:36 -0700 Subject: [PATCH 074/112] Add inline assembly support for m68k --- src/asm.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/asm.rs b/src/asm.rs index 41e9d61a10e..65de02b3567 100644 --- a/src/asm.rs +++ b/src/asm.rs @@ -593,6 +593,9 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r", InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w", InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a", + InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d", InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r" InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f", InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r", @@ -664,6 +667,9 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl InlineAsmRegClass::Avr(_) => unimplemented!(), InlineAsmRegClass::Bpf(_) => unimplemented!(), InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(), + InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(), InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(), InlineAsmRegClass::Msp430(_) => unimplemented!(), @@ -849,6 +855,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option InlineAsmRegClass::Avr(_) => None, InlineAsmRegClass::S390x(_) => None, InlineAsmRegClass::Msp430(_) => None, + InlineAsmRegClass::M68k(_) => None, InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") } From 5f30b63b3bbed424565ae175f84ce94d49480962 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Wed, 22 Mar 2023 23:43:20 +0000 Subject: [PATCH 075/112] Remove #[alloc_error_handler] from the compiler and library --- example/alloc_example.rs | 7 +------ src/allocator.rs | 34 +--------------------------------- src/lib.rs | 4 ++-- 3 files changed, 4 insertions(+), 41 deletions(-) diff --git a/example/alloc_example.rs b/example/alloc_example.rs index 754e7931412..faff1dca23f 100644 --- 
a/example/alloc_example.rs +++ b/example/alloc_example.rs @@ -1,4 +1,4 @@ -#![feature(start, core_intrinsics, alloc_error_handler, lang_items)] +#![feature(start, core_intrinsics, lang_items)] #![no_std] extern crate alloc; @@ -21,11 +21,6 @@ fn panic_handler(_: &core::panic::PanicInfo) -> ! { core::intrinsics::abort(); } -#[alloc_error_handler] -fn alloc_error_handler(_: alloc::alloc::Layout) -> ! { - core::intrinsics::abort(); -} - #[lang = "eh_personality"] fn eh_personality() -> ! { loop {} diff --git a/src/allocator.rs b/src/allocator.rs index 4bad33ee879..e90db44ece1 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -5,11 +5,10 @@ use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS use rustc_middle::bug; use rustc_middle::ty::TyCtxt; use rustc_session::config::OomStrategy; -use rustc_span::symbol::sym; use crate::GccContext; -pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) { +pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind) { let context = &mods.context; let usize = match tcx.sess.target.pointer_width { @@ -87,37 +86,6 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643 } - let types = [usize, usize]; - let name = "__rust_alloc_error_handler".to_string(); - let args: Vec<_> = types.iter().enumerate() - .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) - .collect(); - let func = context.new_function(None, FunctionType::Exported, void, &args, name, false); - - if tcx.sess.target.default_hidden_visibility { - #[cfg(feature="master")] - func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); - } - - let callee = alloc_error_handler_kind.fn_name(sym::oom); - let args: Vec<_> = types.iter().enumerate() - .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) - .collect(); - let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false); - #[cfg(feature="master")] - callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); - - let block = func.new_block("entry"); - - let args = args - .iter() - .enumerate() - .map(|(i, _)| func.get_param(i as i32).to_rvalue()) - .collect::>(); - let _ret = context.new_call(None, callee, &args); - //llvm::LLVMSetTailCall(ret, True); - block.end_with_void_return(None); - let name = OomStrategy::SYMBOL.to_string(); let global = context.new_global(None, GlobalKind::Exported, i8, name); let value = tcx.sess.opts.unstable_opts.oom.should_panic(); diff --git a/src/lib.rs b/src/lib.rs index 0b661505acc..be710fefe49 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -162,11 +162,11 @@ impl CodegenBackend for GccCodegenBackend { } impl ExtraBackendMethods for GccCodegenBackend { - fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) -> Self::Module { + fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind) -> Self::Module { let mut mods = GccContext { context: Context::default(), }; - unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind); } + unsafe { allocator::codegen(tcx, &mut mods, module_name, kind); } mods } From f40e4da5d99527ac8bed14e6c877e84f1a6a3851 Mon Sep 
17 00:00:00 2001 From: Nilstrieb <48135649+Nilstrieb@users.noreply.github.com> Date: Sun, 16 Apr 2023 14:33:00 +0200 Subject: [PATCH 076/112] Add `rustc_fluent_macro` to decouple fluent from `rustc_macros` Fluent, with all the icu4x it brings in, takes quite some time to compile. `fluent_messages!` is only needed in further downstream rustc crates, but is blocking more upstream crates like `rustc_index`. By splitting it out, we allow `rustc_macros` to be compiled earlier, which speeds up `x check compiler` by about 5 seconds (and even more after the needless dependency on `serde_json` is removed from `rustc_data_structures`). --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 0b661505acc..1cabb05de97 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,6 +27,7 @@ extern crate rustc_attr; extern crate rustc_codegen_ssa; extern crate rustc_data_structures; extern crate rustc_errors; +extern crate rustc_fluent_macro; extern crate rustc_hir; extern crate rustc_macros; extern crate rustc_metadata; @@ -76,7 +77,7 @@ use rustc_codegen_ssa::target_features::supported_target_features; use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMessage}; -use rustc_macros::fluent_messages; +use rustc_fluent_macro::fluent_messages; use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; From 94735c7a5ce85c4afa75006a704844fcf637a1ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matthias=20Kr=C3=BCger?= Date: Tue, 25 Apr 2023 00:08:35 +0200 Subject: [PATCH 077/112] Revert "Remove #[alloc_error_handler] from the compiler and library" This reverts commit abc0660118cc95f47445fd33502a11dd448f5968. --- example/alloc_example.rs | 7 ++++++- src/allocator.rs | 34 +++++++++++++++++++++++++++++++++- src/lib.rs | 4 ++-- 3 files changed, 41 insertions(+), 4 deletions(-) diff --git a/example/alloc_example.rs b/example/alloc_example.rs index faff1dca23f..754e7931412 100644 --- a/example/alloc_example.rs +++ b/example/alloc_example.rs @@ -1,4 +1,4 @@ -#![feature(start, core_intrinsics, lang_items)] +#![feature(start, core_intrinsics, alloc_error_handler, lang_items)] #![no_std] extern crate alloc; @@ -21,6 +21,11 @@ fn panic_handler(_: &core::panic::PanicInfo) -> ! { core::intrinsics::abort(); } +#[alloc_error_handler] +fn alloc_error_handler(_: alloc::alloc::Layout) -> ! { + core::intrinsics::abort(); +} + #[lang = "eh_personality"] fn eh_personality() -> ! 
{ loop {} diff --git a/src/allocator.rs b/src/allocator.rs index e90db44ece1..4bad33ee879 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -5,10 +5,11 @@ use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS use rustc_middle::bug; use rustc_middle::ty::TyCtxt; use rustc_session::config::OomStrategy; +use rustc_span::symbol::sym; use crate::GccContext; -pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind) { +pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) { let context = &mods.context; let usize = match tcx.sess.target.pointer_width { @@ -86,6 +87,37 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643 } + let types = [usize, usize]; + let name = "__rust_alloc_error_handler".to_string(); + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let func = context.new_function(None, FunctionType::Exported, void, &args, name, false); + + if tcx.sess.target.default_hidden_visibility { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + } + + let callee = alloc_error_handler_kind.fn_name(sym::oom); + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false); + #[cfg(feature="master")] + callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + + let block = func.new_block("entry"); + + let args = args + .iter() + .enumerate() + .map(|(i, _)| func.get_param(i as i32).to_rvalue()) + .collect::>(); + let _ret = context.new_call(None, callee, &args); + //llvm::LLVMSetTailCall(ret, True); + block.end_with_void_return(None); + let name = OomStrategy::SYMBOL.to_string(); let global = context.new_global(None, GlobalKind::Exported, i8, name); let value = tcx.sess.opts.unstable_opts.oom.should_panic(); diff --git a/src/lib.rs b/src/lib.rs index 1a20dbcebd4..1cabb05de97 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -163,11 +163,11 @@ impl CodegenBackend for GccCodegenBackend { } impl ExtraBackendMethods for GccCodegenBackend { - fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind) -> Self::Module { + fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) -> Self::Module { let mut mods = GccContext { context: Context::default(), }; - unsafe { allocator::codegen(tcx, &mut mods, module_name, kind); } + unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind); } mods } From 7ec90e307793c5e8bd5a50e9556fdf92c6d8147d Mon Sep 17 00:00:00 2001 From: zhaixiaojuan Date: Fri, 19 Aug 2022 17:41:56 +0800 Subject: [PATCH 078/112] Add loongarch64 asm! 
support --- src/asm.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/asm.rs b/src/asm.rs index 65de02b3567..738cdb6f119 100644 --- a/src/asm.rs +++ b/src/asm.rs @@ -593,6 +593,8 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r", InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w", InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f", InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r", InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a", InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d", @@ -667,6 +669,8 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl InlineAsmRegClass::Avr(_) => unimplemented!(), InlineAsmRegClass::Bpf(_) => unimplemented!(), InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(), InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(), InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(), @@ -804,6 +808,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option } } InlineAsmRegClass::Hexagon(_) => None, + InlineAsmRegClass::LoongArch(_) => None, InlineAsmRegClass::Mips(_) => None, InlineAsmRegClass::Nvptx(_) => None, InlineAsmRegClass::PowerPC(_) => None, From 43f2b1696f7a02892b5408cc0292f0eb8ddecb6d Mon Sep 17 00:00:00 2001 From: Boxy Date: Thu, 27 Apr 2023 08:34:11 +0100 Subject: [PATCH 079/112] rename `needs_infer` to `has_infer` --- src/callee.rs | 2 +- src/mono_item.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/callee.rs b/src/callee.rs index ba1e8656208..433b2585f82 100644 --- a/src/callee.rs +++ b/src/callee.rs @@ -17,7 +17,7 @@ use crate::context::CodegenCx; pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> Function<'gcc> { let tcx = cx.tcx(); - assert!(!instance.substs.needs_infer()); + assert!(!instance.substs.has_infer()); assert!(!instance.substs.has_escaping_bound_vars()); let sym = tcx.symbol_name(instance).name; diff --git a/src/mono_item.rs b/src/mono_item.rs index c1f6340866c..342b830cedb 100644 --- a/src/mono_item.rs +++ b/src/mono_item.rs @@ -31,7 +31,7 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { #[cfg_attr(not(feature="master"), allow(unused_variables))] fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, visibility: Visibility, symbol_name: &str) { - assert!(!instance.substs.needs_infer()); + assert!(!instance.substs.has_infer()); let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); self.linkage.set(base::linkage_to_gcc(linkage)); From 0d183f8e74eca11bf525cb98cfe0de7789b90c8f Mon Sep 17 00:00:00 2001 From: Ramon de C Valle Date: Mon, 12 Dec 2022 22:42:44 -0800 Subject: [PATCH 080/112] Add cross-language LLVM CFI support to the Rust compiler This commit adds cross-language LLVM Control Flow Integrity (CFI) support to the Rust compiler by adding the `-Zsanitizer-cfi-normalize-integers` option to be used with Clang `-fsanitize-cfi-icall-normalize-integers` for normalizing integer types (see 
https://reviews.llvm.org/D139395). It provides forward-edge control flow protection for C or C++ and Rust -compiled code "mixed binaries" (i.e., for when C or C++ and Rust -compiled code share the same virtual address space). For more information about LLVM CFI and cross-language LLVM CFI support for the Rust compiler, see design document in the tracking issue #89653. Cross-language LLVM CFI can be enabled with -Zsanitizer=cfi and -Zsanitizer-cfi-normalize-integers, and requires proper (i.e., non-rustc) LTO (i.e., -Clinker-plugin-lto). --- src/asm.rs | 2 +- src/builder.rs | 10 ++++++---- src/intrinsic/mod.rs | 14 +++++++------- src/type_.rs | 12 ------------ 4 files changed, 14 insertions(+), 24 deletions(-) diff --git a/src/asm.rs b/src/asm.rs index 65de02b3567..cfbb3057a61 100644 --- a/src/asm.rs +++ b/src/asm.rs @@ -501,7 +501,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { if options.contains(InlineAsmOptions::NORETURN) { let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable"); let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) }; - self.call(self.type_void(), None, builtin_unreachable, &[], None); + self.call(self.type_void(), None, None, builtin_unreachable, &[], None); } // Write results to outputs. diff --git a/src/builder.rs b/src/builder.rs index a3c8142bea2..a66ddb6a09f 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -35,6 +35,7 @@ use rustc_codegen_ssa::traits::{ }; use rustc_data_structures::fx::FxHashSet; use rustc_middle::bug; +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::{ParamEnv, Ty, TyCtxt}; use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout}; use rustc_span::Span; @@ -455,12 +456,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } #[cfg(feature="master")] - fn invoke(&mut self, typ: Type<'gcc>, _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { + fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: Option<&CodegenFnAttrs>, _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { let try_block = self.current_func().new_block("try"); let current_block = self.block.clone(); self.block = try_block; - let call = self.call(typ, None, func, args, None); // TODO(antoyo): use funclet here? + let call = self.call(typ, fn_attrs, None, func, args, None); // TODO(antoyo): use funclet here? 
self.block = current_block; let return_value = self.current_func() @@ -483,8 +484,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } #[cfg(not(feature="master"))] - fn invoke(&mut self, typ: Type<'gcc>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { - let call_site = self.call(typ, None, func, args, None); + fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: &CodegenFnAttrs, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { + let call_site = self.call(typ, fn_attrs, None, func, args, None); let condition = self.context.new_rvalue_from_int(self.bool_type, 1); self.llbb().end_with_conditional(None, condition, then, catch); if let Some(_fn_abi) = fn_abi { @@ -1351,6 +1352,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn call( &mut self, _typ: Type<'gcc>, + _fn_attrs: Option<&CodegenFnAttrs>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index 94dc8c9e93b..60176874747 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -113,7 +113,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { _ if simple.is_some() => { // FIXME(antoyo): remove this cast when the API supports function. let func = unsafe { std::mem::transmute(simple.expect("simple")) }; - self.call(self.type_void(), None, func, &args.iter().map(|arg| arg.immediate()).collect::>(), None) + self.call(self.type_void(), None, None, func, &args.iter().map(|arg| arg.immediate()).collect::>(), None) }, sym::likely => { self.expect(args[0].immediate(), true) @@ -326,7 +326,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { let masked = self.and(addr, mask); self.bitcast(masked, void_ptr_type) }, - + _ if name_str.starts_with("simd_") => { match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { Ok(llval) => llval, @@ -354,7 +354,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { fn abort(&mut self) { let func = self.context.get_builtin_function("abort"); let func: RValue<'gcc> = unsafe { std::mem::transmute(func) }; - self.call(self.type_void(), None, func, &[], None); + self.call(self.type_void(), None, None, func, &[], None); } fn assume(&mut self, value: Self::Value) { @@ -1135,7 +1135,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(bx: &'b mut Builder<'a, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) { if bx.sess().panic_strategy() == PanicStrategy::Abort { - bx.call(bx.type_void(), None, try_func, &[data], None); + bx.call(bx.type_void(), None, None, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. 
let ret_align = bx.tcx.data_layout.i32_align.abi; @@ -1204,21 +1204,21 @@ fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>, let zero = bx.cx.context.new_rvalue_zero(bx.int_type); let ptr = bx.cx.context.new_call(None, eh_pointer_builtin, &[zero]); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); - bx.call(catch_ty, None, catch_func, &[data, ptr], None); + bx.call(catch_ty, None, None, catch_func, &[data, ptr], None); bx.ret(bx.const_i32(1)); // NOTE: the blocks must be filled before adding the try/catch, otherwise gcc will not // generate a try/catch. // FIXME(antoyo): add a check in the libgccjit API to prevent this. bx.switch_to_block(current_block); - bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None); + bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None); }); let func = unsafe { std::mem::transmute(func) }; // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = bx.call(llty, None, func, &[try_func, data, catch_func], None); + let ret = bx.call(llty, None, None, func, &[try_func, data, catch_func], None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } diff --git a/src/type_.rs b/src/type_.rs index daa661f35c4..521b64ad34d 100644 --- a/src/type_.rs +++ b/src/type_.rs @@ -280,16 +280,4 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout } impl<'gcc, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'gcc, 'tcx> { - fn set_type_metadata(&self, _function: RValue<'gcc>, _typeid: String) { - // Unsupported. - } - - fn typeid_metadata(&self, _typeid: String) -> RValue<'gcc> { - // Unsupported. - self.context.new_rvalue_from_int(self.int_type, 0) - } - - fn set_kcfi_type_metadata(&self, _function: RValue<'gcc>, _kcfi_typeid: u32) { - // Unsupported. 
- } } From 77375ab81447300542d5533db3189bb1a38bcb93 Mon Sep 17 00:00:00 2001 From: Gary Guo Date: Sun, 6 Nov 2022 21:24:20 +0000 Subject: [PATCH 081/112] Use `landingpad filter` to encode aborting landing pad --- src/builder.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/builder.rs b/src/builder.rs index a66ddb6a09f..5fba09d795c 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1227,6 +1227,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { (value1, value2) } + fn filter_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) { + self.cleanup_landing_pad(pers_fn) + } + #[cfg(feature="master")] fn resume(&mut self, exn0: RValue<'gcc>, _exn1: RValue<'gcc>) { let exn_type = exn0.get_type(); From 59177fcb60cdccd7c6e5958f36bd4d2300fafcda Mon Sep 17 00:00:00 2001 From: Gary Guo Date: Sat, 8 Apr 2023 13:12:24 +0100 Subject: [PATCH 082/112] Add todo for filter landing pad --- src/builder.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/builder.rs b/src/builder.rs index 5fba09d795c..869344ce92d 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1228,6 +1228,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn filter_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) { + // TODO(antoyo): generate the correct landing pad self.cleanup_landing_pad(pers_fn) } From 35836b326b600728f1b4e8724d38fb8e469119c1 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sat, 3 Jul 2021 16:46:41 +0200 Subject: [PATCH 083/112] Don't use an allocator shim for `#[global_allocator]` This makes it possible to use liballoc/libstd in combination with `--emit obj` if you use `#[global_allocator]`. Making it work for the default libstd allocator would require weak functions, which are not well supported on all systems. 
--- src/allocator.rs | 119 ++++++++++++++++++++++++----------------------- 1 file changed, 61 insertions(+), 58 deletions(-) diff --git a/src/allocator.rs b/src/allocator.rs index 4bad33ee879..f4105b91e98 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -1,11 +1,12 @@ #[cfg(feature="master")] use gccjit::FnAttribute; use gccjit::{FunctionType, GlobalKind, ToRValue}; -use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS}; +use rustc_ast::expand::allocator::{ + alloc_error_handler_name, AllocatorKind, AllocatorTy, ALLOCATOR_METHODS, +}; use rustc_middle::bug; use rustc_middle::ty::TyCtxt; use rustc_session::config::OomStrategy; -use rustc_span::symbol::sym; use crate::GccContext; @@ -22,69 +23,71 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam let i8p = i8.make_pointer(); let void = context.new_type::<()>(); - for method in ALLOCATOR_METHODS { - let mut types = Vec::with_capacity(method.inputs.len()); - for ty in method.inputs.iter() { - match *ty { - AllocatorTy::Layout => { - types.push(usize); - types.push(usize); + if kind == AllocatorKind::Default { + for method in ALLOCATOR_METHODS { + let mut types = Vec::with_capacity(method.inputs.len()); + for ty in method.inputs.iter() { + match *ty { + AllocatorTy::Layout => { + types.push(usize); + types.push(usize); + } + AllocatorTy::Ptr => types.push(i8p), + AllocatorTy::Usize => types.push(usize), + + AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"), } - AllocatorTy::Ptr => types.push(i8p), - AllocatorTy::Usize => types.push(usize), - - AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"), } - } - let output = match method.output { - AllocatorTy::ResultPtr => Some(i8p), - AllocatorTy::Unit => None, + let output = match method.output { + AllocatorTy::ResultPtr => Some(i8p), + AllocatorTy::Unit => None, - AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => { - panic!("invalid allocator output") - } - }; - let name = format!("__rust_{}", method.name); + AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => { + panic!("invalid allocator output") + } + }; + let name = format!("__rust_{}", method.name); + + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false); - let args: Vec<_> = types.iter().enumerate() - .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) - .collect(); - let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false); + if tcx.sess.target.options.default_hidden_visibility { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + } + if tcx.sess.must_emit_unwind_tables() { + // TODO(antoyo): emit unwind tables. + } - if tcx.sess.target.options.default_hidden_visibility { + let callee = AllocatorKind::Default.fn_name(method.name); + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false); #[cfg(feature="master")] - func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); - } - if tcx.sess.must_emit_unwind_tables() { - // TODO(antoyo): emit unwind tables. 
- } + callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + + let block = func.new_block("entry"); + + let args = args + .iter() + .enumerate() + .map(|(i, _)| func.get_param(i as i32).to_rvalue()) + .collect::>(); + let ret = context.new_call(None, callee, &args); + //llvm::LLVMSetTailCall(ret, True); + if output.is_some() { + block.end_with_return(None, ret); + } + else { + block.end_with_void_return(None); + } - let callee = kind.fn_name(method.name); - let args: Vec<_> = types.iter().enumerate() - .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) - .collect(); - let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false); - #[cfg(feature="master")] - callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); - - let block = func.new_block("entry"); - - let args = args - .iter() - .enumerate() - .map(|(i, _)| func.get_param(i as i32).to_rvalue()) - .collect::>(); - let ret = context.new_call(None, callee, &args); - //llvm::LLVMSetTailCall(ret, True); - if output.is_some() { - block.end_with_return(None, ret); - } - else { - block.end_with_void_return(None); + // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances + // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643 } - - // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances - // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643 } let types = [usize, usize]; @@ -99,7 +102,7 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); } - let callee = alloc_error_handler_kind.fn_name(sym::oom); + let callee = alloc_error_handler_name(alloc_error_handler_kind); let args: Vec<_> = types.iter().enumerate() .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) .collect(); From c50427ba86aadcbec86bf9b35f0c32ee818c7705 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sat, 3 Jul 2021 17:50:53 +0200 Subject: [PATCH 084/112] Split AllocatorKind::fn_name in global_fn_name and default_fn_name --- src/allocator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/allocator.rs b/src/allocator.rs index f4105b91e98..fe143aaad5f 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -2,7 +2,7 @@ use gccjit::FnAttribute; use gccjit::{FunctionType, GlobalKind, ToRValue}; use rustc_ast::expand::allocator::{ - alloc_error_handler_name, AllocatorKind, AllocatorTy, ALLOCATOR_METHODS, + alloc_error_handler_name, default_fn_name, AllocatorKind, AllocatorTy, ALLOCATOR_METHODS, }; use rustc_middle::bug; use rustc_middle::ty::TyCtxt; @@ -61,7 +61,7 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam // TODO(antoyo): emit unwind tables. } - let callee = AllocatorKind::Default.fn_name(method.name); + let callee = default_fn_name(method.name); let args: Vec<_> = types.iter().enumerate() .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) .collect(); From 2be697f60d53fe321a74fb99d3cde452a94eb3da Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Tue, 6 Jul 2021 18:56:01 +0200 Subject: [PATCH 085/112] Use global_fn_name instead of format! 
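The helper is assumed here to produce the same `__rust_`-prefixed symbol name that the removed `format!` call built by hand; a rough sketch of the expected behaviour (signature simplified — the real helper lives in `rustc_ast::expand::allocator` and is not reproduced here):

    fn global_fn_name(base: &str) -> String {
        format!("__rust_{}", base)
    }

    fn main() {
        // e.g. the "alloc" method maps to the exported `__rust_alloc` symbol.
        assert_eq!(global_fn_name("alloc"), "__rust_alloc");
    }
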
--- src/allocator.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/allocator.rs b/src/allocator.rs index fe143aaad5f..ff68de8c8f1 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -2,7 +2,8 @@ use gccjit::FnAttribute; use gccjit::{FunctionType, GlobalKind, ToRValue}; use rustc_ast::expand::allocator::{ - alloc_error_handler_name, default_fn_name, AllocatorKind, AllocatorTy, ALLOCATOR_METHODS, + alloc_error_handler_name, default_fn_name, global_fn_name, AllocatorKind, AllocatorTy, + ALLOCATOR_METHODS, }; use rustc_middle::bug; use rustc_middle::ty::TyCtxt; @@ -46,7 +47,7 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam panic!("invalid allocator output") } }; - let name = format!("__rust_{}", method.name); + let name = global_fn_name(method.name); let args: Vec<_> = types.iter().enumerate() .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) From da466a2adc6aa4a1250a7022d216b35ef25e3fbd Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sat, 10 Sep 2022 11:33:44 +0000 Subject: [PATCH 086/112] Prevent insta-stable no alloc shim support You will need to add the following as replacement for the old __rust_* definitions when not using the alloc shim. #[no_mangle] static __rust_no_alloc_shim_is_unstable: u8 = 0; --- src/allocator.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/allocator.rs b/src/allocator.rs index ff68de8c8f1..13f88192bbc 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -3,7 +3,7 @@ use gccjit::FnAttribute; use gccjit::{FunctionType, GlobalKind, ToRValue}; use rustc_ast::expand::allocator::{ alloc_error_handler_name, default_fn_name, global_fn_name, AllocatorKind, AllocatorTy, - ALLOCATOR_METHODS, + ALLOCATOR_METHODS, NO_ALLOC_SHIM_IS_UNSTABLE, }; use rustc_middle::bug; use rustc_middle::ty::TyCtxt; @@ -127,4 +127,9 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam let value = tcx.sess.opts.unstable_opts.oom.should_panic(); let value = context.new_rvalue_from_int(i8, value as i32); global.global_set_initializer_rvalue(value); + + let name = NO_ALLOC_SHIM_IS_UNSTABLE.to_string(); + let global = context.new_global(None, GlobalKind::Exported, i8, name); + let value = context.new_rvalue_from_int(i8, 0); + global.global_set_initializer_rvalue(value); } From e5b58425f67107e59aa062ecd1be250e0bbd4c8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Mon, 15 May 2023 06:24:45 +0200 Subject: [PATCH 087/112] Move expansion of query macros in rustc_middle to rustc_middle::query --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 1cabb05de97..442ce0ea542 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -80,8 +80,8 @@ use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMes use rustc_fluent_macro::fluent_messages; use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; +use rustc_middle::query::Providers; use rustc_middle::ty::TyCtxt; -use rustc_middle::ty::query::Providers; use rustc_session::config::{Lto, OptLevel, OutputFilenames}; use rustc_session::Session; use rustc_span::Symbol; From d9d12d7c369f96e76bbe93224d8774a333c4b1b1 Mon Sep 17 00:00:00 2001 From: clubby789 Date: Tue, 23 May 2023 01:51:25 +0000 Subject: [PATCH 088/112] Ensure Fluent messages are in alphabetical order --- messages.ftl | 66 
++++++++++++++++++++++++++-------------------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/messages.ftl b/messages.ftl index 0a94a08f8dc..97bc8ef9d1b 100644 --- a/messages.ftl +++ b/messages.ftl @@ -1,68 +1,68 @@ -codegen_gcc_unwinding_inline_asm = - GCC backend does not support unwinding from inline asm - -codegen_gcc_lto_not_supported = - LTO is not supported. You may get a linker error. +codegen_gcc_invalid_minimum_alignment = + invalid minimum global alignment: {$err} codegen_gcc_invalid_monomorphization_basic_integer = invalid monomorphization of `{$name}` intrinsic: expected basic integer type, found `{$ty}` -codegen_gcc_invalid_monomorphization_invalid_float_vector = - invalid monomorphization of `{$name}` intrinsic: unsupported element type `{$elem_ty}` of floating-point vector `{$vec_ty}` - -codegen_gcc_invalid_monomorphization_not_float = - invalid monomorphization of `{$name}` intrinsic: `{$ty}` is not a floating-point type - -codegen_gcc_invalid_monomorphization_unrecognized = - invalid monomorphization of `{$name}` intrinsic: unrecognized intrinsic `{$name}` - codegen_gcc_invalid_monomorphization_expected_signed_unsigned = invalid monomorphization of `{$name}` intrinsic: expected element type `{$elem_ty}` of vector type `{$vec_ty}` to be a signed or unsigned integer type -codegen_gcc_invalid_monomorphization_unsupported_element = - invalid monomorphization of `{$name}` intrinsic: unsupported {$name} from `{$in_ty}` with element `{$elem_ty}` to `{$ret_ty}` +codegen_gcc_invalid_monomorphization_expected_simd = + invalid monomorphization of `{$name}` intrinsic: expected SIMD {$expected_ty} type, found non-SIMD `{$found_ty}` + +codegen_gcc_invalid_monomorphization_inserted_type = + invalid monomorphization of `{$name}` intrinsic: expected inserted type `{$in_elem}` (element of input `{$in_ty}`), found `{$out_ty}` codegen_gcc_invalid_monomorphization_invalid_bitmask = invalid monomorphization of `{$name}` intrinsic: invalid bitmask `{$ty}`, expected `u{$expected_int_bits}` or `[u8; {$expected_bytes}]` -codegen_gcc_invalid_monomorphization_simd_shuffle = - invalid monomorphization of `{$name}` intrinsic: simd_shuffle index must be an array of `u32`, got `{$ty}` - -codegen_gcc_invalid_monomorphization_expected_simd = - invalid monomorphization of `{$name}` intrinsic: expected SIMD {$expected_ty} type, found non-SIMD `{$found_ty}` +codegen_gcc_invalid_monomorphization_invalid_float_vector = + invalid monomorphization of `{$name}` intrinsic: unsupported element type `{$elem_ty}` of floating-point vector `{$vec_ty}` codegen_gcc_invalid_monomorphization_mask_type = invalid monomorphization of `{$name}` intrinsic: mask element type is `{$ty}`, expected `i_` +codegen_gcc_invalid_monomorphization_mismatched_lengths = + invalid monomorphization of `{$name}` intrinsic: mismatched lengths: mask length `{$m_len}` != other vector length `{$v_len}` + +codegen_gcc_invalid_monomorphization_not_float = + invalid monomorphization of `{$name}` intrinsic: `{$ty}` is not a floating-point type + +codegen_gcc_invalid_monomorphization_return_element = + invalid monomorphization of `{$name}` intrinsic: expected return element type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}` with element type `{$out_ty}` + +codegen_gcc_invalid_monomorphization_return_integer_type = + invalid monomorphization of `{$name}` intrinsic: expected return type with integer elements, found `{$ret_ty}` with non-integer `{$out_ty}` + codegen_gcc_invalid_monomorphization_return_length = invalid 
monomorphization of `{$name}` intrinsic: expected return type of length {$in_len}, found `{$ret_ty}` with length {$out_len} codegen_gcc_invalid_monomorphization_return_length_input_type = invalid monomorphization of `{$name}` intrinsic: expected return type with length {$in_len} (same as input type `{$in_ty}`), found `{$ret_ty}` with length {$out_len} -codegen_gcc_invalid_monomorphization_return_element = - invalid monomorphization of `{$name}` intrinsic: expected return element type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}` with element type `{$out_ty}` - codegen_gcc_invalid_monomorphization_return_type = invalid monomorphization of `{$name}` intrinsic: expected return type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}` -codegen_gcc_invalid_monomorphization_inserted_type = - invalid monomorphization of `{$name}` intrinsic: expected inserted type `{$in_elem}` (element of input `{$in_ty}`), found `{$out_ty}` - -codegen_gcc_invalid_monomorphization_return_integer_type = - invalid monomorphization of `{$name}` intrinsic: expected return type with integer elements, found `{$ret_ty}` with non-integer `{$out_ty}` +codegen_gcc_invalid_monomorphization_simd_shuffle = + invalid monomorphization of `{$name}` intrinsic: simd_shuffle index must be an array of `u32`, got `{$ty}` -codegen_gcc_invalid_monomorphization_mismatched_lengths = - invalid monomorphization of `{$name}` intrinsic: mismatched lengths: mask length `{$m_len}` != other vector length `{$v_len}` +codegen_gcc_invalid_monomorphization_unrecognized = + invalid monomorphization of `{$name}` intrinsic: unrecognized intrinsic `{$name}` codegen_gcc_invalid_monomorphization_unsupported_cast = invalid monomorphization of `{$name}` intrinsic: unsupported cast from `{$in_ty}` with element `{$in_elem}` to `{$ret_ty}` with element `{$out_elem}` +codegen_gcc_invalid_monomorphization_unsupported_element = + invalid monomorphization of `{$name}` intrinsic: unsupported {$name} from `{$in_ty}` with element `{$elem_ty}` to `{$ret_ty}` + codegen_gcc_invalid_monomorphization_unsupported_operation = invalid monomorphization of `{$name}` intrinsic: unsupported operation on `{$in_ty}` with element `{$in_elem}` -codegen_gcc_invalid_minimum_alignment = - invalid minimum global alignment: {$err} +codegen_gcc_lto_not_supported = + LTO is not supported. You may get a linker error. 
codegen_gcc_tied_target_features = the target features {$features} must all be either enabled or disabled together .help = add the missing features in a `target_feature` attribute + +codegen_gcc_unwinding_inline_asm = + GCC backend does not support unwinding from inline asm From 80efecf18caf639184315645f3abdae19e3ee5af Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Fri, 19 May 2023 15:48:43 +0000 Subject: [PATCH 089/112] Stop creating intermediate places just to immediate convert them to operands --- src/common.rs | 42 +++++++++++++++++++----------------------- src/consts.rs | 17 +---------------- 2 files changed, 20 insertions(+), 39 deletions(-) diff --git a/src/common.rs b/src/common.rs index ac04b61a306..bad87db4732 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,17 +1,15 @@ use gccjit::LValue; use gccjit::{RValue, Type, ToRValue}; -use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ BaseTypeMethods, ConstMethods, - DerivedTypeMethods, MiscMethods, StaticMethods, }; use rustc_middle::mir::Mutability; -use rustc_middle::ty::layout::{TyAndLayout, LayoutOf}; +use rustc_middle::ty::layout::{LayoutOf}; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; -use rustc_target::abi::{self, HasDataLayout, Pointer, Size}; +use rustc_target::abi::{self, HasDataLayout, Pointer}; use crate::consts::const_alloc_to_gcc; use crate::context::CodegenCx; @@ -240,28 +238,26 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { const_alloc_to_gcc(self, alloc) } - fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: ConstAllocation<'tcx>, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> { - assert_eq!(alloc.inner().align, layout.align.abi); - let ty = self.type_ptr_to(layout.gcc_type(self)); - let value = - if layout.size == Size::ZERO { - let value = self.const_usize(alloc.inner().align.bytes()); - self.const_bitcast(value, ty) - } - else { - let init = const_alloc_to_gcc(self, alloc); - let base_addr = self.static_addr_of(init, alloc.inner().align, None); - - let array = self.const_bitcast(base_addr, self.type_i8p()); - let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None); - self.const_bitcast(value, ty) - }; - PlaceRef::new_sized(value, layout) - } - fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> { self.context.new_cast(None, val, ty) } + + fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> { + if value.get_type() == self.bool_type.make_pointer() { + if let Some(pointee) = typ.get_pointee() { + if pointee.dyncast_vector().is_some() { + panic!() + } + } + } + // NOTE: since bitcast makes a value non-constant, don't bitcast if not necessary as some + // SIMD builtins require a constant value. 
+ self.bitcast_if_needed(value, typ) + } + + fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value { + self.context.new_array_access(None, base_addr, self.const_usize(offset.bytes())).get_address(None) + } } pub trait SignType<'gcc, 'tcx> { diff --git a/src/consts.rs b/src/consts.rs index 792ab8f890d..873f652e6f1 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -1,6 +1,6 @@ #[cfg(feature = "master")] use gccjit::FnAttribute; -use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue, Type}; +use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue}; use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods}; use rustc_middle::span_bug; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; @@ -16,21 +16,6 @@ use crate::context::CodegenCx; use crate::errors::InvalidMinimumAlignment; use crate::type_of::LayoutGccExt; -impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { - pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> { - if value.get_type() == self.bool_type.make_pointer() { - if let Some(pointee) = typ.get_pointee() { - if pointee.dyncast_vector().is_some() { - panic!() - } - } - } - // NOTE: since bitcast makes a value non-constant, don't bitcast if not necessary as some - // SIMD builtins require a constant value. - self.bitcast_if_needed(value, typ) - } -} - fn set_global_alignment<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, gv: LValue<'gcc>, mut align: Align) { // The target may require greater alignment for globals than the type does. // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, From 3d0a0dccae89da3ef057dda6e84d480f7888eee9 Mon Sep 17 00:00:00 2001 From: Scott McMurray Date: Sun, 7 May 2023 03:00:41 -0700 Subject: [PATCH 090/112] Add a distinct `OperandValue::ZeroSized` variant for ZSTs These tend to have special handling in a bunch of places anyway, so the variant helps remember that. And I think it's easier to grok than non-Scalar Aggregates sometimes being `Immediates` (like I got wrong and caused 109992). As a minor bonus, it means we don't need to generate poison LLVM values for them to pass around in `OperandValue::Immediate`s. --- src/builder.rs | 2 +- src/type_of.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 869344ce92d..f9ea0f00456 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -758,7 +758,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { - return OperandRef::new_zst(self, place.layout); + return OperandRef::zero_sized(place.layout); } fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) { diff --git a/src/type_of.rs b/src/type_of.rs index 5df8c1a209d..30a3fe67b85 100644 --- a/src/type_of.rs +++ b/src/type_of.rs @@ -159,8 +159,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { fn is_gcc_immediate(&self) -> bool { match self.abi { Abi::Scalar(_) | Abi::Vector { .. } => true, - Abi::ScalarPair(..) => false, - Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(), + Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. 
} => false, } } From f16ca93dedd8f4b314e0276a2bbab011af1c9112 Mon Sep 17 00:00:00 2001 From: Deadbeef Date: Wed, 17 May 2023 10:30:14 +0000 Subject: [PATCH 091/112] Use translatable diagnostics in `rustc_const_eval` --- src/consts.rs | 2 +- src/context.rs | 15 +++------------ 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/src/consts.rs b/src/consts.rs index 873f652e6f1..33e3b0baa92 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -24,7 +24,7 @@ fn set_global_alignment<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, gv: LValue<'gcc> match Align::from_bits(min) { Ok(min) => align = align.max(min), Err(err) => { - cx.sess().emit_err(InvalidMinimumAlignment { err }); + cx.sess().emit_err(InvalidMinimumAlignment { err: err.to_string() }); } } } diff --git a/src/context.rs b/src/context.rs index 661681bdb50..08507e19652 100644 --- a/src/context.rs +++ b/src/context.rs @@ -477,7 +477,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { if let LayoutError::SizeOverflow(_) = err { - self.sess().emit_fatal(respan(span, err)) + self.sess().emit_fatal(respan(span, err.into_diagnostic())) } else { span_bug!(span, "failed to get layout for `{}`: {}", ty, err) } @@ -499,21 +499,12 @@ impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { } else { match fn_abi_request { FnAbiRequest::OfFnPtr { sig, extra_args } => { - span_bug!( - span, - "`fn_abi_of_fn_ptr({}, {:?})` failed: {}", - sig, - extra_args, - err - ); + span_bug!(span, "`fn_abi_of_fn_ptr({sig}, {extra_args:?})` failed: {err:?}"); } FnAbiRequest::OfInstance { instance, extra_args } => { span_bug!( span, - "`fn_abi_of_instance({}, {:?})` failed: {}", - instance, - extra_args, - err + "`fn_abi_of_instance({instance}, {extra_args:?})` failed: {err:?}" ); } } From 8430ec5e50ba477ad6e55e4e80987f6c32525b23 Mon Sep 17 00:00:00 2001 From: Andrew Xie Date: Fri, 7 Apr 2023 15:56:33 -0400 Subject: [PATCH 092/112] Updated cranelift codegen to reflect modified trait signature --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 442ce0ea542..ea013c4428c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -75,7 +75,7 @@ use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; use rustc_codegen_ssa::target_features::supported_target_features; use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::FxIndexMap; use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMessage}; use rustc_fluent_macro::fluent_messages; use rustc_metadata::EncodedMetadata; @@ -137,7 +137,7 @@ impl CodegenBackend for GccCodegenBackend { Box::new(res) } - fn join_codegen(&self, ongoing_codegen: Box, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxHashMap), ErrorGuaranteed> { + fn join_codegen(&self, ongoing_codegen: Box, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxIndexMap), ErrorGuaranteed> { let (codegen_results, work_products) = ongoing_codegen .downcast::>() .expect("Expected GccCodegenBackend's OngoingCodegen, found Box") From 8c2b14f7088c31c442f1ebc5c0ad7d5e22011963 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 11 Jun 2023 11:41:06 -0400 Subject: [PATCH 
093/112] Update to nightly-2023-06-11 --- rust-toolchain | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain b/rust-toolchain index 933ecd45baa..90f9a621077 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2023-03-02" +channel = "nightly-2023-06-11" components = ["rust-src", "rustc-dev", "llvm-tools-preview"] From c8376e4c78cfdc4d55b6c818fc888a6d62dc08fe Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sat, 3 Jun 2023 09:04:12 -0400 Subject: [PATCH 094/112] Add usage of git subtree to readme --- Readme.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Readme.md b/Readme.md index bb741943892..593cd3f9bbf 100644 --- a/Readme.md +++ b/Readme.md @@ -193,7 +193,7 @@ Using git-subtree with `rustc` requires a patched git to make it work. The PR that is needed is [here](https://github.com/gitgitgadget/git/pull/493). Use the following instructions to install it: -``` +```bash git clone git@github.com:tqc/git.git cd git git checkout tqc/subtree @@ -204,6 +204,12 @@ make cp git-subtree ~/bin ``` +Then, do a sync with this command: + +```bash +PATH="$HOME/bin:$PATH" ~/bin/git-subtree push -P compiler/rustc_codegen_gcc/ ../rustc_codegen_gcc/ sync_branch_name +``` + ### How to use [mem-trace](https://github.com/antoyo/mem-trace) `rustc` needs to be built without `jemalloc` so that `mem-trace` can overload `malloc` since `jemalloc` is linked statically, so a `LD_PRELOAD`-ed library won't a chance to intercept the calls to `malloc`. From 94e5c2701228b6fc8d765136967ddc6d51dff6a9 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sat, 3 Jun 2023 09:34:56 -0400 Subject: [PATCH 095/112] Update libgccjit and mini_core --- Cargo.lock | 4 ++-- example/mini_core.rs | 16 +++++++++++++++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f2e152f8ce..1c8754bf675 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,7 +35,7 @@ dependencies = [ [[package]] name = "gccjit" version = "1.0.0" -source = "git+https://github.com/antoyo/gccjit.rs#fe242b7eb26980e6c78859d51c8d4cc1e43381a3" +source = "git+https://github.com/antoyo/gccjit.rs#d6e52626cfc6f487094a5d5ac66302baf3439984" dependencies = [ "gccjit_sys", ] @@ -43,7 +43,7 @@ dependencies = [ [[package]] name = "gccjit_sys" version = "0.0.1" -source = "git+https://github.com/antoyo/gccjit.rs#fe242b7eb26980e6c78859d51c8d4cc1e43381a3" +source = "git+https://github.com/antoyo/gccjit.rs#d6e52626cfc6f487094a5d5ac66302baf3439984" dependencies = [ "libc", ] diff --git a/example/mini_core.rs b/example/mini_core.rs index 637b8dc53fe..f0347db00cc 100644 --- a/example/mini_core.rs +++ b/example/mini_core.rs @@ -451,6 +451,9 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { drop_in_place(to_drop); } +#[lang = "unpin"] +pub auto trait Unpin {} + #[lang = "deref"] pub trait Deref { type Target: ?Sized; @@ -486,7 +489,18 @@ impl DispatchFromDyn> for Unique where T: Uns #[lang = "owned_box"] pub struct Box(Unique, A); -impl, U: ?Sized, A: Allocator> CoerceUnsized> for Box {} +impl, U: ?Sized> CoerceUnsized> for Box {} + +impl Box { + pub fn new(val: T) -> Box { + unsafe { + let size = intrinsics::size_of::(); + let ptr = libc::malloc(size); + intrinsics::copy(&val as *const T as *const u8, ptr, size); + Box(Unique { pointer: NonNull(ptr as *const T), _marker: PhantomData }, Global) + } + } +} impl Drop for Box { fn drop(&mut self) { From 
4115e09c13a975b42d506e2db6119d7c3ff1876e Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 4 Jun 2023 13:14:55 -0400 Subject: [PATCH 096/112] Fix for opaque pointers --- Cargo.toml | 2 +- src/builder.rs | 40 ++++++++++++++++++++++++++-------------- src/type_of.rs | 6 +++--- 3 files changed, 30 insertions(+), 18 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7285e3eaf51..81066d9ce1f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ master = ["gccjit/master"] gccjit = { git = "https://github.com/antoyo/gccjit.rs" } # Local copy. -# gccjit = { path = "../gccjit.rs" } +#gccjit = { path = "../gccjit.rs" } smallvec = { version = "1.6.1", features = ["union", "may_dangle"] } diff --git a/src/builder.rs b/src/builder.rs index f9ea0f00456..d578a0df51d 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -280,8 +280,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } } - fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { - let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr"); + fn function_ptr_call(&mut self, typ: Type<'gcc>, mut func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { + let gcc_func = + match func_ptr.get_type().dyncast_function_ptr_type() { + Some(func) => func, + None => { + // NOTE: due to opaque pointers now being used, we need to cast here. + let new_func_type = typ.dyncast_function_ptr_type().expect("function ptr"); + func_ptr = self.context.new_cast(None, func_ptr, typ); + new_func_type + }, + }; let func_name = format!("{:?}", func_ptr); let previous_arg_count = args.len(); let orig_args = args; @@ -424,16 +433,17 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.llbb().end_with_void_return(None) } - fn ret(&mut self, value: RValue<'gcc>) { - let value = - if self.structs_as_pointer.borrow().contains(&value) { - // NOTE: hack to workaround a limitation of the rustc API: see comment on - // CodegenCx.structs_as_pointer - value.dereference(None).to_rvalue() - } - else { - value - }; + fn ret(&mut self, mut value: RValue<'gcc>) { + if self.structs_as_pointer.borrow().contains(&value) { + // NOTE: hack to workaround a limitation of the rustc API: see comment on + // CodegenCx.structs_as_pointer + value = value.dereference(None).to_rvalue(); + } + let expected_return_type = self.current_func().get_return_type(); + if !expected_return_type.is_compatible_with(value.get_type()) { + // NOTE: due to opaque pointers now being used, we need to cast here. + value = self.context.new_cast(None, value, expected_return_type); + } self.llbb().end_with_return(None, value); } @@ -938,6 +948,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { element.get_address(None) } else if let Some(struct_type) = value_type.is_struct() { + // NOTE: due to opaque pointers now being used, we need to bitcast here. 
+ let ptr = self.bitcast_if_needed(ptr, value_type.make_pointer()); ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None) } else { @@ -1356,7 +1368,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn call( &mut self, - _typ: Type<'gcc>, + typ: Type<'gcc>, _fn_attrs: Option<&CodegenFnAttrs>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, @@ -1370,7 +1382,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } else { // If it's a not function that was defined, it's a function pointer. - self.function_ptr_call(func, args, funclet) + self.function_ptr_call(typ, func, args, funclet) }; if let Some(_fn_abi) = fn_abi { // TODO(bjorn3): Apply function attributes diff --git a/src/type_of.rs b/src/type_of.rs index 30a3fe67b85..74f016cf90a 100644 --- a/src/type_of.rs +++ b/src/type_of.rs @@ -383,8 +383,8 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { unimplemented!(); } - fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> { - // FIXME(antoyo): return correct type. - self.type_void() + fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> { + let (return_type, param_types, variadic, _) = fn_abi.gcc_type(self); + self.context.new_function_pointer_type(None, return_type, ¶m_types, variadic) } } From e74bc5113dac9f00b2eabcd1963d05e0f1a84ec2 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 4 Jun 2023 18:21:44 -0400 Subject: [PATCH 097/112] Attempt to fix the tests --- build_sysroot/prepare_sysroot_src.sh | 8 ++++---- src/intrinsic/simd.rs | 9 +++++++-- src/lib.rs | 2 ++ test.sh | 15 ++++++++++++--- 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/build_sysroot/prepare_sysroot_src.sh b/build_sysroot/prepare_sysroot_src.sh index 56768bbf1d0..71b3876bac2 100755 --- a/build_sysroot/prepare_sysroot_src.sh +++ b/build_sysroot/prepare_sysroot_src.sh @@ -29,10 +29,10 @@ git config user.name || git config user.name "None" git commit -m "Initial commit" -q for file in $(ls ../../patches/ | grep -v patcha); do -echo "[GIT] apply" $file -git apply ../../patches/$file -git add -A -git commit --no-gpg-sign -m "Patch $file" + echo "[GIT] apply" $file + git apply ../../patches/$file + git add -A + git commit --no-gpg-sign -m "Patch $file" done popd diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs index b59c3a64f57..36b9c9b6364 100644 --- a/src/intrinsic/simd.rs +++ b/src/intrinsic/simd.rs @@ -165,10 +165,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( InvalidMonomorphizationReturnIntegerType { span, name, ret_ty, out_ty } ); + let arg1 = args[0].immediate(); + // NOTE: we get different vector types for the same vector type and libgccjit doesn't + // compare them as equal, so bitcast. + // FIXME(antoyo): allow comparing vector types as equal in libgccjit. + let arg2 = bx.context.new_bitcast(None, args[1].immediate(), arg1.get_type()); return Ok(compare_simd_types( bx, - args[0].immediate(), - args[1].immediate(), + arg1, + arg2, in_elem, llret_ty, cmp_op, diff --git a/src/lib.rs b/src/lib.rs index 204741ca3c1..3bf92c06302 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,5 @@ +// FIXME: simple programs now segfault with a sysroot compile in release mode. + /* * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?) * TODO(antoyo): support #[inline] attributes. 
diff --git a/test.sh b/test.sh index 6139892aefc..c62c49cef99 100755 --- a/test.sh +++ b/test.sh @@ -214,12 +214,14 @@ function setup_rustc() { rm config.toml || true cat > config.toml < Date: Thu, 8 Jun 2023 20:51:29 -0400 Subject: [PATCH 098/112] Add missing cast to fix another issue caused by opaque pointers --- src/builder.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/builder.rs b/src/builder.rs index d578a0df51d..f2775421ccd 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -919,7 +919,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.context.new_bitcast(None, result, ptr_type) } - fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { + fn inbounds_gep(&mut self, typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { + // NOTE: due to opaque pointers now being used, we need to cast here. + let ptr = self.context.new_cast(None, ptr, typ.make_pointer()); // NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified). let mut indices = indices.into_iter(); let index = indices.next().expect("first index in inbounds_gep"); From 984b9c52ccc0d493d2215167ecb03d76e9cb30ec Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Fri, 9 Jun 2023 10:00:50 -0400 Subject: [PATCH 099/112] Improve sync instructions --- Readme.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Readme.md b/Readme.md index 593cd3f9bbf..a93637d9038 100644 --- a/Readme.md +++ b/Readme.md @@ -208,8 +208,17 @@ Then, do a sync with this command: ```bash PATH="$HOME/bin:$PATH" ~/bin/git-subtree push -P compiler/rustc_codegen_gcc/ ../rustc_codegen_gcc/ sync_branch_name +cd ../rustc_codegen_gcc +git checkout master +git pull +git checkout sync_branch_name +git merge master ``` +TODO: write a script that does the above. + +https://rust-lang.zulipchat.com/#narrow/stream/301329-t-devtools/topic/subtree.20madness/near/258877725 + ### How to use [mem-trace](https://github.com/antoyo/mem-trace) `rustc` needs to be built without `jemalloc` so that `mem-trace` can overload `malloc` since `jemalloc` is linked statically, so a `LD_PRELOAD`-ed library won't a chance to intercept the calls to `malloc`. From e9708ebcefe398d5d8669e80260a4c73415ddc1d Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sat, 10 Jun 2023 14:08:33 -0400 Subject: [PATCH 100/112] Add note --- test.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test.sh b/test.sh index c62c49cef99..7c8fe55da14 100755 --- a/test.sh +++ b/test.sh @@ -356,11 +356,12 @@ function test_rustc() { # We need to overwrite the sysroot in the tests, now. # TODO(antoyo): find a faster way to do this. # FIXME: this makes the stderr different since it changes the line numbers. - for file in $(find tests/ui -type f -name '*.rs'); do - sed -ie "1i // compile-flags: --sysroot "$(pwd)"/../build_sysroot/sysroot\n" $file - done + #for file in $(find tests/ui -type f -name '*.rs'); do + #sed -ie "1i // compile-flags: --sysroot "$(pwd)"/../build_sysroot/sysroot\n" $file + #done - RUSTC_ARGS="$TEST_FLAGS -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext"" + # TODO: copy the sysroot at the correct location to not have to use the --sysroot flag. 
+ RUSTC_ARGS="$TEST_FLAGS -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot" if [ $# -eq 0 ]; then # No argument supplied to the function. Doing nothing. From a0edbfb2d3f163f899156a2f2050b227463ba659 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 11 Jun 2023 11:35:35 -0400 Subject: [PATCH 101/112] Test to fix UI tests --- test.sh | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/test.sh b/test.sh index 7c8fe55da14..5c5c26699b0 100755 --- a/test.sh +++ b/test.sh @@ -213,6 +213,13 @@ function setup_rustc() { rm config.toml || true + # TODO: copy in build_sysroot/build_sysroot.sh instead to avoid having to rebuild stage0 libraries everytime? + my_toolchain_dir=$HOME/.rustup/toolchains/my_toolchain + rm -rf $my_toolchain_dir + cp -r $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE $my_toolchain_dir + rm -rf $my_toolchain_dir/lib/rustlib/x86_64-unknown-linux-gnu/ + cp -r ../build_sysroot/sysroot/* $my_toolchain_dir + cat > config.toml < ../trace + COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/ui/zero-sized/zero-sized-linkedlist-push.rs --rustc-args "$RUSTC_ARGS" } function test_failing_rustc() { From 90527b81c9c767adfef2c895fb8b776eb3ac3212 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 11 Jun 2023 16:04:00 -0400 Subject: [PATCH 102/112] Some fixes and cleanups --- rust-toolchain | 2 +- src/lib.rs | 2 +- test.sh | 17 ++++------------- 3 files changed, 6 insertions(+), 15 deletions(-) diff --git a/rust-toolchain b/rust-toolchain index 90f9a621077..2614fa081a8 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2023-06-11" +channel = "nightly-2023-06-10" components = ["rust-src", "rustc-dev", "llvm-tools-preview"] diff --git a/src/lib.rs b/src/lib.rs index 3bf92c06302..7a89a449b69 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -79,7 +79,7 @@ use rustc_codegen_ssa::target_features::supported_target_features; use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; use rustc_data_structures::fx::FxIndexMap; use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMessage}; -use rustc_macros::fluent_messages; +use rustc_fluent_macro::fluent_messages; use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::query::Providers; diff --git a/test.sh b/test.sh index 5c5c26699b0..7c68e00771d 100755 --- a/test.sh +++ b/test.sh @@ -213,7 +213,8 @@ function setup_rustc() { rm config.toml || true - # TODO: copy in build_sysroot/build_sysroot.sh instead to avoid having to rebuild stage0 libraries everytime? + # TODO: move these lines to build_sysroot/build_sysroot.sh instead to avoid having to rebuild stage0 libraries everytime? + # Since we can't override the sysroot anymore, we create a new toolchain and manually overwrite the sysroot directory. my_toolchain_dir=$HOME/.rustup/toolchains/my_toolchain rm -rf $my_toolchain_dir cp -r $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE $my_toolchain_dir @@ -362,16 +363,7 @@ function test_rustc() { git checkout tests/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs git checkout tests/ui/macros/rfc-2011-nicer-assert-messages/auxiliary/common.rs - # We need to overwrite the sysroot in the tests, now. - # TODO(antoyo): find a faster way to do this. 
- # FIXME: this makes the stderr different since it changes the line numbers. - #for file in $(find tests/ui -type f -name '*.rs'); do - #sed -ie "1i // compile-flags: --sysroot "$(pwd)"/../build_sysroot/sysroot\n" $file - #done - - # TODO: copy the sysroot at the correct location to not have to use the --sysroot flag. - #RUSTC_ARGS="$TEST_FLAGS -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext"" - RUSTC_ARGS="$TEST_FLAGS -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE/bin/rustc" + RUSTC_ARGS="$TEST_FLAGS -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext"" if [ $# -eq 0 ]; then @@ -404,8 +396,7 @@ function test_rustc() { fi echo "[TEST] rustc test suite" - #COMPILETEST_FORCE_STAGE0=1 strace -f ./x.py test --run always --stage 0 tests/ui/ --rustc-args "$RUSTC_ARGS" &> ../trace - COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/ui/zero-sized/zero-sized-linkedlist-push.rs --rustc-args "$RUSTC_ARGS" + COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/ui --rustc-args "$RUSTC_ARGS" } function test_failing_rustc() { From 3371fce044f1f4aafe258d21f3e03774d30a1cab Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 11 Jun 2023 18:21:00 -0400 Subject: [PATCH 103/112] Fix tests --- failing-ui-tests.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/failing-ui-tests.txt b/failing-ui-tests.txt index 8539e27ea6a..1b0e30c920a 100644 --- a/failing-ui-tests.txt +++ b/failing-ui-tests.txt @@ -54,8 +54,8 @@ tests/ui/issues/issue-40883.rs tests/ui/issues/issue-43853.rs tests/ui/issues/issue-47364.rs tests/ui/macros/rfc-2011-nicer-assert-messages/assert-without-captures-does-not-create-unnecessary-code.rs -tests/ui/rfc-2091-track-caller/std-panic-locations.rs -tests/ui/rfcs/rfc1857-drop-order.rs +tests/ui/rfcs/rfc-2091-track-caller/std-panic-locations.rs +tests/ui/rfcs/rfc-1857-stabilize-drop-order/drop-order.rs tests/ui/simd/issue-17170.rs tests/ui/simd/issue-39720.rs tests/ui/simd/issue-89193.rs From ef037e6d30b21363c48c47045b325a780fdde6d3 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 11 Jun 2023 19:27:39 -0400 Subject: [PATCH 104/112] Fix tests --- build_sysroot/Cargo.toml | 1 + failing-ui-tests.txt | 2 ++ src/declare.rs | 2 +- test.sh | 2 ++ 4 files changed, 6 insertions(+), 1 deletion(-) diff --git a/build_sysroot/Cargo.toml b/build_sysroot/Cargo.toml index cfadf47cc3f..a84f86a8218 100644 --- a/build_sysroot/Cargo.toml +++ b/build_sysroot/Cargo.toml @@ -9,6 +9,7 @@ compiler_builtins = "0.1" alloc = { path = "./sysroot_src/library/alloc" } std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] } test = { path = "./sysroot_src/library/test" } +proc_macro = { path = "./sysroot_src/library/proc_macro" } [patch.crates-io] rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" } diff --git a/failing-ui-tests.txt b/failing-ui-tests.txt index 1b0e30c920a..801464daae9 100644 --- a/failing-ui-tests.txt +++ b/failing-ui-tests.txt @@ -66,3 +66,5 @@ tests/ui/generator/panic-safe.rs tests/ui/issues/issue-14875.rs tests/ui/issues/issue-29948.rs tests/ui/panic-while-printing.rs +tests/ui/enum-discriminant/get_discr.rs +tests/ui/panics/nested_panic_caught.rs diff --git a/src/declare.rs b/src/declare.rs index 4748e7e4be2..493626c3cf5 100644 --- 
a/src/declare.rs +++ b/src/declare.rs @@ -132,7 +132,7 @@ fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*ll pub fn mangle_name(name: &str) -> String { name.replace(|char: char| { if !char.is_alphanumeric() && char != '_' { - debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char); + debug_assert!("$.*".contains(char), "Unsupported char in function name {}: {}", name, char); true } else { diff --git a/test.sh b/test.sh index 7c68e00771d..72753e1d466 100755 --- a/test.sh +++ b/test.sh @@ -350,6 +350,8 @@ function test_rustc() { for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" tests/ui); do rm $test done + rm tests/ui/consts/const_cmp_type_id.rs + rm tests/ui/consts/issue-73976-monomorphic.rs git checkout -- tests/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed From 8bba64673c4fa7b19524efa563ae6233e0bda217 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 11 Jun 2023 20:01:24 -0400 Subject: [PATCH 105/112] Cleanup --- build_sysroot/build_sysroot.sh | 8 ++++++++ src/lib.rs | 2 -- test.sh | 8 -------- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/build_sysroot/build_sysroot.sh b/build_sysroot/build_sysroot.sh index 9d692d599f6..dc80e4fff80 100755 --- a/build_sysroot/build_sysroot.sh +++ b/build_sysroot/build_sysroot.sh @@ -28,3 +28,11 @@ fi # Copy files to sysroot mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ + +# Since we can't override the sysroot for the UI tests anymore, we create a new toolchain and manually overwrite the sysroot directory. +my_toolchain_dir=$HOME/.rustup/toolchains/my_toolchain +rm -rf $my_toolchain_dir +rust_toolchain=$(cat ../rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') +cp -r $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE $my_toolchain_dir +rm -rf $my_toolchain_dir/lib/rustlib/$TARGET_TRIPLE/ +cp -r ../build_sysroot/sysroot/* $my_toolchain_dir diff --git a/src/lib.rs b/src/lib.rs index 7a89a449b69..2a6b642782d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,3 @@ -// FIXME: simple programs now segfault with a sysroot compile in release mode. - /* * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?) * TODO(antoyo): support #[inline] attributes. diff --git a/test.sh b/test.sh index 72753e1d466..a6e87fca162 100755 --- a/test.sh +++ b/test.sh @@ -213,13 +213,7 @@ function setup_rustc() { rm config.toml || true - # TODO: move these lines to build_sysroot/build_sysroot.sh instead to avoid having to rebuild stage0 libraries everytime? - # Since we can't override the sysroot anymore, we create a new toolchain and manually overwrite the sysroot directory. 
my_toolchain_dir=$HOME/.rustup/toolchains/my_toolchain - rm -rf $my_toolchain_dir - cp -r $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE $my_toolchain_dir - rm -rf $my_toolchain_dir/lib/rustlib/x86_64-unknown-linux-gnu/ - cp -r ../build_sysroot/sysroot/* $my_toolchain_dir cat > config.toml < Date: Mon, 12 Jun 2023 20:51:29 -0400 Subject: [PATCH 106/112] Tests for the CI --- build_sysroot/build_sysroot.sh | 2 +- test.sh | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/build_sysroot/build_sysroot.sh b/build_sysroot/build_sysroot.sh index dc80e4fff80..063219aabd2 100755 --- a/build_sysroot/build_sysroot.sh +++ b/build_sysroot/build_sysroot.sh @@ -30,7 +30,7 @@ mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ # Since we can't override the sysroot for the UI tests anymore, we create a new toolchain and manually overwrite the sysroot directory. -my_toolchain_dir=$HOME/.rustup/toolchains/my_toolchain +my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests rm -rf $my_toolchain_dir rust_toolchain=$(cat ../rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') cp -r $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE $my_toolchain_dir diff --git a/test.sh b/test.sh index a6e87fca162..6213ed49183 100755 --- a/test.sh +++ b/test.sh @@ -213,7 +213,7 @@ function setup_rustc() { rm config.toml || true - my_toolchain_dir=$HOME/.rustup/toolchains/my_toolchain + my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests cat > config.toml < Date: Thu, 15 Jun 2023 20:45:20 -0400 Subject: [PATCH 107/112] Working, but requires a patched rustc --- build_sysroot/build_sysroot.sh | 4 ++-- test.sh | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/build_sysroot/build_sysroot.sh b/build_sysroot/build_sysroot.sh index 063219aabd2..6aea9b94b48 100755 --- a/build_sysroot/build_sysroot.sh +++ b/build_sysroot/build_sysroot.sh @@ -30,9 +30,9 @@ mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ # Since we can't override the sysroot for the UI tests anymore, we create a new toolchain and manually overwrite the sysroot directory. 
-my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests -rm -rf $my_toolchain_dir rust_toolchain=$(cat ../rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') +my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests-$rust_toolchain-$TARGET_TRIPLE +rm -rf $my_toolchain_dir cp -r $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE $my_toolchain_dir rm -rf $my_toolchain_dir/lib/rustlib/$TARGET_TRIPLE/ cp -r ../build_sysroot/sysroot/* $my_toolchain_dir diff --git a/test.sh b/test.sh index 6213ed49183..2e485b92739 100755 --- a/test.sh +++ b/test.sh @@ -209,11 +209,12 @@ function setup_rustc() { cd rust git fetch git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') + git am ../0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch export RUSTFLAGS= rm config.toml || true - my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests + my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests-$rust_toolchain-$TARGET_TRIPLE cat > config.toml < Date: Thu, 15 Jun 2023 20:51:29 -0400 Subject: [PATCH 108/112] Handle alignment of the load instruction --- src/builder.rs | 19 ++++++++++++++----- src/intrinsic/simd.rs | 9 ++++++--- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index f2775421ccd..bb23524d8af 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -729,17 +729,25 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { unimplemented!(); } - fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> { + fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { let block = self.llbb(); let function = block.get_function(); // NOTE: instead of returning the dereference here, we have to assign it to a variable in // the current basic block. Otherwise, it could be used in another basic block, causing a // dereference after a drop, for instance. - // TODO(antoyo): handle align of the load instruction. - let ptr = self.context.new_cast(None, ptr, pointee_ty.make_pointer()); + // FIXME(antoyo): this check that we don't call get_aligned() a second time on a type. + // Ideally, we shouldn't need to do this check. + let aligned_type = + if pointee_ty == self.cx.u128_type || pointee_ty == self.cx.i128_type { + pointee_ty + } + else { + pointee_ty.get_aligned(align.bytes()) + }; + let ptr = self.context.new_cast(None, ptr, aligned_type.make_pointer()); let deref = ptr.dereference(None).to_rvalue(); unsafe { RETURN_VALUE_COUNT += 1 }; - let loaded_value = function.new_local(None, pointee_ty, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT })); + let loaded_value = function.new_local(None, aligned_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT })); block.add_assignment(None, loaded_value, deref); loaded_value.to_rvalue() } @@ -1857,7 +1865,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { #[cfg(feature="master")] let (cond, element_type) = { - let then_val_vector_type = then_val.get_type().dyncast_vector().expect("vector type"); + // TODO(antoyo): dyncast_vector should not require a call to unqualified. 
+ let then_val_vector_type = then_val.get_type().unqualified().dyncast_vector().expect("vector type"); let then_val_element_type = then_val_vector_type.get_element_type(); let then_val_element_size = then_val_element_type.get_size(); diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs index 36b9c9b6364..9115cf97119 100644 --- a/src/intrinsic/simd.rs +++ b/src/intrinsic/simd.rs @@ -346,7 +346,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // endian and MSB-first for big endian. let vector = args[0].immediate(); - let vector_type = vector.get_type().dyncast_vector().expect("vector type"); + // TODO(antoyo): dyncast_vector should not require a call to unqualified. + let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type"); let elem_type = vector_type.get_element_type(); let expected_int_bits = in_len.max(8); @@ -853,7 +854,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( (true, true) => { // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition // TODO(antoyo): improve using conditional operators if possible. - let arg_type = lhs.get_type(); + // TODO(antoyo): dyncast_vector should not require a call to unqualified. + let arg_type = lhs.get_type().unqualified(); // TODO(antoyo): convert lhs and rhs to unsigned. let sum = lhs + rhs; let vector_type = arg_type.dyncast_vector().expect("vector type"); @@ -883,7 +885,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( res & cmp }, (true, false) => { - let arg_type = lhs.get_type(); + // TODO(antoyo): dyncast_vector should not require a call to unqualified. + let arg_type = lhs.get_type().unqualified(); // TODO(antoyo): this uses the same algorithm from saturating add, but add the // negative of the right operand. Find a proper subtraction algorithm. let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs); From 8560b07ebfe177049a3790f4452196250b1cfb06 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Thu, 15 Jun 2023 20:52:45 -0400 Subject: [PATCH 109/112] TO REVERT: temporarily add a patch for rustc --- ...g-the-sysroot-compile-flag-via-rustc.patch | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch diff --git a/0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch b/0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch new file mode 100644 index 00000000000..f7860bd9105 --- /dev/null +++ b/0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch @@ -0,0 +1,27 @@ +From 8d5e85607d3d52f920990334ae1cfa9798ad9259 Mon Sep 17 00:00:00 2001 +From: Antoni Boucher +Date: Thu, 8 Jun 2023 17:27:34 -0400 +Subject: [PATCH] Allow overwriting the sysroot compile flag via --rustc-args + +--- + src/tools/compiletest/src/runtest.rs | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs +index 6582b534488..d16a7d66154 100644 +--- a/src/tools/compiletest/src/runtest.rs ++++ b/src/tools/compiletest/src/runtest.rs +@@ -1951,7 +1951,9 @@ fn make_compile_args( + rustc.arg("-Ztranslate-remapped-path-to-local-path=no"); + + // Optionally prevent default --sysroot if specified in test compile-flags. 
+- if !self.props.compile_flags.iter().any(|flag| flag.starts_with("--sysroot")) { ++ if !self.props.compile_flags.iter().any(|flag| flag.starts_with("--sysroot")) ++ && !self.config.host_rustcflags.iter().any(|flag| flag == "--sysroot") ++ { + // In stage 0, make sure we use `stage0-sysroot` instead of the bootstrap sysroot. + rustc.arg("--sysroot").arg(&self.config.sysroot_base); + } +-- +2.41.0 + From 3d7ec5923d2e4373a354e7aec879e09e48f21517 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sat, 17 Jun 2023 13:19:41 -0400 Subject: [PATCH 110/112] Fix for check_ptr_call for variadic functions --- build_sysroot/build_sysroot.sh | 13 +++++++------ src/builder.rs | 9 ++++++++- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/build_sysroot/build_sysroot.sh b/build_sysroot/build_sysroot.sh index 6aea9b94b48..7f2c8d6121d 100755 --- a/build_sysroot/build_sysroot.sh +++ b/build_sysroot/build_sysroot.sh @@ -30,9 +30,10 @@ mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ # Since we can't override the sysroot for the UI tests anymore, we create a new toolchain and manually overwrite the sysroot directory. -rust_toolchain=$(cat ../rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') -my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests-$rust_toolchain-$TARGET_TRIPLE -rm -rf $my_toolchain_dir -cp -r $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE $my_toolchain_dir -rm -rf $my_toolchain_dir/lib/rustlib/$TARGET_TRIPLE/ -cp -r ../build_sysroot/sysroot/* $my_toolchain_dir +# TODO: to remove. +#rust_toolchain=$(cat ../rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') +#my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests-$rust_toolchain-$TARGET_TRIPLE +#rm -rf $my_toolchain_dir +#cp -r $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE $my_toolchain_dir +#rm -rf $my_toolchain_dir/lib/rustlib/$TARGET_TRIPLE/ +#cp -r ../build_sysroot/sysroot/* $my_toolchain_dir diff --git a/src/builder.rs b/src/builder.rs index bb23524d8af..43d0aafbd50 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -181,6 +181,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { }) .collect(); + debug_assert_eq!(casted_args.len(), args.len()); + Cow::Owned(casted_args) } @@ -207,7 +209,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let func_name = format!("{:?}", func_ptr); - let casted_args: Vec<_> = param_types + let mut casted_args: Vec<_> = param_types .into_iter() .zip(args.iter()) .enumerate() @@ -237,6 +239,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { }) .collect(); + // NOTE: to take into account variadic functions. 
+ for i in casted_args.len()..args.len() { + casted_args.push(args[i]); + } + Cow::Owned(casted_args) } From 607cbfda14ab8a4292d34afdffdf817f81fe95ab Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 18 Jun 2023 19:40:12 -0400 Subject: [PATCH 111/112] Cleanup and update to rustc 2023-06-19 --- ...g-the-sysroot-compile-flag-via-rustc.patch | 27 ---------- build_sysroot/build_sysroot.sh | 9 ---- example/mini_core.rs | 10 ++-- patches/0023-core-Ignore-failing-tests.patch | 49 ------------------- rust-toolchain | 2 +- src/common.rs | 4 ++ test.sh | 7 --- 7 files changed, 9 insertions(+), 99 deletions(-) delete mode 100644 0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch delete mode 100644 patches/0023-core-Ignore-failing-tests.patch diff --git a/0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch b/0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch deleted file mode 100644 index f7860bd9105..00000000000 --- a/0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 8d5e85607d3d52f920990334ae1cfa9798ad9259 Mon Sep 17 00:00:00 2001 -From: Antoni Boucher -Date: Thu, 8 Jun 2023 17:27:34 -0400 -Subject: [PATCH] Allow overwriting the sysroot compile flag via --rustc-args - ---- - src/tools/compiletest/src/runtest.rs | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs -index 6582b534488..d16a7d66154 100644 ---- a/src/tools/compiletest/src/runtest.rs -+++ b/src/tools/compiletest/src/runtest.rs -@@ -1951,7 +1951,9 @@ fn make_compile_args( - rustc.arg("-Ztranslate-remapped-path-to-local-path=no"); - - // Optionally prevent default --sysroot if specified in test compile-flags. -- if !self.props.compile_flags.iter().any(|flag| flag.starts_with("--sysroot")) { -+ if !self.props.compile_flags.iter().any(|flag| flag.starts_with("--sysroot")) -+ && !self.config.host_rustcflags.iter().any(|flag| flag == "--sysroot") -+ { - // In stage 0, make sure we use `stage0-sysroot` instead of the bootstrap sysroot. - rustc.arg("--sysroot").arg(&self.config.sysroot_base); - } --- -2.41.0 - diff --git a/build_sysroot/build_sysroot.sh b/build_sysroot/build_sysroot.sh index 7f2c8d6121d..9d692d599f6 100755 --- a/build_sysroot/build_sysroot.sh +++ b/build_sysroot/build_sysroot.sh @@ -28,12 +28,3 @@ fi # Copy files to sysroot mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/ - -# Since we can't override the sysroot for the UI tests anymore, we create a new toolchain and manually overwrite the sysroot directory. -# TODO: to remove. -#rust_toolchain=$(cat ../rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') -#my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests-$rust_toolchain-$TARGET_TRIPLE -#rm -rf $my_toolchain_dir -#cp -r $HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE $my_toolchain_dir -#rm -rf $my_toolchain_dir/lib/rustlib/$TARGET_TRIPLE/ -#cp -r ../build_sysroot/sysroot/* $my_toolchain_dir diff --git a/example/mini_core.rs b/example/mini_core.rs index 835419efc3a..0cd7e6047c2 100644 --- a/example/mini_core.rs +++ b/example/mini_core.rs @@ -504,7 +504,10 @@ impl Box { impl Drop for Box { fn drop(&mut self) { - // drop is currently performed by compiler. + // inner value is dropped by compiler. 
+ unsafe { + libc::free(self.0.pointer.0 as *mut u8); + } } } @@ -521,11 +524,6 @@ unsafe fn allocate(size: usize, _align: usize) -> *mut u8 { libc::malloc(size) } -#[lang = "box_free"] -unsafe fn box_free(ptr: Unique, _alloc: ()) { - libc::free(ptr.pointer.0 as *mut u8); -} - #[lang = "drop"] pub trait Drop { fn drop(&mut self); diff --git a/patches/0023-core-Ignore-failing-tests.patch b/patches/0023-core-Ignore-failing-tests.patch deleted file mode 100644 index ee5ba449fb8..00000000000 --- a/patches/0023-core-Ignore-failing-tests.patch +++ /dev/null @@ -1,49 +0,0 @@ -From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001 -From: bjorn3 -Date: Sun, 24 Nov 2019 15:34:06 +0100 -Subject: [PATCH] [core] Ignore failing tests - ---- - library/core/tests/iter.rs | 4 ++++ - library/core/tests/num/bignum.rs | 10 ++++++++++ - library/core/tests/num/mod.rs | 5 +++-- - library/core/tests/time.rs | 1 + - 4 files changed, 18 insertions(+), 2 deletions(-) - -diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs -index 4bc44e9..8e3c7a4 100644 ---- a/library/core/tests/array.rs -+++ b/library/core/tests/array.rs -@@ -242,6 +242,7 @@ fn iterator_drops() { - assert_eq!(i.get(), 5); - } - -+/* - // This test does not work on targets without panic=unwind support. - // To work around this problem, test is marked is should_panic, so it will - // be automagically skipped on unsuitable targets, such as -@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() { - assert_eq!(COUNTER.load(Relaxed), 0); - panic!("test succeeded") - } -+*/ - - #[test] - fn empty_array_is_always_default() { -@@ -304,6 +304,7 @@ fn array_map() { - assert_eq!(b, [1, 2, 3]); - } - -+/* - // See note on above test for why `should_panic` is used. - #[test] - #[should_panic(expected = "test succeeded")] -@@ -332,6 +333,7 @@ fn array_map_drop_safety() { - assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create); - panic!("test succeeded") - } -+*/ - - #[test] - fn cell_allows_array_cycle() { --- 2.21.0 (Apple Git-122) diff --git a/rust-toolchain b/rust-toolchain index 2614fa081a8..ebb04d0069c 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2023-06-10" +channel = "nightly-2023-06-19" components = ["rust-src", "rustc-dev", "llvm-tools-preview"] diff --git a/src/common.rs b/src/common.rs index 7fa986e2737..b62f4676f70 100644 --- a/src/common.rs +++ b/src/common.rs @@ -108,6 +108,10 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.const_uint(self.type_u64(), i) } + fn const_u128(&self, i: u128) -> RValue<'gcc> { + self.const_uint_big(self.type_u128(), i) + } + fn const_usize(&self, i: u64) -> RValue<'gcc> { let bit_size = self.data_layout().pointer_size.bits(); if bit_size < 64 { diff --git a/test.sh b/test.sh index 2e485b92739..d12fe718a96 100755 --- a/test.sh +++ b/test.sh @@ -209,13 +209,10 @@ function setup_rustc() { cd rust git fetch git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') - git am ../0001-Allow-overwriting-the-sysroot-compile-flag-via-rustc.patch export RUSTFLAGS= rm config.toml || true - my_toolchain_dir=$HOME/.rustup/toolchains/codegen_gcc_ui_tests-$rust_toolchain-$TARGET_TRIPLE - cat > config.toml < Date: Sun, 18 Jun 2023 19:42:20 -0400 Subject: [PATCH 112/112] Fix indent --- src/asm.rs | 67 +++++++++++++++++++++++++++--------------------------- test.sh | 3 +-- 2 files changed, 34 insertions(+), 36 deletions(-) diff --git a/src/asm.rs b/src/asm.rs index be7ae603ca6..4c3b7f5036c 100644 --- a/src/asm.rs +++ 
b/src/asm.rs @@ -502,49 +502,48 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable"); let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) }; self.call(self.type_void(), None, None, builtin_unreachable, &[], None); - } + } - // Write results to outputs. - // - // We need to do this because: - // 1. Turning `PlaceRef` into `RValue` is error-prone and has nasty edge cases - // (especially with current `rustc_backend_ssa` API). - // 2. Not every output operand has an `out_place`, and it's required by `add_output_operand`. - // - // Instead, we generate a temporary output variable for each output operand, and then this loop, - // generates `out_place = tmp_var;` assignments if out_place exists. - for op in &outputs { - if let Some(place) = op.out_place { - OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place); + // Write results to outputs. + // + // We need to do this because: + // 1. Turning `PlaceRef` into `RValue` is error-prone and has nasty edge cases + // (especially with current `rustc_backend_ssa` API). + // 2. Not every output operand has an `out_place`, and it's required by `add_output_operand`. + // + // Instead, we generate a temporary output variable for each output operand, and then this loop, + // generates `out_place = tmp_var;` assignments if out_place exists. + for op in &outputs { + if let Some(place) = op.out_place { + OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place); + } } } - -} } fn estimate_template_length(template: &[InlineAsmTemplatePiece], constants_len: usize, att_dialect: bool) -> usize { -let len: usize = template.iter().map(|piece| { - match *piece { - InlineAsmTemplatePiece::String(ref string) => { - string.len() - } - InlineAsmTemplatePiece::Placeholder { .. } => { - // '%' + 1 char modifier + 1 char index - 3 + let len: usize = template.iter().map(|piece| { + match *piece { + InlineAsmTemplatePiece::String(ref string) => { + string.len() + } + InlineAsmTemplatePiece::Placeholder { .. } => { + // '%' + 1 char modifier + 1 char index + 3 + } } - } -}) -.sum(); + }) + .sum(); -// increase it by 5% to account for possible '%' signs that'll be duplicated -// I pulled the number out of blue, but should be fair enough -// as the upper bound -let mut res = (len as f32 * 1.05) as usize + constants_len; + // increase it by 5% to account for possible '%' signs that'll be duplicated + // I pulled the number out of blue, but should be fair enough + // as the upper bound + let mut res = (len as f32 * 1.05) as usize + constants_len; -if att_dialect { - res += INTEL_SYNTAX_INS.len() + ATT_SYNTAX_INS.len(); -} -res + if att_dialect { + res += INTEL_SYNTAX_INS.len() + ATT_SYNTAX_INS.len(); + } + res } /// Converts a register class to a GCC constraint code. diff --git a/test.sh b/test.sh index d12fe718a96..592997b8ab9 100755 --- a/test.sh +++ b/test.sh @@ -357,7 +357,6 @@ function test_rustc() { RUSTC_ARGS="$TEST_FLAGS -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot" - if [ $# -eq 0 ]; then # No argument supplied to the function. Doing nothing. echo "No argument provided. 
Keeping all UI tests"
@@ -388,7 +387,7 @@ function test_rustc() {
     fi
 
     echo "[TEST] rustc test suite"
-    COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/ui --rustc-args "$RUSTC_ARGS"
+    COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/ui/ --rustc-args "$RUSTC_ARGS"
 }
 
 function test_failing_rustc() {