Skip to content

Commit 9067070

Browse files
authored
[RISCV] Re-separate unaligned scalar and vector memory features in the backend. (#88954)
This is largely a revert of commit e817966. As #88029 shows, there exists hardware that only supports unaligned scalar. I'm leaving how this gets exposed to the clang interface to a future patch.
1 parent 8aa061f commit 9067070

21 files changed

+71
-49
lines changed

clang/lib/Basic/Targets/RISCV.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -353,7 +353,8 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
353353
if (ISAInfo->hasExtension("zfh") || ISAInfo->hasExtension("zhinx"))
354354
HasLegalHalfType = true;
355355

356-
FastUnalignedAccess = llvm::is_contained(Features, "+fast-unaligned-access");
356+
FastUnalignedAccess = llvm::is_contained(Features, "+unaligned-scalar-mem") &&
357+
llvm::is_contained(Features, "+unaligned-vector-mem");
357358

358359
if (llvm::is_contained(Features, "+experimental"))
359360
HasExperimental = true;

clang/lib/Driver/ToolChains/Arch/RISCV.cpp

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -68,8 +68,10 @@ static void getRISCFeaturesFromMcpu(const Driver &D, const Arg *A,
6868
<< A->getSpelling() << Mcpu;
6969
}
7070

71-
if (llvm::RISCV::hasFastUnalignedAccess(Mcpu))
72-
Features.push_back("+fast-unaligned-access");
71+
if (llvm::RISCV::hasFastUnalignedAccess(Mcpu)) {
72+
Features.push_back("+unaligned-scalar-mem");
73+
Features.push_back("+unaligned-vector-mem");
74+
}
7375
}
7476

7577
void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
@@ -168,12 +170,16 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
168170
}
169171

170172
// Android requires fast unaligned access on RISCV64.
171-
if (Triple.isAndroid())
172-
Features.push_back("+fast-unaligned-access");
173+
if (Triple.isAndroid()) {
174+
Features.push_back("+unaligned-scalar-mem");
175+
Features.push_back("+unaligned-vector-mem");
176+
}
173177

174178
// -mstrict-align is default, unless -mno-strict-align is specified.
175179
AddTargetFeature(Args, Features, options::OPT_mno_strict_align,
176-
options::OPT_mstrict_align, "fast-unaligned-access");
180+
options::OPT_mstrict_align, "unaligned-scalar-mem");
181+
AddTargetFeature(Args, Features, options::OPT_mno_strict_align,
182+
options::OPT_mstrict_align, "unaligned-vector-mem");
177183

178184
// Now add any that the user explicitly requested on the command line,
179185
// which may override the defaults.

clang/test/Driver/riscv-features.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,8 +38,8 @@
3838
// RUN: %clang --target=riscv32-unknown-elf -### %s -mno-strict-align 2>&1 | FileCheck %s -check-prefix=FAST-UNALIGNED-ACCESS
3939
// RUN: %clang --target=riscv32-unknown-elf -### %s -mstrict-align 2>&1 | FileCheck %s -check-prefix=NO-FAST-UNALIGNED-ACCESS
4040

41-
// FAST-UNALIGNED-ACCESS: "-target-feature" "+fast-unaligned-access"
42-
// NO-FAST-UNALIGNED-ACCESS: "-target-feature" "-fast-unaligned-access"
41+
// FAST-UNALIGNED-ACCESS: "-target-feature" "+unaligned-scalar-mem" "-target-feature" "+unaligned-vector-mem"
42+
// NO-FAST-UNALIGNED-ACCESS: "-target-feature" "-unaligned-scalar-mem" "-target-feature" "-unaligned-vector-mem"
4343

4444
// RUN: %clang --target=riscv32-unknown-elf -### %s 2>&1 | FileCheck %s -check-prefix=NOUWTABLE
4545
// RUN: %clang --target=riscv32-unknown-elf -fasynchronous-unwind-tables -### %s 2>&1 | FileCheck %s -check-prefix=UWTABLE

llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -326,8 +326,8 @@ bool RISCVExpandPseudo::expandRV32ZdinxStore(MachineBasicBlock &MBB,
326326
.setMemRefs(MMOLo);
327327

328328
if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) {
329-
// FIXME: Zdinx RV32 can not work on unaligned memory.
330-
assert(!STI->hasFastUnalignedAccess());
329+
// FIXME: Zdinx RV32 can not work on unaligned scalar memory.
330+
assert(!STI->enableUnalignedScalarMem());
331331

332332
assert(MBBI->getOperand(2).getOffset() % 8 == 0);
333333
MBBI->getOperand(2).setOffset(MBBI->getOperand(2).getOffset() + 4);

llvm/lib/Target/RISCV/RISCVFeatures.td

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1183,10 +1183,15 @@ def FeatureTrailingSeqCstFence : SubtargetFeature<"seq-cst-trailing-fence",
11831183
"true",
11841184
"Enable trailing fence for seq-cst store.">;
11851185

1186-
def FeatureFastUnalignedAccess
1187-
: SubtargetFeature<"fast-unaligned-access", "HasFastUnalignedAccess",
1188-
"true", "Has reasonably performant unaligned "
1189-
"loads and stores (both scalar and vector)">;
1186+
def FeatureUnalignedScalarMem
1187+
: SubtargetFeature<"unaligned-scalar-mem", "EnableUnalignedScalarMem",
1188+
"true", "Has reasonably performant unaligned scalar "
1189+
"loads and stores">;
1190+
1191+
def FeatureUnalignedVectorMem
1192+
: SubtargetFeature<"unaligned-vector-mem", "EnableUnalignedVectorMem",
1193+
"true", "Has reasonably performant unaligned vector "
1194+
"loads and stores">;
11901195

11911196
def FeaturePostRAScheduler : SubtargetFeature<"use-postra-scheduler",
11921197
"UsePostRAScheduler", "true", "Schedule again after register allocation">;

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1924,7 +1924,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
19241924
// replace. If we don't support unaligned scalar mem, prefer the constant
19251925
// pool.
19261926
// TODO: Can the caller pass down the alignment?
1927-
if (!Subtarget.hasFastUnalignedAccess())
1927+
if (!Subtarget.enableUnalignedScalarMem())
19281928
return true;
19291929

19301930
// Prefer to keep the load if it would require many instructions.
@@ -15837,7 +15837,7 @@ static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask,
1583715837
if (WiderElementSize > ST.getELen()/8)
1583815838
return false;
1583915839

15840-
if (!ST.hasFastUnalignedAccess() && BaseAlign < WiderElementSize)
15840+
if (!ST.enableUnalignedVectorMem() && BaseAlign < WiderElementSize)
1584115841
return false;
1584215842

1584315843
for (unsigned i = 0; i < Index->getNumOperands(); i++) {
@@ -20663,8 +20663,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
2066320663
unsigned *Fast) const {
2066420664
if (!VT.isVector()) {
2066520665
if (Fast)
20666-
*Fast = Subtarget.hasFastUnalignedAccess();
20667-
return Subtarget.hasFastUnalignedAccess();
20666+
*Fast = Subtarget.enableUnalignedScalarMem();
20667+
return Subtarget.enableUnalignedScalarMem();
2066820668
}
2066920669

2067020670
// All vector implementations must support element alignment
@@ -20680,8 +20680,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
2068020680
// misaligned accesses. TODO: Work through the codegen implications of
2068120681
// allowing such accesses to be formed, and considered fast.
2068220682
if (Fast)
20683-
*Fast = Subtarget.hasFastUnalignedAccess();
20684-
return Subtarget.hasFastUnalignedAccess();
20683+
*Fast = Subtarget.enableUnalignedVectorMem();
20684+
return Subtarget.enableUnalignedVectorMem();
2068520685
}
2068620686

2068720687

@@ -20716,7 +20716,7 @@ EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,
2071620716

2071720717
// Do we have sufficient alignment for our preferred VT? If not, revert
2071820718
// to largest size allowed by our alignment criteria.
20719-
if (PreferredVT != MVT::i8 && !Subtarget.hasFastUnalignedAccess()) {
20719+
if (PreferredVT != MVT::i8 && !Subtarget.enableUnalignedVectorMem()) {
2072020720
Align RequiredAlign(PreferredVT.getStoreSize());
2072120721
if (Op.isFixedDstAlign())
2072220722
RequiredAlign = std::min(RequiredAlign, Op.getDstAlign());
@@ -20908,7 +20908,7 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
2090820908
if (!isLegalElementTypeForRVV(ScalarType))
2090920909
return false;
2091020910

20911-
if (!Subtarget.hasFastUnalignedAccess() &&
20911+
if (!Subtarget.enableUnalignedVectorMem() &&
2091220912
Alignment < ScalarType.getStoreSize())
2091320913
return false;
2091420914

llvm/lib/Target/RISCV/RISCVProcessors.td

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -257,7 +257,8 @@ def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model,
257257
FeatureStdExtZbb,
258258
FeatureStdExtZbs,
259259
FeatureStdExtZfhmin,
260-
FeatureFastUnalignedAccess],
260+
FeatureUnalignedScalarMem,
261+
FeatureUnalignedVectorMem],
261262
[TuneNoDefaultUnroll,
262263
TuneConditionalCompressedMoveFusion,
263264
TuneLUIADDIFusion,
@@ -295,7 +296,8 @@ def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model,
295296
FeatureStdExtZvkng,
296297
FeatureStdExtZvksc,
297298
FeatureStdExtZvksg,
298-
FeatureFastUnalignedAccess],
299+
FeatureUnalignedScalarMem,
300+
FeatureUnalignedVectorMem],
299301
[TuneNoDefaultUnroll,
300302
TuneConditionalCompressedMoveFusion,
301303
TuneLUIADDIFusion,

llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
228228
return false;
229229

230230
EVT ElemType = DataTypeVT.getScalarType();
231-
if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize())
231+
if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
232232
return false;
233233

234234
return TLI->isLegalElementTypeForRVV(ElemType);
@@ -253,7 +253,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
253253
return false;
254254

255255
EVT ElemType = DataTypeVT.getScalarType();
256-
if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize())
256+
if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
257257
return false;
258258

259259
return TLI->isLegalElementTypeForRVV(ElemType);

llvm/test/CodeGen/RISCV/memcpy-inline.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
44
; RUN: llc < %s -mtriple=riscv64 \
55
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
6-
; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access \
6+
; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
77
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
8-
; RUN: llc < %s -mtriple=riscv64 -mattr=+fast-unaligned-access \
8+
; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
99
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
1010

1111
; ----------------------------------------------------------------------

llvm/test/CodeGen/RISCV/memcpy.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
44
; RUN: llc < %s -mtriple=riscv64 \
55
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
6-
; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access \
6+
; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
77
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
8-
; RUN: llc < %s -mtriple=riscv64 -mattr=+fast-unaligned-access \
8+
; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
99
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
1010
%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
1111

llvm/test/CodeGen/RISCV/memset-inline.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
44
; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
55
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
6-
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+fast-unaligned-access \
6+
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+unaligned-scalar-mem \
77
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
8-
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+fast-unaligned-access \
8+
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+unaligned-scalar-mem \
99
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
1010
%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
1111

llvm/test/CodeGen/RISCV/pr56110.ll

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
22
; RUN: llc < %s -mtriple=riscv32 | FileCheck %s
3-
; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access | FileCheck %s
3+
; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem | FileCheck %s
44

55
define void @foo_set(ptr nocapture noundef %a, i32 noundef %v) {
66
; CHECK-LABEL: foo_set:

llvm/test/CodeGen/RISCV/riscv-func-target-feature.ll

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ entry:
3636
}
3737

3838
; CHECK-NOT: .option push
39-
define void @test5() "target-features"="+fast-unaligned-access" {
39+
define void @test5() "target-features"="+unaligned-scalar-mem" {
4040
; CHECK-LABEL: test5
4141
; CHECK-NOT: .option pop
4242
entry:

llvm/test/CodeGen/RISCV/rvv/concat-vectors-constant-stride.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2-
; RUN: llc -mtriple=riscv32 -mattr=+v,+fast-unaligned-access -target-abi=ilp32 \
2+
; RUN: llc -mtriple=riscv32 -mattr=+v,+unaligned-vector-mem -target-abi=ilp32 \
33
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
4-
; RUN: llc -mtriple=riscv64 -mattr=+v,+fast-unaligned-access -target-abi=lp64 \
4+
; RUN: llc -mtriple=riscv64 -mattr=+v,+unaligned-vector-mem -target-abi=lp64 \
55
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
66

77
define void @constant_forward_stride(ptr %s, ptr %d) {

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
22
; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,RV32
33
; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,RV64
4-
; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+fast-unaligned-access -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64,RV64-MISALIGN
4+
; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+unaligned-vector-mem -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64,RV64-MISALIGN
55

66
; RUN: llc -mtriple=riscv64 -mattr=+f,+zfh,+zve64f,+zvl128b,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,ZVE64F
77

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33
; RUN: | FileCheck %s --check-prefixes=SLOW,RV32-SLOW
44
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s \
55
; RUN: | FileCheck %s --check-prefixes=SLOW,RV64-SLOW
6-
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+fast-unaligned-access -verify-machineinstrs < %s \
6+
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+unaligned-vector-mem -verify-machineinstrs < %s \
77
; RUN: | FileCheck %s --check-prefixes=FAST,RV32-FAST
8-
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+fast-unaligned-access -verify-machineinstrs < %s \
8+
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+unaligned-vector-mem -verify-machineinstrs < %s \
99
; RUN: | FileCheck %s --check-prefixes=FAST,RV64-FAST
1010

1111
define <4 x i32> @load_v4i32_align1(ptr %ptr) {

llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
44
; RUN: llc < %s -mtriple=riscv64 -mattr=+v \
55
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
6-
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+fast-unaligned-access \
6+
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem \
77
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
8-
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+fast-unaligned-access \
8+
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem \
99
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
1010

1111
; ----------------------------------------------------------------------

llvm/test/CodeGen/RISCV/rvv/memset-inline.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
44
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v \
55
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
6-
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+fast-unaligned-access \
6+
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+unaligned-scalar-mem,+unaligned-vector-mem \
77
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
8-
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+fast-unaligned-access \
8+
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+unaligned-scalar-mem,+unaligned-vector-mem \
99
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
1010
%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
1111

llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33
; RUN: -verify-machineinstrs | FileCheck %s
44
; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v < %s \
55
; RUN: -verify-machineinstrs | FileCheck %s
6-
; RUN: llc -mtriple riscv32 -mattr=+d,+zfh,+zvfh,+v,+fast-unaligned-access < %s \
6+
; RUN: llc -mtriple riscv32 -mattr=+d,+zfh,+zvfh,+v,+unaligned-vector-mem < %s \
77
; RUN: -verify-machineinstrs | FileCheck --check-prefix=FAST %s
8-
; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v,+fast-unaligned-access < %s \
8+
; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v,+unaligned-vector-mem < %s \
99
; RUN: -verify-machineinstrs | FileCheck --check-prefix=FAST %s
1010

1111

llvm/test/CodeGen/RISCV/unaligned-load-store.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33
; RUN: | FileCheck -check-prefixes=ALL,SLOW,RV32I %s
44
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
55
; RUN: | FileCheck -check-prefixes=ALL,SLOW,RV64I %s
6-
; RUN: llc -mtriple=riscv32 -mattr=+fast-unaligned-access -verify-machineinstrs < %s \
6+
; RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
77
; RUN: | FileCheck -check-prefixes=ALL,FAST,RV32I-FAST %s
8-
; RUN: llc -mtriple=riscv64 -mattr=+fast-unaligned-access -verify-machineinstrs < %s \
8+
; RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
99
; RUN: | FileCheck -check-prefixes=ALL,FAST,RV64I-FAST %s
1010

1111
; A collection of cases showing codegen for unaligned loads and stores

llvm/utils/TableGen/RISCVTargetDefEmitter.cpp

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -60,11 +60,19 @@ static void EmitRISCVTargetDef(RecordKeeper &RK, raw_ostream &OS) {
6060
if (MArch.empty())
6161
MArch = getMArch(*Rec);
6262

63-
const bool FastUnalignedAccess =
63+
bool FastScalarUnalignedAccess =
6464
any_of(Rec->getValueAsListOfDefs("Features"), [&](auto &Feature) {
65-
return Feature->getValueAsString("Name") == "fast-unaligned-access";
65+
return Feature->getValueAsString("Name") == "unaligned-scalar-mem";
6666
});
6767

68+
bool FastVectorUnalignedAccess =
69+
any_of(Rec->getValueAsListOfDefs("Features"), [&](auto &Feature) {
70+
return Feature->getValueAsString("Name") == "unaligned-vector-mem";
71+
});
72+
73+
bool FastUnalignedAccess =
74+
FastScalarUnalignedAccess && FastVectorUnalignedAccess;
75+
6876
OS << "PROC(" << Rec->getName() << ", "
6977
<< "{\"" << Rec->getValueAsString("Name") << "\"}, "
7078
<< "{\"" << MArch << "\"}, " << FastUnalignedAccess << ")\n";

0 commit comments

Comments
 (0)