Skip to content

Commit 650dca5

Browse files
authored
[IR] Remove the AtomicMem*Inst helper classes (llvm#138710)
Migrate their usage to the `AnyMem*Inst` family, and add a isAtomic() query on the base class for that hierarchy. This matches the idioms we use for e.g. isAtomic on load, store, etc. instructions, the existing isVolatile idioms on mem* routines, and allows us to more easily share code between atomic and non-atomic variants. As with llvm#138568, the goal here is to simplify the class hierarchy and make it easier to reason about. I'm moving from easiest to hardest, and will stop at some point when I hit "good enough". Longer term, I'd sorta like to merge or reverse the naming on the plain Mem*Inst and the AnyMem*Inst, but that's a much larger and more risky change. Not sure I'm going to actually do that.
1 parent 9d89b05 commit 650dca5

File tree

12 files changed

+65
-146
lines changed

12 files changed

+65
-146
lines changed

llvm/include/llvm/Analysis/MemoryLocation.h

-4
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,6 @@ class StoreInst;
3030
class MemTransferInst;
3131
class MemIntrinsic;
3232
class AtomicCmpXchgInst;
33-
class AtomicMemTransferInst;
34-
class AtomicMemIntrinsic;
3533
class AtomicRMWInst;
3634
class AnyMemTransferInst;
3735
class AnyMemIntrinsic;
@@ -253,13 +251,11 @@ class MemoryLocation {
253251

254252
/// Return a location representing the source of a memory transfer.
255253
static MemoryLocation getForSource(const MemTransferInst *MTI);
256-
static MemoryLocation getForSource(const AtomicMemTransferInst *MTI);
257254
static MemoryLocation getForSource(const AnyMemTransferInst *MTI);
258255

259256
/// Return a location representing the destination of a memory set or
260257
/// transfer.
261258
static MemoryLocation getForDest(const MemIntrinsic *MI);
262-
static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
263259
static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
264260
static std::optional<MemoryLocation> getForDest(const CallBase *CI,
265261
const TargetLibraryInfo &TLI);

llvm/include/llvm/IR/IntrinsicInst.h

+24-94
Original file line numberDiff line numberDiff line change
@@ -1107,100 +1107,6 @@ template <class BaseCL> class MemSetBase : public BaseCL {
11071107
}
11081108
};
11091109

1110-
// The common base class for the atomic memset/memmove/memcpy intrinsics
1111-
// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
1112-
class AtomicMemIntrinsic : public MemIntrinsicBase<AtomicMemIntrinsic> {
1113-
private:
1114-
enum { ARG_ELEMENTSIZE = 3 };
1115-
1116-
public:
1117-
Value *getRawElementSizeInBytes() const {
1118-
return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
1119-
}
1120-
1121-
ConstantInt *getElementSizeInBytesCst() const {
1122-
return cast<ConstantInt>(getRawElementSizeInBytes());
1123-
}
1124-
1125-
uint32_t getElementSizeInBytes() const {
1126-
return getElementSizeInBytesCst()->getZExtValue();
1127-
}
1128-
1129-
void setElementSizeInBytes(Constant *V) {
1130-
assert(V->getType() == Type::getInt8Ty(getContext()) &&
1131-
"setElementSizeInBytes called with value of wrong type!");
1132-
setArgOperand(ARG_ELEMENTSIZE, V);
1133-
}
1134-
1135-
static bool classof(const IntrinsicInst *I) {
1136-
switch (I->getIntrinsicID()) {
1137-
case Intrinsic::memcpy_element_unordered_atomic:
1138-
case Intrinsic::memmove_element_unordered_atomic:
1139-
case Intrinsic::memset_element_unordered_atomic:
1140-
return true;
1141-
default:
1142-
return false;
1143-
}
1144-
}
1145-
static bool classof(const Value *V) {
1146-
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
1147-
}
1148-
};
1149-
1150-
/// This class represents atomic memset intrinsic
1151-
// i.e. llvm.element.unordered.atomic.memset
1152-
class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> {
1153-
public:
1154-
static bool classof(const IntrinsicInst *I) {
1155-
return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
1156-
}
1157-
static bool classof(const Value *V) {
1158-
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
1159-
}
1160-
};
1161-
1162-
// This class wraps the atomic memcpy/memmove intrinsics
1163-
// i.e. llvm.element.unordered.atomic.memcpy/memmove
1164-
class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> {
1165-
public:
1166-
static bool classof(const IntrinsicInst *I) {
1167-
switch (I->getIntrinsicID()) {
1168-
case Intrinsic::memcpy_element_unordered_atomic:
1169-
case Intrinsic::memmove_element_unordered_atomic:
1170-
return true;
1171-
default:
1172-
return false;
1173-
}
1174-
}
1175-
static bool classof(const Value *V) {
1176-
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
1177-
}
1178-
};
1179-
1180-
/// This class represents the atomic memcpy intrinsic
1181-
/// i.e. llvm.element.unordered.atomic.memcpy
1182-
class AtomicMemCpyInst : public AtomicMemTransferInst {
1183-
public:
1184-
static bool classof(const IntrinsicInst *I) {
1185-
return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic;
1186-
}
1187-
static bool classof(const Value *V) {
1188-
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
1189-
}
1190-
};
1191-
1192-
/// This class represents the atomic memmove intrinsic
1193-
/// i.e. llvm.element.unordered.atomic.memmove
1194-
class AtomicMemMoveInst : public AtomicMemTransferInst {
1195-
public:
1196-
static bool classof(const IntrinsicInst *I) {
1197-
return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic;
1198-
}
1199-
static bool classof(const Value *V) {
1200-
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
1201-
}
1202-
};
1203-
12041110
/// This is the common base class for memset/memcpy/memmove.
12051111
class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
12061112
private:
@@ -1345,6 +1251,9 @@ class MemMoveInst : public MemTransferInst {
13451251
// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
13461252
// and llvm.memset/memcpy/memmove
13471253
class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
1254+
private:
1255+
enum { ARG_ELEMENTSIZE = 3 };
1256+
13481257
public:
13491258
bool isVolatile() const {
13501259
// Only the non-atomic intrinsics can be volatile
@@ -1353,6 +1262,17 @@ class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
13531262
return false;
13541263
}
13551264

1265+
bool isAtomic() const {
1266+
switch (getIntrinsicID()) {
1267+
case Intrinsic::memcpy_element_unordered_atomic:
1268+
case Intrinsic::memmove_element_unordered_atomic:
1269+
case Intrinsic::memset_element_unordered_atomic:
1270+
return true;
1271+
default:
1272+
return false;
1273+
}
1274+
}
1275+
13561276
static bool classof(const IntrinsicInst *I) {
13571277
switch (I->getIntrinsicID()) {
13581278
case Intrinsic::memcpy:
@@ -1371,6 +1291,16 @@ class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
13711291
static bool classof(const Value *V) {
13721292
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
13731293
}
1294+
1295+
Value *getRawElementSizeInBytes() const {
1296+
assert(isAtomic());
1297+
return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
1298+
}
1299+
1300+
uint32_t getElementSizeInBytes() const {
1301+
assert(isAtomic());
1302+
return cast<ConstantInt>(getRawElementSizeInBytes())->getZExtValue();
1303+
}
13741304
};
13751305

13761306
/// This class represents any memset intrinsic

llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919

2020
namespace llvm {
2121

22-
class AtomicMemCpyInst;
22+
class AnyMemCpyInst;
2323
class ConstantInt;
2424
class Instruction;
2525
class MemCpyInst;
@@ -62,10 +62,10 @@ void expandMemSetAsLoop(MemSetInst *MemSet);
6262
void expandMemSetPatternAsLoop(MemSetPatternInst *MemSet);
6363

6464
/// Expand \p AtomicMemCpy as a loop. \p AtomicMemCpy is not deleted.
65-
void expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemCpy,
65+
void expandAtomicMemCpyAsLoop(AnyMemCpyInst *AtomicMemCpy,
6666
const TargetTransformInfo &TTI,
6767
ScalarEvolution *SE);
6868

69-
} // End llvm namespace
69+
} // namespace llvm
7070

7171
#endif

llvm/lib/Analysis/MemoryLocation.cpp

-8
Original file line numberDiff line numberDiff line change
@@ -95,10 +95,6 @@ MemoryLocation MemoryLocation::getForSource(const MemTransferInst *MTI) {
9595
return getForSource(cast<AnyMemTransferInst>(MTI));
9696
}
9797

98-
MemoryLocation MemoryLocation::getForSource(const AtomicMemTransferInst *MTI) {
99-
return getForSource(cast<AnyMemTransferInst>(MTI));
100-
}
101-
10298
MemoryLocation MemoryLocation::getForSource(const AnyMemTransferInst *MTI) {
10399
assert(MTI->getRawSource() == MTI->getArgOperand(1));
104100
return getForArgument(MTI, 1, nullptr);
@@ -108,10 +104,6 @@ MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MI) {
108104
return getForDest(cast<AnyMemIntrinsic>(MI));
109105
}
110106

111-
MemoryLocation MemoryLocation::getForDest(const AtomicMemIntrinsic *MI) {
112-
return getForDest(cast<AnyMemIntrinsic>(MI));
113-
}
114-
115107
MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
116108
assert(MI->getRawDest() == MI->getArgOperand(0));
117109
return getForArgument(MI, 0, nullptr);

llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -6525,7 +6525,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
65256525
return;
65266526
}
65276527
case Intrinsic::memcpy_element_unordered_atomic: {
6528-
const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6528+
auto &MI = cast<AnyMemCpyInst>(I);
65296529
SDValue Dst = getValue(MI.getRawDest());
65306530
SDValue Src = getValue(MI.getRawSource());
65316531
SDValue Length = getValue(MI.getLength());
@@ -6541,7 +6541,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
65416541
return;
65426542
}
65436543
case Intrinsic::memmove_element_unordered_atomic: {
6544-
auto &MI = cast<AtomicMemMoveInst>(I);
6544+
auto &MI = cast<AnyMemMoveInst>(I);
65456545
SDValue Dst = getValue(MI.getRawDest());
65466546
SDValue Src = getValue(MI.getRawSource());
65476547
SDValue Length = getValue(MI.getLength());
@@ -6557,7 +6557,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
65576557
return;
65586558
}
65596559
case Intrinsic::memset_element_unordered_atomic: {
6560-
auto &MI = cast<AtomicMemSetInst>(I);
6560+
auto &MI = cast<AnyMemSetInst>(I);
65616561
SDValue Dst = getValue(MI.getRawDest());
65626562
SDValue Val = getValue(MI.getValue());
65636563
SDValue Length = getValue(MI.getLength());

llvm/lib/IR/IRBuilder.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,7 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
228228
CallInst *CI =
229229
CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);
230230

231-
cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
231+
cast<AnyMemSetInst>(CI)->setDestAlignment(Alignment);
232232

233233
// Set the TBAA info if present.
234234
if (TBAATag)
@@ -293,7 +293,7 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
293293
CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);
294294

295295
// Set the alignment of the pointer args.
296-
auto *AMCI = cast<AtomicMemCpyInst>(CI);
296+
auto *AMCI = cast<AnyMemCpyInst>(CI);
297297
AMCI->setDestAlignment(DstAlign);
298298
AMCI->setSourceAlignment(SrcAlign);
299299

llvm/lib/IR/Verifier.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -5617,7 +5617,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
56175617
case Intrinsic::memcpy_element_unordered_atomic:
56185618
case Intrinsic::memmove_element_unordered_atomic:
56195619
case Intrinsic::memset_element_unordered_atomic: {
5620-
const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5620+
const auto *AMI = cast<AnyMemIntrinsic>(&Call);
56215621

56225622
ConstantInt *ElementSizeCI =
56235623
cast<ConstantInt>(AMI->getRawElementSizeInBytes());
@@ -5632,7 +5632,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
56325632
};
56335633
Check(IsValidAlignment(AMI->getDestAlign()),
56345634
"incorrect alignment of the destination argument", Call);
5635-
if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5635+
if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
56365636
Check(IsValidAlignment(AMT->getSourceAlign()),
56375637
"incorrect alignment of the source argument", Call);
56385638
}

llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp

+21-22
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,7 @@ Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
164164
// introduce the unaligned memory access which will be later transformed
165165
// into libcall in CodeGen. This is not evident performance gain so disable
166166
// it now.
167-
if (isa<AtomicMemTransferInst>(MI))
167+
if (MI->isAtomic())
168168
if (*CopyDstAlign < Size || *CopySrcAlign < Size)
169169
return nullptr;
170170

@@ -204,7 +204,7 @@ Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
204204
L->setVolatile(MT->isVolatile());
205205
S->setVolatile(MT->isVolatile());
206206
}
207-
if (isa<AtomicMemTransferInst>(MI)) {
207+
if (MI->isAtomic()) {
208208
// atomics have to be unordered
209209
L->setOrdering(AtomicOrdering::Unordered);
210210
S->setOrdering(AtomicOrdering::Unordered);
@@ -255,9 +255,8 @@ Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
255255
// introduce the unaligned memory access which will be later transformed
256256
// into libcall in CodeGen. This is not evident performance gain so disable
257257
// it now.
258-
if (isa<AtomicMemSetInst>(MI))
259-
if (Alignment < Len)
260-
return nullptr;
258+
if (MI->isAtomic() && Alignment < Len)
259+
return nullptr;
261260

262261
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
263262
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
@@ -276,7 +275,7 @@ Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
276275
for_each(at::getDVRAssignmentMarkers(S), replaceOpForAssignmentMarkers);
277276

278277
S->setAlignment(Alignment);
279-
if (isa<AtomicMemSetInst>(MI))
278+
if (MI->isAtomic())
280279
S->setOrdering(AtomicOrdering::Unordered);
281280

282281
// Set the size of the copy to 0, it will be deleted on the next iteration.
@@ -1654,27 +1653,27 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
16541653
}
16551654

16561655
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1657-
if (!II) return visitCallBase(CI);
1658-
1659-
// For atomic unordered mem intrinsics if len is not a positive or
1660-
// not a multiple of element size then behavior is undefined.
1661-
if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
1662-
if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
1663-
if (NumBytes->isNegative() ||
1664-
(NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
1665-
CreateNonTerminatorUnreachable(AMI);
1666-
assert(AMI->getType()->isVoidTy() &&
1667-
"non void atomic unordered mem intrinsic");
1668-
return eraseInstFromFunction(*AMI);
1669-
}
1656+
if (!II)
1657+
return visitCallBase(CI);
16701658

16711659
// Intrinsics cannot occur in an invoke or a callbr, so handle them here
16721660
// instead of in visitCallBase.
16731661
if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1674-
// memmove/cpy/set of zero bytes is a noop.
1675-
if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1662+
if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(MI->getLength())) {
1663+
// memmove/cpy/set of zero bytes is a noop.
16761664
if (NumBytes->isNullValue())
16771665
return eraseInstFromFunction(CI);
1666+
1667+
// For atomic unordered mem intrinsics if len is not a positive or
1668+
// not a multiple of element size then behavior is undefined.
1669+
if (MI->isAtomic() &&
1670+
(NumBytes->isNegative() ||
1671+
(NumBytes->getZExtValue() % MI->getElementSizeInBytes() != 0))) {
1672+
CreateNonTerminatorUnreachable(MI);
1673+
assert(MI->getType()->isVoidTy() &&
1674+
"non void atomic unordered mem intrinsic");
1675+
return eraseInstFromFunction(*MI);
1676+
}
16781677
}
16791678

16801679
// No other transformations apply to volatile transfers.
@@ -1719,7 +1718,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
17191718
if (GVSrc->isConstant()) {
17201719
Module *M = CI.getModule();
17211720
Intrinsic::ID MemCpyID =
1722-
isa<AtomicMemMoveInst>(MMI)
1721+
MMI->isAtomic()
17231722
? Intrinsic::memcpy_element_unordered_atomic
17241723
: Intrinsic::memcpy;
17251724
Type *Tys[3] = { CI.getArgOperand(0)->getType(),

llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -670,10 +670,10 @@ static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
670670
assert(DeadSize > ToRemoveSize && "Can't remove more than original size");
671671

672672
uint64_t NewSize = DeadSize - ToRemoveSize;
673-
if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
673+
if (DeadIntrinsic->isAtomic()) {
674674
// When shortening an atomic memory intrinsic, the newly shortened
675675
// length must remain an integer multiple of the element size.
676-
const uint32_t ElementSize = AMI->getElementSizeInBytes();
676+
const uint32_t ElementSize = DeadIntrinsic->getElementSizeInBytes();
677677
if (0 != NewSize % ElementSize)
678678
return false;
679679
}

llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -3054,7 +3054,8 @@ bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
30543054
// non-leaf memcpy/memmove without deopt state just treat it as a leaf
30553055
// copy and don't produce a statepoint.
30563056
if (!AllowStatepointWithNoDeoptInfo && !Call->hasDeoptState()) {
3057-
assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) &&
3057+
assert(isa<AnyMemTransferInst>(Call) &&
3058+
cast<AnyMemTransferInst>(Call)->isAtomic() &&
30583059
"Don't expect any other calls here!");
30593060
return false;
30603061
}

0 commit comments

Comments
 (0)