#define DEBUG_TYPE "atomic-expand"
class AtomicExpandImpl {
  // ...
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  // ...
  void expandAtomicStoreToXChg(StoreInst *SI);
  // ...
  void expandAtomicOpToLLSC(/* ... */);
  // ...
  void expandPartwordAtomicRMW(/* ... */);
  // ...
  static Value *insertRMWCmpXchgLoop(/* ... */);
  // ...
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *LI);
  // ...
};
struct ReplacementIRBuilder
    : IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> {
  MDNode *MMRAMD = nullptr;

  // Preserves the DebugLoc from I, and preserves still-valid metadata.
  // Enables StrictFP if enabled in the function.
  explicit ReplacementIRBuilder(Instruction *I, const DataLayout &DL)
      : IRBuilder(/* ... insertion callback that re-attaches MMRA metadata
                     via addMMRAMD() ... */) {
    // ...
    if (BB->getParent()->getAttributes().hasFnAttr(Attribute::StrictFP))
      this->setIsFPConstrained(true);

    MMRAMD = I->getMetadata(LLVMContext::MD_mmra);
  }

  void addMMRAMD(Instruction *I) {
    if (canInstructionHaveMMRAs(*I))
      I->setMetadata(LLVMContext::MD_mmra, MMRAMD);
  }
};
char AtomicExpandLegacy::ID = 0;
// ...
INITIALIZE_PASS_END(AtomicExpandLegacy, DEBUG_TYPE,
                    "Expand Atomic instructions", false, false)
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}
// Copy metadata that's safe to preserve when widening atomics.
static void copyMetadataForAtomic(Instruction &Dest,
                                  const Instruction &Source) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  Source.getAllMetadata(MD);
  LLVMContext &Ctx = Dest.getContext();
  // ...
  for (auto [ID, N] : MD) {
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_noalias_addrspace:
    case LLVMContext::MD_access_group:
    case LLVMContext::MD_mmra:
      Dest.setMetadata(ID, N);
      break;
    default:
      // Target-dependent kinds are matched by name.
      if (/* ... */)
        // ...
      else if (ID == Ctx.getMDKindID("amdgpu.no.fine.grained.memory"))
        Dest.setMetadata(ID, N);
      break;
    }
  }
}
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
  auto *LI = dyn_cast<LoadInst>(I);
  auto *SI = dyn_cast<StoreInst>(I);
  auto *RMWI = dyn_cast<AtomicRMWInst>(I);
  auto *CASI = dyn_cast<AtomicCmpXchgInst>(I);
  // ...
  bool MadeChange = false;

  // Lower operations the target cannot handle natively to libcalls;
  // otherwise give the target the option of first casting the operation
  // to an integer type of the same width.
  if (LI) {
    // ...
    if (!atomicSizeSupported(TLI, LI)) {
      expandAtomicLoadToLibcall(LI);
      return true;
    }
    if (TLI->shouldCastAtomicLoadInIR(LI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = LI = convertAtomicLoadToIntegerType(LI);
      MadeChange = true;
    }
  } else if (SI) {
    // ...
    if (!atomicSizeSupported(TLI, SI)) {
      expandAtomicStoreToLibcall(SI);
      return true;
    }
    if (TLI->shouldCastAtomicStoreInIR(SI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = SI = convertAtomicStoreToIntegerType(SI);
      MadeChange = true;
    }
  } else if (RMWI) {
    // ...
    if (!atomicSizeSupported(TLI, RMWI)) {
      expandAtomicRMWToLibcall(RMWI);
      return true;
    }
    if (TLI->shouldCastAtomicRMWIInIR(RMWI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = RMWI = convertAtomicXchgToIntegerType(RMWI);
      MadeChange = true;
    }
  } else if (CASI) {
    // ...
    if (!atomicSizeSupported(TLI, CASI)) {
      expandAtomicCASToLibcall(CASI);
      return true;
    }
    if (CASI->getCompareOperand()->getType()->isPointerTy()) {
      // ...
      I = CASI = convertCmpXchgToIntegerType(CASI);
      MadeChange = true;
    }
  }

  if (TLI->shouldInsertFencesForAtomic(I)) {
    auto FenceOrdering = AtomicOrdering::Monotonic;
    if (LI && isAcquireOrStronger(LI->getOrdering())) {
      FenceOrdering = LI->getOrdering();
      LI->setOrdering(AtomicOrdering::Monotonic);
    } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
      FenceOrdering = SI->getOrdering();
      SI->setOrdering(AtomicOrdering::Monotonic);
    } else if (RMWI && /* ... ordering needs a fence ... */) {
      FenceOrdering = RMWI->getOrdering();
      RMWI->setOrdering(AtomicOrdering::Monotonic);
    } else if (CASI &&
               TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                   TargetLoweringBase::AtomicExpansionKind::None &&
               /* ... ordering needs a fence ... */) {
      // If the cmpxchg is lowered to LL/SC, the expansion can do smarter
      // fence insertion itself, so the ordering is only split here when it
      // will stay a cmpxchg.
      FenceOrdering = CASI->getMergedOrdering();
      auto CASOrdering = TLI->atomicOperationOrderAfterFenceSplit(CASI);
      CASI->setSuccessOrdering(CASOrdering);
      CASI->setFailureOrdering(CASOrdering);
    }

    if (FenceOrdering != AtomicOrdering::Monotonic) {
      MadeChange |= bracketInstWithFences(I, FenceOrdering);
    }
  } else if (I->hasAtomicStore() &&
             TLI->shouldInsertTrailingFenceForAtomicStore(I)) {
    auto FenceOrdering = AtomicOrdering::Monotonic;
    if (SI)
      FenceOrdering = SI->getOrdering();
    else if (RMWI)
      FenceOrdering = RMWI->getOrdering();
    else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI) !=
                         TargetLoweringBase::AtomicExpansionKind::LLSC)
      // LLSC is handled in expandAtomicCmpXchg().
      FenceOrdering = CASI->getSuccessOrdering();

    ReplacementIRBuilder Builder(I, *DL);
    if (auto TrailingFence =
            TLI->emitTrailingFence(Builder, I, FenceOrdering)) {
      TrailingFence->moveAfter(I);
      MadeChange = true;
    }
  }

  if (LI)
    MadeChange |= tryExpandAtomicLoad(LI);
  else if (SI)
    MadeChange |= tryExpandAtomicStore(SI);
  else if (RMWI) {
    // An RMW is expanded either into a load, if it is idempotent, or into a
    // cmpxchg/LL-SC loop otherwise; try the cheaper form first.
    if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
      MadeChange = true;
    } else {
      MadeChange |= tryExpandAtomicRMW(RMWI);
    }
  } else if (CASI)
    MadeChange |= tryExpandAtomicCmpXchg(CASI);

  return MadeChange;
}
bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
  const auto *Subtarget = TM->getSubtargetImpl(F);
  if (!Subtarget->enableAtomicExpand())
    return false;

  TLI = Subtarget->getTargetLowering();
  DL = &F.getDataLayout();

  bool MadeChange = false;
  // ... for each instruction in the function:
      if (processAtomicInstr(&Inst)) {
        MadeChange = true;
        // ...
      }
  // ...
  return MadeChange;
}
bool AtomicExpandLegacy::runOnFunction(Function &F) {
  // ...
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;
  auto *TM = &TPC->getTM<TargetMachine>();
  AtomicExpandImpl AE;
  return AE.run(F, TM);
}

FunctionPass *llvm::createAtomicExpandLegacyPass() {
  return new AtomicExpandLegacy();
}

PreservedAnalyses AtomicExpandPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {
  AtomicExpandImpl AE;
  bool Changed = AE.run(F, TM);
  if (!Changed)
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}
bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
                                             AtomicOrdering Order) {
  ReplacementIRBuilder Builder(I, *DL);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}
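// Illustration (sketch, not part of this file): on a target where
// shouldInsertFencesForAtomic() is true, processAtomicInstr() first weakens
// e.g. a seq_cst store to monotonic, and this helper then materializes the
// ordering as explicit fences:
//   fence seq_cst
//   store atomic i32 %v, ptr %p monotonic, align 4
//   fence seq_cst
// The actual fence instructions are chosen by the target through
// emitLeadingFence / emitTrailingFence.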
/// Get the iN type with the same bit width as T.
IntegerType *
AtomicExpandImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
  EVT VT = TLI->getMemValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  // ...
  return IntegerType::get(T->getContext(), BitWidth);
}
LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(LI, *DL);

  Value *Addr = LI->getPointerOperand();

  auto *NewLI = Builder.CreateLoad(NewTy, Addr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}
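// E.g. (sketch): with CastToInteger, `load atomic float, ptr %p seq_cst`
// becomes
//   %1 = load atomic i32, ptr %p seq_cst, align 4
//   %2 = bitcast i32 %1 to float
// so the remainder of the pass only has to reason about integer loads.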
AtomicRMWInst *
AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
  // ...
  auto *M = RMWI->getModule();
  Type *NewTy =
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(RMWI, *DL);
  // ...
  Value *NewVal = Val->getType()->isPointerTy()
                      ? Builder.CreatePtrToInt(Val, NewTy)
                      : Builder.CreateBitCast(Val, NewTy);
  // ... create the replacement xchg NewRMWI on the integer type ...
  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

  Value *NewRVal = RMWI->getType()->isPointerTy()
                       ? Builder.CreateIntToPtr(NewRMWI, RMWI->getType())
                       : Builder.CreateBitCast(NewRMWI, RMWI->getType());
  RMWI->replaceAllUsesWith(NewRVal);
  RMWI->eraseFromParent();
  return NewRMWI;
}
bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilderBase &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  // ...
    TLI->emitExpandAtomicLoad(LI);
    return true;
  // ...
  }
}
bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  // ...
    TLI->emitExpandAtomicStore(SI);
    return true;
  // ...
    expandAtomicStoreToXChg(SI);
    return true;
  // ...
  }
}
bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd.
  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
  ReplacementIRBuilder Builder(SI, *DL);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  StoreInst *NewSI = Builder.CreateStore(NewVal, Addr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}
void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store, so we replace them by an
  // atomic swap that can be implemented, for example, as a ldrex/strex on
  // ARM or lock cmpxchg8b on X86.
  ReplacementIRBuilder Builder(SI, *DL);
  AtomicOrdering Ordering = SI->getOrdering();
  AtomicOrdering RMWOrdering = Ordering == AtomicOrdering::Unordered
                                   ? AtomicOrdering::Monotonic
                                   : Ordering;
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), RMWOrdering);
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  tryExpandAtomicRMW(AI);
}
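// E.g. (sketch): `store atomic i64 %v, ptr %p release` on a target without
// a suitable native atomic store becomes
//   %old = atomicrmw xchg ptr %p, i64 %v release
// with %old left unused; the xchg is then expanded like any other RMW.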
static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 Value *&Success, Value *&NewLoaded,
                                 Instruction *MetadataSrc) {
  // ...
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  // ...
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  // ...
}
bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::LLSC);
    } else {
      // ... expandAtomicOpToLLSC() on the full-width value ...
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      // ... emit an OptimizationRemark:
      //        << "A compare and swap loop was generated for an atomic "
      //        << AI->getOperationName(AI->getOperation()) << " operation at "
      //        << MemScope << " memory scope";
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      // ... widen And/Or/Xor and retry:
      tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
      return true;
    }
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::BitTestIntrinsic:
    TLI->emitBitTestAtomicRMWIntrinsic(AI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::CmpArithIntrinsic:
    TLI->emitCmpArithAtomicRMWIntrinsic(AI);
    return true;
  // ...
    TLI->emitExpandAtomicRMW(AI);
    return true;
  // ...
  }
}
struct PartwordMaskValues {
  // These three fields are guaranteed to be set by createMaskInstrs.
  Type *WordType = nullptr;
  Type *ValueType = nullptr;
  Type *IntValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  // The remaining fields can be null.
  Value *ShiftAmt = nullptr;
  Value *Mask = nullptr;
  Value *Inv_Mask = nullptr;
};
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
  auto PrintObj = [&O](auto *V) {
    if (V)
      O << *V;
    else
      O << "nullptr";
    O << '\n';
  };
  O << "PartwordMaskValues {\n";
  O << "  WordType: ";
  PrintObj(PMV.WordType);
  O << "  ValueType: ";
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  O << "  ShiftAmt: ";
  PrintObj(PMV.ShiftAmt);
  O << "  Mask: ";
  PrintObj(PMV.Mask);
  O << "  Inv_Mask: ";
  PrintObj(PMV.Inv_Mask);
  O << "}\n";
  return O;
}
// This is a helper function which builds instructions to provide values
// necessary for partword atomic operations.
static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
                                           Instruction *I, Type *ValueType,
                                           Value *Addr, Align AddrAlign,
                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  Module *M = I->getModule();
  LLVMContext &Ctx = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = PMV.IntValueType = ValueType;
  if (PMV.ValueType->isFloatingPointTy() || PMV.ValueType->isVectorTy())
    PMV.IntValueType =
        Type::getIntNTy(Ctx, ValueType->getPrimitiveSizeInBits());

  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : ValueType;
  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
    PMV.Mask = ConstantInt::get(PMV.ValueType, ~0, /*isSigned*/ true);
    return PMV;
  }

  PMV.AlignedAddrAlignment = Align(MinWordSize);

  assert(ValueSize < MinWordSize);

  PointerType *PtrTy = cast<PointerType>(Addr->getType());
  IntegerType *IntTy = DL.getIndexType(Ctx, PtrTy->getAddressSpace());
  Value *PtrLSB;

  if (AddrAlign < MinWordSize) {
    PMV.AlignedAddr = Builder.CreateIntrinsic(
        Intrinsic::ptrmask, {PtrTy, IntTy},
        {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
        "AlignedAddr");

    Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
    PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  } else {
    // If the alignment is high enough, the address is already aligned.
    PMV.AlignedAddr = Addr;
    PtrLSB = ConstantInt::getNullValue(IntTy);
  }

  if (DL.isLittleEndian()) {
    // turn bytes into bits
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes into bits, and count from the other side.
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }
  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");

  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
  return PMV;
}
static Value *extractMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

  Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, PMV.IntValueType, "extracted");
  return Builder.CreateBitCast(Trunc, PMV.ValueType);
}
static Value *insertMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                Value *Updated,
                                const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

  Value *Cast = Builder.CreateBitCast(Updated, PMV.IntValueType);
  Value *ZExt = Builder.CreateZExt(Cast, PMV.WordType, "extended");
  Value *Shift =
      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
  Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
  return Builder.CreateOr(And, Shift, "inserted");
}
// Emit IR to implement a masked version of a given atomicrmw operation.
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilderBase &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  // ...
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  // ...
  }
}
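// In effect, every partword RMW computes, within the aligned word:
//   FinalVal = (Loaded & Inv_Mask) | (<new subword value> & Mask)
// For xchg the new subword is simply Shifted_Inc; for add/sub/nand the
// full-word result is computed first and then masked into place so the
// neighboring bytes are preserved.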
void AtomicExpandImpl::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  // Widen And/Or/Xor and give the target another chance at expanding it.
  AtomicRMWInst::BinOp Op = AI->getOperation();
  if (Op == AtomicRMWInst::And || Op == AtomicRMWInst::Or ||
      Op == AtomicRMWInst::Xor) {
    tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
    return;
  }
  // ...
  ReplacementIRBuilder Builder(AI, *DL);
  // ...
  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted = nullptr;
  if (Op == AtomicRMWInst::Xchg || Op == AtomicRMWInst::Add ||
      Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Nand) {
    Value *ValOp = Builder.CreateBitCast(AI->getValOperand(), PMV.IntValueType);
    ValOperand_Shifted =
        Builder.CreateShl(Builder.CreateZExt(ValOp, PMV.WordType), PMV.ShiftAmt,
                          "ValOperand_Shifted");
  }
  // ...
  Value *OldResult;
  if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
    OldResult = insertRMWCmpXchgLoop(
        Builder, PMV.WordType, PMV.AlignedAddr, PMV.AlignedAddrAlignment,
        MemOpOrder, SSID, PerformPartwordOp, createCmpXchgInstFun, AI);
  } else {
    assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,
                                  PerformPartwordOp);
  }

  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;
  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(ValOperand_Shifted, PMV.Inv_Mask, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(
      Op, PMV.AlignedAddr, NewOperand, PMV.AlignedAddrAlignment,
      AI->getOrdering(), AI->getSyncScopeID());
  // ...
  Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}
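// E.g. (sketch): an `atomicrmw and ptr %p, i8 %v` becomes a word-sized
//   atomicrmw and ptr %AlignedAddr, i32 ((zext %v << ShiftAmt) | Inv_Mask)
// ORing the operand with Inv_Mask keeps the untouched bytes all-ones, so
// the wide `and` leaves them unchanged; `or`/`xor` need no such correction
// because their untouched bytes are zero in the shifted operand.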
// Expand a sub-word cmpxchg to a word-sized cmpxchg with a retry loop, so
// that concurrent changes to the other bytes of the word don't cause a
// spurious failure for a strong cmpxchg.
bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // ...
  ReplacementIRBuilder Builder(CI, *DL);
  // ...
  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  // ...
  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire word, and mask out the bits belonging to the operand.
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  // ...
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Splice the expected and new values into the untouched bits of the most
  // recently observed word, then attempt the full-width cmpxchg.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
      CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
  // ...
  // Retry only when the failure was caused by the other bytes of the word.
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);
  // ...
}
void AtomicExpandImpl::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  ReplacementIRBuilder Builder(I, *DL);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}
void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);
  // ...
  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // The value operand must be sign-extended for signed min/max so that the
  // target's signed comparison instructions can be used; otherwise zero-ext.
  Instruction::CastOps CastOp = Instruction::ZExt;
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
    AtomicCmpXchgInst *CI) {
  ReplacementIRBuilder Builder(CI, *DL);
  // ...
  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // ... shift CmpVal and NewVal into place within the word ...
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getMergedOrdering());
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  // ...
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  // ...
}
Value *AtomicExpandImpl::insertRMWLLSCLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  // ...
  assert(AddrAlign >= F->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");
  // ...
  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place); remove it and branch to the loop block instead.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);
  Value *NewVal = PerformOp(Builder, Loaded);
  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  // ... loop back while the store-conditional fails ...
  return Loaded;
}
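// Concrete instantiation (sketch) for `atomicrmw add ptr %p, i32 %v` on an
// LL/SC target:
//   atomicrmw.start:
//     %loaded   = <load-linked %p>             ; emitLoadLinked
//     %new      = add i32 %loaded, %v          ; PerformOp
//     %stored   = <store-conditional %new, %p> ; emitStoreConditional
//     %tryagain = icmp ne i32 %stored, 0
//     br i1 %tryagain, label %atomicrmw.start, label %atomicrmw.end
// The load-linked/store-conditional operations are target intrinsics
// (e.g. ldrex/strex on ARM).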
AtomicCmpXchgInst *
AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  ReplacementIRBuilder Builder(CI, *DL);
  // ... cast the compare and new values to NewTy and rebuild the cmpxchg ...
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
  // ...
}
bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  // ...
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  // ...
  // For a strong cmpxchg on a fence-inserting target, a separate
  // released-load block lets retries skip the leading fence.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           /* ... ordering/size heuristics ... */;

  // At -Oz, emit the release barrier once, unconditionally.
  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();
  // ...
  // (exit/failure/nostore/success/trystore blocks are created above)
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  // ...
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  ReplacementIRBuilder Builder(CI, *DL);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =
      extractMaskedValue(Builder, UnreleasedLoad, PMV);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
      insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
  Value *StoreSuccess = TLI->emitStoreConditional(Builder, NewValueInsert,
                                                  PMV.AlignedAddr, MemOpOrder);
  // ...
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB);

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad =
        TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
    // ... compare again and branch ...
    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
  }
  // ...
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic ||
      TLI->shouldInsertTrailingFenceForAtomicStore(CI))
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  PHINode *LoadedNoStore =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);
  // In the failing case, where we never execute the store-conditional, the
  // target may want to balance out the load-linked (e.g. clear the exclusive
  // monitor on ARM).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  PHINode *LoadedFailure =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.failure");
  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  if (CI->isWeak())
    LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // The success/failure outcome is now visible as control flow, so later
  // passes can fold comparisons of the loaded value.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *LoadedExit = Builder.CreatePHI(PMV.WordType, 2, "loaded.exit");
  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // This value dominates all users, since it is the only place the loaded
  // value leaves the expansion.
  Value *LoadedFull = LoadedExit;
  // ...
      assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
             "weird extraction from { iN, i1 }");
  // ... rewrite extractvalue users of CI to use LoadedFull/Success, then:
  for (auto *EV : PrunedInsts)
    EV->eraseFromParent();
  // ...
  return true;
}
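// Rough shape of the CFG built above for a strong cmpxchg (simplified; the
// releasedload block exists only when HasReleasedLoadBB):
//   cmpxchg.start:        load-linked; compare; br fencedstore / nostore
//   cmpxchg.fencedstore:  leading fence (if required); br trystore
//   cmpxchg.trystore:     store-conditional; br success / releasedload
//   cmpxchg.releasedload: load-linked again; compare; br trystore / nostore
//   cmpxchg.success:      trailing fence; br end
//   cmpxchg.nostore:      balance the load-linked; br failure
//   cmpxchg.failure:      trailing (failure-order) fence; br end
//   cmpxchg.end:          PHIs merging the loaded value and success bit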
bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  switch (RMWI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  case AtomicRMWInst::Min:
    return C->isMaxValue(true);
  case AtomicRMWInst::Max:
    return C->isMinValue(true);
  case AtomicRMWInst::UMin:
    return C->isMaxValue(false);
  case AtomicRMWInst::UMax:
    return C->isMinValue(false);
  default:
    return false;
  }
}
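// E.g. `atomicrmw add ptr %p, i32 0`, `atomicrmw and ptr %p, i32 -1`, and
// `atomicrmw umin ptr %p, i32 -1` never change memory, so only the load
// half of the operation (and its ordering) still needs to be honored.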
bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder, SyncScope::ID SSID,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc) {
  // ...
  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
  Builder.CreateBr(LoopBB);

  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                SSID, Success, NewLoaded, MetadataSrc);

  Loaded->addIncoming(NewLoaded, LoopBB);
  Builder.CreateCondBr(Success, ExitBB, LoopBB);
  // ...
  return NewLoaded;
}
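// The loop this helper emits, in IR terms (sketch):
//     %init_loaded = load iN, ptr %addr
//     br label %atomicrmw.start
//   atomicrmw.start:
//     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %atomicrmw.start ]
//     %new = <op> iN %loaded, %incr
//     %pair = cmpxchg ptr %addr, iN %loaded, iN %new
//     %new_loaded = extractvalue { iN, i1 } %pair, 0
//     %success = extractvalue { iN, i1 } %pair, 1
//     br i1 %success, label %atomicrmw.end, label %atomicrmw.start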
bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    // ...
    return expandAtomicCmpXchg(CI);
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  // ...
    TLI->emitExpandAtomicCmpXchg(CI);
    return true;
  // ...
  }
}
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  ReplacementIRBuilder Builder(AI, AI->getDataLayout());
  Builder.setIsFPConstrained(
      AI->getFunction()->hasFnAttribute(Attribute::StrictFP));

  Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
      AI->getOrdering(), AI->getSyncScopeID(),
      [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      },
      CreateCmpXchg, /*MetadataSrc=*/AI);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {
  // "LargestSize" approximates the largest integer type expressible in the
  // target's C ABI: int128 appears to be supported on all 64-bit platforms,
  // otherwise only integers up to 64 bits are.
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}
void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    handleFailure(*I, "unsupported atomic load");
}
void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    handleFailure(*I, "unsupported atomic store");
}
void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  if (!expanded)
    handleFailure(*I, "unsupported cmpxchg");
}
static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};
  // ... switch (Op) returning the matching table; operations with no
  // libcall return an empty ArrayRef ...
}
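// Index 0 of each table is the generic (unsized) libcall and indexes 1-5
// the 1/2/4/8/16-byte sized variants; UNKNOWN_LIBCALL at index 0 records
// that no generic form exists (there is no unsized __atomic_fetch_add, for
// instance). Operations with no table at all yield an empty list and are
// expanded through a CAS loop instead, as in expandAtomicRMWToLibcall below.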
void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
  unsigned Size = getAtomicOpSize(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed; fall back to a CAS loop in which the cmpxchg
  // itself is immediately lowered to the __atomic_compare_exchange libcall.
  if (!Success) {
    expandAtomicRMWToCmpXchg(
        I, [this](IRBuilderBase &Builder, Value *Addr, Value *Loaded,
                  Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
                  SyncScope::ID SSID, Value *&Success, Value *&NewLoaded,
                  Instruction *MetadataSrc) {
          // Create the CAS instruction normally, then convert it to the
          // libcall.
          createCmpXchgInstFun(Builder, Addr, Loaded, NewVal, Alignment,
                               MemOpOrder, SSID, Success, NewLoaded,
                               MetadataSrc);
          // ...
          expandAtomicCASToLibcall(Pair);
        });
  }
}
bool AtomicExpandImpl::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  // ...
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);
  // ...

  // Pick either the sized __atomic_*_N entry point or the generic one.
  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1:
      RTLibType = Libcalls[1];
      break;
    case 2:
      RTLibType = Libcalls[2];
      break;
    case 4:
      RTLibType = Libcalls[3];
      break;
    case 8:
      RTLibType = Libcalls[4];
      break;
    case 16:
      RTLibType = Libcalls[5];
      break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use a sized function, and there's no generic for this operation,
    // so give up.
    return false;
  }

  if (!TLI->getLibcallName(RTLibType)) {
    // This target does not implement the requested atomic libcall, so give
    // up.
    return false;
  }

  // ...

  // 'size' argument (generic calls only).
  if (!UseSizedLibcall) {
    // Note, getIntPtrType is assumed equal to size_t.
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }

  // 'ptr' argument.
  Value *PtrVal = PointerOperand;
  // ...
  Args.push_back(PtrVal);

  // 'expected' argument, if present (passed indirectly via an alloca).
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    // ... lifetime.start and store of the expected value ...
    Args.push_back(AllocaCASExpected);
  }

  // 'val' argument ('desired' for cmpxchg), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue = Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      // ... lifetime.start and store of the value ...
      Args.push_back(AllocaValue);
    }
  }

  // 'ret' argument, for generic calls that return their result indirectly.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    // ...
    Args.push_back(AllocaResult);
  }

  // Ordering arguments.
  Args.push_back(OrderingVal);
  if (Ordering2Val)
    Args.push_back(Ordering2Val);

  // The return type: i1 (zeroext) for cmpxchg, iN for sized value-returning
  // calls, void otherwise.
  Type *ResultTy;
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);

  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  FunctionCallee LibcallFn =
      M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;

  // End the lifetime of the temporary value alloca.
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue);

  if (CASExpected) {
    // The final result from the CAS is {load of 'expected' alloca, bool
    // result from call}.
    Type *FinalResultTy = I->getType();
    Value *V = PoisonValue::get(FinalResultTy);
    Value *ExpectedOut = Builder.CreateAlignedLoad(
        CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    // ... insert ExpectedOut and Result into the { iN, i1 } aggregate ...
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      // ... load the result back out of AllocaResult ...
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}
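// E.g. (sketch): an 8-byte, naturally aligned `atomicrmw xchg` lowers to
// the sized form
//   i64 __atomic_exchange_8(ptr, i64, int)
// while an under-aligned or odd-sized operation takes the generic form,
// passing the value and result indirectly through allocas:
//   void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
//                          int order)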