MachineIRBuilder.cpp
1//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the MachineIRBuilder class.
10//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

22using namespace llvm;
23
void MachineIRBuilder::setMF(MachineFunction &MF) {
25 State.MF = &MF;
26 State.MBB = nullptr;
27 State.MRI = &MF.getRegInfo();
28 State.TII = MF.getSubtarget().getInstrInfo();
29 State.DL = DebugLoc();
30 State.PCSections = nullptr;
31 State.MMRA = nullptr;
33 State.Observer = nullptr;
34}
35
36//------------------------------------------------------------------------------
37// Build instruction variants.
38//------------------------------------------------------------------------------
39
42 getTII().get(Opcode));
43}
44
MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
46 getMBB().insert(getInsertPt(), MIB);
47 recordInsertion(MIB);
48 return MIB;
49}
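
// Illustrative usage sketch (not part of the upstream file): the two-step
// buildInstrNoInsert()/insertInstr() pattern lets a caller add operands before
// the instruction is placed at the insertion point, whereas buildInstr()
// inserts immediately. The function and register names are hypothetical.
static void exampleDeferredInsertion(MachineIRBuilder &B, Register Dst,
                                     Register Src) {
  // Build a COPY without inserting it, fill in its operands, then insert it.
  auto MIB = B.buildInstrNoInsert(TargetOpcode::COPY);
  MIB.addDef(Dst);
  MIB.addUse(Src);
  B.insertInstr(MIB);
}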
50
MachineInstrBuilder MachineIRBuilder::buildDirectDbgValue(
    Register Reg, const MDNode *Variable, const MDNode *Expr) {
54 assert(isa<DILocalVariable>(Variable) && "not a variable");
55 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
56 assert(
57 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
58 "Expected inlined-at fields to agree");
59 return insertInstr(BuildMI(getMF(), getDL(),
60 getTII().get(TargetOpcode::DBG_VALUE),
61 /*IsIndirect*/ false, Reg, Variable, Expr));
62}
63
MachineInstrBuilder MachineIRBuilder::buildIndirectDbgValue(
    Register Reg, const MDNode *Variable, const MDNode *Expr) {
67 assert(isa<DILocalVariable>(Variable) && "not a variable");
68 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
69 assert(
70 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
71 "Expected inlined-at fields to agree");
72 return insertInstr(BuildMI(getMF(), getDL(),
73 getTII().get(TargetOpcode::DBG_VALUE),
74 /*IsIndirect*/ true, Reg, Variable, Expr));
75}
76
MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(
    int FI, const MDNode *Variable, const MDNode *Expr) {
80 assert(isa<DILocalVariable>(Variable) && "not a variable");
81 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
82 assert(
83 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
84 "Expected inlined-at fields to agree");
85 return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
86 .addFrameIndex(FI)
87 .addImm(0)
88 .addMetadata(Variable)
89 .addMetadata(Expr));
90}
91
MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(
    const Constant &C, const MDNode *Variable, const MDNode *Expr) {
95 assert(isa<DILocalVariable>(Variable) && "not a variable");
96 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
97 assert(
98 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
99 "Expected inlined-at fields to agree");
100 auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
101
102 auto *NumericConstant = [&] () -> const Constant* {
103 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
104 if (CE->getOpcode() == Instruction::IntToPtr)
105 return CE->getOperand(0);
106 return &C;
107 }();
108
109 if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
110 if (CI->getBitWidth() > 64)
111 MIB.addCImm(CI);
112 else
113 MIB.addImm(CI->getZExtValue());
114 } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
115 MIB.addFPImm(CFP);
116 } else if (isa<ConstantPointerNull>(NumericConstant)) {
117 MIB.addImm(0);
118 } else {
119 // Insert $noreg if we didn't find a usable constant and had to drop it.
120 MIB.addReg(Register());
121 }
122
123 MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
124 return insertInstr(MIB);
125}
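
// Illustrative usage sketch (not part of the upstream file): the DBG_VALUE
// builders cover a value living in a register, a value addressed through a
// register, and a value spilled to a stack slot. Var and Expr must be
// DILocalVariable and DIExpression metadata; all names here are hypothetical.
static void exampleDbgValues(MachineIRBuilder &B, Register ValReg,
                             Register AddrReg, int FrameIdx,
                             const MDNode *Var, const MDNode *Expr) {
  B.buildDirectDbgValue(ValReg, Var, Expr);    // Variable lives in ValReg.
  B.buildIndirectDbgValue(AddrReg, Var, Expr); // Variable lives at [AddrReg].
  B.buildFIDbgValue(FrameIdx, Var, Expr);      // Variable lives in a stack slot.
}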
126
MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
128 assert(isa<DILabel>(Label) && "not a label");
129 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
130 "Expected inlined-at fields to agree");
131 auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
132
133 return MIB.addMetadata(Label);
134}
135
MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(
    const DstOp &Res, const SrcOp &Size, Align Alignment) {
139 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
140 auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
141 Res.addDefToMIB(*getMRI(), MIB);
142 Size.addSrcToMIB(MIB);
143 MIB.addImm(Alignment.value());
144 return MIB;
145}
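
// Illustrative usage sketch (not part of the upstream file): a dynamic alloca
// takes a pointer result type, a size operand and an alignment. The address
// space, bit widths and sizes below are arbitrary assumptions.
static void exampleDynAlloca(MachineIRBuilder &B) {
  LLT P0 = LLT::pointer(0, 64);
  LLT S64 = LLT::scalar(64);
  auto Size = B.buildConstant(S64, 128);     // 128 bytes.
  B.buildDynStackAlloc(P0, Size, Align(16)); // 16-byte aligned G_DYN_STACKALLOC.
}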
146
MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
149 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
150 auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
151 Res.addDefToMIB(*getMRI(), MIB);
152 MIB.addFrameIndex(Idx);
153 return MIB;
154}
155
MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
158 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
 assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
160 GV->getType()->getAddressSpace() &&
161 "address space mismatch");
162
163 auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
164 Res.addDefToMIB(*getMRI(), MIB);
165 MIB.addGlobalAddress(GV);
166 return MIB;
167}
168
170 unsigned Idx) {
171 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
172 auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
173 Res.addDefToMIB(*getMRI(), MIB);
174 MIB.addConstantPoolIndex(Idx);
175 return MIB;
176}
177
179 unsigned JTI) {
180 return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
181 .addJumpTableIndex(JTI);
182}
183
184void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
185 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
186 assert((Res == Op0) && "type mismatch");
187}
188
190 const LLT Op1) {
191 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
192 assert((Res == Op0 && Res == Op1) && "type mismatch");
193}
194
195void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
196 const LLT Op1) {
197 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
198 assert((Res == Op0) && "type mismatch");
199}
200
203 const SrcOp &Op1, std::optional<unsigned> Flags) {
204 assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
205 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
206 assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");
207
208 return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
209}
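
// Illustrative usage sketch (not part of the upstream file): G_PTR_ADD takes a
// pointer and a scalar byte offset, typically the width of the pointer's index
// size. The pointer/offset types below are arbitrary assumptions.
static void examplePtrAdd(MachineIRBuilder &B, Register Ptr) {
  LLT P0 = LLT::pointer(0, 64);
  LLT S64 = LLT::scalar(64);
  auto Off = B.buildConstant(S64, 8);
  B.buildPtrAdd(P0, Ptr, Off); // %res:_(p0) = G_PTR_ADD %Ptr, 8
}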
210
212 const SrcOp &Op0,
213 const SrcOp &Op1) {
214 return buildPtrAdd(Res, Op0, Op1,
217}
218
219std::optional<MachineInstrBuilder>
221 const LLT ValueTy, uint64_t Value,
222 std::optional<unsigned> Flags) {
223 assert(Res == 0 && "Res is a result argument");
224 assert(ValueTy.isScalar() && "invalid offset type");
225
226 if (Value == 0) {
227 Res = Op0;
228 return std::nullopt;
229 }
230
232 auto Cst = buildConstant(ValueTy, Value);
233 return buildPtrAdd(Res, Op0, Cst.getReg(0), Flags);
234}
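
// Illustrative usage sketch (not part of the upstream file): materializePtrAdd()
// avoids emitting a G_PTR_ADD for a zero offset; in that case it returns
// std::nullopt and forwards Op0 through Res, so callers should consume Res
// rather than the returned builder. Names below are hypothetical.
static Register exampleOffsetPointer(MachineIRBuilder &B, Register Base,
                                     uint64_t ByteOffset) {
  Register Res;
  (void)B.materializePtrAdd(Res, Base, LLT::scalar(64), ByteOffset);
  return Res; // Either Base itself (offset 0) or the new G_PTR_ADD result.
}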
235
236std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
237 Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
238 return materializePtrAdd(Res, Op0, ValueTy, Value,
241}
242
244 const SrcOp &Op0,
245 uint32_t NumBits) {
246 LLT PtrTy = Res.getLLTTy(*getMRI());
247 LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
248 Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
249 buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
250 return buildPtrMask(Res, Op0, MaskReg);
251}
252
255 const SrcOp &Op0) {
256 LLT ResTy = Res.getLLTTy(*getMRI());
257 LLT Op0Ty = Op0.getLLTTy(*getMRI());
258
259 assert(ResTy.isVector() && "Res non vector type");
260
262 if (Op0Ty.isVector()) {
263 assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
264 "Different vector element types");
265 assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
266 "Op0 has more elements");
267 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
268
269 for (auto Op : Unmerge.getInstr()->defs())
270 Regs.push_back(Op.getReg());
271 } else {
272 assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
273 "Op0 has more size");
274 Regs.push_back(Op0.getReg());
275 }
276 Register Undef =
277 buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
278 unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
279 for (unsigned i = 0; i < NumberOfPadElts; ++i)
280 Regs.push_back(Undef);
281 return buildMergeLikeInstr(Res, Regs);
282}
283
286 const SrcOp &Op0) {
287 LLT ResTy = Res.getLLTTy(*getMRI());
288 LLT Op0Ty = Op0.getLLTTy(*getMRI());
289
290 assert(Op0Ty.isVector() && "Non vector type");
291 assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
292 (ResTy.isVector() &&
293 (ResTy.getElementType() == Op0Ty.getElementType()))) &&
294 "Different vector element types");
295 assert(
296 (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
297 "Op0 has fewer elements");
298
299 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
300 if (ResTy.isScalar())
301 return buildCopy(Res, Unmerge.getReg(0));
303 for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
304 Regs.push_back(Unmerge.getReg(i));
305 return buildMergeLikeInstr(Res, Regs);
306}
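
// Illustrative usage sketch (not part of the upstream file): these two helpers
// change only the number of lanes, which makes them convenient when legalizing
// between differently sized vectors of the same element type. It is assumed
// that V2S32 and V4S32 already carry the matching <2 x s32> / <4 x s32> types.
static void exampleResizeVectors(MachineIRBuilder &B, Register V2S32,
                                 Register V4S32) {
  LLT V4 = LLT::fixed_vector(4, 32);
  LLT V2 = LLT::fixed_vector(2, 32);
  B.buildPadVectorWithUndefElements(V4, V2S32);   // <2 x s32> -> <4 x s32>, undef tail.
  B.buildDeleteTrailingVectorElements(V2, V4S32); // <4 x s32> -> <2 x s32>.
}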
307
309 return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
310}
311
313 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
314 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
315}
316
318 unsigned JTI,
319 Register IndexReg) {
320 assert(getMRI()->getType(TablePtr).isPointer() &&
321 "Table reg must be a pointer");
322 return buildInstr(TargetOpcode::G_BRJT)
323 .addUse(TablePtr)
325 .addUse(IndexReg);
326}
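
// Illustrative usage sketch (not part of the upstream file): a typical switch
// lowering materializes the jump-table address with G_JUMP_TABLE and then
// branches through it with G_BRJT. The pointer type and index register are
// arbitrary assumptions.
static void exampleJumpTable(MachineIRBuilder &B, unsigned JTI,
                             Register IndexReg) {
  LLT P0 = LLT::pointer(0, 64);
  auto Table = B.buildJumpTable(P0, JTI);
  B.buildBrJT(Table.getReg(0), JTI, IndexReg);
}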
327
329 const SrcOp &Op) {
330 return buildInstr(TargetOpcode::COPY, Res, Op);
331}
332
334 const ConstantInt &Val) {
335 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
336 LLT Ty = Res.getLLTTy(*getMRI());
337 LLT EltTy = Ty.getScalarType();
338 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
339 "creating constant with the wrong size");
340
341 assert(!Ty.isScalableVector() &&
342 "unexpected scalable vector in buildConstant");
343
344 if (Ty.isFixedVector()) {
345 auto Const = buildInstr(TargetOpcode::G_CONSTANT)
346 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
347 .addCImm(&Val);
348 return buildSplatBuildVector(Res, Const);
349 }
350
351 auto Const = buildInstr(TargetOpcode::G_CONSTANT);
352 Const->setDebugLoc(DebugLoc());
353 Res.addDefToMIB(*getMRI(), Const);
354 Const.addCImm(&Val);
355 return Const;
356}
357
359 int64_t Val) {
362 ConstantInt *CI = ConstantInt::get(IntN, Val, true);
363 return buildConstant(Res, *CI);
364}
365
367 const ConstantFP &Val) {
368 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
369 LLT Ty = Res.getLLTTy(*getMRI());
370 LLT EltTy = Ty.getScalarType();
371
373 == EltTy.getSizeInBits() &&
374 "creating fconstant with the wrong size");
375
376 assert(!Ty.isPointer() && "invalid operand type");
377
378 assert(!Ty.isScalableVector() &&
379 "unexpected scalable vector in buildFConstant");
380
381 if (Ty.isFixedVector()) {
382 auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
383 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
384 .addFPImm(&Val);
385
386 return buildSplatBuildVector(Res, Const);
387 }
388
389 auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
390 Const->setDebugLoc(DebugLoc());
391 Res.addDefToMIB(*getMRI(), Const);
392 Const.addFPImm(&Val);
393 return Const;
394}
395
397 const APInt &Val) {
398 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
399 return buildConstant(Res, *CI);
400}
401
403 double Val) {
404 LLT DstTy = Res.getLLTTy(*getMRI());
405 auto &Ctx = getMF().getFunction().getContext();
406 auto *CFP =
407 ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
408 return buildFConstant(Res, *CFP);
409}
410
412 const APFloat &Val) {
413 auto &Ctx = getMF().getFunction().getContext();
414 auto *CFP = ConstantFP::get(Ctx, Val);
415 return buildFConstant(Res, *CFP);
416}
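
// Illustrative usage sketch (not part of the upstream file): for fixed vector
// destinations the constant builders emit a scalar G_CONSTANT/G_FCONSTANT and
// splat it with G_BUILD_VECTOR, so one call handles scalar and vector types
// alike. The types below are arbitrary assumptions.
static void exampleConstants(MachineIRBuilder &B) {
  LLT S32 = LLT::scalar(32);
  LLT V4S32 = LLT::fixed_vector(4, 32);
  B.buildConstant(S32, 42);   // %c:_(s32) = G_CONSTANT i32 42
  B.buildConstant(V4S32, 1);  // splat of 1 via scalar G_CONSTANT + G_BUILD_VECTOR
  B.buildFConstant(S32, 1.0); // %f:_(s32) = G_FCONSTANT float 1.0
}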
417
420 const ConstantPtrAuth *CPA,
421 Register Addr, Register AddrDisc) {
422 auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
423 Res.addDefToMIB(*getMRI(), MIB);
424 MIB.addUse(Addr);
425 MIB.addImm(CPA->getKey()->getZExtValue());
426 MIB.addUse(AddrDisc);
427 MIB.addImm(CPA->getDiscriminator()->getZExtValue());
428 return MIB;
429}
430
432 MachineBasicBlock &Dest) {
433 assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
434
435 auto MIB = buildInstr(TargetOpcode::G_BRCOND);
436 Tst.addSrcToMIB(MIB);
437 MIB.addMBB(&Dest);
438 return MIB;
439}
440
443 MachinePointerInfo PtrInfo, Align Alignment,
445 const AAMDNodes &AAInfo) {
446 MMOFlags |= MachineMemOperand::MOLoad;
447 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
448
449 LLT Ty = Dst.getLLTTy(*getMRI());
450 MachineMemOperand *MMO =
451 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
452 return buildLoad(Dst, Addr, *MMO);
453}
454
456 const DstOp &Res,
457 const SrcOp &Addr,
458 MachineMemOperand &MMO) {
459 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
460 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
461
462 auto MIB = buildInstr(Opcode);
463 Res.addDefToMIB(*getMRI(), MIB);
464 Addr.addSrcToMIB(MIB);
465 MIB.addMemOperand(&MMO);
466 return MIB;
467}
468
470 const DstOp &Dst, const SrcOp &BasePtr,
471 MachineMemOperand &BaseMMO, int64_t Offset) {
472 LLT LoadTy = Dst.getLLTTy(*getMRI());
473 MachineMemOperand *OffsetMMO =
474 getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);
475
476 if (Offset == 0) // This may be a size or type changing load.
477 return buildLoad(Dst, BasePtr, *OffsetMMO);
478
479 LLT PtrTy = BasePtr.getLLTTy(*getMRI());
480 LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
481 auto ConstOffset = buildConstant(OffsetTy, Offset);
482 auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
483 return buildLoad(Dst, Ptr, *OffsetMMO);
484}
485
487 const SrcOp &Addr,
488 MachineMemOperand &MMO) {
489 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
490 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
491
492 auto MIB = buildInstr(TargetOpcode::G_STORE);
493 Val.addSrcToMIB(MIB);
494 Addr.addSrcToMIB(MIB);
495 MIB.addMemOperand(&MMO);
496 return MIB;
497}
498
501 MachinePointerInfo PtrInfo, Align Alignment,
503 const AAMDNodes &AAInfo) {
504 MMOFlags |= MachineMemOperand::MOStore;
505 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
506
507 LLT Ty = Val.getLLTTy(*getMRI());
508 MachineMemOperand *MMO =
509 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
510 return buildStore(Val, Addr, *MMO);
511}
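
// Illustrative usage sketch (not part of the upstream file): the
// MachinePointerInfo overloads create the MachineMemOperand internally, so only
// the address, alignment and optional flags need to be supplied. Using a fixed
// stack slot for the pointer info is an assumption of this example.
static void exampleLoadStore(MachineIRBuilder &B, Register Addr, int FI) {
  LLT S32 = LLT::scalar(32);
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(B.getMF(), FI);
  auto Val = B.buildLoad(S32, Addr, PtrInfo, Align(4)); // G_LOAD with implicit MMO.
  B.buildStore(Val, Addr, PtrInfo, Align(4));           // G_STORE of the loaded value.
}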
512
514 const SrcOp &Op) {
515 return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
516}
517
519 const SrcOp &Op) {
520 return buildInstr(TargetOpcode::G_SEXT, Res, Op);
521}
522
524 const SrcOp &Op,
525 std::optional<unsigned> Flags) {
526 return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
527}
528
529unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
530 const auto *TLI = getMF().getSubtarget().getTargetLowering();
531 switch (TLI->getBooleanContents(IsVec, IsFP)) {
 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
533 return TargetOpcode::G_SEXT;
 case TargetLoweringBase::ZeroOrOneBooleanContent:
535 return TargetOpcode::G_ZEXT;
536 default:
537 return TargetOpcode::G_ANYEXT;
538 }
539}
540
542 const SrcOp &Op,
543 bool IsFP) {
544 unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
545 return buildInstr(ExtOp, Res, Op);
546}
547
549 const SrcOp &Op,
550 bool IsVector,
551 bool IsFP) {
552 const auto *TLI = getMF().getSubtarget().getTargetLowering();
553 switch (TLI->getBooleanContents(IsVector, IsFP)) {
 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
555 return buildSExtInReg(Res, Op, 1);
 case TargetLoweringBase::ZeroOrOneBooleanContent:
557 return buildZExtInReg(Res, Op, 1);
 case TargetLoweringBase::UndefinedBooleanContent:
559 return buildCopy(Res, Op);
560 }
561
562 llvm_unreachable("unexpected BooleanContent");
563}
564
566 const DstOp &Res,
567 const SrcOp &Op) {
568 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
569 TargetOpcode::G_SEXT == ExtOpc) &&
570 "Expecting Extending Opc");
571 assert(Res.getLLTTy(*getMRI()).isScalar() ||
572 Res.getLLTTy(*getMRI()).isVector());
573 assert(Res.getLLTTy(*getMRI()).isScalar() ==
574 Op.getLLTTy(*getMRI()).isScalar());
575
576 unsigned Opcode = TargetOpcode::COPY;
577 if (Res.getLLTTy(*getMRI()).getSizeInBits() >
578 Op.getLLTTy(*getMRI()).getSizeInBits())
579 Opcode = ExtOpc;
580 else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
581 Op.getLLTTy(*getMRI()).getSizeInBits())
582 Opcode = TargetOpcode::G_TRUNC;
583 else
584 assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
585
586 return buildInstr(Opcode, Res, Op);
587}
588
590 const SrcOp &Op) {
591 return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
592}
593
595 const SrcOp &Op) {
596 return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
597}
598
600 const SrcOp &Op) {
601 return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
602}
603
605 const SrcOp &Op,
606 int64_t ImmOp) {
607 LLT ResTy = Res.getLLTTy(*getMRI());
608 auto Mask = buildConstant(
609 ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
610 return buildAnd(Res, Op, Mask);
611}
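
// Illustrative usage sketch (not part of the upstream file):
// buildSExtOrTrunc()/buildZExtOrTrunc() pick G_SEXT/G_ZEXT, G_TRUNC or COPY
// from the relative sizes, and buildZExtInReg() clears all but the low ImmOp
// bits with a G_AND mask. Register types below are assumptions.
static void exampleExtensions(MachineIRBuilder &B, Register S16Val,
                              Register S64Val, Register S32Val) {
  LLT S32 = LLT::scalar(32);
  B.buildSExtOrTrunc(S32, S16Val);  // s16 -> s32: emits G_SEXT.
  B.buildZExtOrTrunc(S32, S64Val);  // s64 -> s32: emits G_TRUNC.
  B.buildZExtInReg(S32, S32Val, 8); // keep the low 8 bits: G_AND with 0xff.
}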
612
614 const SrcOp &Src) {
615 LLT SrcTy = Src.getLLTTy(*getMRI());
616 LLT DstTy = Dst.getLLTTy(*getMRI());
617 if (SrcTy == DstTy)
618 return buildCopy(Dst, Src);
619
620 unsigned Opcode;
621 if (SrcTy.isPointerOrPointerVector())
622 Opcode = TargetOpcode::G_PTRTOINT;
623 else if (DstTy.isPointerOrPointerVector())
624 Opcode = TargetOpcode::G_INTTOPTR;
625 else {
627 !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
628 Opcode = TargetOpcode::G_BITCAST;
629 }
630
631 return buildInstr(Opcode, Dst, Src);
632}
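
// Illustrative usage sketch (not part of the upstream file): buildCast()
// chooses the opcode from the operand types, so the same call site can produce
// G_PTRTOINT, G_INTTOPTR, G_BITCAST or a plain COPY. Types are assumptions.
static void exampleCasts(MachineIRBuilder &B, Register PtrVal, Register IntVal) {
  LLT P0 = LLT::pointer(0, 64);
  LLT S64 = LLT::scalar(64);
  B.buildCast(S64, PtrVal); // pointer source: G_PTRTOINT.
  B.buildCast(P0, IntVal);  // pointer destination: G_INTTOPTR.
}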
633
635 const SrcOp &Src,
636 uint64_t Index) {
637 LLT SrcTy = Src.getLLTTy(*getMRI());
638 LLT DstTy = Dst.getLLTTy(*getMRI());
639
640#ifndef NDEBUG
641 assert(SrcTy.isValid() && "invalid operand type");
642 assert(DstTy.isValid() && "invalid operand type");
643 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
644 "extracting off end of register");
645#endif
646
647 if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
648 assert(Index == 0 && "insertion past the end of a register");
649 return buildCast(Dst, Src);
650 }
651
652 auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
653 Dst.addDefToMIB(*getMRI(), Extract);
654 Src.addSrcToMIB(Extract);
655 Extract.addImm(Index);
656 return Extract;
657}
658
660 return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
661}
662
664 ArrayRef<Register> Ops) {
665 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
666 // we need some temporary storage for the DstOp objects. Here we use a
667 // sufficiently large SmallVector to not go through the heap.
668 SmallVector<SrcOp, 8> TmpVec(Ops);
669 assert(TmpVec.size() > 1);
670 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
671}
672
675 ArrayRef<Register> Ops) {
676 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
677 // we need some temporary storage for the DstOp objects. Here we use a
678 // sufficiently large SmallVector to not go through the heap.
679 SmallVector<SrcOp, 8> TmpVec(Ops);
680 assert(TmpVec.size() > 1);
681 return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
682}
683
686 std::initializer_list<SrcOp> Ops) {
687 assert(Ops.size() > 1);
688 return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
689}
690
691unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
692 ArrayRef<SrcOp> SrcOps) const {
693 if (DstOp.getLLTTy(*getMRI()).isVector()) {
694 if (SrcOps[0].getLLTTy(*getMRI()).isVector())
695 return TargetOpcode::G_CONCAT_VECTORS;
696 return TargetOpcode::G_BUILD_VECTOR;
697 }
698
699 return TargetOpcode::G_MERGE_VALUES;
700}
701
703 const SrcOp &Op) {
704 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
705 // we need some temporary storage for the DstOp objects. Here we use a
706 // sufficiently large SmallVector to not go through the heap.
707 SmallVector<DstOp, 8> TmpVec(Res);
708 assert(TmpVec.size() > 1);
709 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
710}
711
713 const SrcOp &Op) {
714 unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
715 SmallVector<DstOp, 8> TmpVec(NumReg, Res);
716 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
717}
718
721 const SrcOp &Op) {
722 LLT OpTy = Op.getLLTTy(*getMRI());
723 unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
724 SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
725 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
726}
727
729 const SrcOp &Op) {
730 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
731 // we need some temporary storage for the DstOp objects. Here we use a
732 // sufficiently large SmallVector to not go through the heap.
733 SmallVector<DstOp, 8> TmpVec(Res);
734 assert(TmpVec.size() > 1);
735 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
736}
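
// Illustrative usage sketch (not part of the upstream file): a common narrowing
// pattern splits a wide value into register-sized pieces with G_UNMERGE_VALUES
// and rebuilds it with buildMergeLikeInstr(), which selects G_MERGE_VALUES,
// G_BUILD_VECTOR or G_CONCAT_VECTORS from the destination type. Widths are
// arbitrary assumptions.
static void exampleSplitAndRejoin(MachineIRBuilder &B, Register S64Val) {
  LLT S32 = LLT::scalar(32);
  LLT S64 = LLT::scalar(64);
  auto Halves = B.buildUnmerge(S32, S64Val); // two s32 halves of an s64.
  SmallVector<Register, 2> Parts = {Halves.getReg(0), Halves.getReg(1)};
  B.buildMergeLikeInstr(S64, Parts);         // reassemble the original s64.
}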
737
739 ArrayRef<Register> Ops) {
740 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
741 // we need some temporary storage for the DstOp objects. Here we use a
742 // sufficiently large SmallVector to not go through the heap.
743 SmallVector<SrcOp, 8> TmpVec(Ops);
744 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
745}
746
749 ArrayRef<APInt> Ops) {
750 SmallVector<SrcOp> TmpVec;
751 TmpVec.reserve(Ops.size());
752 LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
753 for (const auto &Op : Ops)
754 TmpVec.push_back(buildConstant(EltTy, Op));
755 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
756}
757
759 const SrcOp &Src) {
761 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
762}
763
766 ArrayRef<Register> Ops) {
767 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
768 // we need some temporary storage for the DstOp objects. Here we use a
769 // sufficiently large SmallVector to not go through the heap.
770 SmallVector<SrcOp, 8> TmpVec(Ops);
771 if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
772 Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
773 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
774 return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
775}
776
778 const SrcOp &Src) {
779 LLT DstTy = Res.getLLTTy(*getMRI());
780 assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
781 "Expected Src to match Dst elt ty");
782 auto UndefVec = buildUndef(DstTy);
783 auto Zero = buildConstant(LLT::scalar(64), 0);
784 auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
785 SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
786 return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
787}
788
790 const SrcOp &Src) {
791 assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
792 "Expected Src to match Dst elt ty");
793 return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
794}
795
797 const SrcOp &Src1,
798 const SrcOp &Src2,
799 ArrayRef<int> Mask) {
800 LLT DstTy = Res.getLLTTy(*getMRI());
801 LLT Src1Ty = Src1.getLLTTy(*getMRI());
802 LLT Src2Ty = Src2.getLLTTy(*getMRI());
803 const LLT DstElemTy = DstTy.isVector() ? DstTy.getElementType() : DstTy;
804 const LLT ElemTy1 = Src1Ty.isVector() ? Src1Ty.getElementType() : Src1Ty;
805 const LLT ElemTy2 = Src2Ty.isVector() ? Src2Ty.getElementType() : Src2Ty;
806 assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
807 (void)DstElemTy;
808 (void)ElemTy1;
809 (void)ElemTy2;
810 ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
811 return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
812 .addShuffleMask(MaskAlloc);
813}
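
// Illustrative usage sketch (not part of the upstream file): buildShuffleSplat()
// implements the insert-element + zero-mask shuffle splat idiom, while
// buildShuffleVector() takes an explicit mask. It is assumed that VecA and VecB
// already have the <4 x s32> type; the mask values are arbitrary.
static void exampleShuffles(MachineIRBuilder &B, Register Scalar,
                            Register VecA, Register VecB) {
  LLT V4S32 = LLT::fixed_vector(4, 32);
  B.buildShuffleSplat(V4S32, Scalar);            // splat Scalar into all lanes.
  int Mask[] = {0, 4, 1, 5};                     // interleave the low halves.
  B.buildShuffleVector(V4S32, VecA, VecB, Mask); // G_SHUFFLE_VECTOR with mask.
}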
814
817 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
818 // we need some temporary storage for the DstOp objects. Here we use a
819 // sufficiently large SmallVector to not go through the heap.
820 SmallVector<SrcOp, 8> TmpVec(Ops);
821 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
822}
823
825 const SrcOp &Src,
826 const SrcOp &Op,
827 unsigned Index) {
828 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
829 Res.getLLTTy(*getMRI()).getSizeInBits() &&
830 "insertion past the end of a register");
831
832 if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
833 Op.getLLTTy(*getMRI()).getSizeInBits()) {
834 return buildCast(Res, Op);
835 }
836
837 return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
838}
839
841 unsigned Step) {
842 unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
843 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
844 APInt(Bitwidth, Step));
845 auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
846 StepVector->setDebugLoc(DebugLoc());
847 Res.addDefToMIB(*getMRI(), StepVector);
848 StepVector.addCImm(CI);
849 return StepVector;
850}
851
853 unsigned MinElts) {
854
857 ConstantInt *CI = ConstantInt::get(IntN, MinElts);
858 return buildVScale(Res, *CI);
859}
860
862 const ConstantInt &MinElts) {
863 auto VScale = buildInstr(TargetOpcode::G_VSCALE);
864 VScale->setDebugLoc(DebugLoc());
865 Res.addDefToMIB(*getMRI(), VScale);
866 VScale.addCImm(&MinElts);
867 return VScale;
868}
869
871 const APInt &MinElts) {
872 ConstantInt *CI =
873 ConstantInt::get(getMF().getFunction().getContext(), MinElts);
874 return buildVScale(Res, *CI);
875}
876
877static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
878 if (HasSideEffects && IsConvergent)
879 return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
880 if (HasSideEffects)
881 return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
882 if (IsConvergent)
883 return TargetOpcode::G_INTRINSIC_CONVERGENT;
884 return TargetOpcode::G_INTRINSIC;
885}
886
889 ArrayRef<Register> ResultRegs,
890 bool HasSideEffects, bool isConvergent) {
891 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
892 for (Register ResultReg : ResultRegs)
893 MIB.addDef(ResultReg);
894 MIB.addIntrinsicID(ID);
895 return MIB;
896}
897
900 ArrayRef<Register> ResultRegs) {
902 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
903 bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
904 return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
905}
906
909 bool HasSideEffects,
910 bool isConvergent) {
911 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
912 for (DstOp Result : Results)
913 Result.addDefToMIB(*getMRI(), MIB);
914 MIB.addIntrinsicID(ID);
915 return MIB;
916}
917
921 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
922 bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
923 return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
924}
925
928 std::optional<unsigned> Flags) {
929 return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
930}
931
934 std::optional<unsigned> Flags) {
935 return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
936}
937
939 const DstOp &Res,
940 const SrcOp &Op0,
941 const SrcOp &Op1,
942 std::optional<unsigned> Flags) {
943 return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
944}
945
947 const DstOp &Res,
948 const SrcOp &Op0,
949 const SrcOp &Op1,
950 std::optional<unsigned> Flags) {
951
952 return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
953}
954
956 const SrcOp &Op0,
957 const SrcOp &Op1) {
958 return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
962 const SrcOp &Op0,
963 const SrcOp &Op1) {
964 return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
965}
966
969 const SrcOp &Op0, const SrcOp &Op1,
970 std::optional<unsigned> Flags) {
972 return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
973}
976 const SrcOp &Src0,
977 const SrcOp &Src1,
978 unsigned Idx) {
979 return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
980 {Src0, Src1, uint64_t(Idx)});
981}
982
984 const SrcOp &Src,
985 unsigned Idx) {
986 return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
987 {Src, uint64_t(Idx)});
988}
989
992 const SrcOp &Elt, const SrcOp &Idx) {
993 return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
994}
995
998 const SrcOp &Idx) {
999 return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
1000}
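
// Illustrative usage sketch (not part of the upstream file): element indices
// are ordinary scalar operands, so they are usually materialized with
// buildConstant() first. The vector type and index width are assumptions.
static void exampleVectorElements(MachineIRBuilder &B, Register Vec,
                                  Register Elt) {
  LLT S32 = LLT::scalar(32);
  LLT S64 = LLT::scalar(64);
  LLT V4S32 = LLT::fixed_vector(4, 32);
  auto Idx = B.buildConstant(S64, 2);
  B.buildInsertVectorElement(V4S32, Vec, Elt, Idx); // Vec with lane 2 = Elt.
  B.buildExtractVectorElement(S32, Vec, Idx);       // lane 2 of Vec.
}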
1001
1003 const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
1004 const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
1005#ifndef NDEBUG
1006 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1007 LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
1008 LLT AddrTy = Addr.getLLTTy(*getMRI());
1009 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
1010 LLT NewValTy = NewVal.getLLTTy(*getMRI());
1011 assert(OldValResTy.isScalar() && "invalid operand type");
1012 assert(SuccessResTy.isScalar() && "invalid operand type");
1013 assert(AddrTy.isPointer() && "invalid operand type");
1014 assert(CmpValTy.isValid() && "invalid operand type");
1015 assert(NewValTy.isValid() && "invalid operand type");
1016 assert(OldValResTy == CmpValTy && "type mismatch");
1017 assert(OldValResTy == NewValTy && "type mismatch");
1018#endif
1019
1020 auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
1021 OldValRes.addDefToMIB(*getMRI(), MIB);
1022 SuccessRes.addDefToMIB(*getMRI(), MIB);
1023 Addr.addSrcToMIB(MIB);
1024 CmpVal.addSrcToMIB(MIB);
1025 NewVal.addSrcToMIB(MIB);
1026 MIB.addMemOperand(&MMO);
1027 return MIB;
1028}
1029
1032 const SrcOp &CmpVal, const SrcOp &NewVal,
1033 MachineMemOperand &MMO) {
1034#ifndef NDEBUG
1035 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1036 LLT AddrTy = Addr.getLLTTy(*getMRI());
1037 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
1038 LLT NewValTy = NewVal.getLLTTy(*getMRI());
1039 assert(OldValResTy.isScalar() && "invalid operand type");
1040 assert(AddrTy.isPointer() && "invalid operand type");
1041 assert(CmpValTy.isValid() && "invalid operand type");
1042 assert(NewValTy.isValid() && "invalid operand type");
1043 assert(OldValResTy == CmpValTy && "type mismatch");
1044 assert(OldValResTy == NewValTy && "type mismatch");
1045#endif
1046
1047 auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
1048 OldValRes.addDefToMIB(*getMRI(), MIB);
1049 Addr.addSrcToMIB(MIB);
1050 CmpVal.addSrcToMIB(MIB);
1051 NewVal.addSrcToMIB(MIB);
1052 MIB.addMemOperand(&MMO);
1053 return MIB;
1054}
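
// Illustrative usage sketch (not part of the upstream file): the atomic
// builders take an explicit MachineMemOperand that carries the atomic ordering.
// The pointer info, ordering, alignment and value type are all assumptions.
static void exampleCmpXchg(MachineIRBuilder &B, Register Addr, Register Cmp,
                           Register New) {
  LLT S32 = LLT::scalar(32);
  MachineMemOperand *MMO = B.getMF().getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore, S32, Align(4),
      AAMDNodes(), nullptr, SyncScope::System,
      AtomicOrdering::SequentiallyConsistent);
  B.buildAtomicCmpXchg(S32, Addr, Cmp, New, *MMO); // seq_cst G_ATOMIC_CMPXCHG.
}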
1055
1057 unsigned Opcode, const DstOp &OldValRes,
1058 const SrcOp &Addr, const SrcOp &Val,
1059 MachineMemOperand &MMO) {
1060
1061#ifndef NDEBUG
1062 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1063 LLT AddrTy = Addr.getLLTTy(*getMRI());
1064 LLT ValTy = Val.getLLTTy(*getMRI());
1065 assert(AddrTy.isPointer() && "invalid operand type");
1066 assert(ValTy.isValid() && "invalid operand type");
1067 assert(OldValResTy == ValTy && "type mismatch");
1068 assert(MMO.isAtomic() && "not atomic mem operand");
1069#endif
1070
1071 auto MIB = buildInstr(Opcode);
1072 OldValRes.addDefToMIB(*getMRI(), MIB);
1073 Addr.addSrcToMIB(MIB);
1074 Val.addSrcToMIB(MIB);
1075 MIB.addMemOperand(&MMO);
1076 return MIB;
1077}
1078
1081 Register Val, MachineMemOperand &MMO) {
1082 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
1083 MMO);
1084}
1087 Register Val, MachineMemOperand &MMO) {
1088 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
1089 MMO);
1090}
1093 Register Val, MachineMemOperand &MMO) {
1094 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
1095 MMO);
1096}
1099 Register Val, MachineMemOperand &MMO) {
1100 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
1101 MMO);
1102}
1105 Register Val, MachineMemOperand &MMO) {
1106 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
1107 MMO);
1108}
1110 Register Addr,
1111 Register Val,
1112 MachineMemOperand &MMO) {
1113 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
1114 MMO);
1115}
1118 Register Val, MachineMemOperand &MMO) {
1119 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
1120 MMO);
1121}
1124 Register Val, MachineMemOperand &MMO) {
1125 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
1126 MMO);
1127}
1130 Register Val, MachineMemOperand &MMO) {
1131 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
1132 MMO);
1133}
1136 Register Val, MachineMemOperand &MMO) {
1137 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
1138 MMO);
1139}
1142 Register Val, MachineMemOperand &MMO) {
1143 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
1144 MMO);
1145}
1146
1149 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1150 MachineMemOperand &MMO) {
1151 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
1152 MMO);
1153}
1154
1156MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1157 MachineMemOperand &MMO) {
1158 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
1159 MMO);
1160}
1161
1164 const SrcOp &Val, MachineMemOperand &MMO) {
1165 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
1166 MMO);
1167}
1168
1171 const SrcOp &Val, MachineMemOperand &MMO) {
1172 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
1173 MMO);
1174}
1175
1178 const SrcOp &Addr, const SrcOp &Val,
1179 MachineMemOperand &MMO) {
1180 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
1181 Val, MMO);
1182}
1183
1186 const SrcOp &Addr, const SrcOp &Val,
1187 MachineMemOperand &MMO) {
1188 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
1189 Val, MMO);
1190}
1191
1193MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1194 return buildInstr(TargetOpcode::G_FENCE)
1195 .addImm(Ordering)
1196 .addImm(Scope);
1197}
1198
1200 unsigned RW,
1201 unsigned Locality,
1202 unsigned CacheType,
1203 MachineMemOperand &MMO) {
1204 auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
1205 Addr.addSrcToMIB(MIB);
1206 MIB.addImm(RW).addImm(Locality).addImm(CacheType);
1207 MIB.addMemOperand(&MMO);
1208 return MIB;
1209}
1210
1213#ifndef NDEBUG
1214 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1215#endif
1216
1217 return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
1218}
1219
1220void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
1221 bool IsExtend) {
1222#ifndef NDEBUG
1223 if (DstTy.isVector()) {
1224 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
1225 assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
1226 "different number of elements in a trunc/ext");
1227 } else
1228 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
1229
1230 if (IsExtend)
1232 "invalid narrowing extend");
1233 else
1235 "invalid widening trunc");
1236#endif
1237}
1238
1239void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
1240 const LLT Op0Ty, const LLT Op1Ty) {
1241#ifndef NDEBUG
1242 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
1243 "invalid operand type");
1244 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
1245 if (ResTy.isScalar() || ResTy.isPointer())
1246 assert(TstTy.isScalar() && "type mismatch");
1247 else
1248 assert((TstTy.isScalar() ||
1249 (TstTy.isVector() &&
1250 TstTy.getElementCount() == Op0Ty.getElementCount())) &&
1251 "type mismatch");
1252#endif
1253}
1254
1257 ArrayRef<SrcOp> SrcOps,
1258 std::optional<unsigned> Flags) {
1259 switch (Opc) {
1260 default:
1261 break;
1262 case TargetOpcode::G_SELECT: {
1263 assert(DstOps.size() == 1 && "Invalid select");
1264 assert(SrcOps.size() == 3 && "Invalid select");
1266 DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
1267 SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
1268 break;
1269 }
1270 case TargetOpcode::G_FNEG:
1271 case TargetOpcode::G_ABS:
1272 // All these are unary ops.
1273 assert(DstOps.size() == 1 && "Invalid Dst");
1274 assert(SrcOps.size() == 1 && "Invalid Srcs");
1275 validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
1276 SrcOps[0].getLLTTy(*getMRI()));
1277 break;
1278 case TargetOpcode::G_ADD:
1279 case TargetOpcode::G_AND:
1280 case TargetOpcode::G_MUL:
1281 case TargetOpcode::G_OR:
1282 case TargetOpcode::G_SUB:
1283 case TargetOpcode::G_XOR:
1284 case TargetOpcode::G_UDIV:
1285 case TargetOpcode::G_SDIV:
1286 case TargetOpcode::G_UREM:
1287 case TargetOpcode::G_SREM:
1288 case TargetOpcode::G_SMIN:
1289 case TargetOpcode::G_SMAX:
1290 case TargetOpcode::G_UMIN:
1291 case TargetOpcode::G_UMAX:
1292 case TargetOpcode::G_UADDSAT:
1293 case TargetOpcode::G_SADDSAT:
1294 case TargetOpcode::G_USUBSAT:
1295 case TargetOpcode::G_SSUBSAT: {
1296 // All these are binary ops.
1297 assert(DstOps.size() == 1 && "Invalid Dst");
1298 assert(SrcOps.size() == 2 && "Invalid Srcs");
1299 validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
1300 SrcOps[0].getLLTTy(*getMRI()),
1301 SrcOps[1].getLLTTy(*getMRI()));
1302 break;
1303 }
1304 case TargetOpcode::G_SHL:
1305 case TargetOpcode::G_ASHR:
1306 case TargetOpcode::G_LSHR:
1307 case TargetOpcode::G_USHLSAT:
1308 case TargetOpcode::G_SSHLSAT: {
1309 assert(DstOps.size() == 1 && "Invalid Dst");
1310 assert(SrcOps.size() == 2 && "Invalid Srcs");
1311 validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
1312 SrcOps[0].getLLTTy(*getMRI()),
1313 SrcOps[1].getLLTTy(*getMRI()));
1314 break;
1315 }
1316 case TargetOpcode::G_SEXT:
1317 case TargetOpcode::G_ZEXT:
1318 case TargetOpcode::G_ANYEXT:
1319 assert(DstOps.size() == 1 && "Invalid Dst");
1320 assert(SrcOps.size() == 1 && "Invalid Srcs");
1321 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1322 SrcOps[0].getLLTTy(*getMRI()), true);
1323 break;
1324 case TargetOpcode::G_TRUNC:
1325 case TargetOpcode::G_FPTRUNC: {
1326 assert(DstOps.size() == 1 && "Invalid Dst");
1327 assert(SrcOps.size() == 1 && "Invalid Srcs");
1328 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1329 SrcOps[0].getLLTTy(*getMRI()), false);
1330 break;
1331 }
1332 case TargetOpcode::G_BITCAST: {
1333 assert(DstOps.size() == 1 && "Invalid Dst");
1334 assert(SrcOps.size() == 1 && "Invalid Srcs");
1335 assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1336 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
1337 break;
1338 }
1339 case TargetOpcode::COPY:
1340 assert(DstOps.size() == 1 && "Invalid Dst");
1341 // If the caller wants to add a subreg source it has to be done separately
1342 // so we may not have any SrcOps at this point yet.
1343 break;
1344 case TargetOpcode::G_FCMP:
1345 case TargetOpcode::G_ICMP: {
1346 assert(DstOps.size() == 1 && "Invalid Dst Operands");
1347 assert(SrcOps.size() == 3 && "Invalid Src Operands");
1348 // For F/ICMP, the first src operand is the predicate, followed by
1349 // the two comparands.
1350 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1351 "Expecting predicate");
1352 assert([&]() -> bool {
1353 CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1354 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1355 : CmpInst::isFPPredicate(Pred);
1356 }() && "Invalid predicate");
1357 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1358 "Type mismatch");
1359 assert([&]() -> bool {
1360 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1361 LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1362 if (Op0Ty.isScalar() || Op0Ty.isPointer())
1363 return DstTy.isScalar();
1364 else
1365 return DstTy.isVector() &&
1366 DstTy.getElementCount() == Op0Ty.getElementCount();
1367 }() && "Type Mismatch");
1368 break;
1369 }
1370 case TargetOpcode::G_UNMERGE_VALUES: {
1371 assert(!DstOps.empty() && "Invalid trivial sequence");
1372 assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1373 assert(llvm::all_of(DstOps,
1374 [&, this](const DstOp &Op) {
1375 return Op.getLLTTy(*getMRI()) ==
1376 DstOps[0].getLLTTy(*getMRI());
1377 }) &&
1378 "type mismatch in output list");
1379 assert((TypeSize::ScalarTy)DstOps.size() *
1380 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1381 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1382 "input operands do not cover output register");
1383 break;
1384 }
1385 case TargetOpcode::G_MERGE_VALUES: {
1386 assert(SrcOps.size() >= 2 && "invalid trivial sequence");
1387 assert(DstOps.size() == 1 && "Invalid Dst");
1388 assert(llvm::all_of(SrcOps,
1389 [&, this](const SrcOp &Op) {
1390 return Op.getLLTTy(*getMRI()) ==
1391 SrcOps[0].getLLTTy(*getMRI());
1392 }) &&
1393 "type mismatch in input list");
1394 assert((TypeSize::ScalarTy)SrcOps.size() *
1395 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1396 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1397 "input operands do not cover output register");
1398 assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
1399 "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
1400 break;
1401 }
1402 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1403 assert(DstOps.size() == 1 && "Invalid Dst size");
1404 assert(SrcOps.size() == 2 && "Invalid Src size");
1405 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1406 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1407 DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1408 "Invalid operand type");
1409 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1410 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1411 DstOps[0].getLLTTy(*getMRI()) &&
1412 "Type mismatch");
1413 break;
1414 }
1415 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1416 assert(DstOps.size() == 1 && "Invalid dst size");
1417 assert(SrcOps.size() == 3 && "Invalid src size");
1418 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1419 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1420 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1421 SrcOps[1].getLLTTy(*getMRI()) &&
1422 "Type mismatch");
1423 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1424 assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
1425 SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
1426 "Type mismatch");
1427 break;
1428 }
1429 case TargetOpcode::G_BUILD_VECTOR: {
1430 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1431 "Must have at least 2 operands");
1432 assert(DstOps.size() == 1 && "Invalid DstOps");
1433 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1434 "Res type must be a vector");
1435 assert(llvm::all_of(SrcOps,
1436 [&, this](const SrcOp &Op) {
1437 return Op.getLLTTy(*getMRI()) ==
1438 SrcOps[0].getLLTTy(*getMRI());
1439 }) &&
1440 "type mismatch in input list");
1441 assert((TypeSize::ScalarTy)SrcOps.size() *
1442 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1443 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1444 "input scalars do not exactly cover the output vector register");
1445 break;
1446 }
1447 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1448 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1449 "Must have at least 2 operands");
1450 assert(DstOps.size() == 1 && "Invalid DstOps");
1451 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1452 "Res type must be a vector");
1453 assert(llvm::all_of(SrcOps,
1454 [&, this](const SrcOp &Op) {
1455 return Op.getLLTTy(*getMRI()) ==
1456 SrcOps[0].getLLTTy(*getMRI());
1457 }) &&
1458 "type mismatch in input list");
1459 break;
1460 }
1461 case TargetOpcode::G_CONCAT_VECTORS: {
1462 assert(DstOps.size() == 1 && "Invalid DstOps");
1463 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1464 "Must have at least 2 operands");
1465 assert(llvm::all_of(SrcOps,
1466 [&, this](const SrcOp &Op) {
1467 return (Op.getLLTTy(*getMRI()).isVector() &&
1468 Op.getLLTTy(*getMRI()) ==
1469 SrcOps[0].getLLTTy(*getMRI()));
1470 }) &&
1471 "type mismatch in input list");
1472 assert((TypeSize::ScalarTy)SrcOps.size() *
1473 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1474 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1475 "input vectors do not exactly cover the output vector register");
1476 break;
1477 }
1478 case TargetOpcode::G_UADDE: {
1479 assert(DstOps.size() == 2 && "Invalid no of dst operands");
1480 assert(SrcOps.size() == 3 && "Invalid no of src operands");
1481 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1482 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1483 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1484 "Invalid operand");
1485 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1486 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1487 "type mismatch");
1488 break;
1489 }
1490 }
1491
1492 auto MIB = buildInstr(Opc);
1493 for (const DstOp &Op : DstOps)
1494 Op.addDefToMIB(*getMRI(), MIB);
1495 for (const SrcOp &Op : SrcOps)
1496 Op.addSrcToMIB(MIB);
1497 if (Flags)
1498 MIB->setFlags(*Flags);
1499 return MIB;
1500}
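
// Illustrative usage sketch (not part of the upstream file): the generic
// buildInstr() overload takes DstOp/SrcOp lists plus optional MachineInstr
// flags, and runs the opcode-specific validation above in assert builds. The
// type and flag choice below are arbitrary assumptions.
static void exampleGenericBuild(MachineIRBuilder &B, Register LHS,
                                Register RHS) {
  LLT S32 = LLT::scalar(32);
  B.buildInstr(TargetOpcode::G_ADD, {S32}, {LHS, RHS},
               MachineInstr::NoSWrap); // %sum:_(s32) = nsw G_ADD %LHS, %RHS
}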
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Function Alias Analysis Results
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint64_t Size
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1457
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:306
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:142
The address of a basic block.
Definition: Constants.h:899
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:678
bool isFPPredicate() const
Definition: InstrTypes.h:784
bool isIntPredicate() const
Definition: InstrTypes.h:785
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:277
const APFloat & getValueAPF() const
Definition: Constants.h:320
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:163
A signed pointer, in the ptrauth sense.
Definition: Constants.h:1032
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition: Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition: Constants.h:1065
This is an important base class in LLVM.
Definition: Constant.h:43
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:124
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:359
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:296
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:319
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:265
constexpr bool isScalar() const
Definition: LowLevelType.h:147
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:43
constexpr bool isValid() const
Definition: LowLevelType.h:146
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:160
constexpr bool isVector() const
Definition: LowLevelType.h:149
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:191
constexpr bool isPointer() const
Definition: LowLevelType.h:150
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:278
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:154
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
constexpr LLT getScalarType() const
Definition: LowLevelType.h:206
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:64
Metadata node.
Definition: Metadata.h:1077
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWFMaximum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAXIMUM Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value, std::optional< unsigned > Flags=std::nullopt)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildObjectPtrOffset(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildAtomicRMWFMinimum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMINIMUM Addr, Val, MMO.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
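Illustrative sketch of the size-dispatching cast helpers; canonicalizeToS32 is a made-up name, and the builder B is assumed to be set up already.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

// Sketch: normalize an arbitrary-width integer vreg to s32. buildZExtOrTrunc
// emits G_ZEXT, G_TRUNC, or COPY depending on how the source width compares
// to 32 bits.
static Register canonicalizeToS32(MachineIRBuilder &B, Register Src) {
  return B.buildZExtOrTrunc(LLT::scalar(32), Src).getReg(0);
}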
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
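A hedged illustration of the load/store builders (none of this comes from the file above): copy an s32 slot to an address 4 bytes further along. The copySlot name, the 64-bit p0 pointer width, the blank MachinePointerInfo, and the Align(4) value are assumptions.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"

using namespace llvm;

// Sketch: load an s32 from Ptr and store it 4 bytes further on. Real code
// would describe the actual memory location in the MachinePointerInfo.
static void copySlot(MachineIRBuilder &B, Register Ptr) {
  MachineFunction &MF = B.getMF();
  LLT S32 = LLT::scalar(32);
  LLT S64 = LLT::scalar(64);
  LLT P0 = LLT::pointer(0, 64); // assumes 64-bit pointers in AS 0

  MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
      MachinePointerInfo(), MachineMemOperand::MOLoad, S32, Align(4));
  MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
      MachinePointerInfo(), MachineMemOperand::MOStore, S32, Align(4));

  auto Val = B.buildLoad(S32, Ptr, *LoadMMO);   // Val = G_LOAD Ptr
  auto Off = B.buildConstant(S64, 4);           // offset is a pointer-width int
  auto Dst = B.buildPtrAdd(P0, Ptr, Off);       // Dst = G_PTR_ADD Ptr, 4
  B.buildStore(Val, Dst, *StoreMMO);            // G_STORE Val, Dst
}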
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, .....
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
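Illustrative sketch of buildBuildVectorConstant; the emitIotaVector name and the <4 x s32> type are assumptions.

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

// Sketch: materialize <4 x s32> = <0, 1, 2, 3>. Each lane is emitted as a
// G_CONSTANT and gathered with G_BUILD_VECTOR.
static Register emitIotaVector(MachineIRBuilder &B) {
  LLT V4S32 = LLT::fixed_vector(4, 32);
  SmallVector<APInt, 4> Lanes;
  for (unsigned I = 0; I != 4; ++I)
    Lanes.push_back(APInt(32, I));
  return B.buildBuildVectorConstant(V4S32, Lanes).getReg(0);
}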
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of...
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
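Similarly illustrative: a sketch of the two-result cmpxchg form. The emitCmpXchg name and the s32 value type are assumptions; the MachineMemOperand is expected to carry the atomic orderings of the cmpxchg.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include <utility>

using namespace llvm;

// Sketch: Old, Ok = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, Cmp, New, MMO.
// Both result registers are created from LLTs by the builder.
static std::pair<Register, Register>
emitCmpXchg(MachineIRBuilder &B, Register Addr, Register Cmp, Register New,
            MachineMemOperand &MMO) {
  LLT S32 = LLT::scalar(32);
  LLT S1 = LLT::scalar(1);
  auto MIB = B.buildAtomicCmpXchgWithSuccess(S32, S1, Addr, Cmp, New, MMO);
  return {MIB.getReg(0), MIB.getReg(1)};
}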
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a,...
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Idx0.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
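A small illustrative sketch of the constant builders (the emitConstants name is made up); the int64_t and double convenience overloads used here exist alongside the ConstantInt/ConstantFP forms listed above.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

// Sketch: materialize an s64 integer constant and an s32 float constant.
static void emitConstants(MachineIRBuilder &B) {
  auto C42 = B.buildConstant(LLT::scalar(64), 42);    // G_CONSTANT i64 42
  auto CPi = B.buildFConstant(LLT::scalar(32), 3.14); // G_FCONSTANT float
  (void)C42;
  (void)CPi;
}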
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
MachineInstrBuilder buildConstantPtrAuth(const DstOp &Res, const ConstantPtrAuth *CPA, Register Addr, Register AddrDisc)
Build and insert G_PTRAUTH_GLOBAL_VALUE.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher,...
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
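Illustrative sketch of the explicit-destination pattern: creating the result vreg by hand instead of handing the builder an LLT. The explicitDestination name is made up.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Sketch: most build* methods accept either a pre-created virtual register
// or a bare LLT as the destination. The explicit-register form looks like:
static Register explicitDestination(MachineIRBuilder &B, Register Src) {
  MachineRegisterInfo &MRI = *B.getMRI();
  Register Dst = MRI.createGenericVirtualRegister(LLT::scalar(32));
  B.buildCopy(Dst, Src); // Dst = COPY Src
  return Dst;
}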
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:740
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
size_t size() const
Definition: SmallVector.h:79
void reserve(size_type N)
Definition: SmallVector.h:664
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:219
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:226
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
Definition: Intrinsics.cpp:743
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
DWARFExpression::Operation Op
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:657
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:388
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
MachineFunction * MF
MachineFunction under construction.
MDNode * MMRA
MMRA Metadata to be set on any instruction we create.
DebugLoc DL
Debug location to be set to any instruction we create.
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
MDNode * PCSections
PC sections metadata to be set to any instruction we create.
MachineBasicBlock::iterator II
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.
GISelChangeObserver * Observer
This class contains a discriminated union of information about pointers in memory operands,...
All attributes (register class or bank and low-level type) a virtual register can have.