//===--- SPIRVUtils.cpp ---- SPIR-V Utility Functions -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains miscellaneous utility functions.
//
//===----------------------------------------------------------------------===//

#include "SPIRVUtils.h"
#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVSubtarget.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Demangle/Demangle.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <queue>
#include <vector>

namespace llvm {

// The following functions are used to add string literals as a series of
// 32-bit integer operands with the correct format, and to unpack them when
// making string comparisons in compiler passes.
// SPIR-V requires null-terminated UTF-8 strings padded to 32-bit alignment.
static uint32_t convertCharsToWord(const StringRef &Str, unsigned i) {
  uint32_t Word = 0u; // Build up this 32-bit word from 4 8-bit chars.
  for (unsigned WordIndex = 0; WordIndex < 4; ++WordIndex) {
    unsigned StrIndex = i + WordIndex;
    uint8_t CharToAdd = 0;       // Initialize char as padding/null.
    if (StrIndex < Str.size()) { // If it's within the string, get a real char.
      CharToAdd = Str[StrIndex];
    }
    Word |= (CharToAdd << (WordIndex * 8));
  }
  return Word;
}
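// A minimal worked example (illustrative): convertCharsToWord("abc", 0)
// places 'a' (0x61) in the low byte, then 'b' (0x62) and 'c' (0x63), and
// zero-pads the final byte, yielding 0x00636261, matching the SPIR-V rule
// that the first character occupies the low-order byte of the word.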

// Get length including padding and null terminator.
static size_t getPaddedLen(const StringRef &Str) {
  return (Str.size() + 4) & ~3;
}
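// For example, a 3-char string pads to 4 bytes (one word, including the null
// terminator), while a 4-char string needs 8 bytes: the terminator alone
// forces a second, fully padded word.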

void addStringImm(const StringRef &Str, MCInst &Inst) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    Inst.addOperand(MCOperand::createImm(convertCharsToWord(Str, i)));
  }
}

void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    MIB.addImm(convertCharsToWord(Str, i));
  }
}

void addStringImm(const StringRef &Str, IRBuilder<> &B,
                  std::vector<Value *> &Args) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add a vector element for the 32-bits of chars or padding.
    Args.push_back(B.getInt32(convertCharsToWord(Str, i)));
  }
}

std::string getStringImm(const MachineInstr &MI, unsigned StartIndex) {
  return getSPIRVStringOperand(MI, StartIndex);
}

std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI) {
  MachineInstr *Def = getVRegDef(MRI, Reg);
  assert(Def && Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE &&
         "Expected G_GLOBAL_VALUE");
  const GlobalValue *GV = Def->getOperand(1).getGlobal();
  Value *V = GV->getOperand(0);
  const ConstantDataArray *CDA = cast<ConstantDataArray>(V);
  return CDA->getAsCString().str();
}

void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB) {
  const auto Bitwidth = Imm.getBitWidth();
  if (Bitwidth == 1)
    return; // Already handled
  else if (Bitwidth <= 32) {
    MIB.addImm(Imm.getZExtValue());
    // The asm printer needs this info to print 16-bit floating-point
    // constants correctly.
    if (Bitwidth == 16)
      MIB.getInstr()->setAsmPrinterFlag(SPIRV::ASM_PRINTER_WIDTH16);
    return;
  } else if (Bitwidth <= 64) {
    uint64_t FullImm = Imm.getZExtValue();
    uint32_t LowBits = FullImm & 0xffffffff;
    uint32_t HighBits = (FullImm >> 32) & 0xffffffff;
    MIB.addImm(LowBits).addImm(HighBits);
    return;
  }
  report_fatal_error("Unsupported constant bitwidth");
}
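// Worked example (illustrative): a 64-bit immediate 0x0123456789abcdef is
// emitted as two 32-bit operands, low word 0x89abcdef first, then high word
// 0x01234567, matching SPIR-V's low-order-word-first layout for wide
// literals.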

void buildOpName(Register Target, const StringRef &Name,
                 MachineIRBuilder &MIRBuilder) {
  if (!Name.empty()) {
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpName).addUse(Target);
    addStringImm(Name, MIB);
  }
}

void buildOpName(Register Target, const StringRef &Name, MachineInstr &I,
                 const SPIRVInstrInfo &TII) {
  if (!Name.empty()) {
    auto MIB =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpName))
            .addUse(Target);
    addStringImm(Name, MIB);
  }
}

static void finishBuildOpDecorate(MachineInstrBuilder &MIB,
                                  const std::vector<uint32_t> &DecArgs,
                                  StringRef StrImm) {
  if (!StrImm.empty())
    addStringImm(StrImm, MIB);
  for (const auto &DecArg : DecArgs)
    MIB.addImm(DecArg);
}

void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}

void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  MachineBasicBlock &MBB = *I.getParent();
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpDecorate))
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}

void buildOpMemberDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpMemberDecorate)
                 .addUse(Reg)
                 .addImm(Member)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}

void buildOpMemberDecorate(Register Reg, MachineInstr &I,
                           const SPIRVInstrInfo &TII,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm) {
  MachineBasicBlock &MBB = *I.getParent();
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemberDecorate))
                 .addUse(Reg)
                 .addImm(Member)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}

void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
                             const MDNode *GVarMD) {
  for (unsigned I = 0, E = GVarMD->getNumOperands(); I != E; ++I) {
    auto *OpMD = dyn_cast<MDNode>(GVarMD->getOperand(I));
    if (!OpMD)
      report_fatal_error("Invalid decoration");
    if (OpMD->getNumOperands() == 0)
      report_fatal_error("Expected operand(s) of the decoration");
    ConstantInt *DecorationId =
        mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(0));
    if (!DecorationId)
      report_fatal_error("Expected SPIR-V <Decoration> operand to be the "
                         "first element of the decoration");
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                   .addUse(Reg)
                   .addImm(static_cast<uint32_t>(DecorationId->getZExtValue()));
    for (unsigned OpI = 1, OpE = OpMD->getNumOperands(); OpI != OpE; ++OpI) {
      if (ConstantInt *OpV =
              mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(OpI)))
        MIB.addImm(static_cast<uint32_t>(OpV->getZExtValue()));
      else if (MDString *OpV = dyn_cast<MDString>(OpMD->getOperand(OpI)))
        addStringImm(OpV->getString(), MIB);
      else
        report_fatal_error("Unexpected operand of the decoration");
    }
  }
}

MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I) {
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  MachineBasicBlock::iterator It = MBB->SkipPHIsAndLabels(MBB->begin()),
                              E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      ++It;
      break;
    }
  }
  return It;
}

MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator I = MBB->end();
  if (I == MBB->begin())
    return I;
  --I;
  while (I->isTerminator() || I->isDebugValue()) {
    if (I == MBB->begin())
      break;
    --I;
  }
  return I;
}

SPIRV::StorageClass::StorageClass
addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) {
  switch (AddrSpace) {
  case 0:
    return SPIRV::StorageClass::Function;
  case 1:
    return SPIRV::StorageClass::CrossWorkgroup;
  case 2:
    return SPIRV::StorageClass::UniformConstant;
  case 3:
    return SPIRV::StorageClass::Workgroup;
  case 4:
    return SPIRV::StorageClass::Generic;
  case 5:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::DeviceOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 6:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::HostOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 7:
    return SPIRV::StorageClass::Input;
  case 8:
    return SPIRV::StorageClass::Output;
  case 9:
    return SPIRV::StorageClass::CodeSectionINTEL;
  case 10:
    return SPIRV::StorageClass::Private;
  case 11:
    return SPIRV::StorageClass::StorageBuffer;
  case 12:
    return SPIRV::StorageClass::Uniform;
  default:
    report_fatal_error("Unknown address space");
  }
}
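// Illustrative mapping, assuming the usual OpenCL address-space numbering:
// addrspace(1) ("global") becomes CrossWorkgroup, addrspace(3) ("local")
// becomes Workgroup, and addrspace(4) ("generic") becomes Generic.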

SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::StorageBuffer:
  case SPIRV::StorageClass::Uniform:
    return SPIRV::MemorySemantics::UniformMemory;
  case SPIRV::StorageClass::Workgroup:
    return SPIRV::MemorySemantics::WorkgroupMemory;
  case SPIRV::StorageClass::CrossWorkgroup:
    return SPIRV::MemorySemantics::CrossWorkgroupMemory;
  case SPIRV::StorageClass::AtomicCounter:
    return SPIRV::MemorySemantics::AtomicCounterMemory;
  case SPIRV::StorageClass::Image:
    return SPIRV::MemorySemantics::ImageMemory;
  default:
    return SPIRV::MemorySemantics::None;
  }
}

SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
  switch (Ord) {
  case AtomicOrdering::Acquire:
    return SPIRV::MemorySemantics::Acquire;
  case AtomicOrdering::Release:
    return SPIRV::MemorySemantics::Release;
  case AtomicOrdering::AcquireRelease:
    return SPIRV::MemorySemantics::AcquireRelease;
  case AtomicOrdering::SequentiallyConsistent:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::NotAtomic:
    return SPIRV::MemorySemantics::None;
  }
  llvm_unreachable(nullptr);
}

SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id) {
  // Named by
  // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#_scope_id.
  // We don't need aliases for Invocation and CrossDevice, as we already have
  // them covered by "singlethread" and "" strings respectively (see
  // implementation of LLVMContext::LLVMContext()).
  static const llvm::SyncScope::ID SubGroup =
      Ctx.getOrInsertSyncScopeID("subgroup");
  static const llvm::SyncScope::ID WorkGroup =
      Ctx.getOrInsertSyncScopeID("workgroup");
  static const llvm::SyncScope::ID Device =
      Ctx.getOrInsertSyncScopeID("device");

  if (Id == llvm::SyncScope::SingleThread)
    return SPIRV::Scope::Invocation;
  else if (Id == llvm::SyncScope::System)
    return SPIRV::Scope::CrossDevice;
  else if (Id == SubGroup)
    return SPIRV::Scope::Subgroup;
  else if (Id == WorkGroup)
    return SPIRV::Scope::Workgroup;
  else if (Id == Device)
    return SPIRV::Scope::Device;
  return SPIRV::Scope::CrossDevice;
}
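// For example, a fence or atomic tagged with the "workgroup" sync scope maps
// to SPIRV::Scope::Workgroup; any scope name not recognized above falls back
// conservatively to CrossDevice.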

MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
                                       const MachineRegisterInfo *MRI) {
  MachineInstr *MI = MRI->getVRegDef(ConstReg);
  MachineInstr *ConstInstr =
      MI->getOpcode() == SPIRV::G_TRUNC || MI->getOpcode() == SPIRV::G_ZEXT
          ? MRI->getVRegDef(MI->getOperand(1).getReg())
          : MI;
  if (auto *GI = dyn_cast<GIntrinsic>(ConstInstr)) {
    if (GI->is(Intrinsic::spv_track_constant)) {
      ConstReg = ConstInstr->getOperand(2).getReg();
      return MRI->getVRegDef(ConstReg);
    }
  } else if (ConstInstr->getOpcode() == SPIRV::ASSIGN_TYPE) {
    ConstReg = ConstInstr->getOperand(1).getReg();
    return MRI->getVRegDef(ConstReg);
  } else if (ConstInstr->getOpcode() == TargetOpcode::G_CONSTANT ||
             ConstInstr->getOpcode() == TargetOpcode::G_FCONSTANT) {
    ConstReg = ConstInstr->getOperand(0).getReg();
    return ConstInstr;
  }
  return MRI->getVRegDef(ConstReg);
}

uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
  const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
  assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
  return MI->getOperand(1).getCImm()->getValue().getZExtValue();
}

bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) {
  if (const auto *GI = dyn_cast<GIntrinsic>(&MI))
    return GI->is(IntrinsicID);
  return false;
}

Type *getMDOperandAsType(const MDNode *N, unsigned I) {
  Type *ElementTy = cast<ValueAsMetadata>(N->getOperand(I))->getType();
  return toTypedPointer(ElementTy);
}

// The set of names is borrowed from the SPIR-V translator.
// TODO: may be implemented in SPIRVBuiltins.td.
static bool isPipeOrAddressSpaceCastBI(const StringRef MangledName) {
  return MangledName == "write_pipe_2" || MangledName == "read_pipe_2" ||
         MangledName == "write_pipe_2_bl" || MangledName == "read_pipe_2_bl" ||
         MangledName == "write_pipe_4" || MangledName == "read_pipe_4" ||
         MangledName == "reserve_write_pipe" ||
         MangledName == "reserve_read_pipe" ||
         MangledName == "commit_write_pipe" ||
         MangledName == "commit_read_pipe" ||
         MangledName == "work_group_reserve_write_pipe" ||
         MangledName == "work_group_reserve_read_pipe" ||
         MangledName == "work_group_commit_write_pipe" ||
         MangledName == "work_group_commit_read_pipe" ||
         MangledName == "get_pipe_num_packets_ro" ||
         MangledName == "get_pipe_max_packets_ro" ||
         MangledName == "get_pipe_num_packets_wo" ||
         MangledName == "get_pipe_max_packets_wo" ||
         MangledName == "sub_group_reserve_write_pipe" ||
         MangledName == "sub_group_reserve_read_pipe" ||
         MangledName == "sub_group_commit_write_pipe" ||
         MangledName == "sub_group_commit_read_pipe" ||
         MangledName == "to_global" || MangledName == "to_local" ||
         MangledName == "to_private";
}

static bool isEnqueueKernelBI(const StringRef MangledName) {
  return MangledName == "__enqueue_kernel_basic" ||
         MangledName == "__enqueue_kernel_basic_events" ||
         MangledName == "__enqueue_kernel_varargs" ||
         MangledName == "__enqueue_kernel_events_varargs";
}

static bool isKernelQueryBI(const StringRef MangledName) {
  return MangledName == "__get_kernel_work_group_size_impl" ||
         MangledName == "__get_kernel_sub_group_count_for_ndrange_impl" ||
         MangledName == "__get_kernel_max_sub_group_size_for_ndrange_impl" ||
         MangledName == "__get_kernel_preferred_work_group_size_multiple_impl";
}

static bool isNonMangledOCLBuiltin(StringRef Name) {
  if (!Name.starts_with("__"))
    return false;

  return isEnqueueKernelBI(Name) || isKernelQueryBI(Name) ||
         isPipeOrAddressSpaceCastBI(Name.drop_front(2)) ||
         Name == "__translate_sampler_initializer";
}

std::string getOclOrSpirvBuiltinDemangledName(StringRef Name) {
  bool IsNonMangledOCL = isNonMangledOCLBuiltin(Name);
  bool IsNonMangledSPIRV = Name.starts_with("__spirv_");
  bool IsNonMangledHLSL = Name.starts_with("__hlsl_");
  bool IsMangled = Name.starts_with("_Z");

  // Non-mangled names (and anything not Itanium-mangled) are returned as-is.
  if (IsNonMangledOCL || IsNonMangledSPIRV || IsNonMangledHLSL || !IsMangled)
    return Name.str();

  // Try to use the itanium demangler.
  if (char *DemangledName = itaniumDemangle(Name.data())) {
    std::string Result = DemangledName;
    free(DemangledName);
    return Result;
  }

  // Heuristic parse of C++ mangling; an explicit check of the source language
  // may be needed. OpenCL C++ built-ins are declared in the cl namespace.
  // TODO: consider using the 'St' abbreviation for cl namespace mangling,
  // similar to ::std:: in C++.
  size_t Start, Len = 0;
  size_t DemangledNameLenStart = 2;
  if (Name.starts_with("_ZN")) {
    // Skip CV and ref qualifiers.
    size_t NameSpaceStart = Name.find_first_not_of("rVKRO", 3);
    // All built-ins are in the ::cl:: namespace.
    if (Name.substr(NameSpaceStart, 11) != "2cl7__spirv")
      return std::string();
    DemangledNameLenStart = NameSpaceStart + 11;
  }
  Start = Name.find_first_not_of("0123456789", DemangledNameLenStart);
  [[maybe_unused]] bool Error =
      Name.substr(DemangledNameLenStart, Start - DemangledNameLenStart)
          .getAsInteger(10, Len);
  assert(!Error && "Failed to parse demangled name length");
  return Name.substr(Start, Len).str();
}
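// Sketch of the fallback parsing above (illustrative, for a name the itanium
// demangler rejects): in "_Z4fmaxff" the digits after "_Z" encode the name
// length 4, so the function returns "fmax"; for "_ZN..." names the namespace
// prefix (e.g. cl::__spirv) is skipped before the length is read.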

bool hasBuiltinTypePrefix(StringRef Name) {
  if (Name.starts_with("opencl.") || Name.starts_with("ocl_") ||
      Name.starts_with("spirv."))
    return true;
  return false;
}

bool isSpecialOpaqueType(const Type *Ty) {
  if (const TargetExtType *ExtTy = dyn_cast<TargetExtType>(Ty))
    return isTypedPointerWrapper(ExtTy)
               ? false
               : hasBuiltinTypePrefix(ExtTy->getName());

  return false;
}

bool isEntryPoint(const Function &F) {
  // OpenCL handling: any function with the SPIR_KERNEL
  // calling convention will be a potential entry point.
  if (F.getCallingConv() == CallingConv::SPIR_KERNEL)
    return true;

  // HLSL handling: a special attribute is emitted by the
  // front-end.
  if (F.getFnAttribute("hlsl.shader").isValid())
    return true;

  return false;
}

Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx) {
  TypeName.consume_front("atomic_");
  if (TypeName.consume_front("void"))
    return Type::getVoidTy(Ctx);
  else if (TypeName.consume_front("bool") || TypeName.consume_front("_Bool"))
    return Type::getIntNTy(Ctx, 1);
  else if (TypeName.consume_front("char") ||
           TypeName.consume_front("signed char") ||
           TypeName.consume_front("unsigned char") ||
           TypeName.consume_front("uchar"))
    return Type::getInt8Ty(Ctx);
  else if (TypeName.consume_front("short") ||
           TypeName.consume_front("signed short") ||
           TypeName.consume_front("unsigned short") ||
           TypeName.consume_front("ushort"))
    return Type::getInt16Ty(Ctx);
  else if (TypeName.consume_front("int") ||
           TypeName.consume_front("signed int") ||
           TypeName.consume_front("unsigned int") ||
           TypeName.consume_front("uint"))
    return Type::getInt32Ty(Ctx);
  else if (TypeName.consume_front("long") ||
           TypeName.consume_front("signed long") ||
           TypeName.consume_front("unsigned long") ||
           TypeName.consume_front("ulong"))
    return Type::getInt64Ty(Ctx);
  else if (TypeName.consume_front("half") ||
           TypeName.consume_front("_Float16") ||
           TypeName.consume_front("__fp16"))
    return Type::getHalfTy(Ctx);
  else if (TypeName.consume_front("float"))
    return Type::getFloatTy(Ctx);
  else if (TypeName.consume_front("double"))
    return Type::getDoubleTy(Ctx);

  // Unable to recognize SPIR-V type name.
  return nullptr;
}

std::unordered_set<BasicBlock *>
PartialOrderingVisitor::getReachableFrom(BasicBlock *Start) {
  std::queue<BasicBlock *> ToVisit;
  ToVisit.push(Start);

  std::unordered_set<BasicBlock *> Output;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    if (Output.count(BB) != 0)
      continue;
    Output.insert(BB);

    for (BasicBlock *Successor : successors(BB)) {
      if (DT.dominates(Successor, BB))
        continue;
      ToVisit.push(Successor);
    }
  }

  return Output;
}
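// Note: because edges whose target dominates their source (back-edges) are
// not followed, this traversal terminates even on cyclic CFGs.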

bool PartialOrderingVisitor::CanBeVisited(BasicBlock *BB) const {
  for (BasicBlock *P : predecessors(BB)) {
    // Ignore back-edges.
    if (DT.dominates(BB, P))
      continue;

    // One of the predecessors hasn't been visited. Not ready yet.
    if (BlockToOrder.count(P) == 0)
      return false;

    // If the block is a loop exit, the loop must be finished before
    // we can continue.
    Loop *L = LI.getLoopFor(P);
    if (L == nullptr || L->contains(BB))
      continue;

    // SPIR-V requires a single back-edge, and the backend's first step
    // transforms loops into that simplified form. If we have more than one
    // back-edge, something is wrong.
    assert(L->getNumBackEdges() <= 1);

    // If the loop has no latch, the loop's rank won't matter, so we can
    // proceed.
    BasicBlock *Latch = L->getLoopLatch();
    assert(Latch);
    if (Latch == nullptr)
      continue;

    // The latch is not ready yet, let's wait.
    if (BlockToOrder.count(Latch) == 0)
      return false;
  }

  return true;
}

size_t PartialOrderingVisitor::GetNodeRank(BasicBlock *BB) const {
  auto It = BlockToOrder.find(BB);
  if (It != BlockToOrder.end())
    return It->second.Rank;

  size_t result = 0;
  for (BasicBlock *P : predecessors(BB)) {
    // Ignore back-edges.
    if (DT.dominates(BB, P))
      continue;

    auto Iterator = BlockToOrder.end();
    Loop *L = LI.getLoopFor(P);
    BasicBlock *Latch = L ? L->getLoopLatch() : nullptr;

    // If the predecessor is either outside a loop, or part of
    // the same loop, simply take its rank + 1.
    if (L == nullptr || L->contains(BB) || Latch == nullptr) {
      Iterator = BlockToOrder.find(P);
    } else {
      // Otherwise, take the loop's rank (highest rank in the loop) as base.
      // Since loops have a single latch, highest rank is easy to find.
      // If the loop has no latch, then it doesn't matter.
      Iterator = BlockToOrder.find(Latch);
    }

    assert(Iterator != BlockToOrder.end());
    result = std::max(result, Iterator->second.Rank + 1);
  }

  return result;
}
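// Rank intuition (illustrative): in a straight-line CFG A -> B -> C the ranks
// are 0, 1, 2; a diamond's join block takes the max over its predecessors
// plus one; and blocks after a loop rank above the loop's latch, which
// carries the highest rank inside the loop.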

size_t PartialOrderingVisitor::visit(BasicBlock *BB, size_t Unused) {
  ToVisit.push(BB);
  Queued.insert(BB);

  size_t QueueIndex = 0;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    if (!CanBeVisited(BB)) {
      ToVisit.push(BB);
      if (QueueIndex >= ToVisit.size())
        report_fatal_error(
            "No valid candidate in the queue. Is the graph reducible?");
      QueueIndex++;
      continue;
    }

    QueueIndex = 0;
    size_t Rank = GetNodeRank(BB);
    OrderInfo Info = {Rank, BlockToOrder.size()};
    BlockToOrder.emplace(BB, Info);

    for (BasicBlock *S : successors(BB)) {
      if (Queued.count(S) != 0)
        continue;
      ToVisit.push(S);
      Queued.insert(S);
    }
  }

  return 0;
}

PartialOrderingVisitor::PartialOrderingVisitor(Function &F) {
  DT.recalculate(F);
  LI = LoopInfo(DT);

  visit(&*F.begin(), 0);

  Order.reserve(F.size());
  for (auto &[BB, Info] : BlockToOrder)
    Order.emplace_back(BB);

  std::sort(Order.begin(), Order.end(), [&](const auto &LHS, const auto &RHS) {
    return compare(LHS, RHS);
  });
}

bool PartialOrderingVisitor::compare(const BasicBlock *LHS,
                                     const BasicBlock *RHS) const {
  const OrderInfo &InfoLHS = BlockToOrder.at(const_cast<BasicBlock *>(LHS));
  const OrderInfo &InfoRHS = BlockToOrder.at(const_cast<BasicBlock *>(RHS));
  if (InfoLHS.Rank != InfoRHS.Rank)
    return InfoLHS.Rank < InfoRHS.Rank;
  return InfoLHS.TraversalIndex < InfoRHS.TraversalIndex;
}

void PartialOrderingVisitor::partialOrderVisit(
    BasicBlock &Start, std::function<bool(BasicBlock *)> Op) {
  std::unordered_set<BasicBlock *> Reachable = getReachableFrom(&Start);
  assert(BlockToOrder.count(&Start) != 0);

  // Skip blocks that come before |Start| in the ordering.
  auto It = Order.begin();
  while (It != Order.end() && *It != &Start)
    ++It;

  // This is unexpected. Worst case |Start| is the last block,
  // so It should point to the last block, not past-end.
  assert(It != Order.end());

  // By default, there is no rank limit.
  std::optional<size_t> EndRank = std::nullopt;
  for (; It != Order.end(); ++It) {
    if (EndRank.has_value() && BlockToOrder[*It].Rank > *EndRank)
      break;

    if (Reachable.count(*It) == 0) {
      continue;
    }

    if (!Op(*It)) {
      EndRank = BlockToOrder[*It].Rank;
    }
  }
}

bool sortBlocks(Function &F) {
  if (F.size() == 0)
    return false;

  bool Modified = false;
  std::vector<BasicBlock *> Order;
  Order.reserve(F.size());

  ReversePostOrderTraversal<Function *> RPOT(&F);
  llvm::append_range(Order, RPOT);

  assert(&*F.begin() == Order[0]);
  BasicBlock *LastBlock = &*F.begin();
  for (BasicBlock *BB : Order) {
    if (BB != LastBlock && &*LastBlock->getNextNode() != BB) {
      Modified = true;
      BB->moveAfter(LastBlock);
    }
    LastBlock = BB;
  }

  return Modified;
}

MachineInstr *getVRegDef(MachineRegisterInfo &MRI, Register Reg) {
  MachineInstr *MaybeDef = MRI.getVRegDef(Reg);
  if (MaybeDef && MaybeDef->getOpcode() == SPIRV::ASSIGN_TYPE)
    MaybeDef = MRI.getVRegDef(MaybeDef->getOperand(1).getReg());
  return MaybeDef;
}

bool getVacantFunctionName(Module &M, std::string &Name) {
  // A bit of paranoia: cap the iteration count so the loop cannot run for
  // too long.
  constexpr unsigned MaxIters = 1024;
  for (unsigned I = 0; I < MaxIters; ++I) {
    std::string OrdName = Name + Twine(I).str();
    if (!M.getFunction(OrdName)) {
      Name = std::move(OrdName);
      return true;
    }
  }
  return false;
}

// Assign SPIR-V type to the register. If the register has no valid assigned
// class, set register LLT type and class according to the SPIR-V type.
void setRegClassType(Register Reg, SPIRVType *SpvType, SPIRVGlobalRegistry *GR,
                     MachineRegisterInfo *MRI, const MachineFunction &MF,
                     bool Force) {
  GR->assignSPIRVTypeToVReg(SpvType, Reg, MF);
  if (!MRI->getRegClassOrNull(Reg) || Force) {
    MRI->setRegClass(Reg, GR->getRegClass(SpvType));
    MRI->setType(Reg, GR->getRegType(SpvType));
  }
}

// Create a SPIR-V type, assign SPIR-V type to the register. If the register
// has no valid assigned class, set register LLT type and class according to
// the SPIR-V type.
void setRegClassType(Register Reg, const Type *Ty, SPIRVGlobalRegistry *GR,
                     MachineIRBuilder &MIRBuilder,
                     SPIRV::AccessQualifier::AccessQualifier AccessQual,
                     bool EmitIR, bool Force) {
  setRegClassType(Reg,
                  GR->getOrCreateSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR),
                  GR, MIRBuilder.getMRI(), MIRBuilder.getMF(), Force);
}

// Create a virtual register and assign SPIR-V type to the register. Set
// register LLT type and class according to the SPIR-V type.
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR,
                               MachineRegisterInfo *MRI,
                               const MachineFunction &MF) {
  Register Reg = MRI->createVirtualRegister(GR->getRegClass(SpvType));
  MRI->setType(Reg, GR->getRegType(SpvType));
  GR->assignSPIRVTypeToVReg(SpvType, Reg, MF);
  return Reg;
}

// Create a virtual register and assign SPIR-V type to the register. Set
// register LLT type and class according to the SPIR-V type.
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR,
                               MachineIRBuilder &MIRBuilder) {
  return createVirtualRegister(SpvType, GR, MIRBuilder.getMRI(),
                               MIRBuilder.getMF());
}

// Create a SPIR-V type, virtual register and assign SPIR-V type to the
// register. Set register LLT type and class according to the SPIR-V type.
Register createVirtualRegister(
    const Type *Ty, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIRBuilder,
    SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR) {
  return createVirtualRegister(
      GR->getOrCreateSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR), GR,
      MIRBuilder);
}

CallInst *buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef<Type *> Types,
                          Value *Arg, Value *Arg2, ArrayRef<Constant *> Imms,
                          IRBuilder<> &B) {
  SmallVector<Value *, 4> Args;
  Args.push_back(Arg2);
  Args.push_back(buildMD(Arg));
  llvm::append_range(Args, Imms);
  return B.CreateIntrinsic(IntrID, {Types}, Args);
}

// Return true if there is an opaque pointer type nested in the argument.
bool isNestedPointer(const Type *Ty) {
  if (Ty->isPtrOrPtrVectorTy())
    return true;
  if (const FunctionType *RefTy = dyn_cast<FunctionType>(Ty)) {
    if (isNestedPointer(RefTy->getReturnType()))
      return true;
    for (const Type *ArgTy : RefTy->params())
      if (isNestedPointer(ArgTy))
        return true;
    return false;
  }
  if (const ArrayType *RefTy = dyn_cast<ArrayType>(Ty))
    return isNestedPointer(RefTy->getElementType());
  return false;
}
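// e.g. both [4 x ptr] and a function type i32(ptr) report true here, while
// [4 x i32] reports false; vectors of pointers are caught directly by
// isPtrOrPtrVectorTy().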

bool isSpvIntrinsic(const Value *Arg) {
  if (const auto *II = dyn_cast<IntrinsicInst>(Arg))
    if (Function *F = II->getCalledFunction())
      if (F->getName().starts_with("llvm.spv."))
        return true;
  return false;
}

// Function to create continued instructions for SPV_INTEL_long_composites
// extension
SmallVector<MachineInstr *, 4>
createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                            unsigned MinWC, unsigned ContinuedOpcode,
                            ArrayRef<Register> Args, Register ReturnRegister,
                            Register TypeID) {
  SmallVector<MachineInstr *, 4> Instructions;
  constexpr unsigned MaxWordCount = UINT16_MAX;
  const size_t NumElements = Args.size();
  size_t MaxNumElements = MaxWordCount - MinWC;
  size_t SPIRVStructNumElements = NumElements;

  if (NumElements > MaxNumElements) {
    // Adjust for continued instructions, which always have a minimum word
    // count of one.
    SPIRVStructNumElements = MaxNumElements;
    MaxNumElements = MaxWordCount - 1;
  }

  auto MIB =
      MIRBuilder.buildInstr(Opcode).addDef(ReturnRegister).addUse(TypeID);

  for (size_t I = 0; I < SPIRVStructNumElements; ++I)
    MIB.addUse(Args[I]);

  Instructions.push_back(MIB.getInstr());

  for (size_t I = SPIRVStructNumElements; I < NumElements;
       I += MaxNumElements) {
    auto MIB = MIRBuilder.buildInstr(ContinuedOpcode);
    for (size_t J = I; J < std::min(I + MaxNumElements, NumElements); ++J)
      MIB.addUse(Args[J]);
    Instructions.push_back(MIB.getInstr());
  }
  return Instructions;
}
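// Illustrative sizing, assuming MinWC covers the leading opcode/result/type
// words: with the 16-bit word count capped at 65535, the main instruction
// carries the first 65535 - MinWC operands and each continued instruction
// carries up to 65534 more, so arbitrarily long composites split into a
// chain.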

SmallVector<unsigned, 1> getSpirvLoopControlOperandsFromLoopMetadata(Loop *L) {
  unsigned LC = SPIRV::LoopControl::None;
  // Currently used only to store the PartialCount value. Later, when other
  // LoopControls are added, this map should be sorted before its entries are
  // appended as loop_merge operands, to satisfy the requirements of section
  // 3.23 (Loop Control) of the SPIR-V specification.
  std::vector<std::pair<unsigned, unsigned>> MaskToValueMap;
  if (getBooleanLoopAttribute(L, "llvm.loop.unroll.disable")) {
    LC |= SPIRV::LoopControl::DontUnroll;
  } else {
    if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable") ||
        getBooleanLoopAttribute(L, "llvm.loop.unroll.full")) {
      LC |= SPIRV::LoopControl::Unroll;
    }
    std::optional<int> Count =
        getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
    if (Count && Count != 1) {
      LC |= SPIRV::LoopControl::PartialCount;
      MaskToValueMap.emplace_back(
          std::make_pair(SPIRV::LoopControl::PartialCount, *Count));
    }
  }
  SmallVector<unsigned, 1> Result = {LC};
  for (auto &[Mask, Val] : MaskToValueMap)
    Result.push_back(Val);
  return Result;
}
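// For example (illustrative): metadata in the style of "#pragma unroll 4"
// (llvm.loop.unroll.count = 4) yields {LoopControl::PartialCount, 4}, while
// llvm.loop.unroll.disable yields just {LoopControl::DontUnroll}.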

const std::set<unsigned> &getTypeFoldingSupportedOpcodes() {
  // clang-format off
  static const std::set<unsigned> TypeFoldingSupportingOpcs = {
      TargetOpcode::G_ADD,
      TargetOpcode::G_FADD,
      TargetOpcode::G_STRICT_FADD,
      TargetOpcode::G_SUB,
      TargetOpcode::G_FSUB,
      TargetOpcode::G_STRICT_FSUB,
      TargetOpcode::G_MUL,
      TargetOpcode::G_FMUL,
      TargetOpcode::G_STRICT_FMUL,
      TargetOpcode::G_SDIV,
      TargetOpcode::G_UDIV,
      TargetOpcode::G_FDIV,
      TargetOpcode::G_STRICT_FDIV,
      TargetOpcode::G_SREM,
      TargetOpcode::G_UREM,
      TargetOpcode::G_FREM,
      TargetOpcode::G_STRICT_FREM,
      TargetOpcode::G_FNEG,
      TargetOpcode::G_CONSTANT,
      TargetOpcode::G_FCONSTANT,
      TargetOpcode::G_AND,
      TargetOpcode::G_OR,
      TargetOpcode::G_XOR,
      TargetOpcode::G_SHL,
      TargetOpcode::G_ASHR,
      TargetOpcode::G_LSHR,
      TargetOpcode::G_SELECT,
      TargetOpcode::G_EXTRACT_VECTOR_ELT,
  };
  // clang-format on
  return TypeFoldingSupportingOpcs;
}

bool isTypeFoldingSupported(unsigned Opcode) {
  return getTypeFoldingSupportedOpcodes().count(Opcode) > 0;
}

// Traversing [g]MIR accounting for pseudo-instructions.
MachineInstr *passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI) {
  return (Def->getOpcode() == SPIRV::ASSIGN_TYPE ||
          Def->getOpcode() == TargetOpcode::COPY)
             ? MRI->getVRegDef(Def->getOperand(1).getReg())
             : Def;
}

MachineInstr *getDef(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
  if (MachineInstr *Def = MRI->getVRegDef(MO.getReg()))
    return passCopy(Def, MRI);
  return nullptr;
}

MachineInstr *getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
  if (MachineInstr *Def = getDef(MO, MRI)) {
    if (Def->getOpcode() == TargetOpcode::G_CONSTANT ||
        Def->getOpcode() == SPIRV::OpConstantI)
      return Def;
  }
  return nullptr;
}

int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
  if (MachineInstr *Def = getImm(MO, MRI)) {
    if (Def->getOpcode() == SPIRV::OpConstantI)
      return Def->getOperand(2).getImm();
    if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
      return Def->getOperand(1).getCImm()->getZExtValue();
  }
  llvm_unreachable("Unexpected integer constant pattern");
}
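// e.g. for an operand defined (possibly through COPY / ASSIGN_TYPE) by
// G_CONSTANT i32 42 or an OpConstantI with value 42, foldImm returns 42;
// calling it on anything else is a programming error and hits the
// llvm_unreachable above.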

unsigned getArrayComponentCount(const MachineRegisterInfo *MRI,
                                const MachineInstr *ResType) {
  return foldImm(ResType->getOperand(2), MRI);
}

MachineBasicBlock::iterator
getFirstValidInstructionInsertPoint(MachineBasicBlock &BB) {
  // Find the position to insert the OpVariable instruction.
  // We will insert it after the last OpFunctionParameter, if any, or
  // after OpFunction otherwise.
  MachineBasicBlock::iterator VarPos = BB.begin();
  while (VarPos != BB.end() && VarPos->getOpcode() != SPIRV::OpFunction) {
    ++VarPos;
  }
  // Advance VarPos to the next instruction after OpFunction; it will either
  // be an OpFunctionParameter, so that we can start the next loop, or the
  // position to insert the OpVariable instruction.
  ++VarPos;
  while (VarPos != BB.end() &&
         VarPos->getOpcode() == SPIRV::OpFunctionParameter) {
    ++VarPos;
  }
  // VarPos now points after the last OpFunctionParameter, if any,
  // or after OpFunction, if there are no parameters.
  return VarPos != BB.end() && VarPos->getOpcode() == SPIRV::OpLabel ? ++VarPos
                                                                     : VarPos;
}

} // namespace llvm