LLVM 21.0.0git
SPIRVBuiltins.cpp
Go to the documentation of this file.
1//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements lowering builtin function calls and types using their
10// demangled names and TableGen records.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPIRVBuiltins.h"
15#include "SPIRV.h"
16#include "SPIRVSubtarget.h"
17#include "SPIRVUtils.h"
20#include "llvm/IR/IntrinsicsSPIRV.h"
21#include <regex>
22#include <string>
23#include <tuple>
24
25#define DEBUG_TYPE "spirv-builtins"
26
27namespace llvm {
28namespace SPIRV {
29#define GET_BuiltinGroup_DECL
30#include "SPIRVGenTables.inc"
31
34 InstructionSet::InstructionSet Set;
35 BuiltinGroup Group;
38};
39
40#define GET_DemangledBuiltins_DECL
41#define GET_DemangledBuiltins_IMPL
42
44 const std::string BuiltinName;
46
50
57
58 bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
59};
60
63 InstructionSet::InstructionSet Set;
65};
66
67#define GET_NativeBuiltins_DECL
68#define GET_NativeBuiltins_IMPL
69
74 bool IsElect;
84};
85
86#define GET_GroupBuiltins_DECL
87#define GET_GroupBuiltins_IMPL
88
92 bool IsBlock;
93 bool IsWrite;
94 bool IsMedia;
95};
96
97#define GET_IntelSubgroupsBuiltins_DECL
98#define GET_IntelSubgroupsBuiltins_IMPL
99
103};
104
105#define GET_AtomicFloatingBuiltins_DECL
106#define GET_AtomicFloatingBuiltins_IMPL
111};
112
113#define GET_GroupUniformBuiltins_DECL
114#define GET_GroupUniformBuiltins_IMPL
115
118 InstructionSet::InstructionSet Set;
119 BuiltIn::BuiltIn Value;
120};
121
122using namespace BuiltIn;
123#define GET_GetBuiltins_DECL
124#define GET_GetBuiltins_IMPL
125
128 InstructionSet::InstructionSet Set;
130};
131
132#define GET_ImageQueryBuiltins_DECL
133#define GET_ImageQueryBuiltins_IMPL
134
139};
140
141#define GET_IntegerDotProductBuiltins_DECL
142#define GET_IntegerDotProductBuiltins_IMPL
143
146 InstructionSet::InstructionSet Set;
151 FPRoundingMode::FPRoundingMode RoundingMode;
152};
153
156 InstructionSet::InstructionSet Set;
160 FPRoundingMode::FPRoundingMode RoundingMode;
161};
162
163using namespace FPRoundingMode;
164#define GET_ConvertBuiltins_DECL
165#define GET_ConvertBuiltins_IMPL
166
167using namespace InstructionSet;
168#define GET_VectorLoadStoreBuiltins_DECL
169#define GET_VectorLoadStoreBuiltins_IMPL
170
171#define GET_CLMemoryScope_DECL
172#define GET_CLSamplerAddressingMode_DECL
173#define GET_CLMemoryFenceFlags_DECL
174#define GET_ExtendedBuiltins_DECL
175#include "SPIRVGenTables.inc"
176} // namespace SPIRV
177
178//===----------------------------------------------------------------------===//
179// Misc functions for looking up builtins and verifying requirements using
180// TableGen records
181//===----------------------------------------------------------------------===//
182
183namespace SPIRV {
184/// Parses the name part of the demangled builtin call.
185std::string lookupBuiltinNameHelper(StringRef DemangledCall,
186 FPDecorationId *DecorationId) {
187 const static std::string PassPrefix = "(anonymous namespace)::";
188 std::string BuiltinName;
189 // Itanium Demangler result may have "(anonymous namespace)::" prefix
190 if (DemangledCall.starts_with(PassPrefix.c_str()))
191 BuiltinName = DemangledCall.substr(PassPrefix.length());
192 else
193 BuiltinName = DemangledCall;
194 // Extract the builtin function name and types of arguments from the call
195 // skeleton.
196 BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));
197
198 // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
199 if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
200 BuiltinName = BuiltinName.substr(12);
201
202 // Check if the extracted name contains type information between angle
203 // brackets. If so, the builtin is an instantiated template - needs to have
204 // the information after angle brackets and return type removed.
205 std::size_t Pos1 = BuiltinName.rfind('<');
206 if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
207 std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
208 if (Pos2 == std::string::npos)
209 Pos2 = 0;
210 else
211 ++Pos2;
212 BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
213 BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
214 }
215
216 // Check if the extracted name begins with:
217 // - "__spirv_ImageSampleExplicitLod"
218 // - "__spirv_ImageRead"
219 // - "__spirv_ImageQuerySizeLod"
220 // - "__spirv_UDotKHR"
221 // - "__spirv_SDotKHR"
222 // - "__spirv_SUDotKHR"
223 // - "__spirv_SDotAccSatKHR"
224 // - "__spirv_UDotAccSatKHR"
225 // - "__spirv_SUDotAccSatKHR"
226 // - "__spirv_ReadClockKHR"
227 // - "__spirv_SubgroupBlockReadINTEL"
228 // - "__spirv_SubgroupImageBlockReadINTEL"
229 // - "__spirv_SubgroupImageMediaBlockReadINTEL"
230 // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
231 // - "__spirv_Convert"
232 // - "__spirv_UConvert"
233 // - "__spirv_SConvert"
234 // - "__spirv_FConvert"
235 // - "__spirv_SatConvert"
236 // and contains return type information at the end "_R<type>".
237 // If so, extract the plain builtin name without the type information.
238 static const std::regex SpvWithR(
239 "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageQuerySizeLod|UDotKHR|"
240 "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
241 "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
242 "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
243 "Convert|"
244 "UConvert|SConvert|FConvert|SatConvert).*)_R[^_]*_?(\\w+)?.*");
245 std::smatch Match;
246 if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
247 std::ssub_match SubMatch;
248 if (DecorationId && Match.size() > 3) {
249 SubMatch = Match[3];
250 *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
251 }
252 SubMatch = Match[1];
253 BuiltinName = SubMatch.str();
254 }
255
256 return BuiltinName;
257}
258} // namespace SPIRV
259
260/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
261/// the provided \p DemangledCall and specified \p Set.
262///
263/// The lookup follows the following algorithm, returning the first successful
264/// match:
265/// 1. Search with the plain demangled name (expecting a 1:1 match).
266/// 2. Search with the prefix before or suffix after the demangled name
267/// signifying the type of the first argument.
268///
269/// \returns Wrapper around the demangled call and found builtin definition.
270static std::unique_ptr<const SPIRV::IncomingCall>
272 SPIRV::InstructionSet::InstructionSet Set,
273 Register ReturnRegister, const SPIRVType *ReturnType,
// Reduce the demangled call to the plain builtin name (namespace, template
// and return-type information removed).
275 std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);
276
// Split the text between the outer parentheses into the individual argument
// type names; only the first one is consulted below.
277 SmallVector<StringRef, 10> BuiltinArgumentTypes;
278 StringRef BuiltinArgs =
279 DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
280 BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);
281
282 // Look up the builtin in the defined set. Start with the plain demangled
283 // name, expecting a 1:1 match in the defined builtin set.
284 const SPIRV::DemangledBuiltin *Builtin;
285 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
286 return std::make_unique<SPIRV::IncomingCall>(
287 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
288
289 // If the initial look up was unsuccessful and the demangled call takes at
290 // least 1 argument, add a prefix or suffix signifying the type of the first
291 // argument and repeat the search.
292 if (BuiltinArgumentTypes.size() >= 1) {
// Only the first character of the first argument's type name is inspected:
// 'u' => unsigned, 'c'/'s'/'i'/'l' => signed, 'f'/'d'/'h' => floating-point.
293 char FirstArgumentType = BuiltinArgumentTypes[0][0];
294 // Prefix to be added to the builtin's name for lookup.
295 // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
296 std::string Prefix;
297
298 switch (FirstArgumentType) {
299 // Unsigned:
300 case 'u':
301 if (Set == SPIRV::InstructionSet::OpenCL_std)
302 Prefix = "u_";
303 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
304 Prefix = "u";
305 break;
306 // Signed:
307 case 'c':
308 case 's':
309 case 'i':
310 case 'l':
311 if (Set == SPIRV::InstructionSet::OpenCL_std)
312 Prefix = "s_";
313 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
314 Prefix = "s";
315 break;
316 // Floating-point:
317 case 'f':
318 case 'd':
319 case 'h':
320 if (Set == SPIRV::InstructionSet::OpenCL_std ||
321 Set == SPIRV::InstructionSet::GLSL_std_450)
322 Prefix = "f";
323 break;
324 }
325
326 // If argument-type name prefix was added, look up the builtin again.
327 if (!Prefix.empty() &&
328 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
329 return std::make_unique<SPIRV::IncomingCall>(
330 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
331
332 // If lookup with a prefix failed, find a suffix to be added to the
333 // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
334 // an unsigned value has a suffix "u".
335 std::string Suffix;
336
337 switch (FirstArgumentType) {
338 // Unsigned:
339 case 'u':
340 Suffix = "u";
341 break;
342 // Signed:
343 case 'c':
344 case 's':
345 case 'i':
346 case 'l':
347 Suffix = "s";
348 break;
349 // Floating-point:
350 case 'f':
351 case 'd':
352 case 'h':
353 Suffix = "f";
354 break;
355 }
356
357 // If argument-type name suffix was added, look up the builtin again.
358 if (!Suffix.empty() &&
359 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
360 return std::make_unique<SPIRV::IncomingCall>(
361 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
362 }
363
364 // No builtin with such name was found in the set.
365 return nullptr;
366}
367
// Walks backwards from a block-literal parameter register through the
// G_ADDRSPACE_CAST -> @llvm.spv.bitcast chain and returns the instruction
// that defines the underlying value (an @llvm.spv.alloca intrinsic or a
// G_GLOBAL_VALUE). The asserted shape of the chain is documented below.
370 // We expect the following sequence of instructions:
371 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
372 // or = G_GLOBAL_VALUE @block_literal_global
373 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
374 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
375 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
376 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
377 MI->getOperand(1).isReg());
378 Register BitcastReg = MI->getOperand(1).getReg();
379 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
380 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
381 BitcastMI->getOperand(2).isReg());
// Operand 2 of the bitcast intrinsic is the original pointer value.
382 Register ValueReg = BitcastMI->getOperand(2).getReg();
383 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
384 return ValueMI;
385}
386
387 // Return an integer constant corresponding to the given register and
388 // defined in spv_track_constant.
389 // TODO: maybe unify with prelegalizer pass.
// The register must be defined by an @llvm.spv.track_constant intrinsic
// whose operand 2 is itself defined by a G_CONSTANT; the constant's value
// is returned zero-extended to 64 bits.
391 MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
392 assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
393 DefMI->getOperand(2).isReg());
394 MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
395 assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
396 DefMI2->getOperand(1).isCImm());
397 return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
398}
399
400 // Return type of the instruction result from spv_assign_type intrinsic.
401 // TODO: maybe unify with prelegalizer pass.
// Inspects the instruction(s) immediately following MI, skipping one
// optional @llvm.spv.assign_name, and returns the IR Type recorded by a
// matching @llvm.spv.assign_type / @llvm.spv.assign_ptr_type annotation of
// MI's result register; returns nullptr when no such annotation follows.
403 MachineInstr *NextMI = MI->getNextNode();
404 if (!NextMI)
405 return nullptr;
406 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
407 if ((NextMI = NextMI->getNextNode()) == nullptr)
408 return nullptr;
409 Register ValueReg = MI->getOperand(0).getReg();
// The annotation must target exactly MI's result register.
410 if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
411 !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
412 NextMI->getOperand(1).getReg() != ValueReg)
413 return nullptr;
// The type is stored as metadata operand 2 of the annotation intrinsic.
414 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
415 assert(Ty && "Type is expected");
416 return Ty;
417}
418
// Recovers the IR type of an OpenCL block literal by tracing the parameter
// register back to its allocation site (global value or spv.alloca).
// NOTE(review): the line defining `MI` is missing from this excerpt -
// presumably `MI = getBlockStructInstr(ParamReg, MRI);` - confirm upstream.
419static const Type *getBlockStructType(Register ParamReg,
421 // In principle, this information should be passed to us from Clang via
422 // an elementtype attribute. However, said attribute requires that
423 // the function call be an intrinsic, which is not. Instead, we rely on being
424 // able to trace this to the declaration of a variable: OpenCL C specification
425 // section 6.12.5 should guarantee that we can do this.
427 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
428 return MI->getOperand(1).getGlobal()->getType();
429 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
430 "Blocks in OpenCL C must be traceable to allocation site");
431 return getMachineInstrType(MI);
432}
433
434//===----------------------------------------------------------------------===//
435// Helper functions for building misc instructions
436//===----------------------------------------------------------------------===//
437
438/// Helper function building either a resulting scalar or vector bool register
439/// depending on the expected \p ResultType.
440///
441/// \returns Tuple of the resulting register and its type.
442static std::tuple<Register, SPIRVType *>
443buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
445 LLT Type;
446 SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
447
448 if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
// Vector result: widen the bool type to a vector with the same element
// count as the requested result type.
449 unsigned VectorElements = ResultType->getOperand(2).getImm();
450 BoolType =
451 GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder);
453 cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
// i1 elements: the LLT mirrors the SPIR-V bool vector shape.
454 Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
455 } else {
456 Type = LLT::scalar(1);
457 }
458
459 Register ResultRegister =
461 MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
462 GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
463 return std::make_tuple(ResultRegister, BoolType);
464}
465
466/// Helper function for building either a vector or scalar select instruction
467/// depending on the expected \p ResultType.
///
/// Selects between an all-ones "true" constant and a zero "false" constant
/// of \p ReturnType, keyed on the (bool) \p SourceRegister.
468static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
469 Register ReturnRegister, Register SourceRegister,
470 const SPIRVType *ReturnType,
472 Register TrueConst, FalseConst;
473
474 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
// Vector case: "true" is an all-ones integer vector, "false" is zero.
475 unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
477 TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType);
478 FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType);
479 } else {
// Scalar case: plain 1 / 0 constants.
480 TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType);
481 FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType);
482 }
483
484 return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
485 FalseConst);
486}
487
488/// Helper function for building a load instruction loading into the
489/// \p DestinationReg.
///
/// When \p DestinationReg is not supplied, a fresh virtual register of
/// \p BaseType is created to receive the loaded value.
491 MachineIRBuilder &MIRBuilder,
492 SPIRVGlobalRegistry *GR, LLT LowLevelType,
493 Register DestinationReg = Register(0)) {
494 if (!DestinationReg.isValid())
495 DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
496 // TODO: consider using correct address space and alignment (p0 is canonical
497 // type for selection though).
499 MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
500 return DestinationReg;
501}
502
503/// Helper function for building a load instruction for loading a builtin global
504/// variable of \p BuiltinValue value.
///
/// Creates an Input-storage-class pointer, materializes the decorated global
/// OpVariable for \p BuiltinValue, then loads its value.
506 MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
507 SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
508 Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
509 Register NewRegister =
510 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
511 MIRBuilder.getMRI()->setType(
512 NewRegister,
513 LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
514 GR->getPointerSize()));
516 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
517 GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
518
519 // Set up the global OpVariable with the necessary builtin decorations.
520 Register Variable = GR->buildGlobalVariable(
521 NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
522 SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
523 /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
524 false);
525
526 // Load the value from the global variable.
527 Register LoadedRegister =
528 buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
529 MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
530 return LoadedRegister;
531}
532
533/// Helper external function for inserting ASSIGN_TYPE instruction between \p Reg
534/// and its definition, set the new register as a destination of the definition,
535/// assign SPIRVType to both registers. If SpirvTy is provided, use it as
536/// SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in
537/// SPIRVPreLegalizer.cpp.
538extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
539 SPIRVGlobalRegistry *GR,
540 MachineIRBuilder &MIB,
541 MachineRegisterInfo &MRI);
542
543// TODO: Move to TableGen.
544static SPIRV::MemorySemantics::MemorySemantics
545getSPIRVMemSemantics(std::memory_order MemOrder) {
546 switch (MemOrder) {
547 case std::memory_order_relaxed:
548 return SPIRV::MemorySemantics::None;
549 case std::memory_order_acquire:
550 return SPIRV::MemorySemantics::Acquire;
551 case std::memory_order_release:
552 return SPIRV::MemorySemantics::Release;
553 case std::memory_order_acq_rel:
554 return SPIRV::MemorySemantics::AcquireRelease;
555 case std::memory_order_seq_cst:
556 return SPIRV::MemorySemantics::SequentiallyConsistent;
557 default:
558 report_fatal_error("Unknown CL memory scope");
559 }
560}
561
562static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
563 switch (ClScope) {
564 case SPIRV::CLMemoryScope::memory_scope_work_item:
565 return SPIRV::Scope::Invocation;
566 case SPIRV::CLMemoryScope::memory_scope_work_group:
567 return SPIRV::Scope::Workgroup;
568 case SPIRV::CLMemoryScope::memory_scope_device:
569 return SPIRV::Scope::Device;
570 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
571 return SPIRV::Scope::CrossDevice;
572 case SPIRV::CLMemoryScope::memory_scope_sub_group:
573 return SPIRV::Scope::Subgroup;
574 }
575 report_fatal_error("Unknown CL memory scope");
576}
577
// Builds (or reuses) a 32-bit integer constant register holding the given
// value, typed with the cached SPIR-V 32-bit integer type.
579 MachineIRBuilder &MIRBuilder,
581 return GR->buildConstantInt(Val, MIRBuilder,
582 GR->getOrCreateSPIRVIntegerType(32, MIRBuilder));
583}
584
// Resolves the scope operand for an atomic/barrier instruction: reuses the
// caller-supplied constant register when its CL value coincides numerically
// with the SPIR-V scope, otherwise materializes a fresh 32-bit constant.
585static Register buildScopeReg(Register CLScopeRegister,
586 SPIRV::Scope::Scope Scope,
587 MachineIRBuilder &MIRBuilder,
590 if (CLScopeRegister.isValid()) {
591 auto CLScope =
592 static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
593 Scope = getSPIRVScope(CLScope);
594
// Reuse the incoming register only when no value translation is needed.
595 if (CLScope == static_cast<unsigned>(Scope)) {
596 MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
597 return CLScopeRegister;
598 }
599 }
600 return buildConstantIntReg32(Scope, MIRBuilder, GR);
601}
602
// Assigns a register class to Reg if it does not have one yet: the class
// derived from its SPIR-V type when known, otherwise the generic integer
// ID class.
605 if (MRI->getRegClassOrNull(Reg))
606 return;
607 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
608 MRI->setRegClass(Reg,
609 SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
610}
611
// Resolves the memory-semantics operand for an atomic instruction. Updates
// \p Semantics in place from the constant memory order in
// \p SemanticsRegister; reuses that register when the numeric values happen
// to coincide, otherwise builds a fresh 32-bit constant.
612static Register buildMemSemanticsReg(Register SemanticsRegister,
613 Register PtrRegister, unsigned &Semantics,
614 MachineIRBuilder &MIRBuilder,
616 if (SemanticsRegister.isValid()) {
617 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
618 std::memory_order Order =
619 static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
620 Semantics =
621 getSPIRVMemSemantics(Order) |
// Reuse the incoming register only when no value translation is needed.
623 if (static_cast<unsigned>(Order) == Semantics) {
624 MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
625 return SemanticsRegister;
626 }
627 }
628 return buildConstantIntReg32(Semantics, MIRBuilder, GR);
629}
630
// Emits the SPIR-V instruction \p Opcode directly from a "__spirv_..."
// wrapper call. When \p TypeReg is valid, the result register and its type
// become the leading operands. The first Arguments.size() - ImmArgs.size()
// call arguments are forwarded as register uses; the values in \p ImmArgs
// are then appended as immediate operands in place of the remaining
// arguments.
631static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
632 const SPIRV::IncomingCall *Call,
633 Register TypeReg,
634 ArrayRef<uint32_t> ImmArgs = {}) {
635 auto MIB = MIRBuilder.buildInstr(Opcode);
636 if (TypeReg.isValid())
637 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
638 unsigned Sz = Call->Arguments.size() - ImmArgs.size();
639 for (unsigned i = 0; i < Sz; ++i)
640 MIB.addUse(Call->Arguments[i]);
641 for (uint32_t ImmArg : ImmArgs)
642 MIB.addImm(ImmArg);
643 return true;
644}
645
646/// Helper function for translating atomic init to OpStore.
///
/// atomic_init has no ordering/scope semantics, so it lowers to a plain
/// OpStore of Arguments[1] through the pointer in Arguments[0].
648 MachineIRBuilder &MIRBuilder) {
649 if (Call->isSpirvOp())
650 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
651
652 assert(Call->Arguments.size() == 2 &&
653 "Need 2 arguments for atomic init translation");
654 MIRBuilder.buildInstr(SPIRV::OpStore)
655 .addUse(Call->Arguments[0])
656 .addUse(Call->Arguments[1]);
657 return true;
658}
659
660/// Helper function for building an atomic load instruction.
///
/// Scope defaults to Device and semantics to SequentiallyConsistent when the
/// call does not pass them explicitly as Arguments[1]/Arguments[2].
662 MachineIRBuilder &MIRBuilder,
664 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
665 if (Call->isSpirvOp())
666 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
667
668 Register PtrRegister = Call->Arguments[0];
669 // TODO: if true insert call to __translate_ocl_memory_scope before
670 // OpAtomicLoad and the function implementation. We can use Translator's
671 // output for transcoding/atomic_explicit_arguments.cl as an example.
672 Register ScopeRegister =
673 Call->Arguments.size() > 1
674 ? Call->Arguments[1]
675 : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
676 Register MemSemanticsReg;
677 if (Call->Arguments.size() > 2) {
678 // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
679 MemSemanticsReg = Call->Arguments[2];
680 } else {
681 int Semantics =
682 SPIRV::MemorySemantics::SequentiallyConsistent |
684 MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
685 }
686
687 MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
688 .addDef(Call->ReturnRegister)
689 .addUse(TypeReg)
690 .addUse(PtrRegister)
691 .addUse(ScopeRegister)
692 .addUse(MemSemanticsReg);
693 return true;
694}
695
696/// Helper function for building an atomic store instruction.
///
/// Always uses Device scope and SequentiallyConsistent semantics (plus the
/// storage-class-derived bits); Arguments[0] is the pointer, Arguments[1]
/// the value to store.
698 MachineIRBuilder &MIRBuilder,
700 if (Call->isSpirvOp())
701 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call, Register(0));
702
703 Register ScopeRegister =
704 buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
705 Register PtrRegister = Call->Arguments[0];
706 int Semantics =
707 SPIRV::MemorySemantics::SequentiallyConsistent |
709 Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
710 MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
711 .addUse(PtrRegister)
712 .addUse(ScopeRegister)
713 .addUse(MemSemanticsReg)
714 .addUse(Call->Arguments[1]);
715 return true;
716}
717
718/// Helper function for building an atomic compare-exchange instruction.
///
/// Lowers both the OpenCL atomic_compare_exchange_* builtins and the legacy
/// atom_cmpxchg/atomic_cmpxchg forms (detected via the builtin name). For
/// the non-cmpxchg form, the "expected" argument is a pointer: its pointee
/// is loaded for the comparison, the old value is stored back on completion
/// and a bool comparison result is produced.
720 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
721 unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
722 if (Call->isSpirvOp())
723 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
724 GR->getSPIRVTypeID(Call->ReturnType));
725
726 bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
727 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
728
729 Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object.)
730 Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
731 Register Desired = Call->Arguments[2]; // Value (C Desired).
732 SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
733 LLT DesiredLLT = MRI->getType(Desired);
734
735 assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
736 SPIRV::OpTypePointer);
737 unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
738 (void)ExpectedType;
739 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
740 : ExpectedType == SPIRV::OpTypePointer);
741 assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));
742
743 SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
744 assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
// The pointer's storage class contributes extra memory-semantics bits.
745 auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
746 SpvObjectPtrTy->getOperand(1).getImm());
747 auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);
748
749 Register MemSemEqualReg;
750 Register MemSemUnequalReg;
751 uint64_t MemSemEqual =
752 IsCmpxchg
753 ? SPIRV::MemorySemantics::None
754 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
755 uint64_t MemSemUnequal =
756 IsCmpxchg
757 ? SPIRV::MemorySemantics::None
758 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
759 if (Call->Arguments.size() >= 4) {
760 assert(Call->Arguments.size() >= 5 &&
761 "Need 5+ args for explicit atomic cmpxchg");
762 auto MemOrdEq =
763 static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
764 auto MemOrdNeq =
765 static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
766 MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
767 MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
768 if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
769 MemSemEqualReg = Call->Arguments[3];
// NOTE(review): comparing MemOrdNeq against MemSemEqual looks like a
// copy-paste bug - the unequal register reuse should presumably test
// MemSemUnequal. Confirm against upstream before relying on this path.
770 if (static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
771 MemSemUnequalReg = Call->Arguments[4];
772 }
773 if (!MemSemEqualReg.isValid())
774 MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR)
775 if (!MemSemUnequalReg.isValid())
776 MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);
777
778 Register ScopeReg;
779 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
780 if (Call->Arguments.size() >= 6) {
781 assert(Call->Arguments.size() == 6 &&
782 "Extra args for explicit atomic cmpxchg");
783 auto ClScope = static_cast<SPIRV::CLMemoryScope>(
784 getIConstVal(Call->Arguments[5], MRI));
785 Scope = getSPIRVScope(ClScope);
786 if (ClScope == static_cast<unsigned>(Scope))
787 ScopeReg = Call->Arguments[5];
788 }
789 if (!ScopeReg.isValid())
790 ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);
791
// For the pointer-comparator form, load the expected value for the final
// comparison; for cmpxchg the argument already is the expected value.
792 Register Expected = IsCmpxchg
793 ? ExpectedArg
794 : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
795 GR, LLT::scalar(64));
796 MRI->setType(Expected, DesiredLLT);
797 Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
798 : Call->ReturnRegister;
799 if (!MRI->getRegClassOrNull(Tmp))
800 MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
801 GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());
802
803 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
804 MIRBuilder.buildInstr(Opcode)
805 .addDef(Tmp)
806 .addUse(GR->getSPIRVTypeID(IntTy))
807 .addUse(ObjectPtr)
808 .addUse(ScopeReg)
809 .addUse(MemSemEqualReg)
810 .addUse(MemSemUnequalReg)
811 .addUse(Desired)
813 if (!IsCmpxchg) {
// Write the old value back through "expected" and return the bool result.
814 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
815 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
816 }
817 return true;
818}
819
820/// Helper function for building atomic instructions.
///
/// Lowers an atomic read-modify-write builtin: Arguments[0] is the pointer,
/// Arguments[1] the value, optional Arguments[2]/[3] are explicit memory
/// order and scope (defaults: storage-class semantics, Workgroup scope).
821static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
822 MachineIRBuilder &MIRBuilder,
824 if (Call->isSpirvOp())
825 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
826 GR->getSPIRVTypeID(Call->ReturnType));
827
828 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
829 Register ScopeRegister =
830 Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
831
832 assert(Call->Arguments.size() <= 4 &&
833 "Too many args for explicit atomic RMW");
834 ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
835 MIRBuilder, GR, MRI);
836
837 Register PtrRegister = Call->Arguments[0];
838 unsigned Semantics = SPIRV::MemorySemantics::None;
839 Register MemSemanticsReg =
840 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
841 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
842 Semantics, MIRBuilder, GR);
843 Register ValueReg = Call->Arguments[1];
844 Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
845 // support cl_ext_float_atomics
846 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
847 if (Opcode == SPIRV::OpAtomicIAdd) {
848 Opcode = SPIRV::OpAtomicFAddEXT;
849 } else if (Opcode == SPIRV::OpAtomicISub) {
850 // Translate OpAtomicISub applied to a floating type argument to
851 // OpAtomicFAddEXT with the negative value operand
852 Opcode = SPIRV::OpAtomicFAddEXT;
853 Register NegValueReg =
854 MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
855 MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
856 GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
857 MIRBuilder.getMF());
858 MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
859 .addDef(NegValueReg)
860 .addUse(ValueReg);
861 insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
862 MIRBuilder.getMF().getRegInfo());
863 ValueReg = NegValueReg;
864 }
865 }
866 MIRBuilder.buildInstr(Opcode)
867 .addDef(Call->ReturnRegister)
868 .addUse(ValueTypeReg)
869 .addUse(PtrRegister)
870 .addUse(ScopeRegister)
871 .addUse(MemSemanticsReg)
872 .addUse(ValueReg);
873 return true;
874}
875
876/// Helper function for building an atomic floating-type instruction.
///
/// All four operands (pointer, scope, semantics, value) are passed
/// explicitly by the builtin; they are forwarded verbatim.
878 unsigned Opcode,
879 MachineIRBuilder &MIRBuilder,
881 assert(Call->Arguments.size() == 4 &&
882 "Wrong number of atomic floating-type builtin");
883 Register PtrReg = Call->Arguments[0];
884 Register ScopeReg = Call->Arguments[1];
885 Register MemSemanticsReg = Call->Arguments[2];
886 Register ValueReg = Call->Arguments[3];
887 MIRBuilder.buildInstr(Opcode)
888 .addDef(Call->ReturnRegister)
889 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
890 .addUse(PtrReg)
891 .addUse(ScopeReg)
892 .addUse(MemSemanticsReg)
893 .addUse(ValueReg);
894 return true;
895}
896
897/// Helper function for building atomic flag instructions (e.g.
898/// OpAtomicFlagTestAndSet).
///
/// Only OpAtomicFlagTestAndSet produces a result; OpAtomicFlagClear has no
/// def and additionally must not carry acquire semantics.
900 unsigned Opcode, MachineIRBuilder &MIRBuilder,
902 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
903 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
904 if (Call->isSpirvOp())
905 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
906 IsSet ? TypeReg : Register(0));
907
908 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
909 Register PtrRegister = Call->Arguments[0];
910 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
911 Register MemSemanticsReg =
912 Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
913 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
914 Semantics, MIRBuilder, GR);
915
916 assert((Opcode != SPIRV::OpAtomicFlagClear ||
917 (Semantics != SPIRV::MemorySemantics::Acquire &&
918 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
919 "Invalid memory order argument!");
920
921 Register ScopeRegister =
922 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
923 ScopeRegister =
924 buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
925
926 auto MIB = MIRBuilder.buildInstr(Opcode);
927 if (IsSet)
928 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
929
930 MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
931 return true;
932}
933
934/// Helper function for building barriers, i.e., memory/control ordering
935/// operations.
936static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
937 MachineIRBuilder &MIRBuilder,
939 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
940 const auto *ST =
941 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
942 if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
943 Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
944 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
945 std::string DiagMsg = std::string(Builtin->Name) +
946 ": the builtin requires the following SPIR-V "
947 "extension: SPV_INTEL_split_barrier";
948 report_fatal_error(DiagMsg.c_str(), false);
949 }
950
951 if (Call->isSpirvOp())
952 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
953
954 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
955 unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
956 unsigned MemSemantics = SPIRV::MemorySemantics::None;
957
958 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
959 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
960
961 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
962 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
963
964 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
965 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
966
967 if (Opcode == SPIRV::OpMemoryBarrier)
968 MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
969 getIConstVal(Call->Arguments[1], MRI))) |
970 MemSemantics;
971 else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
972 MemSemantics |= SPIRV::MemorySemantics::Release;
973 else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
974 MemSemantics |= SPIRV::MemorySemantics::Acquire;
975 else
976 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
977
978 Register MemSemanticsReg =
979 MemFlags == MemSemantics
980 ? Call->Arguments[0]
981 : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
982 Register ScopeReg;
983 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
984 SPIRV::Scope::Scope MemScope = Scope;
985 if (Call->Arguments.size() >= 2) {
986 assert(
987 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
988 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
989 "Extra args for explicitly scoped barrier");
990 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
991 : Call->Arguments[1];
992 SPIRV::CLMemoryScope CLScope =
993 static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
994 MemScope = getSPIRVScope(CLScope);
995 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
996 (Opcode == SPIRV::OpMemoryBarrier))
997 Scope = MemScope;
998 if (CLScope == static_cast<unsigned>(Scope))
999 ScopeReg = Call->Arguments[1];
1000 }
1001
1002 if (!ScopeReg.isValid())
1003 ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);
1004
1005 auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
1006 if (Opcode != SPIRV::OpMemoryBarrier)
1007 MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
1008 MIB.addUse(MemSemanticsReg);
1009 return true;
1010}
1011
1012/// Helper function for building extended bit operations.
1014 unsigned Opcode,
1015 MachineIRBuilder &MIRBuilder,
1016 SPIRVGlobalRegistry *GR) {
1017 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1018 const auto *ST =
1019 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1020 if ((Opcode == SPIRV::OpBitFieldInsert ||
1021 Opcode == SPIRV::OpBitFieldSExtract ||
1022 Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
1023 !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1024 std::string DiagMsg = std::string(Builtin->Name) +
1025 ": the builtin requires the following SPIR-V "
1026 "extension: SPV_KHR_bit_instructions";
1027 report_fatal_error(DiagMsg.c_str(), false);
1028 }
1029
1030 // Generate SPIRV instruction accordingly.
1031 if (Call->isSpirvOp())
1032 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1033 GR->getSPIRVTypeID(Call->ReturnType));
1034
1035 auto MIB = MIRBuilder.buildInstr(Opcode)
1036 .addDef(Call->ReturnRegister)
1037 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1038 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1039 MIB.addUse(Call->Arguments[i]);
1040
1041 return true;
1042}
1043
1044static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
1045 switch (dim) {
1046 case SPIRV::Dim::DIM_1D:
1047 case SPIRV::Dim::DIM_Buffer:
1048 return 1;
1049 case SPIRV::Dim::DIM_2D:
1050 case SPIRV::Dim::DIM_Cube:
1051 case SPIRV::Dim::DIM_Rect:
1052 return 2;
1053 case SPIRV::Dim::DIM_3D:
1054 return 3;
1055 default:
1056 report_fatal_error("Cannot get num components for given Dim");
1057 }
1058}
1059
1060/// Helper function for obtaining the number of size components.
1061static unsigned getNumSizeComponents(SPIRVType *imgType) {
1062 assert(imgType->getOpcode() == SPIRV::OpTypeImage);
1063 auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
1064 unsigned numComps = getNumComponentsForDim(dim);
1065 bool arrayed = imgType->getOperand(4).getImm() == 1;
1066 return arrayed ? numComps + 1 : numComps;
1067}
1068
1069//===----------------------------------------------------------------------===//
1070// Implementation functions for each builtin group
1071//===----------------------------------------------------------------------===//
1072
1073static bool generateExtInst(const SPIRV::IncomingCall *Call,
1074 MachineIRBuilder &MIRBuilder,
1075 SPIRVGlobalRegistry *GR) {
1076 // Lookup the extended instruction number in the TableGen records.
1077 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1079 SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
1080
1081 // Build extended instruction.
1082 auto MIB =
1083 MIRBuilder.buildInstr(SPIRV::OpExtInst)
1084 .addDef(Call->ReturnRegister)
1085 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1086 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1087 .addImm(Number);
1088
1089 for (auto Argument : Call->Arguments)
1090 MIB.addUse(Argument);
1091 return true;
1092}
1093
1095 MachineIRBuilder &MIRBuilder,
1096 SPIRVGlobalRegistry *GR) {
1097 // Lookup the instruction opcode in the TableGen records.
1098 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1099 unsigned Opcode =
1100 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1101
1102 Register CompareRegister;
1103 SPIRVType *RelationType;
1104 std::tie(CompareRegister, RelationType) =
1105 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1106
1107 // Build relational instruction.
1108 auto MIB = MIRBuilder.buildInstr(Opcode)
1109 .addDef(CompareRegister)
1110 .addUse(GR->getSPIRVTypeID(RelationType));
1111
1112 for (auto Argument : Call->Arguments)
1113 MIB.addUse(Argument);
1114
1115 // Build select instruction.
1116 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
1117 Call->ReturnType, GR);
1118}
1119
1121 MachineIRBuilder &MIRBuilder,
1122 SPIRVGlobalRegistry *GR) {
1123 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1124 const SPIRV::GroupBuiltin *GroupBuiltin =
1125 SPIRV::lookupGroupBuiltin(Builtin->Name);
1126
1127 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1128 if (Call->isSpirvOp()) {
1129 if (GroupBuiltin->NoGroupOperation)
1130 return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
1131 GR->getSPIRVTypeID(Call->ReturnType));
1132
1133 // Group Operation is a literal
1134 Register GroupOpReg = Call->Arguments[1];
1135 const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
1136 if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
1138 "Group Operation parameter must be an integer constant");
1139 uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
1140 Register ScopeReg = Call->Arguments[0];
1141 auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1142 .addDef(Call->ReturnRegister)
1143 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1144 .addUse(ScopeReg)
1145 .addImm(GrpOp);
1146 for (unsigned i = 2; i < Call->Arguments.size(); ++i)
1147 MIB.addUse(Call->Arguments[i]);
1148 return true;
1149 }
1150
1151 Register Arg0;
1152 if (GroupBuiltin->HasBoolArg) {
1153 SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
1154 Register BoolReg = Call->Arguments[0];
1155 SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
1156 if (!BoolRegType)
1157 report_fatal_error("Can't find a register's type definition");
1158 MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
1159 if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
1160 if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
1161 Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
1162 BoolType);
1163 } else {
1164 if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
1165 Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
1166 MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
1167 GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
1168 MIRBuilder.buildICmp(CmpInst::ICMP_NE, Arg0, BoolReg,
1169 GR->buildConstantInt(0, MIRBuilder, BoolRegType));
1170 insertAssignInstr(Arg0, nullptr, BoolType, GR, MIRBuilder,
1171 MIRBuilder.getMF().getRegInfo());
1172 } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
1173 report_fatal_error("Expect a boolean argument");
1174 }
1175 // if BoolReg is a boolean register, we don't need to do anything
1176 }
1177 }
1178
1179 Register GroupResultRegister = Call->ReturnRegister;
1180 SPIRVType *GroupResultType = Call->ReturnType;
1181
1182 // TODO: maybe we need to check whether the result type is already boolean
1183 // and in this case do not insert select instruction.
1184 const bool HasBoolReturnTy =
1185 GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
1186 GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
1187 GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;
1188
1189 if (HasBoolReturnTy)
1190 std::tie(GroupResultRegister, GroupResultType) =
1191 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1192
1193 auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
1194 : SPIRV::Scope::Workgroup;
1195 Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);
1196
1197 Register VecReg;
1198 if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
1199 Call->Arguments.size() > 2) {
1200 // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
1201 // scalar, a vector with 2 components, or a vector with 3 components.",
1202 // meaning that we must create a vector from the function arguments if
1203 // it's a work_group_broadcast(val, local_id_x, local_id_y) or
1204 // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
1205 Register ElemReg = Call->Arguments[1];
1206 SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
1207 if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
1208 report_fatal_error("Expect an integer <LocalId> argument");
1209 unsigned VecLen = Call->Arguments.size() - 1;
1210 VecReg = MRI->createGenericVirtualRegister(
1211 LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
1212 MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
1213 SPIRVType *VecType =
1214 GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder);
1215 GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
1216 auto MIB =
1217 MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
1218 for (unsigned i = 1; i < Call->Arguments.size(); i++) {
1219 MIB.addUse(Call->Arguments[i]);
1220 setRegClassIfNull(Call->Arguments[i], MRI, GR);
1221 }
1222 insertAssignInstr(VecReg, nullptr, VecType, GR, MIRBuilder,
1223 MIRBuilder.getMF().getRegInfo());
1224 }
1225
1226 // Build work/sub group instruction.
1227 auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1228 .addDef(GroupResultRegister)
1229 .addUse(GR->getSPIRVTypeID(GroupResultType))
1230 .addUse(ScopeRegister);
1231
1232 if (!GroupBuiltin->NoGroupOperation)
1233 MIB.addImm(GroupBuiltin->GroupOperation);
1234 if (Call->Arguments.size() > 0) {
1235 MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
1236 setRegClassIfNull(Call->Arguments[0], MRI, GR);
1237 if (VecReg.isValid())
1238 MIB.addUse(VecReg);
1239 else
1240 for (unsigned i = 1; i < Call->Arguments.size(); i++)
1241 MIB.addUse(Call->Arguments[i]);
1242 }
1243
1244 // Build select instruction.
1245 if (HasBoolReturnTy)
1246 buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
1247 Call->ReturnType, GR);
1248 return true;
1249}
1250
1252 MachineIRBuilder &MIRBuilder,
1253 SPIRVGlobalRegistry *GR) {
1254 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1255 MachineFunction &MF = MIRBuilder.getMF();
1256 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1257 const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
1258 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);
1259
1260 if (IntelSubgroups->IsMedia &&
1261 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1262 std::string DiagMsg = std::string(Builtin->Name) +
1263 ": the builtin requires the following SPIR-V "
1264 "extension: SPV_INTEL_media_block_io";
1265 report_fatal_error(DiagMsg.c_str(), false);
1266 } else if (!IntelSubgroups->IsMedia &&
1267 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1268 std::string DiagMsg = std::string(Builtin->Name) +
1269 ": the builtin requires the following SPIR-V "
1270 "extension: SPV_INTEL_subgroups";
1271 report_fatal_error(DiagMsg.c_str(), false);
1272 }
1273
1274 uint32_t OpCode = IntelSubgroups->Opcode;
1275 if (Call->isSpirvOp()) {
1276 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1277 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
1278 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
1279 return buildOpFromWrapper(MIRBuilder, OpCode, Call,
1280 IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
1281 : Register(0));
1282 }
1283
1284 if (IntelSubgroups->IsBlock) {
1285 // Minimal number or arguments set in TableGen records is 1
1286 if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
1287 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1288 // TODO: add required validation from the specification:
1289 // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
1290 // operand of 0 or 2. If the 'Sampled' operand is 2, then some
1291 // dimensions require a capability."
1292 switch (OpCode) {
1293 case SPIRV::OpSubgroupBlockReadINTEL:
1294 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1295 break;
1296 case SPIRV::OpSubgroupBlockWriteINTEL:
1297 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1298 break;
1299 }
1300 }
1301 }
1302 }
1303
1304 // TODO: opaque pointers types should be eventually resolved in such a way
1305 // that validation of block read is enabled with respect to the following
1306 // specification requirement:
1307 // "'Result Type' may be a scalar or vector type, and its component type must
1308 // be equal to the type pointed to by 'Ptr'."
1309 // For example, function parameter type should not be default i8 pointer, but
1310 // depend on the result type of the instruction where it is used as a pointer
1311 // argument of OpSubgroupBlockReadINTEL
1312
1313 // Build Intel subgroups instruction
1315 IntelSubgroups->IsWrite
1316 ? MIRBuilder.buildInstr(OpCode)
1317 : MIRBuilder.buildInstr(OpCode)
1318 .addDef(Call->ReturnRegister)
1319 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1320 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1321 MIB.addUse(Call->Arguments[i]);
1322 return true;
1323}
1324
1326 MachineIRBuilder &MIRBuilder,
1327 SPIRVGlobalRegistry *GR) {
1328 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1329 MachineFunction &MF = MIRBuilder.getMF();
1330 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1331 if (!ST->canUseExtension(
1332 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1333 std::string DiagMsg = std::string(Builtin->Name) +
1334 ": the builtin requires the following SPIR-V "
1335 "extension: SPV_KHR_uniform_group_instructions";
1336 report_fatal_error(DiagMsg.c_str(), false);
1337 }
1338 const SPIRV::GroupUniformBuiltin *GroupUniform =
1339 SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
1340 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1341
1342 Register GroupResultReg = Call->ReturnRegister;
1343 Register ScopeReg = Call->Arguments[0];
1344 Register ValueReg = Call->Arguments[2];
1345
1346 // Group Operation
1347 Register ConstGroupOpReg = Call->Arguments[1];
1348 const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
1349 if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1351 "expect a constant group operation for a uniform group instruction",
1352 false);
1353 const MachineOperand &ConstOperand = Const->getOperand(1);
1354 if (!ConstOperand.isCImm())
1355 report_fatal_error("uniform group instructions: group operation must be an "
1356 "integer constant",
1357 false);
1358
1359 auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
1360 .addDef(GroupResultReg)
1361 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1362 .addUse(ScopeReg);
1363 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1364 MIB.addUse(ValueReg);
1365
1366 return true;
1367}
1368
1370 MachineIRBuilder &MIRBuilder,
1371 SPIRVGlobalRegistry *GR) {
1372 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1373 MachineFunction &MF = MIRBuilder.getMF();
1374 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1375 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1376 std::string DiagMsg = std::string(Builtin->Name) +
1377 ": the builtin requires the following SPIR-V "
1378 "extension: SPV_KHR_shader_clock";
1379 report_fatal_error(DiagMsg.c_str(), false);
1380 }
1381
1382 Register ResultReg = Call->ReturnRegister;
1383
1384 // Deduce the `Scope` operand from the builtin function name.
1385 SPIRV::Scope::Scope ScopeArg =
1387 .EndsWith("device", SPIRV::Scope::Scope::Device)
1388 .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
1389 .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
1390 Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);
1391
1392 MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
1393 .addDef(ResultReg)
1394 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1395 .addUse(ScopeReg);
1396
1397 return true;
1398}
1399
1400// These queries ask for a single size_t result for a given dimension index, e.g
1401// size_t get_global_id(uint dimindex). In SPIR-V, the builtins corresonding to
1402// these values are all vec3 types, so we need to extract the correct index or
1403// return defaultVal (0 or 1 depending on the query). We also handle extending
1404// or tuncating in case size_t does not match the expected result type's
1405// bitwidth.
1406//
1407// For a constant index >= 3 we generate:
1408// %res = OpConstant %SizeT 0
1409//
1410// For other indices we generate:
1411// %g = OpVariable %ptr_V3_SizeT Input
1412// OpDecorate %g BuiltIn XXX
1413// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1414// OpDecorate %g Constant
1415// %loadedVec = OpLoad %V3_SizeT %g
1416//
1417// Then, if the index is constant < 3, we generate:
1418// %res = OpCompositeExtract %SizeT %loadedVec idx
1419// If the index is dynamic, we generate:
1420// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1421// %cmp = OpULessThan %bool %idx %const_3
1422// %res = OpSelect %SizeT %cmp %tmp %const_0
1423//
1424// If the bitwidth of %res does not match the expected return type, we add an
1425// extend or truncate.
1427 MachineIRBuilder &MIRBuilder,
1429 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1430 uint64_t DefaultValue) {
1431 Register IndexRegister = Call->Arguments[0];
1432 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1433 const unsigned PointerSize = GR->getPointerSize();
1434 const SPIRVType *PointerSizeType =
1435 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1436 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1437 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1438
1439 // Set up the final register to do truncation or extension on at the end.
1440 Register ToTruncate = Call->ReturnRegister;
1441
1442 // If the index is constant, we can statically determine if it is in range.
1443 bool IsConstantIndex =
1444 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1445
1446 // If it's out of range (max dimension is 3), we can just return the constant
1447 // default value (0 or 1 depending on which query function).
1448 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1449 Register DefaultReg = Call->ReturnRegister;
1450 if (PointerSize != ResultWidth) {
1451 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1452 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1453 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1454 MIRBuilder.getMF());
1455 ToTruncate = DefaultReg;
1456 }
1457 auto NewRegister =
1458 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
1459 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1460 } else { // If it could be in range, we need to load from the given builtin.
1461 auto Vec3Ty =
1462 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder);
1463 Register LoadedVector =
1464 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1465 LLT::fixed_vector(3, PointerSize));
1466 // Set up the vreg to extract the result to (possibly a new temporary one).
1467 Register Extracted = Call->ReturnRegister;
1468 if (!IsConstantIndex || PointerSize != ResultWidth) {
1469 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1470 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1471 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1472 }
1473 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1474 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1475 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1476 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1477 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1478
1479 // If the index is dynamic, need check if it's < 3, and then use a select.
1480 if (!IsConstantIndex) {
1481 insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
1482 *MRI);
1483
1484 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1485 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
1486
1487 Register CompareRegister =
1488 MRI->createGenericVirtualRegister(LLT::scalar(1));
1489 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1490 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1491
1492 // Use G_ICMP to check if idxVReg < 3.
1493 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1494 GR->buildConstantInt(3, MIRBuilder, IndexType));
1495
1496 // Get constant for the default value (0 or 1 depending on which
1497 // function).
1498 Register DefaultRegister =
1499 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
1500
1501 // Get a register for the selection result (possibly a new temporary one).
1502 Register SelectionResult = Call->ReturnRegister;
1503 if (PointerSize != ResultWidth) {
1504 SelectionResult =
1505 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1506 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1507 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1508 MIRBuilder.getMF());
1509 }
1510 // Create the final G_SELECT to return the extracted value or the default.
1511 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1512 DefaultRegister);
1513 ToTruncate = SelectionResult;
1514 } else {
1515 ToTruncate = Extracted;
1516 }
1517 }
1518 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1519 if (PointerSize != ResultWidth)
1520 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1521 return true;
1522}
1523
1525 MachineIRBuilder &MIRBuilder,
1526 SPIRVGlobalRegistry *GR) {
1527 // Lookup the builtin variable record.
1528 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1529 SPIRV::BuiltIn::BuiltIn Value =
1530 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1531
1532 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1533 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1534
1535 // Build a load instruction for the builtin variable.
1536 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1537 LLT LLType;
1538 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1539 LLType =
1540 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1541 else
1542 LLType = LLT::scalar(BitWidth);
1543
1544 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1545 LLType, Call->ReturnRegister);
1546}
1547
1549 MachineIRBuilder &MIRBuilder,
1550 SPIRVGlobalRegistry *GR) {
1551 // Lookup the instruction opcode in the TableGen records.
1552 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1553 unsigned Opcode =
1554 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1555
1556 switch (Opcode) {
1557 case SPIRV::OpStore:
1558 return buildAtomicInitInst(Call, MIRBuilder);
1559 case SPIRV::OpAtomicLoad:
1560 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1561 case SPIRV::OpAtomicStore:
1562 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1563 case SPIRV::OpAtomicCompareExchange:
1564 case SPIRV::OpAtomicCompareExchangeWeak:
1565 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1566 GR);
1567 case SPIRV::OpAtomicIAdd:
1568 case SPIRV::OpAtomicISub:
1569 case SPIRV::OpAtomicOr:
1570 case SPIRV::OpAtomicXor:
1571 case SPIRV::OpAtomicAnd:
1572 case SPIRV::OpAtomicExchange:
1573 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1574 case SPIRV::OpMemoryBarrier:
1575 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1576 case SPIRV::OpAtomicFlagTestAndSet:
1577 case SPIRV::OpAtomicFlagClear:
1578 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1579 default:
1580 if (Call->isSpirvOp())
1581 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1582 GR->getSPIRVTypeID(Call->ReturnType));
1583 return false;
1584 }
1585}
1586
1588 MachineIRBuilder &MIRBuilder,
1589 SPIRVGlobalRegistry *GR) {
1590 // Lookup the instruction opcode in the TableGen records.
1591 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1592 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1593
1594 switch (Opcode) {
1595 case SPIRV::OpAtomicFAddEXT:
1596 case SPIRV::OpAtomicFMinEXT:
1597 case SPIRV::OpAtomicFMaxEXT:
1598 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1599 default:
1600 return false;
1601 }
1602}
1603
1605 MachineIRBuilder &MIRBuilder,
1606 SPIRVGlobalRegistry *GR) {
1607 // Lookup the instruction opcode in the TableGen records.
1608 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1609 unsigned Opcode =
1610 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1611
1612 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1613}
1614
1616 MachineIRBuilder &MIRBuilder) {
1617 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1618 .addDef(Call->ReturnRegister)
1619 .addUse(Call->Arguments[0]);
1620 return true;
1621}
1622
1623static bool generateDotOrFMulInst(const StringRef DemangledCall,
1624 const SPIRV::IncomingCall *Call,
1625 MachineIRBuilder &MIRBuilder,
1626 SPIRVGlobalRegistry *GR) {
1627 if (Call->isSpirvOp())
1628 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1629 GR->getSPIRVTypeID(Call->ReturnType));
1630
1631 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1632 SPIRV::OpTypeVector;
1633 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1634 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1635 bool IsSwapReq = false;
1636
1637 const auto *ST =
1638 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1639 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1640 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1641 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1642 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1643 const SPIRV::IntegerDotProductBuiltin *IntDot =
1644 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1645 if (IntDot) {
1646 OC = IntDot->Opcode;
1647 IsSwapReq = IntDot->IsSwapReq;
1648 } else if (IsVec) {
1649 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1650 // integers.
1651 LLVMContext &Ctx = MIRBuilder.getContext();
1653 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
1654 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1655 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1656
1657 if (Call->BuiltinName == "dot") {
1658 if (IsFirstSigned && IsSecondSigned)
1659 OC = SPIRV::OpSDot;
1660 else if (!IsFirstSigned && !IsSecondSigned)
1661 OC = SPIRV::OpUDot;
1662 else {
1663 OC = SPIRV::OpSUDot;
1664 if (!IsFirstSigned)
1665 IsSwapReq = true;
1666 }
1667 } else if (Call->BuiltinName == "dot_acc_sat") {
1668 if (IsFirstSigned && IsSecondSigned)
1669 OC = SPIRV::OpSDotAccSat;
1670 else if (!IsFirstSigned && !IsSecondSigned)
1671 OC = SPIRV::OpUDotAccSat;
1672 else {
1673 OC = SPIRV::OpSUDotAccSat;
1674 if (!IsFirstSigned)
1675 IsSwapReq = true;
1676 }
1677 }
1678 }
1679 }
1680
1681 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1682 .addDef(Call->ReturnRegister)
1683 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1684
1685 if (IsSwapReq) {
1686 MIB.addUse(Call->Arguments[1]);
1687 MIB.addUse(Call->Arguments[0]);
1688 // needed for dot_acc_sat* builtins
1689 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1690 MIB.addUse(Call->Arguments[i]);
1691 } else {
1692 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1693 MIB.addUse(Call->Arguments[i]);
1694 }
1695
1696 // Add Packed Vector Format for Integer dot product builtins if arguments are
1697 // scalar
1698 if (!IsVec && OC != SPIRV::OpFMulS)
1699 MIB.addImm(0);
1700
1701 return true;
1702}
1703
1705 MachineIRBuilder &MIRBuilder,
1706 SPIRVGlobalRegistry *GR) {
1707 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1708 SPIRV::BuiltIn::BuiltIn Value =
1709 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1710
1711 // For now, we only support a single Wave intrinsic with a single return type.
1712 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1713 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1714
1716 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1717 /* isConst= */ false, /* hasLinkageTy= */ false);
1718}
1719
1720// We expect a builtin
1721// Name(ptr sret([RetType]) %result, Type %operand1, Type %operand1)
1722// where %result is a pointer to where the result of the builtin execution
1723// is to be stored, and generate the following instructions:
1724// Res = Opcode RetType Operand1 Operand1
1725// OpStore RetVariable Res
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  // The first argument is an sret pointer; the computed {result, carry/borrow}
  // struct is stored through it at the end.
  Register SRetReg = Call->Arguments[0];
  SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
  SPIRVType *RetType = GR->getPointeeType(PtrRetType);
  if (!RetType)
    report_fatal_error("The first parameter must be a pointer");
  if (RetType->getOpcode() != SPIRV::OpTypeStruct)
    report_fatal_error("Expected struct type result for the arithmetic with "
                       "overflow builtins");

  // Both value operands must have identical SPIR-V types.
  SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
  SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
  if (!OpType1 || !OpType2 || OpType1 != OpType2)
    report_fatal_error("Operands must have the same type");
  // Switch from the scalar to the vector form of the opcode when the
  // operands are vectors.
  if (OpType1->getOpcode() == SPIRV::OpTypeVector)
    switch (Opcode) {
    case SPIRV::OpIAddCarryS:
      Opcode = SPIRV::OpIAddCarryV;
      break;
    case SPIRV::OpISubBorrowS:
      Opcode = SPIRV::OpISubBorrowV;
      break;
    }

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  // Mirror the register class/LLT of the first value operand when it is
  // already known; fall back to a 64-bit scalar LLT otherwise.
  if (const TargetRegisterClass *DstRC =
          MRI->getRegClassOrNull(Call->Arguments[1])) {
    MRI->setRegClass(ResReg, DstRC);
    MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
  } else {
    MRI->setType(ResReg, LLT::scalar(64));
  }
  GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
  MIRBuilder.buildInstr(Opcode)
      .addDef(ResReg)
      .addUse(GR->getSPIRVTypeID(RetType))
      .addUse(Call->Arguments[1])
      .addUse(Call->Arguments[2]);
  // Store the computed struct result through the sret pointer.
  MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
  return true;
}
1774
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  // Lookup the builtin record.
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
  // Size-style queries use 1 as the fallback value, other queries use 0 —
  // presumably the default for out-of-range dimension indices; confirm
  // against genWorkgroupQuery.
  uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
                        Value == SPIRV::BuiltIn::WorkgroupSize ||
                        Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
  return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
}
1786
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  // Lookup the image size query component number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Component =
      SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
  // Query result may either be a vector or a scalar. If return type is not a
  // vector, expect only a single size component. Otherwise get the number of
  // expected components.
  SPIRVType *RetTy = Call->ReturnType;
  unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
                                          ? RetTy->getOperand(2).getImm()
                                          : 1;
  // Get the actual number of query result/size components.
  SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
  unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
  Register QueryResult = Call->ReturnRegister;
  SPIRVType *QueryResultType = Call->ReturnType;
  // When the counts disagree, query into a temporary i32 vector register and
  // reshape it to the expected form below.
  if (NumExpectedRetComponents != NumActualRetComponents) {
    QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
        LLT::fixed_vector(NumActualRetComponents, 32));
    MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
    SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
    QueryResultType = GR->getOrCreateSPIRVVectorType(
        IntTy, NumActualRetComponents, MIRBuilder);
    GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
  }
  // Buffer-dimension images are queried with OpImageQuerySize, which takes
  // no Lod operand.
  bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
  unsigned Opcode =
      IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(QueryResult)
                 .addUse(GR->getSPIRVTypeID(QueryResultType))
                 .addUse(Call->Arguments[0]);
  if (!IsDimBuf)
    MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
  if (NumExpectedRetComponents == NumActualRetComponents)
    return true;
  if (NumExpectedRetComponents == 1) {
    // Only 1 component is expected, build OpCompositeExtract instruction.
    // Component == 3 selects the last actual component.
    unsigned ExtractedComposite =
        Component == 3 ? NumActualRetComponents - 1 : Component;
    assert(ExtractedComposite < NumActualRetComponents &&
           "Invalid composite index!");
    Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
    SPIRVType *NewType = nullptr;
    if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
      // Prefer the queried vector's element type id when it differs from the
      // declared return type id.
      Register NewTypeReg = QueryResultType->getOperand(1).getReg();
      if (TypeReg != NewTypeReg &&
          (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
        TypeReg = NewTypeReg;
    }
    MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
        .addDef(Call->ReturnRegister)
        .addUse(TypeReg)
        .addUse(QueryResult)
        .addImm(ExtractedComposite);
    if (NewType != nullptr)
      insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
                        MIRBuilder.getMF().getRegInfo());
  } else {
    // More than 1 component is expected, fill a new vector.
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(QueryResult)
                   .addUse(QueryResult);
    // 0xffffffff marks an undefined shuffle component for positions beyond
    // the actually-queried size.
    for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
      MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
  }
  return true;
}
1860
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
         "Image samples query result must be of int type!");

  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register Image = Call->Arguments[0];
  SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
      GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
  // Only used by the asserts below; silence unused-variable warnings in
  // release (NDEBUG) builds.
  (void)ImageDimensionality;

  // Debug-build validation of the dimensionality each query opcode accepts.
  switch (Opcode) {
  case SPIRV::OpImageQuerySamples:
    assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
           "Image must be of 2D dimensionality");
    break;
  case SPIRV::OpImageQueryLevels:
    assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
            ImageDimensionality == SPIRV::Dim::DIM_2D ||
            ImageDimensionality == SPIRV::Dim::DIM_3D ||
            ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
           "Image must be of 1D/2D/3D/Cube dimensionality");
    break;
  }

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Image);
  return true;
}
1897
1898// TODO: Move to TableGen.
// Map the CLK_ADDRESS_* field of an OpenCL sampler bitmask onto the
// corresponding SPIR-V sampler addressing mode; unknown modes are fatal.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode
  switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
  case SPIRV::CLK_ADDRESS_CLAMP:
    return SPIRV::SamplerAddressingMode::Clamp;
  case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
    return SPIRV::SamplerAddressingMode::ClampToEdge;
  case SPIRV::CLK_ADDRESS_REPEAT:
    return SPIRV::SamplerAddressingMode::Repeat;
  case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
    return SPIRV::SamplerAddressingMode::RepeatMirrored;
  case SPIRV::CLK_ADDRESS_NONE:
    return SPIRV::SamplerAddressingMode::None;
  default:
    report_fatal_error("Unknown CL address mode");
  }
}
1916
1917static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
1918 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1919}
1920
// Map the CLK_FILTER_* bits of an OpenCL sampler bitmask onto the SPIR-V
// sampler filter mode.
static SPIRV::SamplerFilterMode::SamplerFilterMode
  if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
    return SPIRV::SamplerFilterMode::Linear;
  if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
    return SPIRV::SamplerFilterMode::Nearest;
  // Default to Nearest when neither filter bit is present.
  return SPIRV::SamplerFilterMode::Nearest;
}
1929
// Lower OpenCL read_image* builtins. Three variants are distinguished by the
// demangled name: sampled reads (via OpSampledImage + explicit-LOD sampling),
// multi-sampled (msaa) reads, and plain OpImageRead.
static bool generateReadImageInst(const StringRef DemangledCall,
                                  const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  Register Image = Call->Arguments[0];
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
  bool HasMsaa = DemangledCall.contains_insensitive("msaa");
  if (HasOclSampler) {
    Register Sampler = Call->Arguments[1];

    // A constant (integer-literal) sampler argument is materialized as an
    // OpConstantSampler built from its bitmask fields.
    if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
        getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
      uint64_t SamplerMask = getIConstVal(Sampler, MRI);
      Sampler = GR->buildConstantSampler(
          getSamplerParamFromBitmask(SamplerMask),
          getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder,
          GR->getSPIRVTypeForVReg(Sampler));
    }
    // Combine the image and sampler into an OpSampledImage value.
    SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
    SPIRVType *SampledImageType =
        GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
    Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);

    MIRBuilder.buildInstr(SPIRV::OpSampledImage)
        .addDef(SampledImage)
        .addUse(GR->getSPIRVTypeID(SampledImageType))
        .addUse(Image)
        .addUse(Sampler);

        MIRBuilder);

    if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
      // Scalar result: sample into a temporary 4-component vector and
      // extract component 0 into the return register.
      SPIRVType *TempType =
          GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
      Register TempRegister =
          MRI->createGenericVirtualRegister(GR->getRegType(TempType));
      MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
      GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
      MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
          .addDef(TempRegister)
          .addUse(GR->getSPIRVTypeID(TempType))
          .addUse(SampledImage)
          .addUse(Call->Arguments[2]) // Coordinate.
          .addImm(SPIRV::ImageOperand::Lod)
          .addUse(Lod);
      MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addUse(TempRegister)
          .addImm(0);
    } else {
      // Vector result: sample directly into the return register.
      MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addUse(SampledImage)
          .addUse(Call->Arguments[2]) // Coordinate.
          .addImm(SPIRV::ImageOperand::Lod)
          .addUse(Lod);
    }
  } else if (HasMsaa) {
    // Multi-sampled read: pass the sample index as an image operand.
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Image)
        .addUse(Call->Arguments[1]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Sample)
        .addUse(Call->Arguments[2]);
  } else {
    // Plain (sampler-less) image read.
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Image)
        .addUse(Call->Arguments[1]); // Coordinate.
  }
  return true;
}
2009
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // OpImageWrite produces no result: just image, coordinate and texel.
  MIRBuilder.buildInstr(SPIRV::OpImageWrite)
      .addUse(Call->Arguments[0]) // Image.
      .addUse(Call->Arguments[1]) // Coordinate.
      .addUse(Call->Arguments[2]); // Texel.
  return true;
}
2019
// Handle sampler-related builtins: sampler-initializer translation,
// OpSampledImage creation, and explicit-LOD image sampling.
static bool generateSampleImageInst(const StringRef DemangledCall,
                                    const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->Builtin->Name.contains_insensitive(
          "__translate_sampler_initializer")) {
    // Build sampler literal.
    uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
    Register Sampler = GR->buildConstantSampler(
        Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
        getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
    return Sampler.isValid();
  } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
    // Create OpSampledImage.
    Register Image = Call->Arguments[0];
    SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
    SPIRVType *SampledImageType =
        GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
    // Reuse the call's return register when valid; otherwise create a fresh
    // virtual register for the combined image+sampler value.
    Register SampledImage =
        Call->ReturnRegister.isValid()
            ? Call->ReturnRegister
            : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    MIRBuilder.buildInstr(SPIRV::OpSampledImage)
        .addDef(SampledImage)
        .addUse(GR->getSPIRVTypeID(SampledImageType))
        .addUse(Image)
        .addUse(Call->Arguments[1]); // Sampler.
    return true;
  } else if (Call->Builtin->Name.contains_insensitive(
                 "__spirv_ImageSampleExplicitLod")) {
    // Sample an image using an explicit level of detail.
    // The "_R<type>" infix of the demangled name encodes the return type
    // when the call itself carries none.
    std::string ReturnType = DemangledCall.str();
    if (DemangledCall.contains("_R")) {
      ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
      ReturnType = ReturnType.substr(0, ReturnType.find('('));
    }
    SPIRVType *Type =
        Call->ReturnType
            ? Call->ReturnType
            : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder);
    if (!Type) {
      std::string DiagMsg =
          "Unable to recognize SPIRV type name: " + ReturnType;
      report_fatal_error(DiagMsg.c_str());
    }
    MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
        .addDef(Call->ReturnRegister)
        .addUse(Call->Arguments[0]) // Image.
        .addUse(Call->Arguments[1]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Lod)
        .addUse(Call->Arguments[3]);
    return true;
  }
  return false;
}
2078
                               MachineIRBuilder &MIRBuilder) {
  // Lower directly to a select: Arguments[0] is the condition, [1] and [2]
  // the two values.
  MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
                         Call->Arguments[1], Call->Arguments[2]);
  return true;
}
2085
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  // Forward all wrapper arguments to OpCompositeConstruct, typed with the
  // call's return type.
  return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call,
                            GR->getSPIRVTypeID(Call->ReturnType));
}
2092
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  // Stores and prefetches produce no SSA value, so no result type id is set.
  bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
               Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
               Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
  unsigned ArgSz = Call->Arguments.size();
  // Index of the trailing optional literal operand (0 means "not present").
  unsigned LiteralIdx = 0;
  switch (Opcode) {
  // Memory operand is optional and is literal.
  case SPIRV::OpCooperativeMatrixLoadKHR:
    LiteralIdx = ArgSz > 3 ? 3 : 0;
    break;
  case SPIRV::OpCooperativeMatrixStoreKHR:
    LiteralIdx = ArgSz > 4 ? 4 : 0;
    break;
  case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
    LiteralIdx = ArgSz > 7 ? 7 : 0;
    break;
  case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
    LiteralIdx = ArgSz > 8 ? 8 : 0;
    break;
  // Cooperative Matrix Operands operand is optional and is literal.
  case SPIRV::OpCooperativeMatrixMulAddKHR:
    LiteralIdx = ArgSz > 3 ? 3 : 0;
    break;
  };

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  // Prefetch has a fully custom operand layout; handle it separately.
  if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
    const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
                   .addUse(Call->Arguments[0])  // pointer
                   .addUse(Call->Arguments[1])  // rows
                   .addUse(Call->Arguments[2])  // columns
                   .addImm(CacheLevel)          // cache level
                   .addUse(Call->Arguments[4]); // memory layout
    if (ArgSz > 5)
      MIB.addUse(Call->Arguments[5]); // stride
    if (ArgSz > 6) {
      const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
      MIB.addImm(MemOp); // memory operand
    }
    return true;
  }
  if (LiteralIdx > 0)
    ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
    // Length takes the cooperative-matrix *type* id, not a value operand.
    SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
    if (!CoopMatrType)
      report_fatal_error("Can't find a register's type definition");
    MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(TypeReg)
        .addUse(CoopMatrType->getOperand(0).getReg());
    return true;
  }
  return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                            IsSet ? TypeReg : Register(0), ImmArgs);
}
2158
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  const MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  switch (Opcode) {
  case SPIRV::OpSpecConstant: {
    // Build the SpecID decoration.
    unsigned SpecId =
        static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
    buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
                    {SpecId});
    // Determine the constant MI.
    Register ConstRegister = Call->Arguments[1];
    const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
    assert(Const &&
           (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
            Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
           "Argument should be either an int or floating-point constant");
    // Determine the opcode and built the OpSpec MI.
    const MachineOperand &ConstOperand = Const->getOperand(1);
    if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
      // Booleans use the dedicated true/false spec-constant opcodes instead
      // of a literal operand.
      assert(ConstOperand.isCImm() && "Int constant operand is expected");
      Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
                   ? SPIRV::OpSpecConstantTrue
                   : SPIRV::OpSpecConstantFalse;
    }
    auto MIB = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
      // Non-bool spec constants carry their default value as a literal;
      // floats are emitted as their raw bit pattern.
      if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
        addNumImm(ConstOperand.getCImm()->getValue(), MIB);
      else
        addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
    }
    return true;
  }
  case SPIRV::OpSpecConstantComposite: {
    auto MIB = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType));
    for (unsigned i = 0; i < Call->Arguments.size(); i++)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }
  default:
    return false;
  }
}
2214
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  // All extended-bit-ops builtins share a single lowering helper.
  return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
}
2225
// Build an OpBuildNDRange from an ndrange_{1,2,3}D builtin call and store
// the result through the destination pointer (first argument).
static bool buildNDRange(const SPIRV::IncomingCall *Call,
                         MachineIRBuilder &MIRBuilder,
                         SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
  assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
         PtrType->getOperand(2).isReg());
  // Type id of the destination pointer's pointee.
  Register TypeReg = PtrType->getOperand(2).getReg();
  MachineFunction &MF = MIRBuilder.getMF();
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
  // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
  // three other arguments, so pass zero constant on absence.
  unsigned NumArgs = Call->Arguments.size();
  assert(NumArgs >= 2);
  Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
  Register LocalWorkSize =
      NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
  Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
  if (NumArgs < 4) {
    Register Const;
    SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
    if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
      // The global work size was passed indirectly: recover the array behind
      // the spv_gep intrinsic and load it.
      MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
      assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
             DefInstr->getOperand(3).isReg());
      Register GWSPtr = DefInstr->getOperand(3).getReg();
      // TODO: Maybe simplify generation of the type of the fields.
      unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
      unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
      Type *FieldTy = ArrayType::get(BaseTy, Size);
      SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder);
      GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
      GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
      MIRBuilder.buildInstr(SPIRV::OpLoad)
          .addDef(GlobalWorkSize)
          .addUse(GR->getSPIRVTypeID(SpvFieldTy))
          .addUse(GWSPtr);
      const SPIRVSubtarget &ST =
          cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
      // Zero-filled array constant matching the loaded field type.
      Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
                                           SpvFieldTy, *ST.getInstrInfo());
    } else {
      Const = GR->buildConstantInt(0, MIRBuilder, SpvTy);
    }
    // Substitute the zero constant for whichever optional operands the
    // caller omitted.
    if (!LocalWorkSize.isValid())
      LocalWorkSize = Const;
    if (!GlobalWorkOffset.isValid())
      GlobalWorkOffset = Const;
  }
  assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
  MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
      .addDef(TmpReg)
      .addUse(TypeReg)
      .addUse(GlobalWorkSize)
      .addUse(LocalWorkSize)
      .addUse(GlobalWorkOffset);
  // Store the constructed ndrange through the destination pointer.
  return MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(TmpReg);
}
2289
2290// TODO: maybe move to the global register.
static SPIRVType *
                                   SPIRVGlobalRegistry *GR) {
  // Build (or fetch) the SPIR-V type for a pointer in the Generic storage
  // class; used for the dummy event operands of OpEnqueueKernel.
  LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
  unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
  Type *PtrType = PointerType::get(Context, SC1);
  return GR->getOrCreateSPIRVType(PtrType, MIRBuilder);
}
2299
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  const DataLayout &DL = MIRBuilder.getDataLayout();
  bool IsSpirvOp = Call->isSpirvOp();
  bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
  const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);

  // Make vararg instructions before OpEnqueueKernel.
  // Local sizes arguments: Sizes of block invoke arguments. Clang generates
  // local size operands as an array, so we need to unpack them.
  SmallVector<Register, 16> LocalSizes;
  if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
    const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
    Register GepReg = Call->Arguments[LocalSizeArrayIdx];
    MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
    assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
           GepMI->getOperand(3).isReg());
    Register ArrayReg = GepMI->getOperand(3).getReg();
    MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
    const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
    assert(LocalSizeTy && "Local size type is expected");
    const uint64_t LocalSizeNum =
        cast<ArrayType>(LocalSizeTy)->getNumElements();
    unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
    const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
    const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
        Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
    // Emit one GEP per array element so each local size becomes a separate
    // pointer operand of OpEnqueueKernel.
    for (unsigned I = 0; I < LocalSizeNum; ++I) {
      Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
      MRI->setType(Reg, LLType);
      GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
      auto GEPInst = MIRBuilder.buildIntrinsic(
          Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
      GEPInst
          .addImm(GepMI->getOperand(2).getImm())            // In bound.
          .addUse(ArrayMI->getOperand(0).getReg())          // Alloca.
          .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
          .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
      LocalSizes.push_back(Reg);
    }
  }

  // SPIRV OpEnqueueKernel instruction has 10+ arguments.
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Int32Ty));

  // Copy all arguments before block invoke function pointer.
  const unsigned BlockFIdx = HasEvents ? 6 : 3;
  for (unsigned i = 0; i < BlockFIdx; i++)
    MIB.addUse(Call->Arguments[i]);

  // If there are no event arguments in the original call, add dummy ones.
  if (!HasEvents) {
    MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
    Register NullPtr = GR->getOrCreateConstNullPtr(
        MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
    MIB.addUse(NullPtr); // Dummy wait events.
    MIB.addUse(NullPtr); // Dummy ret event.
  }

  MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
  assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  // Invoke: Pointer to invoke function.
  MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());

  Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
  // Param: Pointer to block literal.
  MIB.addUse(BlockLiteralReg);

  Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
  // TODO: these numbers should be obtained from block literal structure.
  // Param Size: Size of block literal structure.
  MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
  // Param Aligment: Aligment of block literal structure.
  MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
                                   MIRBuilder, GR));

  // Trailing variadic operands: one pointer per unpacked local size.
  for (unsigned i = 0; i < LocalSizes.size(); i++)
    MIB.addUse(LocalSizes[i]);
  return true;
}
2384
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  // Dispatch by opcode; the cases differ only in whether a result is
  // produced and in the number of operands forwarded.
  switch (Opcode) {
  case SPIRV::OpRetainEvent:
  case SPIRV::OpReleaseEvent:
    return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
  case SPIRV::OpCreateUserEvent:
  case SPIRV::OpGetDefaultQueue:
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  case SPIRV::OpIsValidEvent:
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  case SPIRV::OpSetUserEventStatus:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  case SPIRV::OpCaptureEventProfilingInfo:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1])
        .addUse(Call->Arguments[2]);
  case SPIRV::OpBuildNDRange:
    // ndrange construction and kernel enqueue need dedicated builders.
    return buildNDRange(Call, MIRBuilder, GR);
  case SPIRV::OpEnqueueKernel:
    return buildEnqueueKernel(Call, MIRBuilder, GR);
  default:
    return false;
  }
}
2424
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  // Only OpGroupAsyncCopy produces a result value.
  bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // __spirv_* wrappers already carry all operands in instruction order.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  // OpenCL async copies are implicitly workgroup-scoped.
  auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);

  switch (Opcode) {
  case SPIRV::OpGroupAsyncCopy: {
    // If the declared return type is not an OpTypeEvent, substitute the
    // named "spirv.Event" type and re-assign it to the result afterwards.
    SPIRVType *NewType =
        Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
            ? nullptr
            : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder);
    Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
    unsigned NumArgs = Call->Arguments.size();
    Register EventReg = Call->Arguments[NumArgs - 1];
    // The stride operand defaults to constant 1 when the caller omits it.
    bool Res = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(TypeReg)
                   .addUse(Scope)
                   .addUse(Call->Arguments[0])
                   .addUse(Call->Arguments[1])
                   .addUse(Call->Arguments[2])
                   .addUse(Call->Arguments.size() > 4
                               ? Call->Arguments[3]
                               : buildConstantIntReg32(1, MIRBuilder, GR))
                   .addUse(EventReg);
    if (NewType != nullptr)
      insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
                        MIRBuilder.getMF().getRegInfo());
    return Res;
  }
  case SPIRV::OpGroupWaitEvents:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Scope)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  default:
    return false;
  }
}
2475
2476static bool generateConvertInst(const StringRef DemangledCall,
2477 const SPIRV::IncomingCall *Call,
2478 MachineIRBuilder &MIRBuilder,
2479 SPIRVGlobalRegistry *GR) {
2480 // Lookup the conversion builtin in the TableGen records.
2481 const SPIRV::ConvertBuiltin *Builtin =
2482 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2483
2484 if (!Builtin && Call->isSpirvOp()) {
2485 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2486 unsigned Opcode =
2487 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2488 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2489 GR->getSPIRVTypeID(Call->ReturnType));
2490 }
2491
2492 if (Builtin->IsSaturated)
2493 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2494 SPIRV::Decoration::SaturatedConversion, {});
2495 if (Builtin->IsRounded)
2496 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2497 SPIRV::Decoration::FPRoundingMode,
2498 {(unsigned)Builtin->RoundingMode});
2499
2500 std::string NeedExtMsg; // no errors if empty
2501 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2502 unsigned Opcode = SPIRV::OpNop;
2503 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2504 // Int -> ...
2505 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2506 // Int -> Int
2507 if (Builtin->IsSaturated)
2508 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2509 : SPIRV::OpSatConvertSToU;
2510 else
2511 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2512 : SPIRV::OpSConvert;
2513 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2514 SPIRV::OpTypeFloat)) {
2515 // Int -> Float
2516 if (Builtin->IsBfloat16) {
2517 const auto *ST = static_cast<const SPIRVSubtarget *>(
2518 &MIRBuilder.getMF().getSubtarget());
2519 if (!ST->canUseExtension(
2520 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2521 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2522 IsRightComponentsNumber =
2523 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2524 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2525 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2526 } else {
2527 bool IsSourceSigned =
2528 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2529 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2530 }
2531 }
2532 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2533 SPIRV::OpTypeFloat)) {
2534 // Float -> ...
2535 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2536 // Float -> Int
2537 if (Builtin->IsBfloat16) {
2538 const auto *ST = static_cast<const SPIRVSubtarget *>(
2539 &MIRBuilder.getMF().getSubtarget());
2540 if (!ST->canUseExtension(
2541 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2542 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2543 IsRightComponentsNumber =
2544 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2545 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2546 Opcode = SPIRV::OpConvertFToBF16INTEL;
2547 } else {
2548 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2549 : SPIRV::OpConvertFToU;
2550 }
2551 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2552 SPIRV::OpTypeFloat)) {
2553 // Float -> Float
2554 Opcode = SPIRV::OpFConvert;
2555 }
2556 }
2557
2558 if (!NeedExtMsg.empty()) {
2559 std::string DiagMsg = std::string(Builtin->Name) +
2560 ": the builtin requires the following SPIR-V "
2561 "extension: " +
2562 NeedExtMsg;
2563 report_fatal_error(DiagMsg.c_str(), false);
2564 }
2565 if (!IsRightComponentsNumber) {
2566 std::string DiagMsg =
2567 std::string(Builtin->Name) +
2568 ": result and argument must have the same number of components";
2569 report_fatal_error(DiagMsg.c_str(), false);
2570 }
2571 assert(Opcode != SPIRV::OpNop &&
2572 "Conversion between the types not implemented!");
2573
2574 MIRBuilder.buildInstr(Opcode)
2575 .addDef(Call->ReturnRegister)
2576 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2577 .addUse(Call->Arguments[0]);
2578 return true;
2579}
2580
2582 MachineIRBuilder &MIRBuilder,
2583 SPIRVGlobalRegistry *GR) {
2584 // Lookup the vector load/store builtin in the TableGen records.
2585 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2586 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2587 Call->Builtin->Set);
2588 // Build extended instruction.
2589 auto MIB =
2590 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2591 .addDef(Call->ReturnRegister)
2592 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2593 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2594 .addImm(Builtin->Number);
2595 for (auto Argument : Call->Arguments)
2596 MIB.addUse(Argument);
2597 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2598 MIB.addImm(Builtin->ElementCount);
2599
2600 // Rounding mode should be passed as a last argument in the MI for builtins
2601 // like "vstorea_halfn_r".
2602 if (Builtin->IsRounded)
2603 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2604 return true;
2605}
2606
2608 MachineIRBuilder &MIRBuilder,
2609 SPIRVGlobalRegistry *GR) {
2610 // Lookup the instruction opcode in the TableGen records.
2611 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2612 unsigned Opcode =
2613 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2614 bool IsLoad = Opcode == SPIRV::OpLoad;
2615 // Build the instruction.
2616 auto MIB = MIRBuilder.buildInstr(Opcode);
2617 if (IsLoad) {
2618 MIB.addDef(Call->ReturnRegister);
2619 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2620 }
2621 // Add a pointer to the value to load/store.
2622 MIB.addUse(Call->Arguments[0]);
2623 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2624 // Add a value to store.
2625 if (!IsLoad)
2626 MIB.addUse(Call->Arguments[1]);
2627 // Add optional memory attributes and an alignment.
2628 unsigned NumArgs = Call->Arguments.size();
2629 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2630 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2631 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2632 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2633 return true;
2634}
2635
2636namespace SPIRV {
2637// Try to find a builtin function attributes by a demangled function name and
2638// return a tuple <builtin group, op code, ext instruction number>, or a special
2639// tuple value <-1, 0, 0> if the builtin function is not found.
2640// Not all builtin functions are supported, only those with a ready-to-use op
2641// code or instruction number defined in TableGen.
2642// TODO: consider a major rework of mapping demangled calls into a builtin
2643// functions to unify search and decrease number of individual cases.
2644std::tuple<int, unsigned, unsigned>
2645mapBuiltinToOpcode(const StringRef DemangledCall,
2646 SPIRV::InstructionSet::InstructionSet Set) {
2647 Register Reg;
2649 std::unique_ptr<const IncomingCall> Call =
2650 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
2651 if (!Call)
2652 return std::make_tuple(-1, 0, 0);
2653
2654 switch (Call->Builtin->Group) {
2655 case SPIRV::Relational:
2656 case SPIRV::Atomic:
2657 case SPIRV::Barrier:
2658 case SPIRV::CastToPtr:
2659 case SPIRV::ImageMiscQuery:
2660 case SPIRV::SpecConstant:
2661 case SPIRV::Enqueue:
2662 case SPIRV::AsyncCopy:
2663 case SPIRV::LoadStore:
2664 case SPIRV::CoopMatr:
2665 if (const auto *R =
2666 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2667 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2668 break;
2669 case SPIRV::Extended:
2670 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2671 Call->Builtin->Set))
2672 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2673 break;
2674 case SPIRV::VectorLoadStore:
2675 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2676 Call->Builtin->Set))
2677 return std::make_tuple(SPIRV::Extended, 0, R->Number);
2678 break;
2679 case SPIRV::Group:
2680 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2681 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2682 break;
2683 case SPIRV::AtomicFloating:
2684 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2685 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2686 break;
2687 case SPIRV::IntelSubgroups:
2688 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2689 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2690 break;
2691 case SPIRV::GroupUniform:
2692 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2693 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2694 break;
2695 case SPIRV::IntegerDot:
2696 if (const auto *R =
2697 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
2698 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2699 break;
2700 case SPIRV::WriteImage:
2701 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2702 case SPIRV::Select:
2703 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
2704 case SPIRV::Construct:
2705 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2706 0);
2707 case SPIRV::KernelClock:
2708 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2709 default:
2710 return std::make_tuple(-1, 0, 0);
2711 }
2712 return std::make_tuple(-1, 0, 0);
2713}
2714
// Main entry point for lowering a demangled builtin call: looks the call up in
// the TableGen records and dispatches to the group-specific generate* helper.
// Returns std::nullopt when no builtin record matches (the caller falls back
// to ordinary call lowering), otherwise the helper's success flag.
// Note: OrigRetTy is unused here; the SPIR-V return type is taken from the
// type already assigned to OrigRet in the global registry.
std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
                                 SPIRV::InstructionSet::InstructionSet Set,
                                 MachineIRBuilder &MIRBuilder,
                                 const Register OrigRet, const Type *OrigRetTy,
                                 const SmallVectorImpl<Register> &Args,
                                 SPIRVGlobalRegistry *GR) {
  LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");

  // Lookup the builtin in the TableGen records.
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
  assert(SpvType && "Inconsistent return register: expected valid type info");
  std::unique_ptr<const IncomingCall> Call =
      lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);

  if (!Call) {
    LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
    return std::nullopt;
  }

  // TODO: check if the provided args meet the builtin requirements.
  assert(Args.size() >= Call->Builtin->MinNumArgs &&
         "Too few arguments to generate the builtin");
  if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
    LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");

  // Match the builtin with implementation based on the grouping.
  switch (Call->Builtin->Group) {
  case SPIRV::Extended:
    return generateExtInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Relational:
    return generateRelationalInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Group:
    return generateGroupInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Variable:
    return generateBuiltinVar(Call.get(), MIRBuilder, GR);
  case SPIRV::Atomic:
    return generateAtomicInst(Call.get(), MIRBuilder, GR);
  case SPIRV::AtomicFloating:
    return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Barrier:
    return generateBarrierInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CastToPtr:
    return generateCastToPtrInst(Call.get(), MIRBuilder);
  case SPIRV::Dot:
  case SPIRV::IntegerDot:
    return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::Wave:
    return generateWaveInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ICarryBorrow:
    return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GetQuery:
    return generateGetQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageSizeQuery:
    return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageMiscQuery:
    return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ReadImage:
    return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::WriteImage:
    return generateWriteImageInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SampleImage:
    return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::Select:
    return generateSelectInst(Call.get(), MIRBuilder);
  case SPIRV::Construct:
    return generateConstructInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SpecConstant:
    return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Enqueue:
    return generateEnqueueInst(Call.get(), MIRBuilder, GR);
  case SPIRV::AsyncCopy:
    return generateAsyncCopy(Call.get(), MIRBuilder, GR);
  case SPIRV::Convert:
    return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::VectorLoadStore:
    return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::LoadStore:
    return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::IntelSubgroups:
    return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GroupUniform:
    return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
  case SPIRV::KernelClock:
    return generateKernelClockInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CoopMatr:
    return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ExtendedBitOps:
    return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
  }
  return false;
}
2806
2808 // Parse strings representing OpenCL builtin types.
2809 if (hasBuiltinTypePrefix(TypeStr)) {
2810 // OpenCL builtin types in demangled call strings have the following format:
2811 // e.g. ocl_image2d_ro
2812 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
2813 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
2814
2815 // Check if this is pointer to a builtin type and not just pointer
2816 // representing a builtin type. In case it is a pointer to builtin type,
2817 // this will require additional handling in the method calling
2818 // parseBuiltinCallArgumentBaseType(...) as this function only retrieves the
2819 // base types.
2820 if (TypeStr.ends_with("*"))
2821 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
2822
2823 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
2824 Ctx);
2825 }
2826
2827 // Parse type name in either "typeN" or "type vector[N]" format, where
2828 // N is the number of elements of the vector.
2829 Type *BaseType;
2830 unsigned VecElts = 0;
2831
2832 BaseType = parseBasicTypeName(TypeStr, Ctx);
2833 if (!BaseType)
2834 // Unable to recognize SPIRV type name.
2835 return nullptr;
2836
2837 // Handle "typeN*" or "type vector[N]*".
2838 TypeStr.consume_back("*");
2839
2840 if (TypeStr.consume_front(" vector["))
2841 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
2842
2843 TypeStr.getAsInteger(10, VecElts);
2844 if (VecElts > 0)
2846 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
2847
2848 return BaseType;
2849}
2850
2852 const StringRef DemangledCall, LLVMContext &Ctx) {
2853 auto Pos1 = DemangledCall.find('(');
2854 if (Pos1 == StringRef::npos)
2855 return false;
2856 auto Pos2 = DemangledCall.find(')');
2857 if (Pos2 == StringRef::npos || Pos1 > Pos2)
2858 return false;
2859 DemangledCall.slice(Pos1 + 1, Pos2)
2860 .split(BuiltinArgsTypeStrs, ',', -1, false);
2861 return true;
2862}
2863
2865 unsigned ArgIdx, LLVMContext &Ctx) {
2866 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
2867 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
2868 if (ArgIdx >= BuiltinArgsTypeStrs.size())
2869 return nullptr;
2870 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
2871 return parseBuiltinCallArgumentType(TypeStr, Ctx);
2872}
2873
2877};
2878
2879#define GET_BuiltinTypes_DECL
2880#define GET_BuiltinTypes_IMPL
2881
2885};
2886
2887#define GET_OpenCLTypes_DECL
2888#define GET_OpenCLTypes_IMPL
2889
2890#include "SPIRVGenTables.inc"
2891} // namespace SPIRV
2892
2893//===----------------------------------------------------------------------===//
2894// Misc functions for parsing builtin types.
2895//===----------------------------------------------------------------------===//
2896
2898 if (Name.starts_with("void"))
2899 return Type::getVoidTy(Context);
2900 else if (Name.starts_with("int") || Name.starts_with("uint"))
2901 return Type::getInt32Ty(Context);
2902 else if (Name.starts_with("float"))
2903 return Type::getFloatTy(Context);
2904 else if (Name.starts_with("half"))
2905 return Type::getHalfTy(Context);
2906 report_fatal_error("Unable to recognize type!");
2907}
2908
2909//===----------------------------------------------------------------------===//
2910// Implementation functions for builtin types.
2911//===----------------------------------------------------------------------===//
2912
2914 const SPIRV::BuiltinType *TypeRecord,
2915 MachineIRBuilder &MIRBuilder,
2916 SPIRVGlobalRegistry *GR) {
2917 unsigned Opcode = TypeRecord->Opcode;
2918 // Create or get an existing type from GlobalRegistry.
2919 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
2920}
2921
2923 SPIRVGlobalRegistry *GR) {
2924 // Create or get an existing type from GlobalRegistry.
2925 return GR->getOrCreateOpTypeSampler(MIRBuilder);
2926}
2927
2928static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
2929 MachineIRBuilder &MIRBuilder,
2930 SPIRVGlobalRegistry *GR) {
2931 assert(ExtensionType->getNumIntParameters() == 1 &&
2932 "Invalid number of parameters for SPIR-V pipe builtin!");
2933 // Create or get an existing type from GlobalRegistry.
2934 return GR->getOrCreateOpTypePipe(MIRBuilder,
2935 SPIRV::AccessQualifier::AccessQualifier(
2936 ExtensionType->getIntParameter(0)));
2937}
2938
2939static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
2940 MachineIRBuilder &MIRBuilder,
2941 SPIRVGlobalRegistry *GR) {
2942 assert(ExtensionType->getNumIntParameters() == 4 &&
2943 "Invalid number of parameters for SPIR-V coop matrices builtin!");
2944 assert(ExtensionType->getNumTypeParameters() == 1 &&
2945 "SPIR-V coop matrices builtin type must have a type parameter!");
2946 const SPIRVType *ElemType =
2947 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2948 // Create or get an existing type from GlobalRegistry.
2949 return GR->getOrCreateOpTypeCoopMatr(
2950 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
2951 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2952 ExtensionType->getIntParameter(3));
2953}
2954
2955static SPIRVType *
2956getImageType(const TargetExtType *ExtensionType,
2957 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2958 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
2959 assert(ExtensionType->getNumTypeParameters() == 1 &&
2960 "SPIR-V image builtin type must have sampled type parameter!");
2961 const SPIRVType *SampledType =
2962 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2963 assert((ExtensionType->getNumIntParameters() == 7 ||
2964 ExtensionType->getNumIntParameters() == 6) &&
2965 "Invalid number of parameters for SPIR-V image builtin!");
2966
2967 SPIRV::AccessQualifier::AccessQualifier accessQualifier =
2968 SPIRV::AccessQualifier::None;
2969 if (ExtensionType->getNumIntParameters() == 7) {
2970 accessQualifier = Qualifier == SPIRV::AccessQualifier::WriteOnly
2971 ? SPIRV::AccessQualifier::WriteOnly
2972 : SPIRV::AccessQualifier::AccessQualifier(
2973 ExtensionType->getIntParameter(6));
2974 }
2975
2976 // Create or get an existing type from GlobalRegistry.
2977 return GR->getOrCreateOpTypeImage(
2978 MIRBuilder, SampledType,
2979 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2980 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2981 ExtensionType->getIntParameter(3), ExtensionType->getIntParameter(4),
2982 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2983 accessQualifier);
2984}
2985
2987 MachineIRBuilder &MIRBuilder,
2988 SPIRVGlobalRegistry *GR) {
2989 SPIRVType *OpaqueImageType = getImageType(
2990 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2991 // Create or get an existing type from GlobalRegistry.
2992 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
2993}
2994
2995namespace SPIRV {
2997 LLVMContext &Context) {
2998 StringRef NameWithParameters = TypeName;
2999
3000 // Pointers-to-opaque-structs representing OpenCL types are first translated
3001 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3002 // following format: e.g. %opencl.event_t
3003 if (NameWithParameters.starts_with("opencl.")) {
3004 const SPIRV::OpenCLType *OCLTypeRecord =
3005 SPIRV::lookupOpenCLType(NameWithParameters);
3006 if (!OCLTypeRecord)
3007 report_fatal_error("Missing TableGen record for OpenCL type: " +
3008 NameWithParameters);
3009 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3010 // Continue with the SPIR-V builtin type...
3011 }
3012
3013 // Names of the opaque structs representing a SPIR-V builtins without
3014 // parameters should have the following format: e.g. %spirv.Event
3015 assert(NameWithParameters.starts_with("spirv.") &&
3016 "Unknown builtin opaque type!");
3017
3018 // Parameterized SPIR-V builtins names follow this format:
3019 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3020 if (!NameWithParameters.contains('_'))
3021 return TargetExtType::get(Context, NameWithParameters);
3022
3023 SmallVector<StringRef> Parameters;
3024 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3025 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3026
3027 SmallVector<Type *, 1> TypeParameters;
3028 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3029 if (HasTypeParameter)
3030 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3031 SmallVector<unsigned> IntParameters;
3032 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3033 unsigned IntParameter = 0;
3034 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3035 (void)ValidLiteral;
3036 assert(ValidLiteral &&
3037 "Invalid format of SPIR-V builtin parameter literal!");
3038 IntParameters.push_back(IntParameter);
3039 }
3040 return TargetExtType::get(Context,
3041 NameWithParameters.substr(0, BaseNameLength),
3042 TypeParameters, IntParameters);
3043}
3044
3046 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3047 MachineIRBuilder &MIRBuilder,
3048 SPIRVGlobalRegistry *GR) {
3049 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3050 // target(...) target extension types or pointers-to-opaque-structs. The
3051 // approach relying on structs is deprecated and works only in the non-opaque
3052 // pointer mode (-opaque-pointers=0).
3053 // In order to maintain compatibility with LLVM IR generated by older versions
3054 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
3055 // "translated" to target extension types. This translation is temporary and
3056 // will be removed in the future release of LLVM.
3057 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
3058 if (!BuiltinType)
3060 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3061
3062 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3063
3064 const StringRef Name = BuiltinType->getName();
3065 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3066
3067 // Lookup the demangled builtin type in the TableGen records.
3068 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3069 if (!TypeRecord)
3070 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3071
3072 // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
3073 // use the implementation details from TableGen records or TargetExtType
3074 // parameters to either create a new OpType<...> machine instruction or get an
3075 // existing equivalent SPIRVType from GlobalRegistry.
3076 SPIRVType *TargetType;
3077 switch (TypeRecord->Opcode) {
3078 case SPIRV::OpTypeImage:
3079 TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
3080 break;
3081 case SPIRV::OpTypePipe:
3082 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3083 break;
3084 case SPIRV::OpTypeDeviceEvent:
3085 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3086 break;
3087 case SPIRV::OpTypeSampler:
3088 TargetType = getSamplerType(MIRBuilder, GR);
3089 break;
3090 case SPIRV::OpTypeSampledImage:
3091 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3092 break;
3093 case SPIRV::OpTypeCooperativeMatrixKHR:
3094 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3095 break;
3096 default:
3097 TargetType =
3098 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3099 break;
3100 }
3101
3102 // Emit OpName instruction if a new OpType<...> instruction was added
3103 // (equivalent type was not found in GlobalRegistry).
3104 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3105 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3106
3107 return TargetType;
3108}
3109} // namespace SPIRV
3110} // namespace llvm
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
return RetTy
#define LLVM_DEBUG(...)
Definition: Debug.h:106
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned Reg
static bool isDigit(const char C)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
spirv structurize SPIRV
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file contains some functions that are useful when dealing with strings.
APInt bitcastToAPInt() const
Definition: APFloat.h:1351
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition: APFloat.h:1081
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1520
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:698
@ ICMP_EQ
equal
Definition: InstrTypes.h:694
@ ICMP_NE
not equal
Definition: InstrTypes.h:695
const APFloat & getValueAPF() const
Definition: Constants.h:314
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:148
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
Tagged union holding either a T or a Error.
Definition: Error.h:481
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:572
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:369
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:64
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelType.h:57
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:71
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:577
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:587
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isValid() const
Definition: Register.h:121
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR=true)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateOpTypeImage(MachineIRBuilder &MIRBuilder, SPIRVType *SampledType, SPIRV::Dim::Dim Dim, uint32_t Depth, uint32_t Arrayed, uint32_t Multisampled, uint32_t Sampled, SPIRV::ImageFormat::ImageFormat ImageFormat, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVPointerType(SPIRVType *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SClass=SPIRV::StorageClass::Function)
SPIRVType * getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, const SPIRVType *ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use)
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR=true, bool ZeroAsNull=true)
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:700
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
Definition: StringRef.h:655
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:470
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:229
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:571
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:265
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:436
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:684
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:424
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:635
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
Definition: StringRef.h:377
size_t rfind(char C, size_t From=npos) const
Search for the last character C in the string.
Definition: StringRef.h:347
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:297
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:277
static constexpr size_t npos
Definition: StringRef.h:53
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
StringSwitch & EndsWith(StringLiteral S, T Value)
Definition: StringSwitch.h:73
Class to represent struct types.
Definition: DerivedTypes.h:218
Class to represent target extensions types, which are generally unintrospectable from target-independ...
Definition: DerivedTypes.h:753
unsigned getNumIntParameters() const
Definition: DerivedTypes.h:811
static TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition: Type.cpp:895
Type * getTypeParameter(unsigned i) const
Definition: DerivedTypes.h:801
unsigned getNumTypeParameters() const
Definition: DerivedTypes.h:802
unsigned getIntParameter(unsigned i) const
Definition: DerivedTypes.h:810
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getHalfTy(LLVMContext &C)
StringRef getStructName() const
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static Type * getFloatTy(LLVMContext &C)
LLVM Value Representation.
Definition: Value.h:74
Value(Type *Ty, unsigned scid)
Definition: Value.cpp:53
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents a version number in the form major[.minor[.subminor[.build]]].
Definition: VersionTuple.h:29
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
Definition: Core.cpp:884
std::string lookupBuiltinNameHelper(StringRef DemangledCall, FPDecorationId *DecorationId)
Parses the name part of the demangled builtin call.
Type * parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx)
bool parseBuiltinTypeStr(SmallVector< StringRef, 10 > &BuiltinArgsTypeStrs, const StringRef DemangledCall, LLVMContext &Ctx)
std::tuple< int, unsigned, unsigned > mapBuiltinToOpcode(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set)
Helper function for finding a builtin function attributes by a demangled function name.
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR)
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
StorageClass
Definition: XCOFF.h:170
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
Definition: SPIRVUtils.cpp:103
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, bool hasLinkageTy=true)
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
Definition: SPIRVUtils.cpp:83
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building extended bit operations.
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
FPDecorationId demangledPostfixToDecorationId(const std::string &S)
Definition: SPIRVUtils.h:411
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting ASSIGN_TYPE instuction between Reg and its definition,...
static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
FPDecorationId
Definition: SPIRVUtils.h:409
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:326
static Register buildConstantIntReg32(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:245
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.h:163
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Definition: SPIRVUtils.cpp:130
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
Definition: SPIRVUtils.cpp:748
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected R...
static const Type * getMachineInstrType(MachineInstr *MI)
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateDotOrFMulInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
Definition: SPIRVUtils.cpp:459
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall ...
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:307
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
const MachineInstr SPIRVType
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
bool hasBuiltinTypePrefix(StringRef Name)
Definition: SPIRVUtils.cpp:429
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
Definition: SPIRVUtils.cpp:338
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Definition: SPIRVUtils.cpp:332
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:257
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
BuiltIn::BuiltIn Value
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode