LLVM 22.0.0git
SPIRVBuiltins.cpp
Go to the documentation of this file.
1//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements lowering builtin function calls and types using their
10// demangled names and TableGen records.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPIRVBuiltins.h"
15#include "SPIRV.h"
16#include "SPIRVSubtarget.h"
17#include "SPIRVUtils.h"
20#include "llvm/IR/IntrinsicsSPIRV.h"
21#include <regex>
22#include <string>
23#include <tuple>
24
25#define DEBUG_TYPE "spirv-builtins"
26
27namespace llvm {
28namespace SPIRV {
29#define GET_BuiltinGroup_DECL
30#include "SPIRVGenTables.inc"
31
34 InstructionSet::InstructionSet Set;
35 BuiltinGroup Group;
38};
39
40#define GET_DemangledBuiltins_DECL
41#define GET_DemangledBuiltins_IMPL
42
44 const std::string BuiltinName;
46
50
57
58 bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
59};
60
63 InstructionSet::InstructionSet Set;
65};
66
67#define GET_NativeBuiltins_DECL
68#define GET_NativeBuiltins_IMPL
69
74 bool IsElect;
84};
85
86#define GET_GroupBuiltins_DECL
87#define GET_GroupBuiltins_IMPL
88
92 bool IsBlock;
93 bool IsWrite;
94 bool IsMedia;
95};
96
97#define GET_IntelSubgroupsBuiltins_DECL
98#define GET_IntelSubgroupsBuiltins_IMPL
99
103};
104
105#define GET_AtomicFloatingBuiltins_DECL
106#define GET_AtomicFloatingBuiltins_IMPL
111};
112
113#define GET_GroupUniformBuiltins_DECL
114#define GET_GroupUniformBuiltins_IMPL
115
118 InstructionSet::InstructionSet Set;
119 BuiltIn::BuiltIn Value;
120};
121
122using namespace BuiltIn;
123#define GET_GetBuiltins_DECL
124#define GET_GetBuiltins_IMPL
125
128 InstructionSet::InstructionSet Set;
130};
131
132#define GET_ImageQueryBuiltins_DECL
133#define GET_ImageQueryBuiltins_IMPL
134
139};
140
141#define GET_IntegerDotProductBuiltins_DECL
142#define GET_IntegerDotProductBuiltins_IMPL
143
146 InstructionSet::InstructionSet Set;
151 bool IsTF32;
152 FPRoundingMode::FPRoundingMode RoundingMode;
153};
154
157 InstructionSet::InstructionSet Set;
161 FPRoundingMode::FPRoundingMode RoundingMode;
162};
163
164using namespace FPRoundingMode;
165#define GET_ConvertBuiltins_DECL
166#define GET_ConvertBuiltins_IMPL
167
168using namespace InstructionSet;
169#define GET_VectorLoadStoreBuiltins_DECL
170#define GET_VectorLoadStoreBuiltins_IMPL
171
172#define GET_CLMemoryScope_DECL
173#define GET_CLSamplerAddressingMode_DECL
174#define GET_CLMemoryFenceFlags_DECL
175#define GET_ExtendedBuiltins_DECL
176#include "SPIRVGenTables.inc"
177} // namespace SPIRV
178
179//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
181// TableGen records
182//===----------------------------------------------------------------------===//
183
184namespace SPIRV {
/// Parses the name part of the demangled builtin call.
///
/// Strips, in order: an "(anonymous namespace)::" prefix, the argument list
/// (everything from '(' on), a "__spirv_ocl_" prefix, template angle-bracket
/// instantiation text, and a trailing "_R<type>" return-type postfix. If
/// \p DecorationId is non-null and a "_R..." postfix is present, the FP
/// decoration encoded in the postfix is reported through it.
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  StringRef PassPrefix = "(anonymous namespace)::";
  std::string BuiltinName;
  // Itanium Demangler result may have "(anonymous namespace)::" prefix
  if (DemangledCall.starts_with(PassPrefix))
    BuiltinName = DemangledCall.substr(PassPrefix.size());
  else
    BuiltinName = DemangledCall;
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));

  // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
  // (12 == strlen("__spirv_ocl_")).
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template - needs to have
  // the information after angle brackets and return type removed.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    // Pos2 points just past the space preceding the template name (or to the
    // start of the string when there is no return type in front of it).
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageWrite"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_Round"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and maybe contains return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
  // Capture group 1 is the plain builtin name; group 4 (when present) is the
  // decoration postfix that follows "_R<type>_".
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
      "UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|Round|"
      "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      // An unmatched group yields an empty string, which maps to the default
      // decoration id.
      SubMatch = Match[4];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
262} // namespace SPIRV
263
264/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
265/// the provided \p DemangledCall and specified \p Set.
266///
267/// The lookup follows the following algorithm, returning the first successful
268/// match:
269/// 1. Search with the plain demangled name (expecting a 1:1 match).
270/// 2. Search with the prefix before or suffix after the demangled name
271/// signyfying the type of the first argument.
272///
273/// \returns Wrapper around the demangled call and found builtin definition.
274static std::unique_ptr<const SPIRV::IncomingCall>
276 SPIRV::InstructionSet::InstructionSet Set,
277 Register ReturnRegister, const SPIRVType *ReturnType,
279 std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);
280
281 SmallVector<StringRef, 10> BuiltinArgumentTypes;
282 StringRef BuiltinArgs =
283 DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
284 BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);
285
286 // Look up the builtin in the defined set. Start with the plain demangled
287 // name, expecting a 1:1 match in the defined builtin set.
288 const SPIRV::DemangledBuiltin *Builtin;
289 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
290 return std::make_unique<SPIRV::IncomingCall>(
291 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
292
293 // If the initial look up was unsuccessful and the demangled call takes at
294 // least 1 argument, add a prefix or suffix signifying the type of the first
295 // argument and repeat the search.
296 if (BuiltinArgumentTypes.size() >= 1) {
297 char FirstArgumentType = BuiltinArgumentTypes[0][0];
298 // Prefix to be added to the builtin's name for lookup.
299 // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
300 std::string Prefix;
301
302 switch (FirstArgumentType) {
303 // Unsigned:
304 case 'u':
305 if (Set == SPIRV::InstructionSet::OpenCL_std)
306 Prefix = "u_";
307 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
308 Prefix = "u";
309 break;
310 // Signed:
311 case 'c':
312 case 's':
313 case 'i':
314 case 'l':
315 if (Set == SPIRV::InstructionSet::OpenCL_std)
316 Prefix = "s_";
317 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
318 Prefix = "s";
319 break;
320 // Floating-point:
321 case 'f':
322 case 'd':
323 case 'h':
324 if (Set == SPIRV::InstructionSet::OpenCL_std ||
325 Set == SPIRV::InstructionSet::GLSL_std_450)
326 Prefix = "f";
327 break;
328 }
329
330 // If argument-type name prefix was added, look up the builtin again.
331 if (!Prefix.empty() &&
332 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
333 return std::make_unique<SPIRV::IncomingCall>(
334 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
335
336 // If lookup with a prefix failed, find a suffix to be added to the
337 // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
338 // an unsigned value has a suffix "u".
339 std::string Suffix;
340
341 switch (FirstArgumentType) {
342 // Unsigned:
343 case 'u':
344 Suffix = "u";
345 break;
346 // Signed:
347 case 'c':
348 case 's':
349 case 'i':
350 case 'l':
351 Suffix = "s";
352 break;
353 // Floating-point:
354 case 'f':
355 case 'd':
356 case 'h':
357 Suffix = "f";
358 break;
359 }
360
361 // If argument-type name suffix was added, look up the builtin again.
362 if (!Suffix.empty() &&
363 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
364 return std::make_unique<SPIRV::IncomingCall>(
365 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
366 }
367
368 // No builtin with such name was found in the set.
369 return nullptr;
370}
371
374 // We expect the following sequence of instructions:
375 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
376 // or = G_GLOBAL_VALUE @block_literal_global
377 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
378 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
379 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
380 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
381 MI->getOperand(1).isReg());
382 Register BitcastReg = MI->getOperand(1).getReg();
383 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
384 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
385 BitcastMI->getOperand(2).isReg());
386 Register ValueReg = BitcastMI->getOperand(2).getReg();
387 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
388 return ValueMI;
389}
390
391// Return an integer constant corresponding to the given register and
392// defined in spv_track_constant.
393// TODO: maybe unify with prelegalizer pass.
395 MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
396 assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
397 DefMI->getOperand(1).isCImm());
398 return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
399}
400
401// Return type of the instruction result from spv_assign_type intrinsic.
402// TODO: maybe unify with prelegalizer pass.
404 MachineInstr *NextMI = MI->getNextNode();
405 if (!NextMI)
406 return nullptr;
407 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
408 if ((NextMI = NextMI->getNextNode()) == nullptr)
409 return nullptr;
410 Register ValueReg = MI->getOperand(0).getReg();
411 if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
412 !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
413 NextMI->getOperand(1).getReg() != ValueReg)
414 return nullptr;
415 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
416 assert(Ty && "Type is expected");
417 return Ty;
418}
419
420static const Type *getBlockStructType(Register ParamReg,
422 // In principle, this information should be passed to us from Clang via
423 // an elementtype attribute. However, said attribute requires that
424 // the function call be an intrinsic, which is not. Instead, we rely on being
425 // able to trace this to the declaration of a variable: OpenCL C specification
426 // section 6.12.5 should guarantee that we can do this.
428 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
429 return MI->getOperand(1).getGlobal()->getType();
430 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
431 "Blocks in OpenCL C must be traceable to allocation site");
432 return getMachineInstrType(MI);
433}
434
435//===----------------------------------------------------------------------===//
436// Helper functions for building misc instructions
437//===----------------------------------------------------------------------===//
438
439/// Helper function building either a resulting scalar or vector bool register
440/// depending on the expected \p ResultType.
441///
442/// \returns Tuple of the resulting register and its type.
443static std::tuple<Register, SPIRVType *>
444buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
446 LLT Type;
447 SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
448
449 if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
450 unsigned VectorElements = ResultType->getOperand(2).getImm();
451 BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
452 MIRBuilder, true);
454 cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
455 Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
456 } else {
457 Type = LLT::scalar(1);
458 }
459
460 Register ResultRegister =
462 MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
463 GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
464 return std::make_tuple(ResultRegister, BoolType);
465}
466
467/// Helper function for building either a vector or scalar select instruction
468/// depending on the expected \p ResultType.
469static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
470 Register ReturnRegister, Register SourceRegister,
471 const SPIRVType *ReturnType,
473 Register TrueConst, FalseConst;
474
475 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
476 unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
478 TrueConst =
479 GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
480 FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
481 } else {
482 TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
483 FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
484 }
485
486 return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
487 FalseConst);
488}
489
490/// Helper function for building a load instruction loading into the
491/// \p DestinationReg.
493 MachineIRBuilder &MIRBuilder,
494 SPIRVGlobalRegistry *GR, LLT LowLevelType,
495 Register DestinationReg = Register(0)) {
496 if (!DestinationReg.isValid())
497 DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
498 // TODO: consider using correct address space and alignment (p0 is canonical
499 // type for selection though).
501 MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
502 return DestinationReg;
503}
504
505/// Helper function for building a load instruction for loading a builtin global
506/// variable of \p BuiltinValue value.
508 MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
509 SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
510 Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
511 Register NewRegister =
512 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
513 MIRBuilder.getMRI()->setType(
514 NewRegister,
515 LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
516 GR->getPointerSize()));
518 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
519 GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
520
521 // Set up the global OpVariable with the necessary builtin decorations.
522 Register Variable = GR->buildGlobalVariable(
523 NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
524 SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
525 /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
526 false);
527
528 // Load the value from the global variable.
529 Register LoadedRegister =
530 buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
531 MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
532 return LoadedRegister;
533}
534
535/// Helper external function for inserting ASSIGN_TYPE instuction between \p Reg
536/// and its definition, set the new register as a destination of the definition,
537/// assign SPIRVType to both registers. If SpirvTy is provided, use it as
538/// SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in
539/// SPIRVPreLegalizer.cpp.
540extern void insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
541 SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
542 MachineRegisterInfo &MRI);
543
544// TODO: Move to TableGen.
545static SPIRV::MemorySemantics::MemorySemantics
546getSPIRVMemSemantics(std::memory_order MemOrder) {
547 switch (MemOrder) {
548 case std::memory_order_relaxed:
549 return SPIRV::MemorySemantics::None;
550 case std::memory_order_acquire:
551 return SPIRV::MemorySemantics::Acquire;
552 case std::memory_order_release:
553 return SPIRV::MemorySemantics::Release;
554 case std::memory_order_acq_rel:
555 return SPIRV::MemorySemantics::AcquireRelease;
556 case std::memory_order_seq_cst:
557 return SPIRV::MemorySemantics::SequentiallyConsistent;
558 default:
559 report_fatal_error("Unknown CL memory scope");
560 }
561}
562
563static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
564 switch (ClScope) {
565 case SPIRV::CLMemoryScope::memory_scope_work_item:
566 return SPIRV::Scope::Invocation;
567 case SPIRV::CLMemoryScope::memory_scope_work_group:
568 return SPIRV::Scope::Workgroup;
569 case SPIRV::CLMemoryScope::memory_scope_device:
570 return SPIRV::Scope::Device;
571 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
572 return SPIRV::Scope::CrossDevice;
573 case SPIRV::CLMemoryScope::memory_scope_sub_group:
574 return SPIRV::Scope::Subgroup;
575 }
576 report_fatal_error("Unknown CL memory scope");
577}
578
580 MachineIRBuilder &MIRBuilder,
582 return GR->buildConstantInt(
583 Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
584}
585
586static Register buildScopeReg(Register CLScopeRegister,
587 SPIRV::Scope::Scope Scope,
588 MachineIRBuilder &MIRBuilder,
591 if (CLScopeRegister.isValid()) {
592 auto CLScope =
593 static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
594 Scope = getSPIRVScope(CLScope);
595
596 if (CLScope == static_cast<unsigned>(Scope)) {
597 MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
598 return CLScopeRegister;
599 }
600 }
601 return buildConstantIntReg32(Scope, MIRBuilder, GR);
602}
603
606 if (MRI->getRegClassOrNull(Reg))
607 return;
608 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
609 MRI->setRegClass(Reg,
610 SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
611}
612
613static Register buildMemSemanticsReg(Register SemanticsRegister,
614 Register PtrRegister, unsigned &Semantics,
615 MachineIRBuilder &MIRBuilder,
617 if (SemanticsRegister.isValid()) {
618 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
619 std::memory_order Order =
620 static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
621 Semantics =
622 getSPIRVMemSemantics(Order) |
624 if (static_cast<unsigned>(Order) == Semantics) {
625 MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
626 return SemanticsRegister;
627 }
628 }
629 return buildConstantIntReg32(Semantics, MIRBuilder, GR);
630}
631
632static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
633 const SPIRV::IncomingCall *Call,
634 Register TypeReg,
635 ArrayRef<uint32_t> ImmArgs = {}) {
636 auto MIB = MIRBuilder.buildInstr(Opcode);
637 if (TypeReg.isValid())
638 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
639 unsigned Sz = Call->Arguments.size() - ImmArgs.size();
640 for (unsigned i = 0; i < Sz; ++i)
641 MIB.addUse(Call->Arguments[i]);
642 for (uint32_t ImmArg : ImmArgs)
643 MIB.addImm(ImmArg);
644 return true;
645}
646
647/// Helper function for translating atomic init to OpStore.
649 MachineIRBuilder &MIRBuilder) {
650 if (Call->isSpirvOp())
651 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
652
653 assert(Call->Arguments.size() == 2 &&
654 "Need 2 arguments for atomic init translation");
655 MIRBuilder.buildInstr(SPIRV::OpStore)
656 .addUse(Call->Arguments[0])
657 .addUse(Call->Arguments[1]);
658 return true;
659}
660
661/// Helper function for building an atomic load instruction.
663 MachineIRBuilder &MIRBuilder,
665 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
666 if (Call->isSpirvOp())
667 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
668
669 Register PtrRegister = Call->Arguments[0];
670 // TODO: if true insert call to __translate_ocl_memory_sccope before
671 // OpAtomicLoad and the function implementation. We can use Translator's
672 // output for transcoding/atomic_explicit_arguments.cl as an example.
673 Register ScopeRegister =
674 Call->Arguments.size() > 1
675 ? Call->Arguments[1]
676 : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
677 Register MemSemanticsReg;
678 if (Call->Arguments.size() > 2) {
679 // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
680 MemSemanticsReg = Call->Arguments[2];
681 } else {
682 int Semantics =
683 SPIRV::MemorySemantics::SequentiallyConsistent |
685 MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
686 }
687
688 MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
689 .addDef(Call->ReturnRegister)
690 .addUse(TypeReg)
691 .addUse(PtrRegister)
692 .addUse(ScopeRegister)
693 .addUse(MemSemanticsReg);
694 return true;
695}
696
697/// Helper function for building an atomic store instruction.
699 MachineIRBuilder &MIRBuilder,
701 if (Call->isSpirvOp())
702 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
703 Register(0));
704
705 Register ScopeRegister =
706 buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
707 Register PtrRegister = Call->Arguments[0];
708 int Semantics =
709 SPIRV::MemorySemantics::SequentiallyConsistent |
711 Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
712 MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
713 .addUse(PtrRegister)
714 .addUse(ScopeRegister)
715 .addUse(MemSemanticsReg)
716 .addUse(Call->Arguments[1]);
717 return true;
718}
719
720/// Helper function for building an atomic compare-exchange instruction.
722 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
723 unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
724 if (Call->isSpirvOp())
725 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
726 GR->getSPIRVTypeID(Call->ReturnType));
727
728 bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
729 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
730
731 Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object.)
732 Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
733 Register Desired = Call->Arguments[2]; // Value (C Desired).
734 SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
735 LLT DesiredLLT = MRI->getType(Desired);
736
737 assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
738 SPIRV::OpTypePointer);
739 unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
740 (void)ExpectedType;
741 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
742 : ExpectedType == SPIRV::OpTypePointer);
743 assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));
744
745 SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
746 assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
747 auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
748 SpvObjectPtrTy->getOperand(1).getImm());
749 auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);
750
751 Register MemSemEqualReg;
752 Register MemSemUnequalReg;
753 uint64_t MemSemEqual =
754 IsCmpxchg
755 ? SPIRV::MemorySemantics::None
756 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
757 uint64_t MemSemUnequal =
758 IsCmpxchg
759 ? SPIRV::MemorySemantics::None
760 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
761 if (Call->Arguments.size() >= 4) {
762 assert(Call->Arguments.size() >= 5 &&
763 "Need 5+ args for explicit atomic cmpxchg");
764 auto MemOrdEq =
765 static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
766 auto MemOrdNeq =
767 static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
768 MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
769 MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
770 if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
771 MemSemEqualReg = Call->Arguments[3];
772 if (static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
773 MemSemUnequalReg = Call->Arguments[4];
774 }
775 if (!MemSemEqualReg.isValid())
776 MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
777 if (!MemSemUnequalReg.isValid())
778 MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);
779
780 Register ScopeReg;
781 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
782 if (Call->Arguments.size() >= 6) {
783 assert(Call->Arguments.size() == 6 &&
784 "Extra args for explicit atomic cmpxchg");
785 auto ClScope = static_cast<SPIRV::CLMemoryScope>(
786 getIConstVal(Call->Arguments[5], MRI));
787 Scope = getSPIRVScope(ClScope);
788 if (ClScope == static_cast<unsigned>(Scope))
789 ScopeReg = Call->Arguments[5];
790 }
791 if (!ScopeReg.isValid())
792 ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);
793
794 Register Expected = IsCmpxchg
795 ? ExpectedArg
796 : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
797 GR, LLT::scalar(64));
798 MRI->setType(Expected, DesiredLLT);
799 Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
800 : Call->ReturnRegister;
801 if (!MRI->getRegClassOrNull(Tmp))
802 MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
803 GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());
804
805 MIRBuilder.buildInstr(Opcode)
806 .addDef(Tmp)
807 .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
808 .addUse(ObjectPtr)
809 .addUse(ScopeReg)
810 .addUse(MemSemEqualReg)
811 .addUse(MemSemUnequalReg)
812 .addUse(Desired)
814 if (!IsCmpxchg) {
815 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
816 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
817 }
818 return true;
819}
820
821/// Helper function for building atomic instructions.
822static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
823 MachineIRBuilder &MIRBuilder,
825 if (Call->isSpirvOp())
826 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
827 GR->getSPIRVTypeID(Call->ReturnType));
828
829 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
830 Register ScopeRegister =
831 Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
832
833 assert(Call->Arguments.size() <= 4 &&
834 "Too many args for explicit atomic RMW");
835 ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
836 MIRBuilder, GR, MRI);
837
838 Register PtrRegister = Call->Arguments[0];
839 unsigned Semantics = SPIRV::MemorySemantics::None;
840 Register MemSemanticsReg =
841 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
842 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
843 Semantics, MIRBuilder, GR);
844 Register ValueReg = Call->Arguments[1];
845 Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
846 // support cl_ext_float_atomics
847 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
848 if (Opcode == SPIRV::OpAtomicIAdd) {
849 Opcode = SPIRV::OpAtomicFAddEXT;
850 } else if (Opcode == SPIRV::OpAtomicISub) {
851 // Translate OpAtomicISub applied to a floating type argument to
852 // OpAtomicFAddEXT with the negative value operand
853 Opcode = SPIRV::OpAtomicFAddEXT;
854 Register NegValueReg =
855 MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
856 MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
857 GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
858 MIRBuilder.getMF());
859 MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
860 .addDef(NegValueReg)
861 .addUse(ValueReg);
862 insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
863 MIRBuilder.getMF().getRegInfo());
864 ValueReg = NegValueReg;
865 }
866 }
867 MIRBuilder.buildInstr(Opcode)
868 .addDef(Call->ReturnRegister)
869 .addUse(ValueTypeReg)
870 .addUse(PtrRegister)
871 .addUse(ScopeRegister)
872 .addUse(MemSemanticsReg)
873 .addUse(ValueReg);
874 return true;
875}
876
877/// Helper function for building an atomic floating-type instruction.
879 unsigned Opcode,
880 MachineIRBuilder &MIRBuilder,
882 assert(Call->Arguments.size() == 4 &&
883 "Wrong number of atomic floating-type builtin");
884 Register PtrReg = Call->Arguments[0];
885 Register ScopeReg = Call->Arguments[1];
886 Register MemSemanticsReg = Call->Arguments[2];
887 Register ValueReg = Call->Arguments[3];
888 MIRBuilder.buildInstr(Opcode)
889 .addDef(Call->ReturnRegister)
890 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
891 .addUse(PtrReg)
892 .addUse(ScopeReg)
893 .addUse(MemSemanticsReg)
894 .addUse(ValueReg);
895 return true;
896}
897
898/// Helper function for building atomic flag instructions (e.g.
899/// OpAtomicFlagTestAndSet).
901 unsigned Opcode, MachineIRBuilder &MIRBuilder,
903 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
904 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
905 if (Call->isSpirvOp())
906 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
907 IsSet ? TypeReg : Register(0));
908
909 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
910 Register PtrRegister = Call->Arguments[0];
911 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
912 Register MemSemanticsReg =
913 Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
914 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
915 Semantics, MIRBuilder, GR);
916
917 assert((Opcode != SPIRV::OpAtomicFlagClear ||
918 (Semantics != SPIRV::MemorySemantics::Acquire &&
919 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
920 "Invalid memory order argument!");
921
922 Register ScopeRegister =
923 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
924 ScopeRegister =
925 buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
926
927 auto MIB = MIRBuilder.buildInstr(Opcode);
928 if (IsSet)
929 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
930
931 MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
932 return true;
933}
934
935/// Helper function for building barriers, i.e., memory/control ordering
936/// operations.
937static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
938 MachineIRBuilder &MIRBuilder,
940 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
941 const auto *ST =
942 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
943 if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
944 Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
945 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
946 std::string DiagMsg = std::string(Builtin->Name) +
947 ": the builtin requires the following SPIR-V "
948 "extension: SPV_INTEL_split_barrier";
949 report_fatal_error(DiagMsg.c_str(), false);
950 }
951
952 if (Call->isSpirvOp())
953 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
954
955 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
956 unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
957 unsigned MemSemantics = SPIRV::MemorySemantics::None;
958
959 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
960 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
961
962 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
963 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
964
965 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
966 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
967
968 if (Opcode == SPIRV::OpMemoryBarrier)
969 MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
970 getIConstVal(Call->Arguments[1], MRI))) |
971 MemSemantics;
972 else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
973 MemSemantics |= SPIRV::MemorySemantics::Release;
974 else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
975 MemSemantics |= SPIRV::MemorySemantics::Acquire;
976 else
977 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
978
979 Register MemSemanticsReg =
980 MemFlags == MemSemantics
981 ? Call->Arguments[0]
982 : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
983 Register ScopeReg;
984 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
985 SPIRV::Scope::Scope MemScope = Scope;
986 if (Call->Arguments.size() >= 2) {
987 assert(
988 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
989 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
990 "Extra args for explicitly scoped barrier");
991 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
992 : Call->Arguments[1];
993 SPIRV::CLMemoryScope CLScope =
994 static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
995 MemScope = getSPIRVScope(CLScope);
996 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
997 (Opcode == SPIRV::OpMemoryBarrier))
998 Scope = MemScope;
999 if (CLScope == static_cast<unsigned>(Scope))
1000 ScopeReg = Call->Arguments[1];
1001 }
1002
1003 if (!ScopeReg.isValid())
1004 ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);
1005
1006 auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
1007 if (Opcode != SPIRV::OpMemoryBarrier)
1008 MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
1009 MIB.addUse(MemSemanticsReg);
1010 return true;
1011}
1012
1013/// Helper function for building extended bit operations.
1015 unsigned Opcode,
1016 MachineIRBuilder &MIRBuilder,
1017 SPIRVGlobalRegistry *GR) {
1018 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1019 const auto *ST =
1020 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1021 if ((Opcode == SPIRV::OpBitFieldInsert ||
1022 Opcode == SPIRV::OpBitFieldSExtract ||
1023 Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
1024 !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1025 std::string DiagMsg = std::string(Builtin->Name) +
1026 ": the builtin requires the following SPIR-V "
1027 "extension: SPV_KHR_bit_instructions";
1028 report_fatal_error(DiagMsg.c_str(), false);
1029 }
1030
1031 // Generate SPIRV instruction accordingly.
1032 if (Call->isSpirvOp())
1033 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1034 GR->getSPIRVTypeID(Call->ReturnType));
1035
1036 auto MIB = MIRBuilder.buildInstr(Opcode)
1037 .addDef(Call->ReturnRegister)
1038 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1039 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1040 MIB.addUse(Call->Arguments[i]);
1041
1042 return true;
1043}
1044
1045/// Helper function for building Intel's bindless image instructions.
1047 unsigned Opcode,
1048 MachineIRBuilder &MIRBuilder,
1049 SPIRVGlobalRegistry *GR) {
1050 // Generate SPIRV instruction accordingly.
1051 if (Call->isSpirvOp())
1052 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1053 GR->getSPIRVTypeID(Call->ReturnType));
1054
1055 MIRBuilder.buildInstr(Opcode)
1056 .addDef(Call->ReturnRegister)
1057 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1058 .addUse(Call->Arguments[0]);
1059
1060 return true;
1061}
1062
1063/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
1065 const SPIRV::IncomingCall *Call, unsigned Opcode,
1066 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
1067 // Generate SPIRV instruction accordingly.
1068 if (Call->isSpirvOp())
1069 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1070 GR->getSPIRVTypeID(Call->ReturnType));
1071
1072 auto MIB = MIRBuilder.buildInstr(Opcode)
1073 .addDef(Call->ReturnRegister)
1074 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1075 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1076 MIB.addUse(Call->Arguments[i]);
1077
1078 return true;
1079}
1080
1081/// Helper function for building Intel's 2d block io instructions.
1083 unsigned Opcode,
1084 MachineIRBuilder &MIRBuilder,
1085 SPIRVGlobalRegistry *GR) {
1086 // Generate SPIRV instruction accordingly.
1087 if (Call->isSpirvOp())
1088 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
1089
1090 auto MIB = MIRBuilder.buildInstr(Opcode)
1091 .addDef(Call->ReturnRegister)
1092 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1093 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1094 MIB.addUse(Call->Arguments[i]);
1095
1096 return true;
1097}
1098
1099static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
1100 switch (dim) {
1101 case SPIRV::Dim::DIM_1D:
1102 case SPIRV::Dim::DIM_Buffer:
1103 return 1;
1104 case SPIRV::Dim::DIM_2D:
1105 case SPIRV::Dim::DIM_Cube:
1106 case SPIRV::Dim::DIM_Rect:
1107 return 2;
1108 case SPIRV::Dim::DIM_3D:
1109 return 3;
1110 default:
1111 report_fatal_error("Cannot get num components for given Dim");
1112 }
1113}
1114
1115/// Helper function for obtaining the number of size components.
1116static unsigned getNumSizeComponents(SPIRVType *imgType) {
1117 assert(imgType->getOpcode() == SPIRV::OpTypeImage);
1118 auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
1119 unsigned numComps = getNumComponentsForDim(dim);
1120 bool arrayed = imgType->getOperand(4).getImm() == 1;
1121 return arrayed ? numComps + 1 : numComps;
1122}
1123
1124//===----------------------------------------------------------------------===//
1125// Implementation functions for each builtin group
1126//===----------------------------------------------------------------------===//
1127
1128static bool generateExtInst(const SPIRV::IncomingCall *Call,
1129 MachineIRBuilder &MIRBuilder,
1130 SPIRVGlobalRegistry *GR) {
1131 // Lookup the extended instruction number in the TableGen records.
1132 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1134 SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
1135
1136 // Build extended instruction.
1137 auto MIB =
1138 MIRBuilder.buildInstr(SPIRV::OpExtInst)
1139 .addDef(Call->ReturnRegister)
1140 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1141 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1142 .addImm(Number);
1143
1144 for (auto Argument : Call->Arguments)
1145 MIB.addUse(Argument);
1146 return true;
1147}
1148
1150 MachineIRBuilder &MIRBuilder,
1151 SPIRVGlobalRegistry *GR) {
1152 // Lookup the instruction opcode in the TableGen records.
1153 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1154 unsigned Opcode =
1155 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1156
1157 Register CompareRegister;
1158 SPIRVType *RelationType;
1159 std::tie(CompareRegister, RelationType) =
1160 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1161
1162 // Build relational instruction.
1163 auto MIB = MIRBuilder.buildInstr(Opcode)
1164 .addDef(CompareRegister)
1165 .addUse(GR->getSPIRVTypeID(RelationType));
1166
1167 for (auto Argument : Call->Arguments)
1168 MIB.addUse(Argument);
1169
1170 // Build select instruction.
1171 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
1172 Call->ReturnType, GR);
1173}
1174
1176 MachineIRBuilder &MIRBuilder,
1177 SPIRVGlobalRegistry *GR) {
1178 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1179 const SPIRV::GroupBuiltin *GroupBuiltin =
1180 SPIRV::lookupGroupBuiltin(Builtin->Name);
1181
1182 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1183 if (Call->isSpirvOp()) {
1184 if (GroupBuiltin->NoGroupOperation) {
1186 if (GroupBuiltin->Opcode ==
1187 SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
1188 Call->Arguments.size() > 4)
1189 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
1190 return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
1191 GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
1192 }
1193
1194 // Group Operation is a literal
1195 Register GroupOpReg = Call->Arguments[1];
1196 const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
1197 if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
1199 "Group Operation parameter must be an integer constant");
1200 uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
1201 Register ScopeReg = Call->Arguments[0];
1202 auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1203 .addDef(Call->ReturnRegister)
1204 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1205 .addUse(ScopeReg)
1206 .addImm(GrpOp);
1207 for (unsigned i = 2; i < Call->Arguments.size(); ++i)
1208 MIB.addUse(Call->Arguments[i]);
1209 return true;
1210 }
1211
1212 Register Arg0;
1213 if (GroupBuiltin->HasBoolArg) {
1214 SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1215 Register BoolReg = Call->Arguments[0];
1216 SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
1217 if (!BoolRegType)
1218 report_fatal_error("Can't find a register's type definition");
1219 MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
1220 if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
1221 if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
1222 Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
1223 BoolType, true);
1224 } else {
1225 if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
1226 Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
1227 MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
1228 GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
1229 MIRBuilder.buildICmp(
1230 CmpInst::ICMP_NE, Arg0, BoolReg,
1231 GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
1232 insertAssignInstr(Arg0, nullptr, BoolType, GR, MIRBuilder,
1233 MIRBuilder.getMF().getRegInfo());
1234 } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
1235 report_fatal_error("Expect a boolean argument");
1236 }
1237 // if BoolReg is a boolean register, we don't need to do anything
1238 }
1239 }
1240
1241 Register GroupResultRegister = Call->ReturnRegister;
1242 SPIRVType *GroupResultType = Call->ReturnType;
1243
1244 // TODO: maybe we need to check whether the result type is already boolean
1245 // and in this case do not insert select instruction.
1246 const bool HasBoolReturnTy =
1247 GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
1248 GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
1249 GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;
1250
1251 if (HasBoolReturnTy)
1252 std::tie(GroupResultRegister, GroupResultType) =
1253 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1254
1255 auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
1256 : SPIRV::Scope::Workgroup;
1257 Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);
1258
1259 Register VecReg;
1260 if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
1261 Call->Arguments.size() > 2) {
1262 // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
1263 // scalar, a vector with 2 components, or a vector with 3 components.",
1264 // meaning that we must create a vector from the function arguments if
1265 // it's a work_group_broadcast(val, local_id_x, local_id_y) or
1266 // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
1267 Register ElemReg = Call->Arguments[1];
1268 SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
1269 if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
1270 report_fatal_error("Expect an integer <LocalId> argument");
1271 unsigned VecLen = Call->Arguments.size() - 1;
1272 VecReg = MRI->createGenericVirtualRegister(
1273 LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
1274 MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
1275 SPIRVType *VecType =
1276 GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
1277 GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
1278 auto MIB =
1279 MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
1280 for (unsigned i = 1; i < Call->Arguments.size(); i++) {
1281 MIB.addUse(Call->Arguments[i]);
1282 setRegClassIfNull(Call->Arguments[i], MRI, GR);
1283 }
1284 insertAssignInstr(VecReg, nullptr, VecType, GR, MIRBuilder,
1285 MIRBuilder.getMF().getRegInfo());
1286 }
1287
1288 // Build work/sub group instruction.
1289 auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1290 .addDef(GroupResultRegister)
1291 .addUse(GR->getSPIRVTypeID(GroupResultType))
1292 .addUse(ScopeRegister);
1293
1294 if (!GroupBuiltin->NoGroupOperation)
1295 MIB.addImm(GroupBuiltin->GroupOperation);
1296 if (Call->Arguments.size() > 0) {
1297 MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
1298 setRegClassIfNull(Call->Arguments[0], MRI, GR);
1299 if (VecReg.isValid())
1300 MIB.addUse(VecReg);
1301 else
1302 for (unsigned i = 1; i < Call->Arguments.size(); i++)
1303 MIB.addUse(Call->Arguments[i]);
1304 }
1305
1306 // Build select instruction.
1307 if (HasBoolReturnTy)
1308 buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
1309 Call->ReturnType, GR);
1310 return true;
1311}
1312
1314 MachineIRBuilder &MIRBuilder,
1315 SPIRVGlobalRegistry *GR) {
1316 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1317 MachineFunction &MF = MIRBuilder.getMF();
1318 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1319 const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
1320 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);
1321
1322 if (IntelSubgroups->IsMedia &&
1323 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1324 std::string DiagMsg = std::string(Builtin->Name) +
1325 ": the builtin requires the following SPIR-V "
1326 "extension: SPV_INTEL_media_block_io";
1327 report_fatal_error(DiagMsg.c_str(), false);
1328 } else if (!IntelSubgroups->IsMedia &&
1329 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1330 std::string DiagMsg = std::string(Builtin->Name) +
1331 ": the builtin requires the following SPIR-V "
1332 "extension: SPV_INTEL_subgroups";
1333 report_fatal_error(DiagMsg.c_str(), false);
1334 }
1335
1336 uint32_t OpCode = IntelSubgroups->Opcode;
1337 if (Call->isSpirvOp()) {
1338 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1339 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
1340 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
1341 return buildOpFromWrapper(MIRBuilder, OpCode, Call,
1342 IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
1343 : Register(0));
1344 }
1345
1346 if (IntelSubgroups->IsBlock) {
1347 // Minimal number or arguments set in TableGen records is 1
1348 if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
1349 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1350 // TODO: add required validation from the specification:
1351 // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
1352 // operand of 0 or 2. If the 'Sampled' operand is 2, then some
1353 // dimensions require a capability."
1354 switch (OpCode) {
1355 case SPIRV::OpSubgroupBlockReadINTEL:
1356 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1357 break;
1358 case SPIRV::OpSubgroupBlockWriteINTEL:
1359 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1360 break;
1361 }
1362 }
1363 }
1364 }
1365
1366 // TODO: opaque pointers types should be eventually resolved in such a way
1367 // that validation of block read is enabled with respect to the following
1368 // specification requirement:
1369 // "'Result Type' may be a scalar or vector type, and its component type must
1370 // be equal to the type pointed to by 'Ptr'."
1371 // For example, function parameter type should not be default i8 pointer, but
1372 // depend on the result type of the instruction where it is used as a pointer
1373 // argument of OpSubgroupBlockReadINTEL
1374
1375 // Build Intel subgroups instruction
1377 IntelSubgroups->IsWrite
1378 ? MIRBuilder.buildInstr(OpCode)
1379 : MIRBuilder.buildInstr(OpCode)
1380 .addDef(Call->ReturnRegister)
1381 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1382 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1383 MIB.addUse(Call->Arguments[i]);
1384 return true;
1385}
1386
1388 MachineIRBuilder &MIRBuilder,
1389 SPIRVGlobalRegistry *GR) {
1390 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1391 MachineFunction &MF = MIRBuilder.getMF();
1392 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1393 if (!ST->canUseExtension(
1394 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1395 std::string DiagMsg = std::string(Builtin->Name) +
1396 ": the builtin requires the following SPIR-V "
1397 "extension: SPV_KHR_uniform_group_instructions";
1398 report_fatal_error(DiagMsg.c_str(), false);
1399 }
1400 const SPIRV::GroupUniformBuiltin *GroupUniform =
1401 SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
1402 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1403
1404 Register GroupResultReg = Call->ReturnRegister;
1405 Register ScopeReg = Call->Arguments[0];
1406 Register ValueReg = Call->Arguments[2];
1407
1408 // Group Operation
1409 Register ConstGroupOpReg = Call->Arguments[1];
1410 const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
1411 if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1413 "expect a constant group operation for a uniform group instruction",
1414 false);
1415 const MachineOperand &ConstOperand = Const->getOperand(1);
1416 if (!ConstOperand.isCImm())
1417 report_fatal_error("uniform group instructions: group operation must be an "
1418 "integer constant",
1419 false);
1420
1421 auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
1422 .addDef(GroupResultReg)
1423 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1424 .addUse(ScopeReg);
1425 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1426 MIB.addUse(ValueReg);
1427
1428 return true;
1429}
1430
1432 MachineIRBuilder &MIRBuilder,
1433 SPIRVGlobalRegistry *GR) {
1434 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1435 MachineFunction &MF = MIRBuilder.getMF();
1436 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1437 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1438 std::string DiagMsg = std::string(Builtin->Name) +
1439 ": the builtin requires the following SPIR-V "
1440 "extension: SPV_KHR_shader_clock";
1441 report_fatal_error(DiagMsg.c_str(), false);
1442 }
1443
1444 Register ResultReg = Call->ReturnRegister;
1445
1446 // Deduce the `Scope` operand from the builtin function name.
1447 SPIRV::Scope::Scope ScopeArg =
1449 .EndsWith("device", SPIRV::Scope::Scope::Device)
1450 .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
1451 .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
1452 Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);
1453
1454 MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
1455 .addDef(ResultReg)
1456 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1457 .addUse(ScopeReg);
1458
1459 return true;
1460}
1461
1462// These queries ask for a single size_t result for a given dimension index,
1463// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
1464// corresponding to these values are all vec3 types, so we need to extract the
1465// correct index or return DefaultValue (0 or 1 depending on the query). We also
1466// handle extending or truncating in case size_t does not match the expected
1467// result type's bitwidth.
1468//
1469// For a constant index >= 3 we generate:
1470// %res = OpConstant %SizeT DefaultValue
1471//
1472// For other indices we generate:
1473// %g = OpVariable %ptr_V3_SizeT Input
1474// OpDecorate %g BuiltIn XXX
1475// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1476// OpDecorate %g Constant
1477// %loadedVec = OpLoad %V3_SizeT %g
1478//
1479// Then, if the index is constant < 3, we generate:
1480// %res = OpCompositeExtract %SizeT %loadedVec idx
1481// If the index is dynamic, we generate:
1482// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1483// %cmp = OpULessThan %bool %idx %const_3
1484// %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
1485//
1486// If the bitwidth of %res does not match the expected return type, we add an
1487// extend or truncate.
1489 MachineIRBuilder &MIRBuilder,
1491 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1492 uint64_t DefaultValue) {
1493 Register IndexRegister = Call->Arguments[0];
1494 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1495 const unsigned PointerSize = GR->getPointerSize();
1496 const SPIRVType *PointerSizeType =
1497 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1498 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1499 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1500
1501 // Set up the final register to do truncation or extension on at the end.
1502 Register ToTruncate = Call->ReturnRegister;
1503
1504 // If the index is constant, we can statically determine if it is in range.
1505 bool IsConstantIndex =
1506 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1507
1508 // If it's out of range (max dimension is 3), we can just return the constant
1509 // default value (0 or 1 depending on which query function).
1510 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1511 Register DefaultReg = Call->ReturnRegister;
1512 if (PointerSize != ResultWidth) {
1513 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1514 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1515 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1516 MIRBuilder.getMF());
1517 ToTruncate = DefaultReg;
1518 }
1519 auto NewRegister =
1520 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1521 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1522 } else { // If it could be in range, we need to load from the given builtin.
1523 auto Vec3Ty =
1524 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
1525 Register LoadedVector =
1526 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1527 LLT::fixed_vector(3, PointerSize));
1528 // Set up the vreg to extract the result to (possibly a new temporary one).
1529 Register Extracted = Call->ReturnRegister;
1530 if (!IsConstantIndex || PointerSize != ResultWidth) {
1531 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1532 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1533 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1534 }
1535 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1536 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1537 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1538 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1539 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1540
1541 // If the index is dynamic, need check if it's < 3, and then use a select.
1542 if (!IsConstantIndex) {
1543 insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
1544 *MRI);
1545
1546 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1547 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1548
1549 Register CompareRegister =
1550 MRI->createGenericVirtualRegister(LLT::scalar(1));
1551 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1552 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1553
1554 // Use G_ICMP to check if idxVReg < 3.
1555 MIRBuilder.buildICmp(
1556 CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1557 GR->buildConstantInt(3, MIRBuilder, IndexType, true));
1558
1559 // Get constant for the default value (0 or 1 depending on which
1560 // function).
1561 Register DefaultRegister =
1562 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1563
1564 // Get a register for the selection result (possibly a new temporary one).
1565 Register SelectionResult = Call->ReturnRegister;
1566 if (PointerSize != ResultWidth) {
1567 SelectionResult =
1568 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1569 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1570 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1571 MIRBuilder.getMF());
1572 }
1573 // Create the final G_SELECT to return the extracted value or the default.
1574 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1575 DefaultRegister);
1576 ToTruncate = SelectionResult;
1577 } else {
1578 ToTruncate = Extracted;
1579 }
1580 }
1581 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1582 if (PointerSize != ResultWidth)
1583 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1584 return true;
1585}
1586
1588 MachineIRBuilder &MIRBuilder,
1589 SPIRVGlobalRegistry *GR) {
1590 // Lookup the builtin variable record.
1591 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1592 SPIRV::BuiltIn::BuiltIn Value =
1593 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1594
1595 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1596 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1597
1598 // Build a load instruction for the builtin variable.
1599 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1600 LLT LLType;
1601 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1602 LLType =
1603 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1604 else
1605 LLType = LLT::scalar(BitWidth);
1606
1607 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1608 LLType, Call->ReturnRegister);
1609}
1610
1612 MachineIRBuilder &MIRBuilder,
1613 SPIRVGlobalRegistry *GR) {
1614 // Lookup the instruction opcode in the TableGen records.
1615 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1616 unsigned Opcode =
1617 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1618
1619 switch (Opcode) {
1620 case SPIRV::OpStore:
1621 return buildAtomicInitInst(Call, MIRBuilder);
1622 case SPIRV::OpAtomicLoad:
1623 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1624 case SPIRV::OpAtomicStore:
1625 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1626 case SPIRV::OpAtomicCompareExchange:
1627 case SPIRV::OpAtomicCompareExchangeWeak:
1628 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1629 GR);
1630 case SPIRV::OpAtomicIAdd:
1631 case SPIRV::OpAtomicISub:
1632 case SPIRV::OpAtomicOr:
1633 case SPIRV::OpAtomicXor:
1634 case SPIRV::OpAtomicAnd:
1635 case SPIRV::OpAtomicExchange:
1636 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1637 case SPIRV::OpMemoryBarrier:
1638 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1639 case SPIRV::OpAtomicFlagTestAndSet:
1640 case SPIRV::OpAtomicFlagClear:
1641 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1642 default:
1643 if (Call->isSpirvOp())
1644 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1645 GR->getSPIRVTypeID(Call->ReturnType));
1646 return false;
1647 }
1648}
1649
1651 MachineIRBuilder &MIRBuilder,
1652 SPIRVGlobalRegistry *GR) {
1653 // Lookup the instruction opcode in the TableGen records.
1654 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1655 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1656
1657 switch (Opcode) {
1658 case SPIRV::OpAtomicFAddEXT:
1659 case SPIRV::OpAtomicFMinEXT:
1660 case SPIRV::OpAtomicFMaxEXT:
1661 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1662 default:
1663 return false;
1664 }
1665}
1666
1668 MachineIRBuilder &MIRBuilder,
1669 SPIRVGlobalRegistry *GR) {
1670 // Lookup the instruction opcode in the TableGen records.
1671 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1672 unsigned Opcode =
1673 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1674
1675 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1676}
1677
1679 MachineIRBuilder &MIRBuilder,
1680 SPIRVGlobalRegistry *GR) {
1681 // Lookup the instruction opcode in the TableGen records.
1682 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1683 unsigned Opcode =
1684 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1685
1686 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1687 SPIRV::StorageClass::StorageClass ResSC =
1688 GR->getPointerStorageClass(Call->ReturnRegister);
1689 if (!isGenericCastablePtr(ResSC))
1690 return false;
1691
1692 MIRBuilder.buildInstr(Opcode)
1693 .addDef(Call->ReturnRegister)
1694 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1695 .addUse(Call->Arguments[0])
1696 .addImm(ResSC);
1697 } else {
1698 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1699 .addDef(Call->ReturnRegister)
1700 .addUse(Call->Arguments[0]);
1701 }
1702 return true;
1703}
1704
1705static bool generateDotOrFMulInst(const StringRef DemangledCall,
1706 const SPIRV::IncomingCall *Call,
1707 MachineIRBuilder &MIRBuilder,
1708 SPIRVGlobalRegistry *GR) {
1709 if (Call->isSpirvOp())
1710 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1711 GR->getSPIRVTypeID(Call->ReturnType));
1712
1713 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1714 SPIRV::OpTypeVector;
1715 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1716 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1717 bool IsSwapReq = false;
1718
1719 const auto *ST =
1720 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1721 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1722 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1723 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1724 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1725 const SPIRV::IntegerDotProductBuiltin *IntDot =
1726 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1727 if (IntDot) {
1728 OC = IntDot->Opcode;
1729 IsSwapReq = IntDot->IsSwapReq;
1730 } else if (IsVec) {
1731 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1732 // integers.
1733 LLVMContext &Ctx = MIRBuilder.getContext();
1735 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
1736 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1737 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1738
1739 if (Call->BuiltinName == "dot") {
1740 if (IsFirstSigned && IsSecondSigned)
1741 OC = SPIRV::OpSDot;
1742 else if (!IsFirstSigned && !IsSecondSigned)
1743 OC = SPIRV::OpUDot;
1744 else {
1745 OC = SPIRV::OpSUDot;
1746 if (!IsFirstSigned)
1747 IsSwapReq = true;
1748 }
1749 } else if (Call->BuiltinName == "dot_acc_sat") {
1750 if (IsFirstSigned && IsSecondSigned)
1751 OC = SPIRV::OpSDotAccSat;
1752 else if (!IsFirstSigned && !IsSecondSigned)
1753 OC = SPIRV::OpUDotAccSat;
1754 else {
1755 OC = SPIRV::OpSUDotAccSat;
1756 if (!IsFirstSigned)
1757 IsSwapReq = true;
1758 }
1759 }
1760 }
1761 }
1762
1763 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1764 .addDef(Call->ReturnRegister)
1765 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1766
1767 if (IsSwapReq) {
1768 MIB.addUse(Call->Arguments[1]);
1769 MIB.addUse(Call->Arguments[0]);
1770 // needed for dot_acc_sat* builtins
1771 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1772 MIB.addUse(Call->Arguments[i]);
1773 } else {
1774 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1775 MIB.addUse(Call->Arguments[i]);
1776 }
1777
1778 // Add Packed Vector Format for Integer dot product builtins if arguments are
1779 // scalar
1780 if (!IsVec && OC != SPIRV::OpFMulS)
1781 MIB.addImm(0);
1782
1783 return true;
1784}
1785
// Lower a wave-style query builtin: resolve the SPIR-V BuiltIn enum for the
// demangled name via the TableGen "Get" table, then load the corresponding
// built-in variable into the call's return register.
// NOTE(review): the function-signature line was dropped by extraction —
// presumably `static bool generateWaveInst(const SPIRV::IncomingCall *Call,`.
1787 MachineIRBuilder &MIRBuilder,
1788 SPIRVGlobalRegistry *GR) {
1789 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1790 SPIRV::BuiltIn::BuiltIn Value =
1791 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1792
1793 // For now, we only support a single Wave intrinsic with a single return type.
1794 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1795 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1796
// Emit the built-in variable load (helper call begins on a dropped line):
// the variable is not a constant and carries no linkage decoration.
1798 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1799 /* isConst= */ false, /* hasLinkageTy= */ false);
1800 }
1801
1802 // We expect a builtin
1803 // Name(ptr sret([RetType]) %result, Type %operand1, Type %operand1)
1804 // where %result is a pointer to where the result of the builtin execution
1805 // is to be stored, and generate the following instructions:
1806 // Res = Opcode RetType Operand1 Operand1
1807 // OpStore RetVariable Res
1809 MachineIRBuilder &MIRBuilder,
1810 SPIRVGlobalRegistry *GR) {
// Resolve the scalar form of the opcode from the TableGen record; it is
// switched to the vector form below when the operands are vectors.
1811 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1812 unsigned Opcode =
1813 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1814
// Argument 0 is the sret destination pointer; its pointee must be the
// {result, carry/borrow} struct required by OpIAddCarry/OpISubBorrow.
1815 Register SRetReg = Call->Arguments[0];
1816 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1817 SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1818 if (!RetType)
1819 report_fatal_error("The first parameter must be a pointer");
1820 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1821 report_fatal_error("Expected struct type result for the arithmetic with "
1822 "overflow builtins");
1823
// Both value operands must share one SPIR-V type.
1824 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1825 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1826 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1827 report_fatal_error("Operands must have the same type");
1828 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1829 switch (Opcode) {
1830 case SPIRV::OpIAddCarryS:
1831 Opcode = SPIRV::OpIAddCarryV;
1832 break;
1833 case SPIRV::OpISubBorrowS:
1834 Opcode = SPIRV::OpISubBorrowV;
1835 break;
1836 }
1837
// Create the result vreg; mirror the operand's register class/LLT when
// known, otherwise fall back to a 64-bit scalar LLT.
1838 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1839 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1840 if (const TargetRegisterClass *DstRC =
1841 MRI->getRegClassOrNull(Call->Arguments[1])) {
1842 MRI->setRegClass(ResReg, DstRC);
1843 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1844 } else {
1845 MRI->setType(ResReg, LLT::scalar(64));
1846 }
1847 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
// Emit the arithmetic op, then store the struct result through the sret
// pointer, matching the pattern documented above.
1848 MIRBuilder.buildInstr(Opcode)
1849 .addDef(ResReg)
1850 .addUse(GR->getSPIRVTypeID(RetType))
1851 .addUse(Call->Arguments[1])
1852 .addUse(Call->Arguments[2]);
1853 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1854 return true;
1855 }
1856
// Lower get_*-style workgroup/ND-range query builtins by delegating to
// genWorkgroupQuery. Size-like queries (GlobalSize, NumWorkgroups,
// WorkgroupSize, EnqueuedWorkgroupSize) default to 1 for out-of-range
// dimension indices; the remaining (id-like) queries default to 0.
1858 MachineIRBuilder &MIRBuilder,
1859 SPIRVGlobalRegistry *GR) {
1860 // Lookup the builtin record.
1861 SPIRV::BuiltIn::BuiltIn Value =
1862 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1863 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
1864 Value == SPIRV::BuiltIn::NumWorkgroups ||
1865 Value == SPIRV::BuiltIn::WorkgroupSize ||
1866 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1867 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
1868 }
1869
// Lower image size query builtins (get_image_width/height/... style) to
// OpImageQuerySize/OpImageQuerySizeLod, reconciling the number of components
// the caller expects with the number the image dimensionality produces.
1871 MachineIRBuilder &MIRBuilder,
1872 SPIRVGlobalRegistry *GR) {
1873 // Lookup the image size query component number in the TableGen records.
1874 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1875 uint32_t Component =
1876 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1877 // Query result may either be a vector or a scalar. If return type is not a
1878 // vector, expect only a single size component. Otherwise get the number of
1879 // expected components.
1880 unsigned NumExpectedRetComponents =
1881 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
1882 ? Call->ReturnType->getOperand(2).getImm()
1883 : 1;
1884 // Get the actual number of query result/size components.
1885 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1886 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1887 Register QueryResult = Call->ReturnRegister;
1888 SPIRVType *QueryResultType = Call->ReturnType;
// On mismatch, query into a temporary vector of the actual width, then
// extract/shuffle into the caller's return register below.
1889 if (NumExpectedRetComponents != NumActualRetComponents) {
1890 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
1891 ? Call->ReturnType->getOperand(1).getImm()
1892 : 32;
1893 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1894 LLT::fixed_vector(NumActualRetComponents, Bitwidth));
1895 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
1896 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
1897 QueryResultType = GR->getOrCreateSPIRVVectorType(
1898 IntTy, NumActualRetComponents, MIRBuilder, true);
1899 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1900 }
// Buffer images take no Lod operand; everything else queries at Lod 0.
1901 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1902 unsigned Opcode =
1903 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1904 auto MIB = MIRBuilder.buildInstr(Opcode)
1905 .addDef(QueryResult)
1906 .addUse(GR->getSPIRVTypeID(QueryResultType))
1907 .addUse(Call->Arguments[0]);
1908 if (!IsDimBuf)
1909 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
1910 if (NumExpectedRetComponents == NumActualRetComponents)
1911 return true;
1912 if (NumExpectedRetComponents == 1) {
1913 // Only 1 component is expected, build OpCompositeExtract instruction.
// Component == 3 selects the last actual component (array-size queries).
1914 unsigned ExtractedComposite =
1915 Component == 3 ? NumActualRetComponents - 1 : Component;
1916 assert(ExtractedComposite < NumActualRetComponents &&
1917 "Invalid composite index!");
1918 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1919 SPIRVType *NewType = nullptr;
1920 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1921 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
1922 if (TypeReg != NewTypeReg &&
1923 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
1924 TypeReg = NewTypeReg;
1925 }
1926 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1927 .addDef(Call->ReturnRegister)
1928 .addUse(TypeReg)
1929 .addUse(QueryResult)
1930 .addImm(ExtractedComposite);
1931 if (NewType != nullptr)
1932 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1933 MIRBuilder.getMF().getRegInfo());
1934 } else {
1935 // More than 1 component is expected, fill a new vector.
// 0xffffffff marks an undefined shuffle lane for components the image
// cannot provide.
1936 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1937 .addDef(Call->ReturnRegister)
1938 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1939 .addUse(QueryResult)
1940 .addUse(QueryResult);
1941 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
1942 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
1943 }
1944 return true;
1945 }
1946
// Lower image samples/levels query builtins (OpImageQuerySamples /
// OpImageQueryLevels). The dimensionality asserts encode the SPIR-V validity
// rules for each query; they are assert-only (no release-mode check).
1948 MachineIRBuilder &MIRBuilder,
1949 SPIRVGlobalRegistry *GR) {
1950 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1951 "Image samples query result must be of int type!");
1952
1953 // Lookup the instruction opcode in the TableGen records.
1954 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1955 unsigned Opcode =
1956 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1957
1958 Register Image = Call->Arguments[0];
// Operand 2 of the image type is its Dim; only consumed by the asserts.
1959 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
1960 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
1961 (void)ImageDimensionality;
1962
1963 switch (Opcode) {
1964 case SPIRV::OpImageQuerySamples:
1965 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1966 "Image must be of 2D dimensionality");
1967 break;
1968 case SPIRV::OpImageQueryLevels:
1969 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1970 ImageDimensionality == SPIRV::Dim::DIM_2D ||
1971 ImageDimensionality == SPIRV::Dim::DIM_3D ||
1972 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1973 "Image must be of 1D/2D/3D/Cube dimensionality");
1974 break;
1975 }
1976
1977 MIRBuilder.buildInstr(Opcode)
1978 .addDef(Call->ReturnRegister)
1979 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1980 .addUse(Image);
1981 return true;
1982 }
1983
1984 // TODO: Move to TableGen.
// Translate an OpenCL sampler literal's CLK_ADDRESS_* bits into the SPIR-V
// SamplerAddressingMode enumerant. Unknown modes are a hard error.
1985 static SPIRV::SamplerAddressingMode::SamplerAddressingMode
1987 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
1988 case SPIRV::CLK_ADDRESS_CLAMP:
1989 return SPIRV::SamplerAddressingMode::Clamp;
1990 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
1991 return SPIRV::SamplerAddressingMode::ClampToEdge;
1992 case SPIRV::CLK_ADDRESS_REPEAT:
1993 return SPIRV::SamplerAddressingMode::Repeat;
1994 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
1995 return SPIRV::SamplerAddressingMode::RepeatMirrored;
1996 case SPIRV::CLK_ADDRESS_NONE:
1997 return SPIRV::SamplerAddressingMode::None;
1998 default:
1999 report_fatal_error("Unknown CL address mode");
2000 }
2001 }
2002
2003static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2004 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2005}
2006
// Translate an OpenCL sampler literal's CLK_FILTER_* bits into the SPIR-V
// SamplerFilterMode enumerant. Nearest doubles as the fallback when neither
// filter bit is present, so the final two returns are intentionally the same.
2007 static SPIRV::SamplerFilterMode::SamplerFilterMode
2009 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2010 return SPIRV::SamplerFilterMode::Linear;
2011 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2012 return SPIRV::SamplerFilterMode::Nearest;
2013 return SPIRV::SamplerFilterMode::Nearest;
2014 }
2015
// Lower read_image* builtins. Three shapes are handled:
//  - with an OpenCL sampler: build OpSampledImage + OpImageSampleExplicitLod,
//  - MSAA reads: OpImageRead with a Sample image operand,
//  - plain reads: bare OpImageRead.
// __spirv_-prefixed wrappers are forwarded verbatim to OpImageRead.
2016 static bool generateReadImageInst(const StringRef DemangledCall,
2017 const SPIRV::IncomingCall *Call,
2018 MachineIRBuilder &MIRBuilder,
2019 SPIRVGlobalRegistry *GR) {
2020 if (Call->isSpirvOp())
2021 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2022 GR->getSPIRVTypeID(Call->ReturnType));
2023 Register Image = Call->Arguments[0];
2024 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
// Dispatch on the demangled signature text, not on types.
2025 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2026 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2027 if (HasOclSampler) {
2028 Register Sampler = Call->Arguments[1];
2029
// If the sampler argument is an integer literal rather than a sampler
// value, materialize an OpConstantSampler from its CLK_* bitmask.
2030 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2031 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2032 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2033 Sampler = GR->buildConstantSampler(
2035 getSamplerParamFromBitmask(SamplerMask),
2036 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2037 }
2038 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2039 SPIRVType *SampledImageType =
2040 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2041 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2042
2043 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2044 .addDef(SampledImage)
2045 .addUse(GR->getSPIRVTypeID(SampledImageType))
2046 .addUse(Image)
2047 .addUse(Sampler);
2048
// Lod is defined on a dropped line — presumably a constant 0; confirm
// against the upstream source.
2050 MIRBuilder);
2051
// Sampling always yields a 4-component texel; when the caller expects a
// scalar, sample into a temporary vec4 and extract component 0.
2052 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2053 SPIRVType *TempType =
2054 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2055 Register TempRegister =
2056 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2057 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2058 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2059 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2060 .addDef(TempRegister)
2061 .addUse(GR->getSPIRVTypeID(TempType))
2062 .addUse(SampledImage)
2063 .addUse(Call->Arguments[2]) // Coordinate.
2064 .addImm(SPIRV::ImageOperand::Lod)
2065 .addUse(Lod);
2066 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2067 .addDef(Call->ReturnRegister)
2068 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2069 .addUse(TempRegister)
2070 .addImm(0);
2071 } else {
2072 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2073 .addDef(Call->ReturnRegister)
2074 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2075 .addUse(SampledImage)
2076 .addUse(Call->Arguments[2]) // Coordinate.
2077 .addImm(SPIRV::ImageOperand::Lod)
2078 .addUse(Lod);
2079 }
2080 } else if (HasMsaa) {
2081 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2082 .addDef(Call->ReturnRegister)
2083 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2084 .addUse(Image)
2085 .addUse(Call->Arguments[1]) // Coordinate.
2086 .addImm(SPIRV::ImageOperand::Sample)
2087 .addUse(Call->Arguments[2]);
2088 } else {
2089 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2090 .addDef(Call->ReturnRegister)
2091 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2092 .addUse(Image)
2093 .addUse(Call->Arguments[1]); // Coordinate.
2094 }
2095 return true;
2096 }
2097
// Lower write_image* builtins to OpImageWrite (image, coordinate, texel).
// __spirv_-prefixed wrappers are forwarded verbatim; OpImageWrite produces
// no result id, hence the null type register.
2099 MachineIRBuilder &MIRBuilder,
2100 SPIRVGlobalRegistry *GR) {
2101 if (Call->isSpirvOp())
2102 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2103 Register(0));
2104 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2105 .addUse(Call->Arguments[0]) // Image.
2106 .addUse(Call->Arguments[1]) // Coordinate.
2107 .addUse(Call->Arguments[2]); // Texel.
2108 return true;
2109 }
2110
// Lower sampler-related builtins, dispatching on the builtin name:
//  - __translate_sampler_initializer: materialize an OpConstantSampler,
//  - __spirv_SampledImage: build an OpSampledImage pair,
//  - __spirv_ImageSampleExplicitLod: explicit-Lod sample, with the result
//    type optionally parsed from the "_R<type>" suffix of the mangled call.
// Returns false for any other name so the caller can report failure.
2111 static bool generateSampleImageInst(const StringRef DemangledCall,
2112 const SPIRV::IncomingCall *Call,
2113 MachineIRBuilder &MIRBuilder,
2114 SPIRVGlobalRegistry *GR) {
2115 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2116 if (Call->Builtin->Name.contains_insensitive(
2117 "__translate_sampler_initializer")) {
2118 // Build sampler literal.
2119 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2120 Register Sampler = GR->buildConstantSampler(
2121 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2123 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2124 return Sampler.isValid();
2125 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2126 // Create OpSampledImage.
2127 Register Image = Call->Arguments[0];
2128 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2129 SPIRVType *SampledImageType =
2130 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2131 Register SampledImage =
2132 Call->ReturnRegister.isValid()
2133 ? Call->ReturnRegister
2134 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2135 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2136 .addDef(SampledImage)
2137 .addUse(GR->getSPIRVTypeID(SampledImageType))
2138 .addUse(Image)
2139 .addUse(Call->Arguments[1]); // Sampler.
2140 return true;
2141 } else if (Call->Builtin->Name.contains_insensitive(
2142 "__spirv_ImageSampleExplicitLod")) {
2143 // Sample an image using an explicit level of detail.
// "_R<type>(" in the demangled name encodes the requested return type.
2144 std::string ReturnType = DemangledCall.str();
2145 if (DemangledCall.contains("_R")) {
2146 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2147 ReturnType = ReturnType.substr(0, ReturnType.find('('));
2148 }
2149 SPIRVType *Type =
2150 Call->ReturnType
2151 ? Call->ReturnType
2152 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder, true);
2153 if (!Type) {
2154 std::string DiagMsg =
2155 "Unable to recognize SPIRV type name: " + ReturnType;
2156 report_fatal_error(DiagMsg.c_str());
2157 }
// The result-type operand is added on a dropped line — presumably
// .addUse(GR->getSPIRVTypeID(Type)); confirm against upstream.
2158 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2159 .addDef(Call->ReturnRegister)
2161 .addUse(Call->Arguments[0]) // Image.
2162 .addUse(Call->Arguments[1]) // Coordinate.
2163 .addImm(SPIRV::ImageOperand::Lod)
2164 .addUse(Call->Arguments[3]);
2165 return true;
2166 }
2167 return false;
2168 }
2169
// Lower a select builtin to a generic G_SELECT via buildSelect:
// args are (condition, true-value, false-value).
2171 MachineIRBuilder &MIRBuilder) {
2172 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2173 Call->Arguments[1], Call->Arguments[2]);
2174 return true;
2175 }
2176
// Lower a composite-construct builtin to OpCompositeConstruct, spilling into
// OpCompositeConstructContinuedINTEL instructions when the operand count
// exceeds the per-instruction limit (first 3 operands are fixed).
2178 MachineIRBuilder &MIRBuilder,
2179 SPIRVGlobalRegistry *GR) {
2180 createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2181 SPIRV::OpCompositeConstructContinuedINTEL,
2182 Call->Arguments, Call->ReturnRegister,
2183 GR->getSPIRVTypeID(Call->ReturnType));
2184 return true;
2185 }
2186
// Lower cooperative-matrix builtins (KHR and INTEL variants). Store/prefetch
// forms produce no result id (IsSet == false); trailing optional literal
// operands (memory operand / matrix operands) are detected by argument count.
2188 MachineIRBuilder &MIRBuilder,
2189 SPIRVGlobalRegistry *GR) {
2190 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2191 unsigned Opcode =
2192 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2193 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2194 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2195 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2196 unsigned ArgSz = Call->Arguments.size();
// LiteralIdx stays 0 when no optional literal operand is present.
2197 unsigned LiteralIdx = 0;
2198 switch (Opcode) {
2199 // Memory operand is optional and is literal.
2200 case SPIRV::OpCooperativeMatrixLoadKHR:
2201 LiteralIdx = ArgSz > 3 ? 3 : 0;
2202 break;
2203 case SPIRV::OpCooperativeMatrixStoreKHR:
2204 LiteralIdx = ArgSz > 4 ? 4 : 0;
2205 break;
2206 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2207 LiteralIdx = ArgSz > 7 ? 7 : 0;
2208 break;
2209 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2210 LiteralIdx = ArgSz > 8 ? 8 : 0;
2211 break;
2212 // Cooperative Matrix Operands operand is optional and is literal.
2213 case SPIRV::OpCooperativeMatrixMulAddKHR:
2214 LiteralIdx = ArgSz > 3 ? 3 : 0;
2215 break;
2216 };
2217
// Prefetch has a fully custom operand layout, handled inline here.
2219 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2220 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2221 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2222 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2223 .addUse(Call->Arguments[0]) // pointer
2224 .addUse(Call->Arguments[1]) // rows
2225 .addUse(Call->Arguments[2]) // columns
2226 .addImm(CacheLevel) // cache level
2227 .addUse(Call->Arguments[4]); // memory layout
2228 if (ArgSz > 5)
2229 MIB.addUse(Call->Arguments[5]); // stride
2230 if (ArgSz > 6) {
2231 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2232 MIB.addImm(MemOp); // memory operand
2233 }
2234 return true;
2235 }
2236 if (LiteralIdx > 0)
2237 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2238 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
// Length queries take the matrix *type* id, not a value operand.
2239 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2240 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2241 if (!CoopMatrType)
2242 report_fatal_error("Can't find a register's type definition");
2243 MIRBuilder.buildInstr(Opcode)
2244 .addDef(Call->ReturnRegister)
2245 .addUse(TypeReg)
2246 .addUse(CoopMatrType->getOperand(0).getReg());
2247 return true;
2248 }
2249 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2250 IsSet ? TypeReg : Register(0), ImmArgs);
2251 }
2252
// Lower specialization-constant builtins:
//  - OpSpecConstant: decorate the result with SpecId, then emit the constant;
//    boolean results use OpSpecConstantTrue/False, others carry the literal.
//  - OpSpecConstantComposite: emit with INTEL continued-instruction overflow.
2254 MachineIRBuilder &MIRBuilder,
2255 SPIRVGlobalRegistry *GR) {
2256 // Lookup the instruction opcode in the TableGen records.
2257 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2258 unsigned Opcode =
2259 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2260 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2261
2262 switch (Opcode) {
2263 case SPIRV::OpSpecConstant: {
2264 // Build the SpecID decoration.
2265 unsigned SpecId =
2266 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2267 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2268 {SpecId});
2269 // Determine the constant MI.
2270 Register ConstRegister = Call->Arguments[1];
2271 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2272 assert(Const &&
2273 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2274 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2275 "Argument should be either an int or floating-point constant");
2276 // Determine the opcode and built the OpSpec MI.
2277 const MachineOperand &ConstOperand = Const->getOperand(1);
2278 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2279 assert(ConstOperand.isCImm() && "Int constant operand is expected");
// Booleans encode the default in the opcode itself, not as a literal.
2280 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2281 ? SPIRV::OpSpecConstantTrue
2282 : SPIRV::OpSpecConstantFalse;
2283 }
2284 auto MIB = MIRBuilder.buildInstr(Opcode)
2285 .addDef(Call->ReturnRegister)
2286 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2287
2288 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
// Floats are emitted as their raw bit pattern.
2289 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2290 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2291 else
2292 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2293 }
2294 return true;
2295 }
2296 case SPIRV::OpSpecConstantComposite: {
2297 createContinuedInstructions(MIRBuilder, Opcode, 3,
2298 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2299 Call->Arguments, Call->ReturnRegister,
2300 GR->getSPIRVTypeID(Call->ReturnType));
2301 return true;
2302 }
2303 default:
2304 return false;
2305 }
2306 }
2307
// Thin dispatcher: resolve the native opcode for an extended bit-ops builtin
// and delegate the instruction build to buildExtendedBitOpsInst.
2309 MachineIRBuilder &MIRBuilder,
2310 SPIRVGlobalRegistry *GR) {
2311 // Lookup the instruction opcode in the TableGen records.
2312 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2313 unsigned Opcode =
2314 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2315
2316 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2317 }
2318
// Thin dispatcher: resolve the native opcode for a bindless-image INTEL
// builtin and delegate the instruction build to buildBindlessImageINTELInst.
2320 MachineIRBuilder &MIRBuilder,
2321 SPIRVGlobalRegistry *GR) {
2322 // Lookup the instruction opcode in the TableGen records.
2323 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2324 unsigned Opcode =
2325 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2326
2327 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2328 }
2329
// Thin dispatcher: resolve the native opcode for a ternary-bitwise INTEL
// builtin and delegate to buildTernaryBitwiseFunctionINTELInst.
2330 static bool
2332 MachineIRBuilder &MIRBuilder,
2333 SPIRVGlobalRegistry *GR) {
2334 // Lookup the instruction opcode in the TableGen records.
2335 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2336 unsigned Opcode =
2337 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2338
2339 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2340 }
2341
// Thin dispatcher: resolve the native opcode for a 2D block-IO INTEL builtin
// and delegate the instruction build to build2DBlockIOINTELInst.
2343 MachineIRBuilder &MIRBuilder,
2344 SPIRVGlobalRegistry *GR) {
2345 // Lookup the instruction opcode in the TableGen records.
2346 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2347 unsigned Opcode =
2348 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2349
2350 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2351 }
2352
// Lower ndrange_{1,2,3}D builtins: build an OpBuildNDRange value and store it
// through the destination pointer in argument 0. Missing local-size /
// global-offset arguments are substituted with zero constants.
2353 static bool buildNDRange(const SPIRV::IncomingCall *Call,
2354 MachineIRBuilder &MIRBuilder,
2355 SPIRVGlobalRegistry *GR) {
2356 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2357 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2358 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2359 PtrType->getOperand(2).isReg());
// The pointee (operand 2 of the pointer type) is the ndrange struct type.
2360 Register TypeReg = PtrType->getOperand(2).getReg();
2362 MachineFunction &MF = MIRBuilder.getMF();
2363 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2364 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2365 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
2366 // three other arguments, so pass zero constant on absence.
2367 unsigned NumArgs = Call->Arguments.size();
2368 assert(NumArgs >= 2);
2369 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2370 Register LocalWorkSize =
2371 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2372 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2373 if (NumArgs < 4) {
2374 Register Const;
2375 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
// A pointer-typed global size means the frontend passed an array of
// sizes; load the whole array through the spv_gep's base pointer.
2376 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2377 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2378 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2379 DefInstr->getOperand(3).isReg());
2380 Register GWSPtr = DefInstr->getOperand(3).getReg();
2381 // TODO: Maybe simplify generation of the type of the fields.
2382 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2383 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2385 Type *FieldTy = ArrayType::get(BaseTy, Size);
2386 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2387 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
2388 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2389 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2390 MIRBuilder.buildInstr(SPIRV::OpLoad)
2391 .addDef(GlobalWorkSize)
2392 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2393 .addUse(GWSPtr)
2394 const SPIRVSubtarget &ST =
2395 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
// Zero-filled array constant matching the loaded array's type.
2396 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2397 SpvFieldTy, *ST.getInstrInfo());
2398 } else {
2399 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy, true);
2400 }
2401 if (!LocalWorkSize.isValid())
2402 LocalWorkSize = Const;
2403 if (!GlobalWorkOffset.isValid())
2404 GlobalWorkOffset = Const;
2405 }
2406 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2407 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2408 .addDef(TmpReg)
2409 .addUse(TypeReg)
2410 .addUse(GlobalWorkSize)
2411 .addUse(LocalWorkSize)
2412 .addUse(GlobalWorkOffset);
2413 return MIRBuilder.buildInstr(SPIRV::OpStore)
2414 .addUse(Call->Arguments[0])
2415 .addUse(TmpReg);
2416 }
2417
2418 // TODO: maybe move to the global register.
// Return (creating on demand) the SPIR-V type for a Generic-storage-class
// pointer, used as the device-event pointer type for enqueue lowering.
2419 static SPIRVType *
2421 SPIRVGlobalRegistry *GR) {
2422 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2423 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2424 Type *PtrType = PointerType::get(Context, SC1);
2425 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2426 SPIRV::AccessQualifier::ReadWrite, true);
2427 }
2428
// Lower enqueue_kernel builtins to OpEnqueueKernel. Unpacks the clang-emitted
// local-size array into individual pointer operands, supplies dummy event
// operands when the callee variant has none, and appends the block-invoke
// function, block literal, and its size/alignment.
2429 MachineIRBuilder &MIRBuilder,
2430 SPIRVGlobalRegistry *GR) {
2431 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2432 const DataLayout &DL = MIRBuilder.getDataLayout();
2433 bool IsSpirvOp = Call->isSpirvOp();
2434 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2435 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2436
2437 // Make vararg instructions before OpEnqueueKernel.
2438 // Local sizes arguments: Sizes of block invoke arguments. Clang generates
2439 // local size operands as an array, so we need to unpack them.
2440 SmallVector<Register, 16> LocalSizes;
2441 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2442 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2443 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2444 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2445 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2446 GepMI->getOperand(3).isReg());
2447 Register ArrayReg = GepMI->getOperand(3).getReg();
2448 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2449 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2450 assert(LocalSizeTy && "Local size type is expected");
2451 const uint64_t LocalSizeNum =
2452 cast<ArrayType>(LocalSizeTy)->getNumElements();
2453 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2454 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2455 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2456 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
// Emit one spv_gep per array element so each local size becomes its own
// pointer operand of OpEnqueueKernel.
2457 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2458 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2459 MRI->setType(Reg, LLType);
2460 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2461 auto GEPInst = MIRBuilder.buildIntrinsic(
2462 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2463 GEPInst
2464 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2465 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2466 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2467 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2468 LocalSizes.push_back(Reg);
2469 }
2470 }
2471
2472 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2473 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2474 .addDef(Call->ReturnRegister)
2475 .addUse(GR->getSPIRVTypeID(Int32Ty));
2476
2477 // Copy all arguments before block invoke function pointer.
2478 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2479 for (unsigned i = 0; i < BlockFIdx; i++)
2480 MIB.addUse(Call->Arguments[i]);
2481
2482 // If there are no event arguments in the original call, add dummy ones.
2483 if (!HasEvents) {
2484 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2485 Register NullPtr = GR->getOrCreateConstNullPtr(
2486 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2487 MIB.addUse(NullPtr); // Dummy wait events.
2488 MIB.addUse(NullPtr); // Dummy ret event.
2489 }
2490
2491 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2492 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2493 // Invoke: Pointer to invoke function.
2494 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2495
2496 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2497 // Param: Pointer to block literal.
2498 MIB.addUse(BlockLiteralReg);
2499
2500 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2501 // TODO: these numbers should be obtained from block literal structure.
2502 // Param Size: Size of block literal structure.
2503 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2504 // Param Aligment: Aligment of block literal structure.
2505 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2506 MIRBuilder, GR));
2507
2508 for (unsigned i = 0; i < LocalSizes.size(); i++)
2509 MIB.addUse(LocalSizes[i]);
2510 return true;
2511 }
2513
// Lower device-side enqueue builtins. Event bookkeeping opcodes are emitted
// directly with the operand shapes below; OpBuildNDRange and OpEnqueueKernel
// are delegated to their dedicated builders.
2514 MachineIRBuilder &MIRBuilder,
2515 SPIRVGlobalRegistry *GR) {
2516 // Lookup the instruction opcode in the TableGen records.
2517 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2518 unsigned Opcode =
2519 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2520
2521 switch (Opcode) {
// Single event operand, no result.
2522 case SPIRV::OpRetainEvent:
2523 case SPIRV::OpReleaseEvent:
2524 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
// Result only, no operands.
2525 case SPIRV::OpCreateUserEvent:
2526 case SPIRV::OpGetDefaultQueue:
2527 return MIRBuilder.buildInstr(Opcode)
2528 .addDef(Call->ReturnRegister)
2529 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2530 case SPIRV::OpIsValidEvent:
2531 return MIRBuilder.buildInstr(Opcode)
2532 .addDef(Call->ReturnRegister)
2533 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2534 .addUse(Call->Arguments[0]);
2535 case SPIRV::OpSetUserEventStatus:
2536 return MIRBuilder.buildInstr(Opcode)
2537 .addUse(Call->Arguments[0])
2538 .addUse(Call->Arguments[1]);
2539 case SPIRV::OpCaptureEventProfilingInfo:
2540 return MIRBuilder.buildInstr(Opcode)
2541 .addUse(Call->Arguments[0])
2542 .addUse(Call->Arguments[1])
2543 .addUse(Call->Arguments[2]);
2544 case SPIRV::OpBuildNDRange:
2545 return buildNDRange(Call, MIRBuilder, GR);
2546 case SPIRV::OpEnqueueKernel:
2547 return buildEnqueueKernel(Call, MIRBuilder, GR);
2548 default:
2549 return false;
2550 }
2551 }
2553
// Lower async work-group copy builtins (OpGroupAsyncCopy/OpGroupWaitEvents)
// at Workgroup scope. For async_work_group_copy without an explicit stride a
// constant stride of 1 is substituted; an spirv.Event return type is
// materialized when the declared return type is not already an event.
2554 MachineIRBuilder &MIRBuilder,
2555 SPIRVGlobalRegistry *GR) {
2556 // Lookup the instruction opcode in the TableGen records.
2557 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2558 unsigned Opcode =
2559 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2560
2561 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2562 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2563 if (Call->isSpirvOp())
2564 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2565 IsSet ? TypeReg : Register(0));
2566
2567 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2568
2569 switch (Opcode) {
2570 case SPIRV::OpGroupAsyncCopy: {
2571 SPIRVType *NewType =
2572 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2573 ? nullptr
2574 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2575 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2576 unsigned NumArgs = Call->Arguments.size();
2577 Register EventReg = Call->Arguments[NumArgs - 1];
2578 bool Res = MIRBuilder.buildInstr(Opcode)
2579 .addDef(Call->ReturnRegister)
2580 .addUse(TypeReg)
2581 .addUse(Scope)
2582 .addUse(Call->Arguments[0])
2583 .addUse(Call->Arguments[1])
2584 .addUse(Call->Arguments[2])
2585 .addUse(Call->Arguments.size() > 4
2586 ? Call->Arguments[3]
2587 : buildConstantIntReg32(1, MIRBuilder, GR))
2588 .addUse(EventReg);
// Re-assign the return register's type when we substituted spirv.Event.
2589 if (NewType != nullptr)
2590 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2591 MIRBuilder.getMF().getRegInfo());
2592 return Res;
2593 }
2594 case SPIRV::OpGroupWaitEvents:
2595 return MIRBuilder.buildInstr(Opcode)
2596 .addUse(Scope)
2597 .addUse(Call->Arguments[0])
2598 .addUse(Call->Arguments[1]);
2599 default:
2600 return false;
2601 }
2602 }
2604
// Lowers OpenCL/SPIR-V conversion builtins (convert_*, saturated/rounded
// variants, bfloat16 and tensor-float32 Intel conversions) to the matching
// SPIR-V conversion opcode, adding decorations where required.
2605 static bool generateConvertInst(const StringRef DemangledCall,
2606 const SPIRV::IncomingCall *Call,
2607 MachineIRBuilder &MIRBuilder,
2608 SPIRVGlobalRegistry *GR) {
2609 // Lookup the conversion builtin in the TableGen records.
2610 const SPIRV::ConvertBuiltin *Builtin =
2611 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2612
// __spirv_ wrapper with no dedicated convert record: lower verbatim through
// the native-opcode table. Note the inner `Builtin` deliberately shadows the
// (null) outer one.
2613 if (!Builtin && Call->isSpirvOp()) {
2614 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2615 unsigned Opcode =
2616 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2617 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2618 GR->getSPIRVTypeID(Call->ReturnType));
2619 }
2620
// Decorations are attached to the result register before the conversion
// instruction itself is emitted.
2621 assert(Builtin && "Conversion builtin not found.");
2622 if (Builtin->IsSaturated)
2623 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2624 SPIRV::Decoration::SaturatedConversion, {});
2625 if (Builtin->IsRounded)
2626 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2627 SPIRV::Decoration::FPRoundingMode,
2628 {(unsigned)Builtin->RoundingMode});
2629
2630 std::string NeedExtMsg; // no errors if empty
2631 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
// Opcode is selected from the (source type, destination type) pair below;
// OpNop means "no valid conversion found" and trips the assert at the end.
2632 unsigned Opcode = SPIRV::OpNop;
2633 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2634 // Int -> ...
2635 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2636 // Int -> Int
2637 if (Builtin->IsSaturated)
2638 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2639 : SPIRV::OpSatConvertSToU;
2640 else
2641 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2642 : SPIRV::OpSConvert;
2643 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2644 SPIRV::OpTypeFloat)) {
2645 // Int -> Float
2646 if (Builtin->IsBfloat16) {
2647 const auto *ST = static_cast<const SPIRVSubtarget *>(
2648 &MIRBuilder.getMF().getSubtarget());
2649 if (!ST->canUseExtension(
2650 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2651 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
// The Intel conversions do not broadcast: operand and result must have the
// same number of vector components.
2652 IsRightComponentsNumber =
2653 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2654 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2655 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2656 } else {
// Signedness of the source is probed from the demangled call string: the
// character right after '(' is 'u' for an unsigned source type.
2657 bool IsSourceSigned =
2658 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2659 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2660 }
2661 }
2662 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2663 SPIRV::OpTypeFloat)) {
2664 // Float -> ...
2665 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2666 // Float -> Int
2667 if (Builtin->IsBfloat16) {
2668 const auto *ST = static_cast<const SPIRVSubtarget *>(
2669 &MIRBuilder.getMF().getSubtarget());
2670 if (!ST->canUseExtension(
2671 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2672 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2673 IsRightComponentsNumber =
2674 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2675 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2676 Opcode = SPIRV::OpConvertFToBF16INTEL;
2677 } else {
2678 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2679 : SPIRV::OpConvertFToU;
2680 }
2681 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2682 SPIRV::OpTypeFloat)) {
2683 if (Builtin->IsTF32) {
2684 const auto *ST = static_cast<const SPIRVSubtarget *>(
2685 &MIRBuilder.getMF().getSubtarget());
2686 if (!ST->canUseExtension(
2687 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2688 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
2689 IsRightComponentsNumber =
2690 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2691 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2692 Opcode = SPIRV::OpRoundFToTF32INTEL;
2693 } else {
2694 // Float -> Float
2695 Opcode = SPIRV::OpFConvert;
2696 }
2697 }
2698 }
2699
// Diagnose unsupported configurations before emitting anything.
2700 if (!NeedExtMsg.empty()) {
2701 std::string DiagMsg = std::string(Builtin->Name) +
2702 ": the builtin requires the following SPIR-V "
2703 "extension: " +
2704 NeedExtMsg;
2705 report_fatal_error(DiagMsg.c_str(), false);
2706 }
2707 if (!IsRightComponentsNumber) {
2708 std::string DiagMsg =
2709 std::string(Builtin->Name) +
2710 ": result and argument must have the same number of components";
2711 report_fatal_error(DiagMsg.c_str(), false);
2712 }
2713 assert(Opcode != SPIRV::OpNop &&
2714 "Conversion between the types not implemented!");
2715
// All conversions are unary: result <- convert(arg0).
2716 MIRBuilder.buildInstr(Opcode)
2717 .addDef(Call->ReturnRegister)
2718 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2719 .addUse(Call->Arguments[0]);
2720 return true;
2721}
2722
// generateVectorLoadStoreInst: lowers vloadn/vstoren-style builtins to an
// OpExtInst from the OpenCL.std extended instruction set.
// NOTE(review): the first signature line is missing from this extract.
2724 MachineIRBuilder &MIRBuilder,
2725 SPIRVGlobalRegistry *GR) {
2726 // Lookup the vector load/store builtin in the TableGen records.
2727 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2728 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2729 Call->Builtin->Set);
2730 // Build extended instruction.
2731 auto MIB =
2732 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2733 .addDef(Call->ReturnRegister)
2734 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2735 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2736 .addImm(Builtin->Number)
2748
// generateLoadStoreInst: lowers load/store builtins to OpLoad/OpStore,
// forwarding the optional memory-operand and alignment immediates.
// NOTE(review): the first signature line is missing from this extract.
2750 MachineIRBuilder &MIRBuilder,
2751 SPIRVGlobalRegistry *GR) {
2752 // Lookup the instruction opcode in the TableGen records.
2753 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2754 unsigned Opcode =
2755 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2756 bool IsLoad = Opcode == SPIRV::OpLoad;
2757 // Build the instruction.
2758 auto MIB = MIRBuilder.buildInstr(Opcode);
// Only OpLoad defines a result register and carries a result type.
2759 if (IsLoad) {
2760 MIB.addDef(Call->ReturnRegister);
2761 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2762 }
2763 // Add a pointer to the value to load/store.
2764 MIB.addUse(Call->Arguments[0]);
2765 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2766 // Add a value to store.
2767 if (!IsLoad)
2768 MIB.addUse(Call->Arguments[1]);
// The trailing optional arguments sit one position later for stores because
// of the extra value operand; they are lowered as immediates.
2769 // Add optional memory attributes and an alignment.
2770 unsigned NumArgs = Call->Arguments.size();
2771 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2772 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2773 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2774 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2775 return true;
2776}
2777
2778namespace SPIRV {
2779// Try to find a builtin function attributes by a demangled function name and
2780// return a tuple <builtin group, op code, ext instruction number>, or a special
2781// tuple value <-1, 0, 0> if the builtin function is not found.
2782// Not all builtin functions are supported, only those with a ready-to-use op
2783// code or instruction number defined in TableGen.
2784 // TODO: consider a major rework of mapping demangled calls into builtin
2785 // functions to unify the search and decrease the number of individual cases.
// Maps a demangled builtin call onto <group, opcode, ext-instruction number>
// without lowering it; returns <-1, 0, 0> when no mapping exists. See the
// comment block above for intent and the rework TODO.
2786 std::tuple<int, unsigned, unsigned>
2787 mapBuiltinToOpcode(const StringRef DemangledCall,
2788 SPIRV::InstructionSet::InstructionSet Set) {
// Dummy register/argument list: lookupBuiltin is used purely for record
// resolution here, no code is generated.
// NOTE(review): the declaration of `Args` (original line 2790) is missing
// from this extract.
2789 Register Reg;
2791 std::unique_ptr<const IncomingCall> Call =
2792 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
2793 if (!Call)
2794 return std::make_tuple(-1, 0, 0);
2795
// Each group consults its own TableGen table; groups with a fixed opcode are
// returned directly.
2796 switch (Call->Builtin->Group) {
2797 case SPIRV::Relational:
2798 case SPIRV::Atomic:
2799 case SPIRV::Barrier:
2800 case SPIRV::CastToPtr:
2801 case SPIRV::ImageMiscQuery:
2802 case SPIRV::SpecConstant:
2803 case SPIRV::Enqueue:
2804 case SPIRV::AsyncCopy:
2805 case SPIRV::LoadStore:
2806 case SPIRV::CoopMatr:
2807 if (const auto *R =
2808 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2809 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2810 break;
2811 case SPIRV::Extended:
2812 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2813 Call->Builtin->Set))
2814 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2815 break;
// Vector load/store builtins are reported under the Extended group since
// they also lower to OpExtInst numbers.
2816 case SPIRV::VectorLoadStore:
2817 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2818 Call->Builtin->Set))
2819 return std::make_tuple(SPIRV::Extended, 0, R->Number);
2820 break;
2821 case SPIRV::Group:
2822 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2823 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2824 break;
2825 case SPIRV::AtomicFloating:
2826 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2827 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2828 break;
2829 case SPIRV::IntelSubgroups:
2830 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2831 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2832 break;
2833 case SPIRV::GroupUniform:
2834 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2835 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2836 break;
2837 case SPIRV::IntegerDot:
2838 if (const auto *R =
2839 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
2840 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2841 break;
// Groups below have exactly one opcode, so no table lookup is needed.
2842 case SPIRV::WriteImage:
2843 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2844 case SPIRV::Select:
2845 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
2846 case SPIRV::Construct:
2847 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2848 0);
2849 case SPIRV::KernelClock:
2850 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2851 default:
2852 return std::make_tuple(-1, 0, 0);
2853 }
2854 return std::make_tuple(-1, 0, 0);
2855}
2856
// Entry point for lowering a demangled builtin call: resolves the TableGen
// record for the call, then dispatches to the per-group generate* helper.
// Returns std::nullopt when no builtin record matches (caller falls back to
// ordinary call lowering), otherwise the helper's success/failure result.
2857 std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
2858 SPIRV::InstructionSet::InstructionSet Set,
2859 MachineIRBuilder &MIRBuilder,
2860 const Register OrigRet, const Type *OrigRetTy,
2861 const SmallVectorImpl<Register> &Args,
2862 SPIRVGlobalRegistry *GR) {
2863 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
2864
2865 // Lookup the builtin in the TableGen records.
2866 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
2867 assert(SpvType && "Inconsistent return register: expected valid type info");
2868 std::unique_ptr<const IncomingCall> Call =
2869 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
2870
2871 if (!Call) {
2872 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
2873 return std::nullopt;
2874 }
2875
2876 // TODO: check if the provided args meet the builtin requirements.
2877 assert(Args.size() >= Call->Builtin->MinNumArgs &&
2878 "Too few arguments to generate the builtin");
2879 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2880 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
2881
2882 // Match the builtin with implementation based on the grouping.
2883 switch (Call->Builtin->Group) {
2884 case SPIRV::Extended:
2885 return generateExtInst(Call.get(), MIRBuilder, GR);
2886 case SPIRV::Relational:
2887 return generateRelationalInst(Call.get(), MIRBuilder, GR);
2888 case SPIRV::Group:
2889 return generateGroupInst(Call.get(), MIRBuilder, GR);
2890 case SPIRV::Variable:
2891 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2892 case SPIRV::Atomic:
2893 return generateAtomicInst(Call.get(), MIRBuilder, GR);
2894 case SPIRV::AtomicFloating:
2895 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2896 case SPIRV::Barrier:
2897 return generateBarrierInst(Call.get(), MIRBuilder, GR);
2898 case SPIRV::CastToPtr:
2899 return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
2900 case SPIRV::Dot:
2901 case SPIRV::IntegerDot:
2902 return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
2903 case SPIRV::Wave:
2904 return generateWaveInst(Call.get(), MIRBuilder, GR);
2905 case SPIRV::ICarryBorrow:
2906 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
2907 case SPIRV::GetQuery:
2908 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
2909 case SPIRV::ImageSizeQuery:
2910 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
2911 case SPIRV::ImageMiscQuery:
2912 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
2913 case SPIRV::ReadImage:
2914 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2915 case SPIRV::WriteImage:
2916 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
2917 case SPIRV::SampleImage:
2918 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2919 case SPIRV::Select:
2920 return generateSelectInst(Call.get(), MIRBuilder);
2921 case SPIRV::Construct:
2922 return generateConstructInst(Call.get(), MIRBuilder, GR);
2923 case SPIRV::SpecConstant:
2924 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
2925 case SPIRV::Enqueue:
2926 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
2927 case SPIRV::AsyncCopy:
2928 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
2929 case SPIRV::Convert:
2930 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
2931 case SPIRV::VectorLoadStore:
2932 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
2933 case SPIRV::LoadStore:
2934 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
2935 case SPIRV::IntelSubgroups:
2936 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
2937 case SPIRV::GroupUniform:
2938 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
2939 case SPIRV::KernelClock:
2940 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
2941 case SPIRV::CoopMatr:
2942 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
2943 case SPIRV::ExtendedBitOps:
2944 return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
2945 case SPIRV::BindlessINTEL:
2946 return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
2947 case SPIRV::TernaryBitwiseINTEL:
2948 return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
2949 case SPIRV::Block2DLoadStore:
2950 return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
2951 }
// Unhandled group value: report failure rather than asserting.
2952 return false;
2953}
2954
// parseBuiltinCallArgumentType: turns one demangled argument type string into
// an LLVM Type — either an OpenCL builtin target-extension type or a basic
// scalar/vector type.
// NOTE(review): the signature line (original 2955) and the vector-construction
// line (original 2993) are missing from this extract.
2956 // Parse strings representing OpenCL builtin types.
2957 if (hasBuiltinTypePrefix(TypeStr)) {
2958 // OpenCL builtin types in demangled call strings have the following format:
2959 // e.g. ocl_image2d_ro
2960 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
2961 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
2962
2963 // Check if this is pointer to a builtin type and not just pointer
2964 // representing a builtin type. In case it is a pointer to builtin type,
2965 // this will require additional handling in the method calling
2966 // parseBuiltinCallArgumentBaseType(...) as this function only retrieves the
2967 // base types.
2968 if (TypeStr.ends_with("*"))
2969 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
2970
2971 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
2972 Ctx);
2973 }
2974
2975 // Parse type name in either "typeN" or "type vector[N]" format, where
2976 // N is the number of elements of the vector.
2977 Type *BaseType;
2978 unsigned VecElts = 0;
2979
2980 BaseType = parseBasicTypeName(TypeStr, Ctx);
2981 if (!BaseType)
2982 // Unable to recognize SPIRV type name.
2983 return nullptr;
2984
2985 // Handle "typeN*" or "type vector[N]*".
2986 TypeStr.consume_back("*");
2987
2988 if (TypeStr.consume_front(" vector["))
2989 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
2990
// A failed getAsInteger leaves VecElts at 0, in which case the scalar base
// type is returned unchanged; void vector elements are widened to i8.
2991 TypeStr.getAsInteger(10, VecElts);
2992 if (VecElts > 0)
2994 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
2995
2996 return BaseType;
2997}
2998
// parseBuiltinTypeStr: splits the parenthesized argument list of a demangled
// call string into per-argument type strings; returns false when the call
// string has no well-formed "(...)" section.
// NOTE(review): the first signature line (original 2999) is missing from this
// extract.
3000 const StringRef DemangledCall, LLVMContext &Ctx) {
3001 auto Pos1 = DemangledCall.find('(');
3002 if (Pos1 == StringRef::npos)
3003 return false;
3004 auto Pos2 = DemangledCall.find(')');
3005 if (Pos2 == StringRef::npos || Pos1 > Pos2)
3006 return false;
// Split on ',' keeping no empty fields (MaxSplit = -1, KeepEmpty = false).
3007 DemangledCall.slice(Pos1 + 1, Pos2)
3008 .split(BuiltinArgsTypeStrs, ',', -1, false);
3009 return true;
3010}
3011
// parseBuiltinCallArgumentBaseType: convenience wrapper — splits the demangled
// call's argument list and parses the type of the argument at ArgIdx, or
// returns nullptr when the index is out of range.
// NOTE(review): the first signature line (original 3012) is missing from this
// extract.
3013 unsigned ArgIdx, LLVMContext &Ctx) {
3014 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
3015 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
3016 if (ArgIdx >= BuiltinArgsTypeStrs.size())
3017 return nullptr;
3018 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3019 return parseBuiltinCallArgumentType(TypeStr, Ctx);
3020}
3021
3025};
3026
3027#define GET_BuiltinTypes_DECL
3028#define GET_BuiltinTypes_IMPL
3029
3033};
3034
3035#define GET_OpenCLTypes_DECL
3036#define GET_OpenCLTypes_IMPL
3037
3038#include "SPIRVGenTables.inc"
3039} // namespace SPIRV
3040
3041//===----------------------------------------------------------------------===//
3042// Misc functions for parsing builtin types.
3043//===----------------------------------------------------------------------===//
3044
// parseTypeString: maps a builtin-type-parameter name prefix to the matching
// LLVM type; aborts on anything unrecognized. Used while decoding
// parameterized builtin type names (see parseBuiltinTypeNameToTargetExtType).
// NOTE(review): the signature line (original 3045) is missing from this
// extract.
3046 if (Name.starts_with("void"))
3047 return Type::getVoidTy(Context);
// Both signed and unsigned integer parameters map to i32.
3048 else if (Name.starts_with("int") || Name.starts_with("uint"))
3049 return Type::getInt32Ty(Context);
3050 else if (Name.starts_with("float"))
3051 return Type::getFloatTy(Context);
3052 else if (Name.starts_with("half"))
3053 return Type::getHalfTy(Context);
3054 report_fatal_error("Unable to recognize type!");
3055}
3056
3057//===----------------------------------------------------------------------===//
3058// Implementation functions for builtin types.
3059//===----------------------------------------------------------------------===//
3060
// getNonParameterizedType: lowers a builtin type that carries no parameters by
// emitting (or reusing) the bare OpType<...> for the opcode recorded in
// TableGen.
// NOTE(review): the first signature line (original 3061) is missing from this
// extract.
3062 const SPIRV::BuiltinType *TypeRecord,
3063 MachineIRBuilder &MIRBuilder,
3064 SPIRVGlobalRegistry *GR) {
3065 unsigned Opcode = TypeRecord->Opcode;
3066 // Create or get an existing type from GlobalRegistry.
3067 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
3068}
3069
// getSamplerType: lowers the sampler builtin type to OpTypeSampler.
// NOTE(review): the first signature line (original 3070) is missing from this
// extract.
3071 SPIRVGlobalRegistry *GR) {
3072 // Create or get an existing type from GlobalRegistry.
3073 return GR->getOrCreateOpTypeSampler(MIRBuilder);
3074}
3075
3076static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
3077 MachineIRBuilder &MIRBuilder,
3078 SPIRVGlobalRegistry *GR) {
3079 assert(ExtensionType->getNumIntParameters() == 1 &&
3080 "Invalid number of parameters for SPIR-V pipe builtin!");
3081 // Create or get an existing type from GlobalRegistry.
3082 return GR->getOrCreateOpTypePipe(MIRBuilder,
3083 SPIRV::AccessQualifier::AccessQualifier(
3084 ExtensionType->getIntParameter(0)));
3085}
3086
3087static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
3088 MachineIRBuilder &MIRBuilder,
3089 SPIRVGlobalRegistry *GR) {
3090 assert(ExtensionType->getNumIntParameters() == 4 &&
3091 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3092 assert(ExtensionType->getNumTypeParameters() == 1 &&
3093 "SPIR-V coop matrices builtin type must have a type parameter!");
3094 const SPIRVType *ElemType =
3095 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
3096 SPIRV::AccessQualifier::ReadWrite, true);
3097 // Create or get an existing type from GlobalRegistry.
3098 return GR->getOrCreateOpTypeCoopMatr(
3099 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
3100 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
3101 ExtensionType->getIntParameter(3), true);
3102}
3103
// getSampledImageType: lowers the sampled-image builtin type by first lowering
// the underlying image type (with read-only access) and wrapping it in
// OpTypeSampledImage.
// NOTE(review): the first signature line (original 3104) is missing from this
// extract.
3105 MachineIRBuilder &MIRBuilder,
3106 SPIRVGlobalRegistry *GR) {
3107 SPIRVType *OpaqueImageType = GR->getImageType(
3108 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3109 // Create or get an existing type from GlobalRegistry.
3110 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
3111}
3112
// getInlineSpirvType: lowers the generic "spirv.Type" target extension type —
// an escape hatch that lets IR spell an arbitrary OpType instruction by
// opcode plus a list of operands encoded as type parameters.
// NOTE(review): this extract is missing the declaration of `Operands`
// (original lines 3120-3121) and the first line of the integral-constant
// push_back call (original lines 3137-3138).
3113 static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
3114 MachineIRBuilder &MIRBuilder,
3115 SPIRVGlobalRegistry *GR) {
3116 assert(ExtensionType->getNumIntParameters() == 3 &&
3117 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3118 "parameter");
3119 auto Opcode = ExtensionType->getIntParameter(0);
3121
// Each type parameter becomes one operand of the emitted instruction:
// spirv.IntegralConstant -> constant id, spirv.Literal -> raw immediate,
// anything else -> the id of the lowered type.
3122 for (Type *Param : ExtensionType->type_params()) {
3123 if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
3124 if (ParamEType->getName() == "spirv.IntegralConstant") {
3125 assert(ParamEType->getNumTypeParameters() == 1 &&
3126 "Inline SPIR-V integral constant builtin must have a type "
3127 "parameter");
3128 assert(ParamEType->getNumIntParameters() == 1 &&
3129 "Inline SPIR-V integral constant builtin must have a "
3130 "value parameter");
3131
3132 auto OperandValue = ParamEType->getIntParameter(0);
3133 auto *OperandType = ParamEType->getTypeParameter(0);
3134
3135 const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
3136 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3139 OperandValue, MIRBuilder, OperandSPIRVType, true)));
3140 continue;
3141 } else if (ParamEType->getName() == "spirv.Literal") {
3142 assert(ParamEType->getNumTypeParameters() == 0 &&
3143 "Inline SPIR-V literal builtin does not take type "
3144 "parameters");
3145 assert(ParamEType->getNumIntParameters() == 1 &&
3146 "Inline SPIR-V literal builtin must have an integer "
3147 "parameter");
3148
3149 auto OperandValue = ParamEType->getIntParameter(0);
3150
3151 Operands.push_back(MCOperand::createImm(OperandValue));
3152 continue;
3153 }
3154 }
3155 const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
3156 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3157 Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
3158 }
3159
3160 return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
3161 Operands);
3162}
3163
3164static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
3165 MachineIRBuilder &MIRBuilder,
3166 SPIRVGlobalRegistry *GR) {
3167 assert(ExtensionType->getNumTypeParameters() == 1 &&
3168 "Vulkan buffers have exactly one type for the type of the buffer.");
3169 assert(ExtensionType->getNumIntParameters() == 2 &&
3170 "Vulkan buffer have 2 integer parameters: storage class and is "
3171 "writable.");
3172
3173 auto *T = ExtensionType->getTypeParameter(0);
3174 auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
3175 ExtensionType->getIntParameter(0));
3176 bool IsWritable = ExtensionType->getIntParameter(1);
3177 return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
3178}
3179
3180static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
3181 MachineIRBuilder &MIRBuilder,
3182 SPIRVGlobalRegistry *GR) {
3183 return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
3184}
3185
3186namespace SPIRV {
// parseBuiltinTypeNameToTargetExtType: converts an opaque-struct builtin type
// name ("opencl.event_t", "spirv.Image._void_1_0_0_0_0_0_0", ...) into an
// equivalent TargetExtType, decoding any '_'-separated type/integer
// parameters.
// NOTE(review): the first signature line (original 3187) and the first line
// of the final TargetExtType::get call (original 3231) are missing from this
// extract.
3188 LLVMContext &Context) {
3189 StringRef NameWithParameters = TypeName;
3190
3191 // Pointers-to-opaque-structs representing OpenCL types are first translated
3192 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3193 // following format: e.g. %opencl.event_t
3194 if (NameWithParameters.starts_with("opencl.")) {
3195 const SPIRV::OpenCLType *OCLTypeRecord =
3196 SPIRV::lookupOpenCLType(NameWithParameters);
3197 if (!OCLTypeRecord)
3198 report_fatal_error("Missing TableGen record for OpenCL type: " +
3199 NameWithParameters);
3200 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3201 // Continue with the SPIR-V builtin type...
3202 }
3203
3204 // Names of the opaque structs representing a SPIR-V builtins without
3205 // parameters should have the following format: e.g. %spirv.Event
3206 assert(NameWithParameters.starts_with("spirv.") &&
3207 "Unknown builtin opaque type!");
3208
3209 // Parameterized SPIR-V builtins names follow this format:
3210 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3211 if (!NameWithParameters.contains('_'))
3212 return TargetExtType::get(Context, NameWithParameters);
3213
3214 SmallVector<StringRef> Parameters;
// find('_') - 1 strips the '.' that precedes the first '_' so that the base
// name is e.g. "spirv.Image" for "spirv.Image._void_...".
3215 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3216 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3217
// A leading non-digit parameter is a type parameter; the remaining ones must
// be decimal integer literals.
3218 SmallVector<Type *, 1> TypeParameters;
3219 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3220 if (HasTypeParameter)
3221 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3222 SmallVector<unsigned> IntParameters;
3223 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3224 unsigned IntParameter = 0;
3225 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3226 (void)ValidLiteral;
3227 assert(ValidLiteral &&
3228 "Invalid format of SPIR-V builtin parameter literal!");
3229 IntParameters.push_back(IntParameter);
3230 }
3232 NameWithParameters.substr(0, BaseNameLength),
3233 TypeParameters, IntParameters);
3234}
3235
// Lowers a builtin type (target extension type or legacy pointer-to-opaque-
// struct) into a SPIRVType, dispatching on the type's name / TableGen opcode.
// NOTE(review): the first signature line (original 3236, carrying the
// function name and the OpaqueType parameter) and the recursive call on the
// struct-name path (original 3250) are missing from this extract — presumably
// the function re-enters itself via parseBuiltinTypeNameToTargetExtType;
// confirm against the full source.
3237 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3238 MachineIRBuilder &MIRBuilder,
3239 SPIRVGlobalRegistry *GR) {
3240 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3241 // target(...) target extension types or pointers-to-opaque-structs. The
3242 // approach relying on structs is deprecated and works only in the non-opaque
3243 // pointer mode (-opaque-pointers=0).
3244 // In order to maintain compatibility with LLVM IR generated by older versions
3245 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
3246 // "translated" to target extension types. This translation is temporary and
3247 // will be removed in the future release of LLVM.
3248 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
3249 if (!BuiltinType)
3251 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3252
// The virtual-register count is sampled so we can tell afterwards whether a
// new OpType<...> was actually emitted (and thus needs an OpName).
3253 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3254
3255 const StringRef Name = BuiltinType->getName();
3256 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3257
// Special names are handled directly; everything else goes through the
// TableGen BuiltinType table keyed by name.
3258 SPIRVType *TargetType;
3259 if (Name == "spirv.Type") {
3260 TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
3261 } else if (Name == "spirv.VulkanBuffer") {
3262 TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
3263 } else if (Name == "spirv.Layout") {
3264 TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
3265 } else {
3266 // Lookup the demangled builtin type in the TableGen records.
3267 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3268 if (!TypeRecord)
3269 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3270
3271 // "Lower" the BuiltinType into TargetType. The following get<...>Type
3272 // methods use the implementation details from TableGen records or
3273 // TargetExtType parameters to either create a new OpType<...> machine
3274 // instruction or get an existing equivalent SPIRVType from
3275 // GlobalRegistry.
3276
3277 switch (TypeRecord->Opcode) {
3278 case SPIRV::OpTypeImage:
3279 TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
3280 break;
3281 case SPIRV::OpTypePipe:
3282 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3283 break;
3284 case SPIRV::OpTypeDeviceEvent:
3285 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3286 break;
3287 case SPIRV::OpTypeSampler:
3288 TargetType = getSamplerType(MIRBuilder, GR);
3289 break;
3290 case SPIRV::OpTypeSampledImage:
3291 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3292 break;
3293 case SPIRV::OpTypeCooperativeMatrixKHR:
3294 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3295 break;
3296 default:
3297 TargetType =
3298 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3299 break;
3300 }
3301 }
3302
3303 // Emit OpName instruction if a new OpType<...> instruction was added
3304 // (equivalent type was not found in GlobalRegistry).
3305 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3306 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3307
3308 return TargetType;
3309}
3310} // namespace SPIRV
3311} // namespace llvm
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
Register Reg
static bool isDigit(const char C)
spirv structurize SPIRV
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition: Debug.h:119
APInt bitcastToAPInt() const
Definition: APFloat.h:1353
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition: APFloat.h:1079
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1540
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:703
@ ICMP_EQ
equal
Definition: InstrTypes.h:699
@ ICMP_NE
not equal
Definition: InstrTypes.h:700
const APFloat & getValueAPF() const
Definition: Constants.h:320
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:154
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
Tagged union holding either a T or a Error.
Definition: Error.h:485
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:592
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:359
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:319
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:65
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:43
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelType.h:58
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:101
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
static MCOperand createReg(MCRegister Reg)
Definition: MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:145
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:72
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:587
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:595
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isValid() const
Definition: Register.h:107
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
SPIRVType * getOrCreateUnknownType(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode, const ArrayRef< MCOperand > Operands)
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateVulkanBufferType(MachineIRBuilder &MIRBuilder, Type *ElemType, SPIRV::StorageClass::StorageClass SC, bool IsWritable, bool EmitIr=false)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, bool EmitIR, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
SPIRVType * getOrCreateLayoutType(MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr=false)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR)
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVType * getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, const SPIRVType *ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR, bool ZeroAsNull=true)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:710
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
Definition: StringRef.h:665
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:480
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:233
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:581
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:269
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:446
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:694
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:154
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:434
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:645
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
Definition: StringRef.h:384
size_t rfind(char C, size_t From=npos) const
Search for the last character C in the string.
Definition: StringRef.h:353
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:301
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:281
static constexpr size_t npos
Definition: StringRef.h:57
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
StringSwitch & EndsWith(StringLiteral S, T Value)
Definition: StringSwitch.h:73
Class to represent struct types.
Definition: DerivedTypes.h:218
Class to represent target extensions types, which are generally unintrospectable from target-independ...
Definition: DerivedTypes.h:781
ArrayRef< Type * > type_params() const
Return the type parameters for this particular target extension type.
Definition: DerivedTypes.h:818
unsigned getNumIntParameters() const
Definition: DerivedTypes.h:838
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition: Type.cpp:908
Type * getTypeParameter(unsigned i) const
Definition: DerivedTypes.h:828
unsigned getNumTypeParameters() const
Definition: DerivedTypes.h:829
unsigned getIntParameter(unsigned i) const
Definition: DerivedTypes.h:837
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
LLVM_ABI StringRef getStructName() const
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
LLVM Value Representation.
Definition: Value.h:75
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition: Value.cpp:53
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents a version number in the form major[.minor[.subminor[.build]]].
Definition: VersionTuple.h:30
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:359
LLVM_C_ABI LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
Definition: Core.cpp:883
std::string lookupBuiltinNameHelper(StringRef DemangledCall, FPDecorationId *DecorationId)
Parses the name part of the demangled builtin call.
Type * parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx)
bool parseBuiltinTypeStr(SmallVector< StringRef, 10 > &BuiltinArgsTypeStrs, const StringRef DemangledCall, LLVMContext &Ctx)
std::tuple< int, unsigned, unsigned > mapBuiltinToOpcode(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set)
Helper function for finding a builtin function attributes by a demangled function name.
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR)
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
StorageClass
Definition: XCOFF.h:171
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
Definition: SPIRVUtils.cpp:113
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's 2d block io instructions.
static SPIRVType * getVulkanBufferType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting an ASSIGN_TYPE instruction between Reg and its definition,...
static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getInlineSpirvType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, bool hasLinkageTy=true)
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
Definition: SPIRVUtils.cpp:93
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building extended bit operations.
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
FPDecorationId demangledPostfixToDecorationId(const std::string &S)
Definition: SPIRVUtils.h:478
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
FPDecorationId
Definition: SPIRVUtils.h:476
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:368
static Register buildConstantIntReg32(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
Definition: SPIRVUtils.cpp:859
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:283
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.h:191
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Definition: SPIRVUtils.cpp:140
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
Definition: SPIRVUtils.cpp:794
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected R...
static const Type * getMachineInstrType(MachineInstr *MI)
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getLayoutType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateDotOrFMulInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.h:176
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
Definition: SPIRVUtils.cpp:503
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's bindless image instructions.
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall ...
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:345
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:223
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1886
const MachineInstr SPIRVType
static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
bool hasBuiltinTypePrefix(StringRef Name)
Definition: SPIRVUtils.cpp:473
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
Definition: SPIRVUtils.cpp:380
static bool buildTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Definition: SPIRVUtils.cpp:374
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:856
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:266
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
BuiltIn::BuiltIn Value
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode