//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

/// Encapsulates a demangled builtin call's name, return register/type and
/// arguments.
struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;
  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
  bool IsMedia;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct IntegerDotProductBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_IntegerDotProductBuiltins_DECL
#define GET_IntegerDotProductBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  bool IsTF32;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
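/// For example, given a demangled call such as
///   "__spirv_ocl_s_max(int, int)"
/// (an illustrative skeleton), this helper returns "s_max".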
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  StringRef PassPrefix = "(anonymous namespace)::";
  std::string BuiltinName;
  // An Itanium demangler result may carry an "(anonymous namespace)::" prefix.
  if (DemangledCall.starts_with(PassPrefix))
    BuiltinName = DemangledCall.substr(PassPrefix.size());
  else
    BuiltinName = DemangledCall;
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));

  // Account for a possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template and the
  // information after the angle brackets, as well as the return type, must be
  // removed.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageWrite"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_Round"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and may contain return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
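  // For example, "__spirv_ConvertFToU_Rulong" is reduced here to
  // "__spirv_ConvertFToU".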
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
      "UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|Round|"
      "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[4];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
} // namespace SPIRV

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows the following algorithm, returning the first successful
/// match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
///    signifying the type of the first argument.
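///    For example, the OpenCL builtin "max" called with an unsigned first
///    argument is looked up as "u_max".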
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial look up was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such name was found in the set.
  return nullptr;
}

static MachineInstr *getBlockStructInstr(Register ParamReg,
                                         MachineRegisterInfo *MRI) {
  // We expect the following sequence of instructions:
  //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  //   or       = G_GLOBAL_VALUE @block_literal_global
  //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
  MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
  assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
         BitcastMI->getOperand(2).isReg());
  Register ValueReg = BitcastMI->getOperand(2).getReg();
  MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
  return ValueMI;
}

// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with prelegalizer pass.
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI->getOperand(1).isCImm());
  return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Return the type of the instruction result from the spv_assign_type
// intrinsic.
// TODO: maybe unify with prelegalizer pass.
static Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which it is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
                                              MIRBuilder, true);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst =
        GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction that loads a builtin global
/// variable of \p BuiltinValue value.
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
      /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
      false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for inserting an ASSIGN_TYPE instruction between
/// \p Reg and its definition, setting the new register as a destination of the
/// definition and assigning a SPIRVType to both registers. If SpirvTy is
/// provided, use it as the SPIRVType in ASSIGN_TYPE, otherwise create it from
/// \p Ty. Defined in SPIRVPreLegalizer.cpp.
extern void insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                              SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                              MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(
      Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true, insert a call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert a call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object.)
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C Desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}

/// Helper function for building atomic instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // Support cl_ext_float_atomics.
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating-type argument to
      // OpAtomicFAddEXT with the negated value operand.
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                        MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
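/// For example, an OpenCL barrier(CLK_LOCAL_MEM_FENCE) call is lowered below
/// to OpControlBarrier with Workgroup execution and memory scopes and
/// (SequentiallyConsistent | WorkgroupMemory) memory semantics.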
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building extended bit operations.
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpBitFieldInsert ||
       Opcode == SPIRV::OpBitFieldSExtract ||
       Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_bit_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's bindless image instructions.
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
                                        unsigned Opcode,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);

  return true;
}

/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildTernaryBitwiseFunctionINTELInst(
    const SPIRV::IncomingCall *Call, unsigned Opcode,
    MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's 2D block IO instructions.
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                          unsigned Scope, MachineIRBuilder &MIRBuilder,
                          SPIRVGlobalRegistry *GR) {
  switch (Opcode) {
  case SPIRV::OpCommitReadPipe:
  case SPIRV::OpCommitWritePipe:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
  case SPIRV::OpGroupCommitReadPipe:
  case SPIRV::OpGroupCommitWritePipe:
  case SPIRV::OpGroupReserveReadPipePackets:
  case SPIRV::OpGroupReserveWritePipePackets: {
    Register ScopeConstReg =
        MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
    MachineInstrBuilder MIB;
    MIB = MIRBuilder.buildInstr(Opcode);
    // Add the return register and type.
    if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
        Opcode == SPIRV::OpGroupReserveWritePipePackets)
      MIB.addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    MIB.addUse(ScopeConstReg);
    for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);

    return true;
  }
  default:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }
}

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR, const CallBase &CB) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
  // fmin_common and fmax_common are now deprecated, and we should use fmin and
  // fmax with the NotInf and NotNaN flags instead. Keep the original number so
  // the NoNans and NoInfs flags can be added later.
  uint32_t OrigNumber = Number;
  const SPIRVSubtarget &ST =
      cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
      (Number == SPIRV::OpenCLExtInst::fmin_common ||
       Number == SPIRV::OpenCLExtInst::fmax_common)) {
    Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
                 ? SPIRV::OpenCLExtInst::fmin
                 : SPIRV::OpenCLExtInst::fmax;
  }

  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  MIB.getInstr()->copyIRFlags(CB);
  if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
      OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
    // Add NoNans and NoInfs flags to the fmin/fmax instruction.
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
  }
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation) {
      SmallVector<uint32_t, 1> ImmArgs;
      if (GroupBuiltin->Opcode ==
              SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
          Call->Arguments.size() > 4)
        ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
    }

    // The group operation is a literal.
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType, true);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(
            CmpInst::ICMP_NE, Arg0, BoolReg,
            GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
        insertAssignInstr(Arg0, nullptr, BoolType, GR, MIRBuilder,
                          MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // If BoolReg is a boolean register, we don't need to do anything.
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    insertAssignInstr(VecReg, nullptr, VecType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
  }

  // Build work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // The minimal number of arguments set in TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
        // operand of 0 or 2. If the 'Sampled' operand is 2, then some
        // dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointer types should eventually be resolved in such a way
  // that validation of block read is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type must
  // be equal to the type pointed to by 'Ptr'."
  // For example, the function parameter type should not be the default i8
  // pointer, but should depend on the result type of the instruction where it
  // is used as a pointer argument of OpSubgroupBlockReadINTEL.

  // Build the Intel subgroups instruction.
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
}

static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  // Deduce the `Scope` operand from the builtin function name.
  SPIRV::Scope::Scope ScopeArg =
      StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
          .EndsWith("device", SPIRV::Scope::Scope::Device)
          .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
          .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
  Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);

  MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
      .addDef(ResultReg)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(ScopeReg);

  return true;
}

// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return DefaultValue (0 or 1 depending on the query). We
// also handle extending or truncating in case size_t does not match the
// expected result type's bitwidth.
//
// For a constant index >= 3 we generate:
//   %res = OpConstant %SizeT DefaultValue
//
// For other indices we generate:
//   %g = OpVariable %ptr_V3_SizeT Input
//   OpDecorate %g BuiltIn XXX
//   OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
//   OpDecorate %g Constant
//   %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
//   %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
//   %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
//   %cmp = OpULessThan %bool %idx %const_3
//   %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
1544 MachineIRBuilder &MIRBuilder,
1546 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1547 uint64_t DefaultValue) {
1548 Register IndexRegister = Call->Arguments[0];
1549 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1550 const unsigned PointerSize = GR->getPointerSize();
1551 const SPIRVType *PointerSizeType =
1552 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1553 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1554 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1555
1556 // Set up the final register to do truncation or extension on at the end.
1557 Register ToTruncate = Call->ReturnRegister;
1558
1559 // If the index is constant, we can statically determine if it is in range.
1560 bool IsConstantIndex =
1561 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1562
1563 // If it's out of range (max dimension is 3), we can just return the constant
1564 // default value (0 or 1 depending on which query function).
1565 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1566 Register DefaultReg = Call->ReturnRegister;
1567 if (PointerSize != ResultWidth) {
1568 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1569 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1570 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1571 MIRBuilder.getMF());
1572 ToTruncate = DefaultReg;
1573 }
1574 auto NewRegister =
1575 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1576 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1577 } else { // If it could be in range, we need to load from the given builtin.
1578 auto Vec3Ty =
1579 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
1580 Register LoadedVector =
1581 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1582 LLT::fixed_vector(3, PointerSize));
1583 // Set up the vreg to extract the result to (possibly a new temporary one).
1584 Register Extracted = Call->ReturnRegister;
1585 if (!IsConstantIndex || PointerSize != ResultWidth) {
1586 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1587 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1588 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1589 }
1590 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1591 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1592 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1593 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1594 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1595
1596 // If the index is dynamic, we need to check whether it is < 3 and then use a select.
1597 if (!IsConstantIndex) {
1598 insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
1599 *MRI);
1600
1601 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1602 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1603
1604 Register CompareRegister =
1605 MRI->createGenericVirtualRegister(LLT::scalar(1));
1606 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1607 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1608
1609 // Use G_ICMP to check if idxVReg < 3.
1610 MIRBuilder.buildICmp(
1611 CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1612 GR->buildConstantInt(3, MIRBuilder, IndexType, true));
1613
1614 // Get constant for the default value (0 or 1 depending on which
1615 // function).
1616 Register DefaultRegister =
1617 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1618
1619 // Get a register for the selection result (possibly a new temporary one).
1620 Register SelectionResult = Call->ReturnRegister;
1621 if (PointerSize != ResultWidth) {
1622 SelectionResult =
1623 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1624 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1625 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1626 MIRBuilder.getMF());
1627 }
1628 // Create the final G_SELECT to return the extracted value or the default.
1629 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1630 DefaultRegister);
1631 ToTruncate = SelectionResult;
1632 } else {
1633 ToTruncate = Extracted;
1634 }
1635 }
1636 // Adjust the result's bitwidth if it does not match that of the extracted size_t.
1637 if (PointerSize != ResultWidth)
1638 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1639 return true;
1640}
1641
1642static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1643 MachineIRBuilder &MIRBuilder,
1644 SPIRVGlobalRegistry *GR) {
1645 // Lookup the builtin variable record.
1646 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1647 SPIRV::BuiltIn::BuiltIn Value =
1648 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1649
1650 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1651 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1652
1653 // Build a load instruction for the builtin variable.
1654 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1655 LLT LLType;
1656 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1657 LLType =
1658 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1659 else
1660 LLType = LLT::scalar(BitWidth);
1661
1662 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1663 LLType, Call->ReturnRegister);
1664}
1665
1666static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1667 MachineIRBuilder &MIRBuilder,
1668 SPIRVGlobalRegistry *GR) {
1669 // Lookup the instruction opcode in the TableGen records.
1670 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1671 unsigned Opcode =
1672 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1673
1674 switch (Opcode) {
1675 case SPIRV::OpStore:
1676 return buildAtomicInitInst(Call, MIRBuilder);
1677 case SPIRV::OpAtomicLoad:
1678 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1679 case SPIRV::OpAtomicStore:
1680 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1681 case SPIRV::OpAtomicCompareExchange:
1682 case SPIRV::OpAtomicCompareExchangeWeak:
1683 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1684 GR);
1685 case SPIRV::OpAtomicIAdd:
1686 case SPIRV::OpAtomicISub:
1687 case SPIRV::OpAtomicOr:
1688 case SPIRV::OpAtomicXor:
1689 case SPIRV::OpAtomicAnd:
1690 case SPIRV::OpAtomicExchange:
1691 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1692 case SPIRV::OpMemoryBarrier:
1693 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1694 case SPIRV::OpAtomicFlagTestAndSet:
1695 case SPIRV::OpAtomicFlagClear:
1696 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1697 default:
1698 if (Call->isSpirvOp())
1699 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1700 GR->getSPIRVTypeID(Call->ReturnType));
1701 return false;
1702 }
1703}
1704
1705static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1706 MachineIRBuilder &MIRBuilder,
1707 SPIRVGlobalRegistry *GR) {
1708 // Lookup the instruction opcode in the TableGen records.
1709 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1710 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1711
1712 switch (Opcode) {
1713 case SPIRV::OpAtomicFAddEXT:
1714 case SPIRV::OpAtomicFMinEXT:
1715 case SPIRV::OpAtomicFMaxEXT:
1716 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1717 default:
1718 return false;
1719 }
1720}
1721
1722static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1723 MachineIRBuilder &MIRBuilder,
1724 SPIRVGlobalRegistry *GR) {
1725 // Lookup the instruction opcode in the TableGen records.
1726 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1727 unsigned Opcode =
1728 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1729
1730 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1731}
1732
1733static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1734 MachineIRBuilder &MIRBuilder,
1735 SPIRVGlobalRegistry *GR) {
1736 // Lookup the instruction opcode in the TableGen records.
1737 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1738 unsigned Opcode =
1739 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1740
1741 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1742 SPIRV::StorageClass::StorageClass ResSC =
1743 GR->getPointerStorageClass(Call->ReturnRegister);
1744 if (!isGenericCastablePtr(ResSC))
1745 return false;
1746
1747 MIRBuilder.buildInstr(Opcode)
1748 .addDef(Call->ReturnRegister)
1749 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1750 .addUse(Call->Arguments[0])
1751 .addImm(ResSC);
1752 } else {
1753 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1754 .addDef(Call->ReturnRegister)
1755 .addUse(Call->Arguments[0]);
1756 }
1757 return true;
1758}
1759
1760static bool generateDotOrFMulInst(const StringRef DemangledCall,
1761 const SPIRV::IncomingCall *Call,
1762 MachineIRBuilder &MIRBuilder,
1763 SPIRVGlobalRegistry *GR) {
1764 if (Call->isSpirvOp())
1765 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1766 GR->getSPIRVTypeID(Call->ReturnType));
1767
1768 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1769 SPIRV::OpTypeVector;
1770 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1771 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1772 bool IsSwapReq = false;
1773
1774 const auto *ST =
1775 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1776 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1777 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1778 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1779 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1780 const SPIRV::IntegerDotProductBuiltin *IntDot =
1781 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1782 if (IntDot) {
1783 OC = IntDot->Opcode;
1784 IsSwapReq = IntDot->IsSwapReq;
1785 } else if (IsVec) {
1786 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1787 // integers.
1788 LLVMContext &Ctx = MIRBuilder.getContext();
1789 SmallVector<StringRef, 10> TypeStrs;
1790 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
1791 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1792 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1793
1794 if (Call->BuiltinName == "dot") {
1795 if (IsFirstSigned && IsSecondSigned)
1796 OC = SPIRV::OpSDot;
1797 else if (!IsFirstSigned && !IsSecondSigned)
1798 OC = SPIRV::OpUDot;
1799 else {
1800 OC = SPIRV::OpSUDot;
1801 if (!IsFirstSigned)
1802 IsSwapReq = true;
1803 }
1804 } else if (Call->BuiltinName == "dot_acc_sat") {
1805 if (IsFirstSigned && IsSecondSigned)
1806 OC = SPIRV::OpSDotAccSat;
1807 else if (!IsFirstSigned && !IsSecondSigned)
1808 OC = SPIRV::OpUDotAccSat;
1809 else {
1810 OC = SPIRV::OpSUDotAccSat;
1811 if (!IsFirstSigned)
1812 IsSwapReq = true;
1813 }
1814 }
1815 }
1816 }
1817
1818 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1819 .addDef(Call->ReturnRegister)
1820 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1821
1822 if (IsSwapReq) {
1823 MIB.addUse(Call->Arguments[1]);
1824 MIB.addUse(Call->Arguments[0]);
1825 // Needed for the dot_acc_sat* builtins.
1826 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1827 MIB.addUse(Call->Arguments[i]);
1828 } else {
1829 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1830 MIB.addUse(Call->Arguments[i]);
1831 }
1832
1833 // Add the Packed Vector Format operand for integer dot product builtins when
1834 // the arguments are scalar.
1835 if (!IsVec && OC != SPIRV::OpFMulS)
1836 MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
1837
1838 return true;
1839}
1840
1841static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1842 MachineIRBuilder &MIRBuilder,
1843 SPIRVGlobalRegistry *GR) {
1844 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1845 SPIRV::BuiltIn::BuiltIn Value =
1846 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1847
1848 // For now, we only support a single Wave intrinsic with a single return type.
1849 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1850 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1851
1852 return buildBuiltinVariableLoad(
1853 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1854 /* isConst= */ false, /* hasLinkageTy= */ false);
1855}
1856
1857// We expect a builtin
1858// Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1859// where %result is a pointer to where the result of the builtin execution
1860// is to be stored, and generate the following instructions:
1861// Res = Opcode RetType Operand1 Operand2
1862// OpStore RetVariable Res
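// For instance (editor's illustration, assuming a 32-bit overload), a call
//   __spirv_IAddCarry(ptr sret(%struct.anon) %res, i32 %a, i32 %b)
// is lowered to
//   %r = OpIAddCarry %struct_i32_i32 %a %b
//   OpStore %res %r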
1863static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1864 MachineIRBuilder &MIRBuilder,
1865 SPIRVGlobalRegistry *GR) {
1866 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1867 unsigned Opcode =
1868 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1869
1870 Register SRetReg = Call->Arguments[0];
1871 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1872 SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1873 if (!RetType)
1874 report_fatal_error("The first parameter must be a pointer");
1875 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1876 report_fatal_error("Expected struct type result for the arithmetic with "
1877 "overflow builtins");
1878
1879 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1880 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1881 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1882 report_fatal_error("Operands must have the same type");
1883 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1884 switch (Opcode) {
1885 case SPIRV::OpIAddCarryS:
1886 Opcode = SPIRV::OpIAddCarryV;
1887 break;
1888 case SPIRV::OpISubBorrowS:
1889 Opcode = SPIRV::OpISubBorrowV;
1890 break;
1891 }
1892
1893 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1894 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1895 if (const TargetRegisterClass *DstRC =
1896 MRI->getRegClassOrNull(Call->Arguments[1])) {
1897 MRI->setRegClass(ResReg, DstRC);
1898 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1899 } else {
1900 MRI->setType(ResReg, LLT::scalar(64));
1901 }
1902 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1903 MIRBuilder.buildInstr(Opcode)
1904 .addDef(ResReg)
1905 .addUse(GR->getSPIRVTypeID(RetType))
1906 .addUse(Call->Arguments[1])
1907 .addUse(Call->Arguments[2]);
1908 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1909 return true;
1910}
1911
1912static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1913 MachineIRBuilder &MIRBuilder,
1914 SPIRVGlobalRegistry *GR) {
1915 // Lookup the builtin record.
1916 SPIRV::BuiltIn::BuiltIn Value =
1917 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1918 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
1919 Value == SPIRV::BuiltIn::NumWorkgroups ||
1920 Value == SPIRV::BuiltIn::WorkgroupSize ||
1921 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1922 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
1923}
1924
1925static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1926 MachineIRBuilder &MIRBuilder,
1927 SPIRVGlobalRegistry *GR) {
1928 // Lookup the image size query component number in the TableGen records.
1929 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1930 uint32_t Component =
1931 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1932 // Query result may either be a vector or a scalar. If return type is not a
1933 // vector, expect only a single size component. Otherwise get the number of
1934 // expected components.
1935 unsigned NumExpectedRetComponents =
1936 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
1937 ? Call->ReturnType->getOperand(2).getImm()
1938 : 1;
1939 // Get the actual number of query result/size components.
1940 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1941 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1942 Register QueryResult = Call->ReturnRegister;
1943 SPIRVType *QueryResultType = Call->ReturnType;
1944 if (NumExpectedRetComponents != NumActualRetComponents) {
1945 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
1946 ? Call->ReturnType->getOperand(1).getImm()
1947 : 32;
1948 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1949 LLT::fixed_vector(NumActualRetComponents, Bitwidth));
1950 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
1951 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
1952 QueryResultType = GR->getOrCreateSPIRVVectorType(
1953 IntTy, NumActualRetComponents, MIRBuilder, true);
1954 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1955 }
1956 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1957 unsigned Opcode =
1958 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1959 auto MIB = MIRBuilder.buildInstr(Opcode)
1960 .addDef(QueryResult)
1961 .addUse(GR->getSPIRVTypeID(QueryResultType))
1962 .addUse(Call->Arguments[0]);
1963 if (!IsDimBuf)
1964 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
1965 if (NumExpectedRetComponents == NumActualRetComponents)
1966 return true;
1967 if (NumExpectedRetComponents == 1) {
1968 // Only 1 component is expected, build OpCompositeExtract instruction.
1969 unsigned ExtractedComposite =
1970 Component == 3 ? NumActualRetComponents - 1 : Component;
1971 assert(ExtractedComposite < NumActualRetComponents &&
1972 "Invalid composite index!");
1973 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1974 SPIRVType *NewType = nullptr;
1975 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1976 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
1977 if (TypeReg != NewTypeReg &&
1978 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
1979 TypeReg = NewTypeReg;
1980 }
1981 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1982 .addDef(Call->ReturnRegister)
1983 .addUse(TypeReg)
1984 .addUse(QueryResult)
1985 .addImm(ExtractedComposite);
1986 if (NewType != nullptr)
1987 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1988 MIRBuilder.getMF().getRegInfo());
1989 } else {
1990 // More than 1 component is expected, fill a new vector.
1991 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1992 .addDef(Call->ReturnRegister)
1993 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1994 .addUse(QueryResult)
1995 .addUse(QueryResult);
1996 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
1997 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
1998 }
1999 return true;
2000}
2001
2002static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
2003 MachineIRBuilder &MIRBuilder,
2004 SPIRVGlobalRegistry *GR) {
2005 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2006 "Image samples query result must be of int type!");
2007
2008 // Lookup the instruction opcode in the TableGen records.
2009 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2010 unsigned Opcode =
2011 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2012
2013 Register Image = Call->Arguments[0];
2014 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2015 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
2016 (void)ImageDimensionality;
2017
2018 switch (Opcode) {
2019 case SPIRV::OpImageQuerySamples:
2020 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2021 "Image must be of 2D dimensionality");
2022 break;
2023 case SPIRV::OpImageQueryLevels:
2024 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2025 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2026 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2027 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2028 "Image must be of 1D/2D/3D/Cube dimensionality");
2029 break;
2030 }
2031
2032 MIRBuilder.buildInstr(Opcode)
2033 .addDef(Call->ReturnRegister)
2034 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2035 .addUse(Image);
2036 return true;
2037}
2038
2039// TODO: Move to TableGen.
2040static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2041getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
2042 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2043 case SPIRV::CLK_ADDRESS_CLAMP:
2044 return SPIRV::SamplerAddressingMode::Clamp;
2045 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2046 return SPIRV::SamplerAddressingMode::ClampToEdge;
2047 case SPIRV::CLK_ADDRESS_REPEAT:
2048 return SPIRV::SamplerAddressingMode::Repeat;
2049 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2050 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2051 case SPIRV::CLK_ADDRESS_NONE:
2052 return SPIRV::SamplerAddressingMode::None;
2053 default:
2054 report_fatal_error("Unknown CL address mode");
2055 }
2056}
2057
2058static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2059 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2060}
2061
2062static SPIRV::SamplerFilterMode::SamplerFilterMode
2063getSamplerFilterModeFromBitmask(unsigned Bitmask) {
2064 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2065 return SPIRV::SamplerFilterMode::Linear;
2066 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2067 return SPIRV::SamplerFilterMode::Nearest;
2068 return SPIRV::SamplerFilterMode::Nearest;
2069}
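// Editor's illustration of the three helpers above: an OpenCL sampler literal
//   CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR
// maps to the OpConstantSampler operands (Clamp, /*Normalized=*/1, Linear).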
2070
2071static bool generateReadImageInst(const StringRef DemangledCall,
2072 const SPIRV::IncomingCall *Call,
2073 MachineIRBuilder &MIRBuilder,
2074 SPIRVGlobalRegistry *GR) {
2075 if (Call->isSpirvOp())
2076 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2077 GR->getSPIRVTypeID(Call->ReturnType));
2078 Register Image = Call->Arguments[0];
2079 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2080 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2081 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2082 if (HasOclSampler) {
2083 Register Sampler = Call->Arguments[1];
2084
2085 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2086 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2087 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2088 Sampler = GR->buildConstantSampler(
2089 Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
2090 getSamplerParamFromBitmask(SamplerMask),
2091 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2092 }
2093 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2094 SPIRVType *SampledImageType =
2095 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2096 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2097
2098 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2099 .addDef(SampledImage)
2100 .addUse(GR->getSPIRVTypeID(SampledImageType))
2101 .addUse(Image)
2102 .addUse(Sampler);
2103
2104 Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
2105 MIRBuilder);
2106
2107 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2108 SPIRVType *TempType =
2109 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2110 Register TempRegister =
2111 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2112 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2113 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2114 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2115 .addDef(TempRegister)
2116 .addUse(GR->getSPIRVTypeID(TempType))
2117 .addUse(SampledImage)
2118 .addUse(Call->Arguments[2]) // Coordinate.
2119 .addImm(SPIRV::ImageOperand::Lod)
2120 .addUse(Lod);
2121 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2122 .addDef(Call->ReturnRegister)
2123 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2124 .addUse(TempRegister)
2125 .addImm(0);
2126 } else {
2127 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2128 .addDef(Call->ReturnRegister)
2129 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2130 .addUse(SampledImage)
2131 .addUse(Call->Arguments[2]) // Coordinate.
2132 .addImm(SPIRV::ImageOperand::Lod)
2133 .addUse(Lod);
2134 }
2135 } else if (HasMsaa) {
2136 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2137 .addDef(Call->ReturnRegister)
2138 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2139 .addUse(Image)
2140 .addUse(Call->Arguments[1]) // Coordinate.
2141 .addImm(SPIRV::ImageOperand::Sample)
2142 .addUse(Call->Arguments[2]);
2143 } else {
2144 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2145 .addDef(Call->ReturnRegister)
2146 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2147 .addUse(Image)
2148 .addUse(Call->Arguments[1]); // Coordinate.
2149 }
2150 return true;
2151}
2152
2153static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
2154 MachineIRBuilder &MIRBuilder,
2155 SPIRVGlobalRegistry *GR) {
2156 if (Call->isSpirvOp())
2157 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2158 Register(0));
2159 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2160 .addUse(Call->Arguments[0]) // Image.
2161 .addUse(Call->Arguments[1]) // Coordinate.
2162 .addUse(Call->Arguments[2]); // Texel.
2163 return true;
2164}
2165
2166static bool generateSampleImageInst(const StringRef DemangledCall,
2167 const SPIRV::IncomingCall *Call,
2168 MachineIRBuilder &MIRBuilder,
2169 SPIRVGlobalRegistry *GR) {
2170 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2171 if (Call->Builtin->Name.contains_insensitive(
2172 "__translate_sampler_initializer")) {
2173 // Build sampler literal.
2174 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2175 Register Sampler = GR->buildConstantSampler(
2176 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2177 getSamplerParamFromBitmask(Bitmask),
2178 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2179 return Sampler.isValid();
2180 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2181 // Create OpSampledImage.
2182 Register Image = Call->Arguments[0];
2183 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2184 SPIRVType *SampledImageType =
2185 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2186 Register SampledImage =
2187 Call->ReturnRegister.isValid()
2188 ? Call->ReturnRegister
2189 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2190 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2191 .addDef(SampledImage)
2192 .addUse(GR->getSPIRVTypeID(SampledImageType))
2193 .addUse(Image)
2194 .addUse(Call->Arguments[1]); // Sampler.
2195 return true;
2196 } else if (Call->Builtin->Name.contains_insensitive(
2197 "__spirv_ImageSampleExplicitLod")) {
2198 // Sample an image using an explicit level of detail.
2199 std::string ReturnType = DemangledCall.str();
2200 if (DemangledCall.contains("_R")) {
2201 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2202 ReturnType = ReturnType.substr(0, ReturnType.find('('));
2203 }
2204 SPIRVType *Type =
2205 Call->ReturnType
2206 ? Call->ReturnType
2207 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder, true);
2208 if (!Type) {
2209 std::string DiagMsg =
2210 "Unable to recognize SPIRV type name: " + ReturnType;
2211 report_fatal_error(DiagMsg.c_str());
2212 }
2213 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2214 .addDef(Call->ReturnRegister)
2215 .addUse(GR->getSPIRVTypeID(Type))
2216 .addUse(Call->Arguments[0]) // Image.
2217 .addUse(Call->Arguments[1]) // Coordinate.
2218 .addImm(SPIRV::ImageOperand::Lod)
2219 .addUse(Call->Arguments[3]);
2220 return true;
2221 }
2222 return false;
2223}
2224
2225static bool generateSelectInst(const SPIRV::IncomingCall *Call,
2226 MachineIRBuilder &MIRBuilder) {
2227 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2228 Call->Arguments[1], Call->Arguments[2]);
2229 return true;
2230}
2231
2232static bool generateConstructInst(const SPIRV::IncomingCall *Call,
2233 MachineIRBuilder &MIRBuilder,
2234 SPIRVGlobalRegistry *GR) {
2235 createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2236 SPIRV::OpCompositeConstructContinuedINTEL,
2237 Call->Arguments, Call->ReturnRegister,
2238 GR->getSPIRVTypeID(Call->ReturnType));
2239 return true;
2240}
2241
2242static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
2243 MachineIRBuilder &MIRBuilder,
2244 SPIRVGlobalRegistry *GR) {
2245 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2246 unsigned Opcode =
2247 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2248 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2249 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2250 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2251 unsigned ArgSz = Call->Arguments.size();
2252 unsigned LiteralIdx = 0;
2253 switch (Opcode) {
2254 // Memory operand is optional and is literal.
2255 case SPIRV::OpCooperativeMatrixLoadKHR:
2256 LiteralIdx = ArgSz > 3 ? 3 : 0;
2257 break;
2258 case SPIRV::OpCooperativeMatrixStoreKHR:
2259 LiteralIdx = ArgSz > 4 ? 4 : 0;
2260 break;
2261 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2262 LiteralIdx = ArgSz > 7 ? 7 : 0;
2263 break;
2264 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2265 LiteralIdx = ArgSz > 8 ? 8 : 0;
2266 break;
2267 // Cooperative Matrix Operands operand is optional and is literal.
2268 case SPIRV::OpCooperativeMatrixMulAddKHR:
2269 LiteralIdx = ArgSz > 3 ? 3 : 0;
2270 break;
2271 };
2272
2273 SmallVector<uint32_t, 1> ImmArgs;
2274 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2275 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2276 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2277 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2278 .addUse(Call->Arguments[0]) // pointer
2279 .addUse(Call->Arguments[1]) // rows
2280 .addUse(Call->Arguments[2]) // columns
2281 .addImm(CacheLevel) // cache level
2282 .addUse(Call->Arguments[4]); // memory layout
2283 if (ArgSz > 5)
2284 MIB.addUse(Call->Arguments[5]); // stride
2285 if (ArgSz > 6) {
2286 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2287 MIB.addImm(MemOp); // memory operand
2288 }
2289 return true;
2290 }
2291 if (LiteralIdx > 0)
2292 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2293 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2294 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2295 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2296 if (!CoopMatrType)
2297 report_fatal_error("Can't find a register's type definition");
2298 MIRBuilder.buildInstr(Opcode)
2299 .addDef(Call->ReturnRegister)
2300 .addUse(TypeReg)
2301 .addUse(CoopMatrType->getOperand(0).getReg());
2302 return true;
2303 }
2304 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2305 IsSet ? TypeReg : Register(0), ImmArgs);
2306}
2307
2308static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2309 MachineIRBuilder &MIRBuilder,
2310 SPIRVGlobalRegistry *GR) {
2311 // Lookup the instruction opcode in the TableGen records.
2312 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2313 unsigned Opcode =
2314 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2315 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2316
2317 switch (Opcode) {
2318 case SPIRV::OpSpecConstant: {
2319 // Build the SpecID decoration.
2320 unsigned SpecId =
2321 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2322 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2323 {SpecId});
2324 // Determine the constant MI.
2325 Register ConstRegister = Call->Arguments[1];
2326 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2327 assert(Const &&
2328 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2329 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2330 "Argument should be either an int or floating-point constant");
2331 // Determine the opcode and built the OpSpec MI.
2332 const MachineOperand &ConstOperand = Const->getOperand(1);
2333 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2334 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2335 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2336 ? SPIRV::OpSpecConstantTrue
2337 : SPIRV::OpSpecConstantFalse;
2338 }
2339 auto MIB = MIRBuilder.buildInstr(Opcode)
2340 .addDef(Call->ReturnRegister)
2341 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2342
2343 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2344 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2345 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2346 else
2347 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2348 }
2349 return true;
2350 }
2351 case SPIRV::OpSpecConstantComposite: {
2352 createContinuedInstructions(MIRBuilder, Opcode, 3,
2353 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2354 Call->Arguments, Call->ReturnRegister,
2355 GR->getSPIRVTypeID(Call->ReturnType));
2356 return true;
2357 }
2358 default:
2359 return false;
2360 }
2361}
2362
2363static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
2364 MachineIRBuilder &MIRBuilder,
2365 SPIRVGlobalRegistry *GR) {
2366 // Lookup the instruction opcode in the TableGen records.
2367 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2368 unsigned Opcode =
2369 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2370
2371 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2372}
2373
2374static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
2375 MachineIRBuilder &MIRBuilder,
2376 SPIRVGlobalRegistry *GR) {
2377 // Lookup the instruction opcode in the TableGen records.
2378 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2379 unsigned Opcode =
2380 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2381
2382 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2383}
2384
2385static bool
2386generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call,
2387 MachineIRBuilder &MIRBuilder,
2388 SPIRVGlobalRegistry *GR) {
2389 // Lookup the instruction opcode in the TableGen records.
2390 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2391 unsigned Opcode =
2392 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2393
2394 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2395}
2396
2397static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
2398 MachineIRBuilder &MIRBuilder,
2399 SPIRVGlobalRegistry *GR) {
2400 // Lookup the instruction opcode in the TableGen records.
2401 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2402 unsigned Opcode =
2403 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2404
2405 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2406}
2407
2408static bool generatePipeInst(const SPIRV::IncomingCall *Call,
2409 MachineIRBuilder &MIRBuilder,
2410 SPIRVGlobalRegistry *GR) {
2411 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2412 unsigned Opcode =
2413 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2414
2415 unsigned Scope = SPIRV::Scope::Workgroup;
2416 if (Builtin->Name.contains("sub_group"))
2417 Scope = SPIRV::Scope::Subgroup;
2418
2419 return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2420}
2421
2422static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call,
2423 MachineIRBuilder &MIRBuilder,
2424 SPIRVGlobalRegistry *GR) {
2425 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2426 unsigned Opcode =
2427 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2428
2429 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2430 unsigned ArgSz = Call->Arguments.size();
2431 SmallVector<uint32_t, 1> ImmArgs;
2432 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2433 // Memory operand is optional and is literal.
2434 if (ArgSz > 3)
2435 ImmArgs.push_back(
2436 getConstFromIntrinsic(Call->Arguments[/*Literal index*/ 3], MRI));
2437
2438 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2439 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2440 IsSet ? TypeReg : Register(0), ImmArgs);
2441}
2442
2443static bool buildNDRange(const SPIRV::IncomingCall *Call,
2444 MachineIRBuilder &MIRBuilder,
2445 SPIRVGlobalRegistry *GR) {
2446 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2447 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2448 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2449 PtrType->getOperand(2).isReg());
2450 Register TypeReg = PtrType->getOperand(2).getReg();
2451 SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
2452 MachineFunction &MF = MIRBuilder.getMF();
2453 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2454 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2455 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
2456 // three other arguments, so pass a zero constant for any that are absent.
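// Editor's note on the mapping below (derived from the OpenCL ndrange_*D
// overloads; Arguments[0] is the sret destination, GWS/LWS = global/local
// work size):
//   NumArgs == 2: (sret, GWS)              -> LWS and offset default to zero
//   NumArgs == 3: (sret, GWS, LWS)         -> offset defaults to zero
//   NumArgs == 4: (sret, offset, GWS, LWS)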
2457 unsigned NumArgs = Call->Arguments.size();
2458 assert(NumArgs >= 2);
2459 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2460 Register LocalWorkSize =
2461 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2462 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2463 if (NumArgs < 4) {
2464 Register Const;
2465 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2466 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2467 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2468 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2469 DefInstr->getOperand(3).isReg());
2470 Register GWSPtr = DefInstr->getOperand(3).getReg();
2471 // TODO: Maybe simplify generation of the type of the fields.
2472 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2473 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2474 Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
2475 Type *FieldTy = ArrayType::get(BaseTy, Size);
2476 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2477 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
2478 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2479 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2480 MIRBuilder.buildInstr(SPIRV::OpLoad)
2481 .addDef(GlobalWorkSize)
2482 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2483 .addUse(GWSPtr);
2484 const SPIRVSubtarget &ST =
2485 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2486 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2487 SpvFieldTy, *ST.getInstrInfo());
2488 } else {
2489 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy, true);
2490 }
2491 if (!LocalWorkSize.isValid())
2492 LocalWorkSize = Const;
2493 if (!GlobalWorkOffset.isValid())
2494 GlobalWorkOffset = Const;
2495 }
2496 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2497 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2498 .addDef(TmpReg)
2499 .addUse(TypeReg)
2500 .addUse(GlobalWorkSize)
2501 .addUse(LocalWorkSize)
2502 .addUse(GlobalWorkOffset);
2503 return MIRBuilder.buildInstr(SPIRV::OpStore)
2504 .addUse(Call->Arguments[0])
2505 .addUse(TmpReg);
2506}
2507
2508// TODO: maybe move to the global registry.
2509static SPIRVType *
2510getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2511 SPIRVGlobalRegistry *GR) {
2512 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2513 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2514 Type *PtrType = PointerType::get(Context, SC1);
2515 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2516 SPIRV::AccessQualifier::ReadWrite, true);
2517}
2518
2519static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2520 MachineIRBuilder &MIRBuilder,
2521 SPIRVGlobalRegistry *GR) {
2522 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2523 const DataLayout &DL = MIRBuilder.getDataLayout();
2524 bool IsSpirvOp = Call->isSpirvOp();
2525 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2526 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2527
2528 // Build the vararg instructions before emitting OpEnqueueKernel.
2529 // Local size arguments: sizes of the block invoke arguments. Clang generates
2530 // the local size operands as an array, so we need to unpack them.
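// Editor's illustration: for a call such as
//   enqueue_kernel(q, flags, nd, ^(local int *p, local float *s){ ... },
//                  size0, size1)
// Clang stores size0/size1 into a stack array; the loop below emits one
// spv_gep per element so that each size becomes a separate kernel operand.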
2531 SmallVector<Register, 16> LocalSizes;
2532 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2533 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2534 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2535 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2536 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2537 GepMI->getOperand(3).isReg());
2538 Register ArrayReg = GepMI->getOperand(3).getReg();
2539 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2540 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2541 assert(LocalSizeTy && "Local size type is expected");
2542 const uint64_t LocalSizeNum =
2543 cast<ArrayType>(LocalSizeTy)->getNumElements();
2544 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2545 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2546 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2547 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2548 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2549 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2550 MRI->setType(Reg, LLType);
2551 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2552 auto GEPInst = MIRBuilder.buildIntrinsic(
2553 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2554 GEPInst
2555 .addImm(GepMI->getOperand(2).getImm()) // In bounds.
2556 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2557 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2558 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2559 LocalSizes.push_back(Reg);
2560 }
2561 }
2562
2563 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2564 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2565 .addDef(Call->ReturnRegister)
2566 .addUse(GR->getSPIRVTypeID(Int32Ty));
2567
2568 // Copy all arguments before block invoke function pointer.
2569 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2570 for (unsigned i = 0; i < BlockFIdx; i++)
2571 MIB.addUse(Call->Arguments[i]);
2572
2573 // If there are no event arguments in the original call, add dummy ones.
2574 if (!HasEvents) {
2575 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2576 Register NullPtr = GR->getOrCreateConstNullPtr(
2577 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2578 MIB.addUse(NullPtr); // Dummy wait events.
2579 MIB.addUse(NullPtr); // Dummy ret event.
2580 }
2581
2582 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2583 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2584 // Invoke: Pointer to invoke function.
2585 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2586
2587 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2588 // Param: Pointer to block literal.
2589 MIB.addUse(BlockLiteralReg);
2590
2591 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2592 // TODO: these numbers should be obtained from the block literal structure.
2593 // Param Size: Size of the block literal structure.
2594 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2595 // Param Alignment: Alignment of the block literal structure.
2596 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2597 MIRBuilder, GR));
2598
2599 for (unsigned i = 0; i < LocalSizes.size(); i++)
2600 MIB.addUse(LocalSizes[i]);
2601 return true;
2602}
2603
2604static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2605 MachineIRBuilder &MIRBuilder,
2606 SPIRVGlobalRegistry *GR) {
2607 // Lookup the instruction opcode in the TableGen records.
2608 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2609 unsigned Opcode =
2610 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2611
2612 switch (Opcode) {
2613 case SPIRV::OpRetainEvent:
2614 case SPIRV::OpReleaseEvent:
2615 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2616 case SPIRV::OpCreateUserEvent:
2617 case SPIRV::OpGetDefaultQueue:
2618 return MIRBuilder.buildInstr(Opcode)
2619 .addDef(Call->ReturnRegister)
2620 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2621 case SPIRV::OpIsValidEvent:
2622 return MIRBuilder.buildInstr(Opcode)
2623 .addDef(Call->ReturnRegister)
2624 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2625 .addUse(Call->Arguments[0]);
2626 case SPIRV::OpSetUserEventStatus:
2627 return MIRBuilder.buildInstr(Opcode)
2628 .addUse(Call->Arguments[0])
2629 .addUse(Call->Arguments[1]);
2630 case SPIRV::OpCaptureEventProfilingInfo:
2631 return MIRBuilder.buildInstr(Opcode)
2632 .addUse(Call->Arguments[0])
2633 .addUse(Call->Arguments[1])
2634 .addUse(Call->Arguments[2]);
2635 case SPIRV::OpBuildNDRange:
2636 return buildNDRange(Call, MIRBuilder, GR);
2637 case SPIRV::OpEnqueueKernel:
2638 return buildEnqueueKernel(Call, MIRBuilder, GR);
2639 default:
2640 return false;
2641 }
2642}
2643
2644static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2645 MachineIRBuilder &MIRBuilder,
2646 SPIRVGlobalRegistry *GR) {
2647 // Lookup the instruction opcode in the TableGen records.
2648 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2649 unsigned Opcode =
2650 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2651
2652 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2653 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2654 if (Call->isSpirvOp())
2655 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2656 IsSet ? TypeReg : Register(0));
2657
2658 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2659
2660 switch (Opcode) {
2661 case SPIRV::OpGroupAsyncCopy: {
2662 SPIRVType *NewType =
2663 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2664 ? nullptr
2665 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2666 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2667 unsigned NumArgs = Call->Arguments.size();
2668 Register EventReg = Call->Arguments[NumArgs - 1];
2669 bool Res = MIRBuilder.buildInstr(Opcode)
2670 .addDef(Call->ReturnRegister)
2671 .addUse(TypeReg)
2672 .addUse(Scope)
2673 .addUse(Call->Arguments[0])
2674 .addUse(Call->Arguments[1])
2675 .addUse(Call->Arguments[2])
2676 .addUse(Call->Arguments.size() > 4
2677 ? Call->Arguments[3]
2678 : buildConstantIntReg32(1, MIRBuilder, GR))
2679 .addUse(EventReg);
2680 if (NewType != nullptr)
2681 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2682 MIRBuilder.getMF().getRegInfo());
2683 return Res;
2684 }
2685 case SPIRV::OpGroupWaitEvents:
2686 return MIRBuilder.buildInstr(Opcode)
2687 .addUse(Scope)
2688 .addUse(Call->Arguments[0])
2689 .addUse(Call->Arguments[1]);
2690 default:
2691 return false;
2692 }
2693}
2694
2695static bool generateConvertInst(const StringRef DemangledCall,
2696 const SPIRV::IncomingCall *Call,
2697 MachineIRBuilder &MIRBuilder,
2698 SPIRVGlobalRegistry *GR) {
2699 // Lookup the conversion builtin in the TableGen records.
2700 const SPIRV::ConvertBuiltin *Builtin =
2701 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2702
2703 if (!Builtin && Call->isSpirvOp()) {
2704 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2705 unsigned Opcode =
2706 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2707 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2708 GR->getSPIRVTypeID(Call->ReturnType));
2709 }
2710
2711 assert(Builtin && "Conversion builtin not found.");
2712 if (Builtin->IsSaturated)
2713 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2714 SPIRV::Decoration::SaturatedConversion, {});
2715 if (Builtin->IsRounded)
2716 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2717 SPIRV::Decoration::FPRoundingMode,
2718 {(unsigned)Builtin->RoundingMode});
2719
2720 std::string NeedExtMsg; // no errors if empty
2721 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2722 unsigned Opcode = SPIRV::OpNop;
2723 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2724 // Int -> ...
2725 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2726 // Int -> Int
2727 if (Builtin->IsSaturated)
2728 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2729 : SPIRV::OpSatConvertSToU;
2730 else
2731 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2732 : SPIRV::OpSConvert;
2733 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2734 SPIRV::OpTypeFloat)) {
2735 // Int -> Float
2736 if (Builtin->IsBfloat16) {
2737 const auto *ST = static_cast<const SPIRVSubtarget *>(
2738 &MIRBuilder.getMF().getSubtarget());
2739 if (!ST->canUseExtension(
2740 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2741 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2742 IsRightComponentsNumber =
2743 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2744 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2745 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2746 } else {
2747 bool IsSourceSigned =
2748 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2749 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2750 }
2751 }
2752 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2753 SPIRV::OpTypeFloat)) {
2754 // Float -> ...
2755 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2756 // Float -> Int
2757 if (Builtin->IsBfloat16) {
2758 const auto *ST = static_cast<const SPIRVSubtarget *>(
2759 &MIRBuilder.getMF().getSubtarget());
2760 if (!ST->canUseExtension(
2761 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2762 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2763 IsRightComponentsNumber =
2764 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2765 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2766 Opcode = SPIRV::OpConvertFToBF16INTEL;
2767 } else {
2768 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2769 : SPIRV::OpConvertFToU;
2770 }
2771 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2772 SPIRV::OpTypeFloat)) {
2773 if (Builtin->IsTF32) {
2774 const auto *ST = static_cast<const SPIRVSubtarget *>(
2775 &MIRBuilder.getMF().getSubtarget());
2776 if (!ST->canUseExtension(
2777 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2778 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
2779 IsRightComponentsNumber =
2780 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2781 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2782 Opcode = SPIRV::OpRoundFToTF32INTEL;
2783 } else {
2784 // Float -> Float
2785 Opcode = SPIRV::OpFConvert;
2786 }
2787 }
2788 }
2789
2790 if (!NeedExtMsg.empty()) {
2791 std::string DiagMsg = std::string(Builtin->Name) +
2792 ": the builtin requires the following SPIR-V "
2793 "extension: " +
2794 NeedExtMsg;
2795 report_fatal_error(DiagMsg.c_str(), false);
2796 }
2797 if (!IsRightComponentsNumber) {
2798 std::string DiagMsg =
2799 std::string(Builtin->Name) +
2800 ": result and argument must have the same number of components";
2801 report_fatal_error(DiagMsg.c_str(), false);
2802 }
2803 assert(Opcode != SPIRV::OpNop &&
2804 "Conversion between the types not implemented!");
2805
2806 MIRBuilder.buildInstr(Opcode)
2807 .addDef(Call->ReturnRegister)
2808 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2809 .addUse(Call->Arguments[0]);
2810 return true;
2811}
2812
2813static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2814 MachineIRBuilder &MIRBuilder,
2815 SPIRVGlobalRegistry *GR) {
2816 // Lookup the vector load/store builtin in the TableGen records.
2817 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2818 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2819 Call->Builtin->Set);
2820 // Build extended instruction.
2821 auto MIB =
2822 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2823 .addDef(Call->ReturnRegister)
2824 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2825 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2826 .addImm(Builtin->Number);
2827 for (auto Argument : Call->Arguments)
2828 MIB.addUse(Argument);
2829 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2830 MIB.addImm(Builtin->ElementCount);
2831
2832 // Rounding mode should be passed as a last argument in the MI for builtins
2833 // like "vstorea_halfn_r".
2834 if (Builtin->IsRounded)
2835 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2836 return true;
2837}
2838
2839static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2840 MachineIRBuilder &MIRBuilder,
2841 SPIRVGlobalRegistry *GR) {
2842 // Lookup the instruction opcode in the TableGen records.
2843 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2844 unsigned Opcode =
2845 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2846 bool IsLoad = Opcode == SPIRV::OpLoad;
2847 // Build the instruction.
2848 auto MIB = MIRBuilder.buildInstr(Opcode);
2849 if (IsLoad) {
2850 MIB.addDef(Call->ReturnRegister);
2851 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2852 }
2853 // Add a pointer to the value to load/store.
2854 MIB.addUse(Call->Arguments[0]);
2855 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2856 // Add a value to store.
2857 if (!IsLoad)
2858 MIB.addUse(Call->Arguments[1]);
2859 // Add optional memory attributes and an alignment.
2860 unsigned NumArgs = Call->Arguments.size();
2861 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2862 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2863 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2864 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2865 return true;
2866}
2867
2868namespace SPIRV {
2869// Try to find a builtin function's attributes by its demangled name and
2870// return a tuple <builtin group, op code, ext instruction number>, or a special
2871// tuple value <-1, 0, 0> if the builtin function is not found.
2872// Not all builtin functions are supported, only those with a ready-to-use op
2873// code or instruction number defined in TableGen.
2874// TODO: consider a major rework of mapping demangled calls into builtin
2875// functions to unify the search and decrease the number of individual cases.
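// Usage sketch (editor's addition; the demangled string must match the
// TableGen records for the lookup to succeed):
//   auto [Group, Opcode, ExtNo] =
//       SPIRV::mapBuiltinToOpcode("fmax(float, float)",
//                                 SPIRV::InstructionSet::OpenCL_std);
//   // For an extended-set builtin such as fmax: Group == SPIRV::Extended,
//   // Opcode == 0, and ExtNo is the OpenCL.std extended instruction number.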
2876std::tuple<int, unsigned, unsigned>
2877mapBuiltinToOpcode(const StringRef DemangledCall,
2878 SPIRV::InstructionSet::InstructionSet Set) {
2879 Register Reg;
2880 SmallVector<Register> Args;
2881 std::unique_ptr<const IncomingCall> Call =
2882 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
2883 if (!Call)
2884 return std::make_tuple(-1, 0, 0);
2885
2886 switch (Call->Builtin->Group) {
2887 case SPIRV::Relational:
2888 case SPIRV::Atomic:
2889 case SPIRV::Barrier:
2890 case SPIRV::CastToPtr:
2891 case SPIRV::ImageMiscQuery:
2892 case SPIRV::SpecConstant:
2893 case SPIRV::Enqueue:
2894 case SPIRV::AsyncCopy:
2895 case SPIRV::LoadStore:
2896 case SPIRV::CoopMatr:
2897 if (const auto *R =
2898 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2899 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2900 break;
2901 case SPIRV::Extended:
2902 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2903 Call->Builtin->Set))
2904 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2905 break;
2906 case SPIRV::VectorLoadStore:
2907 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2908 Call->Builtin->Set))
2909 return std::make_tuple(SPIRV::Extended, 0, R->Number);
2910 break;
2911 case SPIRV::Group:
2912 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2913 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2914 break;
2915 case SPIRV::AtomicFloating:
2916 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2917 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2918 break;
2919 case SPIRV::IntelSubgroups:
2920 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2921 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2922 break;
2923 case SPIRV::GroupUniform:
2924 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2925 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2926 break;
2927 case SPIRV::IntegerDot:
2928 if (const auto *R =
2929 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
2930 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2931 break;
2932 case SPIRV::WriteImage:
2933 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2934 case SPIRV::Select:
2935 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
2936 case SPIRV::Construct:
2937 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2938 0);
2939 case SPIRV::KernelClock:
2940 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2941 default:
2942 return std::make_tuple(-1, 0, 0);
2943 }
2944 return std::make_tuple(-1, 0, 0);
2945}
2946
2947std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
2948 SPIRV::InstructionSet::InstructionSet Set,
2949 MachineIRBuilder &MIRBuilder,
2950 const Register OrigRet, const Type *OrigRetTy,
2951 const SmallVectorImpl<Register> &Args,
2952 SPIRVGlobalRegistry *GR, const CallBase &CB) {
2953 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
2954
2955 // Lookup the builtin in the TableGen records.
2956 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
2957 assert(SpvType && "Inconsistent return register: expected valid type info");
2958 std::unique_ptr<const IncomingCall> Call =
2959 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
2960
2961 if (!Call) {
2962 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
2963 return std::nullopt;
2964 }
2965
2966 // TODO: check if the provided args meet the builtin requirements.
2967 assert(Args.size() >= Call->Builtin->MinNumArgs &&
2968 "Too few arguments to generate the builtin");
2969 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2970 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
2971
2972 // Match the builtin with implementation based on the grouping.
2973 switch (Call->Builtin->Group) {
2974 case SPIRV::Extended:
2975 return generateExtInst(Call.get(), MIRBuilder, GR, CB);
2976 case SPIRV::Relational:
2977 return generateRelationalInst(Call.get(), MIRBuilder, GR);
2978 case SPIRV::Group:
2979 return generateGroupInst(Call.get(), MIRBuilder, GR);
2980 case SPIRV::Variable:
2981 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2982 case SPIRV::Atomic:
2983 return generateAtomicInst(Call.get(), MIRBuilder, GR);
2984 case SPIRV::AtomicFloating:
2985 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2986 case SPIRV::Barrier:
2987 return generateBarrierInst(Call.get(), MIRBuilder, GR);
2988 case SPIRV::CastToPtr:
2989 return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
2990 case SPIRV::Dot:
2991 case SPIRV::IntegerDot:
2992 return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
2993 case SPIRV::Wave:
2994 return generateWaveInst(Call.get(), MIRBuilder, GR);
2995 case SPIRV::ICarryBorrow:
2996 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
2997 case SPIRV::GetQuery:
2998 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
2999 case SPIRV::ImageSizeQuery:
3000 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
3001 case SPIRV::ImageMiscQuery:
3002 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
3003 case SPIRV::ReadImage:
3004 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3005 case SPIRV::WriteImage:
3006 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
3007 case SPIRV::SampleImage:
3008 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3009 case SPIRV::Select:
3010 return generateSelectInst(Call.get(), MIRBuilder);
3011 case SPIRV::Construct:
3012 return generateConstructInst(Call.get(), MIRBuilder, GR);
3013 case SPIRV::SpecConstant:
3014 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
3015 case SPIRV::Enqueue:
3016 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
3017 case SPIRV::AsyncCopy:
3018 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
3019 case SPIRV::Convert:
3020 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
3021 case SPIRV::VectorLoadStore:
3022 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
3023 case SPIRV::LoadStore:
3024 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
3025 case SPIRV::IntelSubgroups:
3026 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
3027 case SPIRV::GroupUniform:
3028 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
3029 case SPIRV::KernelClock:
3030 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
3031 case SPIRV::CoopMatr:
3032 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
3033 case SPIRV::ExtendedBitOps:
3034 return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
3035 case SPIRV::BindlessINTEL:
3036 return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
3037 case SPIRV::TernaryBitwiseINTEL:
3038 return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
3039 case SPIRV::Block2DLoadStore:
3040 return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
3041 case SPIRV::Pipe:
3042 return generatePipeInst(Call.get(), MIRBuilder, GR);
3043 case SPIRV::PredicatedLoadStore:
3044 return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
3045 }
3046 return false;
3047}
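// Editor's note (illustrative, not part of the source): the Group field of the
// matched TableGen record drives this dispatch. For example, an OpenCL call
// demangled as "acos(float)" would typically resolve to the Extended group and
// be lowered through generateExtInst(...), while "atomic_fetch_add(...)" would
// fall into the Atomic group; the exact name-to-group mapping lives in
// SPIRVBuiltins.td.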
3048
3049Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
3050 // Parse strings representing OpenCL builtin types.
3051 if (hasBuiltinTypePrefix(TypeStr)) {
3052 // OpenCL builtin types in demangled call strings have the following format:
3053 // e.g. ocl_image2d_ro
3054 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
3055 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
3056
3057 // Check if this is a pointer to a builtin type, rather than just a
3058 // pointer representing a builtin type. If it is a pointer to a builtin
3059 // type, the method calling parseBuiltinCallArgumentBaseType(...) must
3060 // handle the pointer separately, as this function only retrieves the
3061 // base type.
3062 if (TypeStr.ends_with("*"))
3063 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
3064
3065 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
3066 Ctx);
3067 }
3068
3069 // Parse type name in either "typeN" or "type vector[N]" format, where
3070 // N is the number of elements of the vector.
3071 Type *BaseType;
3072 unsigned VecElts = 0;
3073
3074 BaseType = parseBasicTypeName(TypeStr, Ctx);
3075 if (!BaseType)
3076 // Unable to recognize SPIRV type name.
3077 return nullptr;
3078
3079 // Handle "typeN*" or "type vector[N]*".
3080 TypeStr.consume_back("*");
3081
3082 if (TypeStr.consume_front(" vector["))
3083 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
3084
3085 TypeStr.getAsInteger(10, VecElts);
3086 if (VecElts > 0)
3087 BaseType = VectorType::get(
3088 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
3089
3090 return BaseType;
3091}
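// Editor's example (illustrative, not part of the source): assuming the usual
// OpenCL spellings, this function would map
//   "float4"         -> <4 x float>
//   "uint"           -> i32
//   "ocl_image2d_ro" -> the target extension type for "opencl.image2d_ro_t"
// with any trailing "*" stripped, since only the base type is returned.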
3092
3093bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
3094 const StringRef DemangledCall, LLVMContext &Ctx) {
3095 auto Pos1 = DemangledCall.find('(');
3096 if (Pos1 == StringRef::npos)
3097 return false;
3098 auto Pos2 = DemangledCall.find(')');
3099 if (Pos2 == StringRef::npos || Pos1 > Pos2)
3100 return false;
3101 DemangledCall.slice(Pos1 + 1, Pos2)
3102 .split(BuiltinArgsTypeStrs, ',', -1, false);
3103 return true;
3104}
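// Editor's example (illustrative, not part of the source): for a demangled
// call such as "vstore4(float4, uint, float*)" the output vector receives the
// raw substrings {"float4", " uint", " float*"}; callers are expected to trim
// each entry before parsing it, as parseBuiltinCallArgumentBaseType does below.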
3105
3106Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
3107 unsigned ArgIdx, LLVMContext &Ctx) {
3108 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
3109 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
3110 if (ArgIdx >= BuiltinArgsTypeStrs.size())
3111 return nullptr;
3112 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3113 return parseBuiltinCallArgumentType(TypeStr, Ctx);
3114}
3115
3116struct BuiltinType {
3117 StringRef Name;
3118 uint32_t Opcode;
3119};
3120
3121#define GET_BuiltinTypes_DECL
3122#define GET_BuiltinTypes_IMPL
3123
3124struct OpenCLType {
3125 StringRef Name;
3126 StringRef SpirvTypeLiteral;
3127};
3128
3129#define GET_OpenCLTypes_DECL
3130#define GET_OpenCLTypes_IMPL
3131
3132#include "SPIRVGenTables.inc"
3133} // namespace SPIRV
3134
3135//===----------------------------------------------------------------------===//
3136// Misc functions for parsing builtin types.
3137//===----------------------------------------------------------------------===//
3138
3139static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
3140 if (Name.starts_with("void"))
3141 return Type::getVoidTy(Context);
3142 else if (Name.starts_with("int") || Name.starts_with("uint"))
3143 return Type::getInt32Ty(Context);
3144 else if (Name.starts_with("float"))
3145 return Type::getFloatTy(Context);
3146 else if (Name.starts_with("half"))
3147 return Type::getHalfTy(Context);
3148 report_fatal_error("Unable to recognize type!");
3149}
3150
3151//===----------------------------------------------------------------------===//
3152// Implementation functions for builtin types.
3153//===----------------------------------------------------------------------===//
3154
3155static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
3156 const SPIRV::BuiltinType *TypeRecord,
3157 MachineIRBuilder &MIRBuilder,
3158 SPIRVGlobalRegistry *GR) {
3159 unsigned Opcode = TypeRecord->Opcode;
3160 // Create or get an existing type from GlobalRegistry.
3161 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
3162}
3163
3164static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
3165 SPIRVGlobalRegistry *GR) {
3166 // Create or get an existing type from GlobalRegistry.
3167 return GR->getOrCreateOpTypeSampler(MIRBuilder);
3168}
3169
3170static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
3171 MachineIRBuilder &MIRBuilder,
3172 SPIRVGlobalRegistry *GR) {
3173 assert(ExtensionType->getNumIntParameters() == 1 &&
3174 "Invalid number of parameters for SPIR-V pipe builtin!");
3175 // Create or get an existing type from GlobalRegistry.
3176 return GR->getOrCreateOpTypePipe(MIRBuilder,
3177 SPIRV::AccessQualifier::AccessQualifier(
3178 ExtensionType->getIntParameter(0)));
3179}
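// Editor's example (illustrative, not part of the source): an IR type such as
// target("spirv.Pipe", 0) carries the access qualifier as its single integer
// parameter (0 = ReadOnly in the SPIR-V AccessQualifier enum) and lowers to
// OpTypePipe with the ReadOnly qualifier.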
3180
3181static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
3182 MachineIRBuilder &MIRBuilder,
3183 SPIRVGlobalRegistry *GR) {
3184 assert(ExtensionType->getNumIntParameters() == 4 &&
3185 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3186 assert(ExtensionType->getNumTypeParameters() == 1 &&
3187 "SPIR-V coop matrices builtin type must have a type parameter!");
3188 const SPIRVType *ElemType =
3189 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
3190 SPIRV::AccessQualifier::ReadWrite, true);
3191 // Create or get an existing type from GlobalRegistry.
3192 return GR->getOrCreateOpTypeCoopMatr(
3193 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
3194 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
3195 ExtensionType->getIntParameter(3), true);
3196}
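// Editor's example (illustrative, not part of the source): a type written as
// target("spirv.CooperativeMatrixKHR", f16, 3, 16, 16, 0) would describe a
// 16x16 f16 matrix with Subgroup scope (3) and MatrixAKHR use (0), matching
// the integer parameter order (scope, rows, columns, use) consumed above.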
3197
3198static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
3199 MachineIRBuilder &MIRBuilder,
3200 SPIRVGlobalRegistry *GR) {
3201 SPIRVType *OpaqueImageType = GR->getImageType(
3202 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3203 // Create or get an existing type from GlobalRegistry.
3204 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
3205}
3206
3207static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
3208 MachineIRBuilder &MIRBuilder,
3209 SPIRVGlobalRegistry *GR) {
3210 assert(ExtensionType->getNumIntParameters() == 3 &&
3211 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3212 "parameter");
3213 auto Opcode = ExtensionType->getIntParameter(0);
3214
3215 SmallVector<MCOperand> Operands;
3216 for (Type *Param : ExtensionType->type_params()) {
3217 if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
3218 if (ParamEType->getName() == "spirv.IntegralConstant") {
3219 assert(ParamEType->getNumTypeParameters() == 1 &&
3220 "Inline SPIR-V integral constant builtin must have a type "
3221 "parameter");
3222 assert(ParamEType->getNumIntParameters() == 1 &&
3223 "Inline SPIR-V integral constant builtin must have a "
3224 "value parameter");
3225
3226 auto OperandValue = ParamEType->getIntParameter(0);
3227 auto *OperandType = ParamEType->getTypeParameter(0);
3228
3229 const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
3230 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3231
3232 Operands.push_back(MCOperand::createReg(GR->buildConstantInt(
3233 OperandValue, MIRBuilder, OperandSPIRVType, true)));
3234 continue;
3235 } else if (ParamEType->getName() == "spirv.Literal") {
3236 assert(ParamEType->getNumTypeParameters() == 0 &&
3237 "Inline SPIR-V literal builtin does not take type "
3238 "parameters");
3239 assert(ParamEType->getNumIntParameters() == 1 &&
3240 "Inline SPIR-V literal builtin must have an integer "
3241 "parameter");
3242
3243 auto OperandValue = ParamEType->getIntParameter(0);
3244
3245 Operands.push_back(MCOperand::createImm(OperandValue));
3246 continue;
3247 }
3248 }
3249 const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
3250 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3251 Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
3252 }
3253
3254 return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
3255 Operands);
3256}
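// Editor's sketch (assumption, not from the source): with this encoding an
// OpTypeInt 32 0 could be spelled
//   target("spirv.Type", target("spirv.Literal", 32),
//          target("spirv.Literal", 0), 21, 4, 4)
// where 21 is the OpTypeInt opcode and the remaining integer parameters are
// the size and alignment; each spirv.Literal becomes an immediate operand.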
3257
3258static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
3259 MachineIRBuilder &MIRBuilder,
3260 SPIRVGlobalRegistry *GR) {
3261 assert(ExtensionType->getNumTypeParameters() == 1 &&
3262 "Vulkan buffers have exactly one type parameter: the type of the buffer.");
3263 assert(ExtensionType->getNumIntParameters() == 2 &&
3264 "Vulkan buffers have 2 integer parameters: storage class and "
3265 "writability.");
3266
3267 auto *T = ExtensionType->getTypeParameter(0);
3268 auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
3269 ExtensionType->getIntParameter(0));
3270 bool IsWritable = ExtensionType->getIntParameter(1);
3271 return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
3272}
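// Editor's example (illustrative, not part of the source): a type such as
// target("spirv.VulkanBuffer", [0 x i32], 12, 1) would describe a writable
// buffer of i32 in the StorageBuffer storage class (12).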
3273
3274static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
3275 MachineIRBuilder &MIRBuilder,
3276 SPIRVGlobalRegistry *GR) {
3277 return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
3278}
3279
3280namespace SPIRV {
3281TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
3282 LLVMContext &Context) {
3283 StringRef NameWithParameters = TypeName;
3284
3285 // Pointers-to-opaque-structs representing OpenCL types are first translated
3286 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3287 // following format: e.g. %opencl.event_t
3288 if (NameWithParameters.starts_with("opencl.")) {
3289 const SPIRV::OpenCLType *OCLTypeRecord =
3290 SPIRV::lookupOpenCLType(NameWithParameters);
3291 if (!OCLTypeRecord)
3292 report_fatal_error("Missing TableGen record for OpenCL type: " +
3293 NameWithParameters);
3294 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3295 // Continue with the SPIR-V builtin type...
3296 }
3297
3298 // Names of the opaque structs representing SPIR-V builtins without
3299 // parameters should have the following format: e.g. %spirv.Event
3300 assert(NameWithParameters.starts_with("spirv.") &&
3301 "Unknown builtin opaque type!");
3302
3303 // Parameterized SPIR-V builtin names follow this format:
3304 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3305 if (!NameWithParameters.contains('_'))
3306 return TargetExtType::get(Context, NameWithParameters);
3307
3308 SmallVector<StringRef> Parameters;
3309 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3310 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3311
3312 SmallVector<Type *, 1> TypeParameters;
3313 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3314 if (HasTypeParameter)
3315 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3316 SmallVector<unsigned> IntParameters;
3317 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3318 unsigned IntParameter = 0;
3319 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3320 (void)ValidLiteral;
3321 assert(ValidLiteral &&
3322 "Invalid format of SPIR-V builtin parameter literal!");
3323 IntParameters.push_back(IntParameter);
3324 }
3325 return TargetExtType::get(Context,
3326 NameWithParameters.substr(0, BaseNameLength),
3327 TypeParameters, IntParameters);
3328}
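// Editor's example (illustrative, not part of the source):
//   "opencl.event_t"                  -> target("spirv.Event")
//   "spirv.Image._void_1_0_0_0_0_0_0" -> target("spirv.Image", void,
//                                        1, 0, 0, 0, 0, 0, 0)
// i.e. one type parameter parsed by parseTypeString plus seven integer
// literals.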
3329
3330SPIRVType *lowerBuiltinType(const Type *OpaqueType,
3331 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3332 MachineIRBuilder &MIRBuilder,
3333 SPIRVGlobalRegistry *GR) {
3334 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3335 // target(...) target extension types or pointers-to-opaque-structs. The
3336 // approach relying on structs is deprecated and works only in the non-opaque
3337 // pointer mode (-opaque-pointers=0).
3338 // To maintain compatibility with LLVM IR generated by older versions of
3339 // Clang and the LLVM/SPIR-V Translator, pointers-to-opaque-structs are
3340 // "translated" to target extension types. This translation is temporary
3341 // and will be removed in a future release of LLVM.
3342 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
3343 if (!BuiltinType)
3344 BuiltinType = parseBuiltinTypeNameToTargetExtType(
3345 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3346
3347 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3348
3349 const StringRef Name = BuiltinType->getName();
3350 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3351
3352 SPIRVType *TargetType;
3353 if (Name == "spirv.Type") {
3354 TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
3355 } else if (Name == "spirv.VulkanBuffer") {
3356 TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
3357 } else if (Name == "spirv.Layout") {
3358 TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
3359 } else {
3360 // Lookup the demangled builtin type in the TableGen records.
3361 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3362 if (!TypeRecord)
3363 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3364
3365 // "Lower" the BuiltinType into TargetType. The following get<...>Type
3366 // methods use the implementation details from TableGen records or
3367 // TargetExtType parameters to either create a new OpType<...> machine
3368 // instruction or get an existing equivalent SPIRVType from
3369 // GlobalRegistry.
3370
3371 switch (TypeRecord->Opcode) {
3372 case SPIRV::OpTypeImage:
3373 TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
3374 break;
3375 case SPIRV::OpTypePipe:
3376 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3377 break;
3378 case SPIRV::OpTypeDeviceEvent:
3379 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3380 break;
3381 case SPIRV::OpTypeSampler:
3382 TargetType = getSamplerType(MIRBuilder, GR);
3383 break;
3384 case SPIRV::OpTypeSampledImage:
3385 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3386 break;
3387 case SPIRV::OpTypeCooperativeMatrixKHR:
3388 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3389 break;
3390 default:
3391 TargetType =
3392 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3393 break;
3394 }
3395 }
3396
3397 // Emit OpName instruction if a new OpType<...> instruction was added
3398 // (equivalent type was not found in GlobalRegistry).
3399 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3400 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3401
3402 return TargetType;
3403}
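// Editor's note (illustrative, not part of the source): both representations
// converge here, e.g. a pointer to %opencl.sampler_t and a value of type
// target("spirv.Sampler") should both resolve to a single OpTypeSampler in
// the emitted module, with OpName attached only when a new OpType instruction
// was created.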
3404} // namespace SPIRV
3405} // namespace llvm