//===--- AMDHSAKernelDescriptor.h -----------------------------*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// AMDHSA kernel descriptor definitions. For more information, visit
/// https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor
///
/// \warning
/// Any changes to this file should also be audited for corresponding changes
/// needed in both the assembler and disassembler, namely:
/// * AMDGPUAsmPrinter.{cpp,h}
/// * AMDGPUTargetStreamer.{cpp,h}
/// * AMDGPUDisassembler.{cpp,h}
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
#define LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H

#include <cstddef>
#include <cstdint>

// Gets offset of specified member in specified type.
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE*)0)->MEMBER)
#endif // offsetof

// Creates enumeration entries used for packing bits into integers. Enumeration
// entries include bit shift amount, bit width, and bit mask.
#ifndef AMDHSA_BITS_ENUM_ENTRY
#define AMDHSA_BITS_ENUM_ENTRY(NAME, SHIFT, WIDTH) \
  NAME ## _SHIFT = (SHIFT), \
  NAME ## _WIDTH = (WIDTH), \
  NAME = (((1 << (WIDTH)) - 1) << (SHIFT))
#endif // AMDHSA_BITS_ENUM_ENTRY

// Gets bits for specified bit mask from specified source.
#ifndef AMDHSA_BITS_GET
#define AMDHSA_BITS_GET(SRC, MSK) ((SRC & MSK) >> MSK ## _SHIFT)
#endif // AMDHSA_BITS_GET

// Sets bits for specified bit mask in specified destination.
#ifndef AMDHSA_BITS_SET
#define AMDHSA_BITS_SET(DST, MSK, VAL) \
  do { \
    auto local = VAL; \
    DST &= ~MSK; \
    DST |= ((local << MSK##_SHIFT) & MSK); \
  } while (0)
#endif // AMDHSA_BITS_SET
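
// Illustrative sketch (not part of the original header): how the three helper
// macros above fit together. `EXAMPLE_FIELD` is a made-up name used only for
// this example; the real fields are declared in the enums further below.
//
//   enum : uint32_t {
//     AMDHSA_BITS_ENUM_ENTRY(EXAMPLE_FIELD, 4, 2),
//     // Expands to: EXAMPLE_FIELD_SHIFT = 4, EXAMPLE_FIELD_WIDTH = 2,
//     //             EXAMPLE_FIELD = ((1 << 2) - 1) << 4 = 0x30 (the mask).
//   };
//
//   uint32_t reg = 0;
//   AMDHSA_BITS_SET(reg, EXAMPLE_FIELD, 3);            // reg == 0x30
//   uint32_t v = AMDHSA_BITS_GET(reg, EXAMPLE_FIELD);  // v == 3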

namespace llvm {
namespace amdhsa {

// Floating point rounding modes. Must match hardware definition.
enum : uint8_t {
  FLOAT_ROUND_MODE_NEAR_EVEN = 0,
  FLOAT_ROUND_MODE_PLUS_INFINITY = 1,
  FLOAT_ROUND_MODE_MINUS_INFINITY = 2,
  FLOAT_ROUND_MODE_ZERO = 3,
};

// Floating point denorm modes. Must match hardware definition.
enum : uint8_t {
  FLOAT_DENORM_MODE_FLUSH_SRC_DST = 0,
  FLOAT_DENORM_MODE_FLUSH_DST = 1,
  FLOAT_DENORM_MODE_FLUSH_SRC = 2,
  FLOAT_DENORM_MODE_FLUSH_NONE = 3,
};

// System VGPR workitem IDs. Must match hardware definition.
enum : uint8_t {
  SYSTEM_VGPR_WORKITEM_ID_X = 0,
  SYSTEM_VGPR_WORKITEM_ID_X_Y = 1,
  SYSTEM_VGPR_WORKITEM_ID_X_Y_Z = 2,
  SYSTEM_VGPR_WORKITEM_ID_UNDEFINED = 3,
};

// Compute program resource register 1. Must match hardware definition.
// GFX6+.
#define COMPUTE_PGM_RSRC1(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_##NAME, SHIFT, WIDTH)
// [GFX6-GFX8].
#define COMPUTE_PGM_RSRC1_GFX6_GFX8(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX8_##NAME, SHIFT, WIDTH)
// [GFX6-GFX9].
#define COMPUTE_PGM_RSRC1_GFX6_GFX9(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX9_##NAME, SHIFT, WIDTH)
// [GFX6-GFX11].
#define COMPUTE_PGM_RSRC1_GFX6_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX11_##NAME, SHIFT, WIDTH)
// [GFX6-GFX120].
#define COMPUTE_PGM_RSRC1_GFX6_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX120_##NAME, SHIFT, WIDTH)
// GFX9+.
#define COMPUTE_PGM_RSRC1_GFX9_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX9_PLUS_##NAME, SHIFT, WIDTH)
// GFX10+.
#define COMPUTE_PGM_RSRC1_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX10_PLUS_##NAME, SHIFT, WIDTH)
// GFX12+.
#define COMPUTE_PGM_RSRC1_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX12_PLUS_##NAME, SHIFT, WIDTH)
// [GFX125].
#define COMPUTE_PGM_RSRC1_GFX125(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX125_##NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC1(GRANULATED_WORKITEM_VGPR_COUNT, 0, 6),
  COMPUTE_PGM_RSRC1(GRANULATED_WAVEFRONT_SGPR_COUNT, 6, 4),
  COMPUTE_PGM_RSRC1(PRIORITY, 10, 2),
  COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_32, 12, 2),
  COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_16_64, 14, 2),
  COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_32, 16, 2),
  COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_16_64, 18, 2),
  COMPUTE_PGM_RSRC1(PRIV, 20, 1),
  COMPUTE_PGM_RSRC1_GFX6_GFX11(ENABLE_DX10_CLAMP, 21, 1),
  COMPUTE_PGM_RSRC1_GFX12_PLUS(ENABLE_WG_RR_EN, 21, 1),
  COMPUTE_PGM_RSRC1(DEBUG_MODE, 22, 1),
  COMPUTE_PGM_RSRC1_GFX6_GFX11(ENABLE_IEEE_MODE, 23, 1),
  COMPUTE_PGM_RSRC1_GFX12_PLUS(DISABLE_PERF, 23, 1),
  COMPUTE_PGM_RSRC1(BULKY, 24, 1),
  COMPUTE_PGM_RSRC1(CDBG_USER, 25, 1),
  COMPUTE_PGM_RSRC1_GFX6_GFX8(RESERVED0, 26, 1),
  COMPUTE_PGM_RSRC1_GFX9_PLUS(FP16_OVFL, 26, 1),
  COMPUTE_PGM_RSRC1_GFX6_GFX120(RESERVED1, 27, 1),
  COMPUTE_PGM_RSRC1_GFX125(FLAT_SCRATCH_IS_NV, 27, 1),
  COMPUTE_PGM_RSRC1(RESERVED2, 28, 1),
  COMPUTE_PGM_RSRC1_GFX6_GFX9(RESERVED3, 29, 1),
  COMPUTE_PGM_RSRC1_GFX10_PLUS(WGP_MODE, 29, 1),
  COMPUTE_PGM_RSRC1_GFX10_PLUS(MEM_ORDERED, 30, 1),
  COMPUTE_PGM_RSRC1_GFX10_PLUS(FWD_PROGRESS, 31, 1),
};
#undef COMPUTE_PGM_RSRC1

// Compute program resource register 2. Must match hardware definition.
// GFX6+.
#define COMPUTE_PGM_RSRC2(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_##NAME, SHIFT, WIDTH)
// [GFX6-GFX11].
#define COMPUTE_PGM_RSRC2_GFX6_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX6_GFX11_##NAME, SHIFT, WIDTH)
// [GFX6-GFX120].
#define COMPUTE_PGM_RSRC2_GFX6_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX6_GFX120_##NAME, SHIFT, WIDTH)
// GFX12+.
#define COMPUTE_PGM_RSRC2_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX12_PLUS_##NAME, SHIFT, WIDTH)
// [GFX120].
#define COMPUTE_PGM_RSRC2_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX120_##NAME, SHIFT, WIDTH)
// [GFX125].
#define COMPUTE_PGM_RSRC2_GFX125(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX125_##NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC2(ENABLE_PRIVATE_SEGMENT, 0, 1),
  COMPUTE_PGM_RSRC2_GFX6_GFX120(USER_SGPR_COUNT, 1, 5),
  COMPUTE_PGM_RSRC2_GFX6_GFX11(ENABLE_TRAP_HANDLER, 6, 1),
  COMPUTE_PGM_RSRC2_GFX120(ENABLE_DYNAMIC_VGPR, 6, 1),
  COMPUTE_PGM_RSRC2_GFX125(USER_SGPR_COUNT, 1, 6),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_X, 7, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Y, 8, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Z, 9, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_INFO, 10, 1),
  COMPUTE_PGM_RSRC2(ENABLE_VGPR_WORKITEM_ID, 11, 2),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_ADDRESS_WATCH, 13, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_MEMORY, 14, 1),
  COMPUTE_PGM_RSRC2(GRANULATED_LDS_SIZE, 15, 9),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, 24, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_FP_DENORMAL_SOURCE, 25, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, 26, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW, 27, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW, 28, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INEXACT, 29, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO, 30, 1),
  COMPUTE_PGM_RSRC2(RESERVED0, 31, 1),
};
#undef COMPUTE_PGM_RSRC2
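
// Illustrative sketch (not part of the original header): reading and updating
// one of the COMPUTE_PGM_RSRC2 fields with the helper macros above. `rsrc2` is
// an assumed local variable holding a copy of a descriptor's compute_pgm_rsrc2
// value.
//
//   uint32_t rsrc2 = /* value of compute_pgm_rsrc2 */ 0;
//   // Extract the 2-bit ENABLE_VGPR_WORKITEM_ID field (bits 11..12).
//   uint32_t workitem_ids =
//       AMDHSA_BITS_GET(rsrc2, COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
//   // Request packed X and Y workitem IDs in VGPRs.
//   AMDHSA_BITS_SET(rsrc2, COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID,
//                   SYSTEM_VGPR_WORKITEM_ID_X_Y);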

// Compute program resource register 3 for GFX90A+. Must match hardware
// definition.
#define COMPUTE_PGM_RSRC3_GFX90A(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX90A_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC3_GFX90A(ACCUM_OFFSET, 0, 6),
  COMPUTE_PGM_RSRC3_GFX90A(RESERVED0, 6, 10),
  COMPUTE_PGM_RSRC3_GFX90A(TG_SPLIT, 16, 1),
  COMPUTE_PGM_RSRC3_GFX90A(RESERVED1, 17, 15),
};
#undef COMPUTE_PGM_RSRC3_GFX90A
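
// Illustrative sketch (not part of the original header): decoding the GFX90A
// fields from a compute_pgm_rsrc3 value. `rsrc3` is an assumed local variable.
// The interpretation of ACCUM_OFFSET as a granulated value (first AccVGPR =
// 4 * (field + 1)) follows the AMDGPUUsage kernel-descriptor description and
// should be double-checked against that document.
//
//   uint32_t rsrc3 = /* value of compute_pgm_rsrc3 */ 0;
//   uint32_t accum_field =
//       AMDHSA_BITS_GET(rsrc3, COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET);
//   uint32_t first_accvgpr = 4 * (accum_field + 1);
//   bool tg_split = AMDHSA_BITS_GET(rsrc3, COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);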

// Compute program resource register 3 for GFX10+. Must match hardware
// definition.
// GFX10+.
#define COMPUTE_PGM_RSRC3_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_PLUS_##NAME, SHIFT, WIDTH)
// [GFX10].
#define COMPUTE_PGM_RSRC3_GFX10(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_##NAME, SHIFT, WIDTH)
// [GFX10-GFX11].
#define COMPUTE_PGM_RSRC3_GFX10_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_GFX11_##NAME, SHIFT, WIDTH)
// [GFX10-GFX120].
#define COMPUTE_PGM_RSRC3_GFX10_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_GFX120_##NAME, SHIFT, WIDTH)
// GFX11+.
#define COMPUTE_PGM_RSRC3_GFX11_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX11_PLUS_##NAME, SHIFT, WIDTH)
// [GFX11].
#define COMPUTE_PGM_RSRC3_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX11_##NAME, SHIFT, WIDTH)
// GFX12+.
#define COMPUTE_PGM_RSRC3_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX12_PLUS_##NAME, SHIFT, WIDTH)
// [GFX125].
#define COMPUTE_PGM_RSRC3_GFX125(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX125_##NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC3_GFX10_GFX11(SHARED_VGPR_COUNT, 0, 4),
  COMPUTE_PGM_RSRC3_GFX12_PLUS(RESERVED0, 0, 4),
  COMPUTE_PGM_RSRC3_GFX10(RESERVED1, 4, 8),
  COMPUTE_PGM_RSRC3_GFX11(INST_PREF_SIZE, 4, 6),
  COMPUTE_PGM_RSRC3_GFX11(TRAP_ON_START, 10, 1),
  COMPUTE_PGM_RSRC3_GFX11(TRAP_ON_END, 11, 1),
  COMPUTE_PGM_RSRC3_GFX12_PLUS(INST_PREF_SIZE, 4, 8),
  COMPUTE_PGM_RSRC3_GFX10_PLUS(RESERVED2, 12, 1),
  COMPUTE_PGM_RSRC3_GFX10_GFX11(RESERVED3, 13, 1),
  COMPUTE_PGM_RSRC3_GFX12_PLUS(GLG_EN, 13, 1),
  COMPUTE_PGM_RSRC3_GFX10_GFX120(RESERVED4, 14, 8),
  COMPUTE_PGM_RSRC3_GFX125(NAMED_BAR_CNT, 14, 3),
  COMPUTE_PGM_RSRC3_GFX125(ENABLE_DYNAMIC_VGPR, 17, 1),
  COMPUTE_PGM_RSRC3_GFX125(TCP_SPLIT, 18, 3),
  COMPUTE_PGM_RSRC3_GFX125(ENABLE_DIDT_THROTTLE, 21, 1),
  COMPUTE_PGM_RSRC3_GFX10_PLUS(RESERVED5, 22, 9),
  COMPUTE_PGM_RSRC3_GFX10(RESERVED6, 31, 1),
  COMPUTE_PGM_RSRC3_GFX11_PLUS(IMAGE_OP, 31, 1),
};
#undef COMPUTE_PGM_RSRC3_GFX10_PLUS

// Kernel code properties. Must be kept backwards compatible.
#define KERNEL_CODE_PROPERTY(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNEL_CODE_PROPERTY_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER, 0, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_PTR, 1, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_QUEUE_PTR, 2, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_KERNARG_SEGMENT_PTR, 3, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_ID, 4, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_FLAT_SCRATCH_INIT, 5, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_SIZE, 6, 1),
  KERNEL_CODE_PROPERTY(RESERVED0, 7, 2),
  KERNEL_CODE_PROPERTY(USES_CU_STORES, 9, 1), // GFX12.5 +cu-stores
  KERNEL_CODE_PROPERTY(ENABLE_WAVEFRONT_SIZE32, 10, 1), // GFX10+
  KERNEL_CODE_PROPERTY(USES_DYNAMIC_STACK, 11, 1),
  KERNEL_CODE_PROPERTY(RESERVED1, 12, 4),
};
#undef KERNEL_CODE_PROPERTY

// Kernarg preload specification.
#define KERNARG_PRELOAD_SPEC(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNARG_PRELOAD_SPEC_##NAME, SHIFT, WIDTH)
enum : int32_t {
  KERNARG_PRELOAD_SPEC(LENGTH, 0, 7),
  KERNARG_PRELOAD_SPEC(OFFSET, 7, 9),
};
#undef KERNARG_PRELOAD_SPEC
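
// Illustrative sketch (not part of the original header): decoding the
// kernarg_preload field of a kernel descriptor. `preload` is an assumed local
// copy of that 16-bit field; see the AMDGPUUsage documentation for the exact
// meaning of the length and offset values.
//
//   uint16_t preload = /* value of kernarg_preload */ 0;
//   uint32_t preload_length =
//       AMDHSA_BITS_GET(preload, KERNARG_PRELOAD_SPEC_LENGTH);
//   uint32_t preload_offset =
//       AMDHSA_BITS_GET(preload, KERNARG_PRELOAD_SPEC_OFFSET);
//   bool preloads_kernargs = preload_length != 0;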

// Kernel descriptor. Must be kept backwards compatible.
struct kernel_descriptor_t {
  uint32_t group_segment_fixed_size;
  uint32_t private_segment_fixed_size;
  uint32_t kernarg_size;
  uint8_t reserved0[4];
  int64_t kernel_code_entry_byte_offset;
  uint8_t reserved1[20];
  uint32_t compute_pgm_rsrc3; // GFX10+ and GFX90A+
  uint32_t compute_pgm_rsrc1;
  uint32_t compute_pgm_rsrc2;
  uint16_t kernel_code_properties;
  uint16_t kernarg_preload;
  uint8_t reserved3[4];
};

enum : uint32_t {
  GROUP_SEGMENT_FIXED_SIZE_OFFSET = 0,
  PRIVATE_SEGMENT_FIXED_SIZE_OFFSET = 4,
  KERNARG_SIZE_OFFSET = 8,
  RESERVED0_OFFSET = 12,
  KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET = 16,
  RESERVED1_OFFSET = 24,
  COMPUTE_PGM_RSRC3_OFFSET = 44,
  COMPUTE_PGM_RSRC1_OFFSET = 48,
  COMPUTE_PGM_RSRC2_OFFSET = 52,
  KERNEL_CODE_PROPERTIES_OFFSET = 56,
  KERNARG_PRELOAD_OFFSET = 58,
  RESERVED3_OFFSET = 60,
};

static_assert(
    sizeof(kernel_descriptor_t) == 64,
    "invalid size for kernel_descriptor_t");
static_assert(offsetof(kernel_descriptor_t, group_segment_fixed_size) ==
                  GROUP_SEGMENT_FIXED_SIZE_OFFSET,
              "invalid offset for group_segment_fixed_size");
static_assert(offsetof(kernel_descriptor_t, private_segment_fixed_size) ==
                  PRIVATE_SEGMENT_FIXED_SIZE_OFFSET,
              "invalid offset for private_segment_fixed_size");
static_assert(offsetof(kernel_descriptor_t, kernarg_size) ==
                  KERNARG_SIZE_OFFSET,
              "invalid offset for kernarg_size");
static_assert(offsetof(kernel_descriptor_t, reserved0) == RESERVED0_OFFSET,
              "invalid offset for reserved0");
static_assert(offsetof(kernel_descriptor_t, kernel_code_entry_byte_offset) ==
                  KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET,
              "invalid offset for kernel_code_entry_byte_offset");
static_assert(offsetof(kernel_descriptor_t, reserved1) == RESERVED1_OFFSET,
              "invalid offset for reserved1");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc3) ==
                  COMPUTE_PGM_RSRC3_OFFSET,
              "invalid offset for compute_pgm_rsrc3");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc1) ==
                  COMPUTE_PGM_RSRC1_OFFSET,
              "invalid offset for compute_pgm_rsrc1");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc2) ==
                  COMPUTE_PGM_RSRC2_OFFSET,
              "invalid offset for compute_pgm_rsrc2");
static_assert(offsetof(kernel_descriptor_t, kernel_code_properties) ==
                  KERNEL_CODE_PROPERTIES_OFFSET,
              "invalid offset for kernel_code_properties");
static_assert(offsetof(kernel_descriptor_t, kernarg_preload) ==
                  KERNARG_PRELOAD_OFFSET,
              "invalid offset for kernarg_preload");
static_assert(offsetof(kernel_descriptor_t, reserved3) == RESERVED3_OFFSET,
              "invalid offset for reserved3");

} // end namespace amdhsa
} // end namespace llvm

#endif // LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H