//===--- AMDHSAKernelDescriptor.h -----------------------------*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// AMDHSA kernel descriptor definitions. For more information, visit
/// https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor
///
/// \warning
/// Any changes to this file should also be audited for corresponding changes
/// needed in both the assembler and disassembler, namely:
/// * AMDGPUAsmPrinter.{cpp,h}
/// * AMDGPUTargetStreamer.{cpp,h}
/// * AMDGPUDisassembler.{cpp,h}
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
#define LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H

#include <cstddef>
#include <cstdint>

// Creates enumeration entries used for packing bits into integers. Enumeration
// entries include bit shift amount, bit width, and bit mask.
#ifndef AMDHSA_BITS_ENUM_ENTRY
#define AMDHSA_BITS_ENUM_ENTRY(NAME, SHIFT, WIDTH) \
  NAME ## _SHIFT = (SHIFT), \
  NAME ## _WIDTH = (WIDTH), \
  NAME = (((1 << (WIDTH)) - 1) << (SHIFT))
#endif // AMDHSA_BITS_ENUM_ENTRY
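
// For illustration only (not part of this header): a hypothetical invocation
// AMDHSA_BITS_ENUM_ENTRY(EXAMPLE_FIELD, 4, 2) would expand to the three
// enumerators
//
//   EXAMPLE_FIELD_SHIFT = 4,
//   EXAMPLE_FIELD_WIDTH = 2,
//   EXAMPLE_FIELD = 0x30, // (((1 << 2) - 1) << 4)
//
// i.e. the field's shift amount, bit width, and bit mask.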

// Gets bits for specified bit mask from specified source.
#ifndef AMDHSA_BITS_GET
#define AMDHSA_BITS_GET(SRC, MSK) ((SRC & MSK) >> MSK ## _SHIFT)
#endif // AMDHSA_BITS_GET

// Sets bits for specified bit mask in specified destination.
#ifndef AMDHSA_BITS_SET
#define AMDHSA_BITS_SET(DST, MSK, VAL) \
  do { \
    auto local = VAL; \
    DST &= ~MSK; \
    DST |= ((local << MSK##_SHIFT) & MSK); \
  } while (0)
#endif // AMDHSA_BITS_SET
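
// Usage sketch (illustrative only, reusing the hypothetical EXAMPLE_FIELD
// enumerators above): AMDHSA_BITS_SET packs a value into a register image and
// AMDHSA_BITS_GET extracts it again.
//
//   uint32_t reg = 0;
//   AMDHSA_BITS_SET(reg, EXAMPLE_FIELD, 2);              // reg == 0x20
//   uint32_t val = AMDHSA_BITS_GET(reg, EXAMPLE_FIELD);  // val == 2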

namespace llvm {
namespace amdhsa {

// Floating point rounding modes. Must match hardware definition.
enum : uint8_t {
  FLOAT_ROUND_MODE_NEAR_EVEN = 0,
  FLOAT_ROUND_MODE_PLUS_INFINITY = 1,
  FLOAT_ROUND_MODE_MINUS_INFINITY = 2,
  FLOAT_ROUND_MODE_ZERO = 3,
};

// Floating point denorm modes. Must match hardware definition.
enum : uint8_t {
  FLOAT_DENORM_MODE_FLUSH_SRC_DST = 0,
  FLOAT_DENORM_MODE_FLUSH_DST = 1,
  FLOAT_DENORM_MODE_FLUSH_SRC = 2,
  FLOAT_DENORM_MODE_FLUSH_NONE = 3,
};

// System VGPR workitem IDs. Must match hardware definition.
enum : uint8_t {
  SYSTEM_VGPR_WORKITEM_ID_X = 0,
  SYSTEM_VGPR_WORKITEM_ID_X_Y = 1,
  SYSTEM_VGPR_WORKITEM_ID_X_Y_Z = 2,
  SYSTEM_VGPR_WORKITEM_ID_UNDEFINED = 3,
};

// Compute program resource register 1. Must match hardware definition.
// GFX6+.
#define COMPUTE_PGM_RSRC1(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_##NAME, SHIFT, WIDTH)
// [GFX6-GFX8].
#define COMPUTE_PGM_RSRC1_GFX6_GFX8(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX8_##NAME, SHIFT, WIDTH)
// [GFX6-GFX9].
#define COMPUTE_PGM_RSRC1_GFX6_GFX9(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX9_##NAME, SHIFT, WIDTH)
// [GFX6-GFX11].
#define COMPUTE_PGM_RSRC1_GFX6_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX11_##NAME, SHIFT, WIDTH)
// [GFX6-GFX120].
#define COMPUTE_PGM_RSRC1_GFX6_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX120_##NAME, SHIFT, WIDTH)
// GFX9+.
#define COMPUTE_PGM_RSRC1_GFX9_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX9_PLUS_##NAME, SHIFT, WIDTH)
// GFX10+.
#define COMPUTE_PGM_RSRC1_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX10_PLUS_##NAME, SHIFT, WIDTH)
// GFX12+.
#define COMPUTE_PGM_RSRC1_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX12_PLUS_##NAME, SHIFT, WIDTH)
// [GFX125].
#define COMPUTE_PGM_RSRC1_GFX125(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX125_##NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC1(GRANULATED_WORKITEM_VGPR_COUNT, 0, 6),
  COMPUTE_PGM_RSRC1(GRANULATED_WAVEFRONT_SGPR_COUNT, 6, 4),
  COMPUTE_PGM_RSRC1(PRIORITY, 10, 2),
  COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_32, 12, 2),
  COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_16_64, 14, 2),
  COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_32, 16, 2),
  COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_16_64, 18, 2),
  COMPUTE_PGM_RSRC1(PRIV, 20, 1),
  COMPUTE_PGM_RSRC1_GFX6_GFX11(ENABLE_DX10_CLAMP, 21, 1),
  COMPUTE_PGM_RSRC1_GFX12_PLUS(ENABLE_WG_RR_EN, 21, 1),
  COMPUTE_PGM_RSRC1(DEBUG_MODE, 22, 1),
  COMPUTE_PGM_RSRC1_GFX6_GFX11(ENABLE_IEEE_MODE, 23, 1),
  COMPUTE_PGM_RSRC1_GFX12_PLUS(DISABLE_PERF, 23, 1),
  COMPUTE_PGM_RSRC1(BULKY, 24, 1),
  COMPUTE_PGM_RSRC1(CDBG_USER, 25, 1),
  COMPUTE_PGM_RSRC1_GFX125(FLAT_SCRATCH_IS_NV, 27, 1),
  COMPUTE_PGM_RSRC1(RESERVED2, 28, 1),
  COMPUTE_PGM_RSRC1_GFX10_PLUS(MEM_ORDERED, 30, 1),
  COMPUTE_PGM_RSRC1_GFX10_PLUS(FWD_PROGRESS, 31, 1),
};
#undef COMPUTE_PGM_RSRC1

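// Decoding sketch (illustrative only; the register value is made up): a
// disassembler-style read of two fields from a raw COMPUTE_PGM_RSRC1 word.
//
//   uint32_t rsrc1 = 0x00000045; // hypothetical register image
//   uint32_t vgpr_granules =
//       AMDHSA_BITS_GET(rsrc1, COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT);
//   uint32_t sgpr_granules =
//       AMDHSA_BITS_GET(rsrc1, COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT);
//   // vgpr_granules == 5, sgpr_granules == 1 for this value.
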
// Compute program resource register 2. Must match hardware definition.
// GFX6+.
#define COMPUTE_PGM_RSRC2(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_##NAME, SHIFT, WIDTH)
// [GFX6-GFX11].
#define COMPUTE_PGM_RSRC2_GFX6_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX6_GFX11_##NAME, SHIFT, WIDTH)
// [GFX6-GFX120].
#define COMPUTE_PGM_RSRC2_GFX6_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX6_GFX120_##NAME, SHIFT, WIDTH)
// GFX12+.
#define COMPUTE_PGM_RSRC2_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX12_PLUS_##NAME, SHIFT, WIDTH)
// [GFX120].
#define COMPUTE_PGM_RSRC2_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX120_##NAME, SHIFT, WIDTH)
// [GFX125].
#define COMPUTE_PGM_RSRC2_GFX125(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX125_##NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC2(ENABLE_PRIVATE_SEGMENT, 0, 1),
  COMPUTE_PGM_RSRC2_GFX6_GFX120(USER_SGPR_COUNT, 1, 5),
  COMPUTE_PGM_RSRC2_GFX6_GFX11(ENABLE_TRAP_HANDLER, 6, 1),
  COMPUTE_PGM_RSRC2_GFX120(ENABLE_DYNAMIC_VGPR, 6, 1),
  COMPUTE_PGM_RSRC2_GFX125(USER_SGPR_COUNT, 1, 6),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_X, 7, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Y, 8, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Z, 9, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_INFO, 10, 1),
  COMPUTE_PGM_RSRC2(ENABLE_VGPR_WORKITEM_ID, 11, 2),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_ADDRESS_WATCH, 13, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_MEMORY, 14, 1),
  COMPUTE_PGM_RSRC2(GRANULATED_LDS_SIZE, 15, 9),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, 24, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_FP_DENORMAL_SOURCE, 25, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, 26, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW, 27, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW, 28, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INEXACT, 29, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO, 30, 1),
  COMPUTE_PGM_RSRC2(RESERVED0, 31, 1),
};
#undef COMPUTE_PGM_RSRC2

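// Encoding sketch (illustrative only): building a COMPUTE_PGM_RSRC2 word that
// requests the X and Y workgroup-ID SGPRs; the user SGPR count of 4 is a
// hypothetical value, packed into the GFX6-GFX120 field declared above.
//
//   uint32_t rsrc2 = 0;
//   AMDHSA_BITS_SET(rsrc2, COMPUTE_PGM_RSRC2_GFX6_GFX120_USER_SGPR_COUNT, 4);
//   AMDHSA_BITS_SET(rsrc2, COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
//   AMDHSA_BITS_SET(rsrc2, COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, 1);
//   // rsrc2 == 0x00000188
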
// Compute program resource register 3 for GFX90A+. Must match hardware
// definition.
#define COMPUTE_PGM_RSRC3_GFX90A(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX90A_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC3_GFX90A(ACCUM_OFFSET, 0, 6),
  COMPUTE_PGM_RSRC3_GFX90A(RESERVED0, 6, 10),
  COMPUTE_PGM_RSRC3_GFX90A(TG_SPLIT, 16, 1),
  COMPUTE_PGM_RSRC3_GFX90A(RESERVED1, 17, 15),
};
#undef COMPUTE_PGM_RSRC3_GFX90A

// Compute program resource register 3 for GFX10+. Must match hardware
// definition.
// GFX10+.
#define COMPUTE_PGM_RSRC3_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_PLUS_##NAME, SHIFT, WIDTH)
// [GFX10].
#define COMPUTE_PGM_RSRC3_GFX10(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_##NAME, SHIFT, WIDTH)
// [GFX10-GFX11].
#define COMPUTE_PGM_RSRC3_GFX10_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_GFX11_##NAME, SHIFT, WIDTH)
// [GFX10-GFX120].
#define COMPUTE_PGM_RSRC3_GFX10_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_GFX120_##NAME, SHIFT, WIDTH)
// GFX11+.
#define COMPUTE_PGM_RSRC3_GFX11_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX11_PLUS_##NAME, SHIFT, WIDTH)
// [GFX11].
#define COMPUTE_PGM_RSRC3_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX11_##NAME, SHIFT, WIDTH)
// GFX12+.
#define COMPUTE_PGM_RSRC3_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX12_PLUS_##NAME, SHIFT, WIDTH)
// [GFX125].
#define COMPUTE_PGM_RSRC3_GFX125(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX125_##NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC3_GFX10_GFX11(SHARED_VGPR_COUNT, 0, 4),
  COMPUTE_PGM_RSRC3_GFX10(RESERVED1, 4, 8),
  COMPUTE_PGM_RSRC3_GFX11(INST_PREF_SIZE, 4, 6),
  COMPUTE_PGM_RSRC3_GFX11(TRAP_ON_START, 10, 1),
  COMPUTE_PGM_RSRC3_GFX11(TRAP_ON_END, 11, 1),
  COMPUTE_PGM_RSRC3_GFX12_PLUS(INST_PREF_SIZE, 4, 8),
  COMPUTE_PGM_RSRC3_GFX10_GFX11(RESERVED3, 13, 1),
  COMPUTE_PGM_RSRC3_GFX12_PLUS(GLG_EN, 13, 1),
  COMPUTE_PGM_RSRC3_GFX125(NAMED_BAR_CNT, 14, 3),
  COMPUTE_PGM_RSRC3_GFX125(ENABLE_DYNAMIC_VGPR, 17, 1),
  COMPUTE_PGM_RSRC3_GFX125(TCP_SPLIT, 18, 3),
  COMPUTE_PGM_RSRC3_GFX125(ENABLE_DIDT_THROTTLE, 21, 1),
  COMPUTE_PGM_RSRC3_GFX10_PLUS(RESERVED5, 22, 9),
  COMPUTE_PGM_RSRC3_GFX10(RESERVED6, 31, 1),
};
#undef COMPUTE_PGM_RSRC3_GFX10_PLUS

// Kernel code properties. Must be kept backwards compatible.
#define KERNEL_CODE_PROPERTY(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNEL_CODE_PROPERTY_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER, 0, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_PTR, 1, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_QUEUE_PTR, 2, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_KERNARG_SEGMENT_PTR, 3, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_ID, 4, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_FLAT_SCRATCH_INIT, 5, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_SIZE, 6, 1),
  KERNEL_CODE_PROPERTY(RESERVED0, 7, 3),
  KERNEL_CODE_PROPERTY(ENABLE_WAVEFRONT_SIZE32, 10, 1), // GFX10+
  KERNEL_CODE_PROPERTY(USES_DYNAMIC_STACK, 11, 1),
  KERNEL_CODE_PROPERTY(RESERVED1, 12, 4),
};
#undef KERNEL_CODE_PROPERTY

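// Query sketch (illustrative only): testing whether a kernel was built for
// wave32, given a kernel_code_properties value taken from a kernel descriptor
// ("kd" is a hypothetical kernel_descriptor_t instance, defined below).
//
//   uint16_t props = kd.kernel_code_properties;
//   bool wave32 =
//       AMDHSA_BITS_GET(props, KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
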
// Kernarg preload specification.
#define KERNARG_PRELOAD_SPEC(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNARG_PRELOAD_SPEC_##NAME, SHIFT, WIDTH)
enum : int32_t {
  KERNARG_PRELOAD_SPEC(LENGTH, 0, 7),
  KERNARG_PRELOAD_SPEC(OFFSET, 7, 9),
};
#undef KERNARG_PRELOAD_SPEC

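// Encoding sketch (illustrative only): packing a hypothetical preload length
// and offset into the 16-bit kernarg_preload field of a kernel descriptor.
//
//   uint16_t preload = 0;
//   AMDHSA_BITS_SET(preload, KERNARG_PRELOAD_SPEC_LENGTH, 2);
//   AMDHSA_BITS_SET(preload, KERNARG_PRELOAD_SPEC_OFFSET, 0);
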
// Kernel descriptor. Must be kept backwards compatible.
struct kernel_descriptor_t {
  uint32_t group_segment_fixed_size;
  uint32_t private_segment_fixed_size;
  uint32_t kernarg_size;
  uint8_t reserved0[4];
  int64_t kernel_code_entry_byte_offset;
  uint8_t reserved1[20];
  uint32_t compute_pgm_rsrc3;
  uint32_t compute_pgm_rsrc1;
  uint32_t compute_pgm_rsrc2;
  uint16_t kernel_code_properties;
  uint16_t kernarg_preload;
  uint8_t reserved3[4];
};

enum : uint32_t {
  GROUP_SEGMENT_FIXED_SIZE_OFFSET = 0,
  PRIVATE_SEGMENT_FIXED_SIZE_OFFSET = 4,
  KERNARG_SIZE_OFFSET = 8,
  RESERVED0_OFFSET = 12,
  KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET = 16,
  RESERVED1_OFFSET = 24,
  COMPUTE_PGM_RSRC3_OFFSET = 44,
  COMPUTE_PGM_RSRC1_OFFSET = 48,
  COMPUTE_PGM_RSRC2_OFFSET = 52,
  KERNEL_CODE_PROPERTIES_OFFSET = 56,
  KERNARG_PRELOAD_OFFSET = 58,
  RESERVED3_OFFSET = 60,
};

static_assert(
    sizeof(kernel_descriptor_t) == 64,
    "invalid size for kernel_descriptor_t");
static_assert(offsetof(kernel_descriptor_t, group_segment_fixed_size) ==
                  GROUP_SEGMENT_FIXED_SIZE_OFFSET,
              "invalid offset for group_segment_fixed_size");
static_assert(offsetof(kernel_descriptor_t, private_segment_fixed_size) ==
                  PRIVATE_SEGMENT_FIXED_SIZE_OFFSET,
              "invalid offset for private_segment_fixed_size");
static_assert(offsetof(kernel_descriptor_t, kernarg_size) ==
                  KERNARG_SIZE_OFFSET,
              "invalid offset for kernarg_size");
static_assert(offsetof(kernel_descriptor_t, reserved0) == RESERVED0_OFFSET,
              "invalid offset for reserved0");
static_assert(offsetof(kernel_descriptor_t, kernel_code_entry_byte_offset) ==
                  KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET,
              "invalid offset for kernel_code_entry_byte_offset");
static_assert(offsetof(kernel_descriptor_t, reserved1) == RESERVED1_OFFSET,
              "invalid offset for reserved1");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc3) ==
                  COMPUTE_PGM_RSRC3_OFFSET,
              "invalid offset for compute_pgm_rsrc3");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc1) ==
                  COMPUTE_PGM_RSRC1_OFFSET,
              "invalid offset for compute_pgm_rsrc1");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc2) ==
                  COMPUTE_PGM_RSRC2_OFFSET,
              "invalid offset for compute_pgm_rsrc2");
static_assert(offsetof(kernel_descriptor_t, kernel_code_properties) ==
                  KERNEL_CODE_PROPERTIES_OFFSET,
              "invalid offset for kernel_code_properties");
static_assert(offsetof(kernel_descriptor_t, kernarg_preload) ==
                  KERNARG_PRELOAD_OFFSET,
              "invalid offset for kernarg_preload");
static_assert(offsetof(kernel_descriptor_t, reserved3) == RESERVED3_OFFSET,
              "invalid offset for reserved3");

} // end namespace amdhsa
} // end namespace llvm

#endif // LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H