22#ifndef LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
23#define LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
// Gets the byte offset of MEMBER within TYPE. Guarded so we do not clash
// with (or redefine) the standard definition from <stddef.h>/<cstddef>.
// NOTE(review): the null-pointer-deref form is the traditional fallback;
// it is only used when no standard offsetof is already in scope.
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
#endif // offsetof
// Expands, inside an enum, to the three enumerators describing one bit-field:
//   NAME_SHIFT — bit position of the field's least-significant bit
//   NAME_WIDTH — number of bits in the field
//   NAME       — the in-place mask of the field
// NOTE(review): (1 << (WIDTH)) is int arithmetic — assumes WIDTH < 31,
// which holds for all sub-word fields declared in this header.
#ifndef AMDHSA_BITS_ENUM_ENTRY
#define AMDHSA_BITS_ENUM_ENTRY(NAME, SHIFT, WIDTH)                             \
  NAME##_SHIFT = (SHIFT),                                                      \
  NAME##_WIDTH = (WIDTH),                                                      \
  NAME = (((1 << (WIDTH)) - 1) << (SHIFT))
#endif // AMDHSA_BITS_ENUM_ENTRY
// Extracts the bit-field selected by mask MSK from SRC, shifted down to bit 0.
// Relies on the companion MSK_SHIFT enumerator from AMDHSA_BITS_ENUM_ENTRY.
// Arguments are parenthesized so operator-precedence surprises in callers'
// expressions cannot change the result.
#ifndef AMDHSA_BITS_GET
#define AMDHSA_BITS_GET(SRC, MSK) (((SRC) & (MSK)) >> MSK##_SHIFT)
#endif // AMDHSA_BITS_GET
// Overwrites the bit-field selected by mask MSK in DST with VAL:
// first clears the field, then ORs in VAL shifted into position
// (any bits of VAL beyond the field width are masked off).
// NOTE(review): the copy of this macro in this file had lost its
// do/while body and the field-clearing step; restored here. VAL is
// still evaluated exactly once.
#ifndef AMDHSA_BITS_SET
#define AMDHSA_BITS_SET(DST, MSK, VAL)                                         \
  do {                                                                         \
    (DST) &= ~(MSK);                                                           \
    (DST) |= (((VAL) << MSK##_SHIFT) & (MSK));                                 \
  } while (0)
#endif // AMDHSA_BITS_SET
// Shorthand wrappers around AMDHSA_BITS_ENUM_ENTRY for compute_pgm_rsrc1
// bit-fields. Each variant scopes a field name to the GFX generation range
// it applies to (e.g. GFX6_GFX8 = valid on GFX6 through GFX8 only).
#define COMPUTE_PGM_RSRC1(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC1_GFX6_GFX8(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX8_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC1_GFX6_GFX9(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX9_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC1_GFX6_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX11_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC1_GFX6_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX120_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC1_GFX9_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX9_PLUS_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC1_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX10_PLUS_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC1_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX12_PLUS_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC1_GFX125(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX125_##NAME, SHIFT, WIDTH)
// NOTE(review): the enum body that consumed these macros appears to have been
// dropped from this copy of the file; upstream also #undefs every *_GFX*
// variant at this point, not just the base name.
#undef COMPUTE_PGM_RSRC1
// Shorthand wrappers around AMDHSA_BITS_ENUM_ENTRY for compute_pgm_rsrc2
// bit-fields, scoped by the GFX generation range each field applies to.
#define COMPUTE_PGM_RSRC2(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC2_GFX6_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX6_GFX11_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC2_GFX6_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX6_GFX120_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC2_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX12_PLUS_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC2_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX120_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC2_GFX125(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX125_##NAME, SHIFT, WIDTH)
// NOTE(review): the enum body that consumed these macros appears to have been
// dropped from this copy of the file.
#undef COMPUTE_PGM_RSRC2
// compute_pgm_rsrc3 bit-fields specific to GFX90A.
#define COMPUTE_PGM_RSRC3_GFX90A(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX90A_##NAME, SHIFT, WIDTH)
// NOTE(review): the enum body that consumed this macro appears to have been
// dropped from this copy of the file.
#undef COMPUTE_PGM_RSRC3_GFX90A
// compute_pgm_rsrc3 bit-fields for GFX10 and later, scoped by the GFX
// generation range each field applies to.
#define COMPUTE_PGM_RSRC3_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_PLUS_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC3_GFX10(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC3_GFX10_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_GFX11_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC3_GFX10_GFX120(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_GFX120_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC3_GFX11_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX11_PLUS_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC3_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX11_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC3_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX12_PLUS_##NAME, SHIFT, WIDTH)
#define COMPUTE_PGM_RSRC3_GFX125(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX125_##NAME, SHIFT, WIDTH)
// NOTE(review): the enum body that consumed these macros appears to have been
// dropped from this copy of the file; upstream also #undefs the other *_GFX*
// variants at this point, not just GFX10_PLUS.
#undef COMPUTE_PGM_RSRC3_GFX10_PLUS
// Bit-fields of the kernel_code_properties word of the kernel descriptor.
#define KERNEL_CODE_PROPERTY(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNEL_CODE_PROPERTY_##NAME, SHIFT, WIDTH)
// NOTE(review): the enum body that consumed this macro appears to have been
// dropped from this copy of the file.
#undef KERNEL_CODE_PROPERTY
// Bit-fields of the kernarg_preload word of the kernel descriptor.
#define KERNARG_PRELOAD_SPEC(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNARG_PRELOAD_SPEC_##NAME, SHIFT, WIDTH)
// NOTE(review): the enum body that consumed this macro appears to have been
// dropped from this copy of the file.
#undef KERNARG_PRELOAD_SPEC
304 sizeof(kernel_descriptor_t) == 64,
305 "invalid size for kernel_descriptor_t");
306static_assert(
offsetof(kernel_descriptor_t, group_segment_fixed_size) ==
308 "invalid offset for group_segment_fixed_size");
309static_assert(
offsetof(kernel_descriptor_t, private_segment_fixed_size) ==
311 "invalid offset for private_segment_fixed_size");
312static_assert(
offsetof(kernel_descriptor_t, kernarg_size) ==
314 "invalid offset for kernarg_size");
316 "invalid offset for reserved0");
317static_assert(
offsetof(kernel_descriptor_t, kernel_code_entry_byte_offset) ==
319 "invalid offset for kernel_code_entry_byte_offset");
321 "invalid offset for reserved1");
322static_assert(
offsetof(kernel_descriptor_t, compute_pgm_rsrc3) ==
324 "invalid offset for compute_pgm_rsrc3");
325static_assert(
offsetof(kernel_descriptor_t, compute_pgm_rsrc1) ==
327 "invalid offset for compute_pgm_rsrc1");
328static_assert(
offsetof(kernel_descriptor_t, compute_pgm_rsrc2) ==
330 "invalid offset for compute_pgm_rsrc2");
331static_assert(
offsetof(kernel_descriptor_t, kernel_code_properties) ==
333 "invalid offset for kernel_code_properties");
334static_assert(
offsetof(kernel_descriptor_t, kernarg_preload) ==
336 "invalid offset for kernarg_preload");
338 "invalid offset for reserved3");
// NOTE(review): removed a stray "#define offsetof(TYPE, MEMBER)" with an
// EMPTY expansion — redefining offsetof to nothing would silently break
// every subsequent use of the macro in files that include this header.
@ FLOAT_ROUND_MODE_PLUS_INFINITY
@ FLOAT_ROUND_MODE_NEAR_EVEN
@ FLOAT_ROUND_MODE_MINUS_INFINITY
@ FLOAT_DENORM_MODE_FLUSH_SRC
@ FLOAT_DENORM_MODE_FLUSH_DST
@ FLOAT_DENORM_MODE_FLUSH_SRC_DST
@ FLOAT_DENORM_MODE_FLUSH_NONE
@ SYSTEM_VGPR_WORKITEM_ID_UNDEFINED
@ SYSTEM_VGPR_WORKITEM_ID_X
@ SYSTEM_VGPR_WORKITEM_ID_X_Y
@ SYSTEM_VGPR_WORKITEM_ID_X_Y_Z
@ COMPUTE_PGM_RSRC1_GFX10_PLUS
@ COMPUTE_PGM_RSRC1_GFX12_PLUS
@ COMPUTE_PGM_RSRC1_GFX6_GFX120
@ COMPUTE_PGM_RSRC1_GFX6_GFX8
@ COMPUTE_PGM_RSRC1_GFX125
@ COMPUTE_PGM_RSRC1_GFX6_GFX9
@ COMPUTE_PGM_RSRC1_GFX6_GFX11
@ COMPUTE_PGM_RSRC1_GFX9_PLUS
@ KERNEL_CODE_PROPERTIES_OFFSET
@ GROUP_SEGMENT_FIXED_SIZE_OFFSET
@ COMPUTE_PGM_RSRC3_OFFSET
@ KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET
@ COMPUTE_PGM_RSRC1_OFFSET
@ COMPUTE_PGM_RSRC2_OFFSET
@ PRIVATE_SEGMENT_FIXED_SIZE_OFFSET
@ COMPUTE_PGM_RSRC3_GFX90A
@ COMPUTE_PGM_RSRC3_GFX125
@ COMPUTE_PGM_RSRC3_GFX11
@ COMPUTE_PGM_RSRC3_GFX10
@ COMPUTE_PGM_RSRC3_GFX10_PLUS
@ COMPUTE_PGM_RSRC3_GFX12_PLUS
@ COMPUTE_PGM_RSRC3_GFX11_PLUS
@ COMPUTE_PGM_RSRC3_GFX10_GFX11
@ COMPUTE_PGM_RSRC3_GFX10_GFX120
@ COMPUTE_PGM_RSRC2_GFX120
@ COMPUTE_PGM_RSRC2_GFX125
@ COMPUTE_PGM_RSRC2_GFX6_GFX11
@ COMPUTE_PGM_RSRC2_GFX6_GFX120
AMDHSA kernel descriptor definitions for the AMDGPU backend; see the LLVM AMDGPUUsage documentation for the full descriptor layout.
uint32_t group_segment_fixed_size
uint32_t compute_pgm_rsrc1
uint32_t private_segment_fixed_size
uint32_t compute_pgm_rsrc2
uint16_t kernel_code_properties
uint32_t compute_pgm_rsrc3
int64_t kernel_code_entry_byte_offset