#include "llvm/IR/IntrinsicsRISCV.h"

using namespace LegalityPredicates;
using namespace LegalizeMutations;
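// Legality predicates for scalable vector types. A vector type is only legal
// when the V/Zve* instructions are available; 64-bit elements additionally
// require hasVInstructionsI64(), and single-element (nxv1) types require
// ELEN == 64.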
static LegalityPredicate typeIsLegalIntOrFPVec(
    unsigned TypeIdx, std::initializer_list<LLT> IntOrFPVecTys,
    const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
            ST.hasVInstructionsI64()) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };
  return all(typeInSet(TypeIdx, IntOrFPVecTys), P);
}

static LegalityPredicate typeIsLegalBoolVec(
    unsigned TypeIdx, std::initializer_list<LLT> BoolVecTys,
    const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };
  return all(typeInSet(TypeIdx, BoolVecTys), P);
}

static LegalityPredicate typeIsLegalPtrVec(
    unsigned TypeIdx, std::initializer_list<LLT> PtrVecTys,
    const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 16 ||
            Query.Types[TypeIdx].getScalarSizeInBits() == 32);
  };
  return all(typeInSet(TypeIdx, PtrVecTys), P);
}
RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
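  // All rules below are expressed in terms of sXLen, the native GPR width, so
  // one table serves both RV32 and RV64; feature-dependent rules use the
  // predicated legalFor/customFor/libcallFor overloads that take a bool.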
  using namespace TargetOpcode;

  auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};

  auto IntOrFPVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
                        nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                        nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                        nxv1s64,  nxv2s64, nxv4s64, nxv8s64};

  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0, nxv16p0};
  getActionDefinitionsBuilder({G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();

  getActionDefinitionsBuilder(
      {G_UADDSAT, G_SADDSAT, G_USUBSAT, G_SSUBSAT, G_SSHLSAT, G_USHLSAT})
      .lower();
  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{sXLen, sXLen}})
      .customFor(ST.is64Bit(), {{s32, s32}})
      .widenScalarToNextPow2(0)
  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalFor({{s32, s16}})
      .legalFor(ST.is64Bit(), {{s64, s16}, {s64, s32}})
  getActionDefinitionsBuilder(G_SEXT_INREG)
      .clampScalar(0, sXLen, sXLen)
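  // Merge/unmerge: an s64 <-> 2 x s32 pair is legal on RV32 when the D
  // extension is present; everything else is widened/clamped to sXLen.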
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(
          all(typeIs(BigTyIdx, s64), typeIs(LitTyIdx, s32)));
    }
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }
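  // Funnel shifts are always lowered. Rotates are legal on sXLen only with
  // Zbb/Zbkb; on RV64 the 32-bit rotates are custom-lowered to the W-form
  // instructions (see getRISCVWOpcode below).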
  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();

  getActionDefinitionsBuilder({G_ROTR, G_ROTL})
      .legalFor(ST.hasStdExtZbb() || ST.hasStdExtZbkb(), {{sXLen, sXLen}})
      .customFor(ST.is64Bit() && (ST.hasStdExtZbb() || ST.hasStdExtZbkb()),
                 {{s32, s32}})
      .lower();
  getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();

  getActionDefinitionsBuilder(G_BITCAST).legalIf(
  auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();
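  // CTLZ/CTTZ/CTPOP are legal on sXLen only with Zbb (the 32-bit count-zeros
  // forms on RV64 are custom-lowered to CLZW/CTZW); without Zbb they are
  // lowered to generic bit manipulation sequences.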
  auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{sXLen, sXLen}})
        .customFor({{s32, s32}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CountZerosActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
    CountZerosUndefActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0);
  }
  CountZerosUndefActions.lower();
  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{sXLen, sXLen}})
        .clampScalar(0, sXLen, sXLen)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
  }
  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor(!ST.is64Bit(), {s32})
      .customFor(ST.is64Bit(), {s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);
  getActionDefinitionsBuilder(G_FREEZE)
      .legalFor({s16, s32, p0})
      .legalFor(ST.is64Bit(), {s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s16, sXLen);
  getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER})
      .legalFor({s32, sXLen, p0})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);
  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
      .widenScalarOrEltToNextPow2OrMinSize(1, 8)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);
  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s32, sXLen}, {p0, sXLen}})
      .legalFor(XLen == 64 || ST.hasStdExtD(), {{s64, sXLen}})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
      .clampScalar(1, sXLen, sXLen);
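  // Memory operations. getScalarMemAlign returns the required alignment in
  // bits for a scalar access of the given size: byte alignment is enough when
  // enableUnalignedScalarMem() is set, otherwise natural alignment is needed.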
  auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
  auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
  auto &ExtLoadActions = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD});

  auto getScalarMemAlign = [&ST](unsigned Size) {
    return ST.enableUnalignedScalarMem() ? 8 : Size;
  };
  LoadActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  StoreActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  ExtLoadActions.legalForTypesWithMemDesc(
      {{sXLen, p0, s8, getScalarMemAlign(8)},
       {sXLen, p0, s16, getScalarMemAlign(16)}});
  if (XLen == 64) {
    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s32, getScalarMemAlign(32)}});
  } else if (ST.hasStdExtD()) {
    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
  }
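  // Scalable vector loads and stores are legal at natural element alignment
  // for every LMUL the subtarget supports; under-aligned RVV accesses are
  // custom-lowered by legalizeLoadStore further down, which rewrites them as
  // i8-vector accesses.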
  if (ST.hasVInstructions()) {
    LoadActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                          {nxv4s8, p0, nxv4s8, 8},
                                          {nxv8s8, p0, nxv8s8, 8},
                                          {nxv16s8, p0, nxv16s8, 8},
                                          {nxv32s8, p0, nxv32s8, 8},
                                          {nxv64s8, p0, nxv64s8, 8},
                                          {nxv2s16, p0, nxv2s16, 16},
                                          {nxv4s16, p0, nxv4s16, 16},
                                          {nxv8s16, p0, nxv8s16, 16},
                                          {nxv16s16, p0, nxv16s16, 16},
                                          {nxv32s16, p0, nxv32s16, 16},
                                          {nxv2s32, p0, nxv2s32, 32},
                                          {nxv4s32, p0, nxv4s32, 32},
                                          {nxv8s32, p0, nxv8s32, 32},
                                          {nxv16s32, p0, nxv16s32, 32}});
    StoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                           {nxv4s8, p0, nxv4s8, 8},
                                           {nxv8s8, p0, nxv8s8, 8},
                                           {nxv16s8, p0, nxv16s8, 8},
                                           {nxv32s8, p0, nxv32s8, 8},
                                           {nxv64s8, p0, nxv64s8, 8},
                                           {nxv2s16, p0, nxv2s16, 16},
                                           {nxv4s16, p0, nxv4s16, 16},
                                           {nxv8s16, p0, nxv8s16, 16},
                                           {nxv16s16, p0, nxv16s16, 16},
                                           {nxv32s16, p0, nxv32s16, 16},
                                           {nxv2s32, p0, nxv2s32, 32},
                                           {nxv4s32, p0, nxv4s32, 32},
                                           {nxv8s32, p0, nxv8s32, 32},
                                           {nxv16s32, p0, nxv16s32, 32}});
    if (ST.getELen() == 64) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                            {nxv1s16, p0, nxv1s16, 16},
                                            {nxv1s32, p0, nxv1s32, 32}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                             {nxv1s16, p0, nxv1s16, 16},
                                             {nxv1s32, p0, nxv1s32, 32}});
    }
    if (ST.hasVInstructionsI64()) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                            {nxv2s64, p0, nxv2s64, 64},
                                            {nxv4s64, p0, nxv4s64, 64},
                                            {nxv8s64, p0, nxv8s64, 64}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                             {nxv2s64, p0, nxv2s64, 64},
                                             {nxv4s64, p0, nxv4s64, 64},
                                             {nxv8s64, p0, nxv8s64, 64}});
    }

    if (XLen <= ST.getELen()) {
  LoadActions.widenScalarToNextPow2(0, 8)
      .lowerIfMemSizeNotByteSizePow2()
      .clampScalar(0, s16, sXLen)
      .lower();
  StoreActions
      .clampScalar(0, s16, sXLen)
      .lowerIfMemSizeNotByteSizePow2()
      .lower();
  ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, sXLen, sXLen).lower();
  getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{sXLen, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, sXLen}})
      .clampScalar(1, sXLen, sXLen);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({sXLen}).minScalar(0, sXLen);

  getActionDefinitionsBuilder(G_BRJT).customFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, sXLen})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);
  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
      .legalFor({p0});
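  // Integer multiply requires Zmmul (or full M); divide/remainder require M.
  // Without the extension the operations become runtime library calls, and
  // the double-XLEN-wide forms are always libcalls.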
  if (ST.hasStdExtZmmul()) {
    getActionDefinitionsBuilder(G_MUL)
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH})

    getActionDefinitionsBuilder({G_SMULO, G_UMULO}).minScalar(0, sXLen).lower();
  } else {
    getActionDefinitionsBuilder(G_MUL)
        .libcallFor({sXLen, sDoubleXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sDoubleXLen);
    getActionDefinitionsBuilder({G_SMULH, G_UMULH}).lowerFor({sXLen});

    getActionDefinitionsBuilder({G_SMULO, G_UMULO})
        .widenScalarIf(typeIs(0, sXLen),
  if (ST.hasStdExtM()) {
    getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_UREM})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
        .widenScalarToNextPow2(0);
    getActionDefinitionsBuilder(G_SREM)
        .libcallFor({sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  } else {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  }

  getActionDefinitionsBuilder({G_SDIVREM, G_UDIVREM}).lower();
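  // With Zbb, G_ABS is custom-expanded (max(X, -X)) and the integer min/max
  // opcodes are legal on sXLen; without Zbb everything is lowered generically.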
  getActionDefinitionsBuilder(G_ABS)
      .customFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();

  getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN})
      .legalFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();

  getActionDefinitionsBuilder({G_SCMP, G_UCMP}).lower();
  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getActionDefinitionsBuilder({G_DYN_STACKALLOC, G_STACKSAVE, G_STACKRESTORE})
      .lower();
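  // Scalar floating point: F, D and Zfh make s32, s64 and s16 legal for the
  // corresponding operations; anything not covered (including s128, and all
  // FP types on soft-float targets) is turned into a libcall.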
  getActionDefinitionsBuilder(
      {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FSQRT, G_FMAXNUM, G_FMINNUM})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
  getActionDefinitionsBuilder({G_FNEG, G_FABS})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .lowerFor({s32, s64, s128});

  getActionDefinitionsBuilder(G_FREM)
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128})
  getActionDefinitionsBuilder(G_FCOPYSIGN)
      .legalFor(ST.hasStdExtF(), {{s32, s32}})
      .legalFor(ST.hasStdExtD(), {{s64, s64}, {s32, s64}, {s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s16}, {s16, s32}, {s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}, {s64, s16}})
  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor(ST.hasStdExtD(), {{s32, s64}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s32}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}})
      .libcallFor({{s32, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}});
  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor(ST.hasStdExtD(), {{s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s64, s16}})
      .libcallFor({{s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}});
  getActionDefinitionsBuilder(G_FCMP)
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .clampScalar(0, sXLen, sXLen)
      .libcallFor({{sXLen, s32}, {sXLen, s64}})
      .libcallFor(ST.is64Bit(), {{sXLen, s128}});
  getActionDefinitionsBuilder(G_IS_FPCLASS)
      .customFor(ST.hasStdExtF(), {{s1, s32}})
      .customFor(ST.hasStdExtD(), {{s1, s64}})
      .customFor(ST.hasStdExtZfh(), {{s1, s16}})
      .lowerFor({{s1, s32}, {s1, s64}});

  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .lowerFor({s32, s64, s128});
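  // FP <-> integer conversions: on RV64 the 32-bit-result forms are
  // custom-lowered to the W-form converts (G_FCVT_W_RV64 / G_FCVT_WU_RV64) so
  // the result stays sign-extended; conversions the FP extensions cannot
  // handle become libcalls.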
  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .customFor(ST.is64Bit() && ST.hasStdExtF(), {{s32, s32}})
      .customFor(ST.is64Bit() && ST.hasStdExtD(), {{s32, s64}})
      .customFor(ST.is64Bit() && ST.hasStdExtZfh(), {{s32, s16}})
      .widenScalarToNextPow2(0)
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}, {s128, s128}});
  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalFor(ST.hasStdExtF(), {{s32, sXLen}})
      .legalFor(ST.hasStdExtD(), {{s64, sXLen}})
      .legalFor(ST.hasStdExtZfh(), {{s16, sXLen}})
      .widenScalarToNextPow2(1)
      .widenScalarIf(
          [=, &ST](const LegalityQuery &Query) {
            return Query.Types[0].isScalar() && Query.Types[1].isScalar() &&
                   (Query.Types[1].getSizeInBits() < ST.getXLen()) &&
                   ((ST.hasStdExtF() && Query.Types[0].getSizeInBits() == 32) ||
                    (ST.hasStdExtD() && Query.Types[0].getSizeInBits() == 64) ||
                    (ST.hasStdExtZfh() &&
                     Query.Types[0].getSizeInBits() == 16));
          },
          changeTo(1, sXLen))
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}, {s128, s128}});
  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR, G_FRINT, G_FNEARBYINT,
                               G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND,
                               G_INTRINSIC_ROUNDEVEN})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
  getActionDefinitionsBuilder({G_FMAXIMUM, G_FMINIMUM})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16});
  getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FTAN, G_FPOW, G_FLOG, G_FLOG2,
                               G_FLOG10, G_FEXP, G_FEXP2, G_FEXP10, G_FACOS,
                               G_FASIN, G_FATAN, G_FATAN2, G_FCOSH, G_FSINH,
                               G_FTANH})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
  getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP})
      .libcallFor({{s32, s32}, {s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}});
  getActionDefinitionsBuilder(G_VASTART).customFor({p0});

  getActionDefinitionsBuilder(G_VAARG)
      .clampScalar(0, sXLen, sXLen)
      .lowerForCartesianProduct({sXLen, p0}, {p0});

  getActionDefinitionsBuilder(G_VSCALE)
      .clampScalar(0, sXLen, sXLen)
      .customFor({sXLen});
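  // G_VSCALE is custom-legalized: vscale equals VLENB / 8 (RVVBitsPerBlock is
  // 64), so legalizeVScale below materializes vscale * N from G_READ_VLENB
  // with shifts and, when needed, a multiply.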
  auto &SplatActions =
      getActionDefinitionsBuilder(G_SPLAT_VECTOR)

  if (ST.hasVInstructionsF64() && ST.hasStdExtD())
    SplatActions.legalIf(all(
        typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
  else if (ST.hasVInstructionsI64())
    SplatActions.customIf(all(
        typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));

  SplatActions.clampScalar(1, sXLen, sXLen);
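  // On RV32, a splat of s64 elements cannot take the scalar in a single GPR:
  // it is legal only when both V's f64 support and D are present, and is
  // otherwise custom-lowered by legalizeSplatVector into two s32 halves via
  // G_SPLAT_VECTOR_SPLIT_I64_VL.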
  getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
            LLT CastTy = LLT::vector(
                Query.Types[0].getElementCount().divideCoefficientBy(8), 8);
            return std::pair(0, CastTy);

  getActionDefinitionsBuilder(G_INSERT_SUBVECTOR)
  getActionDefinitionsBuilder(G_ATOMICRMW_ADD)
      .legalFor(ST.hasStdExtA(), {{sXLen, p0}})
      .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
      .clampScalar(0, sXLen, sXLen);

  getLegacyLegalizerInfo().computeTables();
}
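// legalizeIntrinsic: custom legalization for target intrinsics. Handles
// Intrinsic::vacopy, which is expanded into a pointer-sized load from the
// source va_list followed by a store to the destination va_list, and the
// masked atomic intrinsics.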
  switch (IntrinsicID) {
  case Intrinsic::vacopy: {
    LLT PtrTy = MRI.getType(DstLst);

    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);

    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);

    MI.eraseFromParent();
    return true;
  }
  case Intrinsic::riscv_masked_atomicrmw_add:
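// legalizeVAStart: G_VASTART is lowered by storing the address of the
// varargs save area (the function's VarArgsFrameIndex) through the pointer
// operand of the instruction.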
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);

  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();
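// legalizeBRJT: expand a jump-table branch. The index is scaled by the
// jump-table entry size and added to the table base, the entry is loaded
// (a sign-extending load is used on RV64 for 32-bit entry kinds), and an
// indirect branch is emitted.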
  auto &MF = *MI.getParent()->getParent();

  LLT PtrTy = MRI.getType(PtrReg);
  Register IndexReg = MI.getOperand(2).getReg();
  LLT IndexTy = MRI.getType(IndexReg);

  IndexReg = MIRBuilder.buildShl(IndexTy, IndexReg, ShiftAmt).getReg(0);

      STI.is64Bit() ? TargetOpcode::G_SEXTLOAD : TargetOpcode::G_LOAD;

  MI.eraseFromParent();
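// shouldBeInConstantPool: decide whether a 64-bit immediate is better loaded
// from the constant pool than materialized inline, by comparing the length of
// the RISCVMatInt instruction sequence (including the two-register shift-add
// form) against the subtarget's build-integer cost limit.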
bool RISCVLegalizerInfo::shouldBeInConstantPool(const APInt &APImm,
                                                bool ShouldOptForSize) const {
  if (ShouldOptForSize)
    return true;

  unsigned ShiftAmt, AddOpc;
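// legalizeVScale: materialize Dst = vscale * Val. Since vscale = VLENB / 8,
// power-of-two values become a shift of G_READ_VLENB (or VLENB itself when
// Val == 8), multiples of 8 become VLENB * (Val / 8), and other values use
// (VLENB >> 3) * Val.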
  uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();

    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
  } else if (Log2 > 3) {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});

    MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
  } else if ((Val % 8) == 0) {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});

    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});

  MI.eraseFromParent();
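// legalizeExt: zero/sign/any-extension of a boolean vector is lowered to a
// vector select between splat(ExtTrueVal) and splat(0), where ExtTrueVal is
// -1 for G_SEXT and 1 for G_ZEXT/G_ANYEXT.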
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);

  LLT DstTy = MRI.getType(Dst);
  int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;

  MI.eraseFromParent();
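// legalizeLoadStore: a scalable-vector access whose alignment is smaller than
// its element size is rewritten as a load/store of an equivalent i8 vector
// (element count scaled by EltSizeBits / 8), which only needs byte alignment;
// accesses the target already allows are left unchanged.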
923 "Machine instructions must be Load/Store.");
930 LLT DataTy =
MRI.getType(DstReg);
934 if (!
MI.hasOneMemOperand())
942 if (TLI->allowsMemoryAccessForAlignment(Ctx,
DL, VT, *MMO))
946 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
947 "Unexpected unaligned RVV load type");
950 unsigned NumElements =
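// Helpers for the RVV custom legalizations below: buildAllOnesMask emits
// G_VMSET_VL, buildDefaultVLOps returns that mask together with the vector
// length, and buildSplatSplitS64WithVL splats a 64-bit value on RV32 by
// passing its two 32-bit halves to G_SPLAT_VECTOR_SPLIT_I64_VL.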
  return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});

static std::pair<MachineInstrBuilder, MachineInstrBuilder>

  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});

                                  Unmerge.getReg(1), VL, MIB, MRI);
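// legalizeSplatVector: splats of i1 vectors become G_VMSET_VL / G_VMCLR_VL
// for constant all-ones/zero values, and otherwise an i8 splat of the
// zero-extended scalar compared against zero; s64 splats on RV32 are split
// into two s32 halves.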
  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);

  Register SplatVal = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Dst);

  if (XLenTy.getSizeInBits() == 32 &&

    MI.eraseFromParent();

    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();

    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();

  auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);

  MI.eraseFromParent();

         "Unexpected vector LLT");
bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,

  LLT LitTy = MRI.getType(Dst);
  LLT BigTy = MRI.getType(Src);

  auto BigZExt = MIB.buildZExt(ExtBigTy, Src);

  MI.eraseFromParent();

  unsigned RemIdx = Decompose.second;

  LLT InterLitTy = BigTy;

  assert(Decompose.first != RISCV::NoSubRegister);

  auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx);

      RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
      {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});

  MI.eraseFromParent();
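// legalizeInsertSubvector: inserting into an undef vector, or at an index
// that falls on a full vector-register-group boundary, needs no slide; other
// indices insert the subvector at the aligned position and then use
// G_VSLIDEUP_VL by vscale * RemIdx, with VL set to SlideupAmt plus the
// subvector's element count.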
bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,

  LLT BigTy = MRI.getType(BigVec);
  LLT LitTy = MRI.getType(LitVec);

      MRI.getVRegDef(BigVec)->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)

  if (BigTyMinElts >= 8 && LitTyMinElts >= 8)

  unsigned SubRegIdx, RemIdx;
  std::tie(SubRegIdx, RemIdx) =

  bool ExactlyVecRegSized =

  if (RemIdx == 0 && ExactlyVecRegSized)

  LLT InterLitTy = BigTy;

  unsigned AlignedIdx = Idx - RemIdx;

  bool NeedInsertSubvec =

      NeedInsertSubvec ? MRI.createGenericVirtualRegister(InterLitTy) : Dst;

                         {AlignedExtract, Insert, VL});

  auto SlideupAmt = MIB.buildVScale(XLenTy, RemIdx);

  VL = MIB.buildAdd(XLenTy, SlideupAmt, VL);

  MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InsertedDst},
                 {AlignedExtract, Insert, SlideupAmt, Mask, VL, Policy});

  if (NeedInsertSubvec)

  MI.eraseFromParent();
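// getRISCVWOpcode: map a generic opcode to the RV64-only W-form pseudo used
// when a 32-bit operation is custom-legalized on a 64-bit target.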
  case TargetOpcode::G_ASHR:
    return RISCV::G_SRAW;
  case TargetOpcode::G_LSHR:
    return RISCV::G_SRLW;
  case TargetOpcode::G_SHL:
    return RISCV::G_SLLW;
  case TargetOpcode::G_SDIV:
    return RISCV::G_DIVW;
  case TargetOpcode::G_UDIV:
    return RISCV::G_DIVUW;
  case TargetOpcode::G_UREM:
    return RISCV::G_REMUW;
  case TargetOpcode::G_ROTL:
    return RISCV::G_ROLW;
  case TargetOpcode::G_ROTR:
    return RISCV::G_RORW;
  case TargetOpcode::G_CTLZ:
    return RISCV::G_CLZW;
  case TargetOpcode::G_CTTZ:
    return RISCV::G_CTZW;
  case TargetOpcode::G_FPTOSI:
    return RISCV::G_FCVT_W_RV64;
  case TargetOpcode::G_FPTOUI:
    return RISCV::G_FCVT_WU_RV64;
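// legalizeCustom: handles every opcode marked customFor/customIf above.
// 32-bit shifts, divides, rotates, count-zeros and FP-to-int on RV64 are
// widened to s64 and rewritten to the W-form opcode from getRISCVWOpcode;
// G_IS_FPCLASS becomes G_FCLASS plus an AND with the requested class mask;
// large G_CONSTANTs may be lowered through the constant pool.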
  switch (MI.getOpcode()) {
  case TargetOpcode::G_ABS:

  case TargetOpcode::G_CONSTANT: {
    bool ShouldOptForSize = F.hasOptSize();
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))

  case TargetOpcode::G_SUB:
  case TargetOpcode::G_ADD: {
    Register DstALU = MRI.createGenericVirtualRegister(sXLen);

    MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {MO}, {DstSext});

  case TargetOpcode::G_SEXT_INREG: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    int64_t SizeInBits = MI.getOperand(2).getImm();

    if (STI.hasStdExtZbb() && (SizeInBits == 8 || SizeInBits == 16))

  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {
    unsigned ExtOpc = TargetOpcode::G_ANYEXT;
    if (MI.getOpcode() == TargetOpcode::G_ASHR)
      ExtOpc = TargetOpcode::G_SEXT;
    else if (MI.getOpcode() == TargetOpcode::G_LSHR)
      ExtOpc = TargetOpcode::G_ZEXT;

  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_ROTL:
  case TargetOpcode::G_ROTR: {

  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTTZ: {

  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {

  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();

    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);

    MI.eraseFromParent();

  case TargetOpcode::G_BRJT:
    return legalizeBRJT(MI, MIRBuilder);
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  case TargetOpcode::G_VSCALE:
    return legalizeVScale(MI, MIRBuilder);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ANYEXT:
    return legalizeExt(MI, MIRBuilder);
  case TargetOpcode::G_SPLAT_VECTOR:
    return legalizeSplatVector(MI, MIRBuilder);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return legalizeExtractSubvector(MI, MIRBuilder);
  case TargetOpcode::G_INSERT_SUBVECTOR:
    return legalizeInsertSubvector(MI, Helper, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, Helper, MIRBuilder);