//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===--------------------------------------------------------------------===//
/// \file
/// This contains common combine transformations that may be used in a combine
/// pass, or by the target elsewhere.
/// Targets can pick individual opcode transformations from the helper or use
/// tryCombine which invokes all transformations. All of the transformations
/// return true if the MachineInstr changed and false otherwise.
///
//===--------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/InstrTypes.h"
#include <functional>

namespace llvm {

class APInt;
class ConstantFP;
class GPtrAdd;
class GZExtLoad;
class MachineInstr;
class MachineOperand;
class LegalizerInfo;
struct LegalityQuery;
class RegisterBank;
class TargetLowering;

struct PreferredTuple {
  LLT Ty;                // The result type of the extend.
  unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
  MachineInstr *MI;
};

struct IndexedLoadStoreMatchInfo {
  Register Addr;
  Register Base;
  Register Offset;
  bool RematOffset = false; // True if Offset is a constant that needs to be
                            // rematerialized before the new load/store.
  bool IsPre = false;
};

struct PtrAddChain {
  int64_t Imm;
  Register Base;
  const RegisterBank *Bank;
  unsigned Flags;
};

struct RegisterImmPair {
  Register Reg;
  int64_t Imm;
};


using BuildFnTy = std::function<void(MachineIRBuilder &)>;

using OperandBuildSteps =
    SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
struct InstructionBuildSteps {
  unsigned Opcode = 0;          /// The opcode for the produced instruction.
  OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
  InstructionBuildSteps() = default;
  InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
      : Opcode(Opcode), OperandFns(OperandFns) {}
};

struct InstructionStepsMatchInfo {
  /// Describes instructions to be built during a combine.
  SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
  InstructionStepsMatchInfo() = default;
  InstructionStepsMatchInfo(
      std::initializer_list<InstructionBuildSteps> InstrsToBuild)
      : InstrsToBuild(InstrsToBuild) {}
};
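
// Illustrative sketch (not upstream code): a combine rule that communicates
// its replacement through InstructionStepsMatchInfo describes each new
// instruction as an opcode plus operand-adding callbacks; the registers used
// below are hypothetical.
//
//   InstructionStepsMatchInfo Steps = {
//       {TargetOpcode::G_XOR,
//        {[=](MachineInstrBuilder &MIB) { MIB.addDef(DstReg); },
//         [=](MachineInstrBuilder &MIB) { MIB.addUse(SrcReg); },
//         [=](MachineInstrBuilder &MIB) { MIB.addUse(NegOneReg); }}}};
//
// applyBuildInstructionSteps() then materializes the described instructions.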

class CombinerHelper {
protected:
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  GISelChangeObserver &Observer;
  GISelValueTracking *VT;
  MachineDominatorTree *MDT;
  bool IsPreLegalize;
  const LegalizerInfo *LI;

public:
  CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
                 bool IsPreLegalize, GISelValueTracking *VT = nullptr,
                 MachineDominatorTree *MDT = nullptr,
                 const LegalizerInfo *LI = nullptr);

  GISelValueTracking *getValueTracking() const { return VT; }

  MachineIRBuilder &getBuilder() const {
    return Builder;
  }

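  // Illustrative sketch (not upstream code) of how a target combiner
  // implementation typically drives this class: construct one helper and
  // either call tryCombine() or individual match/apply entry points from
  // generated rules. Observer, B, VT, MDT and LI are assumed to be provided
  // by the surrounding combiner pass.
  //
  //   CombinerHelper Helper(Observer, B, /*IsPreLegalize=*/true, VT, MDT, LI);
  //   if (Helper.tryCombine(MI))
  //     return true; // MI was rewritten; the observer saw every change.
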
  const TargetLowering &getTargetLowering() const;

  const MachineFunction &getMachineFunction() const;

  const DataLayout &getDataLayout() const;

  LLVMContext &getContext() const;

  /// \returns true if the combiner is running pre-legalization.
  bool isPreLegalize() const;

  /// \returns true if \p Query is legal on the target.
  bool isLegal(const LegalityQuery &Query) const;

  /// \return true if the combine is running prior to legalization, or if \p
  /// Query is legal on the target.
  bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;

  /// \return true if \p Query is legal on the target, or if \p Query will
  /// perform WidenScalar action on the target.
  bool isLegalOrHasWidenScalar(const LegalityQuery &Query) const;

  /// \return true if the combine is running prior to legalization, or if \p Ty
  /// is a legal integer constant type on the target.
  bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const;

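  // Illustrative sketch (not upstream code): combines guard newly created
  // opcodes with these predicates so that post-legalization runs never
  // introduce illegal operations. Ty is a hypothetical result type.
  //
  //   if (!isLegalOrBeforeLegalizer({TargetOpcode::G_AND, {Ty}}))
  //     return false; // Bail out instead of creating an illegal G_AND.
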
  /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of
  /// the changes.
  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                      Register ToReg) const;

  /// Replace a single register operand with a new register and inform the
  /// observer of the changes.
  void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
                        Register ToReg) const;

  /// Replace the opcode in instruction with a new opcode and inform the
  /// observer of the changes.
  void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const;

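  // Illustrative sketch (not upstream code) of the common "replace all uses
  // and erase" pattern used by many apply* helpers. Replacement is a
  // hypothetical register known to hold the same value as the def.
  //
  //   Register Dst = MI.getOperand(0).getReg();
  //   replaceRegWith(MRI, Dst, Replacement);
  //   eraseInst(MI);
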
  /// Get the register bank of \p Reg.
  /// If Reg has not been assigned a register, a register class,
  /// or a register bank, then this returns nullptr.
  ///
  /// \pre Reg.isValid()
  const RegisterBank *getRegBank(Register Reg) const;

  /// Set the register bank of \p Reg.
  /// Does nothing if the RegBank is null.
  /// This is the counterpart to getRegBank.
  void setRegBank(Register Reg, const RegisterBank *RegBank) const;

  /// If \p MI is a COPY, try to combine it.
  /// Returns true if MI changed.
  bool tryCombineCopy(MachineInstr &MI) const;
  bool matchCombineCopy(MachineInstr &MI) const;
  void applyCombineCopy(MachineInstr &MI) const;

  /// Returns true if \p DefMI precedes \p UseMI or they are the same
  /// instruction. Both must be in the same basic block.
  bool isPredecessor(const MachineInstr &DefMI,
                     const MachineInstr &UseMI) const;

  /// Returns true if \p DefMI dominates \p UseMI. By definition an
  /// instruction dominates itself.
  ///
  /// If we haven't been provided with a MachineDominatorTree during
  /// construction, this function returns a conservative result that tracks
  /// just a single basic block.
  bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const;

  /// If \p MI is an extend that consumes the result of a load, try to combine
  /// it. Returns true if MI changed.
  bool tryCombineExtendingLoads(MachineInstr &MI) const;
  bool matchCombineExtendingLoads(MachineInstr &MI,
                                  PreferredTuple &MatchInfo) const;
  void applyCombineExtendingLoads(MachineInstr &MI,
                                  PreferredTuple &MatchInfo) const;

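  // Illustrative sketch (not upstream code): match/apply pairs like the one
  // above hand their findings to the apply step through a MatchInfo object.
  // ExtMI is a hypothetical extend instruction feeding from a load.
  //
  //   PreferredTuple MatchInfo;
  //   if (matchCombineExtendingLoads(ExtMI, MatchInfo))
  //     applyCombineExtendingLoads(ExtMI, MatchInfo); // e.g. G_ZEXT(G_LOAD) -> G_ZEXTLOAD
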
  /// Match (and (load x), mask) -> zextload x
  bool matchCombineLoadWithAndMask(MachineInstr &MI,
                                   BuildFnTy &MatchInfo) const;

  /// Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed
  /// load.
  bool matchCombineExtractedVectorLoad(MachineInstr &MI,
                                       BuildFnTy &MatchInfo) const;

  bool matchCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo) const;
  void applyCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo) const;

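  // Illustrative sketch (not upstream code): the indexed load/store combine
  // reports the addressing registers it found through the MatchInfo struct.
  // LdSt is a hypothetical G_LOAD or G_STORE instruction.
  //
  //   IndexedLoadStoreMatchInfo MatchInfo;
  //   if (matchCombineIndexedLoadStore(LdSt, MatchInfo))
  //     applyCombineIndexedLoadStore(LdSt, MatchInfo); // Builds G_INDEXED_*.
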

  /// Match sext_inreg(load p), imm -> sextload p
  bool matchSextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo) const;
  void applySextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo) const;

  /// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
  /// when their source operands are identical.
  bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;
  void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;

  /// If a brcond's true block is not the fallthrough, make it so by inverting
  /// the condition and swapping operands.
  bool matchOptBrCondByInvertingCond(MachineInstr &MI,
                                     MachineInstr *&BrCond) const;
  void applyOptBrCondByInvertingCond(MachineInstr &MI,
                                     MachineInstr *&BrCond) const;

  /// If \p MI is G_CONCAT_VECTORS, try to combine it.
  /// Returns true if MI changed.
  /// Right now, we support:
  /// - concat_vector(undef, undef) => undef
  /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
  ///   build_vector(A, B, C, D)
  /// ==========================================================
  /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
  /// can be flattened into a build_vector.
  /// In the first case \p Ops will be empty.
  /// In the second case \p Ops will contain the operands
  /// needed to produce the flattened build_vector.
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool matchCombineConcatVectors(MachineInstr &MI,
                                 SmallVector<Register> &Ops) const;
  /// Replace \p MI with a flattened build_vector with \p Ops
  /// or an implicit_def if \p Ops is empty.
  void applyCombineConcatVectors(MachineInstr &MI,
                                 SmallVector<Register> &Ops) const;

  bool matchCombineShuffleConcat(MachineInstr &MI,
                                 SmallVector<Register> &Ops) const;
  /// Replace \p MI with a flattened build_vector with \p Ops
  /// or an implicit_def if \p Ops is empty.
  void applyCombineShuffleConcat(MachineInstr &MI,
                                 SmallVector<Register> &Ops) const;

  /// Replace \p MI with a build_vector.
  bool matchCombineShuffleToBuildVector(MachineInstr &MI) const;
  void applyCombineShuffleToBuildVector(MachineInstr &MI) const;

  /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
  /// Returns true if MI changed.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool tryCombineShuffleVector(MachineInstr &MI) const;
  /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
  /// concat_vectors.
  /// \p Ops will contain the operands needed to produce the flattened
  /// concat_vectors.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool matchCombineShuffleVector(MachineInstr &MI,
                                 SmallVectorImpl<Register> &Ops) const;
  /// Replace \p MI with a concat_vectors with \p Ops.
  void applyCombineShuffleVector(MachineInstr &MI,
                                 const ArrayRef<Register> Ops) const;

  /// Optimize memcpy intrinsics et al., e.g. constant len calls.
  /// \p MaxLen if non-zero specifies the max length of a mem libcall to
  /// inline.
  ///
  /// For example (pre-indexed):
  ///
  ///   $addr = G_PTR_ADD $base, $offset
  ///   [...]
  ///   $val = G_LOAD $addr
  ///   [...]
  ///   $whatever = COPY $addr
  ///
  /// -->
  ///
  ///   $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
  ///   [...]
  ///   $whatever = COPY $addr
  ///
  /// or (post-indexed):
  ///
  ///   G_STORE $val, $base
  ///   [...]
  ///   $addr = G_PTR_ADD $base, $offset
  ///   [...]
  ///   $whatever = COPY $addr
  ///
  /// -->
  ///
  ///   $addr = G_INDEXED_STORE $val, $base, $offset
  ///   [...]
  ///   $whatever = COPY $addr
  bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0) const;

  bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;
  void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;

  /// Fold (shift (shift base, x), y) -> (shift base (x+y))
  bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;
  void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;

  /// If we have a shift-by-constant of a bitwise logic op that itself has a
  /// shift-by-constant operand with identical opcode, we may be able to convert
  /// that into 2 independent shifts followed by the logic op.
  bool matchShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo) const;
  void applyShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo) const;

  bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
  bool matchLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo,
                              MachineInstr &ShiftMI) const;
  void applyLshrOfTruncOfLshr(MachineInstr &MI,
                              LshrOfTruncOfLshr &MatchInfo) const;

  /// Transform a multiply by a power-of-2 value to a left shift.
  bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
  void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;

  // Transform a G_SUB with constant on the RHS to G_ADD.
  bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  // Transform a G_SHL with an extended source into a narrower shift if
  // possible.
  bool matchCombineShlOfExtend(MachineInstr &MI,
                               RegisterImmPair &MatchData) const;
  void applyCombineShlOfExtend(MachineInstr &MI,
                               const RegisterImmPair &MatchData) const;

  /// Fold away a merge of an unmerge of the corresponding values.
  bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const;

  /// Reduce a shift by a constant to an unmerge and a shift on a half sized
  /// type. This will not produce a shift smaller than \p TargetShiftSize.
  bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
                                  unsigned &ShiftVal) const;
  void applyCombineShiftToUnmerge(MachineInstr &MI,
                                  const unsigned &ShiftVal) const;
  bool tryCombineShiftToUnmerge(MachineInstr &MI,
                                unsigned TargetShiftAmount) const;

  /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.

  /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
  bool matchCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts) const;
  void applyCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts) const;

  /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
  bool matchCombineUnmergeUndef(
      MachineInstr &MI,
      std::function<void(MachineIRBuilder &)> &MatchInfo) const;

  /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
  bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const;
  void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const;

  /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
  bool matchCombineUnmergeZExtToZExt(MachineInstr &MI) const;

  /// Transform fp_instr(cst) to constant result of the fp operation.
  void applyCombineConstantFoldFpUnary(MachineInstr &MI,
                                       const ConstantFP *Cst) const;

  /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address
  /// space.
  bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) const;
  void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) const;

  /// Transform PtrToInt(IntToPtr(x)) to x.
  void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) const;

  /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
  /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
  bool
  matchCombineAddP2IToPtrAdd(MachineInstr &MI,
                             std::pair<Register, bool> &PtrRegAndCommute) const;
  void
  applyCombineAddP2IToPtrAdd(MachineInstr &MI,
                             std::pair<Register, bool> &PtrRegAndCommute) const;

  // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
  bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const;
  void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const;

  /// Transform anyext(trunc(x)) to x.

  /// Transform zext(trunc(x)) to x.

  /// Transform trunc (shl x, K) to shl (trunc x), K
  /// if K < VT.getScalarSizeInBits().
  ///
  /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
  /// if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits()).
  /// MidVT is obtained by finding a legal type between the trunc's src and dst
  /// types.
  bool
  matchCombineTruncOfShift(MachineInstr &MI,
                           std::pair<MachineInstr *, LLT> &MatchInfo) const;
  void
  applyCombineTruncOfShift(MachineInstr &MI,
                           std::pair<MachineInstr *, LLT> &MatchInfo) const;

  /// Return true if any explicit use operand on \p MI is defined by a
  /// G_IMPLICIT_DEF.
  bool matchAnyExplicitUseIsUndef(MachineInstr &MI) const;

  /// Return true if all register explicit use operands on \p MI are defined by
  /// a G_IMPLICIT_DEF.
  bool matchAllExplicitUsesAreUndef(MachineInstr &MI) const;

  /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
  bool matchUndefShuffleVectorMask(MachineInstr &MI) const;

  /// Return true if a G_STORE instruction \p MI is storing an undef value.
  bool matchUndefStore(MachineInstr &MI) const;

  /// Return true if a G_SELECT instruction \p MI has an undef comparison.
  bool matchUndefSelectCmp(MachineInstr &MI) const;

  /// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
  bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) const;

  /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
  /// true, \p OpIdx will store the operand index of the known selected value.
  bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const;

  /// Replace an instruction with a G_FCONSTANT with value \p C.
  void replaceInstWithFConstant(MachineInstr &MI, double C) const;

  /// Replace an instruction with a G_FCONSTANT with value \p CFP.
  void replaceInstWithFConstant(MachineInstr &MI, ConstantFP *CFP) const;

  /// Replace an instruction with a G_CONSTANT with value \p C.
  void replaceInstWithConstant(MachineInstr &MI, int64_t C) const;

  /// Replace an instruction with a G_CONSTANT with value \p C.
  void replaceInstWithConstant(MachineInstr &MI, APInt C) const;

  /// Replace an instruction with a G_IMPLICIT_DEF.
  void replaceInstWithUndef(MachineInstr &MI) const;

  /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
  void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx) const;

  /// Delete \p MI and replace all of its uses with \p Replacement.
  void replaceSingleDefInstWithReg(MachineInstr &MI,
                                   Register Replacement) const;

  /// Replaces the shift amount in \p MI with ShiftAmt % BW.
  void applyFunnelShiftConstantModulo(MachineInstr &MI) const;

  /// Return true if \p MOP1 and \p MOP2 are register operands that are defined
  /// by equivalent instructions.
  bool matchEqualDefs(const MachineOperand &MOP1,
                      const MachineOperand &MOP2) const;

  /// Return true if \p MOP is defined by a G_CONSTANT or splat with a value
  /// equal to \p C.
  bool matchConstantOp(const MachineOperand &MOP, int64_t C) const;

  /// Return true if \p MOP is defined by a G_FCONSTANT or splat with a value
  /// exactly equal to \p C.
  bool matchConstantFPOp(const MachineOperand &MOP, double C) const;
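
  // Illustrative sketch (not upstream code): operand predicates like the ones
  // above are typically used as cheap guards before committing to a rewrite.
  // Here operand 2 of a hypothetical G_AND is required to be all-ones.
  //
  //   if (!matchConstantOp(MI.getOperand(2), -1))
  //     return false;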

  /// Checks if constant at \p ConstIdx is larger than \p MI's bitwidth.
  /// \param ConstIdx Index of the constant.
  bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const;

  /// Optimize (cond ? x : x) -> x
  bool matchSelectSameVal(MachineInstr &MI) const;

  /// Optimize (x op x) -> x
  bool matchBinOpSameVal(MachineInstr &MI) const;

  /// Check if operand \p OpIdx is zero.
  bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) const;

  /// Check if operand \p OpIdx is undef.
  bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const;

  /// Check if operand \p OpIdx is known to be a power of 2.
  bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
                                          unsigned OpIdx) const;

  /// Erase \p MI.
  void eraseInst(MachineInstr &MI) const;

  /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
  bool matchSimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo) const;
  void applySimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo) const;

  /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
  bool matchHoistLogicOpWithSameOpcodeHands(
      MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const;

  /// Replace \p MI with a series of instructions described in \p MatchInfo.
  void applyBuildInstructionSteps(MachineInstr &MI,
                                  InstructionStepsMatchInfo &MatchInfo) const;

  /// Match ashr (shl x, C), C -> sext_inreg (C)
  bool matchAshrShlToSextInreg(MachineInstr &MI,
                               std::tuple<Register, int64_t> &MatchInfo) const;
  void applyAshShlToSextInreg(MachineInstr &MI,
                              std::tuple<Register, int64_t> &MatchInfo) const;

  /// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
  bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// \return true if \p MI is a G_AND instruction whose operands are x and y
  /// where x & y == x or x & y == y. (E.g., one of the operands is an all-ones
  /// value.)
  ///
  /// \param [in] MI - The G_AND instruction.
  /// \param [out] Replacement - A register the G_AND should be replaced with on
  /// success.
  bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const;

  /// \return true if \p MI is a G_OR instruction whose operands are x and y
  /// where x | y == x or x | y == y. (E.g., one of the operands is an all-zeros
  /// value.)
  ///
  /// \param [in] MI - The G_OR instruction.
  /// \param [out] Replacement - A register the G_OR should be replaced with on
  /// success.
  bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const;

  /// \return true if \p MI is a G_SEXT_INREG that can be erased.
  bool matchRedundantSExtInReg(MachineInstr &MI) const;

  /// Combine inverting a result of a compare into the opposite cond code.
  bool matchNotCmp(MachineInstr &MI,
                   SmallVectorImpl<Register> &RegsToNegate) const;
  void applyNotCmp(MachineInstr &MI,
                   SmallVectorImpl<Register> &RegsToNegate) const;

  /// Fold (xor (and x, y), y) -> (and (not x), y)
  ///{
  bool matchXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo) const;
  void applyXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo) const;
  ///}

  /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
  bool matchPtrAddZero(MachineInstr &MI) const;
  void applyPtrAddZero(MachineInstr &MI) const;

  /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
  void applySimplifyURemByPow2(MachineInstr &MI) const;

  /// Push a binary operator through a select on constants.
  ///
  /// binop (select cond, K0, K1), K2 ->
  ///   select cond, (binop K0, K2), (binop K1, K2)
  bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const;
  void applyFoldBinOpIntoSelect(MachineInstr &MI,
                                const unsigned &SelectOpNo) const;

  bool matchCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo) const;

  void applyCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo) const;

  /// Match expression trees of the form
  ///
  /// \code
  ///  sN *a = ...
  ///  sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
  /// \endcode
  ///
  /// And check if the tree can be replaced with an M-bit load + possibly a
  /// bswap.
  bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const;


  bool matchExtractAllEltsFromBuildVector(
      MachineInstr &MI,
      SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;
  void applyExtractAllEltsFromBuildVector(
      MachineInstr &MI,
      SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;

  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// By default, it erases the instruction \p MI from the function.
  void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const;
  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// This variant does not erase \p MI after calling the build function.
  void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const;

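  // Illustrative sketch (not upstream code): most newer combines fill a
  // BuildFnTy closure during matching and let applyBuildFn run it; the
  // closure receives a MachineIRBuilder to create the replacement with.
  //
  //   BuildFnTy MatchInfo;
  //   if (matchCombineLoadWithAndMask(MI, MatchInfo))
  //     applyBuildFn(MI, MatchInfo); // Builds the zextload and erases MI.
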
  bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;
  void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;

  /// \returns true if a G_ICMP instruction \p MI can be replaced with a true
  /// or false constant based off of KnownBits information.
  bool matchICmpToTrueFalseKnownBits(MachineInstr &MI,
                                     int64_t &MatchInfo) const;

  /// \returns true if a G_ICMP \p MI can be replaced with its LHS based off of
  /// KnownBits information.
  bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
  bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
  bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI,
                                         BuildFnTy &MatchInfo) const;
  /// Match: and (lshr x, cst), mask -> ubfx x, cst, width
  bool matchBitfieldExtractFromAnd(MachineInstr &MI,
                                   BuildFnTy &MatchInfo) const;

  /// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
  bool matchBitfieldExtractFromShr(MachineInstr &MI,
                                   BuildFnTy &MatchInfo) const;

  /// Match: shr (and x, n), k -> ubfx x, pos, width
  bool matchBitfieldExtractFromShrAnd(MachineInstr &MI,
                                      BuildFnTy &MatchInfo) const;

  // Helpers for reassociation:
  bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS,
                                    BuildFnTy &MatchInfo) const;
  bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS,
                                          MachineInstr *RHS,
                                          BuildFnTy &MatchInfo) const;
  bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS,
                                    MachineInstr *RHS,
                                    BuildFnTy &MatchInfo) const;
  /// Reassociate pointer calculations with G_ADD involved, to allow better
  /// addressing mode usage.
  bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Try to reassociate the operands of a commutative binop.
  bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0,
                       Register Op1, BuildFnTy &MatchInfo) const;
  /// Reassociate commutative binary operations like G_ADD.
  bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Do constant folding when opportunities are exposed after MIR building.
  bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const;

  /// Do constant folding when opportunities are exposed after MIR building.
  bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const;

  /// Do constant FP folding when opportunities are exposed after MIR building.
  bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const;

  /// Constant fold G_FMA/G_FMAD.
  bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const;

  /// \returns true if it is possible to narrow the width of a scalar binop
  /// feeding a G_AND instruction \p MI.
  bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Given a G_UDIV \p MI or G_UREM \p MI expressing a divide by constant,
  /// return an expression that implements it by multiplying by a magic number.
  /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
  MachineInstr *buildUDivOrURemUsingMul(MachineInstr &MI) const;
  /// Combine G_UDIV or G_UREM by constant into a multiply by magic constant.
  bool matchUDivOrURemByConst(MachineInstr &MI) const;
  void applyUDivOrURemByConst(MachineInstr &MI) const;

  /// Given a G_SDIV \p MI or G_SREM \p MI expressing a signed divide by
  /// constant, return an expression that implements it by multiplying by a
  /// magic number. Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's
  /// Guide".
  MachineInstr *buildSDivOrSRemUsingMul(MachineInstr &MI) const;
  /// Combine G_SDIV or G_SREM by constant into a multiply by magic constant.
  bool matchSDivOrSRemByConst(MachineInstr &MI) const;
  void applySDivOrSRemByConst(MachineInstr &MI) const;

  /// Given a G_SDIV \p MI expressing a signed divide by a pow2 constant,
  /// return expressions that implement it by shifting.
  bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const;
  void applySDivByPow2(MachineInstr &MI) const;
  /// Given a G_UDIV \p MI expressing an unsigned divide by a pow2 constant,
  /// return expressions that implement it by shifting.
  void applyUDivByPow2(MachineInstr &MI) const;
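
  // Illustrative sketch (not upstream code): the power-of-two divide combine
  // shares one matcher but applies differently for signed and unsigned cases.
  //
  //   if (matchDivByPow2(MI, /*IsSigned=*/true))
  //     applySDivByPow2(MI); // Rewrites a signed divide by 2^k into shifts.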

  // G_UMULH x, (1 << c) -> x >> (bitwidth - c)
  bool matchUMulHToLShr(MachineInstr &MI) const;
  void applyUMulHToLShr(MachineInstr &MI) const;

  // Combine trunc(smin(smax(x, C1), C2)) -> truncssat_s(x)
  // or trunc(smax(smin(x, C2), C1)) -> truncssat_s(x).
  bool matchTruncSSatS(MachineInstr &MI, Register &MatchInfo) const;
  void applyTruncSSatS(MachineInstr &MI, Register &MatchInfo) const;

  // Combine trunc(smin(smax(x, 0), C)) -> truncssat_u(x)
  // or trunc(smax(smin(x, C), 0)) -> truncssat_u(x)
  // or trunc(umin(smax(x, 0), C)) -> truncssat_u(x).
  bool matchTruncSSatU(MachineInstr &MI, Register &MatchInfo) const;
  void applyTruncSSatU(MachineInstr &MI, Register &MatchInfo) const;

  // Combine trunc(umin(x, C)) -> truncusat_u(x).
  bool matchTruncUSatU(MachineInstr &MI, MachineInstr &MinMI) const;

  // Combine truncusat_u(fptoui(x)) -> fptoui_sat(x)

  /// Try to transform \p MI by using all of the above
  /// combine functions. Returns true if changed.
  bool tryCombine(MachineInstr &MI) const;

  /// Emit loads and stores that perform the given memcpy.
  /// Assumes \p MI is a G_MEMCPY_INLINE.
  /// TODO: implement dynamically sized inline memcpy,
  /// and rename: s/bool tryEmit/void emit/
  bool tryEmitMemcpyInline(MachineInstr &MI) const;

  /// Match:
  ///   (G_UMULO x, 2) -> (G_UADDO x, x)
  ///   (G_SMULO x, 2) -> (G_SADDO x, x)
  bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Match:
  ///   (G_*MULO x, 0) -> 0 + no carry out
  bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Match:
  ///   (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
  ///   (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
  bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Transform (fadd x, fneg(y)) -> (fsub x, y)
  ///           (fadd fneg(x), y) -> (fsub y, x)
  ///           (fsub x, fneg(y)) -> (fadd x, y)
  ///           (fmul fneg(x), fneg(y)) -> (fmul x, y)
  ///           (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
  ///           (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
  ///           (fma fneg(x), fneg(y), z) -> (fma x, y, z)
  bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;
  void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;

  bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
                           bool &HasFMAD, bool &Aggressive,
                           bool CanReassociate = false) const;

  /// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
  ///           (fadd (fmul x, y), z) -> (fmad x, y, z)
  bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI,
                                       BuildFnTy &MatchInfo) const;

  /// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
  ///           (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
  bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI,
                                            BuildFnTy &MatchInfo) const;

  /// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
  ///           (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
  bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI,
                                          BuildFnTy &MatchInfo) const;

  // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
  //        -> (fma x, y, (fma (fpext u), (fpext v), z))
  //           (fadd (fmad x, y, (fpext (fmul u, v))), z)
  //        -> (fmad x, y, (fmad (fpext u), (fpext v), z))
  bool
  matchCombineFAddFpExtFMAFMulToFMadOrFMA(MachineInstr &MI,
                                          BuildFnTy &MatchInfo) const;

  /// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
  ///           (fsub (fmul x, y), z) -> (fmad x, y, -z)
  bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI,
                                       BuildFnTy &MatchInfo) const;

  /// Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  ///           (fsub (fneg (fmul x, y)), z) -> (fmad (fneg x), y, (fneg z))
  bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI,
                                           BuildFnTy &MatchInfo) const;

  /// Transform (fsub (fpext (fmul x, y)), z)
  ///        -> (fma (fpext x), (fpext y), (fneg z))
  ///           (fsub (fpext (fmul x, y)), z)
  ///        -> (fmad (fpext x), (fpext y), (fneg z))
  bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI,
                                            BuildFnTy &MatchInfo) const;

  /// Transform (fsub (fpext (fneg (fmul x, y))), z)
  ///        -> (fneg (fma (fpext x), (fpext y), z))
  ///           (fsub (fpext (fneg (fmul x, y))), z)
  ///        -> (fneg (fmad (fpext x), (fpext y), z))
  bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI,
                                                BuildFnTy &MatchInfo) const;

  bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const;

  /// Transform G_ADD(x, G_SUB(y, x)) to y.
  /// Transform G_ADD(G_SUB(y, x), x) to y.
  bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const;

  bool matchBuildVectorIdentityFold(MachineInstr &MI,
                                    Register &MatchInfo) const;
  bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const;
  bool matchTruncLshrBuildVectorFold(MachineInstr &MI,
                                     Register &MatchInfo) const;

  /// Transform:
  ///   (x + y) - y -> x
  ///   (x + y) - x -> y
  ///   x - (y + x) -> 0 - y
  ///   x - (x + z) -> 0 - z
  bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// \returns true if it is possible to simplify a select instruction \p MI
  /// to a min/max instruction of some sort.
  bool matchSimplifySelectToMinMax(MachineInstr &MI,
                                   BuildFnTy &MatchInfo) const;

  /// Transform:
  ///   (X + Y) == X -> Y == 0
  ///   (X - Y) == X -> Y == 0
  ///   (X ^ Y) == X -> Y == 0
  ///   (X + Y) != X -> Y != 0
  ///   (X - Y) != X -> Y != 0
  ///   (X ^ Y) != X -> Y != 0
  bool matchRedundantBinOpInEquality(MachineInstr &MI,
                                     BuildFnTy &MatchInfo) const;

  /// Match shifts greater than or equal to the range (the bitwidth of the
  /// result datatype, or the effective bitwidth of the source value).
  bool matchShiftsTooBig(MachineInstr &MI,
                         std::optional<int64_t> &MatchInfo) const;

  /// Match constant LHS ops that should be commuted.
  bool matchCommuteConstantToRHS(MachineInstr &MI) const;

  /// Combine sext of trunc.
  bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;

  /// Combine zext of trunc.
  bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;

  /// Combine zext nneg to sext.
  bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo) const;

  /// Match constant LHS FP ops that should be commuted.
  bool matchCommuteFPConstantToRHS(MachineInstr &MI) const;

  // Given a binop \p MI, commute operands 1 and 2.

  /// Combine select to integer min/max.
  bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const;

  /// Transform (neg (min/max x, (neg x))) into (max/min x, (neg x)).
  bool matchSimplifyNegMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Combine selects.
  bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Combine ands.
  bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Combine ors.
  bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// trunc (binop X, C) --> binop (trunc X, trunc C).
  bool matchNarrowBinop(const MachineInstr &TruncMI,
                        const MachineInstr &BinopMI,
                        BuildFnTy &MatchInfo) const;

  bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo) const;

  /// Combine addos.
  bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Combine extract vector element.
  bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Combine extract vector element with a build vector on the vector
  /// register.
  bool matchExtractVectorElementWithBuildVector(const MachineInstr &MI,
                                                const MachineInstr &MI2,
                                                BuildFnTy &MatchInfo) const;

  /// Combine extract vector element with a build vector trunc on the vector
  /// register.
  bool
  matchExtractVectorElementWithBuildVectorTrunc(const MachineOperand &MO,
                                                BuildFnTy &MatchInfo) const;

  /// Combine extract vector element with a shuffle vector on the vector
  /// register.
  bool matchExtractVectorElementWithShuffleVector(const MachineInstr &MI,
                                                  const MachineInstr &MI2,
                                                  BuildFnTy &MatchInfo) const;

  /// Combine extract vector element with an insert vector element on the
  /// vector register and different indices.
  bool
  matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO,
                                                BuildFnTy &MatchInfo) const;

  /// Remove references to rhs if it is undef
  bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not
  /// reference a.
  bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;

  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// By default, it erases the instruction def'd on \p MO from the function.
  void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const;

  /// Match FPOWI if it's safe to extend it into a series of multiplications.
  bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const;

  /// Expands FPOWI into a series of multiplications and a division if the
  /// exponent is negative.
  void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const;

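  // Illustrative sketch (not upstream code): the FPOWI expansion is split
  // between a safety check and the actual expansion. Exponent is assumed to
  // have been read from the constant exponent operand by the calling rule.
  //
  //   if (matchFPowIExpansion(MI, Exponent))
  //     applyExpandFPowI(MI, Exponent); // Emits repeated fmul (+ fdiv if < 0).
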
  /// Combine insert vector element OOB.
  bool matchInsertVectorElementOOB(MachineInstr &MI,
                                   BuildFnTy &MatchInfo) const;

  bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI,
                                             BuildFnTy &MatchInfo) const;

  bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;

  bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;

  bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;

  bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;

  /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
  bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI,
                          BuildFnTy &MatchInfo) const;

  bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI,
                         BuildFnTy &MatchInfo) const;
  bool matchFoldAPlusC1MinusC2(const MachineInstr &MI,
                               BuildFnTy &MatchInfo) const;

  bool matchFoldC2MinusAPlusC1(const MachineInstr &MI,
                               BuildFnTy &MatchInfo) const;

  bool matchFoldAMinusC1MinusC2(const MachineInstr &MI,
                                BuildFnTy &MatchInfo) const;

  bool matchFoldC1Minus2MinusC2(const MachineInstr &MI,
                                BuildFnTy &MatchInfo) const;

  // fold ((A-C1)+C2) -> (A+(C2-C1))
  bool matchFoldAMinusC1PlusC2(const MachineInstr &MI,
                               BuildFnTy &MatchInfo) const;

  bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI,
                     BuildFnTy &MatchInfo) const;

  bool matchCastOfBuildVector(const MachineInstr &CastMI,
                              const MachineInstr &BVMI,
                              BuildFnTy &MatchInfo) const;

  bool matchCanonicalizeICmp(const MachineInstr &MI,
                             BuildFnTy &MatchInfo) const;
  bool matchCanonicalizeFCmp(const MachineInstr &MI,
                             BuildFnTy &MatchInfo) const;

  // unmerge_values(anyext(build vector)) -> build vector(anyext)
  bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI,
                                           BuildFnTy &MatchInfo) const;

  // merge_values(_, undef) -> anyext
  bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo) const;

  // merge_values(_, zero) -> zext
  bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo) const;

  // overflow sub
  bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const;

  // (sext_inreg (sext_inreg x, K0), K1)
                                         BuildFnTy &MatchInfo) const;

private:
  /// Checks for legality of an indexed variant of \p LdSt.
  bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a post-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPostIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
                              Register &Offset, bool &RematOffset) const;

  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a pre-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPreIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
                             Register &Offset) const;

  /// Helper function for matchLoadOrCombine. Searches for Registers
  /// which may have been produced by a load instruction + some arithmetic.
  ///
  /// \param [in] Root - The search root.
  ///
  /// \returns The Registers found during the search.
  std::optional<SmallVector<Register, 8>>
  findCandidatesForLoadOrCombine(const MachineInstr *Root) const;

  /// Helper function for matchLoadOrCombine.
  ///
  /// Checks if every register in \p RegsToVisit is defined by a load
  /// instruction + some arithmetic.
  ///
  /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
  /// at to the index of the load.
  /// \param [in] MemSizeInBits - The number of bits each load should produce.
  ///
  /// \returns On success, a 3-tuple containing the lowest-index load found,
  /// the lowest index, and the last load in the sequence.
  std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
  findLoadOffsetsForLoadOrCombine(
      SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
      const SmallVector<Register, 8> &RegsToVisit,
      const unsigned MemSizeInBits) const;

  /// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
  /// a re-association of its operands would break an existing legal addressing
  /// mode that the address computation currently represents.
  bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd) const;

  /// Behavior when a floating point min/max is given one NaN and one
  /// non-NaN as input.
  enum class SelectPatternNaNBehaviour {
    NOT_APPLICABLE = 0, /// NaN behavior not applicable.
    RETURNS_NAN,        /// Given one NaN input, returns the NaN.
    RETURNS_OTHER,      /// Given one NaN input, returns the non-NaN.
    RETURNS_ANY         /// Given one NaN input, can return either (or both
                        /// operands are known non-NaN).
  };

  /// \returns which of \p LHS and \p RHS would be the result of a non-equality
  /// floating point comparison where one of \p LHS and \p RHS may be NaN.
  ///
  /// If both \p LHS and \p RHS may be NaN, returns
  /// SelectPatternNaNBehaviour::NOT_APPLICABLE.
  SelectPatternNaNBehaviour
  computeRetValAgainstNaN(Register LHS, Register RHS,
                          bool IsOrderedComparison) const;

  /// Determines the floating point min/max opcode which should be used for
  /// a G_SELECT fed by a G_FCMP with predicate \p Pred.
  ///
  /// \returns 0 if this G_SELECT should not be combined to a floating point
  /// min or max. If it should be combined, returns one of
  ///
  /// * G_FMAXNUM
  /// * G_FMAXIMUM
  /// * G_FMINNUM
  /// * G_FMINIMUM
  ///
  /// Helper function for matchFPSelectToMinMax.
  unsigned getFPMinMaxOpcForSelect(CmpInst::Predicate Pred, LLT DstTy,
                                   SelectPatternNaNBehaviour VsNaNRetVal) const;

  /// Handle floating point cases for matchSimplifySelectToMinMax.
  ///
  /// E.g.
  ///
  ///   select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
  ///   select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
  bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
                             Register FalseVal, BuildFnTy &MatchInfo) const;

  /// Try to fold selects to logical operations.
  bool tryFoldBoolSelectToLogic(GSelect *Select, BuildFnTy &MatchInfo) const;

  bool tryFoldSelectOfConstants(GSelect *Select, BuildFnTy &MatchInfo) const;

  bool isOneOrOneSplat(Register Src, bool AllowUndefs) const;
  bool isZeroOrZeroSplat(Register Src, bool AllowUndefs) const;
  bool isConstantSplatVector(Register Src, int64_t SplatValue,
                             bool AllowUndefs) const;
  bool isConstantOrConstantVectorI(Register Src) const;

  std::optional<APInt> getConstantOrConstantSplatVector(Register Src) const;

  /// Fold (icmp Pred1 V1, C1) && (icmp Pred2 V2, C2)
  /// or   (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
  /// into a single comparison using range-based reasoning.
  bool tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
                                      BuildFnTy &MatchInfo) const;

  // Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
  bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo) const;

  bool isCastFree(unsigned Opcode, LLT ToTy, LLT FromTy) const;

  bool constantFoldICmp(const GICmp &ICmp, const GIConstant &LHSCst,
                        const GIConstant &RHSCst, BuildFnTy &MatchInfo) const;
  bool constantFoldFCmp(const GFCmp &FCmp, const GFConstant &LHSCst,
                        const GFConstant &RHSCst, BuildFnTy &MatchInfo) const;
};
} // namespace llvm

#endif
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
AMDGPU Register Bank Select
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file defines the DenseMap class.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Implement a low-level type suitable for MachineInstr level instruction selection.
mir Rename Register Operands
Register Reg
MachineInstr unsigned OpIdx
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchFoldC2MinusAPlusC1(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match expression trees of the form.
bool tryCombine(MachineInstr &MI) const
Try to transform MI by using all of the above combine functions.
const RegisterBank * getRegBank(Register Reg) const
Get the register bank of Reg.
void applyPtrAddZero(MachineInstr &MI) const
bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2) const
Return true if MOP1 and MOP2 are register operands are defined by equivalent instructions.
void applyUDivOrURemByConst(MachineInstr &MI) const
bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchSelectSameVal(MachineInstr &MI) const
Optimize (cond ? x : x) -> x.
bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*ADDE x, y, 0) -> (G_*ADDO x, y) (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
bool matchShuffleToExtract(MachineInstr &MI) const
bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width.
bool matchFoldAMinusC1PlusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
void applySimplifyURemByPow2(MachineInstr &MI) const
Combine G_UREM x, (known power of 2) to an add and bitmasking.
bool matchCombineUnmergeZExtToZExt(MachineInstr &MI) const
Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0.
bool matchPtrAddZero(MachineInstr &MI) const
}
void applyCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
void applyXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally, bool &HasFMAD, bool &Aggressive, bool CanReassociate=false) const
bool matchFoldAPlusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
void applyCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
bool matchShiftsTooBig(MachineInstr &MI, std::optional< int64_t > &MatchInfo) const
Match shifts greater or equal to the range (the bitwidth of the result datatype, or the effective bit...
bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) (fadd (fpext (fmul x,...
bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
void applyCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement) const
Delete MI and replace all of its uses with Replacement.
void applyCombineShuffleToBuildVector(MachineInstr &MI) const
bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine zext of trunc.
bool matchCombineExtractedVectorLoad(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed load.
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const
MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp, Register ToReg) const
Replace a single register operand with a new register and inform the observer of the changes.
bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate commutative binary operations like G_ADD.
bool matchExtractVectorElementWithBuildVectorTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine extract vector element with a build vector trunc on the vector register.
void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCommuteConstantToRHS(MachineInstr &MI) const
Match constant LHS ops that should be commuted.
const DataLayout & getDataLayout() const
bool matchBinOpSameVal(MachineInstr &MI) const
Optimize (x op x) -> x.
bool matchSimplifyNegMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
Tranform (neg (min/max x, (neg x))) into (max/min x, (neg x)).
bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM when their source operands are iden...
bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine zext nneg to sext.
void applyUMulHToLShr(MachineInstr &MI) const
void applyNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
Fold (shift (shift base, x), y) -> (shift base (x+y))
void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
bool matchAllExplicitUsesAreUndef(MachineInstr &MI) const
Return true if all register explicit use operands on MI are defined by a G_IMPLICIT_DEF.
bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI precedes UseMI or they are the same instruction.
bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
bool matchTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
const TargetLowering & getTargetLowering() const
bool matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine extract vector element with a insert vector element on the vector register and different indi...
bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const
Remove references to rhs if it is undef.
void applyBuildInstructionSteps(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Replace MI with a series of instructions described in MatchInfo.
void applySDivByPow2(MachineInstr &MI) const
void applySimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
void applyUDivByPow2(MachineInstr &MI) const
Given an G_UDIV MI expressing an unsigned divided by a pow2 constant, return expressions that impleme...
bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ors.
bool matchLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo, MachineInstr &ShiftMI) const
Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
bool matchInsertVectorElementOOB(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine insert vector element OOB.
bool matchSimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
Return true if MI is a G_ADD which can be simplified to a G_SUB.
void replaceInstWithConstant(MachineInstr &MI, int64_t C) const
Replace an instruction with a G_CONSTANT with value C.
bool tryEmitMemcpyInline(MachineInstr &MI) const
Emit loads and stores that perform the given memcpy.
bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z)) (fsub (fpext (fmul x,...
void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const
Checks if constant at ConstIdx is larger than MI 's bitwidth.
GISelValueTracking * getValueTracking() const
void applyCombineCopy(MachineInstr &MI) const
bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine extract vector element.
bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine sext of trunc.
bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const
Transform G_ADD(x, G_SUB(y, x)) to y.
bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData) const
bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fmul x, y), z) -> (fma x, y, -z) (fsub (fmul x, y), z) -> (fmad x,...
bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z)) (fadd (fmad x,...
bool matchSextTruncSextLoad(MachineInstr &MI) const
bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const
Fold away a merge of an unmerge of the corresponding values.
bool matchCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const
Given an G_SDIV MI expressing a signed divided by a pow2 constant, return expressions that implements...
bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd x, fneg(y)) -> (fsub x, y) (fadd fneg(x), y) -> (fsub y, x) (fsub x,...
bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match (and (load x), mask) -> zextload x.
bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fmul x, y), z) -> (fma x, y, z) (fadd (fmul x, y), z) -> (fmad x,...
bool matchCombineCopy(MachineInstr &MI) const
bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
bool matchXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
Fold (xor (and x, y), y) -> (and (not x), y) {.
bool matchCombineShuffleVector(MachineInstr &MI, SmallVectorImpl< Register > &Ops) const
Check if the G_SHUFFLE_VECTOR MI can be replaced by a concat_vectors.
void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI, BuildFnTy &MatchInfo) const
Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
bool matchCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y) Transform G_ADD y,...
void replaceInstWithFConstant(MachineInstr &MI, double C) const
Replace an instruction with a G_FCONSTANT with value C.
bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchFunnelShiftToRotate(MachineInstr &MI) const
Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
bool matchRedundantSExtInReg(MachineInstr &MI) const
void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const
Replace the opcode in instruction with a new opcode and inform the observer of the changes.
void applyFunnelShiftConstantModulo(MachineInstr &MI) const
Replaces the shift amount in MI with ShiftAmt % BW.
bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is zero.
bool matchFoldC1Minus2MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineShlOfExtend(MachineInstr &MI, const RegisterImmPair &MatchData) const
void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize, GISelValueTracking *VT=nullptr, MachineDominatorTree *MDT=nullptr, const LegalizerInfo *LI=nullptr)
bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not reference a.
bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
Transform a multiply by a power-of-2 value to a left shift.
bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
bool matchCombineUnmergeUndef(MachineInstr &MI, std::function< void(MachineIRBuilder &)> &MatchInfo) const
Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo) const
SelectOperand is the operand in binary operator MI that is the select to fold.
bool matchFoldAMinusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_UMULO x, 2) -> (G_UADDO x, x) (G_SMULO x, 2) -> (G_SADDO x, x)
bool matchCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
void applySextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
bool tryCombineCopy(MachineInstr &MI) const
If MI is COPY, try to combine it.
bool matchTruncUSatU(MachineInstr &MI, MachineInstr &MinMI) const
bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI, BuildFnTy &MatchInfo) const
bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate pointer calculations with G_ADD involved, to allow better addressing mode usage.
bool matchCanonicalizeFCmp(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineShuffleVector(MachineInstr &MI, const ArrayRef< Register > Ops) const
Replace MI with a concat_vectors with Ops.
bool matchUndefShuffleVectorMask(MachineInstr &MI) const
Return true if a G_SHUFFLE_VECTOR instruction MI has an undef mask.
bool matchAnyExplicitUseIsUndef(MachineInstr &MI) const
Return true if any explicit use operand on MI is defined by a G_IMPLICIT_DEF.
bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is known to be a power of 2.
bool matchCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
If MI is G_CONCAT_VECTORS, try to combine it.
bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) const
Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
bool matchExtractVectorElementWithShuffleVector(const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) const
Combine extract vector element with a shuffle vector on the vector register.
bool matchExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
LLVMContext & getContext() const
void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const
bool matchNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
Combine inverting a result of a compare into the opposite cond code.
bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
Match sext_inreg(load p), imm -> sextload p.
bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine select to integer min/max.
bool matchCombineShuffleToBuildVector(MachineInstr &MI) const
Replace MI with a build_vector.
void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst) const
Transform fp_instr(cst) to constant result of the fp operation.
bool isLegal(const LegalityQuery &Query) const
bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo) const
bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0, Register Op1, BuildFnTy &MatchInfo) const
Try to reassociate to reassociate operands of a commutative binop.
void eraseInst(MachineInstr &MI) const
Erase MI.
bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const
Do constant FP folding when opportunities are exposed after MIR building.
void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
bool matchUndefStore(MachineInstr &MI) const
Return true if a G_STORE instruction MI is storing an undef value.
MachineRegisterInfo & MRI
void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) const
Transform PtrToInt(IntToPtr(x)) to x.
void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
bool matchConstantFPOp(const MachineOperand &MOP, double C) const
Return true if MOP is defined by a G_FCONSTANT or splat with a value exactly equal to C.
MachineInstr * buildUDivOrURemUsingMul(MachineInstr &MI) const
Given a G_UDIV MI or G_UREM MI expressing a divide by constant, return an expression that implements...
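This is the well-known divide-by-constant strengthening: x / d becomes a multiply-high by an approximation of 2^k / d followed by a shift, and a remainder can then be recovered as x - (x / d) * d. A standalone scalar illustration of the same arithmetic follows (divisor 3 chosen for brevity; this is not the helper's code, just the identity it relies on).

#include <cassert>
#include <cstdint>

// Unsigned divide-by-3 via multiply-high: 0xAAAAAAAB == ceil(2^33 / 3),
// so (x * 0xAAAAAAAB) >> 33 == x / 3 for every 32-bit x.
static uint32_t udiv3(uint32_t X) {
  return static_cast<uint32_t>(
      (static_cast<uint64_t>(X) * 0xAAAAAAABull) >> 33);
}

static uint32_t urem3(uint32_t X) {
  // The remainder falls out as x - (x / d) * d.
  return X - udiv3(X) * 3;
}

int main() {
  for (uint32_t X : {0u, 1u, 2u, 3u, 7u, 0xFFFFFFFFu})
    assert(udiv3(X) == X / 3 && urem3(X) == X % 3);
  return 0;
}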
void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const
Push a binary operator through a select on constants.
bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount) const
bool tryCombineExtendingLoads(MachineInstr &MI) const
If MI is extend that consumes the result of a load, try to combine it.
bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const
bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo) const
bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (and x, n), k -> ubfx x, pos, width.
void applyTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
bool tryCombineShuffleVector(MachineInstr &MI) const
Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
void applyRotateOutOfRange(MachineInstr &MI) const
bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
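This combine reports its result through InstructionStepsMatchInfo (see the entries near the end of this list): a sequence of opcodes, each paired with lambdas that add its operands, which the apply step then materializes as real instructions. A hedged sketch of filling one in is shown below; the zext/and example, the register names, and the helper function are illustrative, not taken from this header.

// Hedged sketch: describe "(zext x) & (zext y) -> zext (x & y)" as two
// build steps. Dst, X, Y and NarrowAnd are placeholders that a real match
// routine would have pulled out of the matched instructions.
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

static void describeHoistedAnd(Register Dst, Register X, Register Y,
                               Register NarrowAnd,
                               InstructionStepsMatchInfo &MatchInfo) {
  OperandBuildSteps AndOps = {
      [=](MachineInstrBuilder &MIB) { MIB.addDef(NarrowAnd); },
      [=](MachineInstrBuilder &MIB) { MIB.addUse(X); },
      [=](MachineInstrBuilder &MIB) { MIB.addUse(Y); }};
  OperandBuildSteps ZextOps = {
      [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
      [=](MachineInstrBuilder &MIB) { MIB.addUse(NarrowAnd); }};
  MatchInfo = InstructionStepsMatchInfo(
      {InstructionBuildSteps(TargetOpcode::G_AND, AndOps),
       InstructionBuildSteps(TargetOpcode::G_ZEXT, ZextOps)});
}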
bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: and (lshr x, cst), mask -> ubfx x, cst, width.
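The underlying scalar identity for the bitfield-extract combines: shifting right to the field's position and masking to its width is exactly an unsigned bitfield extract, which G_UBFX computes directly. A small standalone illustration with names of our own choosing:

#include <cassert>
#include <cstdint>

// (x >> Pos) & ((1 << Width) - 1) extracts Width bits starting at Pos.
static uint32_t ubfx32(uint32_t X, unsigned Pos, unsigned Width) {
  assert(Width > 0 && Width < 32 && Pos + Width <= 32);
  uint32_t Mask = (1u << Width) - 1u;
  return (X >> Pos) & Mask;
}

int main() {
  // Extract bits [11:4] of 0xABCD1234 -> 0x23.
  assert(ubfx32(0xABCD1234u, 4, 8) == 0x23);
  return 0;
}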
bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
bool matchNarrowBinop(const MachineInstr &TruncMI, const MachineInstr &BinopMI, BuildFnTy &MatchInfo) const
trunc (binop X, C) --> binop (trunc X, trunc C).
bool matchUndefSelectCmp(MachineInstr &MI) const
Return true if a G_SELECT instruction MI has an undef comparison.
bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
void replaceInstWithUndef(MachineInstr &MI) const
Replace an instruction with a G_IMPLICIT_DEF.
bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (X + Y) == X -> Y == 0; (X - Y) == X -> Y == 0; (X ^ Y) == X -> Y == 0; (X + Y) !...
bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
If a brcond's true block is not the fallthrough, make it so by inverting the condition and swapping o...
bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine addos.
void applyAshShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine selects.
bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
bool matchRotateOutOfRange(MachineInstr &MI) const
void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const
Expands FPOWI into a series of multiplications and a division if the exponent is negative.
void setRegBank(Register Reg, const RegisterBank *RegBank) const
Set the register bank of Reg.
bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const
Return true if a G_SELECT instruction MI has a constant comparison.
bool matchCommuteFPConstantToRHS(MachineInstr &MI) const
Match constant LHS FP ops that should be commuted.
void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const
bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const
void applyTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y),...
bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
void applyCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
bool matchConstantOp(const MachineOperand &MOP, int64_t C) const
Return true if MOP is defined by a G_CONSTANT or splat with a value equal to C.
const LegalizerInfo * LI
void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
bool matchUMulHToLShr(MachineInstr &MI) const
MachineDominatorTree * MDT
MachineIRBuilder & getBuilder() const
void applyFunnelShiftToRotate(MachineInstr &MI) const
bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchTruncUSatUToFPTOUISat(MachineInstr &MI, MachineInstr &SrcMI) const
const RegisterBankInfo * RBI
bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*MULO x, 0) -> 0 + no carry out.
GISelValueTracking * VT
bool matchCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
Transform G_UNMERGE Constant -> Constant1, Constant2, ...
void applyShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
const TargetRegisterInfo * TRI
bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const
bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI dominates UseMI.
GISelChangeObserver & Observer
void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
Transform trunc (shl x, K) to shl (trunc x), K if K < VT.getScalarSizeInBits().
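In scalar terms the shift and the truncation commute whenever the shift amount is smaller than the narrow type's width, because the bits on which the two orderings differ are exactly the ones the truncation drops. A plain-integer sketch of that identity (just the arithmetic, not MIR):

#include <cassert>
#include <cstdint>

// trunc16(shl32(x, K)) == shl16(trunc16(x), K) whenever K < 16: both sides
// agree modulo 2^16, and the truncation discards everything above that.
int main() {
  for (uint32_t X : {0x12345678u, 0xFFFF0001u, 0x00008001u}) {
    for (unsigned K = 0; K < 16; ++K) {
      uint16_t Wide = static_cast<uint16_t>(X << K);           // trunc (shl x, K)
      uint16_t Narrow =
          static_cast<uint16_t>(static_cast<uint16_t>(X) << K); // shl (trunc x), K
      assert(Wide == Narrow);
    }
  }
  return 0;
}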
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize, unsigned &ShiftVal) const
Reduce a shift by a constant to an unmerge and a shift on a half sized type.
bool matchUDivOrURemByConst(MachineInstr &MI) const
Combine G_UDIV or G_UREM by constant into a multiply by magic constant.
bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ands.
bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchRedundantSextInReg(MachineInstr &Root, MachineInstr &Other, BuildFnTy &MatchInfo) const
bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const
Constant fold G_FMA/G_FMAD.
bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z)); (fsub (fneg (fmul,...
bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg) const
Transform zext(trunc(x)) to x.
bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is undef.
void applyLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo) const
bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0) const
Optimize memcpy intrinsics et al., e.g.
bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI, BuildFnTy &MatchInfo) const
void applySDivOrSRemByConst(MachineInstr &MI) const
void applyShuffleToExtract(MachineInstr &MI) const
MachineInstr * buildSDivOrSRemUsingMul(MachineInstr &MI) const
Given a G_SDIV MI or G_SREM MI expressing a signed divide by constant, return an expression that imp...
bool isLegalOrHasWidenScalar(const LegalityQuery &Query) const
bool matchCanonicalizeICmp(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchCastOfBuildVector(const MachineInstr &CastMI, const MachineInstr &BVMI, BuildFnTy &MatchInfo) const
bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (x + y) - y -> x; (x + y) - x -> y; x - (y + x) -> 0 - y; x - (x + z) -> 0 - z.
bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo) const
bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0.
bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) const
Transform anyext(trunc(x)) to x.
void applyExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
MachineIRBuilder & Builder
void applyCommuteBinOpOperands(MachineInstr &MI) const
void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx) const
Delete MI and replace all of its uses with its OpIdx-th operand.
void applySextTruncSextLoad(MachineInstr &MI) const
const MachineFunction & getMachineFunction() const
bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtractVectorElementWithBuildVector(const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) const
Combine extract vector element with a build vector on the vector register.
bool matchSDivOrSRemByConst(MachineInstr &MI) const
Combine G_SDIV or G_SREM by constant into a multiply by magic constant.
void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal) const
bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI, BuildFnTy &MatchInfo) const
bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const
Match FPOWI if it's safe to expand it into a series of multiplications.
void applyCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
bool matchCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
void applyCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
bool matchAshrShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
Match ashr (shl x, C), C -> sext_inreg (C)
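The shl/ashr pair by the same amount is the classic idiom for sign-extending from an arbitrary bit position, which G_SEXT_INREG expresses directly. A scalar sketch of the equivalence, assuming a two's-complement arithmetic right shift as on LLVM-supported targets (the function name is ours):

#include <cassert>
#include <cstdint>

// sext_inreg of the low 8 bits of a 32-bit value, written as the shl/ashr
// pair that this combine canonicalizes back into G_SEXT_INREG.
static int32_t sextInReg8(int32_t X) {
  // Shift left as unsigned to avoid signed-overflow issues, then
  // arithmetic-shift right to replicate bit 7 into the upper bits.
  return static_cast<int32_t>(static_cast<uint32_t>(X) << 24) >> 24;
}

int main() {
  assert(sextInReg8(0x0000007F) == 0x7F);  // positive stays positive
  assert(sextInReg8(0x00000080) == -128);  // bit 7 set -> negative
  assert(sextInReg8(0x00000180) == -128);  // bits above 7 are ignored
  return 0;
}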
void applyCombineUnmergeZExtToZExt(MachineInstr &MI) const
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:277
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:63
Represent a G_FCMP.
A floating-point-like constant.
Definition Utils.h:689
Represent a G_ICMP.
An integer-like constant.
Definition Utils.h:650
Abstract class that contains various methods for clients to notify about changes.
Represents any type of generic load or store.
Represents a logical binary operation.
Represents a G_PTR_ADD.
Represents a G_SELECT.
Represents a G_ZEXTLOAD.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Helper class to build MachineInstr.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Holds all the information related to register banks.
This class implements the register bank concept.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
std::function< void(MachineIRBuilder &)> BuildFnTy
SmallVector< std::function< void(MachineInstrBuilder &)>, 4 > OperandBuildSteps
InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
InstructionBuildSteps()=default
OperandBuildSteps OperandFns
Operands to be added to the instruction.
unsigned Opcode
The opcode for the produced instruction.
InstructionStepsMatchInfo(std::initializer_list< InstructionBuildSteps > InstrsToBuild)
SmallVector< InstructionBuildSteps, 2 > InstrsToBuild
Describes instructions to be built during a combine.
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
const RegisterBank * Bank